本篇内容介绍了“PostgreSQL的ExecHashJoin依赖其他函数的实现逻辑是什么”的有关知识,在实际案例的操作过程中,不少人都会遇到这样的困境,接下来就让小编带领大家学习一下如何处理这些情况吧!希望大家仔细阅读,能够学有所成!

一、数据结构

JoinState
Hash/NestLoop/Merge Join的基类

/* ----------------
 *   JoinState information
 *
 *      Superclass for state nodes of join plans.
 *      Base class shared by Hash/NestLoop/Merge Join executor state.
 * ----------------
 */
typedef struct JoinState
{
    PlanState   ps;             /* base class: PlanState */
    JoinType    jointype;       /* kind of join (inner/left/semi/anti/...) */
    bool        single_match;   /* True if we should skip to next outer tuple
                                 * after finding one inner match (e.g. for
                                 * semijoin / inner_unique plans) */
    ExprState  *joinqual;       /* JOIN quals (in addition to ps.qual) */
} JoinState;

HashJoinState
Hash Join运行期状态结构体

/* these structs are defined in executor/hashjoin.h: */
typedef struct HashJoinTupleData *HashJoinTuple;
typedef struct HashJoinTableData *HashJoinTable;

/*
 * HashJoinState - run-time state of a Hash Join node.
 */
typedef struct HashJoinState
{
    JoinState   js;             /* base class; its first field is NodeTag */
    ExprState  *hashclauses;    /* hash join condition expression */
    List       *hj_OuterHashKeys;   /* outer-side hash keys; list of ExprState nodes */
    List       *hj_InnerHashKeys;   /* inner-side hash keys; list of ExprState nodes */
    List       *hj_HashOperators;   /* list of operator OIDs */
    HashJoinTable hj_HashTable;     /* the hash table */
    uint32      hj_CurHashValue;    /* current hash value */
    int         hj_CurBucketNo;     /* current regular bucket number */
    int         hj_CurSkewBucketNo; /* current skew bucket number */
    HashJoinTuple hj_CurTuple;      /* current tuple */
    TupleTableSlot *hj_OuterTupleSlot;      /* outer relation slot */
    TupleTableSlot *hj_HashTupleSlot;       /* hash tuple slot */
    TupleTableSlot *hj_NullOuterTupleSlot;  /* dummy outer slot for outer joins */
    TupleTableSlot *hj_NullInnerTupleSlot;  /* dummy inner slot for outer joins */
    TupleTableSlot *hj_FirstOuterTupleSlot;
    int         hj_JoinState;       /* current state of the HJ state machine */
    bool        hj_MatchedOuter;    /* has current outer tuple found a match? */
    bool        hj_OuterNotEmpty;   /* is outer relation known non-empty? */
} HashJoinState;

HashJoinTable
Hash表数据结构

/*
 * HashJoinTableData - the in-memory hash table used by Hash Join.
 */
typedef struct HashJoinTableData
{
    int         nbuckets;       /* # buckets in the in-memory hash table */
    int         log2_nbuckets;  /* its log2 (nbuckets must be a power of 2) */

    int         nbuckets_original;      /* # buckets when starting the first
                                         * hash */
    int         nbuckets_optimal;       /* optimal # buckets (per batch) */
    int         log2_nbuckets_optimal;  /* log2(nbuckets_optimal) */

    /* buckets[i] is head of list of tuples in i'th in-memory bucket */
    union
    {
        /* unshared array is per-batch storage, as are all the tuples */
        struct HashJoinTupleData **unshared;
        /* shared array is per-query DSA area, as are all the tuples */
        dsa_pointer_atomic *shared;
    }           buckets;

    bool        keepNulls;      /* true to store unmatchable NULL tuples */

    bool        skewEnabled;    /* are we using skew optimization? */
    HashSkewBucket **skewBucket;    /* hashtable of skew buckets */
    int         skewBucketLen;  /* size of skewBucket array (a power of 2!) */
    int         nSkewBuckets;   /* number of active skew buckets */
    int        *skewBucketNums; /* array indexes of active skew buckets */

    int         nbatch;         /* number of batches */
    int         curbatch;       /* current batch #; 0 during 1st pass */

    int         nbatch_original;    /* nbatch when we started inner scan */
    int         nbatch_outstart;    /* nbatch when we started outer scan */

    bool        growEnabled;    /* flag to shut off nbatch increases */

    double      totalTuples;    /* # tuples obtained from inner plan */
    double      partialTuples;  /* # tuples obtained from inner plan by me */
    double      skewTuples;     /* # tuples inserted into skew tuples */

    /*
     * These arrays are allocated for the life of the hash join, but only if
     * nbatch > 1.  A file is opened only when we first write a tuple into it
     * (otherwise its pointer remains NULL).  Note that the zero'th array
     * elements never get used, since we will process rather than dump out
     * any tuples of batch zero.
     */
    BufFile   **innerBatchFile; /* buffered virtual temp file per batch */
    BufFile   **outerBatchFile; /* buffered virtual temp file per batch */

    /*
     * Info about the datatype-specific hash functions for the datatypes
     * being hashed.  These are arrays of the same length as the number of
     * hash join clauses (hash keys).
     */
    FmgrInfo   *outer_hashfunctions;    /* lookup data for hash functions */
    FmgrInfo   *inner_hashfunctions;    /* lookup data for hash functions */
    bool       *hashStrict;     /* is each hash join operator strict? */

    Size        spaceUsed;      /* memory space currently used by tuples */
    Size        spaceAllowed;   /* upper limit for space used */
    Size        spacePeak;      /* peak space used */
    Size        spaceUsedSkew;  /* skew hash table's current space usage */
    Size        spaceAllowedSkew;   /* upper limit for skew hashtable */

    MemoryContext hashCxt;      /* context for whole-hash-join storage */
    MemoryContext batchCxt;     /* context for this-batch-only storage */

    /* used for dense allocation of tuples (into linked chunks) */
    HashMemoryChunk chunks;     /* one list for the whole batch */

    /* Shared and private state for Parallel Hash. */
    HashMemoryChunk current_chunk;  /* this backend's current chunk */
    dsa_area   *area;           /* DSA area to allocate memory from */
    ParallelHashJoinState *parallel_state;  /* parallel execution state */
    ParallelHashJoinBatchAccessor *batches; /* parallel batch accessors */
    dsa_pointer current_chunk_shared;   /* shared pointer to current chunk */
}           HashJoinTableData;

typedef struct HashJoinTableData *HashJoinTable;

HashJoinTupleData
Hash连接元组数据

/* ----------------------------------------------------------------
 *              hash-join hash table structures
 *
 * Each active hashjoin has a HashJoinTable control block, which is
 * palloc'd in the executor's per-query context.  All other storage needed
 * for the hashjoin is kept in private memory contexts, two for each
 * hashjoin.  This makes it easy and fast to release the storage when we
 * don't need it anymore.  (Exception: data associated with the temp files
 * lives in the per-query context too, since we always call buffile.c in
 * that context.)
 *
 * The hashtable contexts are made children of the per-query context,
 * ensuring that they will be discarded at end of statement even if the
 * join is aborted early by an error.  (Likewise, any temporary files we
 * make will be cleaned up by the virtual file manager in event of an
 * error.)
 *
 * Storage that should live through the entire join is allocated from the
 * "hashCxt", while storage that is only wanted for the current batch is
 * allocated in the "batchCxt".  By resetting the batchCxt at the end of
 * each batch, we free all the per-batch storage reliably and without
 * tedium.
 *
 * During first scan of inner relation, we get its tuples from executor.
 * If nbatch > 1 then tuples that don't belong in first batch get saved
 * into inner-batch temp files.  The same statements apply for the first
 * scan of the outer relation, except we write tuples to outer-batch temp
 * files.  After finishing the first scan, we do the following for each
 * remaining batch:
 *  1. Read tuples from inner batch file, load into hash buckets.
 *  2. Read tuples from outer batch file, match to hash buckets and output.
 *
 * It is possible to increase nbatch on the fly if the in-memory hash table
 * gets too big.  The hash-value-to-batch computation is arranged so that
 * this can only cause a tuple to go into a later batch than previously
 * thought, never into an earlier batch.  When we increase nbatch, we
 * rescan the hash table and dump out any tuples that are now of a later
 * batch to the correct inner batch file.  Subsequently, while reading
 * either inner or outer batch files, we might find tuples that no longer
 * belong to the current batch; if so, we just dump them out to the correct
 * batch file.
 * ----------------------------------------------------------------
 */

/* these are in nodes/execnodes.h: */
/* typedef struct HashJoinTupleData *HashJoinTuple; */
/* typedef struct HashJoinTableData *HashJoinTable; */

typedef struct HashJoinTupleData
{
    /* link to next tuple in same bucket */
    union
    {
        struct HashJoinTupleData *unshared;
        dsa_pointer shared;
    }           next;
    uint32      hashvalue;      /* tuple's hash code */
    /* Tuple data, in MinimalTuple format, follows on a MAXALIGN boundary */
}           HashJoinTupleData;

#define HJTUPLE_OVERHEAD  MAXALIGN(sizeof(HashJoinTupleData))
#define HJTUPLE_MINTUPLE(hjtup)  \
    ((MinimalTuple) ((char *) (hjtup) + HJTUPLE_OVERHEAD))
二、源码解读

ExecScanHashBucket
搜索匹配当前outer relation tuple的hash桶,寻找匹配的inner relation元组。

/* ----------------------------------------------------------------
 * HJ_SCAN_BUCKET phase
 * ----------------------------------------------------------------
 */

/*
 * ExecScanHashBucket
 *      scan a hash bucket for matches to the current outer tuple
 *
 * The current outer tuple must be stored in econtext->ecxt_outertuple.
 *
 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
 * for the latter.
 */
bool
ExecScanHashBucket(HashJoinState *hjstate,
                   ExprContext *econtext)
{
    ExprState  *hjclauses = hjstate->hashclauses;   /* hash join quals */
    HashJoinTable hashtable = hjstate->hj_HashTable;    /* the hash table */
    HashJoinTuple hashTuple = hjstate->hj_CurTuple;     /* current tuple */
    uint32      hashvalue = hjstate->hj_CurHashValue;   /* current hash value */

    /*
     * hj_CurTuple is the address of the tuple last returned from the current
     * bucket, or NULL if it's time to start scanning a new bucket.
     *
     * If the tuple hashed to a skew bucket then scan the skew bucket
     * otherwise scan the standard hashtable bucket.
     */
    if (hashTuple != NULL)
        hashTuple = hashTuple->next.unshared;   /* advance within bucket */
    else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
        /* starting a new bucket, skew optimization in use: scan skew bucket */
        hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    else
        /* starting a new bucket: scan the regular bucket */
        hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];

    while (hashTuple != NULL)
    {
        if (hashTuple->hashvalue == hashvalue)  /* hash values match */
        {
            TupleTableSlot *inntuple;

            /* insert hashtable's tuple into exec slot so ExecQual sees it */
            inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
                                             hjstate->hj_HashTupleSlot,
                                             false);    /* do not pfree */
            econtext->ecxt_innertuple = inntuple;

            if (ExecQualAndReset(hjclauses, econtext))  /* join quals pass? */
            {
                hjstate->hj_CurTuple = hashTuple;   /* remember match */
                return true;
            }
        }

        hashTuple = hashTuple->next.unshared;   /* next tuple in bucket */
    }

    /*
     * no match
     */
    return false;
}

/*
 * Store a minimal tuple into TTSOpsMinimalTuple type slot.
 *
 * If the target slot is not guaranteed to be TTSOpsMinimalTuple type slot,
 * use the, more expensive, ExecForceStoreMinimalTuple().
 */
TupleTableSlot *
ExecStoreMinimalTuple(MinimalTuple mtup,
                      TupleTableSlot *slot,
                      bool shouldFree)
{
    /*
     * sanity checks
     */
    Assert(mtup != NULL);
    Assert(slot != NULL);
    Assert(slot->tts_tupleDescriptor != NULL);

    if (unlikely(!TTS_IS_MINIMALTUPLE(slot)))   /* slot-type check */
        elog(ERROR, "trying to store a minimal tuple into wrong type of slot");
    tts_minimal_store_tuple(slot, mtup, shouldFree);    /* do the store */

    return slot;
}

/*
 * tts_minimal_store_tuple - helper that places a MinimalTuple into a
 * MinimalTupleTableSlot, clearing any previous contents first.
 */
static void
tts_minimal_store_tuple(TupleTableSlot *slot, MinimalTuple mtup, bool shouldFree)
{
    MinimalTupleTableSlot *mslot = (MinimalTupleTableSlot *) slot;

    tts_minimal_clear(slot);    /* release previous slot contents */

    /* sanity checks: the slot must now be empty and not own a tuple */
    Assert(!TTS_SHOULDFREE(slot));
    Assert(TTS_EMPTY(slot));

    /* mark the slot as holding a tuple */
    slot->tts_flags &= ~TTS_FLAG_EMPTY;
    slot->tts_nvalid = 0;
    mslot->off = 0;

    /* store into the minimal slot */
    mslot->mintuple = mtup;
    Assert(mslot->tuple == &mslot->minhdr);
    mslot->minhdr.t_len = mtup->t_len + MINIMAL_TUPLE_OFFSET;
    mslot->minhdr.t_data = (HeapTupleHeader) ((char *) mtup - MINIMAL_TUPLE_OFFSET);
    /* no need to set t_self or t_tableOid since we won't allow access */

    if (shouldFree)
        slot->tts_flags |= TTS_FLAG_SHOULDFREE; /* slot owns and frees tuple */
    else
        Assert(!TTS_SHOULDFREE(slot));
}

/*
 * ExecQualAndReset() - evaluate qual with ExecQual() and reset expression
 * context.
 */
#ifndef FRONTEND
static inline bool
ExecQualAndReset(ExprState *state, ExprContext *econtext)
{
    bool        ret = ExecQual(state, econtext);    /* evaluate the quals */

    /* inline ResetExprContext, to avoid ordering issue in this file */
    MemoryContextReset(econtext->ecxt_per_tuple_memory);
    return ret;
}
#endif

#define HeapTupleHeaderSetMatch(tup) \
( \
    (tup)->t_infomask2 |= HEAP_TUPLE_HAS_MATCH \
)
三、跟踪分析

测试脚本如下

testdb=#setenable_nestloop=false;SETtestdb=#setenable_mergejoin=false;SETtestdb=#explainverboseselectdw.*,grjf.grbh,grjf.xm,grjf.ny,grjf.jetestdb-#fromt_dwxxdw,lateral(selectgr.grbh,gr.xm,jf.ny,jf.jetestdb(#fromt_grxxgrinnerjoint_jfxxjftestdb(#ongr.dwbh=dw.dwbhtestdb(#andgr.grbh=jf.grbh)grjftestdb-#orderbydw.dwbh;QUERYPLAN-----------------------------------------------------------------------------------------------Sort(cost=14828.83..15078.46rows=99850width=47)Output:dw.dwmc,dw.dwbh,dw.dwdz,gr.grbh,gr.xm,jf.ny,jf.jeSortKey:dw.dwbh->HashJoin(cost=3176.00..6537.55rows=99850width=47)Output:dw.dwmc,dw.dwbh,dw.dwdz,gr.grbh,gr.xm,jf.ny,jf.jeHashCond:((gr.grbh)::text=(jf.grbh)::text)->HashJoin(cost=289.00..2277.61rows=99850width=32)Output:dw.dwmc,dw.dwbh,dw.dwdz,gr.grbh,gr.xmInnerUnique:trueHashCond:((gr.dwbh)::text=(dw.dwbh)::text)->SeqScanonpublic.t_grxxgr(cost=0.00..1726.00rows=100000width=16)Output:gr.dwbh,gr.grbh,gr.xm,gr.xb,gr.nl->Hash(cost=164.00..164.00rows=10000width=20)Output:dw.dwmc,dw.dwbh,dw.dwdz->SeqScanonpublic.t_dwxxdw(cost=0.00..164.00rows=10000width=20)Output:dw.dwmc,dw.dwbh,dw.dwdz->Hash(cost=1637.00..1637.00rows=100000width=20)Output:jf.ny,jf.je,jf.grbh->SeqScanonpublic.t_jfxxjf(cost=0.00..1637.00rows=100000width=20)Output:jf.ny,jf.je,jf.grbh(20rows)

启动gdb,设置断点

(gdb)bExecScanHashBucketBreakpoint1at0x6ff25b:filenodeHash.c,line1910.(gdb)cContinuing.Breakpoint1,ExecScanHashBucket(hjstate=0x2bb8738,econtext=0x2bb8950)atnodeHash.c:19101910ExprState*hjclauses=hjstate->hashclauses;

设置相关变量

1910ExprState*hjclauses=hjstate->hashclauses;(gdb)n1911HashJoinTablehashtable=hjstate->hj_HashTable;(gdb)1912HashJoinTuplehashTuple=hjstate->hj_CurTuple;(gdb)1913uint32hashvalue=hjstate->hj_CurHashValue;(gdb)1922if(hashTuple!=NULL)

hash join连接条件

(gdb)p*hjclauses$1={tag={type=T_ExprState},flags=7'\a',resnull=false,resvalue=0,resultslot=0x0,steps=0x2bc4bc8,evalfunc=0x6d1a6e<ExecInterpExprStillValid>,expr=0x2bb60c0,evalfunc_private=0x6cf625<ExecInterpExpr>,steps_len=7,steps_alloc=16,parent=0x2bb8738,ext_params=0x0,innermost_caseval=0x0,innermost_casenull=0x0,innermost_domainval=0x0,innermost_domainnull=0x0}

hash表

(gdb)phashtable$2=(HashJoinTable)0x2bc9de8(gdb)p*hashtable$3={nbuckets=16384,log2_nbuckets=14,nbuckets_original=16384,nbuckets_optimal=16384,log2_nbuckets_optimal=14,buckets={unshared=0x7f0fc1345050,shared=0x7f0fc1345050},keepNulls=false,skewEnabled=false,skewBucket=0x0,skewBucketLen=0,nSkewBuckets=0,skewBucketNums=0x0,nbatch=1,curbatch=0,nbatch_original=1,nbatch_outstart=1,growEnabled=true,totalTuples=10000,partialTuples=10000,skewTuples=0,innerBatchFile=0x0,outerBatchFile=0x0,outer_hashfunctions=0x2bdc228,inner_hashfunctions=0x2bdc280,hashStrict=0x2bdc2d8,spaceUsed=677754,spaceAllowed=16777216,spacePeak=677754,spaceUsedSkew=0,spaceAllowedSkew=335544,hashCxt=0x2bdc110,batchCxt=0x2bde120,chunks=0x2c708f0,current_chunk=0x0,area=0x0,parallel_state=0x0,batches=0x0,current_chunk_shared=0}

hash桶中的元组&hash值

(gdb)p*hashTupleCannotaccessmemoryataddress0x0(gdb)phashvalue$4=2324234220(gdb)

从常规hash桶中获取hash元组

(gdb)n1924elseif(hjstate->hj_CurSkewBucketNo!=INVALID_SKEW_BUCKET_NO)(gdb)phjstate->hj_CurSkewBucketNo$5=-1(gdb)n1927hashTuple=hashtable->buckets.unshared[hjstate->hj_CurBucketNo];(gdb)1929while(hashTuple!=NULL)(gdb)phjstate->hj_CurBucketNo$7=16364(gdb)p*hashTuple$6={next={unshared=0x0,shared=0},hashvalue=1822113772}

判断hash值是否一致

(gdb)n1931if(hashTuple->hashvalue==hashvalue)(gdb)phashTuple->hashvalue$8=1822113772(gdb)phashvalue$9=2324234220(gdb)

不一致,继续下一个元组

(gdb)n1948hashTuple=hashTuple->next.unshared;(gdb)1929while(hashTuple!=NULL)

下一个元组为NULL,返回F,说明没有匹配的元组

(gdb)p*hashTupleCannotaccessmemoryataddress0x0(gdb)n1954returnfalse;

在ExecStoreMinimalTuple上设置断点(这时候Hash值是一致的)

(gdb)bExecStoreMinimalTupleBreakpoint2at0x6e8cbf:fileexecTuples.c,line427.(gdb)cContinuing.Breakpoint1,ExecScanHashBucket(hjstate=0x2bb8738,econtext=0x2bb8950)atnodeHash.c:19101910ExprState*hjclauses=hjstate->hashclauses;(gdb)del1(gdb)cContinuing.Breakpoint2,ExecStoreMinimalTuple(mtup=0x2be81b0,slot=0x2bb9c18,shouldFree=false)atexecTuples.c:427427Assert(mtup!=NULL);(gdb)finishRuntillexitfrom#0ExecStoreMinimalTuple(mtup=0x2be81b0,slot=0x2bb9c18,shouldFree=false)atexecTuples.c:4270x00000000006ff335inExecScanHashBucket(hjstate=0x2bb8738,econtext=0x2bb8950)atnodeHash.c:19361936inntuple=ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),Valuereturnedis$10=(TupleTableSlot*)0x2bb9c18(gdb)n1939econtext->ecxt_innertuple=inntuple;

匹配成功,返回T

(gdb)n1941if(ExecQualAndReset(hjclauses,econtext))(gdb)1943hjstate->hj_CurTuple=hashTuple;(gdb)1944returntrue;(gdb)1955}(gdb)

HJ_SCAN_BUCKET阶段,实现的逻辑是扫描Hash桶,寻找inner relation中与outer relation元组匹配的元组,如匹配,则把匹配的Tuple存储在hjstate->hj_CurTuple中.

“PostgreSQL的ExecHashJoin依赖其他函数的实现逻辑是什么”的内容就介绍到这里了,感谢大家的阅读。如果想了解更多行业相关的知识可以关注亿速云网站,小编将为大家输出更多高质量的实用文章!