LCOV - code coverage report
Current view: top level - src/backend/executor - nodeGather.c (source / functions)
Test:      Code coverage
Test Date: 2026-01-26 10:56:24

Coverage:                Rate    Total    Hit
    Lines:             99.4 %      168    167
    Functions:        100.0 %        8      8
    Branches:          91.6 %       83     76

Legend: Lines:    hit / not hit
        Branches: + taken   - not taken   # not executed

             Branch data     Line data    Source code
       1                 :             : /*-------------------------------------------------------------------------
       2                 :             :  *
       3                 :             :  * nodeGather.c
       4                 :             :  *        Support routines for scanning a plan via multiple workers.
       5                 :             :  *
       6                 :             :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       7                 :             :  * Portions Copyright (c) 1994, Regents of the University of California
       8                 :             :  *
       9                 :             :  * A Gather executor launches parallel workers to run multiple copies of a
      10                 :             :  * plan.  It can also run the plan itself, if the workers are not available
      11                 :             :  * or have not started up yet.  It then merges the results it produces locally
      12                 :             :  * with the results from the workers into a single output stream.  Therefore,
      13                 :             :  * it will normally be used with a plan where running multiple copies of the
      14                 :             :  * same plan does not produce duplicate output, such as parallel-aware
      15                 :             :  * SeqScan.
      16                 :             :  *
      17                 :             :  * Alternatively, a Gather node can be configured to use just one worker
      18                 :             :  * and the single-copy flag can be set.  In this case, the Gather node runs
      19                 :             :  * the plan in that one worker and does not execute the plan itself; it
      20                 :             :  * simply returns whatever tuples the worker produces.
      21                 :             :  * If a worker cannot be obtained, then it will run the plan itself and
      22                 :             :  * return the results.  Therefore, a plan used with a single-copy Gather
      23                 :             :  * node need not be parallel-aware.
      24                 :             :  *
      25                 :             :  * IDENTIFICATION
      26                 :             :  *        src/backend/executor/nodeGather.c
      27                 :             :  *
      28                 :             :  *-------------------------------------------------------------------------
      29                 :             :  */
      30                 :             : 
      31                 :             : #include "postgres.h"
      32                 :             : 
      33                 :             : #include "executor/execParallel.h"
      34                 :             : #include "executor/executor.h"
      35                 :             : #include "executor/nodeGather.h"
      36                 :             : #include "executor/tqueue.h"
      37                 :             : #include "miscadmin.h"
      38                 :             : #include "optimizer/optimizer.h"
      39                 :             : #include "utils/wait_event.h"
      40                 :             : 
      41                 :             : 
      42                 :             : static TupleTableSlot *ExecGather(PlanState *pstate);
      43                 :             : static TupleTableSlot *gather_getnext(GatherState *gatherstate);
      44                 :             : static MinimalTuple gather_readnext(GatherState *gatherstate);
      45                 :             : static void ExecShutdownGatherWorkers(GatherState *node);
      46                 :             : 
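
These routines all operate on a GatherState node. For reference, an abridged
sketch of that struct as declared in src/include/nodes/execnodes.h (comments
paraphrased; unrelated detail omitted):

    typedef struct GatherState
    {
        PlanState   ps;                     /* common PlanState header */
        bool        initialized;            /* workers launched yet? */
        bool        need_to_scan_locally;   /* leader runs the plan too? */
        int64       tuples_needed;          /* tuple bound, or -1 if none */
        /* these fields are set up once: */
        TupleTableSlot *funnel_slot;        /* slot for tuples from workers */
        struct ParallelExecutorInfo *pei;   /* shared parallel-exec state */
        /* remaining fields are reinitialized during a rescan: */
        int         nworkers_launched;      /* original number of workers */
        int         nreaders;               /* still-active tuple queues */
        int         nextreader;             /* next queue to try reading */
        struct TupleQueueReader **reader;   /* nreaders active readers */
    } GatherState;
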
      47                 :             : 
      48                 :             : /* ----------------------------------------------------------------
      49                 :             :  *              ExecInitGather
      50                 :             :  * ----------------------------------------------------------------
      51                 :             :  */
      52                 :             : GatherState *
      53                 :         193 : ExecInitGather(Gather *node, EState *estate, int eflags)
      54                 :             : {
      55                 :         193 :         GatherState *gatherstate;
      56                 :         193 :         Plan       *outerNode;
      57                 :         193 :         TupleDesc       tupDesc;
      58                 :             : 
      59                 :             :         /* Gather node doesn't have innerPlan node. */
      60         [ +  - ]:         193 :         Assert(innerPlan(node) == NULL);
      61                 :             : 
      62                 :             :         /*
      63                 :             :          * create state structure
      64                 :             :          */
      65                 :         193 :         gatherstate = makeNode(GatherState);
      66                 :         193 :         gatherstate->ps.plan = (Plan *) node;
      67                 :         193 :         gatherstate->ps.state = estate;
      68                 :         193 :         gatherstate->ps.ExecProcNode = ExecGather;
      69                 :             : 
      70                 :         193 :         gatherstate->initialized = false;
      71                 :         193 :         gatherstate->need_to_scan_locally =
      72         [ +  + ]:         193 :                 !node->single_copy && parallel_leader_participation;
      73                 :         193 :         gatherstate->tuples_needed = -1;
      74                 :             : 
      75                 :             :         /*
      76                 :             :          * Miscellaneous initialization
      77                 :             :          *
      78                 :             :          * create expression context for node
      79                 :             :          */
      80                 :         193 :         ExecAssignExprContext(estate, &gatherstate->ps);
      81                 :             : 
      82                 :             :         /*
      83                 :             :          * now initialize outer plan
      84                 :             :          */
      85                 :         193 :         outerNode = outerPlan(node);
      86                 :         193 :         outerPlanState(gatherstate) = ExecInitNode(outerNode, estate, eflags);
      87                 :         193 :         tupDesc = ExecGetResultType(outerPlanState(gatherstate));
      88                 :             : 
      89                 :             :         /*
      90                 :             :          * Leader may access ExecProcNode result directly (if
      91                 :             :          * need_to_scan_locally), or from workers via tuple queue.  So we can't
      92                 :             :          * trivially rely on the slot type being fixed for expressions evaluated
      93                 :             :          * within this node.
      94                 :             :          */
      95                 :         193 :         gatherstate->ps.outeropsset = true;
      96                 :         193 :         gatherstate->ps.outeropsfixed = false;
      97                 :             : 
      98                 :             :         /*
      99                 :             :          * Initialize result type and projection.
     100                 :             :          */
     101                 :         193 :         ExecInitResultTypeTL(&gatherstate->ps);
     102                 :         193 :         ExecConditionalAssignProjectionInfo(&gatherstate->ps, tupDesc, OUTER_VAR);
     103                 :             : 
     104                 :             :         /*
     105                 :             :          * Without projections result slot type is not trivially known, see
     106                 :             :          * comment above.
     107                 :             :          */
     108         [ +  + ]:         193 :         if (gatherstate->ps.ps_ProjInfo == NULL)
     109                 :             :         {
     110                 :         180 :                 gatherstate->ps.resultopsset = true;
     111                 :         180 :                 gatherstate->ps.resultopsfixed = false;
     112                 :         180 :         }
     113                 :             : 
     114                 :             :         /*
     115                 :             :          * Initialize funnel slot to same tuple descriptor as outer plan.
     116                 :             :          */
     117                 :         193 :         gatherstate->funnel_slot = ExecInitExtraTupleSlot(estate, tupDesc,
     118                 :             :                                                                                                           &TTSOpsMinimalTuple);
     119                 :             : 
     120                 :             :         /*
     121                 :             :          * Gather doesn't support checking a qual (it's always more efficient to
     122                 :             :          * do it in the child node).
     123                 :             :          */
     124         [ +  - ]:         193 :         Assert(!node->plan.qual);
     125                 :             : 
     126                 :         386 :         return gatherstate;
     127                 :         193 : }
     128                 :             : 
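
Note that ExecInitGather installs ExecGather as the node's ExecProcNode
callback rather than exposing it by name; the executor reaches it through the
generic dispatch inline in src/include/executor/executor.h, roughly:

    static inline TupleTableSlot *
    ExecProcNode(PlanState *node)
    {
        if (node->chgParam != NULL)     /* something changed? */
            ExecReScan(node);           /* let ReScan handle this */

        return node->ExecProcNode(node);    /* here: ExecGather */
    }

This is why ExecGather below can stay file-local (static): callers only ever
invoke it through this function pointer.
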
     129                 :             : /* ----------------------------------------------------------------
     130                 :             :  *              ExecGather(node)
     131                 :             :  *
     132                 :             :  *              Scans the relation via multiple workers and returns
     133                 :             :  *              the next qualifying tuple.
     134                 :             :  * ----------------------------------------------------------------
     135                 :             :  */
     136                 :             : static TupleTableSlot *
     137                 :      531439 : ExecGather(PlanState *pstate)
     138                 :             : {
     139                 :      531439 :         GatherState *node = castNode(GatherState, pstate);
     140                 :      531439 :         TupleTableSlot *slot;
     141                 :      531439 :         ExprContext *econtext;
     142                 :             : 
     143         [ +  + ]:      531439 :         CHECK_FOR_INTERRUPTS();
     144                 :             : 
     145                 :             :         /*
     146                 :             :          * Initialize the parallel context and workers on first execution. We do
     147                 :             :          * this on first execution rather than during node initialization, as it
     148                 :             :          * needs to allocate a large dynamic segment, so it is better to do it
     149                 :             :          * only if it is really needed.
     150                 :             :          */
     151         [ +  + ]:      531439 :         if (!node->initialized)
     152                 :             :         {
     153                 :         143 :                 EState     *estate = node->ps.state;
     154                 :         143 :                 Gather     *gather = (Gather *) node->ps.plan;
     155                 :             : 
     156                 :             :                 /*
     157                 :             :                  * Sometimes we might have to run without parallelism; but if parallel
     158                 :             :                  * mode is active then we can try to fire up some workers.
     159                 :             :                  */
     160   [ +  -  +  + ]:         143 :                 if (gather->num_workers > 0 && estate->es_use_parallel_mode)
     161                 :             :                 {
     162                 :         136 :                         ParallelContext *pcxt;
     163                 :             : 
     164                 :             :                         /* Initialize, or re-initialize, shared state needed by workers. */
     165         [ +  + ]:         136 :                         if (!node->pei)
     166                 :         196 :                                 node->pei = ExecInitParallelPlan(outerPlanState(node),
     167                 :          98 :                                                                                                  estate,
     168                 :          98 :                                                                                                  gather->initParam,
     169                 :          98 :                                                                                                  gather->num_workers,
     170                 :          98 :                                                                                                  node->tuples_needed);
     171                 :             :                         else
     172                 :          76 :                                 ExecParallelReinitialize(outerPlanState(node),
     173                 :          38 :                                                                                  node->pei,
     174                 :          38 :                                                                                  gather->initParam);
     175                 :             : 
     176                 :             :                         /*
     177                 :             :                          * Register backend workers. We might not get as many as we
     178                 :             :                          * requested, or indeed any at all.
     179                 :             :                          */
     180                 :         136 :                         pcxt = node->pei->pcxt;
     181                 :         136 :                         LaunchParallelWorkers(pcxt);
     182                 :             :                         /* We save # workers launched for the benefit of EXPLAIN */
     183                 :         136 :                         node->nworkers_launched = pcxt->nworkers_launched;
     184                 :             : 
     185                 :             :                         /*
     186                 :             :                          * Count number of workers originally wanted and actually
     187                 :             :                          * launched.
     188                 :             :                          */
     189                 :         136 :                         estate->es_parallel_workers_to_launch += pcxt->nworkers_to_launch;
     190                 :         136 :                         estate->es_parallel_workers_launched += pcxt->nworkers_launched;
     191                 :             : 
     192                 :             :                         /* Set up tuple queue readers to read the results. */
     193         [ +  + ]:         136 :                         if (pcxt->nworkers_launched > 0)
     194                 :             :                         {
     195                 :         135 :                                 ExecParallelCreateReaders(node->pei);
     196                 :             :                                 /* Make a working array showing the active readers */
     197                 :         135 :                                 node->nreaders = pcxt->nworkers_launched;
     198                 :         135 :                                 node->reader = (TupleQueueReader **)
     199                 :         135 :                                         palloc(node->nreaders * sizeof(TupleQueueReader *));
     200                 :         135 :                                 memcpy(node->reader, node->pei->reader,
     201                 :             :                                            node->nreaders * sizeof(TupleQueueReader *));
     202                 :         135 :                         }
     203                 :             :                         else
     204                 :             :                         {
     205                 :             :                                 /* No workers?  Then never mind. */
     206                 :           1 :                                 node->nreaders = 0;
     207                 :           1 :                                 node->reader = NULL;
     208                 :             :                         }
     209                 :         136 :                         node->nextreader = 0;
     210                 :         136 :                 }
     211                 :             : 
     212                 :             :         /* Run plan locally if no workers, or leader participation is on and not single-copy. */
     213                 :         278 :                 node->need_to_scan_locally = (node->nreaders == 0)
     214   [ +  +  +  + ]:         143 :                         || (!gather->single_copy && parallel_leader_participation);
     215                 :         143 :                 node->initialized = true;
     216                 :         143 :         }
     217                 :             : 
     218                 :             :         /*
     219                 :             :          * Reset per-tuple memory context to free any expression evaluation
     220                 :             :          * storage allocated in the previous tuple cycle.
     221                 :             :          */
     222                 :      531439 :         econtext = node->ps.ps_ExprContext;
     223                 :      531439 :         ResetExprContext(econtext);
     224                 :             : 
     225                 :             :         /*
     226                 :             :          * Get next tuple, either from one of our workers, or by running the plan
     227                 :             :          * ourselves.
     228                 :             :          */
     229                 :      531439 :         slot = gather_getnext(node);
     230   [ +  +  +  + ]:      531439 :         if (TupIsNull(slot))
     231                 :         137 :                 return NULL;
     232                 :             : 
     233                 :             :         /* If no projection is required, we're done. */
     234         [ +  + ]:      531302 :         if (node->ps.ps_ProjInfo == NULL)
     235                 :      531296 :                 return slot;
     236                 :             : 
     237                 :             :         /*
     238                 :             :          * Form the result tuple using ExecProject(), and return it.
     239                 :             :          */
     240                 :           6 :         econtext->ecxt_outertuple = slot;
     241                 :           6 :         return ExecProject(node->ps.ps_ProjInfo);
     242                 :      531439 : }
     243                 :             : 
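
A parent node consumes Gather like any other child: pull one slot per call
until an empty slot marks the end of the stream. A minimal, hypothetical
consumer loop (gstate stands in for the GatherState built by ExecInitGather):

    /* Hypothetical driver loop; gstate comes from ExecInitGather(). */
    for (;;)
    {
        TupleTableSlot *slot = ExecProcNode((PlanState *) gstate);

        if (TupIsNull(slot))            /* merged stream is exhausted */
            break;
        /* ... consume the tuple in slot ... */
    }
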
     244                 :             : /* ----------------------------------------------------------------
     245                 :             :  *              ExecEndGather
     246                 :             :  *
     247                 :             :  *              frees any storage allocated through C routines.
     248                 :             :  * ----------------------------------------------------------------
     249                 :             :  */
     250                 :             : void
     251                 :         191 : ExecEndGather(GatherState *node)
     252                 :             : {
     253                 :         191 :         ExecEndNode(outerPlanState(node));      /* let children clean up first */
     254                 :         191 :         ExecShutdownGather(node);
     255                 :         191 : }
     256                 :             : 
     257                 :             : /*
     258                 :             :  * Read the next tuple.  We might fetch a tuple from one of the tuple queues
     259                 :             :  * using gather_readnext, or if no tuple queue contains a tuple and the
     260                 :             :  * single_copy flag is not set, we might generate one locally instead.
     261                 :             :  */
     262                 :             : static TupleTableSlot *
     263                 :      531439 : gather_getnext(GatherState *gatherstate)
     264                 :             : {
     265                 :      531439 :         PlanState  *outerPlan = outerPlanState(gatherstate);
     266                 :      531439 :         TupleTableSlot *outerTupleSlot;
     267                 :      531439 :         TupleTableSlot *fslot = gatherstate->funnel_slot;
     268                 :      531439 :         MinimalTuple tup;
     269                 :             : 
     270   [ +  +  +  + ]:      531656 :         while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
     271                 :             :         {
     272         [ +  + ]:      531521 :                 CHECK_FOR_INTERRUPTS();
     273                 :             : 
     274         [ +  + ]:      531521 :                 if (gatherstate->nreaders > 0)
     275                 :             :                 {
     276                 :      481909 :                         tup = gather_readnext(gatherstate);
     277                 :             : 
     278         [ +  + ]:      481909 :                         if (HeapTupleIsValid(tup))
     279                 :             :                         {
     280                 :      499716 :                                 ExecStoreMinimalTuple(tup,      /* tuple to store */
     281                 :      249858 :                                                                           fslot,        /* slot to store the tuple */
     282                 :             :                                                                           false);       /* don't pfree tuple  */
     283                 :      249858 :                                 return fslot;
     284                 :             :                         }
     285                 :      232051 :                 }
     286                 :             : 
     287         [ +  + ]:      281663 :                 if (gatherstate->need_to_scan_locally)
     288                 :             :                 {
     289                 :      281541 :                         EState     *estate = gatherstate->ps.state;
     290                 :             : 
     291                 :             :                         /* Install our DSA area while executing the plan. */
     292                 :      281541 :                         estate->es_query_dsa =
     293         [ +  + ]:      281541 :                                 gatherstate->pei ? gatherstate->pei->area : NULL;
     294                 :      281541 :                         outerTupleSlot = ExecProcNode(outerPlan);
     295                 :      281541 :                         estate->es_query_dsa = NULL;
     296                 :             : 
     297   [ +  +  +  + ]:      281541 :                         if (!TupIsNull(outerTupleSlot))
     298                 :      281444 :                                 return outerTupleSlot;
     299                 :             : 
     300                 :          97 :                         gatherstate->need_to_scan_locally = false;
     301         [ +  + ]:      281541 :                 }
     302                 :             :         }
     303                 :             : 
     304                 :         135 :         return ExecClearTuple(fslot);
     305                 :      531439 : }
     306                 :             : 
     307                 :             : /*
     308                 :             :  * Attempt to read a tuple from one of our parallel workers.
     309                 :             :  */
     310                 :             : static MinimalTuple
     311                 :      481911 : gather_readnext(GatherState *gatherstate)
     312                 :             : {
     313                 :      481911 :         int                     nvisited = 0;
     314                 :             : 
     315                 :     1103437 :         for (;;)
     316                 :             :         {
     317                 :     1103665 :                 TupleQueueReader *reader;
     318                 :     1103665 :                 MinimalTuple tup;
     319                 :     1103665 :                 bool            readerdone;
     320                 :             : 
     321                 :             :                 /* Check for async events, particularly messages from workers. */
     322         [ +  + ]:     1103665 :                 CHECK_FOR_INTERRUPTS();
     323                 :             : 
     324                 :             :                 /*
     325                 :             :                  * Attempt to read a tuple, but don't block if none is available.
     326                 :             :                  *
     327                 :             :                  * Note that TupleQueueReaderNext will just return NULL for a worker
     328                 :             :                  * which fails to initialize.  We'll treat that worker as having
     329                 :             :                  * produced no tuples; WaitForParallelWorkersToFinish will error out
     330                 :             :                  * when we get there.
     331                 :             :                  */
     332         [ -  + ]:     1103665 :                 Assert(gatherstate->nextreader < gatherstate->nreaders);
     333                 :     1103665 :                 reader = gatherstate->reader[gatherstate->nextreader];
     334                 :     1103665 :                 tup = TupleQueueReaderNext(reader, true, &readerdone);
     335                 :             : 
     336                 :             :                 /*
     337                 :             :                  * If this reader is done, remove it from our working array of active
     338                 :             :                  * readers.  If all readers are done, we're outta here.
     339                 :             :                  */
     340         [ +  + ]:     1103665 :                 if (readerdone)
     341                 :             :                 {
     342         [ +  - ]:         363 :                         Assert(!tup);
     343                 :         363 :                         --gatherstate->nreaders;
     344         [ +  + ]:         363 :                         if (gatherstate->nreaders == 0)
     345                 :             :                         {
     346                 :         133 :                                 ExecShutdownGatherWorkers(gatherstate);
     347                 :         133 :                                 return NULL;
     348                 :             :                         }
     349                 :         230 :                         memmove(&gatherstate->reader[gatherstate->nextreader],
     350                 :             :                                         &gatherstate->reader[gatherstate->nextreader + 1],
     351                 :             :                                         sizeof(TupleQueueReader *)
     352                 :             :                                         * (gatherstate->nreaders - gatherstate->nextreader));
     353         [ +  + ]:         230 :                         if (gatherstate->nextreader >= gatherstate->nreaders)
     354                 :          78 :                                 gatherstate->nextreader = 0;
     355                 :         230 :                         continue;
     356                 :             :                 }
     357                 :             : 
     358                 :             :                 /* If we got a tuple, return it. */
     359         [ +  + ]:     1103302 :                 if (tup)
     360                 :      249858 :                         return tup;
     361                 :             : 
     362                 :             :                 /*
     363                 :             :                  * Advance nextreader pointer in round-robin fashion.  Note that we
     364                 :             :                  * only reach this code if we weren't able to get a tuple from the
     365                 :             :                  * current worker.  We used to advance the nextreader pointer after
     366                 :             :                  * every tuple, but it turns out to be much more efficient to keep
     367                 :             :                  * reading from the same queue until that would require blocking.
     368                 :             :                  */
     369                 :      853444 :                 gatherstate->nextreader++;
     370         [ +  + ]:      853444 :                 if (gatherstate->nextreader >= gatherstate->nreaders)
     371                 :      233389 :                         gatherstate->nextreader = 0;
     372                 :             : 
     373                 :             :                 /* Have we visited every (surviving) TupleQueueReader? */
     374                 :      853444 :                 nvisited++;
     375         [ +  + ]:      853444 :                 if (nvisited >= gatherstate->nreaders)
     376                 :             :                 {
     377                 :             :                         /*
     378                 :             :                          * If (still) running plan locally, return NULL so caller can
     379                 :             :                          * generate another tuple from the local copy of the plan.
     380                 :             :                          */
     381         [ +  + ]:      233218 :                         if (gatherstate->need_to_scan_locally)
     382                 :      231918 :                                 return NULL;
     383                 :             : 
     384                 :             :                         /* Nothing to do except wait for developments. */
     385                 :        1300 :                         (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
     386                 :             :                                                          WAIT_EVENT_EXECUTE_GATHER);
     387                 :        1300 :                         ResetLatch(MyLatch);
     388                 :        1300 :                         nvisited = 0;
     389                 :        1300 :                 }
     390      [ +  +  + ]:     1103665 :         }
     391                 :      481911 : }
     392                 :             : 
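
The scheduling policy above -- drain the current queue until a read would
block, advance round-robin only on failure, and sleep on the process latch
after a full unproductive sweep -- can be shown in isolation. A simplified
sketch using hypothetical stand-ins (try_read for TupleQueueReaderNext,
wait_for_latch for the WaitLatch/ResetLatch pair):

    typedef struct Queue Queue;         /* hypothetical tuple queue */
    extern void *try_read(Queue *q);    /* hypothetical non-blocking read */
    extern void wait_for_latch(void);   /* hypothetical latch wait */

    static void *
    poll_round_robin(Queue **queues, int nqueues, int *next)
    {
        int         nvisited = 0;

        for (;;)
        {
            void       *item = try_read(queues[*next]); /* never blocks */

            if (item != NULL)
                return item;    /* stay on this queue while it produces */

            *next = (*next + 1) % nqueues;  /* advance on failure only */

            if (++nvisited >= nqueues)  /* full sweep, nothing available */
            {
                wait_for_latch();       /* sleep until a worker signals */
                nvisited = 0;
            }
        }
    }
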
     393                 :             : /* ----------------------------------------------------------------
     394                 :             :  *              ExecShutdownGatherWorkers
     395                 :             :  *
     396                 :             :  *              Stop all the parallel workers.
     397                 :             :  * ----------------------------------------------------------------
     398                 :             :  */
     399                 :             : static void
     400                 :         479 : ExecShutdownGatherWorkers(GatherState *node)
     401                 :             : {
     402         [ +  + ]:         479 :         if (node->pei != NULL)
     403                 :         267 :                 ExecParallelFinish(node->pei);
     404                 :             : 
     405                 :             :         /* Flush local copy of reader array */
     406         [ +  + ]:         479 :         if (node->reader)
     407                 :         133 :                 pfree(node->reader);
     408                 :         479 :         node->reader = NULL;
     409                 :         479 : }
     410                 :             : 
     411                 :             : /* ----------------------------------------------------------------
     412                 :             :  *              ExecShutdownGather
     413                 :             :  *
     414                 :             :  *              Destroy the setup for parallel workers including parallel context.
     415                 :             :  * ----------------------------------------------------------------
     416                 :             :  */
     417                 :             : void
     418                 :         296 : ExecShutdownGather(GatherState *node)
     419                 :             : {
     420                 :         296 :         ExecShutdownGatherWorkers(node);
     421                 :             : 
     422                 :             :         /* Now destroy the parallel context. */
     423         [ +  + ]:         296 :         if (node->pei != NULL)
     424                 :             :         {
     425                 :          96 :                 ExecParallelCleanup(node->pei);
     426                 :          96 :                 node->pei = NULL;
     427                 :          96 :         }
     428                 :         296 : }
     429                 :             : 
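
The two shutdown routines have different lifetimes. ExecShutdownGatherWorkers
only waits out the workers and keeps node->pei, so a later rescan can reuse
the parallel context via ExecParallelReinitialize; ExecShutdownGather
additionally destroys the context. Roughly, as used in this file:

    /* Between rescans (see ExecReScanGather): stop workers, keep context. */
    ExecShutdownGatherWorkers(node);        /* node->pei survives for reuse */

    /* At final shutdown (ExecShutdownGather, also via ExecEndGather): */
    ExecShutdownGatherWorkers(node);
    ExecParallelCleanup(node->pei);         /* destroy the parallel context */
    node->pei = NULL;
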
     430                 :             : /* ----------------------------------------------------------------
     431                 :             :  *                                              Join Support
     432                 :             :  * ----------------------------------------------------------------
     433                 :             :  */
     434                 :             : 
     435                 :             : /* ----------------------------------------------------------------
     436                 :             :  *              ExecReScanGather
     437                 :             :  *
     438                 :             :  *              Prepare to re-scan the result of a Gather.
     439                 :             :  * ----------------------------------------------------------------
     440                 :             :  */
     441                 :             : void
     442                 :          50 : ExecReScanGather(GatherState *node)
     443                 :             : {
     444                 :          50 :         Gather     *gather = (Gather *) node->ps.plan;
     445                 :          50 :         PlanState  *outerPlan = outerPlanState(node);
     446                 :             : 
     447                 :             :         /* Make sure any existing workers are gracefully shut down */
     448                 :          50 :         ExecShutdownGatherWorkers(node);
     449                 :             : 
     450                 :             :         /* Mark node so that shared state will be rebuilt at next call */
     451                 :          50 :         node->initialized = false;
     452                 :             : 
     453                 :             :         /*
     454                 :             :          * Set child node's chgParam to tell it that the next scan might deliver a
     455                 :             :          * different set of rows within the leader process.  (The overall rowset
     456                 :             :          * shouldn't change, but the leader process's subset might; hence nodes
     457                 :             :          * between here and the parallel table scan node mustn't optimize on the
     458                 :             :          * assumption of an unchanging rowset.)
     459                 :             :          */
     460         [ -  + ]:          50 :         if (gather->rescan_param >= 0)
     461                 :         100 :                 outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
     462                 :          50 :                                                                                          gather->rescan_param);
     463                 :             : 
     464                 :             :         /*
     465                 :             :          * If chgParam of subnode is not null then plan will be re-scanned by
     466                 :             :          * first ExecProcNode.  Note: because this does nothing if we have a
     467                 :             :          * rescan_param, it's currently guaranteed that parallel-aware child nodes
     468                 :             :          * will not see a ReScan call until after they get a ReInitializeDSM call.
     469                 :             :          * That ordering might not be something to rely on, though.  A good rule
     470                 :             :          * of thumb is that ReInitializeDSM should reset only shared state, ReScan
     471                 :             :          * should reset only local state, and anything that depends on both of
     472                 :             :          * those steps being finished must wait until the first ExecProcNode call.
     473                 :             :          */
     474         [ +  - ]:          50 :         if (outerPlan->chgParam == NULL)
     475                 :           0 :                 ExecReScan(outerPlan);
     476                 :          50 : }
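
Nothing is actually re-scanned here; the chgParam bit makes the rescan lazy.
On the next pull, the generic ExecProcNode dispatch (sketched earlier) sees
chgParam != NULL and calls ExecReScan before producing any tuple, so
parallel-aware children re-derive local state only after ReInitializeDSM has
run. A hedged illustration of the contract:

    /* Parent side: flag the child's rescan parameter as changed. */
    outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
                                         gather->rescan_param);

    /* Child side: the next ExecProcNode(outerPlan) notices chgParam != NULL
     * and invokes ExecReScan(outerPlan) before returning a tuple. */
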

Generated by: LCOV version 2.3.2-1