/*-------------------------------------------------------------------------
 *
 * planner.c
 *    The query optimizer external interface.
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/optimizer/plan/planner.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/genam.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_clause.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "utils/acl.h"
#include "utils/backend_status.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"

/* GUC parameters */
double      cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
int         debug_parallel_query = DEBUG_PARALLEL_OFF;
bool        parallel_leader_participation = true;
bool        enable_distinct_reordering = true;

/* Hook for plugins to get control in planner() */
planner_hook_type planner_hook = NULL;

/* Hook for plugins to get control after PlannerGlobal is initialized */
planner_setup_hook_type planner_setup_hook = NULL;

/* Hook for plugins to get control before PlannerGlobal is discarded */
planner_shutdown_hook_type planner_shutdown_hook = NULL;

/* Hook for plugins to get control when grouping_planner() plans upper rels */
create_upper_paths_hook_type create_upper_paths_hook = NULL;


/* Expression kind codes for preprocess_expression */
#define EXPRKIND_QUAL               0
#define EXPRKIND_TARGET             1
#define EXPRKIND_RTFUNC             2
#define EXPRKIND_RTFUNC_LATERAL     3
#define EXPRKIND_VALUES             4
#define EXPRKIND_VALUES_LATERAL     5
#define EXPRKIND_LIMIT              6
#define EXPRKIND_APPINFO            7
#define EXPRKIND_PHV                8
#define EXPRKIND_TABLESAMPLE        9
#define EXPRKIND_ARBITER_ELEM       10
#define EXPRKIND_TABLEFUNC          11
#define EXPRKIND_TABLEFUNC_LATERAL  12
#define EXPRKIND_GROUPEXPR          13

/*
 * Data specific to grouping sets
 */
typedef struct
{
    List       *rollups;
    List       *hash_sets_idx;
    double      dNumHashGroups;
    bool        any_hashable;
    Bitmapset  *unsortable_refs;
    Bitmapset  *unhashable_refs;
    List       *unsortable_sets;
    int        *tleref_to_colnum_map;
} grouping_sets_data;

/*
 * Temporary structure for use during WindowClause reordering in order to be
 * able to sort WindowClauses on partitioning/ordering prefix.
 */
typedef struct
{
    WindowClause *wc;
    List       *uniqueOrder;    /* A List of unique ordering/partitioning
                                 * clauses per Window */
} WindowClauseSortData;

/* Passthrough data for standard_qp_callback */
typedef struct
{
    List       *activeWindows;  /* active windows, if any */
    grouping_sets_data *gset_data;  /* grouping sets data, if any */
    SetOperationStmt *setop;    /* parent set operation or NULL if not a
                                 * subquery belonging to a set operation */
} standard_qp_extra;

/* Local functions */
static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
static void grouping_planner(PlannerInfo *root, double tuple_fraction,
                             SetOperationStmt *setops);
static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
                                      int *tleref_to_colnum_map);
static void preprocess_rowmarks(PlannerInfo *root);
static double preprocess_limit(PlannerInfo *root,
                               double tuple_fraction,
                               int64 *offset_est, int64 *count_est);
static List *preprocess_groupclause(PlannerInfo *root, List *force);
static List *extract_rollup_sets(List *groupingSets);
static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
static void standard_qp_callback(PlannerInfo *root, void *extra);
static double get_number_of_groups(PlannerInfo *root,
                                   double path_rows,
                                   grouping_sets_data *gd,
                                   List *target_list);
static RelOptInfo *create_grouping_paths(PlannerInfo *root,
                                         RelOptInfo *input_rel,
                                         PathTarget *target,
                                         bool target_parallel_safe,
                                         grouping_sets_data *gd);
static bool is_degenerate_grouping(PlannerInfo *root);
static void create_degenerate_grouping_paths(PlannerInfo *root,
                                             RelOptInfo *input_rel,
                                             RelOptInfo *grouped_rel);
static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                     PathTarget *target, bool target_parallel_safe,
                                     Node *havingQual);
static void create_ordinary_grouping_paths(PlannerInfo *root,
                                           RelOptInfo *input_rel,
                                           RelOptInfo *grouped_rel,
                                           const AggClauseCosts *agg_costs,
                                           grouping_sets_data *gd,
                                           GroupPathExtraData *extra,
                                           RelOptInfo **partially_grouped_rel_p);
static void consider_groupingsets_paths(PlannerInfo *root,
                                        RelOptInfo *grouped_rel,
                                        Path *path,
                                        bool is_sorted,
                                        bool can_hash,
                                        grouping_sets_data *gd,
                                        const AggClauseCosts *agg_costs,
                                        double dNumGroups);
static RelOptInfo *create_window_paths(PlannerInfo *root,
                                       RelOptInfo *input_rel,
                                       PathTarget *input_target,
                                       PathTarget *output_target,
                                       bool output_target_parallel_safe,
                                       WindowFuncLists *wflists,
                                       List *activeWindows);
static void create_one_window_path(PlannerInfo *root,
                                   RelOptInfo *window_rel,
                                   Path *path,
                                   PathTarget *input_target,
                                   PathTarget *output_target,
                                   WindowFuncLists *wflists,
                                   List *activeWindows);
static RelOptInfo *create_distinct_paths(PlannerInfo *root,
                                         RelOptInfo *input_rel,
                                         PathTarget *target);
static void create_partial_distinct_paths(PlannerInfo *root,
                                          RelOptInfo *input_rel,
                                          RelOptInfo *final_distinct_rel,
                                          PathTarget *target);
static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
                                               RelOptInfo *input_rel,
                                               RelOptInfo *distinct_rel);
static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
                                              List *needed_pathkeys,
                                              List *path_pathkeys);
static RelOptInfo *create_ordered_paths(PlannerInfo *root,
                                        RelOptInfo *input_rel,
                                        PathTarget *target,
                                        bool target_parallel_safe,
                                        double limit_tuples);
static PathTarget *make_group_input_target(PlannerInfo *root,
                                           PathTarget *final_target);
static PathTarget *make_partial_grouping_target(PlannerInfo *root,
                                                PathTarget *grouping_target,
                                                Node *havingQual);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static void optimize_window_clauses(PlannerInfo *root,
                                    WindowFuncLists *wflists);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static void name_active_windows(List *activeWindows);
static PathTarget *make_window_input_target(PlannerInfo *root,
                                            PathTarget *final_target,
                                            List *activeWindows);
static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
                                      List *tlist);
static PathTarget *make_sort_input_target(PlannerInfo *root,
                                          PathTarget *final_target,
                                          bool *have_postponed_srfs);
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
                                  List *targets, List *targets_contain_srfs);
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                      RelOptInfo *grouped_rel,
                                      RelOptInfo *partially_grouped_rel,
                                      const AggClauseCosts *agg_costs,
                                      grouping_sets_data *gd,
                                      GroupPathExtraData *extra);
static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
                                                 RelOptInfo *grouped_rel,
                                                 RelOptInfo *input_rel,
                                                 grouping_sets_data *gd,
                                                 GroupPathExtraData *extra,
                                                 bool force_rel_creation);
static Path *make_ordered_path(PlannerInfo *root,
                               RelOptInfo *rel,
                               Path *path,
                               Path *cheapest_path,
                               List *pathkeys,
                               double limit_tuples);
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
static bool can_partial_agg(PlannerInfo *root);
static void apply_scanjoin_target_to_paths(PlannerInfo *root,
                                           RelOptInfo *rel,
                                           List *scanjoin_targets,
                                           List *scanjoin_targets_contain_srfs,
                                           bool scanjoin_target_parallel_safe,
                                           bool tlist_same_exprs);
static void create_partitionwise_grouping_paths(PlannerInfo *root,
                                                RelOptInfo *input_rel,
                                                RelOptInfo *grouped_rel,
                                                RelOptInfo *partially_grouped_rel,
                                                const AggClauseCosts *agg_costs,
                                                grouping_sets_data *gd,
                                                PartitionwiseAggregateType patype,
                                                GroupPathExtraData *extra);
static bool group_by_has_partkey(RelOptInfo *input_rel,
                                 List *targetList,
                                 List *groupClause);
static int  common_prefix_cmp(const void *a, const void *b);
static List *generate_setop_child_grouplist(SetOperationStmt *op,
                                            List *targetlist);
static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
                                      List *sortPathkeys, List *groupClause,
                                      SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
                                        List *sortPathkeys, List *groupClause,
                                        SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);


/*****************************************************************************
 *
 *     Query optimizer entry point
 *
 * Inputs:
 * parse: an analyzed-and-rewritten query tree for an optimizable statement
 * query_string: source text for the query tree (used for error reports)
 * cursorOptions: bitmask of CURSOR_OPT_XXX flags, see parsenodes.h
 * boundParams: passed-in parameter values, or NULL if none
 * es: ExplainState if being called from EXPLAIN, else NULL
 *
 * The result is a PlannedStmt tree.
 *
 * PARAM_EXTERN Param nodes within the parse tree can be replaced by Consts
 * using values from boundParams, if those values are marked PARAM_FLAG_CONST.
 * Parameter values not so marked are still relied on for estimation purposes.
 *
 * The ExplainState pointer is not currently used by the core planner, but it
 * is passed through to some planner hooks so that they can report information
 * back to EXPLAIN extension hooks.
 *
 * To support loadable plugins that monitor or modify planner behavior,
 * we provide a hook variable that lets a plugin get control before and
 * after the standard planning process.  The plugin would normally call
 * standard_planner().
 *
 * Note to plugin authors: standard_planner() scribbles on its Query input,
 * so you'd better copy that data structure if you want to plan more than once.
 *
 *****************************************************************************/
PlannedStmt *
planner(Query *parse, const char *query_string, int cursorOptions,
        ParamListInfo boundParams, ExplainState *es)
{
    PlannedStmt *result;

    if (planner_hook)
        result = (*planner_hook) (parse, query_string, cursorOptions,
                                  boundParams, es);
    else
        result = standard_planner(parse, query_string, cursorOptions,
                                  boundParams, es);

    pgstat_report_plan_id(result->planId, false);

    return result;
}
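
/*
 * Illustrative sketch only, not part of this file: a loadable plugin would
 * normally install planner_hook from its _PG_init(), saving any
 * previously-installed hook and chaining to it (or to standard_planner(),
 * per the comment above planner()).  "example_planner" and the
 * EXAMPLE_PLANNER_HOOK guard are hypothetical names.
 */
#ifdef EXAMPLE_PLANNER_HOOK
static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
example_planner(Query *parse, const char *query_string, int cursorOptions,
                ParamListInfo boundParams, ExplainState *es)
{
    /* inspect or adjust "parse" here; copy it first if planning it twice */
    if (prev_planner_hook)
        return (*prev_planner_hook) (parse, query_string, cursorOptions,
                                     boundParams, es);
    return standard_planner(parse, query_string, cursorOptions,
                            boundParams, es);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = example_planner;
}
#endif                          /* EXAMPLE_PLANNER_HOOK */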

PlannedStmt *
standard_planner(Query *parse, const char *query_string, int cursorOptions,
                 ParamListInfo boundParams, ExplainState *es)
{
    PlannedStmt *result;
    PlannerGlobal *glob;
    double      tuple_fraction;
    PlannerInfo *root;
    RelOptInfo *final_rel;
    Path       *best_path;
    Plan       *top_plan;
    ListCell   *lp,
               *lr;

    /*
     * Set up global state for this planner invocation.  This data is needed
     * across all levels of sub-Query that might exist in the given command,
     * so we keep it in a separate struct that's linked to by each per-Query
     * PlannerInfo.
     */
    glob = makeNode(PlannerGlobal);

    glob->boundParams = boundParams;
    glob->subplans = NIL;
    glob->subpaths = NIL;
    glob->subroots = NIL;
    glob->rewindPlanIDs = NULL;
    glob->finalrtable = NIL;
    glob->allRelids = NULL;
    glob->prunableRelids = NULL;
    glob->finalrteperminfos = NIL;
    glob->finalrowmarks = NIL;
    glob->resultRelations = NIL;
    glob->appendRelations = NIL;
    glob->partPruneInfos = NIL;
    glob->relationOids = NIL;
    glob->invalItems = NIL;
    glob->paramExecTypes = NIL;
    glob->lastPHId = 0;
    glob->lastRowMarkId = 0;
    glob->lastPlanNodeId = 0;
    glob->transientPlan = false;
    glob->dependsOnRole = false;
    glob->partition_directory = NULL;
    glob->rel_notnullatts_hash = NULL;

    /*
     * Assess whether it's feasible to use parallel mode for this query. We
     * can't do this in a standalone backend, or if the command will try to
     * modify any data, or if this is a cursor operation, or if GUCs are set
     * to values that don't permit parallelism, or if parallel-unsafe
     * functions are present in the query tree.
     *
     * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
     * MATERIALIZED VIEW to use parallel plans, but this is safe only because
     * the command is writing into a completely new table which workers won't
     * be able to see.  If the workers could see the table, the fact that
     * group locking would cause them to ignore the leader's heavyweight GIN
     * page locks would make this unsafe.  We'll have to fix that somehow if
     * we want to allow parallel inserts in general; updates and deletes have
     * additional problems especially around combo CIDs.)
     *
     * For now, we don't try to use parallel mode if we're running inside a
     * parallel worker.  We might eventually be able to relax this
     * restriction, but for now it seems best not to have parallel workers
     * trying to create their own parallel workers.
     */
    if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
        IsUnderPostmaster &&
        parse->commandType == CMD_SELECT &&
        !parse->hasModifyingCTE &&
        max_parallel_workers_per_gather > 0 &&
        !IsParallelWorker())
    {
        /* all the cheap tests pass, so scan the query tree */
        glob->maxParallelHazard = max_parallel_hazard(parse);
        glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
    }
    else
    {
        /* skip the query tree scan, just assume it's unsafe */
        glob->maxParallelHazard = PROPARALLEL_UNSAFE;
        glob->parallelModeOK = false;
    }

    /*
     * glob->parallelModeNeeded is normally set to false here and changed to
     * true during plan creation if a Gather or Gather Merge plan is actually
     * created (cf. create_gather_plan, create_gather_merge_plan).
     *
     * However, if debug_parallel_query = on or debug_parallel_query =
     * regress, then we impose parallel mode whenever it's safe to do so,
     * even if the final plan doesn't use parallelism.  It's not safe to do
     * so if the query contains anything parallel-unsafe; parallelModeOK
     * will be false in that case.  Note that parallelModeOK can't change
     * after this point.  Otherwise, everything in the query is either
     * parallel-safe or parallel-restricted, and in either case it should be
     * OK to impose parallel-mode restrictions.  If that ends up breaking
     * something, then either some function the user included in the query is
     * incorrectly labeled as parallel-safe or parallel-restricted when in
     * reality it's parallel-unsafe, or else the query planner itself has a
     * bug.
     */
    glob->parallelModeNeeded = glob->parallelModeOK &&
        (debug_parallel_query != DEBUG_PARALLEL_OFF);

    /* Determine what fraction of the plan is likely to be scanned */
    if (cursorOptions & CURSOR_OPT_FAST_PLAN)
    {
        /*
         * We have no real idea how many tuples the user will ultimately
         * FETCH from a cursor, but it is often the case that he doesn't want
         * 'em all, or would prefer a fast-start plan anyway so that he can
         * process some of the tuples sooner.  Use a GUC parameter to decide
         * what fraction to optimize for.
         */
        tuple_fraction = cursor_tuple_fraction;

        /*
         * We document cursor_tuple_fraction as simply being a fraction,
         * which means the edge cases 0 and 1 have to be treated specially
         * here.  We convert 1 to 0 ("all the tuples") and 0 to a very small
         * fraction.
         */
        if (tuple_fraction >= 1.0)
            tuple_fraction = 0.0;
        else if (tuple_fraction <= 0.0)
            tuple_fraction = 1e-10;
    }
    else
    {
        /* Default assumption is we need all the tuples */
        tuple_fraction = 0.0;
    }

    /*
     * Compute the initial path generation strategy mask.
     *
     * Some strategies, such as PGS_FOREIGNJOIN, have no corresponding
     * enable_* GUC, and so the corresponding bits are always set in the
     * default strategy mask.
     *
     * It may seem surprising that enable_indexscan sets both PGS_INDEXSCAN
     * and PGS_INDEXONLYSCAN.  However, the historical behavior of this GUC
     * corresponds to this exactly: enable_indexscan=off disables both
     * index-scan and index-only scan paths, whereas enable_indexonlyscan=off
     * converts the index-only scan paths that we would have considered into
     * index scan paths.
     */
    glob->default_pgs_mask = PGS_APPEND | PGS_MERGE_APPEND | PGS_FOREIGNJOIN |
        PGS_GATHER | PGS_CONSIDER_NONPARTIAL;
    if (enable_tidscan)
        glob->default_pgs_mask |= PGS_TIDSCAN;
    if (enable_seqscan)
        glob->default_pgs_mask |= PGS_SEQSCAN;
    if (enable_indexscan)
        glob->default_pgs_mask |= PGS_INDEXSCAN | PGS_INDEXONLYSCAN;
    if (enable_indexonlyscan)
        glob->default_pgs_mask |= PGS_CONSIDER_INDEXONLY;
    if (enable_bitmapscan)
        glob->default_pgs_mask |= PGS_BITMAPSCAN;
    if (enable_mergejoin)
    {
        glob->default_pgs_mask |= PGS_MERGEJOIN_PLAIN;
        if (enable_material)
            glob->default_pgs_mask |= PGS_MERGEJOIN_MATERIALIZE;
    }
    if (enable_nestloop)
    {
        glob->default_pgs_mask |= PGS_NESTLOOP_PLAIN;
        if (enable_material)
            glob->default_pgs_mask |= PGS_NESTLOOP_MATERIALIZE;
        if (enable_memoize)
            glob->default_pgs_mask |= PGS_NESTLOOP_MEMOIZE;
    }
    if (enable_hashjoin)
        glob->default_pgs_mask |= PGS_HASHJOIN;
    if (enable_gathermerge)
        glob->default_pgs_mask |= PGS_GATHER_MERGE;
    if (enable_partitionwise_join)
        glob->default_pgs_mask |= PGS_CONSIDER_PARTITIONWISE;

    /* Allow plugins to take control after we've initialized "glob" */
    if (planner_setup_hook)
        (*planner_setup_hook) (glob, parse, query_string, &tuple_fraction, es);

    /* primary planning entry point (may recurse for subqueries) */
    root = subquery_planner(glob, parse, NULL, NULL, false, tuple_fraction,
                            NULL);

    /* Select best Path and turn it into a Plan */
    final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
    best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);

    top_plan = create_plan(root, best_path);

    /*
     * If creating a plan for a scrollable cursor, make sure it can run
     * backwards on demand.  Add a Material node at the top at need.
     */
    if (cursorOptions & CURSOR_OPT_SCROLL)
    {
        if (!ExecSupportsBackwardScan(top_plan))
            top_plan = materialize_finished_plan(top_plan);
    }

    /*
     * Optionally add a Gather node for testing purposes, provided this is
     * actually a safe thing to do.
     *
     * We can add Gather even when top_plan has parallel-safe initPlans, but
     * then we have to move the initPlans to the Gather node because of
     * SS_finalize_plan's limitations.  That would cause cosmetic breakage of
     * regression tests when debug_parallel_query = regress, because initPlans
     * that would normally appear on the top_plan move to the Gather, causing
     * them to disappear from EXPLAIN output.  That doesn't seem worth kluging
     * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
     */
    if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
        top_plan->parallel_safe &&
        (top_plan->initPlan == NIL ||
         debug_parallel_query != DEBUG_PARALLEL_REGRESS))
    {
        Gather     *gather = makeNode(Gather);
        Cost        initplan_cost;
        bool        unsafe_initplans;

        gather->plan.targetlist = top_plan->targetlist;
        gather->plan.qual = NIL;
        gather->plan.lefttree = top_plan;
        gather->plan.righttree = NULL;
        gather->num_workers = 1;
        gather->single_copy = true;
        gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);

        /* Transfer any initPlans to the new top node */
        gather->plan.initPlan = top_plan->initPlan;
        top_plan->initPlan = NIL;

        /*
         * Since this Gather has no parallel-aware descendants to signal to,
         * we don't need a rescan Param.
         */
        gather->rescan_param = -1;

        /*
         * Ideally we'd use cost_gather here, but setting up dummy path data
         * to satisfy it doesn't seem much cleaner than knowing what it does.
         */
        gather->plan.startup_cost = top_plan->startup_cost +
            parallel_setup_cost;
        gather->plan.total_cost = top_plan->total_cost +
            parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
        gather->plan.plan_rows = top_plan->plan_rows;
        gather->plan.plan_width = top_plan->plan_width;
        gather->plan.parallel_aware = false;
        gather->plan.parallel_safe = false;

        /*
         * Delete the initplans' cost from top_plan.  We needn't add it to the
         * Gather node, since the above coding already included it there.
         */
        SS_compute_initplan_cost(gather->plan.initPlan,
                                 &initplan_cost, &unsafe_initplans);
        top_plan->startup_cost -= initplan_cost;
        top_plan->total_cost -= initplan_cost;

        /* use parallel mode for parallel plans. */
        root->glob->parallelModeNeeded = true;

        top_plan = &gather->plan;
    }

    /*
     * If any Params were generated, run through the plan tree and compute
     * each plan node's extParam/allParam sets.  Ideally we'd merge this into
     * set_plan_references' tree traversal, but for now it has to be separate
     * because we need to visit subplans before, not after, the main plan.
     */
    if (glob->paramExecTypes != NIL)
    {
        Assert(list_length(glob->subplans) == list_length(glob->subroots));
        forboth(lp, glob->subplans, lr, glob->subroots)
        {
            Plan       *subplan = (Plan *) lfirst(lp);
            PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);

            SS_finalize_plan(subroot, subplan);
        }
        SS_finalize_plan(root, top_plan);
    }

    /* final cleanup of the plan */
    Assert(glob->finalrtable == NIL);
    Assert(glob->finalrteperminfos == NIL);
    Assert(glob->finalrowmarks == NIL);
    Assert(glob->resultRelations == NIL);
    Assert(glob->appendRelations == NIL);
    top_plan = set_plan_references(root, top_plan);
    /* ... and the subplans (both regular subplans and initplans) */
    Assert(list_length(glob->subplans) == list_length(glob->subroots));
    forboth(lp, glob->subplans, lr, glob->subroots)
    {
        Plan       *subplan = (Plan *) lfirst(lp);
        PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);

        lfirst(lp) = set_plan_references(subroot, subplan);
    }

    /* build the PlannedStmt result */
    result = makeNode(PlannedStmt);

    result->commandType = parse->commandType;
    result->queryId = parse->queryId;
    result->planOrigin = PLAN_STMT_STANDARD;
    result->hasReturning = (parse->returningList != NIL);
    result->hasModifyingCTE = parse->hasModifyingCTE;
    result->canSetTag = parse->canSetTag;
    result->transientPlan = glob->transientPlan;
    result->dependsOnRole = glob->dependsOnRole;
    result->parallelModeNeeded = glob->parallelModeNeeded;
    result->planTree = top_plan;
    result->partPruneInfos = glob->partPruneInfos;
    result->rtable = glob->finalrtable;
    result->unprunableRelids = bms_difference(glob->allRelids,
                                              glob->prunableRelids);
    result->permInfos = glob->finalrteperminfos;
    result->subrtinfos = glob->subrtinfos;
    result->resultRelations = glob->resultRelations;
    result->appendRelations = glob->appendRelations;
    result->subplans = glob->subplans;
    result->rewindPlanIDs = glob->rewindPlanIDs;
    result->rowMarks = glob->finalrowmarks;
    result->relationOids = glob->relationOids;
    result->invalItems = glob->invalItems;
    result->paramExecTypes = glob->paramExecTypes;
    /* utilityStmt should be null, but we might as well copy it */
    result->utilityStmt = parse->utilityStmt;
    result->elidedNodes = glob->elidedNodes;
    result->stmt_location = parse->stmt_location;
    result->stmt_len = parse->stmt_len;

    result->jitFlags = PGJIT_NONE;
    if (jit_enabled && jit_above_cost >= 0 &&
        top_plan->total_cost > jit_above_cost)
    {
        result->jitFlags |= PGJIT_PERFORM;

        /*
         * Decide how much effort should be put into generating better code.
         */
        if (jit_optimize_above_cost >= 0 &&
            top_plan->total_cost > jit_optimize_above_cost)
            result->jitFlags |= PGJIT_OPT3;
        if (jit_inline_above_cost >= 0 &&
            top_plan->total_cost > jit_inline_above_cost)
            result->jitFlags |= PGJIT_INLINE;

        /*
         * Decide which operations should be JITed.
         */
        if (jit_expressions)
            result->jitFlags |= PGJIT_EXPR;
        if (jit_tuple_deforming)
            result->jitFlags |= PGJIT_DEFORM;
    }

    /* Allow plugins to take control before we discard "glob" */
    if (planner_shutdown_hook)
        (*planner_shutdown_hook) (glob, parse, query_string, result);

    if (glob->partition_directory != NULL)
        DestroyPartitionDirectory(glob->partition_directory);

    return result;
}
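
/*
 * Illustrative sketch only (hypothetical names): planner_setup_hook and
 * planner_shutdown_hook can be paired to observe a single planner run, e.g.
 * to tweak tuple_fraction before planning starts or to inspect the finished
 * PlannedStmt before "glob" is discarded.  The parameter lists below simply
 * mirror the call sites in standard_planner() above.
 */
#ifdef EXAMPLE_PLANNER_LIFECYCLE_HOOKS
static void
example_planner_setup(PlannerGlobal *glob, Query *parse,
                      const char *query_string, double *tuple_fraction,
                      ExplainState *es)
{
    /* e.g. favor fast-start plans whenever all tuples were requested */
    if (*tuple_fraction <= 0.0)
        *tuple_fraction = 0.1;
}

static void
example_planner_shutdown(PlannerGlobal *glob, Query *parse,
                         const char *query_string, PlannedStmt *result)
{
    /* e.g. record result->planTree->total_cost for later analysis */
}
#endif                          /* EXAMPLE_PLANNER_LIFECYCLE_HOOKS */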


/*--------------------
 * subquery_planner
 *      Invokes the planner on a subquery.  We recurse to here for each
 *      sub-SELECT found in the query tree.
 *
 * glob is the global state for the current planner run.
 * parse is the querytree produced by the parser & rewriter.
 * plan_name is the name to assign to this subplan (NULL at the top level).
 * parent_root is the immediate parent Query's info (NULL at the top level).
 * hasRecursion is true if this is a recursive WITH query.
 * tuple_fraction is the fraction of tuples we expect will be retrieved.
 * tuple_fraction is interpreted as explained for grouping_planner, below.
 * setops is used for set operation subqueries to provide the subquery with
 * the context in which it's being used so that Paths correctly sorted for the
 * set operation can be generated.  NULL when not planning a set operation
 * child, or when a child of a set op that isn't interested in sorted input.
 *
 * Basically, this routine does the stuff that should only be done once
 * per Query object.  It then calls grouping_planner.  At one time,
 * grouping_planner could be invoked recursively on the same Query object;
 * that's not currently true, but we keep the separation between the two
 * routines anyway, in case we need it again someday.
 *
 * subquery_planner will be called recursively to handle sub-Query nodes
 * found within the query's expressions and rangetable.
 *
 * Returns the PlannerInfo struct ("root") that contains all data generated
 * while planning the subquery.  In particular, the Path(s) attached to
 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
 * cheapest way(s) to implement the query.  The top level will select the
 * best Path and pass it through createplan.c to produce a finished Plan.
 *--------------------
 */
PlannerInfo *
subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
                 PlannerInfo *parent_root, bool hasRecursion,
                 double tuple_fraction, SetOperationStmt *setops)
{
    PlannerInfo *root;
    List       *newWithCheckOptions;
    List       *newHaving;
    bool        hasOuterJoins;
    bool        hasResultRTEs;
    RelOptInfo *final_rel;
    ListCell   *l;

    /* Create a PlannerInfo data structure for this subquery */
    root = makeNode(PlannerInfo);
    root->parse = parse;
    root->glob = glob;
    root->query_level = parent_root ? parent_root->query_level + 1 : 1;
    root->plan_name = plan_name;
    root->parent_root = parent_root;
    root->plan_params = NIL;
    root->outer_params = NULL;
    root->planner_cxt = CurrentMemoryContext;
    root->init_plans = NIL;
    root->cte_plan_ids = NIL;
    root->multiexpr_params = NIL;
    root->join_domains = NIL;
    root->eq_classes = NIL;
    root->ec_merging_done = false;
    root->last_rinfo_serial = 0;
    root->all_result_relids =
        parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
    root->leaf_result_relids = NULL;    /* we'll find out leaf-ness later */
    root->append_rel_list = NIL;
    root->row_identity_vars = NIL;
    root->rowMarks = NIL;
    memset(root->upper_rels, 0, sizeof(root->upper_rels));
    memset(root->upper_targets, 0, sizeof(root->upper_targets));
    root->processed_groupClause = NIL;
    root->processed_distinctClause = NIL;
    root->processed_tlist = NIL;
    root->update_colnos = NIL;
    root->grouping_map = NULL;
    root->minmax_aggs = NIL;
    root->qual_security_level = 0;
    root->hasPseudoConstantQuals = false;
    root->hasAlternativeSubPlans = false;
    root->placeholdersFrozen = false;
    root->hasRecursion = hasRecursion;
    root->assumeReplanning = false;
    if (hasRecursion)
        root->wt_param_id = assign_special_exec_param(root);
    else
        root->wt_param_id = -1;
    root->non_recursive_path = NULL;

    /*
     * Create the top-level join domain.  This won't have valid contents until
     * deconstruct_jointree fills it in, but the node needs to exist before
     * that so we can build EquivalenceClasses referencing it.
     */
    root->join_domains = list_make1(makeNode(JoinDomain));

    /*
     * If there is a WITH list, process each WITH query and either convert it
     * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
     */
    if (parse->cteList)
        SS_process_ctes(root);

    /*
     * If it's a MERGE command, transform the joinlist as appropriate.
     */
    transform_MERGE_to_join(parse);

    /*
     * Scan the rangetable for relation RTEs and retrieve the necessary
     * catalog information for each relation.  Using this information, clear
     * the inh flag for any relation that has no children, collect not-null
     * attribute numbers for any relation that has column not-null
     * constraints, and expand virtual generated columns for any relation that
     * contains them.  Note that this step does not descend into sublinks and
     * subqueries; if we pull up any sublinks or subqueries below, their
     * relation RTEs are processed just before pulling them up.
     */
    parse = root->parse = preprocess_relation_rtes(root);

    /*
     * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
     * that we don't need so many special cases to deal with that situation.
     */
    replace_empty_jointree(parse);

    /*
     * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
     * to transform them into joins.  Note that this step does not descend
     * into subqueries; if we pull up any subqueries below, their SubLinks are
     * processed just before pulling them up.
     */
    if (parse->hasSubLinks)
        pull_up_sublinks(root);

    /*
     * Scan the rangetable for function RTEs, do const-simplification on them,
     * and then inline them if possible (producing subqueries that might get
     * pulled up next).  Recursion issues here are handled in the same way as
     * for SubLinks.
     */
    preprocess_function_rtes(root);

    /*
     * Check to see if any subqueries in the jointree can be merged into this
     * query.
     */
    pull_up_subqueries(root);

    /*
     * If this is a simple UNION ALL query, flatten it into an appendrel.  We
     * do this now because it requires applying pull_up_subqueries to the leaf
     * queries of the UNION ALL, which weren't touched above because they
     * weren't referenced by the jointree (they will be after we do this).
     */
    if (parse->setOperations)
        flatten_simple_union_all(root);

    /*
     * Survey the rangetable to see what kinds of entries are present.  We can
     * skip some later processing if relevant SQL features are not used; for
     * example if there are no JOIN RTEs we can avoid the expense of doing
     * flatten_join_alias_vars().  This must be done after we have finished
     * adding rangetable entries, of course.  (Note: actually, processing of
     * inherited or partitioned rels can cause RTEs for their child tables to
     * get added later; but those must all be RTE_RELATION entries, so they
     * don't invalidate the conclusions drawn here.)
     */
    root->hasJoinRTEs = false;
    root->hasLateralRTEs = false;
    root->group_rtindex = 0;
    hasOuterJoins = false;
    hasResultRTEs = false;
    foreach(l, parse->rtable)
    {
        RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);

        switch (rte->rtekind)
        {
            case RTE_JOIN:
                root->hasJoinRTEs = true;
                if (IS_OUTER_JOIN(rte->jointype))
                    hasOuterJoins = true;
                break;
            case RTE_RESULT:
                hasResultRTEs = true;
                break;
            case RTE_GROUP:
                Assert(parse->hasGroupRTE);
                root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
                break;
            default:
                /* No work here for other RTE types */
                break;
        }

        if (rte->lateral)
            root->hasLateralRTEs = true;

        /*
         * We can also determine the maximum security level required for any
         * securityQuals now.  Addition of inheritance-child RTEs won't affect
         * this, because child tables don't have their own securityQuals; see
         * expand_single_inheritance_child().
         */
        if (rte->securityQuals)
            root->qual_security_level = Max(root->qual_security_level,
                                            list_length(rte->securityQuals));
    }

    /*
     * If we have now verified that the query target relation is
     * non-inheriting, mark it as a leaf target.
     */
    if (parse->resultRelation)
    {
        RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);

        if (!rte->inh)
            root->leaf_result_relids =
                bms_make_singleton(parse->resultRelation);
    }
931 : : /*
932 : : * This would be a convenient time to check access permissions for all
933 : : * relations mentioned in the query, since it would be better to fail now,
934 : : * before doing any detailed planning. However, for historical reasons,
935 : : * we leave this to be done at executor startup.
936 : : *
937 : : * Note, however, that we do need to check access permissions for any view
938 : : * relations mentioned in the query, in order to prevent information being
939 : : * leaked by selectivity estimation functions, which only check view owner
940 : : * permissions on underlying tables (see all_rows_selectable() and its
941 : : * callers). This is a little ugly, because it means that access
942 : : * permissions for views will be checked twice, which is another reason
943 : : * why it would be better to do all the ACL checks here.
944 : : */
945 [ + - + + : 141455 : foreach(l, parse->rtable)
+ + ]
946 : : {
947 : 89115 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
948 : :
949 [ + + + + ]: 89115 : if (rte->perminfoindex != 0 &&
950 : 49185 : rte->relkind == RELKIND_VIEW)
951 : : {
952 : 2353 : RTEPermissionInfo *perminfo;
953 : 2353 : bool result;
954 : :
955 : 2353 : perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
956 : 2353 : result = ExecCheckOneRelPerms(perminfo);
957 [ + + ]: 2353 : if (!result)
958 : 64 : aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_VIEW,
959 : 64 : get_rel_name(perminfo->relid));
960 : 2353 : }
961 : 89115 : }
962 : :
963 : : /*
964 : : * Preprocess RowMark information. We need to do this after subquery
965 : : * pullup, so that all base relations are present.
966 : : */
967 : 52340 : preprocess_rowmarks(root);
968 : :
969 : : /*
970 : : * Set hasHavingQual to remember if HAVING clause is present. Needed
971 : : * because preprocess_expression will reduce a constant-true condition to
972 : : * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
973 : : */
974 : 52340 : root->hasHavingQual = (parse->havingQual != NULL);
975 : :
976 : : /*
977 : : * Do expression preprocessing on targetlist and quals, as well as other
978 : : * random expressions in the querytree. Note that we do not need to
979 : : * handle sort/group expressions explicitly, because they are actually
980 : : * part of the targetlist.
981 : : */
982 : 52340 : parse->targetList = (List *)
983 : 52340 : preprocess_expression(root, (Node *) parse->targetList,
984 : : EXPRKIND_TARGET);
985 : :
986 : 52340 : newWithCheckOptions = NIL;
987 [ + + + + : 52556 : foreach(l, parse->withCheckOptions)
+ + ]
988 : : {
989 : 596 : WithCheckOption *wco = lfirst_node(WithCheckOption, l);
990 : :
991 : 596 : wco->qual = preprocess_expression(root, wco->qual,
992 : : EXPRKIND_QUAL);
993 [ + + ]: 596 : if (wco->qual != NULL)
994 : 406 : newWithCheckOptions = lappend(newWithCheckOptions, wco);
995 : 596 : }
996 : 51528 : parse->withCheckOptions = newWithCheckOptions;
997 : :
998 : 51528 : parse->returningList = (List *)
999 : 51528 : preprocess_expression(root, (Node *) parse->returningList,
1000 : : EXPRKIND_TARGET);
1001 : :
1002 : 51528 : preprocess_qual_conditions(root, (Node *) parse->jointree);
1003 : :
1004 : 51528 : parse->havingQual = preprocess_expression(root, parse->havingQual,
1005 : : EXPRKIND_QUAL);
1006 : :
1007 [ + + + + : 52121 : foreach(l, parse->windowClause)
+ + ]
1008 : : {
1009 : 593 : WindowClause *wc = lfirst_node(WindowClause, l);
1010 : :
1011 : : /* partitionClause/orderClause are sort/group expressions */
1012 : 593 : wc->startOffset = preprocess_expression(root, wc->startOffset,
1013 : : EXPRKIND_LIMIT);
1014 : 593 : wc->endOffset = preprocess_expression(root, wc->endOffset,
1015 : : EXPRKIND_LIMIT);
1016 : 593 : }
1017 : :
1018 : 51528 : parse->limitOffset = preprocess_expression(root, parse->limitOffset,
1019 : : EXPRKIND_LIMIT);
1020 : 51528 : parse->limitCount = preprocess_expression(root, parse->limitCount,
1021 : : EXPRKIND_LIMIT);
1022 : :
1023 [ + + ]: 51528 : if (parse->onConflict)
1024 : : {
1025 : 284 : parse->onConflict->arbiterElems = (List *)
1026 : 568 : preprocess_expression(root,
1027 : 284 : (Node *) parse->onConflict->arbiterElems,
1028 : : EXPRKIND_ARBITER_ELEM);
1029 : 284 : parse->onConflict->arbiterWhere =
1030 : 568 : preprocess_expression(root,
1031 : 284 : parse->onConflict->arbiterWhere,
1032 : : EXPRKIND_QUAL);
1033 : 284 : parse->onConflict->onConflictSet = (List *)
1034 : 568 : preprocess_expression(root,
1035 : 284 : (Node *) parse->onConflict->onConflictSet,
1036 : : EXPRKIND_TARGET);
1037 : 284 : parse->onConflict->onConflictWhere =
1038 : 568 : preprocess_expression(root,
1039 : 284 : parse->onConflict->onConflictWhere,
1040 : : EXPRKIND_QUAL);
1041 : : /* exclRelTlist contains only Vars, so no preprocessing needed */
1042 : 284 : }
1043 : :
1044 [ + + + + : 52057 : foreach(l, parse->mergeActionList)
+ + ]
1045 : : {
1046 : 529 : MergeAction *action = (MergeAction *) lfirst(l);
1047 : :
1048 : 529 : action->targetList = (List *)
1049 : 1058 : preprocess_expression(root,
1050 : 529 : (Node *) action->targetList,
1051 : : EXPRKIND_TARGET);
1052 : 529 : action->qual =
1053 : 1058 : preprocess_expression(root,
1054 : 529 : (Node *) action->qual,
1055 : : EXPRKIND_QUAL);
1056 : 529 : }
1057 : :
1058 : 51528 : parse->mergeJoinCondition =
1059 : 51528 : preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1060 : :
1061 : 51528 : root->append_rel_list = (List *)
1062 : 51528 : preprocess_expression(root, (Node *) root->append_rel_list,
1063 : : EXPRKIND_APPINFO);

    /* Also need to preprocess expressions within RTEs */
    foreach(l, parse->rtable)
    {
        RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
        int         kind;
        ListCell   *lcsq;

        if (rte->rtekind == RTE_RELATION)
        {
            if (rte->tablesample)
                rte->tablesample = (TableSampleClause *)
                    preprocess_expression(root,
                                          (Node *) rte->tablesample,
                                          EXPRKIND_TABLESAMPLE);
        }
        else if (rte->rtekind == RTE_SUBQUERY)
        {
            /*
             * We don't want to do all preprocessing yet on the subquery's
             * expressions, since that will happen when we plan it.  But if it
             * contains any join aliases of our level, those have to get
             * expanded now, because planning of the subquery won't do it.
             * That's only possible if the subquery is LATERAL.
             */
            if (rte->lateral && root->hasJoinRTEs)
                rte->subquery = (Query *)
                    flatten_join_alias_vars(root, root->parse,
                                            (Node *) rte->subquery);
        }
        else if (rte->rtekind == RTE_FUNCTION)
        {
            /* Preprocess the function expression(s) fully */
            kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
            rte->functions = (List *)
                preprocess_expression(root, (Node *) rte->functions, kind);
        }
        else if (rte->rtekind == RTE_TABLEFUNC)
        {
            /* Preprocess the function expression(s) fully */
            kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
            rte->tablefunc = (TableFunc *)
                preprocess_expression(root, (Node *) rte->tablefunc, kind);
        }
        else if (rte->rtekind == RTE_VALUES)
        {
            /* Preprocess the values lists fully */
            kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
            rte->values_lists = (List *)
                preprocess_expression(root, (Node *) rte->values_lists, kind);
        }
        else if (rte->rtekind == RTE_GROUP)
        {
            /* Preprocess the groupexprs list fully */
            rte->groupexprs = (List *)
                preprocess_expression(root, (Node *) rte->groupexprs,
                                      EXPRKIND_GROUPEXPR);
        }

        /*
         * Process each element of the securityQuals list as if it were a
         * separate qual expression (as indeed it is).  We need to do it this
         * way to get proper canonicalization of AND/OR structure.  Note that
         * this converts each element into an implicit-AND sublist.
         */
        foreach(lcsq, rte->securityQuals)
        {
            lfirst(lcsq) = preprocess_expression(root,
                                                 (Node *) lfirst(lcsq),
                                                 EXPRKIND_QUAL);
        }
    }

    /*
     * Now that we are done preprocessing expressions, and in particular done
     * flattening join alias variables, get rid of the joinaliasvars lists.
     * They no longer match what expressions in the rest of the tree look
     * like, because we have not preprocessed expressions in those lists (and
     * do not want to; for example, expanding a SubLink there would result in
     * a useless unreferenced subplan).  Leaving them in place simply creates
     * a hazard for later scans of the tree.  We could try to prevent that by
     * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
     * but that doesn't sound very reliable.
     */
    if (root->hasJoinRTEs)
    {
        foreach(l, parse->rtable)
        {
            RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);

            rte->joinaliasvars = NIL;
        }
    }
1158 : : /*
1159 : : * Replace any Vars in the subquery's targetlist and havingQual that
1160 : : * reference GROUP outputs with the underlying grouping expressions.
1161 : : *
1162 : : * Note that we need to perform this replacement after we've preprocessed
1163 : : * the grouping expressions. This is to ensure that there is only one
1164 : : * instance of SubPlan for each SubLink contained within the grouping
1165 : : * expressions.
1166 : : */
1167 [ + + ]: 51776 : if (parse->hasGroupRTE)
1168 : : {
1169 : 743 : parse->targetList = (List *)
1170 : 743 : flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1171 : 743 : parse->havingQual =
1172 : 743 : flatten_group_exprs(root, root->parse, parse->havingQual);
1173 : 743 : }
1174 : :
1175 : : /* Constant-folding might have removed all set-returning functions */
1176 [ + + ]: 51776 : if (parse->hasTargetSRFs)
1177 : 1646 : parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1178 : :
1179 : : /*
1180 : : * If we have grouping sets, expand the groupingSets tree of this query to
1181 : : * a flat list of grouping sets. We need to do this before optimizing
1182 : : * HAVING, since we can't easily tell if there's an empty grouping set
1183 : : * until we have this representation.
1184 : : */
1185 [ + + ]: 51776 : if (parse->groupingSets)
1186 : : {
1187 : 166 : parse->groupingSets =
1188 : 166 : expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1189 : 166 : }
1190 : :
1191 : : /*
1192 : : * In some cases we may want to transfer a HAVING clause into WHERE. We
1193 : : * cannot do so if the HAVING clause contains aggregates (obviously) or
1194 : : * volatile functions (since a HAVING clause is supposed to be executed
1195 : : * only once per group). We also can't do this if there are any grouping
1196 : : * sets and the clause references any columns that are nullable by the
1197 : : * grouping sets; the nulled values of those columns are not available
1198 : : * before the grouping step. (The test on groupClause might seem wrong,
1199 : : * but it's okay: it's just an optimization to avoid running pull_varnos
1200 : : * when there cannot be any Vars in the HAVING clause.)
1201 : : *
1202 : : * Also, it may be that the clause is so expensive to execute that we're
1203 : : * better off doing it only once per group, despite the loss of
1204 : : * selectivity. This is hard to estimate short of doing the entire
1205 : : * planning process twice, so we use a heuristic: clauses containing
1206 : : * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1207 : : * clause into WHERE, in hopes of eliminating tuples before aggregation
1208 : : * instead of after.
1209 : : *
1210 : : * If the query has no empty grouping set then we can simply move such a
1211 : : * clause into WHERE; any group that fails the clause will not be in the
1212 : : * output because none of its tuples will reach the grouping or
1213 : : * aggregation stage. Otherwise we have to keep the clause in HAVING to
1214 : : * ensure that we don't emit a bogus aggregated row. But then the HAVING
1215 : : * clause must be degenerate (variable-free), so we can copy it into WHERE
1216 : : * so that query_planner() can use it in a gating Result node. (This could
1217 : : * be done better, but it seems not worth optimizing.)
1218 : : *
1219 : : * Note that a HAVING clause may contain expressions that are not fully
1220 : : * preprocessed. This can happen if these expressions are part of
1221 : : * grouping items. In such cases, they are replaced with GROUP Vars in
1222 : : * the parser and then replaced back after we're done with expression
1223 : : * preprocessing on havingQual. This is not an issue if the clause
1224 : : * remains in HAVING, because these expressions will be matched to lower
1225 : : * target items in setrefs.c. However, if the clause is moved or copied
1226 : : * into WHERE, we need to ensure that these expressions are fully
1227 : : * preprocessed.
1228 : : *
1229 : : * Note that both havingQual and parse->jointree->quals are in
1230 : : * implicitly-ANDed-list form at this point, even though they are declared
1231 : : * as Node *.
1232 : : */
1233 : 51776 : newHaving = NIL;
1234 [ + + + + : 51855 : foreach(l, (List *) parse->havingQual)
+ + ]
1235 : : {
1236 : 89 : Node *havingclause = (Node *) lfirst(l);
1237 : :
1238 [ + + ]: 89 : if (contain_agg_clause(havingclause) ||
1239 [ + - ]: 84 : contain_volatile_functions(havingclause) ||
1240 [ + - + + ]: 104 : contain_subplans(havingclause) ||
1241 [ + + + + ]: 84 : (parse->groupClause && parse->groupingSets &&
1242 : 20 : bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1243 : : {
1244 : : /* keep it in HAVING */
1245 : 17 : newHaving = lappend(newHaving, havingclause);
1246 : 17 : }
1247 [ + + + + ]: 80 : else if (parse->groupClause &&
1248 [ + + ]: 66 : (parse->groupingSets == NIL ||
1249 : 8 : (List *) linitial(parse->groupingSets) != NIL))
1250 : : {
1251 : : /* There is GROUP BY, but no empty grouping set */
1252 : 64 : Node *whereclause;
1253 : :
1254 : : /* Preprocess the HAVING clause fully */
1255 : 64 : whereclause = preprocess_expression(root, havingclause,
1256 : : EXPRKIND_QUAL);
1257 : : /* ... and move it to WHERE */
1258 : 64 : parse->jointree->quals = (Node *)
1259 : 128 : list_concat((List *) parse->jointree->quals,
1260 : 64 : (List *) whereclause);
1261 : 64 : }
1262 : : else
1263 : : {
1264 : : /* There is an empty grouping set (perhaps implicitly) */
1265 : 8 : Node *whereclause;
1266 : :
1267 : : /* Preprocess the HAVING clause fully */
1268 : 8 : whereclause = preprocess_expression(root, copyObject(havingclause),
1269 : : EXPRKIND_QUAL);
1270 : : /* ... and put a copy in WHERE */
1271 : 8 : parse->jointree->quals = (Node *)
1272 : 16 : list_concat((List *) parse->jointree->quals,
1273 : 8 : (List *) whereclause);
1274 : : /* ... and also keep it in HAVING */
1275 : 8 : newHaving = lappend(newHaving, havingclause);
1276 : 8 : }
1277 : 79 : }
1278 : 51766 : parse->havingQual = (Node *) newHaving;
1279 : :
1280 : : /*
1281 : : * If we have any outer joins, try to reduce them to plain inner joins.
1282 : : * This step is most easily done after we've done expression
1283 : : * preprocessing.
1284 : : */
1285 [ + + ]: 51766 : if (hasOuterJoins)
1286 : 3410 : reduce_outer_joins(root);
1287 : :
1288 : : /*
1289 : : * If we have any RTE_RESULT relations, see if they can be deleted from
1290 : : * the jointree. We also rely on this processing to flatten single-child
1291 : : * FromExprs underneath outer joins. This step is most effectively done
1292 : : * after we've done expression preprocessing and outer join reduction.
1293 : : */
1294 [ + + + + ]: 51766 : if (hasResultRTEs || hasOuterJoins)
1295 : 21195 : remove_useless_result_rtes(root);
1296 : :
1297 : : /*
1298 : : * Do the main planning.
1299 : : */
1300 : 51766 : grouping_planner(root, tuple_fraction, setops);
1301 : :
1302 : : /*
1303 : : * Capture the set of outer-level param IDs we have access to, for use in
1304 : : * extParam/allParam calculations later.
1305 : : */
1306 : 51766 : SS_identify_outer_params(root);
1307 : :
1308 : : /*
1309 : : * If any initPlans were created in this query level, adjust the surviving
1310 : : * Paths' costs and parallel-safety flags to account for them. The
1311 : : * initPlans won't actually get attached to the plan tree till
1312 : : * create_plan() runs, but we must include their effects now.
1313 : : */
1314 : 51766 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1315 : 51766 : SS_charge_for_initplans(root, final_rel);
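 : :
 : : /*
 : :  * Example (editor's note): in
 : :  *     SELECT * FROM t1 WHERE x = (SELECT max(y) FROM t2);
 : :  * the uncorrelated sub-select becomes an initPlan, and its cost is
 : :  * folded into the startup cost of every surviving Path here, since
 : :  * the initPlan must complete before the first row can be returned.
 : :  */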
1316 : :
1317 : : /*
1318 : : * Make sure we've identified the cheapest Path for the final rel. (By
1319 : : * doing this here not in grouping_planner, we include initPlan costs in
1320 : : * the decision, though it's unlikely that will change anything.)
1321 : : */
1322 : 51766 : set_cheapest(final_rel);
1323 : :
1324 : 103532 : return root;
1325 : 51766 : }
1326 : :
1327 : : /*
1328 : : * preprocess_expression
1329 : : * Do subquery_planner's preprocessing work for an expression,
1330 : : * which can be a targetlist, a WHERE clause (including JOIN/ON
1331 : : * conditions), a HAVING clause, or a few other things.
1332 : : */
1333 : : static Node *
1334 : 434806 : preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1335 : : {
1336 : : /*
1337 : : * Fall out quickly if expression is empty. This occurs often enough to
1338 : : * be worth checking. Note that null->null is the correct conversion for
1339 : : * implicit-AND result format, too.
1340 : : */
1341 [ + + ]: 434806 : if (expr == NULL)
1342 : 342499 : return NULL;
1343 : :
1344 : : /*
1345 : : * If the query has any join RTEs, replace join alias variables with
1346 : : * base-relation variables. We must do this first, since any expressions
1347 : : * we may extract from the joinaliasvars lists have not been preprocessed.
1348 : : * For example, if we did this after sublink processing, sublinks expanded
1349 : : * out from join aliases would not get processed. But we can skip this in
1350 : : * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1351 : : * they can't contain any Vars of the current query level.
1352 : : */
1353 [ + + - + ]: 111975 : if (root->hasJoinRTEs &&
1354 [ + + ]: 19820 : !(kind == EXPRKIND_RTFUNC ||
1355 [ + + ]: 19726 : kind == EXPRKIND_VALUES ||
1356 [ + + ]: 19671 : kind == EXPRKIND_TABLESAMPLE ||
1357 : 19668 : kind == EXPRKIND_TABLEFUNC))
1358 : 19668 : expr = flatten_join_alias_vars(root, root->parse, expr);
1359 : :
1360 : : /*
1361 : : * Simplify constant expressions. For function RTEs, this was already
1362 : : * done by preprocess_function_rtes. (But note we must do it again for
1363 : : * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1364 : : * un-simplified subexpressions inserted by flattening of subqueries or
1365 : : * join alias variables.)
1366 : : *
1367 : : * Note: an essential effect of this is to convert named-argument function
1368 : : * calls to positional notation and insert the current actual values of
1369 : : * any default arguments for functions. To ensure that happens, we *must*
1370 : : * process all expressions here. Previous PG versions sometimes skipped
1371 : : * const-simplification if it didn't seem worth the trouble, but we can't
1372 : : * do that anymore.
1373 : : *
1374 : : * Note: this also flattens nested AND and OR expressions into N-argument
1375 : : * form. All processing of a qual expression after this point must be
1376 : : * careful to maintain AND/OR flatness --- that is, do not generate a tree
1377 : : * with AND directly under AND, nor OR directly under OR.
1378 : : */
1379 [ + + ]: 92307 : if (kind != EXPRKIND_RTFUNC)
1380 : 88774 : expr = eval_const_expressions(root, expr);
1381 : :
1382 : : /*
1383 : : * If it's a qual or havingQual, canonicalize it.
1384 : : */
1385 [ + + ]: 92307 : if (kind == EXPRKIND_QUAL)
1386 : : {
1387 : 32877 : expr = (Node *) canonicalize_qual((Expr *) expr, false);
1388 : :
1389 : : #ifdef OPTIMIZER_DEBUG
1390 : : printf("After canonicalize_qual()\n");
1391 : : pprint(expr);
1392 : : #endif
1393 : 32877 : }
1394 : :
1395 : : /*
1396 : : * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1397 : : * hashfuncid of any that might execute more quickly by using hash lookups
1398 : : * instead of a linear search.
1399 : : */
1400 [ + + + + ]: 92307 : if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1401 : : {
1402 : 84313 : convert_saop_to_hashed_saop(expr);
1403 : 84313 : }
1404 : :
1405 : : /* Expand SubLinks to SubPlans */
1406 [ + + ]: 92307 : if (root->parse->hasSubLinks)
1407 : 11521 : expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1408 : :
1409 : : /*
1410 : : * XXX do not insert anything here unless you have grokked the comments in
1411 : : * SS_replace_correlation_vars ...
1412 : : */
1413 : :
1414 : : /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1415 [ + + ]: 92307 : if (root->query_level > 1)
1416 : 18005 : expr = SS_replace_correlation_vars(root, expr);
1417 : :
1418 : : /*
1419 : : * If it's a qual or havingQual, convert it to implicit-AND format. (We
1420 : : * don't want to do this before eval_const_expressions, since the latter
1421 : : * would be unable to simplify a top-level AND correctly. Also,
1422 : : * SS_process_sublinks expects explicit-AND format.)
1423 : : */
1424 [ + + ]: 92307 : if (kind == EXPRKIND_QUAL)
1425 : 32877 : expr = (Node *) make_ands_implicit((Expr *) expr);
1426 : :
1427 : 92307 : return expr;
1428 : 434806 : }
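 : :
 : : /*
 : :  * Sketch of the result format (editor's addition): for kind ==
 : :  * EXPRKIND_QUAL, a qual such as "x > 0 AND y < 5" comes back as a bare
 : :  * List of clause trees, roughly
 : :  *
 : :  *     (Node *) list_make2(<OpExpr for x > 0>, <OpExpr for y < 5>)
 : :  *
 : :  * with no explicit AND node; a NULL input stays NULL, which doubles as
 : :  * the empty implicit-AND list.
 : :  */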
1429 : :
1430 : : /*
1431 : : * preprocess_qual_conditions
1432 : : * Recursively scan the query's jointree and do subquery_planner's
1433 : : * preprocessing work on each qual condition found therein.
1434 : : */
1435 : : static void
1436 : 127755 : preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1437 : : {
1438 [ + - ]: 127755 : if (jtnode == NULL)
1439 : 0 : return;
1440 [ + + ]: 127755 : if (IsA(jtnode, RangeTblRef))
1441 : : {
1442 : : /* nothing to do here */
1443 : 64736 : }
1444 [ + + ]: 63019 : else if (IsA(jtnode, FromExpr))
1445 : : {
1446 : 53548 : FromExpr *f = (FromExpr *) jtnode;
1447 : 53548 : ListCell *l;
1448 : :
1449 [ + + + + : 110709 : foreach(l, f->fromlist)
+ + ]
1450 : 57161 : preprocess_qual_conditions(root, lfirst(l));
1451 : :
1452 : 53548 : f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1453 : 53548 : }
1454 [ + - ]: 9471 : else if (IsA(jtnode, JoinExpr))
1455 : : {
1456 : 9471 : JoinExpr *j = (JoinExpr *) jtnode;
1457 : :
1458 : 9471 : preprocess_qual_conditions(root, j->larg);
1459 : 9471 : preprocess_qual_conditions(root, j->rarg);
1460 : :
1461 : 9471 : j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1462 : 9471 : }
1463 : : else
1464 [ # # # # ]: 0 : elog(ERROR, "unrecognized node type: %d",
1465 : : (int) nodeTag(jtnode));
1466 : 127755 : }
1467 : :
1468 : : /*
1469 : : * preprocess_phv_expression
1470 : : * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1471 : : *
1472 : : * If a LATERAL subquery references an output of another subquery, and that
1473 : : * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1474 : : * join, then we'll push the PlaceHolderVar expression down into the subquery
1475 : : * and later pull it back up during find_lateral_references, which runs after
1476 : : * subquery_planner has preprocessed all the expressions that were in the
1477 : : * current query level to start with. So we need to preprocess it again here.
1478 : : */
1479 : : Expr *
1480 : 15 : preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1481 : : {
1482 : 15 : return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1483 : : }
1484 : :
1485 : : /*--------------------
1486 : : * grouping_planner
1487 : : * Perform planning steps related to grouping, aggregation, etc.
1488 : : *
1489 : : * This function adds all required top-level processing to the scan/join
1490 : : * Path(s) produced by query_planner.
1491 : : *
1492 : : * tuple_fraction is the fraction of tuples we expect will be retrieved.
1493 : : * tuple_fraction is interpreted as follows:
1494 : : * 0: expect all tuples to be retrieved (normal case)
1495 : : * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1496 : : * from the plan to be retrieved
1497 : : * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1498 : : * expected to be retrieved (ie, a LIMIT specification).
1499 : : * setops is used for set operation subqueries to provide the subquery with
1500 : : * the context in which it's being used so that Paths correctly sorted for the
1501 : : * set operation can be generated. NULL when not planning a set-operation
1502 : : * child, or when planning a set-op child that isn't interested in sorted input.
1503 : : *
1504 : : * Returns nothing; the useful output is in the Paths we attach to the
1505 : : * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1506 : : * root->processed_tlist contains the final processed targetlist.
1507 : : *
1508 : : * Note that we have not done set_cheapest() on the final rel; it's convenient
1509 : : * to leave this to the caller.
1510 : : *--------------------
1511 : : */
1512 : : static void
1513 : 51643 : grouping_planner(PlannerInfo *root, double tuple_fraction,
1514 : : SetOperationStmt *setops)
1515 : : {
1516 : 51643 : Query *parse = root->parse;
1517 : 51643 : int64 offset_est = 0;
1518 : 51643 : int64 count_est = 0;
1519 : 51643 : double limit_tuples = -1.0;
1520 : 51643 : bool have_postponed_srfs = false;
1521 : 51643 : PathTarget *final_target;
1522 : 51643 : List *final_targets;
1523 : 51643 : List *final_targets_contain_srfs;
1524 : 51643 : bool final_target_parallel_safe;
1525 : 51643 : RelOptInfo *current_rel;
1526 : 51643 : RelOptInfo *final_rel;
1527 : 51643 : FinalPathExtraData extra;
1528 : 51643 : ListCell *lc;
1529 : :
1530 : : /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1531 [ + + + + ]: 51643 : if (parse->limitCount || parse->limitOffset)
1532 : : {
1533 : 504 : tuple_fraction = preprocess_limit(root, tuple_fraction,
1534 : : &offset_est, &count_est);
1535 : :
1536 : : /*
1537 : : * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1538 : : * estimate the effects of using a bounded sort.
1539 : : */
1540 [ + + + + ]: 504 : if (count_est > 0 && offset_est >= 0)
1541 : 408 : limit_tuples = (double) count_est + (double) offset_est;
1542 : 504 : }
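 : :
 : : /*
 : :  * Example (editor's note): a cursor planned with the default
 : :  * cursor_tuple_fraction arrives here with tuple_fraction = 0.1 (plan
 : :  * for 10% of the rows); a plain "LIMIT 30" instead resets it to the
 : :  * absolute count 30 and sets limit_tuples = 30 for bounded-sort
 : :  * costing.
 : :  */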
1543 : :
1544 : : /* Make tuple_fraction accessible to lower-level routines */
1545 : 51643 : root->tuple_fraction = tuple_fraction;
1546 : :
1547 [ + + ]: 51643 : if (parse->setOperations)
1548 : : {
1549 : : /*
1550 : : * Construct Paths for set operations. The results will not need any
1551 : : * work except perhaps a top-level sort and/or LIMIT. Note that any
1552 : : * special work for recursive unions is the responsibility of
1553 : : * plan_set_operations.
1554 : : */
1555 : 798 : current_rel = plan_set_operations(root);
1556 : :
1557 : : /*
1558 : : * We should not need to call preprocess_targetlist, since we must be
1559 : : * in a SELECT query node. Instead, use the processed_tlist returned
1560 : : * by plan_set_operations (since this tells whether it returned any
1561 : : * resjunk columns!), and transfer any sort key information from the
1562 : : * original tlist.
1563 : : */
1564 [ + - ]: 798 : Assert(parse->commandType == CMD_SELECT);
1565 : :
1566 : : /* for safety, copy processed_tlist instead of modifying in-place */
1567 : 798 : root->processed_tlist =
1568 : 1596 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1569 : 798 : parse->targetList);
1570 : :
1571 : : /* Also extract the PathTarget form of the setop result tlist */
1572 : 798 : final_target = current_rel->cheapest_total_path->pathtarget;
1573 : :
1574 : : /* And check whether it's parallel safe */
1575 : 798 : final_target_parallel_safe =
1576 : 798 : is_parallel_safe(root, (Node *) final_target->exprs);
1577 : :
1578 : : /* The setop result tlist couldn't contain any SRFs */
1579 [ + - ]: 798 : Assert(!parse->hasTargetSRFs);
1580 : 798 : final_targets = final_targets_contain_srfs = NIL;
1581 : :
1582 : : /*
1583 : : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1584 : : * checked already, but let's make sure).
1585 : : */
1586 [ + - ]: 798 : if (parse->rowMarks)
1587 [ # # # # ]: 0 : ereport(ERROR,
1588 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1589 : : /*------
1590 : : translator: %s is a SQL row locking clause such as FOR UPDATE */
1591 : : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1592 : : LCS_asString(linitial_node(RowMarkClause,
1593 : : parse->rowMarks)->strength))));
1594 : :
1595 : : /*
1596 : : * Calculate pathkeys that represent result ordering requirements
1597 : : */
1598 [ + - ]: 798 : Assert(parse->distinctClause == NIL);
1599 : 1596 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1600 : 798 : parse->sortClause,
1601 : 798 : root->processed_tlist);
1602 : 798 : }
1603 : : else
1604 : : {
1605 : : /* No set operations, do regular planning */
1606 : 50845 : PathTarget *sort_input_target;
1607 : 50845 : List *sort_input_targets;
1608 : 50845 : List *sort_input_targets_contain_srfs;
1609 : 50845 : bool sort_input_target_parallel_safe;
1610 : 50845 : PathTarget *grouping_target;
1611 : 50845 : List *grouping_targets;
1612 : 50845 : List *grouping_targets_contain_srfs;
1613 : 50845 : bool grouping_target_parallel_safe;
1614 : 50845 : PathTarget *scanjoin_target;
1615 : 50845 : List *scanjoin_targets;
1616 : 50845 : List *scanjoin_targets_contain_srfs;
1617 : 50845 : bool scanjoin_target_parallel_safe;
1618 : 50845 : bool scanjoin_target_same_exprs;
1619 : 50845 : bool have_grouping;
1620 : 50845 : WindowFuncLists *wflists = NULL;
1621 : 50845 : List *activeWindows = NIL;
1622 : 50845 : grouping_sets_data *gset_data = NULL;
1623 : 50845 : standard_qp_extra qp_extra;
1624 : :
1625 : : /* A recursive query should always have setOperations */
1626 [ + - ]: 50845 : Assert(!root->hasRecursion);
1627 : :
1628 : : /* Preprocess grouping sets and GROUP BY clause, if any */
1629 [ + + ]: 50845 : if (parse->groupingSets)
1630 : : {
1631 : 158 : gset_data = preprocess_grouping_sets(root);
1632 : 158 : }
1633 [ + + ]: 50687 : else if (parse->groupClause)
1634 : : {
1635 : : /* Preprocess regular GROUP BY clause, if any */
1636 : 590 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1637 : 590 : }
1638 : :
1639 : : /*
1640 : : * Preprocess targetlist. Note that much of the remaining planning
1641 : : * work will be done with the PathTarget representation of tlists, but
1642 : : * we must also maintain the full representation of the final tlist so
1643 : : * that we can transfer its decoration (resnames etc) to the topmost
1644 : : * tlist of the finished Plan. This is kept in processed_tlist.
1645 : : */
1646 : 50845 : preprocess_targetlist(root);
1647 : :
1648 : : /*
1649 : : * Mark all the aggregates with resolved aggtranstypes, and detect
1650 : : * aggregates that are duplicates or can share transition state. We
1651 : : * must do this before slicing and dicing the tlist into various
1652 : : * pathtargets, else some copies of the Aggref nodes might escape
1653 : : * being marked.
1654 : : */
1655 [ + + ]: 50845 : if (parse->hasAggs)
1656 : : {
1657 : 4926 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1658 : 4926 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1659 : 4926 : }
1660 : :
1661 : : /*
1662 : : * Locate any window functions in the tlist. (We don't need to look
1663 : : * anywhere else, since expressions used in ORDER BY will be in there
1664 : : * too.) Note that they could all have been eliminated by constant
1665 : : * folding, in which case we don't need to do any more work.
1666 : : */
1667 [ + + ]: 50845 : if (parse->hasWindowFuncs)
1668 : : {
1669 : 854 : wflists = find_window_functions((Node *) root->processed_tlist,
1670 : 427 : list_length(parse->windowClause));
1671 [ + + ]: 427 : if (wflists->numWindowFuncs > 0)
1672 : : {
1673 : : /*
1674 : : * See if any modifications can be made to each WindowClause
1675 : : * to allow the executor to execute the WindowFuncs more
1676 : : * quickly.
1677 : : */
1678 : 426 : optimize_window_clauses(root, wflists);
1679 : :
1680 : : /* Extract the list of windows actually in use. */
1681 : 426 : activeWindows = select_active_windows(root, wflists);
1682 : :
1683 : : /* Make sure they all have names, for EXPLAIN's use. */
1684 : 426 : name_active_windows(activeWindows);
1685 : 426 : }
1686 : : else
1687 : 1 : parse->hasWindowFuncs = false;
1688 : 427 : }
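 : :
 : : /*
 : :  * Example (editor's note): in
 : :  *     SELECT rank() OVER w, sum(x) OVER w
 : :  *     FROM t WINDOW w AS (ORDER BY y);
 : :  * both WindowFuncs reference the same WindowClause, so
 : :  * select_active_windows() yields a single active window and only one
 : :  * WindowAgg node will be generated for it.
 : :  */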
1689 : :
1690 : : /*
1691 : : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1692 : : * adding logic between here and the query_planner() call. Anything
1693 : : * that is needed in MIN/MAX-optimizable cases will have to be
1694 : : * duplicated in planagg.c.
1695 : : */
1696 [ + + ]: 50845 : if (parse->hasAggs)
1697 : 4926 : preprocess_minmax_aggregates(root);
1698 : :
1699 : : /*
1700 : : * Figure out whether there's a hard limit on the number of rows that
1701 : : * query_planner's result subplan needs to return. Even if we know a
1702 : : * hard limit overall, it doesn't apply if the query has any
1703 : : * grouping/aggregation operations, or SRFs in the tlist.
1704 : : */
1705 [ + + ]: 50845 : if (parse->groupClause ||
1706 [ + + ]: 50110 : parse->groupingSets ||
1707 [ + + ]: 50097 : parse->distinctClause ||
1708 [ + + ]: 49953 : parse->hasAggs ||
1709 [ + + ]: 45670 : parse->hasWindowFuncs ||
1710 [ + + + + ]: 45266 : parse->hasTargetSRFs ||
1711 : 43654 : root->hasHavingQual)
1712 : 7195 : root->limit_tuples = -1.0;
1713 : : else
1714 : 43650 : root->limit_tuples = limit_tuples;
1715 : :
1716 : : /* Set up data needed by standard_qp_callback */
1717 : 50845 : qp_extra.activeWindows = activeWindows;
1718 : 50845 : qp_extra.gset_data = gset_data;
1719 : :
1720 : : /*
1721 : : * If we're a subquery for a set operation, store the SetOperationStmt
1722 : : * in qp_extra.
1723 : : */
1724 : 50845 : qp_extra.setop = setops;
1725 : :
1726 : : /*
1727 : : * Generate the best unsorted and presorted paths for the scan/join
1728 : : * portion of this Query, ie the processing represented by the
1729 : : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1730 : : * We also generate (in standard_qp_callback) pathkey representations
1731 : : * of the query's sort clause, distinct clause, etc.
1732 : : */
1733 : 50845 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1734 : :
1735 : : /*
1736 : : * Convert the query's result tlist into PathTarget format.
1737 : : *
1738 : : * Note: this cannot be done before query_planner() has performed
1739 : : * appendrel expansion, because that might add resjunk entries to
1740 : : * root->processed_tlist. Waiting till afterwards is also helpful
1741 : : * because the target width estimates can use per-Var width numbers
1742 : : * that were obtained within query_planner().
1743 : : */
1744 : 50845 : final_target = create_pathtarget(root, root->processed_tlist);
1745 : 50845 : final_target_parallel_safe =
1746 : 50845 : is_parallel_safe(root, (Node *) final_target->exprs);
1747 : :
1748 : : /*
1749 : : * If ORDER BY was given, consider whether we should use a post-sort
1750 : : * projection, and compute the adjusted target for preceding steps if
1751 : : * so.
1752 : : */
1753 [ + + ]: 50845 : if (parse->sortClause)
1754 : : {
1755 : 16802 : sort_input_target = make_sort_input_target(root,
1756 : 8401 : final_target,
1757 : : &have_postponed_srfs);
1758 : 8401 : sort_input_target_parallel_safe =
1759 : 8401 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1760 : 8401 : }
1761 : : else
1762 : : {
1763 : 42444 : sort_input_target = final_target;
1764 : 42444 : sort_input_target_parallel_safe = final_target_parallel_safe;
1765 : : }
1766 : :
1767 : : /*
1768 : : * If we have window functions to deal with, the output from any
1769 : : * grouping step needs to be what the window functions want;
1770 : : * otherwise, it should be sort_input_target.
1771 : : */
1772 [ + + ]: 50845 : if (activeWindows)
1773 : : {
1774 : 852 : grouping_target = make_window_input_target(root,
1775 : 426 : final_target,
1776 : 426 : activeWindows);
1777 : 426 : grouping_target_parallel_safe =
1778 : 426 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1779 : 426 : }
1780 : : else
1781 : : {
1782 : 50419 : grouping_target = sort_input_target;
1783 : 50419 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1784 : : }
1785 : :
1786 : : /*
1787 : : * If we have grouping or aggregation to do, the topmost scan/join
1788 : : * plan node must emit what the grouping step wants; otherwise, it
1789 : : * should emit grouping_target.
1790 : : */
1791 [ + + + + ]: 100935 : have_grouping = (parse->groupClause || parse->groupingSets ||
1792 [ + + ]: 50090 : parse->hasAggs || root->hasHavingQual);
1793 [ + + ]: 50845 : if (have_grouping)
1794 : : {
1795 : 5048 : scanjoin_target = make_group_input_target(root, final_target);
1796 : 5048 : scanjoin_target_parallel_safe =
1797 : 5048 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1798 : 5048 : }
1799 : : else
1800 : : {
1801 : 45797 : scanjoin_target = grouping_target;
1802 : 45797 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1803 : : }
1804 : :
1805 : : /*
1806 : : * If there are any SRFs in the targetlist, we must separate each of
1807 : : * these PathTargets into SRF-computing and SRF-free targets. Replace
1808 : : * each of the named targets with a SRF-free version, and remember the
1809 : : * list of additional projection steps we need to add afterwards.
1810 : : */
1811 [ + + ]: 50845 : if (parse->hasTargetSRFs)
1812 : : {
1813 : : /* final_target doesn't recompute any SRFs in sort_input_target */
1814 : 1646 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1815 : : &final_targets,
1816 : : &final_targets_contain_srfs);
1817 : 1646 : final_target = linitial_node(PathTarget, final_targets);
1818 [ + - ]: 1646 : Assert(!linitial_int(final_targets_contain_srfs));
1819 : : /* likewise for sort_input_target vs. grouping_target */
1820 : 1646 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1821 : : &sort_input_targets,
1822 : : &sort_input_targets_contain_srfs);
1823 : 1646 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
1824 [ + - ]: 1646 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1825 : : /* likewise for grouping_target vs. scanjoin_target */
1826 : 3292 : split_pathtarget_at_srfs(root,
1827 : 1646 : grouping_target, scanjoin_target,
1828 : : &grouping_targets,
1829 : : &grouping_targets_contain_srfs);
1830 : 1646 : grouping_target = linitial_node(PathTarget, grouping_targets);
1831 [ + - ]: 1646 : Assert(!linitial_int(grouping_targets_contain_srfs));
1832 : : /* scanjoin_target will not have any SRFs precomputed for it */
1833 : 1646 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1834 : : &scanjoin_targets,
1835 : : &scanjoin_targets_contain_srfs);
1836 : 1646 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1837 [ + - ]: 1646 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
1838 : 1646 : }
1839 : : else
1840 : : {
1841 : : /* initialize lists; for most of these, dummy values are OK */
1842 : 49199 : final_targets = final_targets_contain_srfs = NIL;
1843 : 49199 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
1844 : 49199 : grouping_targets = grouping_targets_contain_srfs = NIL;
1845 : 49199 : scanjoin_targets = list_make1(scanjoin_target);
1846 : 49199 : scanjoin_targets_contain_srfs = NIL;
1847 : : }
1848 : :
1849 : : /* Apply scan/join target. */
1850 : 100071 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1851 [ + + ]: 50845 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1852 : 101690 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1853 : 50845 : scanjoin_targets_contain_srfs,
1854 : 50845 : scanjoin_target_parallel_safe,
1855 : 50845 : scanjoin_target_same_exprs);
1856 : :
1857 : : /*
1858 : : * Save the various upper-rel PathTargets we just computed into
1859 : : * root->upper_targets[]. The core code doesn't use this, but it
1860 : : * provides a convenient place for extensions to get at the info. For
1861 : : * consistency, we save all the intermediate targets, even though some
1862 : : * of the corresponding upperrels might not be needed for this query.
1863 : : */
1864 : 50845 : root->upper_targets[UPPERREL_FINAL] = final_target;
1865 : 50845 : root->upper_targets[UPPERREL_ORDERED] = final_target;
1866 : 50845 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1867 : 50845 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1868 : 50845 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1869 : 50845 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
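 : :
 : : /*
 : :  * Example of the target chain (editor's addition): for
 : :  *     SELECT a, sum(b) FROM t GROUP BY a ORDER BY sum(b);
 : :  * scanjoin_target emits {a, b}, while grouping_target,
 : :  * sort_input_target, and final_target all emit {a, sum(b)}; each
 : :  * planning phase projects its target from the phase below it.
 : :  */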
1870 : :
1871 : : /*
1872 : : * If we have grouping and/or aggregation, consider ways to implement
1873 : : * that. We build a new upperrel representing the output of this
1874 : : * phase.
1875 : : */
1876 [ + + ]: 50845 : if (have_grouping)
1877 : : {
1878 : 10094 : current_rel = create_grouping_paths(root,
1879 : 5047 : current_rel,
1880 : 5047 : grouping_target,
1881 : 5047 : grouping_target_parallel_safe,
1882 : 5047 : gset_data);
1883 : : /* Fix things up if grouping_target contains SRFs */
1884 [ + + ]: 5047 : if (parse->hasTargetSRFs)
1885 : 54 : adjust_paths_for_srfs(root, current_rel,
1886 : 27 : grouping_targets,
1887 : 27 : grouping_targets_contain_srfs);
1888 : 5047 : }
1889 : :
1890 : : /*
1891 : : * If we have window functions, consider ways to implement those. We
1892 : : * build a new upperrel representing the output of this phase.
1893 : : */
1894 [ + + ]: 50845 : if (activeWindows)
1895 : : {
1896 : 852 : current_rel = create_window_paths(root,
1897 : 426 : current_rel,
1898 : 426 : grouping_target,
1899 : 426 : sort_input_target,
1900 : 426 : sort_input_target_parallel_safe,
1901 : 426 : wflists,
1902 : 426 : activeWindows);
1903 : : /* Fix things up if sort_input_target contains SRFs */
1904 [ + + ]: 426 : if (parse->hasTargetSRFs)
1905 : 4 : adjust_paths_for_srfs(root, current_rel,
1906 : 2 : sort_input_targets,
1907 : 2 : sort_input_targets_contain_srfs);
1908 : 426 : }
1909 : :
1910 : : /*
1911 : : * If there is a DISTINCT clause, consider ways to implement that. We
1912 : : * build a new upperrel representing the output of this phase.
1913 : : */
1914 [ + + ]: 50845 : if (parse->distinctClause)
1915 : : {
1916 : 298 : current_rel = create_distinct_paths(root,
1917 : 149 : current_rel,
1918 : 149 : sort_input_target);
1919 : 149 : }
1920 : 50845 : } /* end of if (setOperations) */
1921 : :
1922 : : /*
1923 : : * If ORDER BY was given, consider ways to implement that, and generate a
1924 : : * new upperrel containing only paths that emit the correct ordering and
1925 : : * project the correct final_target. We can apply the original
1926 : : * limit_tuples limit in sort costing here, but only if there are no
1927 : : * postponed SRFs.
1928 : : */
1929 [ + + ]: 51643 : if (parse->sortClause)
1930 : : {
1931 : 17976 : current_rel = create_ordered_paths(root,
1932 : 8988 : current_rel,
1933 : 8988 : final_target,
1934 : 8988 : final_target_parallel_safe,
1935 [ + + ]: 8988 : have_postponed_srfs ? -1.0 :
1936 : 8976 : limit_tuples);
1937 : : /* Fix things up if final_target contains SRFs */
1938 [ + + ]: 8988 : if (parse->hasTargetSRFs)
1939 : 72 : adjust_paths_for_srfs(root, current_rel,
1940 : 36 : final_targets,
1941 : 36 : final_targets_contain_srfs);
1942 : 8988 : }
1943 : :
1944 : : /*
1945 : : * Now we are prepared to build the final-output upperrel.
1946 : : */
1947 : 51643 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1948 : :
1949 : : /*
1950 : : * If the input rel is marked consider_parallel and there's nothing that's
1951 : : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1952 : : * consider_parallel as well. Note that if the query has rowMarks or is
1953 : : * not a SELECT, consider_parallel will be false for every relation in the
1954 : : * query.
1955 : : */
1956 [ + + ]: 51643 : if (current_rel->consider_parallel &&
1957 [ + + + + ]: 20236 : is_parallel_safe(root, parse->limitOffset) &&
1958 : 20232 : is_parallel_safe(root, parse->limitCount))
1959 : 20231 : final_rel->consider_parallel = true;
1960 : :
1961 : : /*
1962 : : * If the current_rel belongs to a single FDW, so does the final_rel.
1963 : : */
1964 : 51643 : final_rel->serverid = current_rel->serverid;
1965 : 51643 : final_rel->userid = current_rel->userid;
1966 : 51643 : final_rel->useridiscurrent = current_rel->useridiscurrent;
1967 : 51643 : final_rel->fdwroutine = current_rel->fdwroutine;
1968 : :
1969 : : /*
1970 : : * Generate paths for the final_rel. Insert all surviving paths, with
1971 : : * LockRows, Limit, and/or ModifyTable steps added if needed.
1972 : : */
1973 [ + - + + : 106191 : foreach(lc, current_rel->pathlist)
+ + ]
1974 : : {
1975 : 54548 : Path *path = (Path *) lfirst(lc);
1976 : :
1977 : : /*
1978 : : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1979 : : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1980 : : * here. If there are only non-locking rowmarks, they should be
1981 : : * handled by the ModifyTable node instead. However, root->rowMarks
1982 : : * is what goes into the LockRows node.)
1983 : : */
1984 [ + + ]: 54548 : if (parse->rowMarks)
1985 : : {
1986 : 1610 : path = (Path *) create_lockrows_path(root, final_rel, path,
1987 : 805 : root->rowMarks,
1988 : 805 : assign_special_exec_param(root));
1989 : 805 : }
1990 : :
1991 : : /*
1992 : : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1993 : : */
1994 [ + + ]: 54548 : if (limit_needed(parse))
1995 : : {
1996 : 1080 : path = (Path *) create_limit_path(root, final_rel, path,
1997 : 540 : parse->limitOffset,
1998 : 540 : parse->limitCount,
1999 : 540 : parse->limitOption,
2000 : 540 : offset_est, count_est);
2001 : 540 : }
2002 : :
2003 : : /*
2004 : : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
2005 : : */
2006 [ + + ]: 54548 : if (parse->commandType != CMD_SELECT)
2007 : : {
2008 : 7234 : Index rootRelation;
2009 : 7234 : List *resultRelations = NIL;
2010 : 7234 : List *updateColnosLists = NIL;
2011 : 7234 : List *withCheckOptionLists = NIL;
2012 : 7234 : List *returningLists = NIL;
2013 : 7234 : List *mergeActionLists = NIL;
2014 : 7234 : List *mergeJoinConditions = NIL;
2015 : 7234 : List *rowMarks;
2016 : :
2017 [ + + ]: 7234 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
2018 : : {
2019 : : /* Inherited UPDATE/DELETE/MERGE */
2020 : 740 : RelOptInfo *top_result_rel = find_base_rel(root,
2021 : 370 : parse->resultRelation);
2022 : 370 : int resultRelation = -1;
2023 : :
2024 : : /* Pass the root result rel forward to the executor. */
2025 : 370 : rootRelation = parse->resultRelation;
2026 : :
2027 : : /* Add only leaf children to ModifyTable. */
2028 [ + + + + : 1191 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
+ + ]
2029 : 2266 : resultRelation)) >= 0)
2030 : : {
2031 : 1526 : RelOptInfo *this_result_rel = find_base_rel(root,
2032 : 763 : resultRelation);
2033 : :
2034 : : /*
2035 : : * Also exclude any leaf rels that have turned dummy since
2036 : : * being added to the list, for example, by being excluded
2037 : : * by constraint exclusion.
2038 : : */
2039 [ + + ]: 763 : if (IS_DUMMY_REL(this_result_rel))
2040 : 29 : continue;
2041 : :
2042 : : /* Build per-target-rel lists needed by ModifyTable */
2043 : 1468 : resultRelations = lappend_int(resultRelations,
2044 : 734 : resultRelation);
2045 [ + + ]: 734 : if (parse->commandType == CMD_UPDATE)
2046 : : {
2047 : 500 : List *update_colnos = root->update_colnos;
2048 : :
2049 [ + - ]: 500 : if (this_result_rel != top_result_rel)
2050 : 500 : update_colnos =
2051 : 1000 : adjust_inherited_attnums_multilevel(root,
2052 : 500 : update_colnos,
2053 : 500 : this_result_rel->relid,
2054 : 500 : top_result_rel->relid);
2055 : 1000 : updateColnosLists = lappend(updateColnosLists,
2056 : 500 : update_colnos);
2057 : 500 : }
2058 [ + + ]: 734 : if (parse->withCheckOptions)
2059 : : {
2060 : 82 : List *withCheckOptions = parse->withCheckOptions;
2061 : :
2062 [ - + ]: 82 : if (this_result_rel != top_result_rel)
2063 : 82 : withCheckOptions = (List *)
2064 : 164 : adjust_appendrel_attrs_multilevel(root,
2065 : 82 : (Node *) withCheckOptions,
2066 : 82 : this_result_rel,
2067 : 82 : top_result_rel);
2068 : 164 : withCheckOptionLists = lappend(withCheckOptionLists,
2069 : 82 : withCheckOptions);
2070 : 82 : }
2071 [ + + ]: 734 : if (parse->returningList)
2072 : : {
2073 : 111 : List *returningList = parse->returningList;
2074 : :
2075 [ - + ]: 111 : if (this_result_rel != top_result_rel)
2076 : 111 : returningList = (List *)
2077 : 222 : adjust_appendrel_attrs_multilevel(root,
2078 : 111 : (Node *) returningList,
2079 : 111 : this_result_rel,
2080 : 111 : top_result_rel);
2081 : 222 : returningLists = lappend(returningLists,
2082 : 111 : returningList);
2083 : 111 : }
2084 [ + + ]: 734 : if (parse->mergeActionList)
2085 : : {
2086 : 77 : ListCell *l;
2087 : 77 : List *mergeActionList = NIL;
2088 : :
2089 : : /*
2090 : : * Copy MergeActions and translate stuff that
2091 : : * references attribute numbers.
2092 : : */
2093 [ + - + + : 238 : foreach(l, parse->mergeActionList)
+ + ]
2094 : : {
2095 : 161 : MergeAction *action = lfirst(l),
2096 : 161 : *leaf_action = copyObject(action);
2097 : :
2098 : 161 : leaf_action->qual =
2099 : 322 : adjust_appendrel_attrs_multilevel(root,
2100 : 161 : (Node *) action->qual,
2101 : 161 : this_result_rel,
2102 : 161 : top_result_rel);
2103 : 161 : leaf_action->targetList = (List *)
2104 : 322 : adjust_appendrel_attrs_multilevel(root,
2105 : 161 : (Node *) action->targetList,
2106 : 161 : this_result_rel,
2107 : 161 : top_result_rel);
2108 [ + + ]: 161 : if (leaf_action->commandType == CMD_UPDATE)
2109 : 85 : leaf_action->updateColnos =
2110 : 170 : adjust_inherited_attnums_multilevel(root,
2111 : 85 : action->updateColnos,
2112 : 85 : this_result_rel->relid,
2113 : 85 : top_result_rel->relid);
2114 : 322 : mergeActionList = lappend(mergeActionList,
2115 : 161 : leaf_action);
2116 : 161 : }
2117 : :
2118 : 154 : mergeActionLists = lappend(mergeActionLists,
2119 : 77 : mergeActionList);
2120 : 77 : }
2121 [ + + ]: 734 : if (parse->commandType == CMD_MERGE)
2122 : : {
2123 : 77 : Node *mergeJoinCondition = parse->mergeJoinCondition;
2124 : :
2125 [ + - ]: 77 : if (this_result_rel != top_result_rel)
2126 : 77 : mergeJoinCondition =
2127 : 154 : adjust_appendrel_attrs_multilevel(root,
2128 : 77 : mergeJoinCondition,
2129 : 77 : this_result_rel,
2130 : 77 : top_result_rel);
2131 : 154 : mergeJoinConditions = lappend(mergeJoinConditions,
2132 : 77 : mergeJoinCondition);
2133 : 77 : }
2134 [ - + + ]: 763 : }
2135 : :
2136 [ + + ]: 370 : if (resultRelations == NIL)
2137 : : {
2138 : : /*
2139 : : * We managed to exclude every child rel, so generate a
2140 : : * dummy one-relation plan using info for the top target
2141 : : * rel (even though that may not be a leaf target).
2142 : : * Although it's clear that no data will be updated or
2143 : : * deleted, we still need to have a ModifyTable node so
2144 : : * that any statement triggers will be executed. (This
2145 : : * could be cleaner if we fixed nodeModifyTable.c to allow
2146 : : * zero target relations, but that probably wouldn't be a
2147 : : * net win.)
2148 : : */
2149 : 5 : resultRelations = list_make1_int(parse->resultRelation);
2150 [ - + ]: 5 : if (parse->commandType == CMD_UPDATE)
2151 : 5 : updateColnosLists = list_make1(root->update_colnos);
2152 [ + - ]: 5 : if (parse->withCheckOptions)
2153 : 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2154 [ + + ]: 5 : if (parse->returningList)
2155 : 3 : returningLists = list_make1(parse->returningList);
2156 [ + - ]: 5 : if (parse->mergeActionList)
2157 : 0 : mergeActionLists = list_make1(parse->mergeActionList);
2158 [ + - ]: 5 : if (parse->commandType == CMD_MERGE)
2159 : 0 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2160 : 5 : }
2161 : 370 : }
2162 : : else
2163 : : {
2164 : : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2165 : 6864 : rootRelation = 0; /* there's no separate root rel */
2166 : 6864 : resultRelations = list_make1_int(parse->resultRelation);
2167 [ + + ]: 6864 : if (parse->commandType == CMD_UPDATE)
2168 : 876 : updateColnosLists = list_make1(root->update_colnos);
2169 [ + + ]: 6864 : if (parse->withCheckOptions)
2170 : 162 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2171 [ + + ]: 6864 : if (parse->returningList)
2172 : 272 : returningLists = list_make1(parse->returningList);
2173 [ + + ]: 6864 : if (parse->mergeActionList)
2174 : 239 : mergeActionLists = list_make1(parse->mergeActionList);
2175 [ + + ]: 6864 : if (parse->commandType == CMD_MERGE)
2176 : 239 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2177 : : }
2178 : :
2179 : : /*
2180 : : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2181 : : * will have dealt with fetching non-locked marked rows, else we
2182 : : * need to have ModifyTable do that.
2183 : : */
2184 [ - + ]: 7234 : if (parse->rowMarks)
2185 : 0 : rowMarks = NIL;
2186 : : else
2187 : 7234 : rowMarks = root->rowMarks;
2188 : :
2189 : 7234 : path = (Path *)
2190 : 14468 : create_modifytable_path(root, final_rel,
2191 : 7234 : path,
2192 : 7234 : parse->commandType,
2193 : 7234 : parse->canSetTag,
2194 : 7234 : parse->resultRelation,
2195 : 7234 : rootRelation,
2196 : 7234 : resultRelations,
2197 : 7234 : updateColnosLists,
2198 : 7234 : withCheckOptionLists,
2199 : 7234 : returningLists,
2200 : 7234 : rowMarks,
2201 : 7234 : parse->onConflict,
2202 : 7234 : mergeActionLists,
2203 : 7234 : mergeJoinConditions,
2204 : 7234 : assign_special_exec_param(root));
2205 : 7234 : }
2206 : :
2207 : : /* And shove it into final_rel */
2208 : 54548 : add_path(final_rel, path);
2209 : 54548 : }
2210 : :
2211 : : /*
2212 : : * Generate partial paths for final_rel, too, if outer query levels might
2213 : : * be able to make use of them.
2214 : : */
2215 [ + + + + : 51643 : if (final_rel->consider_parallel && root->query_level > 1 &&
+ + ]
2216 : 2955 : !limit_needed(parse))
2217 : : {
2218 [ + - ]: 2925 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2219 [ + + + + : 2943 : foreach(lc, current_rel->partial_pathlist)
+ + ]
2220 : : {
2221 : 18 : Path *partial_path = (Path *) lfirst(lc);
2222 : :
2223 : 18 : add_partial_path(final_rel, partial_path);
2224 : 18 : }
2225 : 2925 : }
2226 : :
2227 : 51643 : extra.limit_needed = limit_needed(parse);
2228 : 51643 : extra.limit_tuples = limit_tuples;
2229 : 51643 : extra.count_est = count_est;
2230 : 51643 : extra.offset_est = offset_est;
2231 : :
2232 : : /*
2233 : : * If there is an FDW that's responsible for all baserels of the query,
2234 : : * let it consider adding ForeignPaths.
2235 : : */
2236 [ - + # # ]: 51643 : if (final_rel->fdwroutine &&
2237 : 0 : final_rel->fdwroutine->GetForeignUpperPaths)
2238 : 0 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2239 : 0 : current_rel, final_rel,
2240 : : &extra);
2241 : :
2242 : : /* Let extensions possibly add some more paths */
2243 [ + - ]: 51643 : if (create_upper_paths_hook)
2244 : 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2245 : 0 : current_rel, final_rel, &extra);
2246 : :
2247 : : /* Note: currently, we leave it to callers to do set_cheapest() */
2248 : 51643 : }
2249 : :
2250 : : /*
2251 : : * Do preprocessing for groupingSets clause and related data.
2252 : : *
2253 : : * We expect that parse->groupingSets has already been expanded into a flat
2254 : : * list of grouping sets (that is, just integer Lists of ressortgroupref
2255 : : * numbers) by expand_grouping_sets(). This function handles the preliminary
2256 : : * steps of organizing the grouping sets into lists of rollups, and preparing
2257 : : * annotations which will later be filled in with size estimates.
2258 : : */
2259 : : static grouping_sets_data *
2260 : 166 : preprocess_grouping_sets(PlannerInfo *root)
2261 : : {
2262 : 166 : Query *parse = root->parse;
2263 : 166 : List *sets;
2264 : 166 : int maxref = 0;
2265 : 166 : ListCell *lc_set;
2266 : 166 : grouping_sets_data *gd = palloc0_object(grouping_sets_data);
2267 : :
2268 : : /*
2269 : : * We don't currently make any attempt to optimize the groupClause when
2270 : : * there are grouping sets, so just duplicate it in processed_groupClause.
2271 : : */
2272 : 166 : root->processed_groupClause = parse->groupClause;
2273 : :
2274 : : /* Detect unhashable and unsortable grouping expressions */
2275 : 166 : gd->any_hashable = false;
2276 : 166 : gd->unhashable_refs = NULL;
2277 : 166 : gd->unsortable_refs = NULL;
2278 : 166 : gd->unsortable_sets = NIL;
2279 : :
2280 [ + + ]: 166 : if (parse->groupClause)
2281 : : {
2282 : 153 : ListCell *lc;
2283 : :
2284 [ + - + + : 486 : foreach(lc, parse->groupClause)
+ + ]
2285 : : {
2286 : 333 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2287 : 333 : Index ref = gc->tleSortGroupRef;
2288 : :
2289 [ + + ]: 333 : if (ref > maxref)
2290 : 325 : maxref = ref;
2291 : :
2292 [ + + ]: 333 : if (!gc->hashable)
2293 : 5 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2294 : :
2295 [ + + ]: 333 : if (!OidIsValid(gc->sortop))
2296 : 7 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2297 : 333 : }
2298 : 153 : }
2299 : :
2300 : : /* Allocate workspace array for remapping */
2301 : 166 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2302 : :
2303 : : /*
2304 : : * If we have any unsortable sets, we must extract them before trying to
2305 : : * prepare rollups. Unsortable sets don't go through
2306 : : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2307 : : * here.
2308 : : */
2309 [ + + ]: 166 : if (!bms_is_empty(gd->unsortable_refs))
2310 : : {
2311 : 7 : List *sortable_sets = NIL;
2312 : 7 : ListCell *lc;
2313 : :
2314 [ + - + + : 21 : foreach(lc, parse->groupingSets)
+ + ]
2315 : : {
2316 : 15 : List *gset = (List *) lfirst(lc);
2317 : :
2318 [ + + ]: 15 : if (bms_overlap_list(gd->unsortable_refs, gset))
2319 : : {
2320 : 8 : GroupingSetData *gs = makeNode(GroupingSetData);
2321 : :
2322 : 8 : gs->set = gset;
2323 : 8 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2324 : :
2325 : : /*
2326 : : * We must enforce here that an unsortable set is hashable;
2327 : : * later code assumes this. Parse analysis only checks that
2328 : : * every individual column is either hashable or sortable.
2329 : : *
2330 : : * Note that passing this test doesn't guarantee we can
2331 : : * generate a plan; there might be other showstoppers.
2332 : : */
2333 [ + + ]: 8 : if (bms_overlap_list(gd->unhashable_refs, gset))
2334 [ + - + - ]: 1 : ereport(ERROR,
2335 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2336 : : errmsg("could not implement GROUP BY"),
2337 : : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2338 : 7 : }
2339 : : else
2340 : 7 : sortable_sets = lappend(sortable_sets, gset);
2341 : 14 : }
2342 : :
2343 [ + + ]: 6 : if (sortable_sets)
2344 : 5 : sets = extract_rollup_sets(sortable_sets);
2345 : : else
2346 : 1 : sets = NIL;
2347 : 6 : }
2348 : : else
2349 : 159 : sets = extract_rollup_sets(parse->groupingSets);
2350 : :
2351 [ + + + + : 433 : foreach(lc_set, sets)
+ + ]
2352 : : {
2353 : 268 : List *current_sets = (List *) lfirst(lc_set);
2354 : 268 : RollupData *rollup = makeNode(RollupData);
2355 : 268 : GroupingSetData *gs;
2356 : :
2357 : : /*
2358 : : * Reorder the current list of grouping sets into correct prefix
2359 : : * order. If only one aggregation pass is needed, try to make the
2360 : : * list match the ORDER BY clause; if more than one pass is needed, we
2361 : : * don't bother with that.
2362 : : *
2363 : : * Note that this reorders the sets from smallest-member-first to
2364 : : * largest-member-first, and applies the GroupingSetData annotations,
2365 : : * though the data will be filled in later.
2366 : : */
2367 : 536 : current_sets = reorder_grouping_sets(current_sets,
2368 [ + + ]: 268 : (list_length(sets) == 1
2369 : 91 : ? parse->sortClause
2370 : : : NIL));
2371 : :
2372 : : /*
2373 : : * Get the initial (and therefore largest) grouping set.
2374 : : */
2375 : 268 : gs = linitial_node(GroupingSetData, current_sets);
2376 : :
2377 : : /*
2378 : : * Order the groupClause appropriately. If the first grouping set is
2379 : : * empty, then the groupClause must also be empty; otherwise we have
2380 : : * to force the groupClause to match that grouping set's order.
2381 : : *
2382 : : * (The first grouping set can be empty even though parse->groupClause
2383 : : * is not empty only if all non-empty grouping sets are unsortable.
2384 : : * The groupClauses for hashed grouping sets are built later on.)
2385 : : */
2386 [ + + ]: 268 : if (gs->set)
2387 : 255 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2388 : : else
2389 : 13 : rollup->groupClause = NIL;
2390 : :
2391 : : /*
2392 : : * Is it hashable? We pretend empty sets are hashable even though we
2393 : : * actually force them not to be hashed later. But don't bother if
2394 : : * there's nothing but empty sets (since in that case we can't hash
2395 : : * anything).
2396 : : */
2397 [ + + + + ]: 268 : if (gs->set &&
2398 : 255 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2399 : : {
2400 : 251 : rollup->hashable = true;
2401 : 251 : gd->any_hashable = true;
2402 : 251 : }
2403 : :
2404 : : /*
2405 : : * Now that we've pinned down an order for the groupClause for this
2406 : : * list of grouping sets, we need to remap the entries in the grouping
2407 : : * sets from sortgrouprefs to plain indices (0-based) into the
2408 : : * groupClause for this collection of grouping sets. We keep the
2409 : : * original form for later use, though.
2410 : : */
2411 : 536 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2412 : 268 : current_sets,
2413 : 268 : gd->tleref_to_colnum_map);
2414 : 268 : rollup->gsets_data = current_sets;
2415 : :
2416 : 268 : gd->rollups = lappend(gd->rollups, rollup);
2417 : 268 : }
2418 : :
2419 [ + + ]: 165 : if (gd->unsortable_sets)
2420 : : {
2421 : : /*
2422 : : * We have not yet pinned down a groupclause for this, but we will
2423 : : * need index-based lists for estimation purposes. Construct
2424 : : * hash_sets_idx based on the entire original groupclause for now.
2425 : : */
2426 : 12 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2427 : 6 : gd->unsortable_sets,
2428 : 6 : gd->tleref_to_colnum_map);
2429 : 6 : gd->any_hashable = true;
2430 : 6 : }
2431 : :
2432 : 330 : return gd;
2433 : 165 : }
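 : :
 : : /*
 : :  * Example (editor's addition): GROUP BY ROLLUP(a, b) arrives here as
 : :  * the expanded sets {(a,b), (a), ()}.  extract_rollup_sets() puts all
 : :  * three into one chain, and reorder_grouping_sets() orders them
 : :  * largest-first so that each set is a prefix of its predecessor,
 : :  * letting a single sort on (a, b) drive the whole rollup.
 : :  */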
2434 : :
2435 : : /*
2436 : : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2437 : : * (without annotation) mapped to indexes into the given groupclause.
2438 : : */
2439 : : static List *
2440 : 755 : remap_to_groupclause_idx(List *groupClause,
2441 : : List *gsets,
2442 : : int *tleref_to_colnum_map)
2443 : : {
2444 : 755 : int ref = 0;
2445 : 755 : List *result = NIL;
2446 : 755 : ListCell *lc;
2447 : :
2448 [ + + + + : 1815 : foreach(lc, groupClause)
+ + ]
2449 : : {
2450 : 1060 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2451 : :
2452 : 1060 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2453 : 1060 : }
2454 : :
2455 [ + - + + : 1734 : foreach(lc, gsets)
+ + ]
2456 : : {
2457 : 979 : List *set = NIL;
2458 : 979 : ListCell *lc2;
2459 : 979 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2460 : :
2461 [ + + + + : 2178 : foreach(lc2, gs->set)
+ + ]
2462 : : {
2463 : 1199 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2464 : 1199 : }
2465 : :
2466 : 979 : result = lappend(result, set);
2467 : 979 : }
2468 : :
2469 : 1510 : return result;
2470 : 755 : }
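 : :
 : : /*
 : :  * Worked example (editor's addition): if groupClause carries
 : :  * tleSortGroupRefs (4, 2, 7), the map built above is {4->0, 2->1,
 : :  * 7->2}, so a grouping set given as refs (2, 4) is returned as the
 : :  * index list (1, 0).
 : :  */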
2471 : :
2472 : :
2473 : : /*
2474 : : * preprocess_rowmarks - set up PlanRowMarks if needed
2475 : : */
2476 : : static void
2477 : 52273 : preprocess_rowmarks(PlannerInfo *root)
2478 : : {
2479 : 52273 : Query *parse = root->parse;
2480 : 52273 : Bitmapset *rels;
2481 : 52273 : List *prowmarks;
2482 : 52273 : ListCell *l;
2483 : 52273 : int i;
2484 : :
2485 [ + + ]: 52273 : if (parse->rowMarks)
2486 : : {
2487 : : /*
2488 : : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2489 : : * grouping, since grouping renders a reference to individual tuple
2490 : : * CTIDs invalid. This is also checked at parse time, but that's
2491 : : * insufficient because of rule substitution, query pullup, etc.
2492 : : */
2493 : 1602 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2494 : 801 : parse->rowMarks)->strength);
2495 : 801 : }
2496 : : else
2497 : : {
2498 : : /*
2499 : : * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2500 : : * UPDATE/SHARE.
2501 : : */
2502 [ + + ]: 51472 : if (parse->commandType != CMD_UPDATE &&
2503 [ + + + + ]: 50342 : parse->commandType != CMD_DELETE &&
2504 : 49838 : parse->commandType != CMD_MERGE)
2505 : 49565 : return;
2506 : : }
2507 : :
2508 : : /*
2509 : : * We need to have rowmarks for all base relations except the target. We
2510 : : * make a bitmapset of all base rels and then remove the items we don't
2511 : : * need or have FOR [KEY] UPDATE/SHARE marks for.
2512 : : */
2513 : 2708 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2514 [ + + ]: 2708 : if (parse->resultRelation)
2515 : 1907 : rels = bms_del_member(rels, parse->resultRelation);
2516 : :
2517 : : /*
2518 : : * Convert RowMarkClauses to PlanRowMark representation.
2519 : : */
2520 : 2708 : prowmarks = NIL;
2521 [ + + + + : 3519 : foreach(l, parse->rowMarks)
+ + ]
2522 : : {
2523 : 811 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2524 : 811 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2525 : 811 : PlanRowMark *newrc;
2526 : :
2527 : : /*
2528 : : * Currently, it is syntactically impossible to have FOR UPDATE et al
2529 : : * applied to an update/delete target rel. If that ever becomes
2530 : : * possible, we should drop the target from the PlanRowMark list.
2531 : : */
2532 [ + - ]: 811 : Assert(rc->rti != parse->resultRelation);
2533 : :
2534 : : /*
2535 : : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2536 : : * can't support true locking. Subqueries that got flattened into the
2537 : : * main query should be ignored completely. Any that didn't will get
2538 : : * ROW_MARK_COPY items in the next loop.
2539 : : */
2540 [ + + ]: 811 : if (rte->rtekind != RTE_RELATION)
2541 : 8 : continue;
2542 : :
2543 : 803 : rels = bms_del_member(rels, rc->rti);
2544 : :
2545 : 803 : newrc = makeNode(PlanRowMark);
2546 : 803 : newrc->rti = newrc->prti = rc->rti;
2547 : 803 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2548 : 803 : newrc->markType = select_rowmark_type(rte, rc->strength);
2549 : 803 : newrc->allMarkTypes = (1 << newrc->markType);
2550 : 803 : newrc->strength = rc->strength;
2551 : 803 : newrc->waitPolicy = rc->waitPolicy;
2552 : 803 : newrc->isParent = false;
2553 : :
2554 : 803 : prowmarks = lappend(prowmarks, newrc);
2555 [ + + ]: 811 : }
2556 : :
2557 : : /*
2558 : : * Now, add rowmarks for any non-target, non-locked base relations.
2559 : : */
2560 : 2708 : i = 0;
2561 [ + - + + : 6931 : foreach(l, parse->rtable)
+ + ]
2562 : : {
2563 : 4223 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2564 : 4223 : PlanRowMark *newrc;
2565 : :
2566 : 4223 : i++;
2567 [ + + ]: 4223 : if (!bms_is_member(i, rels))
2568 : 3710 : continue;
2569 : :
2570 : 513 : newrc = makeNode(PlanRowMark);
2571 : 513 : newrc->rti = newrc->prti = i;
2572 : 513 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2573 : 513 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2574 : 513 : newrc->allMarkTypes = (1 << newrc->markType);
2575 : 513 : newrc->strength = LCS_NONE;
2576 : 513 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2577 : 513 : newrc->isParent = false;
2578 : :
2579 : 513 : prowmarks = lappend(prowmarks, newrc);
2580 [ + + ]: 4223 : }
2581 : :
2582 : 2708 : root->rowMarks = prowmarks;
2583 : 52273 : }
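 : :
 : : /*
 : :  * Example (editor's note): for
 : :  *     SELECT * FROM a, b FOR UPDATE OF a;
 : :  * the first loop creates a ROW_MARK_EXCLUSIVE PlanRowMark for "a",
 : :  * and the second loop gives "b" a ROW_MARK_REFERENCE mark so that its
 : :  * rows can be re-fetched during EvalPlanQual rechecks.
 : :  */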
2584 : :
2585 : : /*
2586 : : * Select RowMarkType to use for a given table
2587 : : */
2588 : : RowMarkType
2589 : 1644 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2590 : : {
2591 [ + + ]: 1644 : if (rte->rtekind != RTE_RELATION)
2592 : : {
2593 : : /* If it's not a table at all, use ROW_MARK_COPY */
2594 : 184 : return ROW_MARK_COPY;
2595 : : }
2596 [ - + ]: 1460 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2597 : : {
2598 : : /* Let the FDW select the rowmark type, if it wants to */
2599 : 0 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2600 : :
2601 [ # # ]: 0 : if (fdwroutine->GetForeignRowMarkType != NULL)
2602 : 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2603 : : /* Otherwise, use ROW_MARK_COPY by default */
2604 : 0 : return ROW_MARK_COPY;
2605 : 0 : }
2606 : : else
2607 : : {
2608 : : /* Regular table, apply the appropriate lock type */
2609 [ + + + + : 1460 : switch (strength)
+ - ]
2610 : : {
2611 : : case LCS_NONE:
2612 : :
2613 : : /*
2614 : : * We don't need a tuple lock, only the ability to re-fetch
2615 : : * the row.
2616 : : */
2617 : 380 : return ROW_MARK_REFERENCE;
2618 : : break;
2619 : : case LCS_FORKEYSHARE:
2620 : 1021 : return ROW_MARK_KEYSHARE;
2621 : : break;
2622 : : case LCS_FORSHARE:
2623 : 22 : return ROW_MARK_SHARE;
2624 : : break;
2625 : : case LCS_FORNOKEYUPDATE:
2626 : 1 : return ROW_MARK_NOKEYEXCLUSIVE;
2627 : : break;
2628 : : case LCS_FORUPDATE:
2629 : 36 : return ROW_MARK_EXCLUSIVE;
2630 : : break;
2631 : : }
2632 [ # # # # ]: 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2633 : 0 : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2634 : : }
2635 : 1644 : }
2636 : :
2637 : : /*
2638 : : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2639 : : *
2640 : : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2641 : : * results back in *count_est and *offset_est. These variables are set to
2642 : : * 0 if the corresponding clause is not present, and -1 if it's present
2643 : : * but we couldn't estimate the value for it. (The "0" convention is OK
2644 : : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2645 : : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2646 : : * usual practice of never estimating less than one row.) These values will
2647 : : * be passed to create_limit_path, which see if you change this code.
2648 : : *
2649 : : * The return value is the suitably adjusted tuple_fraction to use for
2650 : : * planning the query. This adjustment is not overridable, since it reflects
2651 : : * plan actions that grouping_planner() will certainly take, not assumptions
2652 : : * about context.
2653 : : */
2654 : : static double
2655 : 504 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2656 : : int64 *offset_est, int64 *count_est)
2657 : : {
2658 : 504 : Query *parse = root->parse;
2659 : 504 : Node *est;
2660 : 504 : double limit_fraction;
2661 : :
2662 : : /* Should not be called unless LIMIT or OFFSET */
2663 [ + + + - ]: 504 : Assert(parse->limitCount || parse->limitOffset);
2664 : :
2665 : : /*
2666 : : * Try to obtain the clause values. We use estimate_expression_value
2667 : : * primarily because it can sometimes do something useful with Params.
2668 : : */
2669 [ + + ]: 504 : if (parse->limitCount)
2670 : : {
2671 : 412 : est = estimate_expression_value(root, parse->limitCount);
2672 [ + - + + ]: 412 : if (est && IsA(est, Const))
2673 : : {
2674 [ - + ]: 411 : if (((Const *) est)->constisnull)
2675 : : {
2676 : : /* NULL indicates LIMIT ALL, ie, no limit */
2677 : 0 : *count_est = 0; /* treat as not present */
2678 : 0 : }
2679 : : else
2680 : : {
2681 : 411 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
2682 [ + + ]: 411 : if (*count_est <= 0)
2683 : 25 : *count_est = 1; /* force to at least 1 */
2684 : : }
2685 : 411 : }
2686 : : else
2687 : 1 : *count_est = -1; /* can't estimate */
2688 : 412 : }
2689 : : else
2690 : 92 : *count_est = 0; /* not present */
2691 : :
2692 [ + + ]: 504 : if (parse->limitOffset)
2693 : : {
2694 : 104 : est = estimate_expression_value(root, parse->limitOffset);
2695 [ + - + + ]: 104 : if (est && IsA(est, Const))
2696 : : {
2697 [ - + ]: 100 : if (((Const *) est)->constisnull)
2698 : : {
2699 : : /* Treat NULL as no offset; the executor will too */
2700 : 0 : *offset_est = 0; /* treat as not present */
2701 : 0 : }
2702 : : else
2703 : : {
2704 : 100 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2705 [ + - ]: 100 : if (*offset_est < 0)
2706 : 0 : *offset_est = 0; /* treat as not present */
2707 : : }
2708 : 100 : }
2709 : : else
2710 : 4 : *offset_est = -1; /* can't estimate */
2711 : 104 : }
2712 : : else
2713 : 400 : *offset_est = 0; /* not present */
2714 : :
2715 [ + + ]: 504 : if (*count_est != 0)
2716 : : {
2717 : : /*
2718 : : * A LIMIT clause limits the absolute number of tuples returned.
2719 : : * However, if it's not a constant LIMIT then we have to guess; for
2720 : : * lack of a better idea, assume 10% of the plan's result is wanted.
2721 : : */
2722 [ + + + + ]: 412 : if (*count_est < 0 || *offset_est < 0)
2723 : : {
2724 : : /* LIMIT or OFFSET is an expression ... punt ... */
2725 : 4 : limit_fraction = 0.10;
2726 : 4 : }
2727 : : else
2728 : : {
2729 : : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2730 : 408 : limit_fraction = (double) *count_est + (double) *offset_est;
2731 : : }
2732 : :
2733 : : /*
2734 : : * If we have absolute limits from both caller and LIMIT, use the
2735 : : * smaller value; likewise if they are both fractional. If one is
2736 : : * fractional and the other absolute, we can't easily determine which
2737 : : * is smaller, but we use the heuristic that the absolute will usually
2738 : : * be smaller.
2739 : : */
2740 [ + + ]: 412 : if (tuple_fraction >= 1.0)
2741 : : {
2742 [ + - ]: 1 : if (limit_fraction >= 1.0)
2743 : : {
2744 : : /* both absolute */
2745 [ - + ]: 1 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2746 : 1 : }
2747 : : else
2748 : : {
2749 : : /* caller absolute, limit fractional; use caller's value */
2750 : : }
2751 : 1 : }
2752 [ + + ]: 411 : else if (tuple_fraction > 0.0)
2753 : : {
2754 [ + - ]: 4 : if (limit_fraction >= 1.0)
2755 : : {
2756 : : /* caller fractional, limit absolute; use limit */
2757 : 4 : tuple_fraction = limit_fraction;
2758 : 4 : }
2759 : : else
2760 : : {
2761 : : /* both fractional */
2762 [ # # ]: 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2763 : : }
2764 : 4 : }
2765 : : else
2766 : : {
2767 : : /* no info from caller, just use limit */
2768 : 407 : tuple_fraction = limit_fraction;
2769 : : }
2770 : 412 : }
2771 [ + + + + ]: 92 : else if (*offset_est != 0 && tuple_fraction > 0.0)
2772 : : {
2773 : : /*
2774 : : * We have an OFFSET but no LIMIT. This acts entirely differently
2775 : : * from the LIMIT case: here, we need to increase rather than decrease
2776 : : * the caller's tuple_fraction, because the OFFSET acts to cause more
2777 : : * tuples to be fetched instead of fewer. This only matters if we got
2778 : : * a tuple_fraction > 0, however.
2779 : : *
2780 : : * As above, use 10% if OFFSET is present but unestimatable.
2781 : : */
2782 [ + - ]: 2 : if (*offset_est < 0)
2783 : 0 : limit_fraction = 0.10;
2784 : : else
2785 : 2 : limit_fraction = (double) *offset_est;
2786 : :
2787 : : /*
2788 : : * If we have absolute counts from both caller and OFFSET, add them
2789 : : * together; likewise if they are both fractional. If one is
2790 : : * fractional and the other absolute, we want to take the larger, and
2791 : : * we heuristically assume that's the fractional one.
2792 : : */
2793 [ - + ]: 2 : if (tuple_fraction >= 1.0)
2794 : : {
2795 [ # # ]: 0 : if (limit_fraction >= 1.0)
2796 : : {
2797 : : /* both absolute, so add them together */
2798 : 0 : tuple_fraction += limit_fraction;
2799 : 0 : }
2800 : : else
2801 : : {
2802 : : /* caller absolute, limit fractional; use limit */
2803 : 0 : tuple_fraction = limit_fraction;
2804 : : }
2805 : 0 : }
2806 : : else
2807 : : {
2808 [ + - ]: 2 : if (limit_fraction >= 1.0)
2809 : : {
2810 : : /* caller fractional, limit absolute; use caller's value */
2811 : 2 : }
2812 : : else
2813 : : {
2814 : : /* both fractional, so add them together */
2815 : 0 : tuple_fraction += limit_fraction;
2816 [ # # ]: 0 : if (tuple_fraction >= 1.0)
2817 : 0 : tuple_fraction = 0.0; /* assume fetch all */
2818 : : }
2819 : : }
2820 : 2 : }
2821 : :
2822 : 1008 : return tuple_fraction;
2823 : 504 : }
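    : :
    : : /*
    : :  * Minimal sketch (not part of planner.c, no PostgreSQL dependencies):
    : :  * the LIMIT-case combination heuristic above, distilled into a pure
    : :  * function.  Values >= 1.0 are absolute row counts, values in (0, 1)
    : :  * are fractions of the result, and 0 means "no information".
    : :  */
    : : static double
    : : combine_tuple_fractions(double caller, double limit)
    : : {
    : : 	if (caller >= 1.0)			/* caller gave an absolute count */
    : : 		return (limit >= 1.0 && limit < caller) ? limit : caller;
    : : 	if (caller > 0.0)			/* caller gave a fraction */
    : : 	{
    : : 		if (limit >= 1.0)		/* absolute LIMIT beats a fraction */
    : : 			return limit;
    : : 		return (limit < caller) ? limit : caller;	/* both fractional */
    : : 	}
    : : 	return limit;				/* no info from caller; use LIMIT's */
    : : }
    : :
    : : /*
    : :  * For example, "LIMIT 10 OFFSET 20" gives limit_fraction = 30; if the
    : :  * caller passed tuple_fraction = 0.10, the absolute estimate wins and 30
    : :  * is returned.  A non-constant "LIMIT $1" falls back to the 10% guess.
    : :  * (The OFFSET-without-LIMIT branch combines the other way, adding
    : :  * estimates so as to inflate the fraction.)
    : :  */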
2824 : :
2825 : : /*
2826 : : * limit_needed - do we actually need a Limit plan node?
2827 : : *
2828 : : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2829 : : * a Limit node. This is worth checking for because "OFFSET 0" is a common
2830 : : * locution for an optimization fence. (Because other places in the planner
2831 : : * merely check whether parse->limitOffset isn't NULL, it will still work as
2832 : : * an optimization fence --- we're just suppressing unnecessary run-time
2833 : : * overhead.)
2834 : : *
2835 : : * This might look like it could be merged into preprocess_limit, but there's
2836 : : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2837 : : * in preprocess_limit it's good enough to consider estimated values.
2838 : : */
2839 : : bool
2840 : 110947 : limit_needed(Query *parse)
2841 : : {
2842 : 110947 : Node *node;
2843 : :
2844 : 110947 : node = parse->limitCount;
2845 [ + + ]: 110947 : if (node)
2846 : : {
2847 [ + + ]: 1007 : if (IsA(node, Const))
2848 : : {
2849 : : /* NULL indicates LIMIT ALL, ie, no limit */
2850 [ - + ]: 968 : if (!((Const *) node)->constisnull)
2851 : 968 : return true; /* LIMIT with a constant value */
2852 : 0 : }
2853 : : else
2854 : 39 : return true; /* non-constant LIMIT */
2855 : 0 : }
2856 : :
2857 : 109940 : node = parse->limitOffset;
2858 [ + + ]: 109940 : if (node)
2859 : : {
2860 [ + + ]: 263 : if (IsA(node, Const))
2861 : : {
2862 : : /* Treat NULL as no offset; the executor would too */
2863 [ - + ]: 212 : if (!((Const *) node)->constisnull)
2864 : : {
2865 : 212 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2866 : :
2867 [ + + ]: 212 : if (offset != 0)
2868 : 17 : return true; /* OFFSET with a nonzero value */
2869 [ + + ]: 212 : }
2870 : 195 : }
2871 : : else
2872 : 51 : return true; /* non-constant OFFSET */
2873 : 195 : }
2874 : :
2875 : 109872 : return false; /* don't need a Limit plan node */
2876 : 110947 : }
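    : :
    : : /*
    : :  * Examples (illustrative, derived from the checks above):
    : :  *
    : :  *    SELECT * FROM t OFFSET 0;     -- false: no Limit node is added,
    : :  *                                  -- though OFFSET 0 still acts as an
    : :  *                                  -- optimization fence elsewhere
    : :  *    SELECT * FROM t LIMIT NULL;   -- false: constant-null means LIMIT ALL
    : :  *    SELECT * FROM t LIMIT 5;      -- true
    : :  *    SELECT * FROM t OFFSET $1;    -- true: non-constant OFFSET
    : :  */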
2877 : :
2878 : : /*
2879 : : * preprocess_groupclause - do preparatory work on GROUP BY clause
2880 : : *
2881 : : * The idea here is to adjust the ordering of the GROUP BY elements
2882 : : * (which in itself is semantically insignificant) to match ORDER BY,
2883 : : * thereby allowing a single sort operation to both implement the ORDER BY
2884 : : * requirement and set up for a Unique step that implements GROUP BY.
2885 : : * We also consider a partial match between GROUP BY and ORDER BY elements,
2886 : : * which can allow ORDER BY to be implemented using an incremental sort.
2887 : : *
2888 : : * We also consider other orderings of the GROUP BY elements, which could
2889 : : * match the sort ordering of other possible plans (eg an indexscan) and
2890 : : * thereby reduce cost. This is implemented during the generation of grouping
2891 : : * paths. See get_useful_group_keys_orderings() for details.
2892 : : *
2893 : : * Note: we need no comparable processing of the distinctClause, because
2894 : : * the parser has already enforced that it matches ORDER BY.
2895 : : *
2896 : : * Note: we return a fresh List, but its elements are the same
2897 : : * SortGroupClauses appearing in parse->groupClause. This is important
2898 : : * because later processing may modify the processed_groupClause list.
2899 : : *
2900 : : * For grouping sets, the order of items is instead forced to agree with that
2901 : : * of the grouping set (and items not in the grouping set are skipped). The
2902 : : * work of ordering the grouping set elements to match the ORDER BY, where
2903 : : * possible, is done elsewhere.
2904 : : */
2905 : : static List *
2906 : 1326 : preprocess_groupclause(PlannerInfo *root, List *force)
2907 : : {
2908 : 1326 : Query *parse = root->parse;
2909 : 1326 : List *new_groupclause = NIL;
2910 : 1326 : ListCell *sl;
2911 : 1326 : ListCell *gl;
2912 : :
2913 : : /* For grouping sets, we need to force the ordering */
2914 [ + + ]: 1326 : if (force)
2915 : : {
2916 [ + - + + : 1783 : foreach(sl, force)
+ + ]
2917 : : {
2918 : 1047 : Index ref = lfirst_int(sl);
2919 : 1047 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2920 : :
2921 : 1047 : new_groupclause = lappend(new_groupclause, cl);
2922 : 1047 : }
2923 : :
2924 : 736 : return new_groupclause;
2925 : : }
2926 : :
2927 : : /* If no ORDER BY, nothing useful to do here */
2928 [ + + ]: 590 : if (parse->sortClause == NIL)
2929 : 335 : return list_copy(parse->groupClause);
2930 : :
2931 : : /*
2932 : : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2933 : : * items, but only as far as we can make a matching prefix.
2934 : : *
2935 : : * This code assumes that the sortClause contains no duplicate items.
2936 : : */
2937 [ + - + + : 601 : foreach(sl, parse->sortClause)
+ + ]
2938 : : {
2939 : 346 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2940 : :
2941 [ + - + + : 765 : foreach(gl, parse->groupClause)
+ + ]
2942 : : {
2943 : 419 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2944 : :
2945 [ + + ]: 419 : if (equal(gc, sc))
2946 : : {
2947 : 252 : new_groupclause = lappend(new_groupclause, gc);
2948 : 252 : break;
2949 : : }
2950 [ + + ]: 419 : }
2951 [ + + ]: 346 : if (gl == NULL)
2952 : 94 : break; /* no match, so stop scanning */
2953 [ + + ]: 346 : }
2954 : :
2955 : :
2956 : : /* If no match at all, no point in reordering GROUP BY */
2957 [ + + ]: 255 : if (new_groupclause == NIL)
2958 : 37 : return list_copy(parse->groupClause);
2959 : :
2960 : : /*
2961 : : * Add any remaining GROUP BY items to the new list. We don't require a
2962 : : * complete match, because even partial match allows ORDER BY to be
2963 : : * implemented using incremental sort. Also, give up if there are any
2964 : : * non-sortable GROUP BY items, since then there's no hope anyway.
2965 : : */
2966 [ + - + + : 492 : foreach(gl, parse->groupClause)
+ + - + ]
2967 : : {
2968 : 274 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2969 : :
2970 [ + + ]: 274 : if (list_member_ptr(new_groupclause, gc))
2971 : 252 : continue; /* it matched an ORDER BY item */
2972 [ + - ]: 22 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2973 : 0 : return list_copy(parse->groupClause);
2974 : 22 : new_groupclause = lappend(new_groupclause, gc);
2975 [ + - + ]: 274 : }
2976 : :
2977 : : /* Success --- install the rearranged GROUP BY list */
2978 [ + - ]: 218 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2979 : 218 : return new_groupclause;
2980 : 1326 : }
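    : :
    : : /*
    : :  * Worked examples (illustrative).  Given
    : :  *    SELECT a, b, count(*) FROM t GROUP BY b, a ORDER BY a, b;
    : :  * the loop above rewrites the GROUP BY list as (a, b), so a single sort
    : :  * serves both the grouping and the ORDER BY.  For a partial match such as
    : :  *    ... GROUP BY b, c ORDER BY b, a
    : :  * the result is (b, c): output grouped in that order shares the prefix b
    : :  * with the ORDER BY, letting an incremental sort complete the ordering.
    : :  */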
2981 : :
2982 : : /*
2983 : : * Extract lists of grouping sets that can be implemented using a single
2984 : : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2985 : : *
2986 : : * Input must be sorted with smallest sets first. Result has each sublist
2987 : : * sorted with smallest sets first.
2988 : : *
2989 : : * We want to produce the absolute minimum possible number of lists here to
2990 : : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2991 : : * of finding the minimal partition of a partially-ordered set into chains
2992 : : * (which is what we need, taking the list of grouping sets as a poset ordered
2993 : : * by set inclusion) can be mapped to the problem of finding the maximum
2994 : : * cardinality matching on a bipartite graph, which is solvable in polynomial
2995 : : * time, with a worst case of O(n^2.5) and usually much better. Since
2996 : : * our N is at most 4096, we don't need to consider fallbacks to
2997 : : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2998 : : * half a second on my modest system even with optimization off and assertions
2999 : : * on.)
3000 : : */
3001 : : static List *
3002 : 164 : extract_rollup_sets(List *groupingSets)
3003 : : {
3004 : 164 : int num_sets_raw = list_length(groupingSets);
3005 : 164 : int num_empty = 0;
3006 : 164 : int num_sets = 0; /* distinct sets */
3007 : 164 : int num_chains = 0;
3008 : 164 : List *result = NIL;
3009 : 164 : List **results;
3010 : 164 : List **orig_sets;
3011 : 164 : Bitmapset **set_masks;
3012 : 164 : int *chains;
3013 : 164 : short **adjacency;
3014 : 164 : short *adjacency_buf;
3015 : 164 : BipartiteMatchState *state;
3016 : 164 : int i;
3017 : 164 : int j;
3018 : 164 : int j_size;
3019 : 164 : ListCell *lc1 = list_head(groupingSets);
3020 : 164 : ListCell *lc;
3021 : :
3022 : : /*
3023 : : * Start by stripping out empty sets. The algorithm doesn't require this,
3024 : : * but the planner currently needs all empty sets to be returned in the
3025 : : * first list, so we strip them here and add them back after.
3026 : : */
3027 [ + + + + ]: 278 : while (lc1 && lfirst(lc1) == NIL)
3028 : : {
3029 : 114 : ++num_empty;
3030 : 114 : lc1 = lnext(groupingSets, lc1);
3031 : : }
3032 : :
3033 : : /* bail out now if it turns out that all we had were empty sets. */
3034 [ + + ]: 164 : if (!lc1)
3035 : 13 : return list_make1(groupingSets);
3036 : :
3037 : : /*----------
3038 : : * We don't strictly need to remove duplicate sets here, but if we don't,
3039 : : * they tend to become scattered through the result, which is a bit
3040 : : * confusing (and irritating if we ever decide to optimize them out).
3041 : : * So we remove them here and add them back after.
3042 : : *
3043 : : * For each non-duplicate set, we fill in the following:
3044 : : *
3045 : : * orig_sets[i] = list of the original set lists
3046 : : * set_masks[i] = bitmapset for testing inclusion
3047 : : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3048 : : *
3049 : : * chains[i] will be the result group this set is assigned to.
3050 : : *
3051 : : * We index all of these from 1 rather than 0 because it is convenient
3052 : : * to leave 0 free for the NIL node in the graph algorithm.
3053 : : *----------
3054 : : */
3055 : 151 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3056 : 151 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3057 : 151 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3058 : 151 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3059 : :
3060 : 151 : j_size = 0;
3061 : 151 : j = 0;
3062 : 151 : i = 1;
3063 : :
3064 [ + - + + : 528 : for_each_cell(lc, groupingSets, lc1)
+ + ]
3065 : : {
3066 : 377 : List *candidate = (List *) lfirst(lc);
3067 : 377 : Bitmapset *candidate_set = NULL;
3068 : 377 : ListCell *lc2;
3069 : 377 : int dup_of = 0;
3070 : :
3071 [ + - + + : 903 : foreach(lc2, candidate)
+ + ]
3072 : : {
3073 : 526 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3074 : 526 : }
3075 : :
3076 : : /* we can only be a dup if we're the same length as a previous set */
3077 [ + + ]: 377 : if (j_size == list_length(candidate))
3078 : : {
3079 : 145 : int k;
3080 : :
3081 [ + + ]: 341 : for (k = j; k < i; ++k)
3082 : : {
3083 [ + + ]: 221 : if (bms_equal(set_masks[k], candidate_set))
3084 : : {
3085 : 25 : dup_of = k;
3086 : 25 : break;
3087 : : }
3088 : 196 : }
3089 : 145 : }
3090 [ + - ]: 232 : else if (j_size < list_length(candidate))
3091 : : {
3092 : 232 : j_size = list_length(candidate);
3093 : 232 : j = i;
3094 : 232 : }
3095 : :
3096 [ + + ]: 377 : if (dup_of > 0)
3097 : : {
3098 : 25 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3099 : 25 : bms_free(candidate_set);
3100 : 25 : }
3101 : : else
3102 : : {
3103 : 352 : int k;
3104 : 352 : int n_adj = 0;
3105 : :
3106 : 352 : orig_sets[i] = list_make1(candidate);
3107 : 352 : set_masks[i] = candidate_set;
3108 : :
3109 : : /* fill in adjacency list; no need to compare equal-size sets */
3110 : :
3111 [ + + ]: 564 : for (k = j - 1; k > 0; --k)
3112 : : {
3113 [ + + ]: 212 : if (bms_is_subset(set_masks[k], candidate_set))
3114 : 185 : adjacency_buf[++n_adj] = k;
3115 : 212 : }
3116 : :
3117 [ + + ]: 352 : if (n_adj > 0)
3118 : : {
3119 : 101 : adjacency_buf[0] = n_adj;
3120 : 101 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3121 : 101 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3122 : 101 : }
3123 : : else
3124 : 251 : adjacency[i] = NULL;
3125 : :
3126 : 352 : ++i;
3127 : 352 : }
3128 : 377 : }
3129 : :
3130 : 151 : num_sets = i - 1;
3131 : :
3132 : : /*
3133 : : * Apply the graph matching algorithm to do the work.
3134 : : */
3135 : 151 : state = BipartiteMatch(num_sets, num_sets, adjacency);
3136 : :
3137 : : /*
3138 : : * Now, the state->pair* fields have the info we need to assign sets to
3139 : : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3140 : : * pair_vu[v] = u (both will be true, but we check both so that we can do
3141 : : * it in one pass)
3142 : : */
3143 : 151 : chains = palloc0((num_sets + 1) * sizeof(int));
3144 : :
3145 [ + + ]: 503 : for (i = 1; i <= num_sets; ++i)
3146 : : {
3147 : 352 : int u = state->pair_vu[i];
3148 : 352 : int v = state->pair_uv[i];
3149 : :
3150 [ + + - + ]: 352 : if (u > 0 && u < i)
3151 : 0 : chains[i] = chains[u];
3152 [ + + - + ]: 352 : else if (v > 0 && v < i)
3153 : 97 : chains[i] = chains[v];
3154 : : else
3155 : 255 : chains[i] = ++num_chains;
3156 : 352 : }
3157 : :
3158 : : /* build result lists. */
3159 : 151 : results = palloc0((num_chains + 1) * sizeof(List *));
3160 : :
3161 [ + + ]: 503 : for (i = 1; i <= num_sets; ++i)
3162 : : {
3163 : 352 : int c = chains[i];
3164 : :
3165 [ + - ]: 352 : Assert(c > 0);
3166 : :
3167 : 352 : results[c] = list_concat(results[c], orig_sets[i]);
3168 : 352 : }
3169 : :
3170 : : /* push any empty sets back on the first list. */
3171 [ + + ]: 242 : while (num_empty-- > 0)
3172 : 91 : results[1] = lcons(NIL, results[1]);
3173 : :
3174 : : /* make result list */
3175 [ + + ]: 406 : for (i = 1; i <= num_chains; ++i)
3176 : 255 : result = lappend(result, results[i]);
3177 : :
3178 : : /*
3179 : : * Free all the things.
3180 : : *
3181 : : * (This is over-fussy for small sets but for large sets we could have
3182 : : * tied up a nontrivial amount of memory.)
3183 : : */
3184 : 151 : BipartiteMatchFree(state);
3185 : 151 : pfree(results);
3186 : 151 : pfree(chains);
3187 [ + + ]: 503 : for (i = 1; i <= num_sets; ++i)
3188 [ + + ]: 453 : if (adjacency[i])
3189 : 101 : pfree(adjacency[i]);
3190 : 151 : pfree(adjacency);
3191 : 151 : pfree(adjacency_buf);
3192 : 151 : pfree(orig_sets);
3193 [ + + ]: 503 : for (i = 1; i <= num_sets; ++i)
3194 : 352 : bms_free(set_masks[i]);
3195 : 151 : pfree(set_masks);
3196 : :
3197 : 151 : return result;
3198 : 164 : }
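    : :
    : : /*
    : :  * Worked example (illustrative): for
    : :  *    GROUP BY GROUPING SETS ((), (a), (b), (a,b), (a,b,c))
    : :  * the matching produces a minimal chain cover of the inclusion order;
    : :  * two chains are needed, e.g. (a) < (a,b) < (a,b,c) plus (b) on its own,
    : :  * and the empty set is pushed back onto the first list:
    : :  *    [ [(), (a), (a,b), (a,b,c)], [(b)] ]
    : :  * so the whole query needs only two sorted rollup passes.
    : :  */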
3199 : :
3200 : : /*
3201 : : * Reorder the elements of a list of grouping sets such that they have correct
3202 : : * prefix relationships. Also inserts the GroupingSetData annotations.
3203 : : *
3204 : : * The input must be ordered with smallest sets first; the result is returned
3205 : : * with largest sets first. Note that the result shares no list substructure
3206 : : * with the input, so it's safe for the caller to modify it later.
3207 : : *
3208 : : * If we're passed in a sortclause, we follow its order of columns to the
3209 : : * extent possible, to minimize the chance that we add unnecessary sorts.
3210 : : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3211 : : * gets implemented in one pass.)
3212 : : */
3213 : : static List *
3214 : 268 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3215 : : {
3216 : 268 : ListCell *lc;
3217 : 268 : List *previous = NIL;
3218 : 268 : List *result = NIL;
3219 : :
3220 [ + - + + : 759 : foreach(lc, groupingSets)
+ + ]
3221 : : {
3222 : 491 : List *candidate = (List *) lfirst(lc);
3223 : 491 : List *new_elems = list_difference_int(candidate, previous);
3224 : 491 : GroupingSetData *gs = makeNode(GroupingSetData);
3225 : :
3226 [ + + + + ]: 604 : while (list_length(sortclause) > list_length(previous) &&
3227 : 85 : new_elems != NIL)
3228 : : {
3229 : 48 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3230 : 48 : int ref = sc->tleSortGroupRef;
3231 : :
3232 [ + + ]: 48 : if (list_member_int(new_elems, ref))
3233 : : {
3234 : 28 : previous = lappend_int(previous, ref);
3235 : 28 : new_elems = list_delete_int(new_elems, ref);
3236 : 28 : }
3237 : : else
3238 : : {
3239 : : /* diverged from the sortclause; give up on it */
3240 : 20 : sortclause = NIL;
3241 : 20 : break;
3242 : : }
3243 [ - + + ]: 48 : }
3244 : :
3245 : 491 : previous = list_concat(previous, new_elems);
3246 : :
3247 : 491 : gs->set = list_copy(previous);
3248 : 491 : result = lcons(gs, result);
3249 : 491 : }
3250 : :
3251 : 268 : list_free(previous);
3252 : :
3253 : 536 : return result;
3254 : 268 : }
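    : :
    : : /*
    : :  * Worked example (illustrative), using the case from the header comment:
    : :  * GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a arrives here sorted smallest
    : :  * first, as ((c),(a,b,c)).  The first set fixes the prefix (c); the second
    : :  * set's new elements are then added following the sortclause, giving
    : :  * (c,b,a).  Returned largest first, the result is ((c,b,a),(c)), so one
    : :  * sort on (c,b,a) serves both the rollup and the ORDER BY.
    : :  */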
3255 : :
3256 : : /*
3257 : : * has_volatile_pathkey
3258 : : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3259 : : * containing a volatile function. Otherwise returns false.
3260 : : */
3261 : : static bool
3262 : 275 : has_volatile_pathkey(List *keys)
3263 : : {
3264 : 275 : ListCell *lc;
3265 : :
3266 [ + + + + : 577 : foreach(lc, keys)
+ + + + ]
3267 : : {
3268 : 302 : PathKey *pathkey = lfirst_node(PathKey, lc);
3269 : :
3270 [ + + ]: 302 : if (pathkey->pk_eclass->ec_has_volatile)
3271 : 2 : return true;
3272 [ + + ]: 302 : }
3273 : :
3274 : 273 : return false;
3275 : 275 : }
3276 : :
3277 : : /*
3278 : : * adjust_group_pathkeys_for_groupagg
3279 : : * Add pathkeys to root->group_pathkeys to reflect the best set of
3280 : : * pre-ordered input for ordered aggregates.
3281 : : *
3282 : : * We define "best" as the pathkeys that suit the largest number of
3283 : : * aggregate functions. We find these by looking at the first ORDER BY /
3284 : : * DISTINCT aggregate and take the pathkeys for that before searching for
3285 : : * other aggregates that require the same or a more strict variation of the
3286 : : * same pathkeys. We then repeat that process for any remaining aggregates
3287 : : * with different pathkeys and if we find another set of pathkeys that suits a
3288 : : * larger number of aggregates then we select those pathkeys instead.
3289 : : *
3290 : : * When the best pathkeys are found we also mark each Aggref that can use
3291 : : * those pathkeys as aggpresorted = true.
3292 : : *
3293 : : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3294 : : * volatile functions, we never make use of these pathkeys. We want to ensure
3295 : : * that sorts using volatile functions are done independently in each Aggref
3296 : : * rather than once at the query level. If we were to allow this then Aggrefs
3297 : : * with compatible sort orders would all transition their rows in the same
3298 : : * order if those pathkeys were deemed to be the best pathkeys to sort on.
3299 : : * Whereas, if some other Aggref's pathkeys happened to be deemed the
3300 : : * better pathkeys to sort on, the volatile-function Aggrefs would be
3301 : : * left to perform their sorts individually. To avoid this inconsistent
3302 : : * behavior, which could make an Aggref's results depend on what other
3303 : : * Aggrefs the query contains, we always force Aggrefs with volatile
3304 : : * functions to perform their own sorts.
3305 : : */
3306 : : static void
3307 : 206 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3308 : : {
3309 : 206 : List *grouppathkeys = root->group_pathkeys;
3310 : 206 : List *bestpathkeys;
3311 : 206 : Bitmapset *bestaggs;
3312 : 206 : Bitmapset *unprocessed_aggs;
3313 : 206 : ListCell *lc;
3314 : 206 : int i;
3315 : :
3316 : : /* Shouldn't be here if there are grouping sets */
3317 [ + - ]: 206 : Assert(root->parse->groupingSets == NIL);
3318 : : /* Shouldn't be here unless there are some ordered aggregates */
3319 [ + - ]: 206 : Assert(root->numOrderedAggs > 0);
3320 : :
3321 : : /* Do nothing if disabled */
3322 [ + + ]: 206 : if (!enable_presorted_aggregate)
3323 : 1 : return;
3324 : :
3325 : : /*
3326 : : * Make a first pass over all AggInfos to collect a Bitmapset containing
3327 : : * the indexes of all AggInfos to be processed below.
3328 : : */
3329 : 205 : unprocessed_aggs = NULL;
3330 [ + - + + : 522 : foreach(lc, root->agginfos)
+ + ]
3331 : : {
3332 : 317 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3333 : 317 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3334 : :
3335 [ + + ]: 317 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3336 : 41 : continue;
3337 : :
3338 : : /* Skip unless there's a DISTINCT or ORDER BY clause */
3339 [ + + + + ]: 276 : if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3340 : 49 : continue;
3341 : :
3342 : : /* Additional safety checks are needed if there's a FILTER clause */
3343 [ + + ]: 227 : if (aggref->aggfilter != NULL)
3344 : : {
3345 : 8 : ListCell *lc2;
3346 : 8 : bool allow_presort = true;
3347 : :
3348 : : /*
3349 : : * When the Aggref has a FILTER clause, it's possible that the
3350 : : * filter removes rows that cannot be sorted because the
3351 : : * expression to sort by results in an error during its
3352 : : * evaluation. This is a problem for presorting as that happens
3353 : : * before the FILTER, whereas without presorting, the Aggregate
3354 : : * node will apply the FILTER *before* sorting. So that we never
3355 : : * try to sort anything that might error, here we aim to skip over
3356 : : * any Aggrefs with arguments with expressions which, when
3357 : : * evaluated, could cause an ERROR. Vars and Consts are ok. There
3358 : : * may be more cases that should be allowed, but more thought
3359 : : * needs to be given. Err on the side of caution.
3360 : : */
3361 [ + - + + : 19 : foreach(lc2, aggref->args)
+ + ]
3362 : : {
3363 : 11 : TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3364 : 11 : Expr *expr = tle->expr;
3365 : :
3366 [ + + ]: 13 : while (IsA(expr, RelabelType))
3367 : 2 : expr = (Expr *) (castNode(RelabelType, expr))->arg;
3368 : :
3369 : : /* Common case, Vars and Consts are ok */
3370 [ + + + + ]: 11 : if (IsA(expr, Var) || IsA(expr, Const))
3371 : 8 : continue;
3372 : :
3373 : : /* Unsupported. Don't try to presort for this Aggref */
3374 : 3 : allow_presort = false;
3375 : 3 : break;
3376 [ + + ]: 11 : }
3377 : :
3378 : : /* Skip unsupported Aggrefs */
3379 [ + + ]: 8 : if (!allow_presort)
3380 : 3 : continue;
3381 [ + + ]: 8 : }
3382 : :
3383 : 448 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3384 : 224 : foreach_current_index(lc));
3385 [ + + ]: 317 : }
3386 : :
3387 : : /*
3388 : : * Now process all the unprocessed_aggs to find the best set of pathkeys
3389 : : * for the given set of aggregates.
3390 : : *
3391 : : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3392 : : * this during the first loop using the pathkeys for the very first
3393 : : * AggInfo then taking any stronger pathkeys from any other AggInfos with
3394 : : * a more strict set of compatible pathkeys. Once the outer loop is
3395 : : * complete, we mark off all the aggregates with compatible pathkeys then
3396 : : * remove those from the unprocessed_aggs and repeat the process to try to
3397 : : * find another set of pathkeys that are suitable for a larger number of
3398 : : * aggregates. The outer loop will stop when there are not enough
3399 : : * unprocessed aggregates for it to be possible to find a set of pathkeys
3400 : : * to suit a larger number of aggregates.
3401 : : */
3402 : 205 : bestpathkeys = NIL;
3403 : 205 : bestaggs = NULL;
3404 [ + + ]: 402 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3405 : : {
3406 : 197 : Bitmapset *aggindexes = NULL;
3407 : 197 : List *currpathkeys = NIL;
3408 : :
3409 : 197 : i = -1;
3410 [ + + ]: 472 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3411 : : {
3412 : 275 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3413 : 275 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3414 : 275 : List *sortlist;
3415 : 275 : List *pathkeys;
3416 : :
3417 [ + + ]: 275 : if (aggref->aggdistinct != NIL)
3418 : 116 : sortlist = aggref->aggdistinct;
3419 : : else
3420 : 159 : sortlist = aggref->aggorder;
3421 : :
3422 : 550 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3423 : 275 : aggref->args);
3424 : :
3425 : : /*
3426 : : * Ignore Aggrefs which have volatile functions in their ORDER BY
3427 : : * or DISTINCT clause.
3428 : : */
3429 [ + + ]: 275 : if (has_volatile_pathkey(pathkeys))
3430 : : {
3431 : 2 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3432 : 2 : continue;
3433 : : }
3434 : :
3435 : : /*
3436 : : * When not set yet, take the pathkeys from the first unprocessed
3437 : : * aggregate.
3438 : : */
3439 [ + + ]: 273 : if (currpathkeys == NIL)
3440 : : {
3441 : 197 : currpathkeys = pathkeys;
3442 : :
3443 : : /* include the GROUP BY pathkeys, if they exist */
3444 [ + + ]: 197 : if (grouppathkeys != NIL)
3445 : 82 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3446 : 41 : currpathkeys);
3447 : :
3448 : : /* record that we found pathkeys for this aggregate */
3449 : 197 : aggindexes = bms_add_member(aggindexes, i);
3450 : 197 : }
3451 : : else
3452 : : {
3453 : : /* now look for a stronger set of matching pathkeys */
3454 : :
3455 : : /* include the GROUP BY pathkeys, if they exist */
3456 [ + + ]: 76 : if (grouppathkeys != NIL)
3457 : 96 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3458 : 48 : pathkeys);
3459 : :
3460 : : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3461 [ + + + ]: 76 : switch (compare_pathkeys(currpathkeys, pathkeys))
3462 : : {
3463 : : case PATHKEYS_BETTER2:
3464 : : /* 'pathkeys' are stronger, use these ones instead */
3465 : 2 : currpathkeys = pathkeys;
3466 : : /* FALLTHROUGH */
3467 : :
3468 : : case PATHKEYS_BETTER1:
3469 : : /* 'pathkeys' are less strict */
3470 : : /* FALLTHROUGH */
3471 : :
3472 : : case PATHKEYS_EQUAL:
3473 : : /* mark this aggregate as covered by 'currpathkeys' */
3474 : 11 : aggindexes = bms_add_member(aggindexes, i);
3475 : 11 : break;
3476 : :
3477 : : case PATHKEYS_DIFFERENT:
3478 : : break;
3479 : : }
3480 : : }
3481 [ + + ]: 275 : }
3482 : :
3483 : : /* remove the aggregates that we've just processed */
3484 : 197 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3485 : :
3486 : : /*
3487 : : * If this pass included more aggregates than the previous best then
3488 : : * use these ones as the best set.
3489 : : */
3490 [ + + ]: 197 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3491 : : {
3492 : 180 : bestaggs = aggindexes;
3493 : 180 : bestpathkeys = currpathkeys;
3494 : 180 : }
3495 : 197 : }
3496 : :
3497 : : /*
3498 : : * If we found any ordered aggregates, update root->group_pathkeys to add
3499 : : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3500 : : * the original GROUP BY pathkeys already.
3501 : : */
3502 [ + + ]: 205 : if (bestpathkeys != NIL)
3503 : 170 : root->group_pathkeys = bestpathkeys;
3504 : :
3505 : : /*
3506 : : * Now that we've found the best set of aggregates we can set the
3507 : : * presorted flag to indicate to the executor that it needn't bother
3508 : : * performing a sort for these Aggrefs. We're able to do this now as
3509 : : * there's no chance of a Hash Aggregate plan as create_grouping_paths
3510 : : * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3511 : : * of ordered aggregates.
3512 : : */
3513 : 205 : i = -1;
3514 [ + + ]: 391 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3515 : : {
3516 : 186 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3517 : :
3518 [ + - + + : 375 : foreach(lc, agginfo->aggrefs)
+ + ]
3519 : : {
3520 : 189 : Aggref *aggref = lfirst_node(Aggref, lc);
3521 : :
3522 : 189 : aggref->aggpresorted = true;
3523 : 189 : }
3524 : 186 : }
3525 : 206 : }
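    : :
    : : /*
    : :  * Worked example (illustrative): in
    : :  *    SELECT string_agg(x, ',' ORDER BY y),
    : :  *           string_agg(x, ',' ORDER BY y, z)
    : :  *    FROM t GROUP BY g;
    : :  * the second aggregate's pathkeys (g, y, z) are strictly stronger than
    : :  * the first's (g, y), so both Aggrefs are marked aggpresorted and
    : :  * root->group_pathkeys becomes (g, y, z): one sort of the input serves
    : :  * the grouping and both aggregates.
    : :  */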
3526 : :
3527 : : /*
3528 : : * Compute query_pathkeys and other pathkeys during plan generation
3529 : : */
3530 : : static void
3531 : 50850 : standard_qp_callback(PlannerInfo *root, void *extra)
3532 : : {
3533 : 50850 : Query *parse = root->parse;
3534 : 50850 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3535 : 50850 : List *tlist = root->processed_tlist;
3536 : 50850 : List *activeWindows = qp_extra->activeWindows;
3537 : :
3538 : : /*
3539 : : * Calculate pathkeys that represent grouping/ordering and/or ordered
3540 : : * aggregate requirements.
3541 : : */
3542 [ + + ]: 50850 : if (qp_extra->gset_data)
3543 : : {
3544 : : /*
3545 : : * With grouping sets, just use the first RollupData's groupClause. We
3546 : : * don't make any effort to optimize grouping clauses when there are
3547 : : * grouping sets, nor can we combine aggregate ordering keys with
3548 : : * grouping.
3549 : : */
3550 : 165 : List *rollups = qp_extra->gset_data->rollups;
3551 [ + + ]: 165 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3552 : :
3553 [ + - ]: 165 : if (grouping_is_sortable(groupClause))
3554 : : {
3555 : 165 : bool sortable;
3556 : :
3557 : : /*
3558 : : * The groupClause is logically below the grouping step. So if
3559 : : * there is an RTE entry for the grouping step, we need to remove
3560 : : * its RT index from the sort expressions before we make PathKeys
3561 : : * for them.
3562 : : */
3563 : 165 : root->group_pathkeys =
3564 : 330 : make_pathkeys_for_sortclauses_extended(root,
3565 : : &groupClause,
3566 : 165 : tlist,
3567 : : false,
3568 : 165 : parse->hasGroupRTE,
3569 : : &sortable,
3570 : : false);
3571 [ + - ]: 165 : Assert(sortable);
3572 : 165 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3573 : 165 : }
3574 : : else
3575 : : {
3576 : 0 : root->group_pathkeys = NIL;
3577 : 0 : root->num_groupby_pathkeys = 0;
3578 : : }
3579 : 165 : }
3580 [ + + + + ]: 50685 : else if (parse->groupClause || root->numOrderedAggs > 0)
3581 : : {
3582 : : /*
3583 : : * With a plain GROUP BY list, we can remove any grouping items that
3584 : : * are proven redundant by EquivalenceClass processing. For example,
3585 : : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3586 : : * especially common cases, but they're nearly free to detect. Note
3587 : : * that we remove redundant items from processed_groupClause but not
3588 : : * the original parse->groupClause.
3589 : : */
3590 : 766 : bool sortable;
3591 : :
3592 : : /*
3593 : : * Convert group clauses into pathkeys. Set the ec_sortref field of
3594 : : * EquivalenceClass'es if it's not set yet.
3595 : : */
3596 : 766 : root->group_pathkeys =
3597 : 1532 : make_pathkeys_for_sortclauses_extended(root,
3598 : 766 : &root->processed_groupClause,
3599 : 766 : tlist,
3600 : : true,
3601 : : false,
3602 : : &sortable,
3603 : : true);
3604 [ + - ]: 766 : if (!sortable)
3605 : : {
3606 : : /* Can't sort; no point in considering aggregate ordering either */
3607 : 0 : root->group_pathkeys = NIL;
3608 : 0 : root->num_groupby_pathkeys = 0;
3609 : 0 : }
3610 : : else
3611 : : {
3612 : 766 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3613 : : /* If we have ordered aggs, consider adding onto group_pathkeys */
3614 [ + + ]: 766 : if (root->numOrderedAggs > 0)
3615 : 206 : adjust_group_pathkeys_for_groupagg(root);
3616 : : }
3617 : 766 : }
3618 : : else
3619 : : {
3620 : 49919 : root->group_pathkeys = NIL;
3621 : 49919 : root->num_groupby_pathkeys = 0;
3622 : : }
3623 : :
3624 : : /* We consider only the first (bottom) window in pathkeys logic */
3625 [ + + ]: 50850 : if (activeWindows != NIL)
3626 : : {
3627 : 426 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3628 : :
3629 : 852 : root->window_pathkeys = make_pathkeys_for_window(root,
3630 : 426 : wc,
3631 : 426 : tlist);
3632 : 426 : }
3633 : : else
3634 : 50424 : root->window_pathkeys = NIL;
3635 : :
3636 : : /*
3637 : : * As with GROUP BY, we can discard any DISTINCT items that are proven
3638 : : * redundant by EquivalenceClass processing. The non-redundant list is
3639 : : * kept in root->processed_distinctClause, leaving the original
3640 : : * parse->distinctClause alone.
3641 : : */
3642 [ + + ]: 50850 : if (parse->distinctClause)
3643 : : {
3644 : 149 : bool sortable;
3645 : :
3646 : : /* Make a copy since pathkey processing can modify the list */
3647 : 149 : root->processed_distinctClause = list_copy(parse->distinctClause);
3648 : 149 : root->distinct_pathkeys =
3649 : 298 : make_pathkeys_for_sortclauses_extended(root,
3650 : 149 : &root->processed_distinctClause,
3651 : 149 : tlist,
3652 : : true,
3653 : : false,
3654 : : &sortable,
3655 : : false);
3656 [ + + ]: 149 : if (!sortable)
3657 : 1 : root->distinct_pathkeys = NIL;
3658 : 149 : }
3659 : : else
3660 : 50701 : root->distinct_pathkeys = NIL;
3661 : :
3662 : 50850 : root->sort_pathkeys =
3663 : 101700 : make_pathkeys_for_sortclauses(root,
3664 : 50850 : parse->sortClause,
3665 : 50850 : tlist);
3666 : :
3667 : : /* setting setop_pathkeys might be useful to the union planner */
3668 [ + + ]: 50850 : if (qp_extra->setop != NULL)
3669 : : {
3670 : 1940 : List *groupClauses;
3671 : 1940 : bool sortable;
3672 : :
3673 : 1940 : groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3674 : :
3675 : 1940 : root->setop_pathkeys =
3676 : 3880 : make_pathkeys_for_sortclauses_extended(root,
3677 : : &groupClauses,
3678 : 1940 : tlist,
3679 : : false,
3680 : : false,
3681 : : &sortable,
3682 : : false);
3683 [ + + ]: 1940 : if (!sortable)
3684 : 4 : root->setop_pathkeys = NIL;
3685 : 1940 : }
3686 : : else
3687 : 48910 : root->setop_pathkeys = NIL;
3688 : :
3689 : : /*
3690 : : * Figure out whether we want a sorted result from query_planner.
3691 : : *
3692 : : * If we have a sortable GROUP BY clause, then we want a result sorted
3693 : : * properly for grouping. Otherwise, if we have window functions to
3694 : : * evaluate, we try to sort for the first window. Otherwise, if there's a
3695 : : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3696 : : * we try to produce output that's sufficiently well sorted for the
3697 : : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3698 : : * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3699 : : * for a set operation which can benefit from presorted results and have a
3700 : : * sortable targetlist, we want to sort by the target list.
3701 : : *
3702 : : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3703 : : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3704 : : * that might just leave us failing to exploit an available sort order at
3705 : : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3706 : : * much easier, since we know that the parser ensured that one is a
3707 : : * superset of the other.
3708 : : */
3709 [ + + ]: 50850 : if (root->group_pathkeys)
3710 : 867 : root->query_pathkeys = root->group_pathkeys;
3711 [ + + ]: 49983 : else if (root->window_pathkeys)
3712 : 351 : root->query_pathkeys = root->window_pathkeys;
3713 [ + + + + ]: 99264 : else if (list_length(root->distinct_pathkeys) >
3714 : 49632 : list_length(root->sort_pathkeys))
3715 : 92 : root->query_pathkeys = root->distinct_pathkeys;
3716 [ + + ]: 49540 : else if (root->sort_pathkeys)
3717 : 8018 : root->query_pathkeys = root->sort_pathkeys;
3718 [ + + ]: 41522 : else if (root->setop_pathkeys != NIL)
3719 : 1804 : root->query_pathkeys = root->setop_pathkeys;
3720 : : else
3721 : 39718 : root->query_pathkeys = NIL;
3722 : 50850 : }
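    : :
    : : /*
    : :  * Illustrative recap of the priority order chosen above for
    : :  * root->query_pathkeys: GROUP BY pathkeys win, then the first window's,
    : :  * then DISTINCT (only when more rigorous than ORDER BY), then ORDER BY,
    : :  * then setop pathkeys.  For example, in
    : :  *    SELECT DISTINCT a, b FROM t ORDER BY a;
    : :  * the DISTINCT pathkeys (a, b) are longer than the sort pathkeys (a),
    : :  * so query_planner is asked for output sorted by (a, b).
    : :  */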
3723 : :
3724 : : /*
3725 : : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3726 : : *
3727 : : * path_rows: number of output rows from scan/join step
3728 : : * gd: grouping sets data including list of grouping sets and their clauses
3729 : : * target_list: target list containing group clause references
3730 : : *
3731 : : * If doing grouping sets, we also annotate the gsets data with the estimates
3732 : : * for each set and each individual rollup list, with a view to later
3733 : : * determining whether some combination of them could be hashed instead.
3734 : : */
3735 : : static double
3736 : 6202 : get_number_of_groups(PlannerInfo *root,
3737 : : double path_rows,
3738 : : grouping_sets_data *gd,
3739 : : List *target_list)
3740 : : {
3741 : 6202 : Query *parse = root->parse;
3742 : 6202 : double dNumGroups;
3743 : :
3744 [ + + ]: 6202 : if (parse->groupClause)
3745 : : {
3746 : 1621 : List *groupExprs;
3747 : :
3748 [ + + ]: 1621 : if (parse->groupingSets)
3749 : : {
3750 : : /* Add up the estimates for each grouping set */
3751 : 152 : ListCell *lc;
3752 : :
3753 [ + - ]: 152 : Assert(gd); /* keep Coverity happy */
3754 : :
3755 : 152 : dNumGroups = 0;
3756 : :
3757 [ + + + + : 407 : foreach(lc, gd->rollups)
+ + ]
3758 : : {
3759 : 255 : RollupData *rollup = lfirst_node(RollupData, lc);
3760 : 255 : ListCell *lc2;
3761 : 255 : ListCell *lc3;
3762 : :
3763 : 510 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3764 : 255 : target_list);
3765 : :
3766 : 255 : rollup->numGroups = 0.0;
3767 : :
3768 [ + - + + : 723 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
+ - + + +
+ + + ]
3769 : : {
3770 : 468 : List *gset = (List *) lfirst(lc2);
3771 : 468 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3772 : 936 : double numGroups = estimate_num_groups(root,
3773 : 468 : groupExprs,
3774 : 468 : path_rows,
3775 : : &gset,
3776 : : NULL);
3777 : :
3778 : 468 : gs->numGroups = numGroups;
3779 : 468 : rollup->numGroups += numGroups;
3780 : 468 : }
3781 : :
3782 : 255 : dNumGroups += rollup->numGroups;
3783 : 255 : }
3784 : :
3785 [ + + ]: 152 : if (gd->hash_sets_idx)
3786 : : {
3787 : 6 : ListCell *lc2;
3788 : :
3789 : 6 : gd->dNumHashGroups = 0;
3790 : :
3791 : 12 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3792 : 6 : target_list);
3793 : :
3794 [ + - + + : 13 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
+ - + + +
+ + + ]
3795 : : {
3796 : 7 : List *gset = (List *) lfirst(lc);
3797 : 7 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3798 : 14 : double numGroups = estimate_num_groups(root,
3799 : 7 : groupExprs,
3800 : 7 : path_rows,
3801 : : &gset,
3802 : : NULL);
3803 : :
3804 : 7 : gs->numGroups = numGroups;
3805 : 7 : gd->dNumHashGroups += numGroups;
3806 : 7 : }
3807 : :
3808 : 6 : dNumGroups += gd->dNumHashGroups;
3809 : 6 : }
3810 : 152 : }
3811 : : else
3812 : : {
3813 : : /* Plain GROUP BY -- estimate based on optimized groupClause */
3814 : 2938 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3815 : 1469 : target_list);
3816 : :
3817 : 1469 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3818 : : NULL, NULL);
3819 : : }
3820 : 1621 : }
3821 [ + + ]: 4581 : else if (parse->groupingSets)
3822 : : {
3823 : : /* Empty grouping sets ... one result row for each one */
3824 : 10 : dNumGroups = list_length(parse->groupingSets);
3825 : 10 : }
3826 [ - + # # ]: 4571 : else if (parse->hasAggs || root->hasHavingQual)
3827 : : {
3828 : : /* Plain aggregation, one result row */
3829 : 4571 : dNumGroups = 1;
3830 : 4571 : }
3831 : : else
3832 : : {
3833 : : /* Not grouping */
3834 : 0 : dNumGroups = 1;
3835 : : }
3836 : :
3837 : 12404 : return dNumGroups;
3838 : 6202 : }
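    : :
    : : /*
    : :  * Worked example (illustrative): for
    : :  *    SELECT a, b, count(*) FROM t GROUP BY GROUPING SETS ((a), (b));
    : :  * each set is estimated separately against the input rows; if (a) is
    : :  * expected to yield 10 groups and (b) 500, the per-set estimates are
    : :  * summed and dNumGroups = 510.  Plain aggregation with no grouping at
    : :  * all always returns dNumGroups = 1.
    : :  */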
3839 : :
3840 : : /*
3841 : : * create_grouping_paths
3842 : : *
3843 : : * Build a new upperrel containing Paths for grouping and/or aggregation.
3844 : : * Along the way, we also build an upperrel for Paths which are partially
3845 : : * grouped and/or aggregated. A partially grouped and/or aggregated path
3846 : : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3847 : : * the only partially grouped paths we build are also partial paths; that
3848 : : * is, they need a Gather and then a FinalizeAggregate.
3849 : : *
3850 : : * input_rel: contains the source-data Paths
3851 : : * target: the pathtarget for the result Paths to compute
3852 : : * gd: grouping sets data including list of grouping sets and their clauses
3853 : : *
3854 : : * Note: all Paths in input_rel are expected to return the target computed
3855 : : * by make_group_input_target.
3856 : : */
3857 : : static RelOptInfo *
3858 : 5048 : create_grouping_paths(PlannerInfo *root,
3859 : : RelOptInfo *input_rel,
3860 : : PathTarget *target,
3861 : : bool target_parallel_safe,
3862 : : grouping_sets_data *gd)
3863 : : {
3864 : 5048 : Query *parse = root->parse;
3865 : 5048 : RelOptInfo *grouped_rel;
3866 : 5048 : RelOptInfo *partially_grouped_rel;
3867 : 5048 : AggClauseCosts agg_costs;
3868 : :
3869 [ + - + - : 30288 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
+ - - + +
+ ]
3870 : 5048 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3871 : :
3872 : : /*
3873 : : * Create grouping relation to hold fully aggregated grouping and/or
3874 : : * aggregation paths.
3875 : : */
3876 : 10096 : grouped_rel = make_grouping_rel(root, input_rel, target,
3877 : 5048 : target_parallel_safe, parse->havingQual);
3878 : :
3879 : : /*
3880 : : * Create either paths for a degenerate grouping or paths for ordinary
3881 : : * grouping, as appropriate.
3882 : : */
3883 [ + + ]: 5048 : if (is_degenerate_grouping(root))
3884 : 7 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3885 : : else
3886 : : {
3887 : 5041 : int flags = 0;
3888 : 5041 : GroupPathExtraData extra;
3889 : :
3890 : : /*
3891 : : * Determine whether it's possible to perform sort-based
3892 : : * implementations of grouping. (Note that if processed_groupClause
3893 : : * is empty, grouping_is_sortable() is trivially true, and all the
3894 : : * pathkeys_contained_in() tests will succeed too, so that we'll
3895 : : * consider every surviving input path.)
3896 : : *
3897 : : * If we have grouping sets, we might be able to sort some but not all
3898 : : * of them; in this case, we need can_sort to be true as long as we
3899 : : * must consider any sorted-input plan.
3900 : : */
3901 [ + + ]: 5041 : if ((gd && gd->rollups != NIL)
3902 [ + + ]: 5041 : || grouping_is_sortable(root->processed_groupClause))
3903 : 5040 : flags |= GROUPING_CAN_USE_SORT;
3904 : :
3905 : : /*
3906 : : * Determine whether we should consider hash-based implementations of
3907 : : * grouping.
3908 : : *
3909 : : * Hashed aggregation only applies if we're grouping. If we have
3910 : : * grouping sets, some groups might be hashable but others not; in
3911 : : * this case we set can_hash true as long as there is nothing globally
3912 : : * preventing us from hashing (and we should therefore consider plans
3913 : : * with hashes).
3914 : : *
3915 : : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3916 : : * BY aggregates. (Doing so would imply storing *all* the input
3917 : : * values in the hash table, and/or running many sorts in parallel,
3918 : : * either of which seems like a certain loser.) We similarly don't
3919 : : * support ordered-set aggregates in hashed aggregation, but that case
3920 : : * is also included in the numOrderedAggs count.
3921 : : *
3922 : : * Note: grouping_is_hashable() is much more expensive to check than
3923 : : * the other gating conditions, so we want to do it last.
3924 : : */
3925 [ + + + + ]: 5747 : if ((parse->groupClause != NIL &&
3926 [ + + ]: 742 : root->numOrderedAggs == 0 &&
3927 [ + - ]: 706 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3928 : 706 : flags |= GROUPING_CAN_USE_HASH;
3929 : :
3930 : : /*
3931 : : * Determine whether partial aggregation is possible.
3932 : : */
3933 [ + + ]: 5041 : if (can_partial_agg(root))
3934 : 4464 : flags |= GROUPING_CAN_PARTIAL_AGG;
3935 : :
3936 : 5041 : extra.flags = flags;
3937 : 5041 : extra.target_parallel_safe = target_parallel_safe;
3938 : 5041 : extra.havingQual = parse->havingQual;
3939 : 5041 : extra.targetList = parse->targetList;
3940 : 5041 : extra.partial_costs_set = false;
3941 : :
3942 : : /*
3943 : : * Determine whether partitionwise aggregation is in theory possible.
3944 : : * It can be disabled by the user, and for now, we don't try to
3945 : : * support grouping sets. create_ordinary_grouping_paths() will check
3946 : : * additional conditions, such as whether input_rel is partitioned.
3947 : : */
3948 [ + + + + ]: 5041 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3949 : 115 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3950 : : else
3951 : 4926 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3952 : :
3953 : 10082 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3954 : 5041 : &agg_costs, gd, &extra,
3955 : : &partially_grouped_rel);
3956 : 5041 : }
3957 : :
3958 : 5048 : set_cheapest(grouped_rel);
3959 : 10096 : return grouped_rel;
3960 : 5048 : }
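    : :
    : : /*
    : :  * Illustrative note on the flags above: for
    : :  *    SELECT g, count(DISTINCT x) FROM t GROUP BY g;
    : :  * the DISTINCT aggregate counts toward numOrderedAggs, so only
    : :  * GROUPING_CAN_USE_SORT is set and no hashed-aggregation paths are
    : :  * built, whereas a plain count(*) would normally allow both
    : :  * GROUPING_CAN_USE_SORT and GROUPING_CAN_USE_HASH.
    : :  */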
3961 : :
3962 : : /*
3963 : : * make_grouping_rel
3964 : : *
3965 : : * Create a new grouping rel and set basic properties.
3966 : : *
3967 : : * input_rel represents the underlying scan/join relation.
3968 : : * target is the output expected from the grouping relation.
3969 : : */
3970 : : static RelOptInfo *
3971 : 5404 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3972 : : PathTarget *target, bool target_parallel_safe,
3973 : : Node *havingQual)
3974 : : {
3975 : 5404 : RelOptInfo *grouped_rel;
3976 : :
3977 [ + + + + : 5404 : if (IS_OTHER_REL(input_rel))
- + ]
3978 : : {
3979 : 712 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3980 : 356 : input_rel->relids);
3981 : 356 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3982 : 356 : }
3983 : : else
3984 : : {
3985 : : /*
3986 : : * By tradition, the relids set for the main grouping relation is
3987 : : * NULL. (This could be changed, but might require adjustments
3988 : : * elsewhere.)
3989 : : */
3990 : 5048 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3991 : : }
3992 : :
3993 : : /* Set target. */
3994 : 5404 : grouped_rel->reltarget = target;
3995 : :
3996 : : /*
3997 : : * If the input relation is not parallel-safe, then the grouped relation
3998 : : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3999 : : * target list and HAVING quals are parallel-safe.
4000 : : */
4001 [ + + + + : 5404 : if (input_rel->consider_parallel && target_parallel_safe &&
+ + ]
4002 : 3898 : is_parallel_safe(root, havingQual))
4003 : 3894 : grouped_rel->consider_parallel = true;
4004 : :
4005 : : /* Assume that the same path generation strategies are allowed */
4006 : 5404 : grouped_rel->pgs_mask = input_rel->pgs_mask;
4007 : :
4008 : : /*
4009 : : * If the input rel belongs to a single FDW, so does the grouped rel.
4010 : : */
4011 : 5404 : grouped_rel->serverid = input_rel->serverid;
4012 : 5404 : grouped_rel->userid = input_rel->userid;
4013 : 5404 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
4014 : 5404 : grouped_rel->fdwroutine = input_rel->fdwroutine;
4015 : :
4016 : 10808 : return grouped_rel;
4017 : 5404 : }
4018 : :
4019 : : /*
4020 : : * is_degenerate_grouping
4021 : : *
4022 : : * A degenerate grouping is one in which the query has a HAVING qual and/or
4023 : : * grouping sets, but no aggregates and no GROUP BY (which implies that the
4024 : : * grouping sets are all empty).
4025 : : */
4026 : : static bool
4027 : 5048 : is_degenerate_grouping(PlannerInfo *root)
4028 : : {
4029 : 5048 : Query *parse = root->parse;
4030 : :
4031 [ + + ]: 10096 : return (root->hasHavingQual || parse->groupingSets) &&
4032 [ + + ]: 5048 : !parse->hasAggs && parse->groupClause == NIL;
4033 : 5048 : }
4034 : :
4035 : : /*
4036 : : * create_degenerate_grouping_paths
4037 : : *
4038 : : * When the grouping is degenerate (see is_degenerate_grouping), we are
4039 : : * supposed to emit either zero or one row for each grouping set depending on
4040 : : * whether HAVING succeeds. Furthermore, there cannot be any variables in
4041 : : * either HAVING or the targetlist, so we actually do not need the FROM table
4042 : : * at all! We can just throw away the plan-so-far and generate a Result node.
4043 : : * This is a sufficiently unusual corner case that it's not worth contorting
4044 : : * the structure of this module to avoid having to generate the earlier paths
4045 : : * in the first place.
4046 : : */
4047 : : static void
4048 : 7 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4049 : : RelOptInfo *grouped_rel)
4050 : : {
4051 : 7 : Query *parse = root->parse;
4052 : 7 : int nrows;
4053 : 7 : Path *path;
4054 : :
4055 : 7 : nrows = list_length(parse->groupingSets);
4056 [ + + ]: 7 : if (nrows > 1)
4057 : : {
4058 : : /*
4059 : : * Doesn't seem worthwhile writing code to cons up a generate_series
4060 : : * or a values scan to emit multiple rows. Instead just make N clones
4061 : : * and append them. (With a volatile HAVING clause, this means you
4062 : : * might get between 0 and N output rows. Offhand I think that's
4063 : : * desired.)
4064 : : */
4065 : 2 : AppendPathInput append = {0};
4066 : :
4067 [ + + ]: 6 : while (--nrows >= 0)
4068 : : {
4069 : 4 : path = (Path *)
4070 : 8 : create_group_result_path(root, grouped_rel,
4071 : 4 : grouped_rel->reltarget,
4072 : 4 : (List *) parse->havingQual);
4073 : 4 : append.subpaths = lappend(append.subpaths, path);
4074 : : }
4075 : 2 : path = (Path *)
4076 : 4 : create_append_path(root,
4077 : 2 : grouped_rel,
4078 : : append,
4079 : : NIL,
4080 : : NULL,
4081 : : 0,
4082 : : false,
4083 : : -1);
4084 : 2 : }
4085 : : else
4086 : : {
4087 : : /* No grouping sets, or just one, so one output row */
4088 : 5 : path = (Path *)
4089 : 10 : create_group_result_path(root, grouped_rel,
4090 : 5 : grouped_rel->reltarget,
4091 : 5 : (List *) parse->havingQual);
4092 : : }
4093 : :
4094 : 7 : add_path(grouped_rel, path);
4095 : 7 : }
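    : :
    : : /*
    : :  * Worked example (illustrative): a query such as
    : :  *    SELECT 1 FROM t HAVING random() < 0.5;
    : :  * has a HAVING qual but no aggregates and no GROUP BY, so it is
    : :  * degenerate: the scan of t is thrown away and a single Result path
    : :  * emits 0 or 1 row.  With GROUP BY GROUPING SETS ((), ()) added, the
    : :  * Append of two Result clones emits between 0 and 2 rows, as the
    : :  * comment above notes.
    : :  */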
4096 : :
4097 : : /*
4098 : : * create_ordinary_grouping_paths
4099 : : *
4100 : : * Create grouping paths for the ordinary (that is, non-degenerate) case.
4101 : : *
4102 : : * We need to consider sorted and hashed aggregation in the same function,
4103 : : * because otherwise (1) it would be harder to throw an appropriate error
4104 : : * message if neither way works, and (2) we should not allow hashtable size
4105 : : * considerations to dissuade us from using hashing if sorting is not possible.
4106 : : *
4107 : : * *partially_grouped_rel_p will be set to the partially grouped rel which this
4108 : : * function creates, or to NULL if it doesn't create one.
4109 : : */
4110 : : static void
4111 : 5397 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4112 : : RelOptInfo *grouped_rel,
4113 : : const AggClauseCosts *agg_costs,
4114 : : grouping_sets_data *gd,
4115 : : GroupPathExtraData *extra,
4116 : : RelOptInfo **partially_grouped_rel_p)
4117 : : {
4118 : 5397 : RelOptInfo *partially_grouped_rel = NULL;
4119 : 5397 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4120 : :
4121 : : /*
4122 : : * If this is the topmost grouping relation or if the parent relation is
4123 : : * doing some form of partitionwise aggregation, then we may be able to do
4124 : : * it at this level also. However, if the input relation is not
4125 : : * partitioned, partitionwise aggregate is impossible.
4126 : : */
4127 [ + + + + ]: 5544 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4128 [ + + + - : 471 : IS_PARTITIONED_REL(input_rel))
+ + + - ]
4129 : : {
4130 : : /*
4131 : : * If this is the topmost relation or if the parent relation is doing
4132 : : * full partitionwise aggregation, then we can do full partitionwise
4133 : : * aggregation provided that the GROUP BY clause contains all of the
4134 : : * partitioning columns at this level and the collation used by GROUP
4135 : : * BY matches the partitioning collation. Otherwise, we can do at
4136 : : * most partial partitionwise aggregation. But if partial aggregation
4137 : : * is not supported in general then we can't use it for partitionwise
4138 : : * aggregation either.
4139 : : *
4140 : : * Check parse->groupClause, not processed_groupClause, because it's
4141 : : * okay if some of the partitioning columns were proved redundant.
4142 : : */
4143 [ + + + + ]: 143 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4144 : 254 : group_by_has_partkey(input_rel, extra->targetList,
4145 : 127 : root->parse->groupClause))
4146 : 80 : patype = PARTITIONWISE_AGGREGATE_FULL;
4147 [ + + ]: 63 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4148 : 56 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4149 : : else
4150 : 7 : patype = PARTITIONWISE_AGGREGATE_NONE;
4151 : 143 : }
4152 : :
4153 : : /*
4154 : : * Before generating paths for grouped_rel, we first generate any possible
4155 : : * partially grouped paths; that way, later code can easily consider both
4156 : : * parallel and non-parallel approaches to grouping.
4157 : : */
4158 [ + + ]: 5397 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4159 : : {
4160 : 4808 : bool force_rel_creation;
4161 : :
4162 : : /*
4163 : : * If we're doing partitionwise aggregation at this level, force
4164 : : * creation of a partially_grouped_rel so we can add partitionwise
4165 : : * paths to it.
4166 : : */
4167 : 4808 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4168 : :
4169 : 4808 : partially_grouped_rel =
4170 : 9616 : create_partial_grouping_paths(root,
4171 : 4808 : grouped_rel,
4172 : 4808 : input_rel,
4173 : 4808 : gd,
4174 : 4808 : extra,
4175 : 4808 : force_rel_creation);
4176 : 4808 : }
4177 : :
4178 : : /* Set out parameter. */
4179 : 5397 : *partially_grouped_rel_p = partially_grouped_rel;
4180 : :
4181 : : /* Apply partitionwise aggregation technique, if possible. */
4182 [ + + ]: 5397 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
4183 : 272 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4184 : 136 : partially_grouped_rel, agg_costs,
4185 : 136 : gd, patype, extra);
4186 : :
4187 : : /* If we are doing partial aggregation only, return. */
4188 [ + + ]: 5397 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4189 : : {
4190 [ + - ]: 142 : Assert(partially_grouped_rel);
4191 : :
4192 [ - + ]: 142 : if (partially_grouped_rel->pathlist)
4193 : 142 : set_cheapest(partially_grouped_rel);
4194 : :
4195 : 142 : return;
4196 : : }
4197 : :
4198 : : /* Gather any partially grouped partial paths. */
4199 [ + + + + ]: 5255 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4200 : 342 : gather_grouping_paths(root, partially_grouped_rel);
4201 : :
4202 : : /* Now choose the best path(s) for partially_grouped_rel. */
4203 [ + + - + ]: 5255 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
4204 : 379 : set_cheapest(partially_grouped_rel);
4205 : :
4206 : : /* Build final grouping paths */
4207 : 10510 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4208 : 5255 : partially_grouped_rel, agg_costs, gd,
4209 : 5255 : extra);
4210 : :
4211 : : /* Give a helpful error if we failed to find any implementation */
4212 [ + + ]: 5255 : if (grouped_rel->pathlist == NIL)
4213 [ + - + - ]: 1 : ereport(ERROR,
4214 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4215 : : errmsg("could not implement GROUP BY"),
4216 : : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4217 : :
4218 : : /*
4219 : : * If there is an FDW that's responsible for all baserels of the query,
4220 : : * let it consider adding ForeignPaths.
4221 : : */
4222 [ - + # # ]: 5254 : if (grouped_rel->fdwroutine &&
4223 : 0 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4224 : 0 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4225 : 0 : input_rel, grouped_rel,
4226 : 0 : extra);
4227 : :
4228 : : /* Let extensions possibly add some more paths */
4229 [ + - ]: 5254 : if (create_upper_paths_hook)
4230 : 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4231 : 0 : input_rel, grouped_rel,
4232 : 0 : extra);
4233 [ - + ]: 5396 : }
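
The three-way patype decision above can be summarized as a pure function. The
following is a simplified sketch (names invented); it assumes the caller has
already verified that extra->patype != PARTITIONWISE_AGGREGATE_NONE and that
the input relation is partitioned, as the code above does before choosing.

#include <stdbool.h>
#include <stdio.h>

typedef enum
{
	PWA_NONE,					/* no partitionwise aggregation */
	PWA_PARTIAL,				/* per-partition partial aggregation */
	PWA_FULL					/* per-partition complete aggregation */
} pwa_type;

/* Simplified mirror of the patype selection above. */
static pwa_type
choose_pwa(bool parent_allows_full, bool group_by_covers_partkey,
		   bool can_partial_agg)
{
	if (parent_allows_full && group_by_covers_partkey)
		return PWA_FULL;
	if (can_partial_agg)
		return PWA_PARTIAL;
	return PWA_NONE;
}

int
main(void)
{
	printf("%d\n", choose_pwa(true, false, true));	/* PWA_PARTIAL */
	return 0;
}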
4234 : :
4235 : : /*
4236 : : * For a given input path, consider the possible ways of doing grouping sets on
4237 : : * it, by combinations of hashing and sorting. This can be called multiple
4238 : : * times, so it's important that it not scribble on input. No result is
4239 : : * returned, but any generated paths are added to grouped_rel.
4240 : : */
4241 : : static void
4242 : 317 : consider_groupingsets_paths(PlannerInfo *root,
4243 : : RelOptInfo *grouped_rel,
4244 : : Path *path,
4245 : : bool is_sorted,
4246 : : bool can_hash,
4247 : : grouping_sets_data *gd,
4248 : : const AggClauseCosts *agg_costs,
4249 : : double dNumGroups)
4250 : : {
4251 : 317 : Query *parse = root->parse;
4252 : 317 : Size hash_mem_limit = get_hash_memory_limit();
4253 : :
4254 : : /*
4255 : : * If we're not being offered sorted input, then only consider plans that
4256 : : * can be done entirely by hashing.
4257 : : *
4258 : : * We can hash everything if it looks like it'll fit in hash_mem. But if
4259 : : * the input is actually sorted despite not being advertised as such, we
4260 : : * prefer to make use of that in order to use less memory.
4261 : : *
4262 : : * If none of the grouping sets are sortable, then ignore the hash_mem
4263 : : * limit and generate a path anyway, since otherwise we'll just fail.
4264 : : */
4265 [ + + ]: 317 : if (!is_sorted)
4266 : : {
4267 : 146 : List *new_rollups = NIL;
4268 : 146 : RollupData *unhashed_rollup = NULL;
4269 : 146 : List *sets_data;
4270 : 146 : List *empty_sets_data = NIL;
4271 : 146 : List *empty_sets = NIL;
4272 : 146 : ListCell *lc;
4273 : 146 : ListCell *l_start = list_head(gd->rollups);
4274 : 146 : AggStrategy strat = AGG_HASHED;
4275 : 146 : double hashsize;
4276 : 146 : double exclude_groups = 0.0;
4277 : :
4278 [ + - ]: 146 : Assert(can_hash);
4279 : :
4280 : : /*
4281 : : * If the input is coincidentally sorted usefully (which can happen
4282 : : * even if is_sorted is false, since that only means that our caller
4283 : : * has set up the sorting for us), then save some hashtable space by
4284 : : * making use of that. But we need to watch out for degenerate cases:
4285 : : *
4286 : : * 1) If there are any empty grouping sets, then group_pathkeys might
4287 : : * be NIL if all non-empty grouping sets are unsortable. In this case,
4288 : : * there will be a rollup containing only empty groups, and the
4289 : : * pathkeys_contained_in test is vacuously true; this is ok.
4290 : : *
4291 : : * XXX: the above relies on the fact that group_pathkeys is generated
4292 : : * from the first rollup. If we add the ability to consider multiple
4293 : : * sort orders for grouping input, this assumption might fail.
4294 : : *
4295 : : * 2) If there are no empty sets and only unsortable sets, then the
4296 : : * rollups list will be empty (and thus l_start == NULL), and
4297 : : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4298 : : * pathkeys_contained_in test doesn't cause us to crash.
4299 : : */
4300 [ + + + + ]: 146 : if (l_start != NULL &&
4301 : 145 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4302 : : {
4303 : 6 : unhashed_rollup = lfirst_node(RollupData, l_start);
4304 : 6 : exclude_groups = unhashed_rollup->numGroups;
4305 : 6 : l_start = lnext(gd->rollups, l_start);
4306 : 6 : }
4307 : :
4308 : 292 : hashsize = estimate_hashagg_tablesize(root,
4309 : 146 : path,
4310 : 146 : agg_costs,
4311 : 146 : dNumGroups - exclude_groups);
4312 : :
4313 : : /*
4314 : : * gd->rollups is empty if we have only unsortable columns to work
4315 : : * with. Override hash_mem in that case; otherwise, we'll rely on the
4316 : : * sorted-input case to generate usable mixed paths.
4317 : : */
4318 [ + + - + ]: 146 : if (hashsize > hash_mem_limit && gd->rollups)
4319 : 3 : return; /* nope, won't fit */
4320 : :
4321 : : /*
4322 : : * We need to burst the existing rollups list into individual grouping
4323 : : * sets and recompute a groupClause for each set.
4324 : : */
4325 : 143 : sets_data = list_copy(gd->unsortable_sets);
4326 : :
4327 [ + + + + : 366 : for_each_cell(lc, gd->rollups, l_start)
+ + + + ]
4328 : : {
4329 : 223 : RollupData *rollup = lfirst_node(RollupData, lc);
4330 : :
4331 : : /*
4332 : : * If we find an unhashable rollup that's not been skipped by the
4333 : : * "actually sorted" check above, we can't cope; we'd need sorted
4334 : : * input (with a different sort order) but we can't get that here.
4335 : : * So bail out; we'll get a valid path from the is_sorted case
4336 : : * instead.
4337 : : *
4338 : : * The mere presence of empty grouping sets doesn't make a rollup
4339 : : * unhashable (see preprocess_grouping_sets); we handle those
4340 : : * specially below.
4341 : : */
4342 [ + + ]: 223 : if (!rollup->hashable)
4343 : 4 : return;
4344 : :
4345 : 219 : sets_data = list_concat(sets_data, rollup->gsets_data);
4346 [ + + ]: 223 : }
4347 [ + - + + : 564 : foreach(lc, sets_data)
+ + ]
4348 : : {
4349 : 425 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4350 : 425 : List *gset = gs->set;
4351 : 425 : RollupData *rollup;
4352 : :
4353 [ + + ]: 425 : if (gset == NIL)
4354 : : {
4355 : : /* Empty grouping sets can't be hashed. */
4356 : 85 : empty_sets_data = lappend(empty_sets_data, gs);
4357 : 85 : empty_sets = lappend(empty_sets, NIL);
4358 : 85 : }
4359 : : else
4360 : : {
4361 : 340 : rollup = makeNode(RollupData);
4362 : :
4363 : 340 : rollup->groupClause = preprocess_groupclause(root, gset);
4364 : 340 : rollup->gsets_data = list_make1(gs);
4365 : 680 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4366 : 340 : rollup->gsets_data,
4367 : 340 : gd->tleref_to_colnum_map);
4368 : 340 : rollup->numGroups = gs->numGroups;
4369 : 340 : rollup->hashable = true;
4370 : 340 : rollup->is_hashed = true;
4371 : 340 : new_rollups = lappend(new_rollups, rollup);
4372 : : }
4373 : 425 : }
4374 : :
4375 : : /*
4376 : : * If we didn't find anything nonempty to hash, then bail. We'll
4377 : : * generate a path from the is_sorted case.
4378 : : */
4379 [ + - ]: 139 : if (new_rollups == NIL)
4380 : 0 : return;
4381 : :
4382 : : /*
4383 : : * If there were empty grouping sets they should have been in the
4384 : : * first rollup.
4385 : : */
4386 [ + + + - ]: 139 : Assert(!unhashed_rollup || !empty_sets);
4387 : :
4388 [ + + ]: 139 : if (unhashed_rollup)
4389 : : {
4390 : 6 : new_rollups = lappend(new_rollups, unhashed_rollup);
4391 : 6 : strat = AGG_MIXED;
4392 : 6 : }
4393 [ + + ]: 133 : else if (empty_sets)
4394 : : {
4395 : 77 : RollupData *rollup = makeNode(RollupData);
4396 : :
4397 : 77 : rollup->groupClause = NIL;
4398 : 77 : rollup->gsets_data = empty_sets_data;
4399 : 77 : rollup->gsets = empty_sets;
4400 : 77 : rollup->numGroups = list_length(empty_sets);
4401 : 77 : rollup->hashable = false;
4402 : 77 : rollup->is_hashed = false;
4403 : 77 : new_rollups = lappend(new_rollups, rollup);
4404 : 77 : strat = AGG_MIXED;
4405 : 77 : }
4406 : :
4407 : 278 : add_path(grouped_rel, (Path *)
4408 : 278 : create_groupingsets_path(root,
4409 : 139 : grouped_rel,
4410 : 139 : path,
4411 : 139 : (List *) parse->havingQual,
4412 : 139 : strat,
4413 : 139 : new_rollups,
4414 : 139 : agg_costs));
4415 : 139 : return;
4416 : 146 : }
4417 : :
4418 : : /*
4419 : : * If we have sorted input but nothing we can do with it, bail.
4420 : : */
4421 [ + - ]: 171 : if (gd->rollups == NIL)
4422 : 0 : return;
4423 : :
4424 : : /*
4425 : : * Given sorted input, we try to make two paths: one sorted and one mixed
4426 : : * sort/hash. (We need to try both because hashagg might be disabled, or
4427 : : * some columns might not be sortable.)
4428 : : *
4429 : : * can_hash is passed in as false if some obstacle elsewhere (such as
4430 : : * ordered aggs) means that we shouldn't consider hashing at all.
4431 : : */
4432 [ + + - + ]: 171 : if (can_hash && gd->any_hashable)
4433 : : {
4434 : 155 : List *rollups = NIL;
4435 : 155 : List *hash_sets = list_copy(gd->unsortable_sets);
4436 : 155 : double availspace = hash_mem_limit;
4437 : 155 : ListCell *lc;
4438 : :
4439 : : /*
4440 : : * Account first for space needed for groups we can't sort at all.
4441 : : */
4442 : 310 : availspace -= estimate_hashagg_tablesize(root,
4443 : 155 : path,
4444 : 155 : agg_costs,
4445 : 155 : gd->dNumHashGroups);
4446 : :
4447 [ + - + + ]: 155 : if (availspace > 0 && list_length(gd->rollups) > 1)
4448 : : {
4449 : 80 : double scale;
4450 : 80 : int num_rollups = list_length(gd->rollups);
4451 : 80 : int k_capacity;
4452 : 80 : int *k_weights = palloc(num_rollups * sizeof(int));
4453 : 80 : Bitmapset *hash_items = NULL;
4454 : 80 : int i;
4455 : :
4456 : : /*
4457 : : * We treat this as a knapsack problem: the knapsack capacity
4458 : : * represents hash_mem, the item weights are the estimated memory
4459 : : * usage of the hashtables needed to implement a single rollup,
4460 : : * and we really ought to use the cost saving as the item value;
4461 : : * however, currently the costs assigned to sort nodes don't
4462 : : * reflect the comparison costs well, and so we treat all items as
4463 : : * of equal value (each rollup we hash instead saves us one sort).
4464 : : *
4465 : : * To use the discrete knapsack, we need to scale the values to a
4466 : : * reasonably small bounded range. We choose to allow a 5% error
4467 : : * margin; we have no more than 4096 rollups in the worst possible
4468 : : * case, which with a 5% error margin will require a bit over 42MB
4469 : : * of workspace. (Anyone wanting to plan queries that complex had
4470 : : * better have the memory for it. In more reasonable cases, with
4471 : : * no more than a couple of dozen rollups, the memory usage will
4472 : : * be negligible.)
4473 : : *
4474 : : * k_capacity is naturally bounded, but we clamp the values for
4475 : : * scale and weight (below) to avoid overflows or underflows (or
4476 : : * uselessly trying to use a scale factor less than 1 byte).
4477 : : */
4478 [ + - ]: 80 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4479 : 80 : k_capacity = (int) floor(availspace / scale);
4480 : :
4481 : : /*
4482 : : * We leave the first rollup out of consideration since it's the
4483 : : * one that matches the input sort order. We assign indexes "i"
4484 : : * to only those entries considered for hashing; the second loop,
4485 : : * below, must use the same condition.
4486 : : */
4487 : 80 : i = 0;
4488 [ + - + + : 204 : for_each_from(lc, gd->rollups, 1)
+ + ]
4489 : : {
4490 : 124 : RollupData *rollup = lfirst_node(RollupData, lc);
4491 : :
4492 [ - + ]: 124 : if (rollup->hashable)
4493 : : {
4494 : 248 : double sz = estimate_hashagg_tablesize(root,
4495 : 124 : path,
4496 : 124 : agg_costs,
4497 : 124 : rollup->numGroups);
4498 : :
4499 : : /*
4500 : : * If sz is enormous, but hash_mem (and hence scale) is
4501 : : * small, avoid integer overflow here.
4502 : : */
4503 [ + + ]: 124 : k_weights[i] = (int) Min(floor(sz / scale),
4504 : : k_capacity + 1.0);
4505 : 124 : ++i;
4506 : 124 : }
4507 : 124 : }
4508 : :
4509 : : /*
4510 : : * Apply knapsack algorithm; compute the set of items which
4511 : : * maximizes the value stored (in this case the number of sorts
4512 : : * saved) while keeping the total size (approximately) within
4513 : : * capacity.
4514 : : */
4515 [ - + ]: 80 : if (i > 0)
4516 : 80 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4517 : :
4518 [ - + ]: 80 : if (!bms_is_empty(hash_items))
4519 : : {
4520 : 80 : rollups = list_make1(linitial(gd->rollups));
4521 : :
4522 : 80 : i = 0;
4523 [ + - + + : 204 : for_each_from(lc, gd->rollups, 1)
+ + ]
4524 : : {
4525 : 124 : RollupData *rollup = lfirst_node(RollupData, lc);
4526 : :
4527 [ + - ]: 124 : if (rollup->hashable)
4528 : : {
4529 [ + + ]: 124 : if (bms_is_member(i, hash_items))
4530 : 236 : hash_sets = list_concat(hash_sets,
4531 : 118 : rollup->gsets_data);
4532 : : else
4533 : 6 : rollups = lappend(rollups, rollup);
4534 : 124 : ++i;
4535 : 124 : }
4536 : : else
4537 : 0 : rollups = lappend(rollups, rollup);
4538 : 124 : }
4539 : 80 : }
4540 : 80 : }
4541 : :
4542 [ + + + + ]: 155 : if (!rollups && hash_sets)
4543 : 4 : rollups = list_copy(gd->rollups);
4544 : :
4545 [ + + + + : 296 : foreach(lc, hash_sets)
+ + ]
4546 : : {
4547 : 141 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4548 : 141 : RollupData *rollup = makeNode(RollupData);
4549 : :
4550 [ + - ]: 141 : Assert(gs->set != NIL);
4551 : :
4552 : 141 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4553 : 141 : rollup->gsets_data = list_make1(gs);
4554 : 282 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4555 : 141 : rollup->gsets_data,
4556 : 141 : gd->tleref_to_colnum_map);
4557 : 141 : rollup->numGroups = gs->numGroups;
4558 : 141 : rollup->hashable = true;
4559 : 141 : rollup->is_hashed = true;
4560 : 141 : rollups = lcons(rollup, rollups);
4561 : 141 : }
4562 : :
4563 [ + + ]: 155 : if (rollups)
4564 : : {
4565 : 168 : add_path(grouped_rel, (Path *)
4566 : 168 : create_groupingsets_path(root,
4567 : 84 : grouped_rel,
4568 : 84 : path,
4569 : 84 : (List *) parse->havingQual,
4570 : : AGG_MIXED,
4571 : 84 : rollups,
4572 : 84 : agg_costs));
4573 : 84 : }
4574 : 155 : }
4575 : :
4576 : : /*
4577 : : * Now try the simple sorted case.
4578 : : */
4579 [ + + ]: 171 : if (!gd->unsortable_sets)
4580 : 332 : add_path(grouped_rel, (Path *)
4581 : 332 : create_groupingsets_path(root,
4582 : 166 : grouped_rel,
4583 : 166 : path,
4584 : 166 : (List *) parse->havingQual,
4585 : : AGG_SORTED,
4586 : 166 : gd->rollups,
4587 : 166 : agg_costs));
4588 [ - + ]: 317 : }
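
The scale/clamp arithmetic in the knapsack setup above is easy to sanity-check
standalone. A compilable sketch with invented sizes follows; fmax/fmin replace
the backend's Max/Min macros (build with -lm).

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double	availspace = 4.0 * 1024 * 1024;	/* hypothetical bytes left */
	int		num_rollups = 8;
	double	sz = 3.0 * 1024 * 1024;			/* hypothetical hashtable size */
	double	scale;
	int		k_capacity;
	int		weight;

	/* 5% error margin: at most ~20 "units" per rollup */
	scale = fmax(availspace / (20.0 * num_rollups), 1.0);
	k_capacity = (int) floor(availspace / scale);

	/* clamp so an oversized item can never fit (mirrors k_weights[i]) */
	weight = (int) fmin(floor(sz / scale), k_capacity + 1.0);

	printf("scale=%.0f capacity=%d weight=%d\n", scale, k_capacity, weight);
	return 0;
}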
4589 : :
4590 : : /*
4591 : : * create_window_paths
4592 : : *
4593 : : * Build a new upperrel containing Paths for window-function evaluation.
4594 : : *
4595 : : * input_rel: contains the source-data Paths
4596 : : * input_target: result of make_window_input_target
4597 : : * output_target: what the topmost WindowAggPath should return
4598 : : * wflists: result of find_window_functions
4599 : : * activeWindows: result of select_active_windows
4600 : : *
4601 : : * Note: all Paths in input_rel are expected to return input_target.
4602 : : */
4603 : : static RelOptInfo *
4604 : 426 : create_window_paths(PlannerInfo *root,
4605 : : RelOptInfo *input_rel,
4606 : : PathTarget *input_target,
4607 : : PathTarget *output_target,
4608 : : bool output_target_parallel_safe,
4609 : : WindowFuncLists *wflists,
4610 : : List *activeWindows)
4611 : : {
4612 : 426 : RelOptInfo *window_rel;
4613 : 426 : ListCell *lc;
4614 : :
4615 : : /* For now, do all work in the (WINDOW, NULL) upperrel */
4616 : 426 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4617 : :
4618 : : /*
4619 : : * If the input relation is not parallel-safe, then the window relation
4620 : : * can't be parallel-safe, either. Otherwise, we need to examine the
4621 : : * target list and active windows for non-parallel-safe constructs.
4622 : : */
4623 [ + + - + : 426 : if (input_rel->consider_parallel && output_target_parallel_safe &&
# # ]
4624 : 0 : is_parallel_safe(root, (Node *) activeWindows))
4625 : 0 : window_rel->consider_parallel = true;
4626 : :
4627 : : /*
4628 : : * If the input rel belongs to a single FDW, so does the window rel.
4629 : : */
4630 : 426 : window_rel->serverid = input_rel->serverid;
4631 : 426 : window_rel->userid = input_rel->userid;
4632 : 426 : window_rel->useridiscurrent = input_rel->useridiscurrent;
4633 : 426 : window_rel->fdwroutine = input_rel->fdwroutine;
4634 : :
4635 : : /*
4636 : : * Consider computing window functions starting from the existing
4637 : : * cheapest-total path (which will likely require a sort) as well as any
4638 : : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4639 : : */
4640 [ + - + + : 906 : foreach(lc, input_rel->pathlist)
+ + ]
4641 : : {
4642 : 480 : Path *path = (Path *) lfirst(lc);
4643 : 480 : int presorted_keys;
4644 : :
4645 [ + + ]: 480 : if (path == input_rel->cheapest_total_path ||
4646 : 54 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4647 [ + + + + ]: 54 : &presorted_keys) ||
4648 : 23 : presorted_keys > 0)
4649 : 922 : create_one_window_path(root,
4650 : 461 : window_rel,
4651 : 461 : path,
4652 : 461 : input_target,
4653 : 461 : output_target,
4654 : 461 : wflists,
4655 : 461 : activeWindows);
4656 : 480 : }
4657 : :
4658 : : /*
4659 : : * If there is an FDW that's responsible for all baserels of the query,
4660 : : * let it consider adding ForeignPaths.
4661 : : */
4662 [ - + # # ]: 426 : if (window_rel->fdwroutine &&
4663 : 0 : window_rel->fdwroutine->GetForeignUpperPaths)
4664 : 0 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4665 : 0 : input_rel, window_rel,
4666 : : NULL);
4667 : :
4668 : : /* Let extensions possibly add some more paths */
4669 [ + - ]: 426 : if (create_upper_paths_hook)
4670 : 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4671 : 0 : input_rel, window_rel, NULL);
4672 : :
4673 : : /* Now choose the best path(s) */
4674 : 426 : set_cheapest(window_rel);
4675 : :
4676 : 852 : return window_rel;
4677 : 426 : }
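
The path-selection loop above leans on pathkeys_count_contained_in(); with
ints standing in for canonical PathKey pointers (which compare by identity),
its contract looks roughly like this simplified sketch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Return true if "needed" is a leading prefix of "given"; either way, report
 * how many leading keys match (the presorted_keys of the calls above).
 */
static bool
prefix_count(const int *needed, int nneeded,
			 const int *given, int ngiven, int *n_common)
{
	int		i;

	for (i = 0; i < nneeded && i < ngiven; i++)
		if (needed[i] != given[i])
			break;
	*n_common = i;
	return (i == nneeded);
}

int
main(void)
{
	int		needed[] = {1, 2, 3};
	int		given[] = {1, 2, 9};
	int		n;

	printf("sorted=%d presorted=%d\n",
		   (int) prefix_count(needed, 3, given, 3, &n), n);
	return 0;
}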
4678 : :
4679 : : /*
4680 : : * Stack window-function implementation steps atop the given Path, and
4681 : : * add the result to window_rel.
4682 : : *
4683 : : * window_rel: upperrel to contain result
4684 : : * path: input Path to use (must return input_target)
4685 : : * input_target: result of make_window_input_target
4686 : : * output_target: what the topmost WindowAggPath should return
4687 : : * wflists: result of find_window_functions
4688 : : * activeWindows: result of select_active_windows
4689 : : */
4690 : : static void
4691 : 461 : create_one_window_path(PlannerInfo *root,
4692 : : RelOptInfo *window_rel,
4693 : : Path *path,
4694 : : PathTarget *input_target,
4695 : : PathTarget *output_target,
4696 : : WindowFuncLists *wflists,
4697 : : List *activeWindows)
4698 : : {
4699 : 461 : PathTarget *window_target;
4700 : 461 : ListCell *l;
4701 : 461 : List *topqual = NIL;
4702 : :
4703 : : /*
4704 : : * Since each window clause could require a different sort order, we stack
4705 : : * up a WindowAgg node for each clause, with sort steps between them as
4706 : : * needed. (We assume that select_active_windows chose a good order for
4707 : : * executing the clauses in.)
4708 : : *
4709 : : * input_target should contain all Vars and Aggs needed for the result.
4710 : : * (In some cases we wouldn't need to propagate all of these all the way
4711 : : * to the top, since they might only be needed as inputs to WindowFuncs.
4712 : : * It's probably not worth trying to optimize that though.) It must also
4713 : : * contain all window partitioning and sorting expressions, to ensure
4714 : : * they're computed only once at the bottom of the stack (that's critical
4715 : : * for volatile functions). As we climb up the stack, we'll add outputs
4716 : : * for the WindowFuncs computed at each level.
4717 : : */
4718 : 461 : window_target = input_target;
4719 : :
4720 [ + - + + : 953 : foreach(l, activeWindows)
+ + ]
4721 : : {
4722 : 492 : WindowClause *wc = lfirst_node(WindowClause, l);
4723 : 492 : List *window_pathkeys;
4724 : 492 : List *runcondition = NIL;
4725 : 492 : int presorted_keys;
4726 : 492 : bool is_sorted;
4727 : 492 : bool topwindow;
4728 : 492 : ListCell *lc2;
4729 : :
4730 : 984 : window_pathkeys = make_pathkeys_for_window(root,
4731 : 492 : wc,
4732 : 492 : root->processed_tlist);
4733 : :
4734 : 984 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
4735 : 492 : path->pathkeys,
4736 : : &presorted_keys);
4737 : :
4738 : : /* Sort if necessary */
4739 [ + + ]: 492 : if (!is_sorted)
4740 : : {
4741 : : /*
4742 : : * No presorted keys, or incremental sort disabled; just perform a
4743 : : * complete sort.
4744 : : */
4745 [ + + - + ]: 361 : if (presorted_keys == 0 || !enable_incremental_sort)
4746 : 702 : path = (Path *) create_sort_path(root, window_rel,
4747 : 351 : path,
4748 : 351 : window_pathkeys,
4749 : : -1.0);
4750 : : else
4751 : : {
4752 : : /*
4753 : : * Since we have presorted keys and incremental sort is
4754 : : * enabled, just use incremental sort.
4755 : : */
4756 : 20 : path = (Path *) create_incremental_sort_path(root,
4757 : 10 : window_rel,
4758 : 10 : path,
4759 : 10 : window_pathkeys,
4760 : 10 : presorted_keys,
4761 : : -1.0);
4762 : : }
4763 : 361 : }
4764 : :
4765 [ + + ]: 492 : if (lnext(activeWindows, l))
4766 : : {
4767 : : /*
4768 : : * Add the current WindowFuncs to the output target for this
4769 : : * intermediate WindowAggPath. We must copy window_target to
4770 : : * avoid changing the previous path's target.
4771 : : *
4772 : : * Note: a WindowFunc adds nothing to the target's eval costs; but
4773 : : * we do need to account for the increase in tlist width.
4774 : : */
4775 : 31 : int64 tuple_width = window_target->width;
4776 : :
4777 : 31 : window_target = copy_pathtarget(window_target);
4778 [ + - + + : 74 : foreach(lc2, wflists->windowFuncs[wc->winref])
+ + ]
4779 : : {
4780 : 43 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4781 : :
4782 : 43 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4783 : 43 : tuple_width += get_typavgwidth(wfunc->wintype, -1);
4784 : 43 : }
4785 : 31 : window_target->width = clamp_width_est(tuple_width);
4786 : 31 : }
4787 : : else
4788 : : {
4789 : : /* Install the goal target in the topmost WindowAgg */
4790 : 461 : window_target = output_target;
4791 : : }
4792 : :
4793 : : /* mark the final item in the list as the top-level window */
4794 : 492 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4795 : :
4796 : : /*
4797 : : * Collect the WindowFuncRunConditions from each WindowFunc and
4798 : : * convert them into OpExprs
4799 : : */
4800 [ + - + + : 1131 : foreach(lc2, wflists->windowFuncs[wc->winref])
+ + ]
4801 : : {
4802 : 639 : ListCell *lc3;
4803 : 639 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4804 : :
4805 [ + + + + : 669 : foreach(lc3, wfunc->runCondition)
+ + ]
4806 : : {
4807 : 60 : WindowFuncRunCondition *wfuncrc =
4808 : 30 : lfirst_node(WindowFuncRunCondition, lc3);
4809 : 30 : Expr *opexpr;
4810 : 30 : Expr *leftop;
4811 : 30 : Expr *rightop;
4812 : :
4813 [ + + ]: 30 : if (wfuncrc->wfunc_left)
4814 : : {
4815 : 27 : leftop = (Expr *) copyObject(wfunc);
4816 : 27 : rightop = copyObject(wfuncrc->arg);
4817 : 27 : }
4818 : : else
4819 : : {
4820 : 3 : leftop = copyObject(wfuncrc->arg);
4821 : 3 : rightop = (Expr *) copyObject(wfunc);
4822 : : }
4823 : :
4824 : 60 : opexpr = make_opclause(wfuncrc->opno,
4825 : : BOOLOID,
4826 : : false,
4827 : 30 : leftop,
4828 : 30 : rightop,
4829 : : InvalidOid,
4830 : 30 : wfuncrc->inputcollid);
4831 : :
4832 : 30 : runcondition = lappend(runcondition, opexpr);
4833 : :
4834 [ + + ]: 30 : if (!topwindow)
4835 : 4 : topqual = lappend(topqual, opexpr);
4836 : 30 : }
4837 : 639 : }
4838 : :
4839 : 492 : path = (Path *)
4840 : 984 : create_windowagg_path(root, window_rel, path, window_target,
4841 : 492 : wflists->windowFuncs[wc->winref],
4842 : 492 : runcondition, wc,
4843 [ + + ]: 492 : topwindow ? topqual : NIL, topwindow);
4844 : 492 : }
4845 : :
4846 : 461 : add_path(window_rel, path);
4847 : 461 : }
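
The wfunc_left handling above merely decides which side of the generated
OpExpr the WindowFunc lands on. Schematically, with strings standing in for
expression trees (a hedged sketch; the quoted qual is illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Which side of the run-condition operator the window function goes on. */
static void
show_runcond(bool wfunc_left, const char *wfunc, const char *arg,
			 const char *opname)
{
	if (wfunc_left)
		printf("%s %s %s\n", wfunc, opname, arg);
	else
		printf("%s %s %s\n", arg, opname, wfunc);
}

int
main(void)
{
	/* e.g. a qual like "row_number() OVER w <= 10" */
	show_runcond(true, "row_number() OVER w", "10", "<=");
	return 0;
}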
4848 : :
4849 : : /*
4850 : : * create_distinct_paths
4851 : : *
4852 : : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4853 : : *
4854 : : * input_rel: contains the source-data Paths
4855 : : * target: the pathtarget for the result Paths to compute
4856 : : *
4857 : : * Note: input paths should already compute the desired pathtarget, since
4858 : : * Sort/Unique won't project anything.
4859 : : */
4860 : : static RelOptInfo *
4861 : 149 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4862 : : PathTarget *target)
4863 : : {
4864 : 149 : RelOptInfo *distinct_rel;
4865 : :
4866 : : /* For now, do all work in the (DISTINCT, NULL) upperrel */
4867 : 149 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4868 : :
4869 : : /*
4870 : : * We don't compute anything at this level, so distinct_rel will be
4871 : : * parallel-safe if the input rel is parallel-safe. In particular, if
4872 : : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4873 : : * output those expressions, and will not be parallel-safe unless those
4874 : : * expressions are parallel-safe.
4875 : : */
4876 : 149 : distinct_rel->consider_parallel = input_rel->consider_parallel;
4877 : :
4878 : : /*
4879 : : * If the input rel belongs to a single FDW, so does the distinct_rel.
4880 : : */
4881 : 149 : distinct_rel->serverid = input_rel->serverid;
4882 : 149 : distinct_rel->userid = input_rel->userid;
4883 : 149 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4884 : 149 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4885 : :
4886 : : /* build distinct paths based on input_rel's pathlist */
4887 : 149 : create_final_distinct_paths(root, input_rel, distinct_rel);
4888 : :
4889 : : /* now build distinct paths based on input_rel's partial_pathlist */
4890 : 149 : create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4891 : :
4892 : : /* Give a helpful error if we failed to create any paths */
4893 [ + - ]: 149 : if (distinct_rel->pathlist == NIL)
4894 [ # # # # ]: 0 : ereport(ERROR,
4895 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4896 : : errmsg("could not implement DISTINCT"),
4897 : : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4898 : :
4899 : : /*
4900 : : * If there is an FDW that's responsible for all baserels of the query,
4901 : : * let it consider adding ForeignPaths.
4902 : : */
4903 [ - + # # ]: 149 : if (distinct_rel->fdwroutine &&
4904 : 0 : distinct_rel->fdwroutine->GetForeignUpperPaths)
4905 : 0 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4906 : : UPPERREL_DISTINCT,
4907 : 0 : input_rel,
4908 : 0 : distinct_rel,
4909 : : NULL);
4910 : :
4911 : : /* Let extensions possibly add some more paths */
4912 [ + - ]: 149 : if (create_upper_paths_hook)
4913 : 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4914 : 0 : distinct_rel, NULL);
4915 : :
4916 : : /* Now choose the best path(s) */
4917 : 149 : set_cheapest(distinct_rel);
4918 : :
4919 : 298 : return distinct_rel;
4920 : 149 : }
4921 : :
4922 : : /*
4923 : : * create_partial_distinct_paths
4924 : : *
4925 : : * Process 'input_rel' partial paths and add unique/aggregate paths to the
4926 : : * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4927 : : * paths on top and add a final unique/aggregate path to remove any duplicates
4928 : : * produced by combining rows from parallel workers.
4929 : : */
4930 : : static void
4931 : 149 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4932 : : RelOptInfo *final_distinct_rel,
4933 : : PathTarget *target)
4934 : : {
4935 : 149 : RelOptInfo *partial_distinct_rel;
4936 : 149 : Query *parse;
4937 : 149 : List *distinctExprs;
4938 : 149 : double numDistinctRows;
4939 : 149 : Path *cheapest_partial_path;
4940 : 149 : ListCell *lc;
4941 : :
4942 : : /* nothing to do when there are no partial paths in the input rel */
4943 [ + + + + ]: 149 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4944 : 131 : return;
4945 : :
4946 : 18 : parse = root->parse;
4947 : :
4948 : : /* can't do parallel DISTINCT ON */
4949 [ - + ]: 18 : if (parse->hasDistinctOn)
4950 : 0 : return;
4951 : :
4952 : 18 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4953 : : NULL);
4954 : 18 : partial_distinct_rel->reltarget = target;
4955 : 18 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4956 : :
4957 : : /*
4958 : : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4959 : : */
4960 : 18 : partial_distinct_rel->serverid = input_rel->serverid;
4961 : 18 : partial_distinct_rel->userid = input_rel->userid;
4962 : 18 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4963 : 18 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4964 : :
4965 : 18 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4966 : :
4967 : 36 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4968 : 18 : parse->targetList);
4969 : :
4970 : : /* estimate how many distinct rows we'll get from each worker */
4971 : 36 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4972 : 18 : cheapest_partial_path->rows,
4973 : : NULL, NULL);
4974 : :
4975 : : /*
4976 : : * Try sorting the cheapest path and incrementally sorting any paths with
4977 : : * presorted keys and put a unique paths atop of those. We'll also
4978 : : * attempt to reorder the required pathkeys to match the input path's
4979 : : * pathkeys as much as possible, in hopes of avoiding a possible need to
4980 : : * re-sort.
4981 : : */
4982 [ - + ]: 18 : if (grouping_is_sortable(root->processed_distinctClause))
4983 : : {
4984 [ + - + + : 39 : foreach(lc, input_rel->partial_pathlist)
+ + ]
4985 : : {
4986 : 21 : Path *input_path = (Path *) lfirst(lc);
4987 : 21 : Path *sorted_path;
4988 : 21 : List *useful_pathkeys_list = NIL;
4989 : :
4990 : 21 : useful_pathkeys_list =
4991 : 42 : get_useful_pathkeys_for_distinct(root,
4992 : 21 : root->distinct_pathkeys,
4993 : 21 : input_path->pathkeys);
4994 [ + - ]: 21 : Assert(list_length(useful_pathkeys_list) > 0);
4995 : :
4996 [ + + + - : 65 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
+ + + + ]
4997 : : {
4998 : 46 : sorted_path = make_ordered_path(root,
4999 : 23 : partial_distinct_rel,
5000 : 23 : input_path,
5001 : 23 : cheapest_partial_path,
5002 : 23 : useful_pathkeys,
5003 : : -1.0);
5004 : :
5005 [ + + ]: 23 : if (sorted_path == NULL)
5006 : 2 : continue;
5007 : :
5008 : : /*
5009 : : * An empty distinct_pathkeys means all tuples have the same
5010 : : * value for the DISTINCT clause. See
5011 : : * create_final_distinct_paths()
5012 : : */
5013 [ + + ]: 21 : if (root->distinct_pathkeys == NIL)
5014 : : {
5015 : 1 : Node *limitCount;
5016 : :
5017 : 1 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5018 : : sizeof(int64),
5019 : 1 : Int64GetDatum(1), false,
5020 : : true);
5021 : :
5022 : : /*
5023 : : * Apply a LimitPath onto the partial path to restrict the
5024 : : * tuples from each worker to 1.
5025 : : * create_final_distinct_paths will need to apply an
5026 : : * additional LimitPath to restrict this to a single row
5027 : : * after the Gather node. If the query already has a
5028 : : * LIMIT clause, then we could end up with three Limit
5029 : : * nodes in the final plan. Consolidating the top two of
5030 : : * these could be done, but does not seem worth troubling
5031 : : * over.
5032 : : */
5033 : 2 : add_partial_path(partial_distinct_rel, (Path *)
5034 : 2 : create_limit_path(root, partial_distinct_rel,
5035 : 1 : sorted_path,
5036 : : NULL,
5037 : 1 : limitCount,
5038 : : LIMIT_OPTION_COUNT,
5039 : : 0, 1));
5040 : 1 : }
5041 : : else
5042 : : {
5043 : 40 : add_partial_path(partial_distinct_rel, (Path *)
5044 : 40 : create_unique_path(root, partial_distinct_rel,
5045 : 20 : sorted_path,
5046 : 20 : list_length(root->distinct_pathkeys),
5047 : 20 : numDistinctRows));
5048 : : }
5049 : 42 : }
5050 : 21 : }
5051 : 18 : }
5052 : :
5053 : : /*
5054 : : * Now try hash aggregate paths, if enabled and hashing is possible. Since
5055 : : * we're not on the hook to ensure we do our best to create at least one
5056 : : * path here, we treat enable_hashagg as a hard off-switch rather than the
5057 : : * slightly softer variant in create_final_distinct_paths.
5058 : : */
5059 [ + + - + ]: 18 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5060 : : {
5061 : 26 : add_partial_path(partial_distinct_rel, (Path *)
5062 : 26 : create_agg_path(root,
5063 : 13 : partial_distinct_rel,
5064 : 13 : cheapest_partial_path,
5065 : 13 : cheapest_partial_path->pathtarget,
5066 : : AGG_HASHED,
5067 : : AGGSPLIT_SIMPLE,
5068 : 13 : root->processed_distinctClause,
5069 : : NIL,
5070 : : NULL,
5071 : 13 : numDistinctRows));
5072 : 13 : }
5073 : :
5074 : : /*
5075 : : * If there is an FDW that's responsible for all baserels of the query,
5076 : : * let it consider adding ForeignPaths.
5077 : : */
5078 [ - + # # ]: 18 : if (partial_distinct_rel->fdwroutine &&
5079 : 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5080 : 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5081 : : UPPERREL_PARTIAL_DISTINCT,
5082 : 0 : input_rel,
5083 : 0 : partial_distinct_rel,
5084 : : NULL);
5085 : :
5086 : : /* Let extensions possibly add some more partial paths */
5087 [ + - ]: 18 : if (create_upper_paths_hook)
5088 : 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5089 : 0 : input_rel, partial_distinct_rel, NULL);
5090 : :
5091 [ - + ]: 18 : if (partial_distinct_rel->partial_pathlist != NIL)
5092 : : {
5093 : 18 : generate_useful_gather_paths(root, partial_distinct_rel, true);
5094 : 18 : set_cheapest(partial_distinct_rel);
5095 : :
5096 : : /*
5097 : : * Finally, create paths to distinctify the final result. This step
5098 : : * is needed to remove any duplicates due to combining rows from
5099 : : * parallel workers.
5100 : : */
5101 : 36 : create_final_distinct_paths(root, partial_distinct_rel,
5102 : 18 : final_distinct_rel);
5103 : 18 : }
5104 [ - + ]: 149 : }
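
Putting the pieces together: when this function succeeds, the intended plan
shape (illustrative only, not captured EXPLAIN output; costs and row counts
omitted) is a per-worker distinct step below a Gather or Gather Merge, plus a
final distinct step above it to remove duplicates that survive across workers:

    Unique                        -- added by create_final_distinct_paths()
      -> Gather Merge             -- generate_useful_gather_paths()
           -> Unique              -- partial path built in this function
                -> Sort
                     -> Parallel Seq Scan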
5105 : :
5106 : : /*
5107 : : * create_final_distinct_paths
5108 : : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
5109 : : *
5110 : : * input_rel: contains the source-data paths
5111 : : * distinct_rel: destination relation for storing created paths
5112 : : */
5113 : : static RelOptInfo *
5114 : 167 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
5115 : : RelOptInfo *distinct_rel)
5116 : : {
5117 : 167 : Query *parse = root->parse;
5118 : 167 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5119 : 167 : double numDistinctRows;
5120 : 167 : bool allow_hash;
5121 : :
5122 : : /* Estimate number of distinct rows there will be */
5123 [ + + + - : 167 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
+ + - + ]
5124 : 156 : root->hasHavingQual)
5125 : : {
5126 : : /*
5127 : : * If there was grouping or aggregation, use the number of input rows
5128 : : * as the estimated number of DISTINCT rows (ie, assume the input is
5129 : : * already mostly unique).
5130 : : */
5131 : 11 : numDistinctRows = cheapest_input_path->rows;
5132 : 11 : }
5133 : : else
5134 : : {
5135 : : /*
5136 : : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5137 : : */
5138 : 156 : List *distinctExprs;
5139 : :
5140 : 312 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5141 : 156 : parse->targetList);
5142 : 312 : numDistinctRows = estimate_num_groups(root, distinctExprs,
5143 : 156 : cheapest_input_path->rows,
5144 : : NULL, NULL);
5145 : 156 : }
5146 : :
5147 : : /*
5148 : : * Consider sort-based implementations of DISTINCT, if possible.
5149 : : */
5150 [ + + ]: 167 : if (grouping_is_sortable(root->processed_distinctClause))
5151 : : {
5152 : : /*
5153 : : * Firstly, if we have any adequately-presorted paths, just stick a
5154 : : * Unique node on those. We also consider doing an explicit sort of
5155 : : * the cheapest input path and Unique'ing that. If any paths have
5156 : : * presorted keys then we'll create an incremental sort atop those
5157 : : * before adding a Unique node on top. We'll also attempt to
5158 : : * reorder the required pathkeys to match the input path's pathkeys as
5159 : : * much as possible, in hopes of avoiding a possible need to re-sort.
5160 : : *
5161 : : * When we have DISTINCT ON, we must sort by the more rigorous of
5162 : : * DISTINCT and ORDER BY, else it won't have the desired behavior.
5163 : : * Also, if we do have to do an explicit sort, we might as well use
5164 : : * the more rigorous ordering to avoid a second sort later. (Note
5165 : : * that the parser will have ensured that one clause is a prefix of
5166 : : * the other.)
5167 : : */
5168 : 166 : List *needed_pathkeys;
5169 : 166 : ListCell *lc;
5170 : 166 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5171 : :
5172 [ + + + + ]: 166 : if (parse->hasDistinctOn &&
5173 : 54 : list_length(root->distinct_pathkeys) <
5174 : 27 : list_length(root->sort_pathkeys))
5175 : 9 : needed_pathkeys = root->sort_pathkeys;
5176 : : else
5177 : 157 : needed_pathkeys = root->distinct_pathkeys;
5178 : :
5179 [ + - + + : 407 : foreach(lc, input_rel->pathlist)
+ + ]
5180 : : {
5181 : 241 : Path *input_path = (Path *) lfirst(lc);
5182 : 241 : Path *sorted_path;
5183 : 241 : List *useful_pathkeys_list = NIL;
5184 : :
5185 : 241 : useful_pathkeys_list =
5186 : 482 : get_useful_pathkeys_for_distinct(root,
5187 : 241 : needed_pathkeys,
5188 : 241 : input_path->pathkeys);
5189 [ + - ]: 241 : Assert(list_length(useful_pathkeys_list) > 0);
5190 : :
5191 [ + + + - : 755 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
+ + + + ]
5192 : : {
5193 : 546 : sorted_path = make_ordered_path(root,
5194 : 273 : distinct_rel,
5195 : 273 : input_path,
5196 : 273 : cheapest_input_path,
5197 : 273 : useful_pathkeys,
5198 : 273 : limittuples);
5199 : :
5200 [ + + ]: 273 : if (sorted_path == NULL)
5201 : 40 : continue;
5202 : :
5203 : : /*
5204 : : * distinct_pathkeys may have become empty if all of the
5205 : : * pathkeys were determined to be redundant. If all of the
5206 : : * pathkeys are redundant then each DISTINCT target must only
5207 : : * allow a single value, therefore all resulting tuples must
5208 : : * be identical (or at least indistinguishable by an equality
5209 : : * check). We can uniquify these tuples simply by taking
5210 : : * the first tuple. All we do here is add a path to do "LIMIT
5211 : : * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5212 : : * still have a non-NIL sort_pathkeys list, so we must still
5213 : : * only do this with paths which are correctly sorted by
5214 : : * sort_pathkeys.
5215 : : */
5216 [ + + ]: 233 : if (root->distinct_pathkeys == NIL)
5217 : : {
5218 : 14 : Node *limitCount;
5219 : :
5220 : 14 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5221 : : sizeof(int64),
5222 : 14 : Int64GetDatum(1), false,
5223 : : true);
5224 : :
5225 : : /*
5226 : : * If the query already has a LIMIT clause, then we could
5227 : : * end up with a duplicate LimitPath in the final plan.
5228 : : * That does not seem worth troubling over too much.
5229 : : */
5230 : 28 : add_path(distinct_rel, (Path *)
5231 : 28 : create_limit_path(root, distinct_rel, sorted_path,
5232 : 14 : NULL, limitCount,
5233 : : LIMIT_OPTION_COUNT, 0, 1));
5234 : 14 : }
5235 : : else
5236 : : {
5237 : 438 : add_path(distinct_rel, (Path *)
5238 : 438 : create_unique_path(root, distinct_rel,
5239 : 219 : sorted_path,
5240 : 219 : list_length(root->distinct_pathkeys),
5241 : 219 : numDistinctRows));
5242 : : }
5243 : 474 : }
5244 : 241 : }
5245 : 166 : }
5246 : :
5247 : : /*
5248 : : * Consider hash-based implementations of DISTINCT, if possible.
5249 : : *
5250 : : * If we were not able to make any other types of path, we *must* hash or
5251 : : * die trying. If we do have other choices, there are two things that
5252 : : * should prevent selection of hashing: if the query uses DISTINCT ON
5253 : : * (because it won't really have the expected behavior if we hash), or if
5254 : : * enable_hashagg is off.
5255 : : *
5256 : : * Note: grouping_is_hashable() is much more expensive to check than the
5257 : : * other gating conditions, so we want to do it last.
5258 : : */
5259 [ + + ]: 167 : if (distinct_rel->pathlist == NIL)
5260 : 1 : allow_hash = true; /* we have no alternatives */
5261 [ + + + + ]: 166 : else if (parse->hasDistinctOn || !enable_hashagg)
5262 : 52 : allow_hash = false; /* policy-based decision not to hash */
5263 : : else
5264 : 114 : allow_hash = true; /* default */
5265 : :
5266 [ + + - + ]: 167 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5267 : : {
5268 : : /* Generate hashed aggregate path --- no sort needed */
5269 : 230 : add_path(distinct_rel, (Path *)
5270 : 230 : create_agg_path(root,
5271 : 115 : distinct_rel,
5272 : 115 : cheapest_input_path,
5273 : 115 : cheapest_input_path->pathtarget,
5274 : : AGG_HASHED,
5275 : : AGGSPLIT_SIMPLE,
5276 : 115 : root->processed_distinctClause,
5277 : : NIL,
5278 : : NULL,
5279 : 115 : numDistinctRows));
5280 : 115 : }
5281 : :
5282 : 334 : return distinct_rel;
5283 : 167 : }
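
The allow_hash gating above reduces to a small pure function; a sketch with
invented names follows.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the allow_hash policy in create_final_distinct_paths(). */
static bool
distinct_allow_hash(bool have_other_paths, bool has_distinct_on,
					bool hashagg_enabled)
{
	if (!have_other_paths)
		return true;			/* no alternatives: must hash or die trying */
	if (has_distinct_on || !hashagg_enabled)
		return false;			/* policy-based decision not to hash */
	return true;				/* default */
}

int
main(void)
{
	printf("%d\n", (int) distinct_allow_hash(true, true, true));	/* 0 */
	return 0;
}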
5284 : :
5285 : : /*
5286 : : * get_useful_pathkeys_for_distinct
5287 : : * Get useful orderings of pathkeys for distinctClause by reordering
5288 : : * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5289 : : *
5290 : : * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5291 : : * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5292 : : */
5293 : : static List *
5294 : 262 : get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5295 : : List *path_pathkeys)
5296 : : {
5297 : 262 : List *useful_pathkeys_list = NIL;
5298 : 262 : List *useful_pathkeys = NIL;
5299 : :
5300 : : /* always include the given 'needed_pathkeys' */
5301 : 524 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5302 : 262 : needed_pathkeys);
5303 : :
5304 [ - + ]: 262 : if (!enable_distinct_reordering)
5305 : 0 : return useful_pathkeys_list;
5306 : :
5307 : : /*
5308 : : * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5309 : : * that match 'needed_pathkeys', but only up to the longest matching
5310 : : * prefix.
5311 : : *
5312 : : * When we have DISTINCT ON, we must ensure that the resulting pathkey
5313 : : * list matches initial distinctClause pathkeys; otherwise, it won't have
5314 : : * the desired behavior.
5315 : : */
5316 [ + + + + : 672 : foreach_node(PathKey, pathkey, path_pathkeys)
+ + + + ]
5317 : : {
5318 : : /*
5319 : : * The PathKey nodes are canonical, so they can be checked for
5320 : : * equality by simple pointer comparison.
5321 : : */
5322 [ + + ]: 148 : if (!list_member_ptr(needed_pathkeys, pathkey))
5323 : 1 : break;
5324 [ + + + + ]: 147 : if (root->parse->hasDistinctOn &&
5325 : 19 : !list_member_ptr(root->distinct_pathkeys, pathkey))
5326 : 3 : break;
5327 : :
5328 : 144 : useful_pathkeys = lappend(useful_pathkeys, pathkey);
5329 : 406 : }
5330 : :
5331 : : /* If no match at all, no point in reordering needed_pathkeys */
5332 [ + + ]: 262 : if (useful_pathkeys == NIL)
5333 : 162 : return useful_pathkeys_list;
5334 : :
5335 : : /*
5336 : : * If not full match, the resulting pathkey list is not useful without
5337 : : * incremental sort.
5338 : : */
5339 [ + + + + ]: 100 : if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5340 : 47 : !enable_incremental_sort)
5341 : 10 : return useful_pathkeys_list;
5342 : :
5343 : : /* Append the remaining PathKey nodes in needed_pathkeys */
5344 : 180 : useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5345 : 90 : needed_pathkeys);
5346 : :
5347 : : /*
5348 : : * If the resulting pathkey list is the same as the 'needed_pathkeys',
5349 : : * just drop it.
5350 : : */
5351 : 180 : if (compare_pathkeys(needed_pathkeys,
5352 [ + + + + ]: 180 : useful_pathkeys) == PATHKEYS_EQUAL)
5353 : 56 : return useful_pathkeys_list;
5354 : :
5355 : 68 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5356 : 34 : useful_pathkeys);
5357 : :
5358 : 34 : return useful_pathkeys_list;
5359 : 262 : }
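
With ints standing in for canonical PathKey pointers (compared by identity, as
above), the reordering amounts to: keep the longest prefix of the path's keys
that are members of needed_pathkeys, then append the rest of needed_pathkeys
in order. A standalone sketch follows; it is simplified in that it drops the
DISTINCT ON and enable_incremental_sort checks and assumes duplicate-free key
lists, as canonical pathkey lists are.

#include <stdbool.h>
#include <stdio.h>

/* Write the reordered keys to "out" (sized >= nneeded); return the length. */
static int
reorder_keys(const int *needed, int nneeded,
			 const int *path, int npath, int *out)
{
	int		n = 0;

	/* longest prefix of path keys that all appear in needed */
	for (int i = 0; i < npath; i++)
	{
		bool	member = false;

		for (int j = 0; j < nneeded; j++)
			if (path[i] == needed[j])
				member = true;
		if (!member)
			break;
		out[n++] = path[i];
	}

	/* append the remaining needed keys, preserving their order */
	for (int j = 0; j < nneeded; j++)
	{
		bool	seen = false;

		for (int i = 0; i < n; i++)
			if (out[i] == needed[j])
				seen = true;
		if (!seen)
			out[n++] = needed[j];
	}
	return n;
}

int
main(void)
{
	int		needed[] = {1, 2, 3};	/* needed_pathkeys */
	int		path[] = {2, 1, 7};		/* path_pathkeys */
	int		out[3];
	int		n = reorder_keys(needed, 3, path, 3, out);

	for (int i = 0; i < n; i++)
		printf("%d ", out[i]);		/* prints: 2 1 3 */
	printf("\n");
	return 0;
}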
5360 : :
5361 : : /*
5362 : : * create_ordered_paths
5363 : : *
5364 : : * Build a new upperrel containing Paths for ORDER BY evaluation.
5365 : : *
5366 : : * All paths in the result must satisfy the ORDER BY ordering.
5367 : : * The only new paths we need consider are an explicit full sort
5368 : : * and incremental sort on the cheapest-total existing path.
5369 : : *
5370 : : * input_rel: contains the source-data Paths
5371 : : * target: the output tlist the result Paths must emit
5372 : : * limit_tuples: estimated bound on the number of output tuples,
5373 : : * or -1 if no LIMIT or couldn't estimate
5374 : : *
5375 : : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5376 : : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5377 : : */
5378 : : static RelOptInfo *
5379 : 8988 : create_ordered_paths(PlannerInfo *root,
5380 : : RelOptInfo *input_rel,
5381 : : PathTarget *target,
5382 : : bool target_parallel_safe,
5383 : : double limit_tuples)
5384 : : {
5385 : 8988 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5386 : 8988 : RelOptInfo *ordered_rel;
5387 : 8988 : ListCell *lc;
5388 : :
5389 : : /* For now, do all work in the (ORDERED, NULL) upperrel */
5390 : 8988 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5391 : :
5392 : : /*
5393 : : * If the input relation is not parallel-safe, then the ordered relation
5394 : : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5395 : : * target list is parallel-safe.
5396 : : */
5397 [ + + + + ]: 8988 : if (input_rel->consider_parallel && target_parallel_safe)
5398 : 6072 : ordered_rel->consider_parallel = true;
5399 : :
5400 : : /* Assume that the same path generation strategies are allowed. */
5401 : 8988 : ordered_rel->pgs_mask = input_rel->pgs_mask;
5402 : :
5403 : : /*
5404 : : * If the input rel belongs to a single FDW, so does the ordered_rel.
5405 : : */
5406 : 8988 : ordered_rel->serverid = input_rel->serverid;
5407 : 8988 : ordered_rel->userid = input_rel->userid;
5408 : 8988 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5409 : 8988 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5410 : :
5411 [ + - + + : 22891 : foreach(lc, input_rel->pathlist)
+ + ]
5412 : : {
5413 : 13903 : Path *input_path = (Path *) lfirst(lc);
5414 : 13903 : Path *sorted_path;
5415 : 13903 : bool is_sorted;
5416 : 13903 : int presorted_keys;
5417 : :
5418 : 27806 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5419 : 13903 : input_path->pathkeys, &presorted_keys);
5420 : :
5421 [ + + ]: 13903 : if (is_sorted)
5422 : 5590 : sorted_path = input_path;
5423 : : else
5424 : : {
5425 : : /*
5426 : : * Try at least sorting the cheapest path and also try
5427 : : * incrementally sorting any path which is partially sorted
5428 : : * already (no need to deal with paths which have presorted keys
5429 : : * when incremental sort is disabled unless it's the cheapest
5430 : : * input path).
5431 : : */
5432 [ + + + + ]: 8719 : if (input_path != cheapest_input_path &&
5433 [ + + ]: 604 : (presorted_keys == 0 || !enable_incremental_sort))
5434 : 263 : continue;
5435 : :
5436 : : /*
5437 : : * We've no need to consider both a sort and incremental sort.
5438 : : * We'll just do a sort if there are no presorted keys and an
5439 : : * incremental sort when there are presorted keys.
5440 : : */
5441 [ + + + + ]: 8050 : if (presorted_keys == 0 || !enable_incremental_sort)
5442 : 15274 : sorted_path = (Path *) create_sort_path(root,
5443 : 7637 : ordered_rel,
5444 : 7637 : input_path,
5445 : 7637 : root->sort_pathkeys,
5446 : 7637 : limit_tuples);
5447 : : else
5448 : 826 : sorted_path = (Path *) create_incremental_sort_path(root,
5449 : 413 : ordered_rel,
5450 : 413 : input_path,
5451 : 413 : root->sort_pathkeys,
5452 : 413 : presorted_keys,
5453 : 413 : limit_tuples);
5454 : : }
5455 : :
5456 : : /*
5457 : : * If the pathtarget of the result path has different expressions from
5458 : : * the target to be applied, a projection step is needed.
5459 : : */
5460 [ + + ]: 13640 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5461 : 94 : sorted_path = apply_projection_to_path(root, ordered_rel,
5462 : 47 : sorted_path, target);
5463 : :
5464 : 13640 : add_path(ordered_rel, sorted_path);
5465 [ + + ]: 13903 : }
5466 : :
5467 : : /*
5468 : : * generate_gather_paths() will have already generated a simple Gather
5469 : : * path for the best parallel path, if any, and the loop above will have
5470 : : * considered sorting it. Similarly, generate_gather_paths() will also
5471 : : * have generated order-preserving Gather Merge plans which can be used
5472 : : * without sorting if they happen to match the sort_pathkeys, and the loop
5473 : : * above will have handled those as well. However, there's one more
5474 : : * possibility: it may make sense to sort the cheapest partial path or
5475 : : * incrementally sort any partial path that is partially sorted according
5476 : : * to the required output order and then use Gather Merge.
5477 : : */
5478 [ + + + + : 8988 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
+ + ]
5479 : 6063 : input_rel->partial_pathlist != NIL)
5480 : : {
5481 : 401 : Path *cheapest_partial_path;
5482 : :
5483 : 401 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5484 : :
5485 [ + - + + : 943 : foreach(lc, input_rel->partial_pathlist)
+ + ]
5486 : : {
5487 : 542 : Path *input_path = (Path *) lfirst(lc);
5488 : 542 : Path *sorted_path;
5489 : 542 : bool is_sorted;
5490 : 542 : int presorted_keys;
5491 : 542 : double total_groups;
5492 : :
5493 : 1084 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5494 : 542 : input_path->pathkeys,
5495 : : &presorted_keys);
5496 : :
5497 [ + + ]: 542 : if (is_sorted)
5498 : 121 : continue;
5499 : :
5500 : : /*
5501 : : * Try at least sorting the cheapest path and also try
5502 : : * incrementally sorting any path which is partially sorted
5503 : : * already (no need to deal with paths which have presorted keys
5504 : : * when incremental sort is disabled unless it's the cheapest
5505 : : * partial path).
5506 : : */
5507 [ + + + - ]: 446 : if (input_path != cheapest_partial_path &&
5508 [ + - ]: 25 : (presorted_keys == 0 || !enable_incremental_sort))
5509 : 0 : continue;
5510 : :
5511 : : /*
5512 : : * We've no need to consider both a sort and incremental sort.
5513 : : * We'll just do a sort if there are no presorted keys and an
5514 : : * incremental sort when there are presorted keys.
5515 : : */
5516 [ + + + + ]: 421 : if (presorted_keys == 0 || !enable_incremental_sort)
5517 : 786 : sorted_path = (Path *) create_sort_path(root,
5518 : 393 : ordered_rel,
5519 : 393 : input_path,
5520 : 393 : root->sort_pathkeys,
5521 : 393 : limit_tuples);
5522 : : else
5523 : 56 : sorted_path = (Path *) create_incremental_sort_path(root,
5524 : 28 : ordered_rel,
5525 : 28 : input_path,
5526 : 28 : root->sort_pathkeys,
5527 : 28 : presorted_keys,
5528 : 28 : limit_tuples);
5529 : 421 : total_groups = compute_gather_rows(sorted_path);
5530 : 421 : sorted_path = (Path *)
5531 : 842 : create_gather_merge_path(root, ordered_rel,
5532 : 421 : sorted_path,
5533 : 421 : sorted_path->pathtarget,
5534 : 421 : root->sort_pathkeys, NULL,
5535 : : &total_groups);
5536 : :
5537 : : /*
5538 : : * If the pathtarget of the result path has different expressions
5539 : : * from the target to be applied, a projection step is needed.
5540 : : */
5541 [ + + ]: 421 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5542 : 2 : sorted_path = apply_projection_to_path(root, ordered_rel,
5543 : 1 : sorted_path, target);
5544 : :
5545 : 421 : add_path(ordered_rel, sorted_path);
5546 [ + + ]: 542 : }
5547 : 401 : }
5548 : :
5549 : : /*
5550 : : * If there is an FDW that's responsible for all baserels of the query,
5551 : : * let it consider adding ForeignPaths.
5552 : : */
5553 [ - + # # ]: 8988 : if (ordered_rel->fdwroutine &&
5554 : 0 : ordered_rel->fdwroutine->GetForeignUpperPaths)
5555 : 0 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5556 : 0 : input_rel, ordered_rel,
5557 : : NULL);
5558 : :
5559 : : /* Let extensions possibly add some more paths */
5560 [ + - ]: 8988 : if (create_upper_paths_hook)
5561 : 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5562 : 0 : input_rel, ordered_rel, NULL);
5563 : :
5564 : : /*
5565 : : * No need to bother with set_cheapest here; grouping_planner does not
5566 : : * need us to do it.
5567 : : */
5568 [ + - ]: 8988 : Assert(ordered_rel->pathlist != NIL);
5569 : :
5570 : 17976 : return ordered_rel;
5571 : 8988 : }
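
Both loops above (over the full pathlist and over the partial pathlist) share
the same three-way sort choice. Factored out as a sketch (hypothetical helper,
not in the source; the skip-cheaper-paths "continue" cases are handled
separately above):

#include <stdbool.h>
#include <stdio.h>

typedef enum
{
	SORT_NONE,					/* input already sorted: use it as-is */
	SORT_FULL,					/* full explicit sort */
	SORT_INCREMENTAL			/* incremental sort over presorted keys */
} sort_kind;

static sort_kind
choose_sort(bool is_sorted, int presorted_keys, bool incremental_enabled)
{
	if (is_sorted)
		return SORT_NONE;
	if (presorted_keys > 0 && incremental_enabled)
		return SORT_INCREMENTAL;
	return SORT_FULL;
}

int
main(void)
{
	/* unsorted path, two presorted keys, incremental sort enabled */
	printf("%d\n", choose_sort(false, 2, true));	/* SORT_INCREMENTAL */
	return 0;
}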
5572 : :
5573 : :
5574 : : /*
5575 : : * make_group_input_target
5576 : : * Generate appropriate PathTarget for initial input to grouping nodes.
5577 : : *
5578 : : * If there is grouping or aggregation, the scan/join subplan cannot emit
5579 : : * the query's final targetlist; for example, it certainly can't emit any
5580 : : * aggregate function calls. This routine generates the correct target
5581 : : * for the scan/join subplan.
5582 : : *
5583 : : * The query target list passed from the parser already contains entries
5584 : : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5585 : : * for variables used only in HAVING clauses; so we need to add those
5586 : : * variables to the subplan target list. Also, we flatten all expressions
5587 : : * except GROUP BY items into their component variables; other expressions
5588 : : * will be computed by the upper plan nodes rather than by the subplan.
5589 : : * For example, given a query like
5590 : : * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5591 : : * we want to pass this targetlist to the subplan:
5592 : : * a+b,c,d
5593 : : * where the a+b target will be used by the Sort/Group steps, and the
5594 : : * other targets will be used for computing the final results.
5595 : : *
5596 : : * 'final_target' is the query's final target list (in PathTarget form)
5597 : : *
5598 : : * The result is the PathTarget to be computed by the Paths returned from
5599 : : * query_planner().
5600 : : */
5601 : : static PathTarget *
5602 : 5048 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5603 : : {
5604 : 5048 : Query *parse = root->parse;
5605 : 5048 : PathTarget *input_target;
5606 : 5048 : List *non_group_cols;
5607 : 5048 : List *non_group_vars;
5608 : 5048 : int i;
5609 : 5048 : ListCell *lc;
5610 : :
5611 : : /*
5612 : : * We must build a target containing all grouping columns, plus any other
5613 : : * Vars mentioned in the query's targetlist and HAVING qual.
5614 : : */
5615 : 5048 : input_target = create_empty_pathtarget();
5616 : 5048 : non_group_cols = NIL;
5617 : :
5618 : 5048 : i = 0;
5619 [ + + + + + + ]: 13149 : foreach(lc, final_target->exprs)
5620 : : {
5621 : 8101 : Expr *expr = (Expr *) lfirst(lc);
5622 [ + - ]: 8101 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5623 : :
5624 [ + + + + + + ]: 8101 : if (sgref && root->processed_groupClause &&
5625 : 2968 : get_sortgroupref_clause_noerr(sgref,
5626 : 2968 : root->processed_groupClause) != NULL)
5627 : : {
5628 : : /*
5629 : : * It's a grouping column, so add it to the input target as-is.
5630 : : *
5631 : : * Note that the target is logically below the grouping step. So
5632 : : * with grouping sets we need to remove the RT index of the
5633 : : * grouping step if there is any from the target expression.
5634 : : */
5635 [ + - + + ]: 1196 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5636 : : {
5637 [ - + ]: 331 : Assert(root->group_rtindex > 0);
5638 : 331 : expr = (Expr *)
5639 : 662 : remove_nulling_relids((Node *) expr,
5640 : 331 : bms_make_singleton(root->group_rtindex),
5641 : : NULL);
5642 : 331 : }
5643 : 1196 : add_column_to_pathtarget(input_target, expr, sgref);
5644 : 1196 : }
5645 : : else
5646 : : {
5647 : : /*
5648 : : * Non-grouping column, so just remember the expression for later
5649 : : * call to pull_var_clause.
5650 : : */
5651 : 6905 : non_group_cols = lappend(non_group_cols, expr);
5652 : : }
5653 : :
5654 : 8101 : i++;
5655 : 8101 : }
5656 : :
5657 : : /*
5658 : : * If there's a HAVING clause, we'll need the Vars it uses, too.
5659 : : */
5660 [ + + ]: 5048 : if (parse->havingQual)
5661 : 137 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5662 : :
5663 : : /*
5664 : : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5665 : : * add them to the input target if not already present. (A Var used
5666 : : * directly as a GROUP BY item will be present already.) Note this
5667 : : * includes Vars used in resjunk items, so we are covering the needs of
5668 : : * ORDER BY and window specifications. Vars used within Aggrefs and
5669 : : * WindowFuncs will be pulled out here, too.
5670 : : *
5671 : : * Note that the target is logically below the grouping step. So with
5672 : : * grouping sets we need to remove the RT index of the grouping step if
5673 : : * there is any from the non-group Vars.
5674 : : */
5675 : 5048 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5676 : : PVC_RECURSE_AGGREGATES |
5677 : : PVC_RECURSE_WINDOWFUNCS |
5678 : : PVC_INCLUDE_PLACEHOLDERS);
5679 [ + + + + ]: 5048 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5680 : : {
5681 [ + - ]: 152 : Assert(root->group_rtindex > 0);
5682 : 152 : non_group_vars = (List *)
5683 : 304 : remove_nulling_relids((Node *) non_group_vars,
5684 : 152 : bms_make_singleton(root->group_rtindex),
5685 : : NULL);
5686 : 152 : }
5687 : 5048 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5688 : :
5689 : : /* clean up cruft */
5690 : 5048 : list_free(non_group_vars);
5691 : 5048 : list_free(non_group_cols);
5692 : :
5693 : : /* XXX this causes some redundant cost calculation ... */
5694 : 10096 : return set_pathtarget_cost_width(root, input_target);
5695 : 5048 : }
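As an illustration of the split the loop above performs (grouping columns kept whole, everything else set aside to be flattened into component Vars by pull_var_clause), here is a standalone sketch using the comment's own example query. is_grouping_column() is a made-up stand-in for the get_sortgroupref_clause_noerr() test, and the "flatten" branch merely reports what pull_var_clause would extract.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: is target column i a GROUP BY column? */
static bool
is_grouping_column(int i)
{
	return (i == 0);			/* pretend only column 0 (a+b) is grouped */
}

int
main(void)
{
	/* SELECT a+b, SUM(c+d) FROM tab GROUP BY a+b; */
	const char *tlist[] = {"a+b", "SUM(c+d)"};

	for (int i = 0; i < 2; i++)
	{
		if (is_grouping_column(i))
			printf("keep as-is:       %s\n", tlist[i]);
		else
			printf("flatten to Vars:  %s  (subplan emits c, d)\n", tlist[i]);
	}
	return 0;
}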
5696 : :
5697 : : /*
5698 : : * make_partial_grouping_target
5699 : : * Generate appropriate PathTarget for output of partial aggregate
5700 : : * (or partial grouping, if there are no aggregates) nodes.
5701 : : *
5702 : : * A partial aggregation node needs to emit all the same aggregates that
5703 : : * a regular aggregation node would, plus any aggregates used in HAVING;
5704 : : * except that the Aggref nodes should be marked as partial aggregates.
5705 : : *
5706 : : * In addition, we'd better emit any Vars and PlaceHolderVars that are
5707 : : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5708 : : * these would be Vars that are grouped by or used in grouping expressions.)
5709 : : *
5710 : : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5711 : : * havingQual represents the HAVING clause.
5712 : : */
5713 : : static PathTarget *
5714 : 521 : make_partial_grouping_target(PlannerInfo *root,
5715 : : PathTarget *grouping_target,
5716 : : Node *havingQual)
5717 : : {
5718 : 521 : PathTarget *partial_target;
5719 : 521 : List *non_group_cols;
5720 : 521 : List *non_group_exprs;
5721 : 521 : int i;
5722 : 521 : ListCell *lc;
5723 : :
5724 : 521 : partial_target = create_empty_pathtarget();
5725 : 521 : non_group_cols = NIL;
5726 : :
5727 : 521 : i = 0;
5728 [ + - + + + + ]: 1876 : foreach(lc, grouping_target->exprs)
5729 : : {
5730 : 1355 : Expr *expr = (Expr *) lfirst(lc);
5731 [ + - ]: 1355 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5732 : :
5733 [ + + + + + + ]: 1355 : if (sgref && root->processed_groupClause &&
5734 : 1580 : get_sortgroupref_clause_noerr(sgref,
5735 : 1580 : root->processed_groupClause) != NULL)
5736 : : {
5737 : : /*
5738 : : * It's a grouping column, so add it to the partial_target as-is.
5739 : : * (This allows the upper agg step to repeat the grouping calcs.)
5740 : : */
5741 : 467 : add_column_to_pathtarget(partial_target, expr, sgref);
5742 : 467 : }
5743 : : else
5744 : : {
5745 : : /*
5746 : : * Non-grouping column, so just remember the expression for later
5747 : : * call to pull_var_clause.
5748 : : */
5749 : 888 : non_group_cols = lappend(non_group_cols, expr);
5750 : : }
5751 : :
5752 : 1355 : i++;
5753 : 1355 : }
5754 : :
5755 : : /*
5756 : : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5757 : : */
5758 [ + + ]: 521 : if (havingQual)
5759 : 145 : non_group_cols = lappend(non_group_cols, havingQual);
5760 : :
5761 : : /*
5762 : : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5763 : : * non-group cols (plus HAVING), and add them to the partial_target if not
5764 : : * already present. (An expression used directly as a GROUP BY item will
5765 : : * be present already.) Note this includes Vars used in resjunk items, so
5766 : : * we are covering the needs of ORDER BY and window specifications.
5767 : : */
5768 : 521 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5769 : : PVC_INCLUDE_AGGREGATES |
5770 : : PVC_RECURSE_WINDOWFUNCS |
5771 : : PVC_INCLUDE_PLACEHOLDERS);
5772 : :
5773 : 521 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5774 : :
5775 : : /*
5776 : : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5777 : : * are at the top level of the target list, so we can just scan the list
5778 : : * rather than recursing through the expression trees.
5779 : : */
5780 [ + - + + + + ]: 1974 : foreach(lc, partial_target->exprs)
5781 : : {
5782 : 1453 : Aggref *aggref = (Aggref *) lfirst(lc);
5783 : :
5784 [ + + ]: 1453 : if (IsA(aggref, Aggref))
5785 : : {
5786 : 981 : Aggref *newaggref;
5787 : :
5788 : : /*
5789 : : * We shouldn't need to copy the substructure of the Aggref node,
5790 : : * but flat-copy the node itself to avoid damaging other trees.
5791 : : */
5792 : 981 : newaggref = makeNode(Aggref);
5793 : 981 : memcpy(newaggref, aggref, sizeof(Aggref));
5794 : :
5795 : : /* For now, assume serialization is required */
5796 : 981 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5797 : :
5798 : 981 : lfirst(lc) = newaggref;
5799 : 981 : }
5800 : 1453 : }
5801 : :
5802 : : /* clean up cruft */
5803 : 521 : list_free(non_group_exprs);
5804 : 521 : list_free(non_group_cols);
5805 : :
5806 : : /* XXX this causes some redundant cost calculation ... */
5807 : 1042 : return set_pathtarget_cost_width(root, partial_target);
5808 : 521 : }
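The memcpy() above is the usual flat-copy idiom: duplicate the node itself so other trees sharing it are not damaged, while pointer-valued substructure stays shared. A minimal standalone illustration with a toy node type (ToyAggref is invented for the sketch):

#include <stdio.h>
#include <string.h>

typedef struct ToyAggref
{
	int		aggsplit;			/* field we intend to change */
	char   *args;				/* substructure we intend to share */
} ToyAggref;

int
main(void)
{
	char		args[] = "c+d";
	ToyAggref	orig = {0, args};
	ToyAggref	copy;

	memcpy(&copy, &orig, sizeof(ToyAggref));	/* flat copy */
	copy.aggsplit = 1;							/* mutate only the copy */

	printf("orig.aggsplit=%d copy.aggsplit=%d args shared=%s\n",
		   orig.aggsplit, copy.aggsplit,
		   (orig.args == copy.args) ? "yes" : "no");
	return 0;
}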
5809 : :
5810 : : /*
5811 : : * mark_partial_aggref
5812 : : * Adjust an Aggref to make it represent a partial-aggregation step.
5813 : : *
5814 : : * The Aggref node is modified in-place; caller must do any copying required.
5815 : : */
5816 : : void
5817 : 2955 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5818 : : {
5819 : : /* aggtranstype should be computed by this point */
5820 [ + - ]: 2955 : Assert(OidIsValid(agg->aggtranstype));
5821 : : /* ... but aggsplit should still be as the parser left it */
5822 [ + - ]: 2955 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5823 : :
5824 : : /* Mark the Aggref with the intended partial-aggregation mode */
5825 : 2955 : agg->aggsplit = aggsplit;
5826 : :
5827 : : /*
5828 : : * Adjust result type if needed. Normally, a partial aggregate returns
5829 : : * the aggregate's transition type; but if that's INTERNAL and we're
5830 : : * serializing, it returns BYTEA instead.
5831 : : */
5832 [ + + ]: 2955 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5833 : : {
5834 [ + + - + ]: 2575 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5835 : 52 : agg->aggtype = BYTEAOID;
5836 : : else
5837 : 2523 : agg->aggtype = agg->aggtranstype;
5838 : 2575 : }
5839 : 2955 : }
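DO_AGGSPLIT_SKIPFINAL and DO_AGGSPLIT_SERIALIZE are bit tests on the AggSplit mode. The standalone mock below re-creates that flag logic for illustration only; the SPLITOP_* names and values are stand-ins and are not guaranteed to match the actual nodes.h encoding.

#include <stdio.h>

/* Illustrative flag bits, modeled on the AGGSPLITOP_* idea */
#define SPLITOP_COMBINE		0x01
#define SPLITOP_SKIPFINAL	0x02
#define SPLITOP_SERIALIZE	0x04
#define SPLITOP_DESERIALIZE	0x08

#define SPLIT_SIMPLE		 0
#define SPLIT_INITIAL_SERIAL (SPLITOP_SKIPFINAL | SPLITOP_SERIALIZE)
#define SPLIT_FINAL_DESERIAL (SPLITOP_COMBINE | SPLITOP_DESERIALIZE)

#define DO_SKIPFINAL(as)	(((as) & SPLITOP_SKIPFINAL) != 0)
#define DO_SERIALIZE(as)	(((as) & SPLITOP_SERIALIZE) != 0)

int
main(void)
{
	int		as = SPLIT_INITIAL_SERIAL;

	/* A partial (initial) aggregate skips the final function and
	 * serializes its transition state for transfer to the leader. */
	printf("skipfinal=%d serialize=%d\n", DO_SKIPFINAL(as), DO_SERIALIZE(as));
	return 0;
}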
5840 : :
5841 : : /*
5842 : : * postprocess_setop_tlist
5843 : : * Fix up targetlist returned by plan_set_operations().
5844 : : *
5845 : : * We need to transpose sort key info from the orig_tlist into new_tlist.
5846 : : * NOTE: this would not be good enough if we supported resjunk sort keys
5847 : : * for results of set operations --- then, we'd need to project a whole
5848 : : * new tlist to evaluate the resjunk columns. For now, just ereport if we
5849 : : * find any resjunk columns in orig_tlist.
5850 : : */
5851 : : static List *
5852 : 798 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5853 : : {
5854 : 798 : ListCell *l;
5855 : 798 : ListCell *orig_tlist_item = list_head(orig_tlist);
5856 : :
5857 [ + + + + + + ]: 2988 : foreach(l, new_tlist)
5858 : : {
5859 : 2190 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5860 : 2190 : TargetEntry *orig_tle;
5861 : :
5862 : : /* ignore resjunk columns in setop result */
5863 [ - + ]: 2190 : if (new_tle->resjunk)
5864 : 0 : continue;
5865 : :
5866 [ - + ]: 2190 : Assert(orig_tlist_item != NULL);
5867 : 2190 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5868 : 2190 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5869 [ + - ]: 2190 : if (orig_tle->resjunk) /* should not happen */
5870 [ # # # # ]: 0 : elog(ERROR, "resjunk output columns are not implemented");
5871 [ - + ]: 2190 : Assert(new_tle->resno == orig_tle->resno);
5872 : 2190 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5873 [ - - + ]: 2190 : }
5874 [ + - ]: 798 : if (orig_tlist_item != NULL)
5875 [ # # # # ]: 0 : elog(ERROR, "resjunk output columns are not implemented");
5876 : 1596 : return new_tlist;
5877 : 798 : }
5878 : :
5879 : : /*
5880 : : * optimize_window_clauses
5881 : : * Call each WindowFunc's prosupport function to see if we're able to
5882 : : * make any adjustments to any of the WindowClauses so that the executor
5883 : : * can execute the window functions in a more optimal way.
5884 : : *
5885 : : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5886 : : * may allow more things to be done here in the future.
5887 : : */
5888 : : static void
5889 : 426 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5890 : : {
5891 : 426 : List *windowClause = root->parse->windowClause;
5892 : 426 : ListCell *lc;
5893 : :
5894 [ + - + + + + ]: 893 : foreach(lc, windowClause)
5895 : : {
5896 : 467 : WindowClause *wc = lfirst_node(WindowClause, lc);
5897 : 467 : ListCell *lc2;
5898 : 467 : int optimizedFrameOptions = 0;
5899 : :
5900 [ + - ]: 467 : Assert(wc->winref <= wflists->maxWinRef);
5901 : :
5902 : : /* skip any WindowClauses that have no WindowFuncs */
5903 [ + + ]: 467 : if (wflists->windowFuncs[wc->winref] == NIL)
5904 : 4 : continue;
5905 : :
5906 [ + - + + + + ]: 933 : foreach(lc2, wflists->windowFuncs[wc->winref])
5907 : : {
5908 : 470 : SupportRequestOptimizeWindowClause req;
5909 : 470 : SupportRequestOptimizeWindowClause *res;
5910 : 470 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5911 : 470 : Oid prosupport;
5912 : :
5913 : 470 : prosupport = get_func_support(wfunc->winfnoid);
5914 : :
5915 : : /* Check if there's a support function for 'wfunc' */
5916 [ + + ]: 470 : if (!OidIsValid(prosupport))
5917 : 325 : break; /* can't optimize this WindowClause */
5918 : :
5919 : 145 : req.type = T_SupportRequestOptimizeWindowClause;
5920 : 145 : req.window_clause = wc;
5921 : 145 : req.window_func = wfunc;
5922 : 145 : req.frameOptions = wc->frameOptions;
5923 : :
5924 : : /* call the support function */
5925 : 145 : res = (SupportRequestOptimizeWindowClause *)
5926 : 145 : DatumGetPointer(OidFunctionCall1(prosupport,
5927 : : PointerGetDatum(&req)));
5928 : :
5929 : : /*
5930 : : * Skip to next WindowClause if the support function does not
5931 : : * support this request type.
5932 : : */
5933 [ + + ]: 145 : if (res == NULL)
5934 : 37 : break;
5935 : :
5936 : : /*
5937 : : * Save these frameOptions for the first WindowFunc for this
5938 : : * WindowClause.
5939 : : */
5940 [ + + ]: 108 : if (foreach_current_index(lc2) == 0)
5941 : 104 : optimizedFrameOptions = res->frameOptions;
5942 : :
5943 : : /*
5944 : : * On subsequent WindowFuncs, if the frameOptions are not the same
5945 : : * then we're unable to optimize the frameOptions for this
5946 : : * WindowClause.
5947 : : */
5948 [ - + ]: 4 : else if (optimizedFrameOptions != res->frameOptions)
5949 : 0 : break; /* skip to the next WindowClause, if any */
5950 [ + + ]: 470 : }
5951 : :
5952 : : /* adjust the frameOptions if all WindowFuncs agree that it's ok */
5953 [ + + - + ]: 463 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5954 : : {
5955 : 101 : ListCell *lc3;
5956 : :
5957 : : /* apply the new frame options */
5958 : 101 : wc->frameOptions = optimizedFrameOptions;
5959 : :
5960 : : /*
5961 : : * We now check to see if changing the frameOptions has caused
5962 : : * this WindowClause to be a duplicate of some other WindowClause.
5963 : : * This can only happen if we have multiple WindowClauses, so
5964 : : * don't bother if there's only 1.
5965 : : */
5966 [ + + ]: 101 : if (list_length(windowClause) == 1)
5967 : 86 : continue;
5968 : :
5969 : : /*
5970 : : * Do the duplicate check and reuse the existing WindowClause if
5971 : : * we find a duplicate.
5972 : : */
5973 [ + - + + + + ]: 44 : foreach(lc3, windowClause)
5974 : : {
5975 : 29 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5976 : :
5977 : : /* skip over the WindowClause we're currently editing */
5978 [ + + ]: 29 : if (existing_wc == wc)
5979 : 9 : continue;
5980 : :
5981 : : /*
5982 : : * Perform the same duplicate check that is done in
5983 : : * transformWindowFuncCall.
5984 : : */
5985 [ + - ]: 20 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5986 [ + + ]: 20 : equal(wc->orderClause, existing_wc->orderClause) &&
5987 [ + + ]: 14 : wc->frameOptions == existing_wc->frameOptions &&
5988 [ + - - + ]: 6 : equal(wc->startOffset, existing_wc->startOffset) &&
5989 : 6 : equal(wc->endOffset, existing_wc->endOffset))
5990 : : {
5991 : 6 : ListCell *lc4;
5992 : :
5993 : : /*
5994 : : * Now move each WindowFunc in 'wc' into 'existing_wc'.
5995 : : * This requires adjusting each WindowFunc's winref and
5996 : : * moving the WindowFuncs in 'wc' to the list of
5997 : : * WindowFuncs in 'existing_wc'.
5998 : : */
5999 [ + - + + + + ]: 13 : foreach(lc4, wflists->windowFuncs[wc->winref])
6000 : : {
6001 : 7 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
6002 : :
6003 : 7 : wfunc->winref = existing_wc->winref;
6004 : 7 : }
6005 : :
6006 : : /* move list items */
6007 : 12 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
6008 : 6 : wflists->windowFuncs[wc->winref]);
6009 : 6 : wflists->windowFuncs[wc->winref] = NIL;
6010 : :
6011 : : /*
6012 : : * transformWindowFuncCall() should have made sure there
6013 : : * are no other duplicates, so we needn't bother looking
6014 : : * any further.
6015 : : */
6016 : : break;
6017 : 6 : }
6018 [ + + + ]: 29 : }
6019 [ + + ]: 101 : }
6020 [ - + + ]: 467 : }
6021 : 426 : }
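The "lc2 == NULL" test above works because, in recent PostgreSQL versions, foreach() leaves its ListCell pointer NULL after a complete pass, so a non-NULL value afterwards means some WindowFunc forced an early break. The same ran-to-completion idiom, in portable standalone C with a plain index:

#include <stdio.h>

int
main(void)
{
	int		frame_opts[] = {5, 5, 5};
	int		n = 3;
	int		i;

	/* Break out early if any element disagrees; afterwards, i == n
	 * means the loop ran to completion, i.e. all elements agreed. */
	for (i = 0; i < n; i++)
		if (frame_opts[i] != frame_opts[0])
			break;

	printf(i == n ? "all agree: safe to optimize\n"
				  : "disagreement: leave clause alone\n");
	return 0;
}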
6022 : :
6023 : : /*
6024 : : * select_active_windows
6025 : : * Create a list of the "active" window clauses (ie, those referenced
6026 : : * by non-deleted WindowFuncs) in the order they are to be executed.
6027 : : */
6028 : : static List *
6029 : 426 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
6030 : : {
6031 : 426 : List *windowClause = root->parse->windowClause;
6032 : 426 : List *result = NIL;
6033 : 426 : ListCell *lc;
6034 : 426 : int nActive = 0;
6035 : 426 : WindowClauseSortData *actives = palloc_array(WindowClauseSortData,
6036 : : list_length(windowClause));
6037 : :
6038 : : /* First, construct an array of the active windows */
6039 [ + - + + + + ]: 893 : foreach(lc, windowClause)
6040 : : {
6041 : 467 : WindowClause *wc = lfirst_node(WindowClause, lc);
6042 : :
6043 : : /* It's only active if wflists shows some related WindowFuncs */
6044 [ + - ]: 467 : Assert(wc->winref <= wflists->maxWinRef);
6045 [ + + ]: 467 : if (wflists->windowFuncs[wc->winref] == NIL)
6046 : 10 : continue;
6047 : :
6048 : 457 : actives[nActive].wc = wc; /* original clause */
6049 : :
6050 : : /*
6051 : : * For sorting, we want the list of partition keys followed by the
6052 : : * list of sort keys. But pathkeys construction will remove duplicates
6053 : : * between the two, so we can as well (even though we can't detect all
6054 : : * of the duplicates, since some may come from ECs - that might mean
6055 : : * we miss optimization chances here). We must, however, ensure that
6056 : : * the order of entries is preserved with respect to the ones we do
6057 : : * keep.
6058 : : *
6059 : : * partitionClause and orderClause had their own duplicates removed in
6060 : : * parse analysis, so we're only concerned here with removing
6061 : : * orderClause entries that also appear in partitionClause.
6062 : : */
6063 : 457 : actives[nActive].uniqueOrder =
6064 : 914 : list_concat_unique(list_copy(wc->partitionClause),
6065 : 457 : wc->orderClause);
6066 : 457 : nActive++;
6067 [ - + + ]: 467 : }
6068 : :
6069 : : /*
6070 : : * Sort active windows by their partitioning/ordering clauses, ignoring
6071 : : * any framing clauses, so that the windows that need the same sorting are
6072 : : * adjacent in the list. When we come to generate paths, this will avoid
6073 : : * inserting additional Sort nodes.
6074 : : *
6075 : : * This is how we implement a specific requirement from the SQL standard,
6076 : : * which says that when two or more windows are order-equivalent (i.e.
6077 : : * have matching partition and order clauses, even if their names or
6078 : : * framing clauses differ), then all peer rows must be presented in the
6079 : : * same order in all of them. If we allowed multiple sort nodes for such
6080 : : * cases, we'd risk having the peer rows end up in different orders in
6081 : : * equivalent windows due to sort instability. (See General Rule 4 of
6082 : : * <window clause> in SQL2008 - SQL2016.)
6083 : : *
6084 : : * Additionally, if the entire list of clauses of one window is a prefix
6085 : : * of another, put first the window with stronger sorting requirements.
6086 : : * This way we will first sort for stronger window, and won't have to sort
6087 : : * again for the weaker one.
6088 : : */
6089 : 426 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
6090 : :
6091 : : /* build ordered list of the original WindowClause nodes */
6092 [ + + ]: 883 : for (int i = 0; i < nActive; i++)
6093 : 457 : result = lappend(result, actives[i].wc);
6094 : :
6095 : 426 : pfree(actives);
6096 : :
6097 : 852 : return result;
6098 : 426 : }
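list_concat_unique() yields exactly "partition keys, then any order keys not already present, original order kept". A standalone sketch with ints standing in for tleSortGroupRefs (concat_unique() is an illustrative helper, not a list.c API):

#include <stdbool.h>
#include <stdio.h>

/* Append items of b to a, skipping any already present; order kept. */
static int
concat_unique(int *a, int na, const int *b, int nb)
{
	for (int i = 0; i < nb; i++)
	{
		bool	found = false;

		for (int j = 0; j < na; j++)
			if (a[j] == b[i])
				found = true;
		if (!found)
			a[na++] = b[i];
	}
	return na;
}

int
main(void)
{
	int		partition[8] = {1, 2};		/* PARTITION BY refs */
	int		order[] = {2, 3};			/* ORDER BY refs */
	int		n = concat_unique(partition, 2, order, 2);

	for (int i = 0; i < n; i++)
		printf("%d ", partition[i]);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}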
6099 : :
6100 : : /*
6101 : : * name_active_windows
6102 : : * Ensure all active windows have unique names.
6103 : : *
6104 : : * The parser will have checked that user-assigned window names are unique
6105 : : * within the Query. Here we assign made-up names to any unnamed
6106 : : * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
6107 : : * at parse time, because it'd mess up decompilation of views.)
6108 : : *
6109 : : * activeWindows: result of select_active_windows
6110 : : */
6111 : : static void
6112 : 426 : name_active_windows(List *activeWindows)
6113 : : {
6114 : 426 : int next_n = 1;
6115 : 426 : char newname[16];
6116 : 426 : ListCell *lc;
6117 : :
6118 [ + - + + + + ]: 883 : foreach(lc, activeWindows)
6119 : : {
6120 : 457 : WindowClause *wc = lfirst_node(WindowClause, lc);
6121 : :
6122 : : /* Nothing to do if it has a name already. */
6123 [ + + ]: 457 : if (wc->name)
6124 : 96 : continue;
6125 : :
6126 : : /* Select a name not currently present in the list. */
6127 : 362 : for (;;)
6128 : : {
6129 : 362 : ListCell *lc2;
6130 : :
6131 : 362 : snprintf(newname, sizeof(newname), "w%d", next_n++);
6132 [ + - + + + + ]: 783 : foreach(lc2, activeWindows)
6133 : : {
6134 : 421 : WindowClause *wc2 = lfirst_node(WindowClause, lc2);
6135 : :
6136 [ + + + + ]: 421 : if (wc2->name && strcmp(wc2->name, newname) == 0)
6137 : 1 : break; /* matched */
6138 [ + + ]: 421 : }
6139 [ + + ]: 362 : if (lc2 == NULL)
6140 : 361 : break; /* reached the end with no match */
6141 [ + + ]: 362 : }
6142 : 361 : wc->name = pstrdup(newname);
6143 [ + + ]: 457 : }
6144 : 426 : }
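The naming loop just counts up w1, w2, ... and skips any value the user already claimed. The same logic, standalone (the "taken" array stands in for the user-named WindowClauses):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *taken[] = {"w2", "byhour"};	/* user-assigned names */
	int			ntaken = 2;
	int			next_n = 1;
	char		newname[16];

	/* Find the first wN not already present */
	for (;;)
	{
		int		i;

		snprintf(newname, sizeof(newname), "w%d", next_n++);
		for (i = 0; i < ntaken; i++)
			if (strcmp(taken[i], newname) == 0)
				break;
		if (i == ntaken)
			break;						/* no collision */
	}
	/* prints w1; had w1 been taken too, w2 would be skipped and w3 used */
	printf("assigned: %s\n", newname);
	return 0;
}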
6145 : :
6146 : : /*
6147 : : * common_prefix_cmp
6148 : : * QSort comparison function for WindowClauseSortData
6149 : : *
6150 : : * Sort the windows by the required sorting clauses. First, compare the sort
6151 : : * clauses themselves. Second, if one window's clauses are a prefix of another
6152 : : * one's clauses, put the window with more sort clauses first.
6153 : : *
6154 : : * We purposefully sort by the highest tleSortGroupRef first. Since
6155 : : * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
6156 : : * and because here we sort the lowest tleSortGroupRefs last, if a
6157 : : * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
6158 : : * ORDER BY clause, this makes it more likely that the final WindowAgg will
6159 : : * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
6160 : : * reducing the total number of sorts required for the query.
6161 : : */
6162 : : static int
6163 : 34 : common_prefix_cmp(const void *a, const void *b)
6164 : : {
6165 : 34 : const WindowClauseSortData *wcsa = a;
6166 : 34 : const WindowClauseSortData *wcsb = b;
6167 : 34 : ListCell *item_a;
6168 : 34 : ListCell *item_b;
6169 : :
6170 [ + + + + + + + + + + + + + + ]: 78 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6171 : : {
6172 : 44 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6173 : 44 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6174 : :
6175 [ + + ]: 44 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6176 : 2 : return -1;
6177 [ + + ]: 42 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6178 : 11 : return 1;
6179 [ - + ]: 31 : else if (sca->sortop > scb->sortop)
6180 : 0 : return -1;
6181 [ + + ]: 31 : else if (sca->sortop < scb->sortop)
6182 : 4 : return 1;
6183 [ - + # # ]: 27 : else if (sca->nulls_first && !scb->nulls_first)
6184 : 0 : return -1;
6185 [ + - + - ]: 27 : else if (!sca->nulls_first && scb->nulls_first)
6186 : 0 : return 1;
6187 : : /* no need to compare eqop, since it is fully determined by sortop */
6188 [ + + ]: 44 : }
6189 : :
6190 [ + + ]: 17 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6191 : 1 : return -1;
6192 [ + + ]: 16 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6193 : 5 : return 1;
6194 : :
6195 : 11 : return 0;
6196 : 34 : }
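Because common_prefix_cmp() is an ordinary qsort comparator, its ordering rules are easy to exercise in isolation. A standalone sketch with int arrays standing in for the uniqueOrder lists (Win and win_cmp are illustrative; the sortop and nulls_first tiebreaks are omitted for brevity):

#include <stdio.h>
#include <stdlib.h>

typedef struct Win
{
	const int  *refs;
	int			nrefs;
} Win;

/* Same shape as common_prefix_cmp: element-wise, higher ref first;
 * if one key list is a prefix of the other, the longer list first. */
static int
win_cmp(const void *a, const void *b)
{
	const Win  *wa = a;
	const Win  *wb = b;
	int			n = (wa->nrefs < wb->nrefs) ? wa->nrefs : wb->nrefs;

	for (int i = 0; i < n; i++)
	{
		if (wa->refs[i] > wb->refs[i])
			return -1;
		if (wa->refs[i] < wb->refs[i])
			return 1;
	}
	if (wa->nrefs > wb->nrefs)
		return -1;
	if (wa->nrefs < wb->nrefs)
		return 1;
	return 0;
}

int
main(void)
{
	int		k1[] = {3},
			k2[] = {3, 2},
			k3[] = {1};
	Win		wins[] = {{k3, 1}, {k1, 1}, {k2, 2}};

	qsort(wins, 3, sizeof(Win), win_cmp);
	for (int i = 0; i < 3; i++)
		printf("window with %d key(s), first ref %d\n",
			   wins[i].nrefs, wins[i].refs[0]);
	/* resulting order: {3,2}, then {3}, then {1} */
	return 0;
}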
6197 : :
6198 : : /*
6199 : : * make_window_input_target
6200 : : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6201 : : *
6202 : : * When the query has window functions, this function computes the desired
6203 : : * target to be computed by the node just below the first WindowAgg.
6204 : : * This tlist must contain all values needed to evaluate the window functions,
6205 : : * compute the final target list, and perform any required final sort step.
6206 : : * If multiple WindowAggs are needed, each intermediate one adds its window
6207 : : * function results onto this base tlist; only the topmost WindowAgg computes
6208 : : * the actual desired target list.
6209 : : *
6210 : : * This function is much like make_group_input_target, though not quite enough
6211 : : * like it to share code. As in that function, we flatten most expressions
6212 : : * into their component variables. But we do not want to flatten window
6213 : : * PARTITION BY/ORDER BY clauses, since that might result in multiple
6214 : : * evaluations of them, which would be bad (possibly even resulting in
6215 : : * inconsistent answers, if they contain volatile functions).
6216 : : * Also, we must not flatten GROUP BY clauses that were left unflattened by
6217 : : * make_group_input_target, because we may no longer have access to the
6218 : : * individual Vars in them.
6219 : : *
6220 : : * Another key difference from make_group_input_target is that we don't
6221 : : * flatten Aggref expressions, since those are to be computed below the
6222 : : * window functions and just referenced like Vars above that.
6223 : : *
6224 : : * 'final_target' is the query's final target list (in PathTarget form)
6225 : : * 'activeWindows' is the list of active windows previously identified by
6226 : : * select_active_windows.
6227 : : *
6228 : : * The result is the PathTarget to be computed by the plan node immediately
6229 : : * below the first WindowAgg node.
6230 : : */
6231 : : static PathTarget *
6232 : 426 : make_window_input_target(PlannerInfo *root,
6233 : : PathTarget *final_target,
6234 : : List *activeWindows)
6235 : : {
6236 : 426 : PathTarget *input_target;
6237 : 426 : Bitmapset *sgrefs;
6238 : 426 : List *flattenable_cols;
6239 : 426 : List *flattenable_vars;
6240 : 426 : int i;
6241 : 426 : ListCell *lc;
6242 : :
6243 [ + - ]: 426 : Assert(root->parse->hasWindowFuncs);
6244 : :
6245 : : /*
6246 : : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6247 : : * into a bitmapset for convenient reference below.
6248 : : */
6249 : 426 : sgrefs = NULL;
6250 [ + - + + + + ]: 883 : foreach(lc, activeWindows)
6251 : : {
6252 : 457 : WindowClause *wc = lfirst_node(WindowClause, lc);
6253 : 457 : ListCell *lc2;
6254 : :
6255 [ + + + + + + ]: 579 : foreach(lc2, wc->partitionClause)
6256 : : {
6257 : 122 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6258 : :
6259 : 122 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6260 : 122 : }
6261 [ + + + + + + ]: 834 : foreach(lc2, wc->orderClause)
6262 : : {
6263 : 377 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6264 : :
6265 : 377 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6266 : 377 : }
6267 : 457 : }
6268 : :
6269 : : /* Add in sortgroupref numbers of GROUP BY clauses, too */
6270 [ + + + + + + ]: 456 : foreach(lc, root->processed_groupClause)
6271 : : {
6272 : 30 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6273 : :
6274 : 30 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6275 : 30 : }
6276 : :
6277 : : /*
6278 : : * Construct a target containing all the non-flattenable targetlist items,
6279 : : * and save aside the others for a moment.
6280 : : */
6281 : 426 : input_target = create_empty_pathtarget();
6282 : 426 : flattenable_cols = NIL;
6283 : :
6284 : 426 : i = 0;
6285 [ + - + + + + ]: 1802 : foreach(lc, final_target->exprs)
6286 : : {
6287 : 1376 : Expr *expr = (Expr *) lfirst(lc);
6288 [ + - ]: 1376 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
6289 : :
6290 : : /*
6291 : : * Don't want to deconstruct window clauses or GROUP BY items. (Note
6292 : : * that such items can't contain window functions, so it's okay to
6293 : : * compute them below the WindowAgg nodes.)
6294 : : */
6295 [ + + + + ]: 1376 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
6296 : : {
6297 : : /*
6298 : : * Don't want to deconstruct this value, so add it to the input
6299 : : * target as-is.
6300 : : */
6301 : 471 : add_column_to_pathtarget(input_target, expr, sgref);
6302 : 471 : }
6303 : : else
6304 : : {
6305 : : /*
6306 : : * Column is to be flattened, so just remember the expression for
6307 : : * later call to pull_var_clause.
6308 : : */
6309 : 905 : flattenable_cols = lappend(flattenable_cols, expr);
6310 : : }
6311 : :
6312 : 1376 : i++;
6313 : 1376 : }
6314 : :
6315 : : /*
6316 : : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6317 : : * add them to the input target if not already present. (Some might be
6318 : : * there already because they're used directly as window/group clauses.)
6319 : : *
6320 : : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6321 : : * Aggrefs are placed in the Agg node's tlist and not left to be computed
6322 : : * at higher levels. On the other hand, we should recurse into
6323 : : * WindowFuncs to make sure their input expressions are available.
6324 : : */
6325 : 426 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6326 : : PVC_INCLUDE_AGGREGATES |
6327 : : PVC_RECURSE_WINDOWFUNCS |
6328 : : PVC_INCLUDE_PLACEHOLDERS);
6329 : 426 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
6330 : :
6331 : : /* clean up cruft */
6332 : 426 : list_free(flattenable_vars);
6333 : 426 : list_free(flattenable_cols);
6334 : :
6335 : : /* XXX this causes some redundant cost calculation ... */
6336 : 852 : return set_pathtarget_cost_width(root, input_target);
6337 : 426 : }
6338 : :
6339 : : /*
6340 : : * make_pathkeys_for_window
6341 : : * Create a pathkeys list describing the required input ordering
6342 : : * for the given WindowClause.
6343 : : *
6344 : : * Modifies wc's partitionClause to remove any clauses which are deemed
6345 : : * redundant by the pathkey logic.
6346 : : *
6347 : : * The required ordering is first the PARTITION keys, then the ORDER keys.
6348 : : * In the future we might try to implement windowing using hashing, in which
6349 : : * case the ordering could be relaxed, but for now we always sort.
6350 : : */
6351 : : static List *
6352 : 918 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6353 : : List *tlist)
6354 : : {
6355 : 918 : List *window_pathkeys = NIL;
6356 : :
6357 : : /* Throw error if can't sort */
6358 [ + - ]: 918 : if (!grouping_is_sortable(wc->partitionClause))
6359 [ # # # # ]: 0 : ereport(ERROR,
6360 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6361 : : errmsg("could not implement window PARTITION BY"),
6362 : : errdetail("Window partitioning columns must be of sortable datatypes.")));
6363 [ + - ]: 918 : if (!grouping_is_sortable(wc->orderClause))
6364 [ # # # # ]: 0 : ereport(ERROR,
6365 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6366 : : errmsg("could not implement window ORDER BY"),
6367 : : errdetail("Window ordering columns must be of sortable datatypes.")));
6368 : :
6369 : : /*
6370 : : * First fetch the pathkeys for the PARTITION BY clause. We can safely
6371 : : * remove any clauses from the wc->partitionClause for redundant pathkeys.
6372 : : */
6373 [ + + ]: 918 : if (wc->partitionClause != NIL)
6374 : : {
6375 : 214 : bool sortable;
6376 : :
6377 : 428 : window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6378 : 214 : &wc->partitionClause,
6379 : 214 : tlist,
6380 : : true,
6381 : : false,
6382 : : &sortable,
6383 : : false);
6384 : :
6385 [ + - ]: 214 : Assert(sortable);
6386 : 214 : }
6387 : :
6388 : : /*
6389 : : * In principle, we could also consider removing redundant ORDER BY items
6390 : : * too as doing so does not alter the result of peer row checks done by
6391 : : * the executor. However, we must *not* remove the ordering column for
6392 : : * RANGE OFFSET cases, as the executor needs that for in_range tests even
6393 : : * if it's known to be equal to some partitioning column.
6394 : : */
6395 [ + + ]: 918 : if (wc->orderClause != NIL)
6396 : : {
6397 : 738 : List *orderby_pathkeys;
6398 : :
6399 : 1476 : orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6400 : 738 : wc->orderClause,
6401 : 738 : tlist);
6402 : :
6403 : : /* Okay, make the combined pathkeys */
6404 [ + + ]: 738 : if (window_pathkeys != NIL)
6405 : 154 : window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6406 : : else
6407 : 584 : window_pathkeys = orderby_pathkeys;
6408 : 738 : }
6409 : :
6410 : 1836 : return window_pathkeys;
6411 : 918 : }
6412 : :
6413 : : /*
6414 : : * make_sort_input_target
6415 : : * Generate appropriate PathTarget for initial input to Sort step.
6416 : : *
6417 : : * If the query has ORDER BY, this function chooses the target to be computed
6418 : : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6419 : : * project) steps. This might or might not be identical to the query's final
6420 : : * output target.
6421 : : *
6422 : : * The main argument for keeping the sort-input tlist the same as the final
6423 : : * is that we avoid a separate projection node (which will be needed if
6424 : : * they're different, because Sort can't project). However, there are also
6425 : : * advantages to postponing tlist evaluation till after the Sort: it ensures
6426 : : * a consistent order of evaluation for any volatile functions in the tlist,
6427 : : * and if there's also a LIMIT, we can stop the query without ever computing
6428 : : * tlist functions for later rows, which is beneficial for both volatile and
6429 : : * expensive functions.
6430 : : *
6431 : : * Our current policy is to postpone volatile expressions till after the sort
6432 : : * unconditionally (assuming that that's possible, ie they are in plain tlist
6433 : : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6434 : : * postpone set-returning expressions, because running them beforehand would
6435 : : * bloat the sort dataset, and because it might cause unexpected output order
6436 : : * if the sort isn't stable. However there's a constraint on that: all SRFs
6437 : : * in the tlist should be evaluated at the same plan step, so that they can
6438 : : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6439 : : * mustn't postpone any SRFs. (Note that in principle that policy should
6440 : : * probably get applied to the group/window input targetlists too, but we
6441 : : * have not done that historically.) Lastly, expensive expressions are
6442 : : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6443 : : * partial evaluation of the query is possible (if neither is true, we expect
6444 : : * to have to evaluate the expressions for every row anyway), or if there are
6445 : : * any volatile or set-returning expressions (since once we've put in a
6446 : : * projection at all, it won't cost any more to postpone more stuff).
6447 : : *
6448 : : * Another issue that could potentially be considered here is that
6449 : : * evaluating tlist expressions could result in data that's either wider
6450 : : * or narrower than the input Vars, thus changing the volume of data that
6451 : : * has to go through the Sort. However, we usually have only a very bad
6452 : : * idea of the output width of any expression more complex than a Var,
6453 : : * so for now it seems too risky to try to optimize on that basis.
6454 : : *
6455 : : * Note that if we do produce a modified sort-input target, and then the
6456 : : * query ends up not using an explicit Sort, no particular harm is done:
6457 : : * we'll initially use the modified target for the preceding path nodes,
6458 : : * but then change them to the final target with apply_projection_to_path.
6459 : : * Moreover, in such a case the guarantees about evaluation order of
6460 : : * volatile functions still hold, since the rows are sorted already.
6461 : : *
6462 : : * This function has some things in common with make_group_input_target and
6463 : : * make_window_input_target, though the detailed rules for what to do are
6464 : : * different. We never flatten/postpone any grouping or ordering columns;
6465 : : * those are needed before the sort. If we do flatten a particular
6466 : : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6467 : : * computed earlier.
6468 : : *
6469 : : * 'final_target' is the query's final target list (in PathTarget form)
6470 : : * 'have_postponed_srfs' is an output argument, see below
6471 : : *
6472 : : * The result is the PathTarget to be computed by the plan node immediately
6473 : : * below the Sort step (and the Distinct step, if any). This will be
6474 : : * exactly final_target if we decide a projection step wouldn't be helpful.
6475 : : *
6476 : : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6477 : : * any set-returning functions to after the Sort.
6478 : : */
6479 : : static PathTarget *
6480 : 8401 : make_sort_input_target(PlannerInfo *root,
6481 : : PathTarget *final_target,
6482 : : bool *have_postponed_srfs)
6483 : : {
6484 : 8401 : Query *parse = root->parse;
6485 : 8401 : PathTarget *input_target;
6486 : 8401 : int ncols;
6487 : 8401 : bool *col_is_srf;
6488 : 8401 : bool *postpone_col;
6489 : 8401 : bool have_srf;
6490 : 8401 : bool have_volatile;
6491 : 8401 : bool have_expensive;
6492 : 8401 : bool have_srf_sortcols;
6493 : 8401 : bool postpone_srfs;
6494 : 8401 : List *postponable_cols;
6495 : 8401 : List *postponable_vars;
6496 : 8401 : int i;
6497 : 8401 : ListCell *lc;
6498 : :
6499 : : /* Shouldn't get here unless query has ORDER BY */
6500 [ + - ]: 8401 : Assert(parse->sortClause);
6501 : :
6502 : 8401 : *have_postponed_srfs = false; /* default result */
6503 : :
6504 : : /* Inspect tlist and collect per-column information */
6505 : 8401 : ncols = list_length(final_target->exprs);
6506 : 8401 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6507 : 8401 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6508 : 8401 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6509 : :
6510 : 8401 : i = 0;
6511 [ + - + + + + ]: 47068 : foreach(lc, final_target->exprs)
6512 : : {
6513 : 38667 : Expr *expr = (Expr *) lfirst(lc);
6514 : :
6515 : : /*
6516 : : * If the column has a sortgroupref, assume it has to be evaluated
6517 : : * before sorting. Generally such columns would be ORDER BY, GROUP
6518 : : * BY, etc targets. One exception is columns that were removed from
6519 : : * GROUP BY by remove_useless_groupby_columns() ... but those would
6520 : : * only be Vars anyway. There don't seem to be any cases where it
6521 : : * would be worth the trouble to double-check.
6522 : : */
6523 [ + - + + ]: 38667 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6524 : : {
6525 : : /*
6526 : : * Check for SRF or volatile functions. Check the SRF case first
6527 : : * because we must know whether we have any postponed SRFs.
6528 : : */
6529 [ + + + + ]: 26607 : if (parse->hasTargetSRFs &&
6530 : 36 : expression_returns_set((Node *) expr))
6531 : : {
6532 : : /* We'll decide below whether these are postponable */
6533 : 16 : col_is_srf[i] = true;
6534 : 16 : have_srf = true;
6535 : 16 : }
6536 [ + + ]: 26591 : else if (contain_volatile_functions((Node *) expr))
6537 : : {
6538 : : /* Unconditionally postpone */
6539 : 22 : postpone_col[i] = true;
6540 : 22 : have_volatile = true;
6541 : 22 : }
6542 : : else
6543 : : {
6544 : : /*
6545 : : * Else check the cost. XXX it's annoying to have to do this
6546 : : * when set_pathtarget_cost_width() just did it. Refactor to
6547 : : * allow sharing the work?
6548 : : */
6549 : 26569 : QualCost cost;
6550 : :
6551 : 26569 : cost_qual_eval_node(&cost, (Node *) expr, root);
6552 : :
6553 : : /*
6554 : : * We arbitrarily define "expensive" as "more than 10X
6555 : : * cpu_operator_cost". Note this will take in any PL function
6556 : : * with default cost.
6557 : : */
6558 [ + + ]: 26569 : if (cost.per_tuple > 10 * cpu_operator_cost)
6559 : : {
6560 : 2287 : postpone_col[i] = true;
6561 : 2287 : have_expensive = true;
6562 : 2287 : }
6563 : 26569 : }
6564 : 26607 : }
6565 : : else
6566 : : {
6567 : : /* For sortgroupref cols, just check if any contain SRFs */
6568 [ + + ]: 12060 : if (!have_srf_sortcols &&
6569 [ + + + + ]: 12056 : parse->hasTargetSRFs &&
6570 : 55 : expression_returns_set((Node *) expr))
6571 : 24 : have_srf_sortcols = true;
6572 : : }
6573 : :
6574 : 38667 : i++;
6575 : 38667 : }
6576 : :
6577 : : /*
6578 : : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6579 : : */
6580 [ + + ]: 8401 : postpone_srfs = (have_srf && !have_srf_sortcols);
6581 : :
6582 : : /*
6583 : : * If we don't need a post-sort projection, just return final_target.
6584 : : */
6585 [ + + + + - + ]: 9730 : if (!(postpone_srfs || have_volatile ||
6586 [ + + ]: 8369 : (have_expensive &&
6587 [ + + ]: 1335 : (parse->limitCount || root->tuple_fraction > 0))))
6588 : 8363 : return final_target;
6589 : :
6590 : : /*
6591 : : * Report whether the post-sort projection will contain set-returning
6592 : : * functions. This is important because it affects whether the Sort can
6593 : : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6594 : : * to return.
6595 : : */
6596 : 38 : *have_postponed_srfs = postpone_srfs;
6597 : :
6598 : : /*
6599 : : * Construct the sort-input target, taking all non-postponable columns and
6600 : : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6601 : : * the postponable ones.
6602 : : */
6603 : 38 : input_target = create_empty_pathtarget();
6604 : 38 : postponable_cols = NIL;
6605 : :
6606 : 38 : i = 0;
6607 [ + - + + + + ]: 317 : foreach(lc, final_target->exprs)
6608 : : {
6609 : 279 : Expr *expr = (Expr *) lfirst(lc);
6610 : :
6611 [ + + + + : 279 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
+ + ]
6612 : 47 : postponable_cols = lappend(postponable_cols, expr);
6613 : : else
6614 : 464 : add_column_to_pathtarget(input_target, expr,
6615 [ + - ]: 232 : get_pathtarget_sortgroupref(final_target, i));
6616 : :
6617 : 279 : i++;
6618 : 279 : }
6619 : :
6620 : : /*
6621 : : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6622 : : * postponable columns, and add them to the sort-input target if not
6623 : : * already present. (Some might be there already.) We mustn't
6624 : : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6625 : : * would be unable to recompute them.
6626 : : */
6627 : 38 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6628 : : PVC_INCLUDE_AGGREGATES |
6629 : : PVC_INCLUDE_WINDOWFUNCS |
6630 : : PVC_INCLUDE_PLACEHOLDERS);
6631 : 38 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6632 : :
6633 : : /* clean up cruft */
6634 : 38 : list_free(postponable_vars);
6635 : 38 : list_free(postponable_cols);
6636 : :
6637 : : /* XXX this represents even more redundant cost calculation ... */
6638 : 38 : return set_pathtarget_cost_width(root, input_target);
6639 : 8401 : }
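The postpone decision above reduces to a little arithmetic. A standalone sketch, assuming the documented default cpu_operator_cost of 0.0025 and folding root->tuple_fraction into a single has_limit flag for brevity:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	double	cpu_operator_cost = 0.0025;	/* documented default */
	double	per_tuple = 0.075;			/* say, a PL function call */
	bool	is_volatile = false;
	bool	has_limit = true;

	/* Volatile exprs postpone unconditionally; merely-expensive ones
	 * ("more than 10X cpu_operator_cost") postpone only when a LIMIT
	 * (or nonzero tuple_fraction) means we may not scan every row. */
	bool	expensive = per_tuple > 10 * cpu_operator_cost;
	bool	postpone = is_volatile || (expensive && has_limit);

	printf("expensive=%d postpone=%d\n", expensive, postpone);
	return 0;
}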
6640 : :
6641 : : /*
6642 : : * get_cheapest_fractional_path
6643 : : * Find the cheapest path for retrieving a specified fraction of all
6644 : : * the tuples expected to be returned by the given relation.
6645 : : *
6646 : : * Do not consider parameterized paths. If the caller needs a path for an
6647 : : * upper rel, it can't have parameterized paths. And if the caller needs an
6648 : : * append subpath, a parameterized path could prove a liability, since all
6649 : : * of an Append's subpaths must carry similar parameterization.
6650 : : *
6651 : : * We interpret tuple_fraction the same way as grouping_planner.
6652 : : *
6653 : : * We assume set_cheapest() has been run on the given rel.
6654 : : */
6655 : : Path *
6656 : 47902 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6657 : : {
6658 : 47902 : Path *best_path = rel->cheapest_total_path;
6659 : 47902 : ListCell *l;
6660 : :
6661 : : /* If all tuples will be retrieved, just return the cheapest-total path */
6662 [ + + ]: 47902 : if (tuple_fraction <= 0.0)
6663 : 46899 : return best_path;
6664 : :
6665 : : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6666 [ + + + + ]: 1003 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
6667 : 451 : tuple_fraction /= best_path->rows;
6668 : :
6669 [ + - + + + + ]: 2868 : foreach(l, rel->pathlist)
6670 : : {
6671 : 1865 : Path *path = (Path *) lfirst(l);
6672 : :
6673 [ + + ]: 1865 : if (path->param_info)
6674 : 32 : continue;
6675 : :
6676 [ + + + + ]: 1833 : if (path == rel->cheapest_total_path ||
6677 : 830 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6678 : 1770 : continue;
6679 : :
6680 : 63 : best_path = path;
6681 [ - + + ]: 1865 : }
6682 : :
6683 : 1003 : return best_path;
6684 : 47902 : }
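compare_fractional_path_costs() (in pathnode.c) is understood here to interpolate linearly between startup and total cost; on that assumption, the trade-off this function exploits looks like the standalone sketch below (ToyPath, fractional_cost(), and the cost figures are all invented for illustration):

#include <stdio.h>

typedef struct ToyPath
{
	double		startup_cost;
	double		total_cost;
} ToyPath;

/* Cost to fetch the first `fraction` of the rows, assuming cost
 * accrues linearly between startup and total. */
static double
fractional_cost(const ToyPath *p, double fraction)
{
	return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}

int
main(void)
{
	ToyPath	seqscan_sort = {90.0, 100.0};	/* high startup (sort) */
	ToyPath	indexscan = {1.0, 500.0};		/* cheap startup, dear total */
	double	f = 10.0 / 1000.0;				/* LIMIT 10 of ~1000 rows */

	printf("sort: %.2f  index: %.2f\n",
		   fractional_cost(&seqscan_sort, f),
		   fractional_cost(&indexscan, f));	/* index wins for small f */
	return 0;
}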
6685 : :
6686 : : /*
6687 : : * adjust_paths_for_srfs
6688 : : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6689 : : *
6690 : : * The executor can only handle set-returning functions that appear at the
6691 : : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6692 : : * that are not at top level, we need to split up the evaluation into multiple
6693 : : * plan levels in which each level satisfies this constraint. This function
6694 : : * modifies each Path of an upperrel that (might) compute any SRFs in its
6695 : : * output tlist to insert appropriate projection steps.
6696 : : *
6697 : : * The given targets and targets_contain_srfs lists are from
6698 : : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6699 : : * target in targets.
6700 : : */
6701 : : static void
6702 : 1711 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6703 : : List *targets, List *targets_contain_srfs)
6704 : : {
6705 : 1711 : ListCell *lc;
6706 : :
6707 [ + - ]: 1711 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6708 [ + - ]: 1711 : Assert(!linitial_int(targets_contain_srfs));
6709 : :
6710 : : /* If no SRFs appear at this plan level, nothing to do */
6711 [ + + ]: 1711 : if (list_length(targets) == 1)
6712 : 65 : return;
6713 : :
6714 : : /*
6715 : : * Stack SRF-evaluation nodes atop each path for the rel.
6716 : : *
6717 : : * In principle we should re-run set_cheapest() here to identify the
6718 : : * cheapest path, but it seems unlikely that adding the same tlist eval
6719 : : * costs to all the paths would change that, so we don't bother. Instead,
6720 : : * just assume that the cheapest-startup and cheapest-total paths remain
6721 : : * so. (There should be no parameterized paths anymore, so we needn't
6722 : : * worry about updating cheapest_parameterized_paths.)
6723 : : */
6724 [ + - + + + + ]: 3299 : foreach(lc, rel->pathlist)
6725 : : {
6726 : 1653 : Path *subpath = (Path *) lfirst(lc);
6727 : 1653 : Path *newpath = subpath;
6728 : 1653 : ListCell *lc1,
6729 : : *lc2;
6730 : :
6731 [ + - ]: 1653 : Assert(subpath->param_info == NULL);
6732 [ + - + + + - + + + + + + ]: 5040 : forboth(lc1, targets, lc2, targets_contain_srfs)
6733 : : {
6734 : 3387 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6735 : 3387 : bool contains_srfs = (bool) lfirst_int(lc2);
6736 : :
6737 : : /* If this level doesn't contain SRFs, do regular projection */
6738 [ + + ]: 3387 : if (contains_srfs)
6739 : 3326 : newpath = (Path *) create_set_projection_path(root,
6740 : 1663 : rel,
6741 : 1663 : newpath,
6742 : 1663 : thistarget);
6743 : : else
6744 : 3448 : newpath = (Path *) apply_projection_to_path(root,
6745 : 1724 : rel,
6746 : 1724 : newpath,
6747 : 1724 : thistarget);
6748 : 3387 : }
6749 : 1653 : lfirst(lc) = newpath;
6750 [ + + ]: 1653 : if (subpath == rel->cheapest_startup_path)
6751 : 15 : rel->cheapest_startup_path = newpath;
6752 [ + + ]: 1653 : if (subpath == rel->cheapest_total_path)
6753 : 15 : rel->cheapest_total_path = newpath;
6754 : 1653 : }
6755 : :
6756 : : /* Likewise for partial paths, if any */
6757 [ + + + + + + ]: 1647 : foreach(lc, rel->partial_pathlist)
6758 : : {
6759 : 1 : Path *subpath = (Path *) lfirst(lc);
6760 : 1 : Path *newpath = subpath;
6761 : 1 : ListCell *lc1,
6762 : : *lc2;
6763 : :
6764 [ + - ]: 1 : Assert(subpath->param_info == NULL);
6765 [ + - + + + - + + + + + + ]: 4 : forboth(lc1, targets, lc2, targets_contain_srfs)
6766 : : {
6767 : 3 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6768 : 3 : bool contains_srfs = (bool) lfirst_int(lc2);
6769 : :
6770 : : /* If this level doesn't contain SRFs, do regular projection */
6771 [ + + ]: 3 : if (contains_srfs)
6772 : 2 : newpath = (Path *) create_set_projection_path(root,
6773 : 1 : rel,
6774 : 1 : newpath,
6775 : 1 : thistarget);
6776 : : else
6777 : : {
6778 : : /* avoid apply_projection_to_path, in case of multiple refs */
6779 : 4 : newpath = (Path *) create_projection_path(root,
6780 : 2 : rel,
6781 : 2 : newpath,
6782 : 2 : thistarget);
6783 : : }
6784 : 3 : }
6785 : 1 : lfirst(lc) = newpath;
6786 : 1 : }
6787 [ - + ]: 1711 : }
6788 : :
6789 : : /*
6790 : : * expression_planner
6791 : : * Perform planner's transformations on a standalone expression.
6792 : : *
6793 : : * Various utility commands need to evaluate expressions that are not part
6794 : : * of a plannable query. They can do so using the executor's regular
6795 : : * expression-execution machinery, but first the expression has to be fed
6796 : : * through here to transform it from parser output to something executable.
6797 : : *
6798 : : * Currently, we disallow sublinks in standalone expressions, so there's no
6799 : : * real "planning" involved here. (That might not always be true though.)
6800 : : * What we must do is run eval_const_expressions to ensure that any function
6801 : : * calls are converted to positional notation and function default arguments
6802 : : * get inserted. The fact that constant subexpressions get simplified is a
6803 : : * side-effect that is useful when the expression will get evaluated more than
6804 : : * once. Also, we must fix operator function IDs.
6805 : : *
6806 : : * This does not return any information about dependencies of the expression.
6807 : : * Hence callers should use the results only for the duration of the current
6808 : : * query. Callers that would like to cache the results for longer should use
6809 : : * expression_planner_with_deps, probably via the plancache.
6810 : : *
6811 : : * Note: this must not make any damaging changes to the passed-in expression
6812 : : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6813 : : * we first do an expression_tree_mutator-based walk, what is returned will
6814 : : * be a new node tree.) The result is constructed in the current memory
6815 : : * context; beware that this can leak a lot of additional stuff there, too.
6816 : : */
6817 : : Expr *
6818 : 8274 : expression_planner(Expr *expr)
6819 : : {
6820 : 8274 : Node *result;
6821 : :
6822 : : /*
6823 : : * Convert named-argument function calls, insert default arguments and
6824 : : * simplify constant subexprs
6825 : : */
6826 : 8274 : result = eval_const_expressions(NULL, (Node *) expr);
6827 : :
6828 : : /* Fill in opfuncid values if missing */
6829 : 8274 : fix_opfuncids(result);
6830 : :
6831 : 16548 : return (Expr *) result;
6832 : 8274 : }
6833 : :
6834 : : /*
6835 : : * expression_planner_with_deps
6836 : : * Perform planner's transformations on a standalone expression,
6837 : : * returning expression dependency information along with the result.
6838 : : *
6839 : : * This is identical to expression_planner() except that it also returns
6840 : : * information about possible dependencies of the expression, ie identities of
6841 : : * objects whose definitions affect the result. As in a PlannedStmt, these
6842 : : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6843 : : */
6844 : : Expr *
6845 : 52 : expression_planner_with_deps(Expr *expr,
6846 : : List **relationOids,
6847 : : List **invalItems)
6848 : : {
6849 : 52 : Node *result;
6850 : 52 : PlannerGlobal glob;
6851 : 52 : PlannerInfo root;
6852 : :
6853 : : /* Make up dummy planner state so we can use setrefs machinery */
6854 [ + - + - + - - + + + + ]: 1508 : MemSet(&glob, 0, sizeof(glob));
6855 : 52 : glob.type = T_PlannerGlobal;
6856 : 52 : glob.relationOids = NIL;
6857 : 52 : glob.invalItems = NIL;
6858 : :
6859 [ + - + - + - - + + + + ]: 4836 : MemSet(&root, 0, sizeof(root));
6860 : 52 : root.type = T_PlannerInfo;
6861 : 52 : root.glob = &glob;
6862 : :
6863 : : /*
6864 : : * Convert named-argument function calls, insert default arguments and
6865 : : * simplify constant subexprs. Collect identities of inlined functions
6866 : : * and elided domains, too.
6867 : : */
6868 : 52 : result = eval_const_expressions(&root, (Node *) expr);
6869 : :
6870 : : /* Fill in opfuncid values if missing */
6871 : 52 : fix_opfuncids(result);
6872 : :
6873 : : /*
6874 : : * Now walk the finished expression to find anything else we ought to
6875 : : * record as an expression dependency.
6876 : : */
6877 : 52 : (void) extract_query_dependencies_walker(result, &root);
6878 : :
6879 : 52 : *relationOids = glob.relationOids;
6880 : 52 : *invalItems = glob.invalItems;
6881 : :
6882 : 104 : return (Expr *) result;
6883 : 52 : }
6884 : :
6885 : :
6886 : : /*
6887 : : * plan_cluster_use_sort
6888 : : * Use the planner to decide how CLUSTER should implement sorting
6889 : : *
6890 : : * tableOid is the OID of a table to be clustered on its index indexOid
6891 : : * (which is already known to be a btree index). Decide whether it's
6892 : : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6893 : : * Return true to use sorting, false to use an indexscan.
6894 : : *
6895 : : * Note: caller had better already hold some type of lock on the table.
6896 : : */
6897 : : bool
6898 : 27 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6899 : : {
6900 : 27 : PlannerInfo *root;
6901 : 27 : Query *query;
6902 : 27 : PlannerGlobal *glob;
6903 : 27 : RangeTblEntry *rte;
6904 : 27 : RelOptInfo *rel;
6905 : 27 : IndexOptInfo *indexInfo;
6906 : 27 : QualCost indexExprCost;
6907 : 27 : Cost comparisonCost;
6908 : 27 : Path *seqScanPath;
6909 : 27 : Path seqScanAndSortPath;
6910 : 27 : IndexPath *indexScanPath;
6911 : 27 : ListCell *lc;
6912 : :
6913 : : /* We can short-circuit the cost comparison if indexscans are disabled */
6914 [ + + ]: 27 : if (!enable_indexscan)
6915 : 5 : return true; /* use sort */
6916 : :
6917 : : /* Set up mostly-dummy planner state */
6918 : 22 : query = makeNode(Query);
6919 : 22 : query->commandType = CMD_SELECT;
6920 : :
6921 : 22 : glob = makeNode(PlannerGlobal);
6922 : :
6923 : 22 : root = makeNode(PlannerInfo);
6924 : 22 : root->parse = query;
6925 : 22 : root->glob = glob;
6926 : 22 : root->query_level = 1;
6927 : 22 : root->planner_cxt = CurrentMemoryContext;
6928 : 22 : root->wt_param_id = -1;
6929 : 22 : root->join_domains = list_make1(makeNode(JoinDomain));
6930 : :
6931 : : /* Build a minimal RTE for the rel */
6932 : 22 : rte = makeNode(RangeTblEntry);
6933 : 22 : rte->rtekind = RTE_RELATION;
6934 : 22 : rte->relid = tableOid;
6935 : 22 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6936 : 22 : rte->rellockmode = AccessShareLock;
6937 : 22 : rte->lateral = false;
6938 : 22 : rte->inh = false;
6939 : 22 : rte->inFromCl = true;
6940 : 22 : query->rtable = list_make1(rte);
6941 : 22 : addRTEPermissionInfo(&query->rteperminfos, rte);
6942 : :
6943 : : /* Set up RTE/RelOptInfo arrays */
6944 : 22 : setup_simple_rel_arrays(root);
6945 : :
6946 : : /* Build RelOptInfo */
6947 : 22 : rel = build_simple_rel(root, 1, NULL);
6948 : :
6949 : : /* Locate IndexOptInfo for the target index */
6950 : 22 : indexInfo = NULL;
6951 [ + - - + : 47 : foreach(lc, rel->indexlist)
+ - ]
6952 : : {
6953 : 25 : indexInfo = lfirst_node(IndexOptInfo, lc);
6954 [ + + ]: 25 : if (indexInfo->indexoid == indexOid)
6955 : 22 : break;
6956 : 3 : }
6957 : :
6958 : : /*
6959 : : * It's possible that get_relation_info did not generate an IndexOptInfo
6961 : : * for the desired index; this could happen if the index has not yet
6961 : : * reached its indcheckxmin usability horizon, or if it's a system index
6962 : : * and we're ignoring system indexes. In such cases we should tell
6963 : : * CLUSTER not to trust the index contents but to use seqscan-and-sort.
6964 : : */
6965 [ + - ]: 22 : if (lc == NULL) /* not in the list? */
6966 : 0 : return true; /* use sort */
6967 : :
6968 : : /*
6969 : : * Rather than doing all the pushups that would be needed to use
6970 : : * set_baserel_size_estimates, just do a quick hack for rows and width.
6971 : : */
6972 : 22 : rel->rows = rel->tuples;
6973 : 22 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6974 : :
6975 : 22 : root->total_table_pages = rel->pages;
6976 : :
6977 : : /*
6978 : : * Determine eval cost of the index expressions, if any. We need to
6979 : : * charge twice that amount for each tuple comparison that happens during
6980 : : * the sort, since tuplesort.c will have to re-evaluate the index
6981 : : * expressions each time. (XXX that's pretty inefficient...)
6982 : : */
6983 : 22 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6984 : 22 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6985 : :
6986 : : /* Estimate the cost of seq scan + sort */
6987 : 22 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6988 : 44 : cost_sort(&seqScanAndSortPath, root, NIL,
6989 : 22 : seqScanPath->disabled_nodes,
6990 : 22 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6991 : 22 : comparisonCost, maintenance_work_mem, -1.0);
6992 : :
6993 : : /* Estimate the cost of index scan */
6994 : 22 : indexScanPath = create_index_path(root, indexInfo,
6995 : : NIL, NIL, NIL, NIL,
6996 : : ForwardScanDirection, false,
6997 : : NULL, 1.0, false);
6998 : :
6999 : 22 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
7000 : 27 : }
7001 : :
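: : /*
: :  * Editor's sketch (illustrative, not part of the source): a
: :  * CLUSTER-style caller consumes the boolean result as
: :  *
: :  *     if (plan_cluster_use_sort(tableOid, indexOid))
: :  *         ... rewrite the heap via seqscan + sort ...
: :  *     else
: :  *         ... rewrite the heap via an index scan on indexOid ...
: :  *
: :  * Both strategies produce the same physical ordering; only the
: :  * estimated cost differs.
: :  */
: :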
7002 : : /*
7003 : : * plan_create_index_workers
7004 : : * Use the planner to decide how many parallel worker processes
7005 : : * CREATE INDEX should request for use
7006 : : *
7007 : : * tableOid is the table on which the index is to be built. indexOid is the
7008 : : * OID of an index to be created or reindexed (which must be an index with
7009 : : * support for parallel builds - currently btree, GIN, or BRIN).
7010 : : *
7011 : : * Return value is the number of parallel worker processes to request. It
7012 : : * may be unsafe to proceed if this is 0. Note that this does not include the
7013 : : * leader participating as a worker (value is always a number of parallel
7014 : : * worker processes).
7015 : : *
7016 : : * Note: caller had better already hold some type of lock on the table and
7017 : : * index.
7018 : : */
7019 : : int
7020 : 3573 : plan_create_index_workers(Oid tableOid, Oid indexOid)
7021 : : {
7022 : 3573 : PlannerInfo *root;
7023 : 3573 : Query *query;
7024 : 3573 : PlannerGlobal *glob;
7025 : 3573 : RangeTblEntry *rte;
7026 : 3573 : Relation heap;
7027 : 3573 : Relation index;
7028 : 3573 : RelOptInfo *rel;
7029 : 3573 : int parallel_workers;
7030 : 3573 : BlockNumber heap_blocks;
7031 : 3573 : double reltuples;
7032 : 3573 : double allvisfrac;
7033 : :
7034 : : /*
7035 : : * We don't allow performing a parallel operation in a standalone
7036 : : * backend or when parallelism is disabled.
7037 : : */
7038 [ + + + + ]: 3573 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
7039 : 24 : return 0;
7040 : :
7041 : : /* Set up largely-dummy planner state */
7042 : 3549 : query = makeNode(Query);
7043 : 3549 : query->commandType = CMD_SELECT;
7044 : :
7045 : 3549 : glob = makeNode(PlannerGlobal);
7046 : :
7047 : 3549 : root = makeNode(PlannerInfo);
7048 : 3549 : root->parse = query;
7049 : 3549 : root->glob = glob;
7050 : 3549 : root->query_level = 1;
7051 : 3549 : root->planner_cxt = CurrentMemoryContext;
7052 : 3549 : root->wt_param_id = -1;
7053 : 3549 : root->join_domains = list_make1(makeNode(JoinDomain));
7054 : :
7055 : : /*
7056 : : * Build a minimal RTE.
7057 : : *
7058 : : * Mark the RTE with inh = true. This is a kludge to prevent
7059 : : * get_relation_info() from fetching index info; the kludge is necessary
7060 : : * because get_relation_info() does not expect that any IndexOptInfo is
7061 : : * currently undergoing REINDEX.
7062 : : */
7063 : 3549 : rte = makeNode(RangeTblEntry);
7064 : 3549 : rte->rtekind = RTE_RELATION;
7065 : 3549 : rte->relid = tableOid;
7066 : 3549 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7067 : 3549 : rte->rellockmode = AccessShareLock;
7068 : 3549 : rte->lateral = false;
7069 : 3549 : rte->inh = true;
7070 : 3549 : rte->inFromCl = true;
7071 : 3549 : query->rtable = list_make1(rte);
7072 : 3549 : addRTEPermissionInfo(&query->rteperminfos, rte);
7073 : :
7074 : : /* Set up RTE/RelOptInfo arrays */
7075 : 3549 : setup_simple_rel_arrays(root);
7076 : :
7077 : : /* Build RelOptInfo */
7078 : 3549 : rel = build_simple_rel(root, 1, NULL);
7079 : :
7080 : : /* Rels are assumed already locked by the caller */
7081 : 3549 : heap = table_open(tableOid, NoLock);
7082 : 3549 : index = index_open(indexOid, NoLock);
7083 : :
7084 : : /*
7085 : : * Determine if it's safe to proceed.
7086 : : *
7087 : : * Currently, parallel workers can't access the leader's temporary tables.
7088 : : * Furthermore, any index predicate or index expressions must be parallel
7089 : : * safe.
7090 : : */
7091 [ + + ]: 3549 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7092 [ + + - + ]: 3222 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
7093 : 3202 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
7094 : : {
7095 : 347 : parallel_workers = 0;
7096 : 347 : goto done;
7097 : : }
7098 : :
7099 : : /*
7100 : : * If the parallel_workers storage parameter is set for the table, accept
7101 : : * that as the number of parallel worker processes to launch (though still
7102 : : * cap at max_parallel_maintenance_workers). Note that we deliberately do
7103 : : * not consider any other factor (e.g., memory use by workers) when
7104 : : * parallel_workers is set.
7105 : : */
7106 [ + + ]: 3202 : if (rel->rel_parallel_workers != -1)
7107 : : {
7108 [ + - ]: 3 : parallel_workers = Min(rel->rel_parallel_workers,
7109 : : max_parallel_maintenance_workers);
7110 : 3 : goto done;
7111 : : }
7112 : :
7113 : : /*
7114 : : * Estimate heap relation size ourselves, since rel->pages cannot be
7115 : : * trusted (heap RTE was marked as inheritance parent)
7116 : : */
7117 : 3199 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7118 : :
7119 : : /*
7120 : : * Determine number of workers to scan the heap relation using generic
7121 : : * model
7122 : : */
7123 : 6398 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7124 : 3199 : max_parallel_maintenance_workers);
7125 : :
7126 : : /*
7127 : : * Cap workers based on available maintenance_work_mem as needed.
7128 : : *
7129 : : * Note that each tuplesort participant receives an even share of the
7130 : : * total maintenance_work_mem budget. Aim to leave participants
7131 : : * (including the leader as a participant) with no less than 32MB of
7132 : : * memory. Consequently, a maintenance_work_mem setting of 64MB is just
7133 : : * past the threshold at which a single parallel worker can be launched
7134 : : * to sort.
7135 : : */
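: : /*
: :  * Worked example (editor's addition): with maintenance_work_mem set
: :  * to 64MB (65536 kB) and two planned workers, each of the three
: :  * participants would get 65536 / 3 = ~21845 kB, below the 32768 kB
: :  * floor, so we back off to one worker; 65536 / 2 = 32768 kB meets
: :  * the floor exactly and the loop stops.
: :  */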
7136 [ + + + + ]: 3283 : while (parallel_workers > 0 &&
7137 : 56 : maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7138 : 28 : parallel_workers--;
7139 : :
7140 : : done:
7141 : 3549 : index_close(index, NoLock);
7142 : 3549 : table_close(heap, NoLock);
7143 : :
7144 : 3549 : return parallel_workers;
7145 : 3573 : }
7146 : :
7147 : : /*
7148 : : * add_paths_to_grouping_rel
7149 : : *
7150 : : * Add non-partial paths to grouping relation.
7151 : : */
7152 : : static void
7153 : 5255 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
7154 : : RelOptInfo *grouped_rel,
7155 : : RelOptInfo *partially_grouped_rel,
7156 : : const AggClauseCosts *agg_costs,
7157 : : grouping_sets_data *gd,
7158 : : GroupPathExtraData *extra)
7159 : : {
7160 : 5255 : Query *parse = root->parse;
7161 : 5255 : Path *cheapest_path = input_rel->cheapest_total_path;
7162 : 5255 : Path *cheapest_partially_grouped_path = NULL;
7163 : 5255 : ListCell *lc;
7164 : 5255 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7165 : 5255 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7166 : 5255 : List *havingQual = (List *) extra->havingQual;
7167 : 5255 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7168 : 5255 : double dNumGroups = 0;
7169 : 5255 : double dNumFinalGroups = 0;
7170 : :
7171 : : /*
7172 : : * Estimate number of groups for non-split aggregation.
7173 : : */
7174 : 10510 : dNumGroups = get_number_of_groups(root,
7175 : 5255 : cheapest_path->rows,
7176 : 5255 : gd,
7177 : 5255 : extra->targetList);
7178 : :
7179 [ + + + - ]: 5255 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7180 : : {
7181 : 379 : cheapest_partially_grouped_path =
7182 : 379 : partially_grouped_rel->cheapest_total_path;
7183 : :
7184 : : /*
7185 : : * Estimate number of groups for final phase of partial aggregation.
7186 : : */
7187 : 379 : dNumFinalGroups =
7188 : 758 : get_number_of_groups(root,
7189 : 379 : cheapest_partially_grouped_path->rows,
7190 : 379 : gd,
7191 : 379 : extra->targetList);
7192 : 379 : }
7193 : :
7194 [ + + ]: 5255 : if (can_sort)
7195 : : {
7196 : : /*
7197 : : * Use any available suitably-sorted path as input, and also consider
7198 : : * sorting the cheapest-total path and incremental sort on any paths
7199 : : * with presorted keys.
7200 : : */
7201 [ + - + + : 10951 : foreach(lc, input_rel->pathlist)
+ + ]
7202 : : {
7203 : 5697 : ListCell *lc2;
7204 : 5697 : Path *path = (Path *) lfirst(lc);
7205 : 5697 : Path *path_save = path;
7206 : 5697 : List *pathkey_orderings = NIL;
7207 : :
7208 : : /* generate alternative group orderings that might be useful */
7209 : 5697 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7210 : :
7211 [ + - ]: 5697 : Assert(list_length(pathkey_orderings) > 0);
7212 : :
7213 [ + - + + : 11418 : foreach(lc2, pathkey_orderings)
+ + ]
7214 : : {
7215 : 5721 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7216 : :
7217 : : /* restore the path (we replace it in the loop) */
7218 : 5721 : path = path_save;
7219 : :
7220 : 11442 : path = make_ordered_path(root,
7221 : 5721 : grouped_rel,
7222 : 5721 : path,
7223 : 5721 : cheapest_path,
7224 : 5721 : info->pathkeys,
7225 : : -1.0);
7226 [ + + ]: 5721 : if (path == NULL)
7227 : 53 : continue;
7228 : :
7229 : : /* Now decide what to stick atop it */
7230 [ + + ]: 5668 : if (parse->groupingSets)
7231 : : {
7232 : 342 : consider_groupingsets_paths(root, grouped_rel,
7233 : 171 : path, true, can_hash,
7234 : 171 : gd, agg_costs, dNumGroups);
7235 : 171 : }
7236 [ + + ]: 5497 : else if (parse->hasAggs)
7237 : : {
7238 : : /*
7239 : : * We have aggregation, possibly with plain GROUP BY. Make
7240 : : * an AggPath.
7241 : : */
7242 : 10750 : add_path(grouped_rel, (Path *)
7243 : 10750 : create_agg_path(root,
7244 : 5375 : grouped_rel,
7245 : 5375 : path,
7246 : 5375 : grouped_rel->reltarget,
7247 : 5375 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7248 : : AGGSPLIT_SIMPLE,
7249 : 5375 : info->clauses,
7250 : 5375 : havingQual,
7251 : 5375 : agg_costs,
7252 : 5375 : dNumGroups));
7253 : 5375 : }
7254 [ - + ]: 122 : else if (parse->groupClause)
7255 : : {
7256 : : /*
7257 : : * We have GROUP BY without aggregation or grouping sets.
7258 : : * Make a GroupPath.
7259 : : */
7260 : 244 : add_path(grouped_rel, (Path *)
7261 : 244 : create_group_path(root,
7262 : 122 : grouped_rel,
7263 : 122 : path,
7264 : 122 : info->clauses,
7265 : 122 : havingQual,
7266 : 122 : dNumGroups));
7267 : 122 : }
7268 : : else
7269 : : {
7270 : : /* Other cases should have been handled above */
7271 : 0 : Assert(false);
7272 : : }
7273 [ + + ]: 5721 : }
7274 : 5697 : }
7275 : :
7276 : : /*
7277 : : * Instead of operating directly on the input relation, we can
7278 : : * consider finalizing a partially aggregated path.
7279 : : */
7280 [ + + ]: 5254 : if (partially_grouped_rel != NULL)
7281 : : {
7282 [ + - + + : 1037 : foreach(lc, partially_grouped_rel->pathlist)
+ + ]
7283 : : {
7284 : 658 : ListCell *lc2;
7285 : 658 : Path *path = (Path *) lfirst(lc);
7286 : 658 : Path *path_save = path;
7287 : 658 : List *pathkey_orderings = NIL;
7288 : :
7289 : : /* generate alternative group orderings that might be useful */
7290 : 658 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7291 : :
7292 [ + - ]: 658 : Assert(list_length(pathkey_orderings) > 0);
7293 : :
7294 : : /* process all potentially interesting grouping reorderings */
7295 [ + - + + : 1316 : foreach(lc2, pathkey_orderings)
+ + ]
7296 : : {
7297 : 658 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7298 : :
7299 : : /* restore the path (we replace it in the loop) */
7300 : 658 : path = path_save;
7301 : :
7302 : 1316 : path = make_ordered_path(root,
7303 : 658 : grouped_rel,
7304 : 658 : path,
7305 : 658 : cheapest_partially_grouped_path,
7306 : 658 : info->pathkeys,
7307 : : -1.0);
7308 : :
7309 [ + + ]: 658 : if (path == NULL)
7310 : 34 : continue;
7311 : :
7312 [ + + ]: 624 : if (parse->hasAggs)
7313 : 1170 : add_path(grouped_rel, (Path *)
7314 : 1170 : create_agg_path(root,
7315 : 585 : grouped_rel,
7316 : 585 : path,
7317 : 585 : grouped_rel->reltarget,
7318 : 585 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7319 : : AGGSPLIT_FINAL_DESERIAL,
7320 : 585 : info->clauses,
7321 : 585 : havingQual,
7322 : 585 : agg_final_costs,
7323 : 585 : dNumFinalGroups));
7324 : : else
7325 : 78 : add_path(grouped_rel, (Path *)
7326 : 78 : create_group_path(root,
7327 : 39 : grouped_rel,
7328 : 39 : path,
7329 : 39 : info->clauses,
7330 : 39 : havingQual,
7331 : 39 : dNumFinalGroups));
7332 : :
7333 [ + + ]: 658 : }
7334 : 658 : }
7335 : 379 : }
7336 : 5254 : }
7337 : :
7338 [ + + ]: 5255 : if (can_hash)
7339 : : {
7340 [ + + ]: 908 : if (parse->groupingSets)
7341 : : {
7342 : : /*
7343 : : * Try for a hash-only groupingsets path over unsorted input.
7344 : : */
7345 : 292 : consider_groupingsets_paths(root, grouped_rel,
7346 : 146 : cheapest_path, false, true,
7347 : 146 : gd, agg_costs, dNumGroups);
7348 : 146 : }
7349 : : else
7350 : : {
7351 : : /*
7352 : : * Generate a HashAgg Path. We just need an Agg over the
7353 : : * cheapest-total input path, since input order won't matter.
7354 : : */
7355 : 1524 : add_path(grouped_rel, (Path *)
7356 : 1524 : create_agg_path(root, grouped_rel,
7357 : 762 : cheapest_path,
7358 : 762 : grouped_rel->reltarget,
7359 : : AGG_HASHED,
7360 : : AGGSPLIT_SIMPLE,
7361 : 762 : root->processed_groupClause,
7362 : 762 : havingQual,
7363 : 762 : agg_costs,
7364 : 762 : dNumGroups));
7365 : : }
7366 : :
7367 : : /*
7368 : : * Generate a Finalize HashAgg Path atop of the cheapest partially
7369 : : * grouped path, assuming there is one
7370 : : */
7371 [ + + - + ]: 908 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7372 : : {
7373 : 476 : add_path(grouped_rel, (Path *)
7374 : 476 : create_agg_path(root,
7375 : 238 : grouped_rel,
7376 : 238 : cheapest_partially_grouped_path,
7377 : 238 : grouped_rel->reltarget,
7378 : : AGG_HASHED,
7379 : : AGGSPLIT_FINAL_DESERIAL,
7380 : 238 : root->processed_groupClause,
7381 : 238 : havingQual,
7382 : 238 : agg_final_costs,
7383 : 238 : dNumFinalGroups));
7384 : 238 : }
7385 : 908 : }
7386 : :
7387 : : /*
7388 : : * When partitionwise aggregate is used, we might have fully aggregated
7389 : : * paths in the partial pathlist, because add_paths_to_append_rel() will
7390 : : * consider a path for grouped_rel consisting of a Parallel Append of
7391 : : * non-partial paths from each child.
7392 : : */
7393 [ + + ]: 5255 : if (grouped_rel->partial_pathlist != NIL)
7394 : 53 : gather_grouping_paths(root, grouped_rel);
7395 : 5255 : }
7396 : :
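: : /*
: :  * Editor's illustration (not part of the source): the paths added by
: :  * add_paths_to_grouping_rel() correspond to plan shapes such as
: :  *
: :  *     GroupAggregate            HashAggregate
: :  *       -> Sort                   -> Seq Scan on t
: :  *            -> Seq Scan on t
: :  *
: :  * and, when a partially grouped input relation exists,
: :  *
: :  *     Finalize GroupAggregate (or Finalize HashAggregate)
: :  *       -> (partially aggregated input path)
: :  */
: :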
7397 : : /*
7398 : : * create_partial_grouping_paths
7399 : : *
7400 : : * Create a new upper relation representing the result of partial aggregation
7401 : : * and populate it with appropriate paths. Note that we don't finalize the
7402 : : * lists of paths here, so the caller can add additional partial or non-partial
7403 : : * paths and must afterward call gather_grouping_paths and set_cheapest on
7404 : : * the returned upper relation.
7405 : : *
7406 : : * All paths for this new upper relation -- both partial and non-partial --
7407 : : * have been partially aggregated but require a subsequent FinalizeAggregate
7408 : : * step.
7409 : : *
7410 : : * NB: This function is allowed to return NULL if it determines that there is
7411 : : * no real need to create a new RelOptInfo.
7412 : : */
7413 : : static RelOptInfo *
7414 : 4808 : create_partial_grouping_paths(PlannerInfo *root,
7415 : : RelOptInfo *grouped_rel,
7416 : : RelOptInfo *input_rel,
7417 : : grouping_sets_data *gd,
7418 : : GroupPathExtraData *extra,
7419 : : bool force_rel_creation)
7420 : : {
7421 : 4808 : Query *parse = root->parse;
7422 : 4808 : RelOptInfo *partially_grouped_rel;
7423 : 4808 : RelOptInfo *eager_agg_rel = NULL;
7424 : 4808 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7425 : 4808 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7426 : 4808 : Path *cheapest_partial_path = NULL;
7427 : 4808 : Path *cheapest_total_path = NULL;
7428 : 4808 : double dNumPartialGroups = 0;
7429 : 4808 : double dNumPartialPartialGroups = 0;
7430 : 4808 : ListCell *lc;
7431 : 4808 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7432 : 4808 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7433 : :
7434 : : /*
7435 : : * Check whether any partially aggregated paths have been generated
7436 : : * through eager aggregation.
7437 : : */
7438 [ + + ]: 4808 : if (input_rel->grouped_rel &&
7439 [ + - + + ]: 159 : !IS_DUMMY_REL(input_rel->grouped_rel) &&
7440 : 159 : input_rel->grouped_rel->pathlist != NIL)
7441 : 149 : eager_agg_rel = input_rel->grouped_rel;
7442 : :
7443 : : /*
7444 : : * Consider whether we should generate partially aggregated non-partial
7445 : : * paths. We can only do this if we have a non-partial path, and only if
7446 : : * the parent of the input rel is performing partial partitionwise
7447 : : * aggregation. (Note that extra->patype is the type of partitionwise
7448 : : * aggregation being used at the parent level, not this level.)
7449 : : */
7450 [ + - + + ]: 4808 : if (input_rel->pathlist != NIL &&
7451 : 4808 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7452 : 142 : cheapest_total_path = input_rel->cheapest_total_path;
7453 : :
7454 : : /*
7455 : : * If parallelism is possible for grouped_rel, then we should consider
7456 : : * generating partially-grouped partial paths. However, if the input rel
7457 : : * has no partial paths, then we can't.
7458 : : */
7459 [ + + + + ]: 4808 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7460 : 426 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7461 : :
7462 : : /*
7463 : : * If we can't partially aggregate partial paths, and we can't partially
7464 : : * aggregate non-partial paths, and no partially aggregated paths were
7465 : : * generated by eager aggregation, then don't bother creating the new
7466 : : * RelOptInfo at all, unless the caller specified force_rel_creation.
7467 : : */
7468 [ + + ]: 4808 : if (cheapest_total_path == NULL &&
7469 [ + + ]: 4666 : cheapest_partial_path == NULL &&
7470 [ + + + + ]: 4324 : eager_agg_rel == NULL &&
7471 : 4303 : !force_rel_creation)
7472 : 4287 : return NULL;
7473 : :
7474 : : /*
7475 : : * Build a new upper relation to represent the result of partially
7476 : : * aggregating the rows from the input relation.
7477 : : */
7478 : 1042 : partially_grouped_rel = fetch_upper_rel(root,
7479 : : UPPERREL_PARTIAL_GROUP_AGG,
7480 : 521 : grouped_rel->relids);
7481 : 521 : partially_grouped_rel->consider_parallel =
7482 : 521 : grouped_rel->consider_parallel;
7483 : 521 : partially_grouped_rel->pgs_mask = grouped_rel->pgs_mask;
7484 : 521 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7485 : 521 : partially_grouped_rel->serverid = grouped_rel->serverid;
7486 : 521 : partially_grouped_rel->userid = grouped_rel->userid;
7487 : 521 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7488 : 521 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7489 : :
7490 : : /*
7491 : : * Build target list for partial aggregate paths. These paths cannot just
7492 : : * emit the same tlist as regular aggregate paths, because (1) we must
7493 : : * include Vars and Aggrefs needed in HAVING, which might not appear in
7494 : : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7495 : : */
7496 : 521 : partially_grouped_rel->reltarget =
7497 : 1042 : make_partial_grouping_target(root, grouped_rel->reltarget,
7498 : 521 : extra->havingQual);
7499 : :
7500 [ + + ]: 521 : if (!extra->partial_costs_set)
7501 : : {
7502 : : /*
7503 : : * Collect statistics about aggregates for estimating costs of
7504 : : * performing aggregation in parallel.
7505 : : */
7506 [ + - + - : 1590 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
+ - - + +
+ ]
7507 [ + - + - : 1590 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
+ - - + +
+ ]
7508 [ + + ]: 265 : if (parse->hasAggs)
7509 : : {
7510 : : /* partial phase */
7511 : 488 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7512 : 244 : agg_partial_costs);
7513 : :
7514 : : /* final phase */
7515 : 488 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7516 : 244 : agg_final_costs);
7517 : 244 : }
7518 : :
7519 : 265 : extra->partial_costs_set = true;
7520 : 265 : }
7521 : :
7522 : : /* Estimate number of partial groups. */
7523 [ + + ]: 521 : if (cheapest_total_path != NULL)
7524 : 142 : dNumPartialGroups =
7525 : 284 : get_number_of_groups(root,
7526 : 142 : cheapest_total_path->rows,
7527 : 142 : gd,
7528 : 142 : extra->targetList);
7529 [ + + ]: 521 : if (cheapest_partial_path != NULL)
7530 : 426 : dNumPartialPartialGroups =
7531 : 852 : get_number_of_groups(root,
7532 : 426 : cheapest_partial_path->rows,
7533 : 426 : gd,
7534 : 426 : extra->targetList);
7535 : :
7536 [ + - + + ]: 521 : if (can_sort && cheapest_total_path != NULL)
7537 : : {
7538 : : /* This should have been checked previously */
7539 [ + + + - ]: 142 : Assert(parse->hasAggs || parse->groupClause);
7540 : :
7541 : : /*
7542 : : * Use any available suitably-sorted path as input, and also consider
7543 : : * sorting the cheapest partial path.
7544 : : */
7545 [ + - + + : 284 : foreach(lc, input_rel->pathlist)
+ + ]
7546 : : {
7547 : 142 : ListCell *lc2;
7548 : 142 : Path *path = (Path *) lfirst(lc);
7549 : 142 : Path *path_save = path;
7550 : 142 : List *pathkey_orderings = NIL;
7551 : :
7552 : : /* generate alternative group orderings that might be useful */
7553 : 142 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7554 : :
7555 [ + - ]: 142 : Assert(list_length(pathkey_orderings) > 0);
7556 : :
7557 : : /* process all potentially interesting grouping reorderings */
7558 [ + - + + : 284 : foreach(lc2, pathkey_orderings)
+ + ]
7559 : : {
7560 : 142 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7561 : :
7562 : : /* restore the path (we replace it in the loop) */
7563 : 142 : path = path_save;
7564 : :
7565 : 284 : path = make_ordered_path(root,
7566 : 142 : partially_grouped_rel,
7567 : 142 : path,
7568 : 142 : cheapest_total_path,
7569 : 142 : info->pathkeys,
7570 : : -1.0);
7571 : :
7572 [ + - ]: 142 : if (path == NULL)
7573 : 0 : continue;
7574 : :
7575 [ + + ]: 142 : if (parse->hasAggs)
7576 : 260 : add_path(partially_grouped_rel, (Path *)
7577 : 260 : create_agg_path(root,
7578 : 130 : partially_grouped_rel,
7579 : 130 : path,
7580 : 130 : partially_grouped_rel->reltarget,
7581 : 130 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7582 : : AGGSPLIT_INITIAL_SERIAL,
7583 : 130 : info->clauses,
7584 : : NIL,
7585 : 130 : agg_partial_costs,
7586 : 130 : dNumPartialGroups));
7587 : : else
7588 : 24 : add_path(partially_grouped_rel, (Path *)
7589 : 24 : create_group_path(root,
7590 : 12 : partially_grouped_rel,
7591 : 12 : path,
7592 : 12 : info->clauses,
7593 : : NIL,
7594 : 12 : dNumPartialGroups));
7595 [ - + ]: 142 : }
7596 : 142 : }
7597 : 142 : }
7598 : :
7599 [ + - + + ]: 521 : if (can_sort && cheapest_partial_path != NULL)
7600 : : {
7601 : : /* Similar to above logic, but for partial paths. */
7602 [ + - + + : 939 : foreach(lc, input_rel->partial_pathlist)
+ + ]
7603 : : {
7604 : 513 : ListCell *lc2;
7605 : 513 : Path *path = (Path *) lfirst(lc);
7606 : 513 : Path *path_save = path;
7607 : 513 : List *pathkey_orderings = NIL;
7608 : :
7609 : : /* generate alternative group orderings that might be useful */
7610 : 513 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7611 : :
7612 [ + - ]: 513 : Assert(list_length(pathkey_orderings) > 0);
7613 : :
7614 : : /* process all potentially interesting grouping reorderings */
7615 [ + - + + : 1026 : foreach(lc2, pathkey_orderings)
+ + ]
7616 : : {
7617 : 513 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7618 : :
7619 : :
7620 : : /* restore the path (we replace it in the loop) */
7621 : 513 : path = path_save;
7622 : :
7623 : 1026 : path = make_ordered_path(root,
7624 : 513 : partially_grouped_rel,
7625 : 513 : path,
7626 : 513 : cheapest_partial_path,
7627 : 513 : info->pathkeys,
7628 : : -1.0);
7629 : :
7630 [ + + ]: 513 : if (path == NULL)
7631 : 1 : continue;
7632 : :
7633 [ + + ]: 512 : if (parse->hasAggs)
7634 : 986 : add_partial_path(partially_grouped_rel, (Path *)
7635 : 986 : create_agg_path(root,
7636 : 493 : partially_grouped_rel,
7637 : 493 : path,
7638 : 493 : partially_grouped_rel->reltarget,
7639 : 493 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7640 : : AGGSPLIT_INITIAL_SERIAL,
7641 : 493 : info->clauses,
7642 : : NIL,
7643 : 493 : agg_partial_costs,
7644 : 493 : dNumPartialPartialGroups));
7645 : : else
7646 : 38 : add_partial_path(partially_grouped_rel, (Path *)
7647 : 38 : create_group_path(root,
7648 : 19 : partially_grouped_rel,
7649 : 19 : path,
7650 : 19 : info->clauses,
7651 : : NIL,
7652 : 19 : dNumPartialPartialGroups));
7653 [ + + ]: 513 : }
7654 : 513 : }
7655 : 426 : }
7656 : :
7657 : : /*
7658 : : * Add a partially-grouped HashAgg Path where possible
7659 : : */
7660 [ + + + + ]: 521 : if (can_hash && cheapest_total_path != NULL)
7661 : : {
7662 : : /* Checked above */
7663 [ + + + - ]: 142 : Assert(parse->hasAggs || parse->groupClause);
7664 : :
7665 : 284 : add_path(partially_grouped_rel, (Path *)
7666 : 284 : create_agg_path(root,
7667 : 142 : partially_grouped_rel,
7668 : 142 : cheapest_total_path,
7669 : 142 : partially_grouped_rel->reltarget,
7670 : : AGG_HASHED,
7671 : : AGGSPLIT_INITIAL_SERIAL,
7672 : 142 : root->processed_groupClause,
7673 : : NIL,
7674 : 142 : agg_partial_costs,
7675 : 142 : dNumPartialGroups));
7676 : 142 : }
7677 : :
7678 : : /*
7679 : : * Now add a partially-grouped HashAgg partial Path where possible
7680 : : */
7681 [ + + + + ]: 521 : if (can_hash && cheapest_partial_path != NULL)
7682 : : {
7683 : 570 : add_partial_path(partially_grouped_rel, (Path *)
7684 : 570 : create_agg_path(root,
7685 : 285 : partially_grouped_rel,
7686 : 285 : cheapest_partial_path,
7687 : 285 : partially_grouped_rel->reltarget,
7688 : : AGG_HASHED,
7689 : : AGGSPLIT_INITIAL_SERIAL,
7690 : 285 : root->processed_groupClause,
7691 : : NIL,
7692 : 285 : agg_partial_costs,
7693 : 285 : dNumPartialPartialGroups));
7694 : 285 : }
7695 : :
7696 : : /*
7697 : : * Add any partially aggregated paths generated by eager aggregation to
7698 : : * the new upper relation after applying projection steps as needed.
7699 : : */
7700 [ + + ]: 521 : if (eager_agg_rel)
7701 : : {
7702 : : /* Add the paths */
7703 [ + - + + : 390 : foreach(lc, eager_agg_rel->pathlist)
+ + ]
7704 : : {
7705 : 241 : Path *path = (Path *) lfirst(lc);
7706 : :
7707 : : /* Shouldn't have any parameterized paths anymore */
7708 [ + - ]: 241 : Assert(path->param_info == NULL);
7709 : :
7710 : 482 : path = (Path *) create_projection_path(root,
7711 : 241 : partially_grouped_rel,
7712 : 241 : path,
7713 : 241 : partially_grouped_rel->reltarget);
7714 : :
7715 : 241 : add_path(partially_grouped_rel, path);
7716 : 241 : }
7717 : :
7718 : : /*
7719 : : * Likewise add the partial paths, but only if parallelism is possible
7720 : : * for partially_grouped_rel.
7721 : : */
7722 [ + + ]: 149 : if (partially_grouped_rel->consider_parallel)
7723 : : {
7724 [ + + + + : 338 : foreach(lc, eager_agg_rel->partial_pathlist)
+ + ]
7725 : : {
7726 : 202 : Path *path = (Path *) lfirst(lc);
7727 : :
7728 : : /* Shouldn't have any parameterized paths anymore */
7729 [ + - ]: 202 : Assert(path->param_info == NULL);
7730 : :
7731 : 404 : path = (Path *) create_projection_path(root,
7732 : 202 : partially_grouped_rel,
7733 : 202 : path,
7734 : 202 : partially_grouped_rel->reltarget);
7735 : :
7736 : 202 : add_partial_path(partially_grouped_rel, path);
7737 : 202 : }
7738 : 136 : }
7739 : 149 : }
7740 : :
7741 : : /*
7742 : : * If there is an FDW that's responsible for all baserels of the query,
7743 : : * let it consider adding partially grouped ForeignPaths.
7744 : : */
7745 [ - + # # ]: 521 : if (partially_grouped_rel->fdwroutine &&
7746 : 0 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7747 : : {
7748 : 0 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7749 : :
7750 : 0 : fdwroutine->GetForeignUpperPaths(root,
7751 : : UPPERREL_PARTIAL_GROUP_AGG,
7752 : 0 : input_rel, partially_grouped_rel,
7753 : 0 : extra);
7754 : 0 : }
7755 : :
7756 : 521 : return partially_grouped_rel;
7757 : 4808 : }
7758 : :
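: : /*
: :  * Editor's illustration (not part of the source): partial paths built
: :  * here typically surface in final plans of the shape
: :  *
: :  *     Finalize HashAggregate
: :  *       -> Gather
: :  *            -> Partial HashAggregate
: :  *                 -> Parallel Seq Scan on t
: :  *
: :  * with the partial stage using AGGSPLIT_INITIAL_SERIAL and the
: :  * finalize stage using AGGSPLIT_FINAL_DESERIAL, as set up above.
: :  */
: :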
7759 : : /*
7760 : : * make_ordered_path
7761 : : * Return a path ordered by 'pathkeys' based on the given 'path'. May
7762 : : * return NULL if it doesn't make sense to generate an ordered path in
7763 : : * this case.
7764 : : */
7765 : : static Path *
7766 : 7330 : make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7767 : : Path *cheapest_path, List *pathkeys, double limit_tuples)
7768 : : {
7769 : 7330 : bool is_sorted;
7770 : 7330 : int presorted_keys;
7771 : :
7772 : 14660 : is_sorted = pathkeys_count_contained_in(pathkeys,
7773 : 7330 : path->pathkeys,
7774 : : &presorted_keys);
7775 : :
7776 [ + + ]: 7330 : if (!is_sorted)
7777 : : {
7778 : : /*
7779 : : * Try at least sorting the cheapest path and also try incrementally
7780 : : * sorting any path which is partially sorted already (no need to deal
7781 : : * with paths which have presorted keys when incremental sort is
7782 : : * disabled unless it's the cheapest input path).
7783 : : */
7784 [ + + + + ]: 2183 : if (path != cheapest_path &&
7785 [ + + ]: 221 : (presorted_keys == 0 || !enable_incremental_sort))
7786 : 130 : return NULL;
7787 : :
7788 : : /*
7789 : : * We've no need to consider both a sort and incremental sort. We'll
7790 : : * just do a sort if there are no presorted keys and an incremental
7791 : : * sort when there are presorted keys.
7792 : : */
7793 [ + + + + ]: 1950 : if (presorted_keys == 0 || !enable_incremental_sort)
7794 : 3660 : path = (Path *) create_sort_path(root,
7795 : 1830 : rel,
7796 : 1830 : path,
7797 : 1830 : pathkeys,
7798 : 1830 : limit_tuples);
7799 : : else
7800 : 240 : path = (Path *) create_incremental_sort_path(root,
7801 : 120 : rel,
7802 : 120 : path,
7803 : 120 : pathkeys,
7804 : 120 : presorted_keys,
7805 : 120 : limit_tuples);
7806 : 1950 : }
7807 : :
7808 : 7200 : return path;
7809 : 7330 : }
7810 : :
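: : /*
: :  * Editor's summary (illustrative): make_ordered_path() implements a
: :  * small decision table. For desired pathkeys P and input path p:
: :  *
: :  *     p already sorted by P                       -> return p unchanged
: :  *     p != cheapest, and no presorted prefix
: :  *       (or incremental sort disabled)            -> return NULL
: :  *     no presorted prefix, or incremental sort
: :  *       disabled (p is the cheapest path)         -> full Sort atop p
: :  *     presorted prefix, incremental sort enabled  -> Incremental Sort atop p
: :  */
: :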
7811 : : /*
7812 : : * Generate Gather and Gather Merge paths for a grouping relation or partial
7813 : : * grouping relation.
7814 : : *
7815 : : * generate_useful_gather_paths does most of the work, but we also consider a
7816 : : * special case: we could try sorting the data by the group_pathkeys and then
7817 : : * applying Gather Merge.
7818 : : *
7819 : : * NB: This function shouldn't be used for anything other than a grouped
7820 : : * or partially grouped relation, not only because it explicitly
7821 : : * references group_pathkeys but also because it passes "true" as the
7822 : : * third argument to generate_useful_gather_paths().
7823 : : */
7824 : : static void
7825 : 395 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7826 : : {
7827 : 395 : ListCell *lc;
7828 : 395 : Path *cheapest_partial_path;
7829 : 395 : List *groupby_pathkeys;
7830 : :
7831 : : /*
7832 : : * This occurs after any partial aggregation has taken place, so trim off
7833 : : * any pathkeys added for ORDER BY / DISTINCT aggregates.
7834 : : */
7835 [ + + ]: 395 : if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7836 : 6 : groupby_pathkeys = list_copy_head(root->group_pathkeys,
7837 : 3 : root->num_groupby_pathkeys);
7838 : : else
7839 : 392 : groupby_pathkeys = root->group_pathkeys;
7840 : :
7841 : : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7842 : 395 : generate_useful_gather_paths(root, rel, true);
7843 : :
7844 : 395 : cheapest_partial_path = linitial(rel->partial_pathlist);
7845 : :
7846 : : /* XXX Shouldn't this also consider the group-key-reordering? */
7847 [ + - + + : 977 : foreach(lc, rel->partial_pathlist)
+ + ]
7848 : : {
7849 : 582 : Path *path = (Path *) lfirst(lc);
7850 : 582 : bool is_sorted;
7851 : 582 : int presorted_keys;
7852 : 582 : double total_groups;
7853 : :
7854 : 1164 : is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7855 : 582 : path->pathkeys,
7856 : : &presorted_keys);
7857 : :
7858 [ + + ]: 582 : if (is_sorted)
7859 : 341 : continue;
7860 : :
7861 : : /*
7862 : : * Try at least sorting the cheapest path and also try incrementally
7863 : : * sorting any path which is partially sorted already (no need to deal
7864 : : * with paths which have presorted keys when incremental sort is
7865 : : * disabled unless it's the cheapest input path).
7866 : : */
7867 [ - + # # ]: 241 : if (path != cheapest_partial_path &&
7868 [ # # ]: 0 : (presorted_keys == 0 || !enable_incremental_sort))
7869 : 0 : continue;
7870 : :
7871 : : /*
7872 : : * We've no need to consider both a sort and incremental sort. We'll
7873 : : * just do a sort if there are no presorted keys and an incremental
7874 : : * sort when there are presorted keys.
7875 : : */
7876 [ - + # # ]: 241 : if (presorted_keys == 0 || !enable_incremental_sort)
7877 : 482 : path = (Path *) create_sort_path(root, rel, path,
7878 : 241 : groupby_pathkeys,
7879 : : -1.0);
7880 : : else
7881 : 0 : path = (Path *) create_incremental_sort_path(root,
7882 : 0 : rel,
7883 : 0 : path,
7884 : 0 : groupby_pathkeys,
7885 : 0 : presorted_keys,
7886 : : -1.0);
7887 : 241 : total_groups = compute_gather_rows(path);
7888 : 241 : path = (Path *)
7889 : 482 : create_gather_merge_path(root,
7890 : 241 : rel,
7891 : 241 : path,
7892 : 241 : rel->reltarget,
7893 : 241 : groupby_pathkeys,
7894 : : NULL,
7895 : : &total_groups);
7896 : :
7897 : 241 : add_path(rel, path);
7898 [ - + + ]: 582 : }
7899 : 395 : }
7900 : :
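: : /*
: :  * Editor's illustration (not part of the source): the explicit-sort
: :  * case above yields plans like
: :  *
: :  *     Gather Merge
: :  *       -> Sort (by the GROUP BY pathkeys)
: :  *            -> Partial HashAggregate
: :  *                 -> Parallel Seq Scan on t
: :  *
: :  * i.e. each worker sorts its partially grouped output so the leader
: :  * can merge ordered streams instead of gathering unordered rows.
: :  */
: :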
7901 : : /*
7902 : : * can_partial_agg
7903 : : *
7904 : : * Determines whether partial grouping and/or aggregation is possible.
7905 : : * Returns true when possible, false otherwise.
7906 : : */
7907 : : static bool
7908 : 5041 : can_partial_agg(PlannerInfo *root)
7909 : : {
7910 : 5041 : Query *parse = root->parse;
7911 : :
7912 [ + + + - ]: 5041 : if (!parse->hasAggs && parse->groupClause == NIL)
7913 : : {
7914 : : /*
7915 : : * We don't know how to do parallel aggregation unless we have either
7916 : : * some aggregates or a grouping clause.
7917 : : */
7918 : 0 : return false;
7919 : : }
7920 [ + + ]: 5041 : else if (parse->groupingSets)
7921 : : {
7922 : : /* We don't know how to do grouping sets in parallel. */
7923 : 162 : return false;
7924 : : }
7925 [ + + + + ]: 4879 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7926 : : {
7927 : : /* Insufficient support for partial mode. */
7928 : 415 : return false;
7929 : : }
7930 : :
7931 : : /* Everything looks good. */
7932 : 4464 : return true;
7933 : 5041 : }
7934 : :
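: : /*
: :  * Editor's examples (illustrative): under the tests above,
: :  *
: :  *     SELECT count(*) FROM t GROUP BY a;                 -- true
: :  *     SELECT a FROM t GROUP BY GROUPING SETS ((a), ());  -- false
: :  *     SELECT string_agg(x, ',' ORDER BY x) FROM t;       -- false
: :  *
: :  * The last case fails because an aggregate with an internal ORDER BY
: :  * (like one lacking a combine or serialization function) is flagged
: :  * via root->hasNonPartialAggs / hasNonSerialAggs during preprocessing.
: :  */
: :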
7935 : : /*
7936 : : * apply_scanjoin_target_to_paths
7937 : : *
7938 : : * Adjust the final scan/join relation, and recursively all of its children,
7939 : : * to generate the final scan/join target. It would be more correct to model
7940 : : * this as a separate planning step with a new RelOptInfo at the toplevel and
7941 : : * for each child relation, but doing it this way is noticeably cheaper.
7942 : : * Maybe that problem can be solved at some point, but for now we do this.
7943 : : *
7944 : : * If tlist_same_exprs is true, then the scan/join target to be applied has
7945 : : * the same expressions as the existing reltarget, so we need only insert the
7946 : : * appropriate sortgroupref information. By avoiding the creation of
7947 : : * projection paths we save effort both immediately and at plan creation time.
7948 : : */
7949 : : static void
7950 : 54218 : apply_scanjoin_target_to_paths(PlannerInfo *root,
7951 : : RelOptInfo *rel,
7952 : : List *scanjoin_targets,
7953 : : List *scanjoin_targets_contain_srfs,
7954 : : bool scanjoin_target_parallel_safe,
7955 : : bool tlist_same_exprs)
7956 : : {
7957 [ + + + + : 54218 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
+ + + - ]
7958 : 54218 : PathTarget *scanjoin_target;
7959 : 54218 : ListCell *lc;
7960 : :
7961 : : /* This recurses, so be paranoid. */
7962 : 54218 : check_stack_depth();
7963 : :
7964 : : /*
7965 : : * If the rel only has Append and MergeAppend paths, we want to drop its
7966 : : * existing paths and generate new ones. This function would still be
7967 : : * correct if we kept the existing paths: we'd modify them to generate the
7968 : : * correct target above the partitioning Append, and then they'd compete
7969 : : * on cost with paths generating the target below the Append. However, in
7970 : : * our current cost model the latter way is always the same or cheaper
7971 : : * cost, so modifying the existing paths would just be useless work.
7972 : : * Moreover, when the cost is the same, varying roundoff errors might
7973 : : * sometimes allow an existing path to be picked, resulting in undesirable
7974 : : * cross-platform plan variations. So we drop old paths and thereby force
7975 : : * the work to be done below the Append.
7976 : : *
7977 : : * However, there are cases in which this optimization is not safe. If
7978 : : * the rel isn't partitioned, then none of the paths will be Append or
7979 : : * MergeAppend paths, so we should definitely not do this. If it is
7980 : : * partitioned but is a joinrel, it may have Append and MergeAppend paths,
7981 : : * but it can also have join paths that we can't afford to discard.
7982 : : *
7983 : : * Some care is needed, because we have to allow
7984 : : * generate_useful_gather_paths to see the old partial paths in the next
7985 : : * stanza. Hence, zap the main pathlist here, then allow
7986 : : * generate_useful_gather_paths to add path(s) to the main list, and
7987 : : * finally zap the partial pathlist.
7988 : : */
7989 [ + + + + : 54218 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
+ + ]
7990 : 1321 : rel->pathlist = NIL;
7991 : :
7992 : : /*
7993 : : * If the scan/join target is not parallel-safe, partial paths cannot
7994 : : * generate it.
7995 : : */
7996 [ + + ]: 54218 : if (!scanjoin_target_parallel_safe)
7997 : : {
7998 : : /*
7999 : : * Since we can't generate the final scan/join target in parallel
8000 : : * workers, this is our last opportunity to use any partial paths that
8001 : : * exist; so build Gather path(s) that use them and emit whatever the
8002 : : * current reltarget is. We don't do this in the case where the
8003 : : * target is parallel-safe, since we will be able to generate superior
8004 : : * paths by doing it after the final scan/join target has been
8005 : : * applied.
8006 : : */
8007 : 4572 : generate_useful_gather_paths(root, rel, false);
8008 : :
8009 : : /* Can't use parallel query above this level. */
8010 : 4572 : rel->partial_pathlist = NIL;
8011 : 4572 : rel->consider_parallel = false;
8012 : 4572 : }
8013 : :
8014 : : /* Finish dropping old paths for a partitioned rel, per comment above */
8015 [ + + + + : 54218 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
+ + ]
8016 : 1321 : rel->partial_pathlist = NIL;
8017 : :
8018 : : /* Extract SRF-free scan/join target. */
8019 : 54218 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
8020 : :
8021 : : /*
8022 : : * Apply the SRF-free scan/join target to each existing path.
8023 : : *
8024 : : * If the tlist exprs are the same, we can just inject the sortgroupref
8025 : : * information into the existing pathtargets. Otherwise, replace each
8026 : : * path with a projection path that generates the SRF-free scan/join
8027 : : * target. This can't change the ordering of paths within rel->pathlist,
8028 : : * so we just modify the list in place.
8029 : : */
8030 [ + + + + : 114106 : foreach(lc, rel->pathlist)
+ + ]
8031 : : {
8032 : 59888 : Path *subpath = (Path *) lfirst(lc);
8033 : :
8034 : : /* Shouldn't have any parameterized paths anymore */
8035 [ + - ]: 59888 : Assert(subpath->param_info == NULL);
8036 : :
8037 [ + + ]: 59888 : if (tlist_same_exprs)
8038 : 21727 : subpath->pathtarget->sortgrouprefs =
8039 : 21727 : scanjoin_target->sortgrouprefs;
8040 : : else
8041 : : {
8042 : 38161 : Path *newpath;
8043 : :
8044 : 76322 : newpath = (Path *) create_projection_path(root, rel, subpath,
8045 : 38161 : scanjoin_target);
8046 : 38161 : lfirst(lc) = newpath;
8047 : 38161 : }
8048 : 59888 : }
8049 : :
8050 : : /* Likewise adjust the targets for any partial paths. */
8051 [ + + + + : 57627 : foreach(lc, rel->partial_pathlist)
+ + ]
8052 : : {
8053 : 3409 : Path *subpath = (Path *) lfirst(lc);
8054 : :
8055 : : /* Shouldn't have any parameterized paths anymore */
8056 [ + - ]: 3409 : Assert(subpath->param_info == NULL);
8057 : :
8058 [ + + ]: 3409 : if (tlist_same_exprs)
8059 : 2614 : subpath->pathtarget->sortgrouprefs =
8060 : 2614 : scanjoin_target->sortgrouprefs;
8061 : : else
8062 : : {
8063 : 795 : Path *newpath;
8064 : :
8065 : 1590 : newpath = (Path *) create_projection_path(root, rel, subpath,
8066 : 795 : scanjoin_target);
8067 : 795 : lfirst(lc) = newpath;
8068 : 795 : }
8069 : 3409 : }
8070 : :
8071 : : /*
8072 : : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8073 : : * atop each existing path. (Note that this function doesn't look at the
8074 : : * cheapest-path fields, which is a good thing because they're bogus right
8075 : : * now.)
8076 : : */
8077 [ + + ]: 54218 : if (root->parse->hasTargetSRFs)
8078 : 3292 : adjust_paths_for_srfs(root, rel,
8079 : 1646 : scanjoin_targets,
8080 : 1646 : scanjoin_targets_contain_srfs);
8081 : :
8082 : : /*
8083 : : * Update the rel's target to be the final (with SRFs) scan/join target.
8084 : : * This now matches the actual output of all the paths, and we might get
8085 : : * confused in createplan.c if they don't agree. We must do this now so
8086 : : * that any append paths made in the next part will use the correct
8087 : : * pathtarget (cf. create_append_path).
8088 : : *
8089 : : * Note that this is also necessary if GetForeignUpperPaths() gets called
8090 : : * on the final scan/join relation or on any of its children, since the
8091 : : * FDW might look at the rel's target to create ForeignPaths.
8092 : : */
8093 : 54218 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
8094 : :
8095 : : /*
8096 : : * If the relation is partitioned, recursively apply the scan/join target
8097 : : * to all partitions, and generate brand-new Append paths in which the
8098 : : * scan/join target is computed below the Append rather than above it.
8099 : : * Since Append is not projection-capable, that might save a separate
8100 : : * Result node, and it also is important for partitionwise aggregate.
8101 : : */
8102 [ + + ]: 54218 : if (rel_is_partitioned)
8103 : : {
8104 : 1584 : List *live_children = NIL;
8105 : 1584 : int i;
8106 : :
8107 : : /* Adjust each partition. */
8108 : 1584 : i = -1;
8109 [ + + ]: 4964 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8110 : : {
8111 : 3380 : RelOptInfo *child_rel = rel->part_rels[i];
8112 : 3380 : AppendRelInfo **appinfos;
8113 : 3380 : int nappinfos;
8114 : 3380 : List *child_scanjoin_targets = NIL;
8115 : :
8116 [ + - ]: 3380 : Assert(child_rel != NULL);
8117 : :
8118 : : /* Dummy children can be ignored. */
8119 [ + + ]: 3380 : if (IS_DUMMY_REL(child_rel))
8120 : 7 : continue;
8121 : :
8122 : : /* Translate scan/join targets for this child. */
8123 : 3373 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
8124 : : &nappinfos);
8125 [ + - + + : 6746 : foreach(lc, scanjoin_targets)
+ + ]
8126 : : {
8127 : 3373 : PathTarget *target = lfirst_node(PathTarget, lc);
8128 : :
8129 : 3373 : target = copy_pathtarget(target);
8130 : 3373 : target->exprs = (List *)
8131 : 6746 : adjust_appendrel_attrs(root,
8132 : 3373 : (Node *) target->exprs,
8133 : 3373 : nappinfos, appinfos);
8134 : 6746 : child_scanjoin_targets = lappend(child_scanjoin_targets,
8135 : 3373 : target);
8136 : 3373 : }
8137 : 3373 : pfree(appinfos);
8138 : :
8139 : : /* Recursion does the real work. */
8140 : 6746 : apply_scanjoin_target_to_paths(root, child_rel,
8141 : 3373 : child_scanjoin_targets,
8142 : 3373 : scanjoin_targets_contain_srfs,
8143 : 3373 : scanjoin_target_parallel_safe,
8144 : 3373 : tlist_same_exprs);
8145 : :
8146 : : /* Save non-dummy children for Append paths. */
8147 [ - + ]: 3373 : if (!IS_DUMMY_REL(child_rel))
8148 : 3373 : live_children = lappend(live_children, child_rel);
8149 [ - + + ]: 3380 : }
8150 : :
8151 : : /* Build new paths for this relation by appending child paths. */
8152 : 1584 : add_paths_to_append_rel(root, rel, live_children);
8153 : 1584 : }
8154 : :
8155 : : /*
8156 : : * Consider generating Gather or Gather Merge paths. We must only do this
8157 : : * if the relation is parallel safe, and we don't do it for child rels to
8158 : : * avoid creating multiple Gather nodes within the same plan. We must do
8159 : : * this after all paths have been generated and before set_cheapest, since
8160 : : * one of the generated paths may turn out to be the cheapest one.
8161 : : */
8162 [ + + + + : 54218 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
+ + - + ]
8163 : 20438 : generate_useful_gather_paths(root, rel, false);
8164 : :
8165 : : /*
8166 : : * Reassess which paths are the cheapest, now that we've potentially added
8167 : : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8168 : : * this relation.
8169 : : */
8170 : 54218 : set_cheapest(rel);
8171 : 54218 : }
8172 : :
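: : /*
: :  * Editor's note (illustrative): the tlist_same_exprs shortcut above
: :  * matters in queries like
: :  *
: :  *     SELECT a, b FROM t GROUP BY a, b;
: :  *
: :  * where the scan already emits exactly a and b. Only sortgroupref
: :  * labels need to be injected into the existing PathTargets; no
: :  * ProjectionPath (and hence no extra plan node) is required.
: :  */
: :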
8173 : : /*
8174 : : * create_partitionwise_grouping_paths
8175 : : *
8176 : : * If the partition keys of the input relation are part of the GROUP BY clause, all
8177 : : * the rows belonging to a given group come from a single partition. This
8178 : : * allows aggregation/grouping over a partitioned relation to be broken down
8179 : : * into aggregation/grouping on each partition. This should be no worse, and
8180 : : * often better, than the normal approach.
8181 : : *
8182 : : * However, if the GROUP BY clause does not contain all the partition keys,
8183 : : * rows from a given group may be spread across multiple partitions. In that
8184 : : * case, we perform partial aggregation for each group, append the results,
8185 : : * and then finalize aggregation. This is less certain to win than the
8186 : : * previous case. It may win if the PartialAggregate stage greatly reduces
8187 : : * the number of groups, because fewer rows will pass through the Append node.
8188 : : * It may lose if we have lots of small groups.
8189 : : */
8190 : : static void
8191 : 136 : create_partitionwise_grouping_paths(PlannerInfo *root,
8192 : : RelOptInfo *input_rel,
8193 : : RelOptInfo *grouped_rel,
8194 : : RelOptInfo *partially_grouped_rel,
8195 : : const AggClauseCosts *agg_costs,
8196 : : grouping_sets_data *gd,
8197 : : PartitionwiseAggregateType patype,
8198 : : GroupPathExtraData *extra)
8199 : : {
8200 : 136 : List *grouped_live_children = NIL;
8201 : 136 : List *partially_grouped_live_children = NIL;
8202 : 136 : PathTarget *target = grouped_rel->reltarget;
8203 : 136 : bool partial_grouping_valid = true;
8204 : 136 : int i;
8205 : :
8206 [ + - ]: 136 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
8207 [ + + + - ]: 136 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
8208 : : partially_grouped_rel != NULL);
8209 : :
8210 : : /* Add paths for partitionwise aggregation/grouping. */
8211 : 136 : i = -1;
8212 [ + + ]: 492 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8213 : : {
8214 : 356 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
8215 : 356 : PathTarget *child_target;
8216 : 356 : AppendRelInfo **appinfos;
8217 : 356 : int nappinfos;
8218 : 356 : GroupPathExtraData child_extra;
8219 : 356 : RelOptInfo *child_grouped_rel;
8220 : 356 : RelOptInfo *child_partially_grouped_rel;
8221 : :
8222 [ + - ]: 356 : Assert(child_input_rel != NULL);
8223 : :
8224 : : /* Dummy children can be ignored. */
8225 [ - + ]: 356 : if (IS_DUMMY_REL(child_input_rel))
8226 : 0 : continue;
8227 : :
8228 : 356 : child_target = copy_pathtarget(target);
8229 : :
8230 : : /*
8231 : : * Copy the given "extra" structure as is and then override the
8232 : : * members specific to this child.
8233 : : */
8234 : 356 : memcpy(&child_extra, extra, sizeof(child_extra));
8235 : :
8236 : 356 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8237 : : &nappinfos);
8238 : :
8239 : 356 : child_target->exprs = (List *)
8240 : 712 : adjust_appendrel_attrs(root,
8241 : 356 : (Node *) target->exprs,
8242 : 356 : nappinfos, appinfos);
8243 : :
8244 : : /* Translate havingQual and targetList. */
8245 : 356 : child_extra.havingQual = (Node *)
8246 : 712 : adjust_appendrel_attrs(root,
8247 : 356 : extra->havingQual,
8248 : 356 : nappinfos, appinfos);
8249 : 356 : child_extra.targetList = (List *)
8250 : 712 : adjust_appendrel_attrs(root,
8251 : 356 : (Node *) extra->targetList,
8252 : 356 : nappinfos, appinfos);
8253 : :
8254 : : /*
8255 : : * extra->patype was the value computed for our parent rel; patype is
8256 : : * the value for this relation. For the child, our value is its
8257 : : * parent rel's value.
8258 : : */
8259 : 356 : child_extra.patype = patype;
8260 : :
8261 : : /*
8262 : : * Create grouping relation to hold fully aggregated grouping and/or
8263 : : * aggregation paths for the child.
8264 : : */
8265 : 712 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
8266 : 356 : child_target,
8267 : 356 : extra->target_parallel_safe,
8268 : 356 : child_extra.havingQual);
8269 : :
8270 : : /* Create grouping paths for this child relation. */
8271 : 712 : create_ordinary_grouping_paths(root, child_input_rel,
8272 : 356 : child_grouped_rel,
8273 : 356 : agg_costs, gd, &child_extra,
8274 : : &child_partially_grouped_rel);
8275 : :
8276 [ + + ]: 356 : if (child_partially_grouped_rel)
8277 : : {
8278 : 256 : partially_grouped_live_children =
8279 : 512 : lappend(partially_grouped_live_children,
8280 : 256 : child_partially_grouped_rel);
8281 : 256 : }
8282 : : else
8283 : 100 : partial_grouping_valid = false;
8284 : :
8285 [ + + ]: 356 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8286 : : {
8287 : 214 : set_cheapest(child_grouped_rel);
8288 : 428 : grouped_live_children = lappend(grouped_live_children,
8289 : 214 : child_grouped_rel);
8290 : 214 : }
8291 : :
8292 : 356 : pfree(appinfos);
8293 [ - - + ]: 356 : }
8294 : :
8295 : : /*
8296 : : * Try to create append paths for partially grouped children. For full
8297 : : * partitionwise aggregation, we might have paths in the partial_pathlist
8298 : : * if parallel aggregation is possible. For partial partitionwise
8299 : : * aggregation, we may have paths in both pathlist and partial_pathlist.
8300 : : *
8301 : : * NB: We must have a partially grouped path for every child in order to
8302 : : * generate a partially grouped path for this relation.
8303 : : */
8304 [ + + + + ]: 136 : if (partially_grouped_rel && partial_grouping_valid)
8305 : : {
8306 [ + - ]: 100 : Assert(partially_grouped_live_children != NIL);
8307 : :
8308 : 200 : add_paths_to_append_rel(root, partially_grouped_rel,
8309 : 100 : partially_grouped_live_children);
8310 : 100 : }
8311 : :
8312 : : /* If possible, create append paths for fully grouped children. */
8313 [ + + ]: 136 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8314 : : {
8315 [ + - ]: 80 : Assert(grouped_live_children != NIL);
8316 : :
8317 : 80 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8318 : 80 : }
8319 : 136 : }
8320 : :
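: : /*
: :  * Editor's illustration (not part of the source): full partitionwise
: :  * aggregation (PARTITIONWISE_AGGREGATE_FULL) produces plans like
: :  *
: :  *     Append
: :  *       -> HashAggregate -> Seq Scan on t_p1
: :  *       -> HashAggregate -> Seq Scan on t_p2
: :  *
: :  * while the partial flavor finalizes above the Append:
: :  *
: :  *     Finalize HashAggregate
: :  *       -> Append
: :  *            -> Partial HashAggregate -> Seq Scan on t_p1
: :  *            -> Partial HashAggregate -> Seq Scan on t_p2
: :  */
: :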
8321 : : /*
8322 : : * group_by_has_partkey
8323 : : *
8324 : : * Returns true if all the partition keys of the given relation appear in
8325 : : * the GROUP BY clause with matching collations, false otherwise.
8326 : : */
8327 : : static bool
8328 : 127 : group_by_has_partkey(RelOptInfo *input_rel,
8329 : : List *targetList,
8330 : : List *groupClause)
8331 : : {
8332 : 127 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8333 : 127 : int cnt = 0;
8334 : 127 : int partnatts;
8335 : :
8336 : : /* Input relation should be partitioned. */
8337 [ + - ]: 127 : Assert(input_rel->part_scheme);
8338 : :
8339 : : /* Rule out early if there are no partition keys present. */
8340 [ + - ]: 127 : if (!input_rel->partexprs)
8341 : 0 : return false;
8342 : :
8343 : 127 : partnatts = input_rel->part_scheme->partnatts;
8344 : :
8345 [ + + ]: 213 : for (cnt = 0; cnt < partnatts; cnt++)
8346 : : {
8347 : 133 : List *partexprs = input_rel->partexprs[cnt];
8348 : 133 : ListCell *lc;
8349 : 133 : bool found = false;
8350 : :
8351 [ + + + + : 287 : foreach(lc, partexprs)
+ + + + ]
8352 : : {
8353 : 154 : ListCell *lg;
8354 : 154 : Expr *partexpr = lfirst(lc);
8355 : 154 : Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8356 : :
8357 [ + - + + : 328 : foreach(lg, groupexprs)
+ + + + ]
8358 : : {
8359 : 174 : Expr *groupexpr = lfirst(lg);
8360 : 174 : Oid groupcoll = exprCollation((Node *) groupexpr);
8361 : :
8362 : : /*
8363 : : * Note: we can assume there is at most one RelabelType node;
8364 : : * eval_const_expressions() will have simplified if more than
8365 : : * one.
8366 : : */
8367 [ + + ]: 174 : if (IsA(groupexpr, RelabelType))
8368 : 4 : groupexpr = ((RelabelType *) groupexpr)->arg;
8369 : :
8370 [ + + ]: 174 : if (equal(groupexpr, partexpr))
8371 : : {
8372 : : /*
8373 : : * Reject a match if the grouping collation does not match
8374 : : * the partitioning collation.
8375 : : */
8376 [ + + + - : 88 : if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
+ + ]
8377 : 24 : partcoll != groupcoll)
8378 : 2 : return false;
8379 : :
8380 : 86 : found = true;
8381 : 86 : break;
8382 : : }
8383 [ + + ]: 174 : }
8384 : :
8385 [ + + ]: 152 : if (found)
8386 : 86 : break;
8387 [ + + ]: 154 : }
8388 : :
8389 : : /*
8390 : : * If none of the partition key expressions match with any of the
8391 : : * GROUP BY expression, return false.
8392 : : */
8393 [ + + ]: 131 : if (!found)
8394 : 45 : return false;
8395 [ + + ]: 133 : }
8396 : :
8397 : 80 : return true;
8398 : 127 : }
8399 : :
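: : /*
: :  * Editor's examples (illustrative): for a table partitioned BY RANGE (a),
: :  *
: :  *     GROUP BY a       -> true  (partition key is grouped)
: :  *     GROUP BY a, b    -> true  (extra grouping columns are fine)
: :  *     GROUP BY b       -> false (partition key not covered)
: :  *
: :  * A textual match is also rejected when the grouping collation differs
: :  * from the partitioning collation, since values that compare equal
: :  * under one collation need not land in the same partition under the
: :  * other.
: :  */
: :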
8400 : : /*
8401 : : * generate_setop_child_grouplist
8402 : : * Build a SortGroupClause list defining the sort/grouping properties
8403 : : * of the child of a set operation.
8404 : : *
8405 : : * This is similar to generate_setop_grouplist() but differs as the setop
8406 : : * child query's targetlist entries may already have a tleSortGroupRef
8407 : : * assigned for other purposes, such as GROUP BYs. Here we keep the
8408 : : * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8409 : : * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8410 : : * any of the columns in the targetlist don't match to the setop's colTypes
8411 : : * then we return an empty list. This may leave some TLEs with unreferenced
8412 : : * ressortgroupref markings, but that's harmless.
8413 : : */
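: :
: : /*
: :  * Hypothetical example: in
: :  *
: :  *		SELECT a, b FROM t1 GROUP BY a, b UNION SELECT x, y FROM t2;
: :  *
: :  * the first child's targetlist entries already carry ressortgroupref
: :  * markings from the GROUP BY, so the loop below reuses those instead
: :  * of assigning fresh ones.
: :  */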
8414 : : static List *
8415 : 1940 : generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8416 : : {
8417 : 1940 : List *grouplist = copyObject(op->groupClauses);
8418 : 1940 : ListCell *lg;
8419 : 1940 : ListCell *lt;
8420 : 1940 : ListCell *ct;
8421 : :
8422 : 1940 : lg = list_head(grouplist);
8423 : 1940 : ct = list_head(op->colTypes);
8424 [ + + + + : 7448 : foreach(lt, targetlist)
+ + + + ]
8425 : : {
8426 : 5508 : TargetEntry *tle = (TargetEntry *) lfirst(lt);
8427 : 5508 : SortGroupClause *sgc;
8428 : 5508 : Oid coltype;
8429 : :
8430 : : /* Resjunk columns could have sortgrouprefs; leave these alone. */
8431 [ - + ]: 5508 : if (tle->resjunk)
8432 : 0 : continue;
8433 : :
8434 : : /*
8435 : : * We expect every non-resjunk target to have a corresponding
8436 : : * SortGroupClause and colTypes entry.
8437 : : */
8438 [ + - ]: 5508 : Assert(lg != NULL);
8439 [ + - ]: 5508 : Assert(ct != NULL);
8440 : 5508 : sgc = (SortGroupClause *) lfirst(lg);
8441 : 5508 : coltype = lfirst_oid(ct);
8442 : :
8443 : : /* reject if target type isn't the same as the setop target type */
8444 [ + + ]: 5508 : if (coltype != exprType((Node *) tle->expr))
8445 : 15 : return NIL;
8446 : :
8447 : 5493 : lg = lnext(grouplist, lg);
8448 : 5493 : ct = lnext(op->colTypes, ct);
8449 : :
8450 : : /* assign a tleSortGroupRef, or reuse the existing one */
8451 : 5493 : sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8452 [ - + + ]: 5508 : }
8453 : :
8454 [ + - ]: 1925 : Assert(lg == NULL);
8455 [ + - ]: 1925 : Assert(ct == NULL);
8456 : :
8457 : 1925 : return grouplist;
8458 : 1940 : }
8459 : :
8460 : : /*
8461 : : * create_unique_paths
8462 : : * Build a new RelOptInfo containing Paths that represent elimination of
8463 : : * distinct rows from the input data. Distinct-ness is defined according to
8464 : : * the needs of the semijoin represented by sjinfo. If it is not possible
8465 : : * to identify how to make the data unique, NULL is returned.
8466 : : *
8467 : : * If used at all, this is likely to be called repeatedly on the same rel,
8468 : : * so we cache the result.
8469 : : */
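: :
: : /*
: :  * As a hypothetical example, in
: :  *
: :  *		SELECT * FROM t WHERE t.x IN (SELECT s.y FROM s);
: :  *
: :  * the IN becomes a semijoin of "t" to "s".  One way to implement it
: :  * is to unique-ify "s" on s.y and then perform a plain inner join
: :  * against "t"; the rel built here represents that unique-ified "s".
: :  */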
8470 : : RelOptInfo *
8471 : 1203 : create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
8472 : : {
8473 : 1203 : RelOptInfo *unique_rel;
8474 : 1203 : List *sortPathkeys = NIL;
8475 : 1203 : List *groupClause = NIL;
8476 : 1203 : MemoryContext oldcontext;
8477 : :
8478 : : /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8479 [ + - ]: 1203 : Assert(sjinfo->jointype == JOIN_SEMI);
8480 [ + - ]: 1203 : Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8481 : :
8482 : : /* If result already cached, return it */
8483 [ + + ]: 1203 : if (rel->unique_rel)
8484 : 135 : return rel->unique_rel;
8485 : :
8486 : : /* If it's not possible to unique-ify, return NULL */
8487 [ + + - + ]: 1068 : if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8488 : 19 : return NULL;
8489 : :
8490 : : /*
8491 : : * Punt if this is a child relation and we failed to build a unique-ified
8492 : : * relation for its parent. This can happen if all the RHS columns were
8493 : : * found to be equated to constants when unique-ifying the parent table,
8494 : : * leaving no columns to unique-ify.
8495 : : */
8496 [ + + + + : 1049 : if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
+ + ]
8497 : 2 : return NULL;
8498 : :
8499 : : /*
8500 : : * When called during GEQO join planning, we are in a short-lived memory
8501 : : * context. We must make sure that the unique rel and any subsidiary data
8502 : : * structures created for a baserel survive the GEQO cycle, else the
8503 : : * baserel is trashed for future GEQO cycles. On the other hand, when we
8504 : : * are creating those for a joinrel during GEQO, we don't want them to
8505 : : * clutter the main planning context. Upshot is that the best solution is
8506 : : * to explicitly allocate memory in the same context the given RelOptInfo
8507 : : * is in.
8508 : : */
8509 : 1047 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
8510 : :
8511 : 1047 : unique_rel = makeNode(RelOptInfo);
8512 : 1047 : memcpy(unique_rel, rel, sizeof(RelOptInfo));
8513 : :
8514 : : /*
8515 : : * clear path info
8516 : : */
8517 : 1047 : unique_rel->pathlist = NIL;
8518 : 1047 : unique_rel->ppilist = NIL;
8519 : 1047 : unique_rel->partial_pathlist = NIL;
8520 : 1047 : unique_rel->cheapest_startup_path = NULL;
8521 : 1047 : unique_rel->cheapest_total_path = NULL;
8522 : 1047 : unique_rel->cheapest_parameterized_paths = NIL;
8523 : :
8524 : : /*
8525 : : * Build the target list for the unique rel. We also build the pathkeys
8526 : : * that represent the ordering requirements for the sort-based
8527 : : * implementation, and the list of SortGroupClause nodes that represent
8528 : : * the columns to be grouped on for the hash-based implementation.
8529 : : *
8530 : : * For a child rel, we can construct these fields from those of its
8531 : : * parent.
8532 : : */
8533 [ + + + + : 1047 : if (IS_OTHER_REL(rel))
- + ]
8534 : : {
8535 : 72 : PathTarget *child_unique_target;
8536 : 72 : PathTarget *parent_unique_target;
8537 : :
8538 : 72 : parent_unique_target = rel->top_parent->unique_rel->reltarget;
8539 : :
8540 : 72 : child_unique_target = copy_pathtarget(parent_unique_target);
8541 : :
8542 : : /* Translate the target expressions */
8543 : 72 : child_unique_target->exprs = (List *)
8544 : 144 : adjust_appendrel_attrs_multilevel(root,
8545 : 72 : (Node *) parent_unique_target->exprs,
8546 : 72 : rel,
8547 : 72 : rel->top_parent);
8548 : :
8549 : 72 : unique_rel->reltarget = child_unique_target;
8550 : :
8551 : 72 : sortPathkeys = rel->top_parent->unique_pathkeys;
8552 : 72 : groupClause = rel->top_parent->unique_groupclause;
8553 : 72 : }
8554 : : else
8555 : : {
8556 : 975 : List *newtlist;
8557 : 975 : int nextresno;
8558 : 975 : List *sortList = NIL;
8559 : 975 : ListCell *lc1;
8560 : 975 : ListCell *lc2;
8561 : :
8562 : : /*
8563 : : * The values we are supposed to unique-ify may be expressions in the
8564 : : * variables of the input rel's targetlist. We have to add any such
8565 : : * expressions to the unique rel's targetlist.
8566 : : *
8567 : : * To complicate matters, some of the values to be unique-ified may be
8568 : : * known redundant by the EquivalenceClass machinery (e.g., because
8569 : : * they have been equated to constants). There is no need to compare
8570 : : * such values during unique-ification, and indeed we had better not
8571 : : * try because the Vars involved may not have propagated as high as
8572 : : * the semijoin's level. We use make_pathkeys_for_sortclauses to
8573 : : * detect such cases, which is a tad inefficient but it doesn't seem
8574 : : * worth building specialized infrastructure for this.
8575 : : */
8576 : 975 : newtlist = make_tlist_from_pathtarget(rel->reltarget);
8577 : 975 : nextresno = list_length(newtlist) + 1;
8578 : :
8579 [ + - + + : 1995 : forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
+ - + + +
+ + + ]
8580 : : {
8581 : 1020 : Expr *uniqexpr = lfirst(lc1);
8582 : 1020 : Oid in_oper = lfirst_oid(lc2);
8583 : 1020 : Oid sortop;
8584 : 1020 : TargetEntry *tle;
8585 : 1020 : bool made_tle = false;
8586 : :
8587 : 1020 : tle = tlist_member(uniqexpr, newtlist);
8588 [ + + ]: 1020 : if (!tle)
8589 : : {
8590 : 1068 : tle = makeTargetEntry(uniqexpr,
8591 : 534 : nextresno,
8592 : : NULL,
8593 : : false);
8594 : 534 : newtlist = lappend(newtlist, tle);
8595 : 534 : nextresno++;
8596 : 534 : made_tle = true;
8597 : 534 : }
8598 : :
8599 : : /*
8600 : : * Try to build an ORDER BY list to sort the input compatibly. We
8601 : : * do this for each sortable clause even when the clauses are not
8602 : : * all sortable, so that we can detect clauses that are redundant
8603 : : * according to the pathkey machinery.
8604 : : */
8605 : 1020 : sortop = get_ordering_op_for_equality_op(in_oper, false);
8606 [ + - ]: 1020 : if (OidIsValid(sortop))
8607 : : {
8608 : 1020 : Oid eqop;
8609 : 1020 : SortGroupClause *sortcl;
8610 : :
8611 : : /*
8612 : : * The Unique node will need equality operators. Normally
8613 : : * these are the same as the IN clause operators, but if those
8614 : : * are cross-type operators then the equality operators are
8615 : : * the ones for the IN clause operators' RHS datatype.
8616 : : */
8617 : 1020 : eqop = get_equality_op_for_ordering_op(sortop, NULL);
8618 [ + - ]: 1020 : if (!OidIsValid(eqop)) /* shouldn't happen */
8619 [ # # # # ]: 0 : elog(ERROR, "could not find equality operator for ordering operator %u",
8620 : : sortop);
8621 : :
8622 : 1020 : sortcl = makeNode(SortGroupClause);
8623 : 1020 : sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8624 : 1020 : sortcl->eqop = eqop;
8625 : 1020 : sortcl->sortop = sortop;
8626 : 1020 : sortcl->reverse_sort = false;
8627 : 1020 : sortcl->nulls_first = false;
8628 : 1020 : sortcl->hashable = false; /* no need to make this accurate */
8629 : 1020 : sortList = lappend(sortList, sortcl);
8630 : :
8631 : : /*
8632 : : * At each step, convert the SortGroupClause list to pathkey
8633 : : * form. If the just-added SortGroupClause is redundant, the
8634 : : * result will be shorter than the SortGroupClause list.
8635 : : */
8636 : 2040 : sortPathkeys = make_pathkeys_for_sortclauses(root, sortList,
8637 : 1020 : newtlist);
8638 [ + + ]: 1020 : if (list_length(sortPathkeys) != list_length(sortList))
8639 : : {
8640 : : /* Drop the redundant SortGroupClause */
8641 : 342 : sortList = list_delete_last(sortList);
8642 [ - + ]: 342 : Assert(list_length(sortPathkeys) == list_length(sortList));
8643 : : /* Undo tlist addition, if we made one */
8644 [ + + ]: 342 : if (made_tle)
8645 : : {
8646 : 2 : newtlist = list_delete_last(newtlist);
8647 : 2 : nextresno--;
8648 : 2 : }
8649 : : /* We need not consider this clause for hashing, either */
8650 : 342 : continue;
8651 : : }
8652 [ + + ]: 1020 : }
8653 [ # # ]: 0 : else if (sjinfo->semi_can_btree) /* shouldn't happen */
8654 [ # # # # ]: 0 : elog(ERROR, "could not find ordering operator for equality operator %u",
8655 : : in_oper);
8656 : :
8657 [ - + ]: 678 : if (sjinfo->semi_can_hash)
8658 : : {
8659 : : /* Create a GROUP BY list for the Agg node to use */
8660 : 678 : Oid eq_oper;
8661 : 678 : SortGroupClause *groupcl;
8662 : :
8663 : : /*
8664 : : * Get the hashable equality operators for the Agg node to
8665 : : * use. Normally these are the same as the IN clause
8666 : : * operators, but if those are cross-type operators then the
8667 : : * equality operators are the ones for the IN clause
8668 : : * operators' RHS datatype.
8669 : : */
8670 [ + - ]: 678 : if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
8671 [ # # # # ]: 0 : elog(ERROR, "could not find compatible hash operator for operator %u",
8672 : : in_oper);
8673 : :
8674 : 678 : groupcl = makeNode(SortGroupClause);
8675 : 678 : groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8676 : 678 : groupcl->eqop = eq_oper;
8677 : 678 : groupcl->sortop = sortop;
8678 : 678 : groupcl->reverse_sort = false;
8679 : 678 : groupcl->nulls_first = false;
8680 : 678 : groupcl->hashable = true;
8681 : 678 : groupClause = lappend(groupClause, groupcl);
8682 : 678 : }
8683 [ - + + ]: 1020 : }
8684 : :
8685 : : /*
8686 : : * Done building the sortPathkeys and groupClause. But the
8687 : : * sortPathkeys are bogus if not all the clauses were sortable.
8688 : : */
8689 [ + - ]: 975 : if (!sjinfo->semi_can_btree)
8690 : 0 : sortPathkeys = NIL;
8691 : :
8692 : : /*
8693 : : * It can happen that all the RHS columns are equated to constants.
8694 : : * We'd have to do something special to unique-ify in that case, and
8695 : : * it's such an unlikely-in-the-real-world case that it's not worth
8696 : : * the effort. So just punt if we found no columns to unique-ify.
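: :  * (A hypothetical example: in "x IN (SELECT c FROM s WHERE c = 42)",
: :  * "c" is equated to a constant, so every clause is detected as
: :  * redundant and both lists end up empty.)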
8697 : : */
8698 [ + + - + ]: 975 : if (sortPathkeys == NIL && groupClause == NIL)
8699 : : {
8700 : 325 : MemoryContextSwitchTo(oldcontext);
8701 : 325 : return NULL;
8702 : : }
8703 : :
8704 : : /* Convert the required targetlist back to PathTarget form */
8705 : 650 : unique_rel->reltarget = create_pathtarget(root, newtlist);
8706 [ + + ]: 975 : }
8707 : :
8708 : : /* build unique paths based on input rel's pathlist */
8709 : 1444 : create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8710 : 722 : sjinfo, unique_rel);
8711 : :
8712 : : /* build unique paths based on input rel's partial_pathlist */
8713 : 1444 : create_partial_unique_paths(root, rel, sortPathkeys, groupClause,
8714 : 722 : sjinfo, unique_rel);
8715 : :
8716 : : /* Now choose the best path(s) */
8717 : 722 : set_cheapest(unique_rel);
8718 : :
8719 : : /*
8720 : : * There shouldn't be any partial paths for the unique relation;
8721 : : * otherwise, we won't be able to properly guarantee uniqueness.
8722 : : */
8723 [ + - ]: 722 : Assert(unique_rel->partial_pathlist == NIL);
8724 : :
8725 : : /* Cache the result */
8726 : 722 : rel->unique_rel = unique_rel;
8727 : 722 : rel->unique_pathkeys = sortPathkeys;
8728 : 722 : rel->unique_groupclause = groupClause;
8729 : :
8730 : 722 : MemoryContextSwitchTo(oldcontext);
8731 : :
8732 : 722 : return unique_rel;
8733 : 1203 : }
8734 : :
8735 : : /*
8736 : : * create_final_unique_paths
8737 : : * Create unique paths in 'unique_rel' based on 'input_rel' pathlist
8738 : : */
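: :
: : /*
: :  * Two plan shapes can come out of this (an illustrative sketch, not an
: :  * exhaustive list): sort-based, a Unique atop a Sort (or Incremental
: :  * Sort) of the input path, and hash-based, a HashAggregate over the
: :  * cheapest input path that has grouping columns but no aggregate
: :  * functions.
: :  */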
8739 : : static void
8740 : 1336 : create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8741 : : List *sortPathkeys, List *groupClause,
8742 : : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8743 : : {
8744 : 1336 : Path *cheapest_input_path = input_rel->cheapest_total_path;
8745 : :
8746 : : /* Estimate number of output rows */
8747 : 2672 : unique_rel->rows = estimate_num_groups(root,
8748 : 1336 : sjinfo->semi_rhs_exprs,
8749 : 1336 : cheapest_input_path->rows,
8750 : : NULL,
8751 : : NULL);
8752 : :
8753 : : /* Consider sort-based implementations, if possible. */
8754 [ - + ]: 1336 : if (sjinfo->semi_can_btree)
8755 : : {
8756 : 1336 : ListCell *lc;
8757 : :
8758 : : /*
8759 : : * Use any available suitably-sorted path as input, and also consider
8760 : : * sorting the cheapest-total path and incremental sort on any paths
8761 : : * with presorted keys.
8762 : : *
8763 : : * To save planning time, we ignore parameterized input paths unless
8764 : : * they are the cheapest-total path.
8765 : : */
8766 [ + - + + : 2927 : foreach(lc, input_rel->pathlist)
+ + ]
8767 : : {
8768 : 1591 : Path *input_path = (Path *) lfirst(lc);
8769 : 1591 : Path *path;
8770 : 1591 : bool is_sorted;
8771 : 1591 : int presorted_keys;
8772 : :
8773 : : /*
8774 : : * Ignore parameterized paths that are not the cheapest-total
8775 : : * path.
8776 : : */
8777 [ + + + + ]: 1591 : if (input_path->param_info &&
8778 : 152 : input_path != cheapest_input_path)
8779 : 143 : continue;
8780 : :
8781 : 2896 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8782 : 1448 : input_path->pathkeys,
8783 : : &presorted_keys);
8784 : :
8785 : : /*
8786 : : * Ignore paths that are not suitably or partially sorted, unless
8787 : : * they are the cheapest total path (no need to deal with paths
8788 : : * that have presorted keys when incremental sort is disabled).
8789 : : */
8790 [ + + + + : 1456 : if (!is_sorted && input_path != cheapest_input_path &&
+ - ]
8791 [ + + ]: 14 : (presorted_keys == 0 || !enable_incremental_sort))
8792 : 6 : continue;
8793 : :
8794 : : /*
8795 : : * Make a separate ProjectionPath in case we need a Result node.
8796 : : */
8797 : 2884 : path = (Path *) create_projection_path(root,
8798 : 1442 : unique_rel,
8799 : 1442 : input_path,
8800 : 1442 : unique_rel->reltarget);
8801 : :
8802 [ + + ]: 1442 : if (!is_sorted)
8803 : : {
8804 : : /*
8805 : : * We've no need to consider both a sort and incremental sort.
8806 : : * We'll just do a sort if there are no presorted keys and an
8807 : : * incremental sort when there are presorted keys.
8808 : : */
8809 [ + + - + ]: 720 : if (presorted_keys == 0 || !enable_incremental_sort)
8810 : 1424 : path = (Path *) create_sort_path(root,
8811 : 712 : unique_rel,
8812 : 712 : path,
8813 : 712 : sortPathkeys,
8814 : : -1.0);
8815 : : else
8816 : 16 : path = (Path *) create_incremental_sort_path(root,
8817 : 8 : unique_rel,
8818 : 8 : path,
8819 : 8 : sortPathkeys,
8820 : 8 : presorted_keys,
8821 : : -1.0);
8822 : 720 : }
8823 : :
8824 : 2884 : path = (Path *) create_unique_path(root, unique_rel, path,
8825 : 1442 : list_length(sortPathkeys),
8826 : 1442 : unique_rel->rows);
8827 : :
8828 : 1442 : add_path(unique_rel, path);
8829 [ - + + ]: 1591 : }
8830 : 1336 : }
8831 : :
8832 : : /* Consider hash-based implementation, if possible. */
8833 [ - + ]: 1336 : if (sjinfo->semi_can_hash)
8834 : : {
8835 : 1336 : Path *path;
8836 : :
8837 : : /*
8838 : : * Make a separate ProjectionPath in case we need a Result node.
8839 : : */
8840 : 2672 : path = (Path *) create_projection_path(root,
8841 : 1336 : unique_rel,
8842 : 1336 : cheapest_input_path,
8843 : 1336 : unique_rel->reltarget);
8844 : :
8845 : 2672 : path = (Path *) create_agg_path(root,
8846 : 1336 : unique_rel,
8847 : 1336 : path,
8848 : 1336 : cheapest_input_path->pathtarget,
8849 : : AGG_HASHED,
8850 : : AGGSPLIT_SIMPLE,
8851 : 1336 : groupClause,
8852 : : NIL,
8853 : : NULL,
8854 : 1336 : unique_rel->rows);
8855 : :
8856 : 1336 : add_path(unique_rel, path);
8857 : 1336 : }
8858 : 1336 : }
8859 : :
8860 : : /*
8861 : : * create_partial_unique_paths
8862 : : * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist
8863 : : */
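: :
: : /*
: :  * Sketch of the intended parallel flow, assuming the input rel has
: :  * usable partial paths: each worker de-duplicates its own rows with
: :  * the same sort- or hash-based strategies as above, a Gather or
: :  * Gather Merge combines the workers' outputs, and a final
: :  * unique-ification pass above that removes duplicates arising across
: :  * workers.
: :  */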
8864 : : static void
8865 : 722 : create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8866 : : List *sortPathkeys, List *groupClause,
8867 : : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8868 : : {
8869 : 722 : RelOptInfo *partial_unique_rel;
8870 : 722 : Path *cheapest_partial_path;
8871 : :
8872 : : /* nothing to do when there are no partial paths in the input rel */
8873 [ + + + + ]: 722 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8874 : 108 : return;
8875 : :
8876 : : /*
8877 : : * nothing to do if there's anything in the targetlist that's
8878 : : * parallel-restricted.
8879 : : */
8880 [ + - ]: 614 : if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8881 : 0 : return;
8882 : :
8883 : 614 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
8884 : :
8885 : 614 : partial_unique_rel = makeNode(RelOptInfo);
8886 : 614 : memcpy(partial_unique_rel, input_rel, sizeof(RelOptInfo));
8887 : :
8888 : : /*
8889 : : * clear path info
8890 : : */
8891 : 614 : partial_unique_rel->pathlist = NIL;
8892 : 614 : partial_unique_rel->ppilist = NIL;
8893 : 614 : partial_unique_rel->partial_pathlist = NIL;
8894 : 614 : partial_unique_rel->cheapest_startup_path = NULL;
8895 : 614 : partial_unique_rel->cheapest_total_path = NULL;
8896 : 614 : partial_unique_rel->cheapest_parameterized_paths = NIL;
8897 : :
8898 : : /* Estimate number of output rows */
8899 : 1228 : partial_unique_rel->rows = estimate_num_groups(root,
8900 : 614 : sjinfo->semi_rhs_exprs,
8901 : 614 : cheapest_partial_path->rows,
8902 : : NULL,
8903 : : NULL);
8904 : 614 : partial_unique_rel->reltarget = unique_rel->reltarget;
8905 : :
8906 : : /* Consider sort-based implementations, if possible. */
8907 [ - + ]: 614 : if (sjinfo->semi_can_btree)
8908 : : {
8909 : 614 : ListCell *lc;
8910 : :
8911 : : /*
8912 : : * Use any available suitably-sorted path as input, and also consider
8913 : : * sorting the cheapest partial path and incremental sort on any paths
8914 : : * with presorted keys.
8915 : : */
8916 [ + - + + : 1280 : foreach(lc, input_rel->partial_pathlist)
+ + ]
8917 : : {
8918 : 666 : Path *input_path = (Path *) lfirst(lc);
8919 : 666 : Path *path;
8920 : 666 : bool is_sorted;
8921 : 666 : int presorted_keys;
8922 : :
8923 : 1332 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8924 : 666 : input_path->pathkeys,
8925 : : &presorted_keys);
8926 : :
8927 : : /*
8928 : : * Ignore paths that are not suitably or partially sorted, unless
8929 : : * they are the cheapest partial path (no need to deal with paths
8930 : : * that have presorted keys when incremental sort is disabled).
8931 : : */
8932 [ + + - + : 666 : if (!is_sorted && input_path != cheapest_partial_path &&
# # ]
8933 [ # # ]: 0 : (presorted_keys == 0 || !enable_incremental_sort))
8934 : 0 : continue;
8935 : :
8936 : : /*
8937 : : * Make a separate ProjectionPath in case we need a Result node.
8938 : : */
8939 : 1332 : path = (Path *) create_projection_path(root,
8940 : 666 : partial_unique_rel,
8941 : 666 : input_path,
8942 : 666 : partial_unique_rel->reltarget);
8943 : :
8944 [ + + ]: 666 : if (!is_sorted)
8945 : : {
8946 : : /*
8947 : : * We've no need to consider both a sort and incremental sort.
8948 : : * We'll just do a sort if there are no presorted keys and an
8949 : : * incremental sort when there are presorted keys.
8950 : : */
8951 [ - + # # ]: 606 : if (presorted_keys == 0 || !enable_incremental_sort)
8952 : 1212 : path = (Path *) create_sort_path(root,
8953 : 606 : partial_unique_rel,
8954 : 606 : path,
8955 : 606 : sortPathkeys,
8956 : : -1.0);
8957 : : else
8958 : 0 : path = (Path *) create_incremental_sort_path(root,
8959 : 0 : partial_unique_rel,
8960 : 0 : path,
8961 : 0 : sortPathkeys,
8962 : 0 : presorted_keys,
8963 : : -1.0);
8964 : 606 : }
8965 : :
8966 : 1332 : path = (Path *) create_unique_path(root, partial_unique_rel, path,
8967 : 666 : list_length(sortPathkeys),
8968 : 666 : partial_unique_rel->rows);
8969 : :
8970 : 666 : add_partial_path(partial_unique_rel, path);
8971 [ - + ]: 666 : }
8972 : 614 : }
8973 : :
8974 : : /* Consider hash-based implementation, if possible. */
8975 [ - + ]: 614 : if (sjinfo->semi_can_hash)
8976 : : {
8977 : 614 : Path *path;
8978 : :
8979 : : /*
8980 : : * Make a separate ProjectionPath in case we need a Result node.
8981 : : */
8982 : 1228 : path = (Path *) create_projection_path(root,
8983 : 614 : partial_unique_rel,
8984 : 614 : cheapest_partial_path,
8985 : 614 : partial_unique_rel->reltarget);
8986 : :
8987 : 1228 : path = (Path *) create_agg_path(root,
8988 : 614 : partial_unique_rel,
8989 : 614 : path,
8990 : 614 : cheapest_partial_path->pathtarget,
8991 : : AGG_HASHED,
8992 : : AGGSPLIT_SIMPLE,
8993 : 614 : groupClause,
8994 : : NIL,
8995 : : NULL,
8996 : 614 : partial_unique_rel->rows);
8997 : :
8998 : 614 : add_partial_path(partial_unique_rel, path);
8999 : 614 : }
9000 : :
9001 [ - + ]: 614 : if (partial_unique_rel->partial_pathlist != NIL)
9002 : : {
9003 : 614 : generate_useful_gather_paths(root, partial_unique_rel, true);
9004 : 614 : set_cheapest(partial_unique_rel);
9005 : :
9006 : : /*
9007 : : * Finally, create paths to unique-ify the final result. This step is
9008 : : * needed to remove any duplicates due to combining rows from parallel
9009 : : * workers.
9010 : : */
9011 : 1228 : create_final_unique_paths(root, partial_unique_rel,
9012 : 614 : sortPathkeys, groupClause,
9013 : 614 : sjinfo, unique_rel);
9014 : 614 : }
9015 : 722 : }
9016 : :
9017 : : /*
9018 : : * Choose a unique name for a given subroot.
9019 : : *
9020 : : * Modifies glob->subplanNames to track names already used.
9021 : : */
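: :
: : /*
: :  * For example (hypothetical calls): starting with no names assigned,
: :  * choose_plan_name(glob, "unnamed_subquery", false) returns
: :  * "unnamed_subquery", while a second identical call returns
: :  * "unnamed_subquery_1".  With always_number = true, the first call
: :  * would already return "unnamed_subquery_1".
: :  */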
9022 : : char *
9023 : 8108 : choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
9024 : : {
9025 : 8108 : unsigned n;
9026 : :
9027 : : /*
9028 : : * If a numeric suffix is not required, then search the list of
9029 : : * previously-assigned names for a match. If none is found, then we can
9030 : : * use the provided name without modification.
9031 : : */
9032 [ + + ]: 8108 : if (!always_number)
9033 : : {
9034 : 1668 : bool found = false;
9035 : :
9036 [ + + + + : 3827 : foreach_ptr(char, subplan_name, glob->subplanNames)
+ + + + ]
9037 : : {
9038 [ + + ]: 491 : if (strcmp(subplan_name, name) == 0)
9039 : : {
9040 : 55 : found = true;
9041 : 55 : break;
9042 : : }
9043 : 2104 : }
9044 : :
9045 [ + + ]: 1668 : if (!found)
9046 : : {
9047 : : /* pstrdup here is just to avoid cast-away-const */
9048 : 1613 : char *chosen_name = pstrdup(name);
9049 : :
9050 : 1613 : glob->subplanNames = lappend(glob->subplanNames, chosen_name);
9051 : 1613 : return chosen_name;
9052 : 1613 : }
9053 [ + + ]: 1668 : }
9054 : :
9055 : : /*
9056 : : * If a numeric suffix is required or if the un-suffixed name is already
9057 : : * in use, then loop until we find a positive integer that produces a
9058 : : * novel name.
9059 : : */
9060 [ + - ]: 10161 : for (n = 1; true; ++n)
9061 : : {
9062 : 10161 : char *proposed_name = psprintf("%s_%u", name, n);
9063 : 10161 : bool found = false;
9064 : :
9065 [ + + + + : 32086 : foreach_ptr(char, subplan_name, glob->subplanNames)
+ + + + ]
9066 : : {
9067 [ + + ]: 11764 : if (strcmp(subplan_name, proposed_name) == 0)
9068 : : {
9069 : 3666 : found = true;
9070 : 3666 : break;
9071 : : }
9072 : 18259 : }
9073 : :
9074 [ + + ]: 10161 : if (!found)
9075 : : {
9076 : 6495 : glob->subplanNames = lappend(glob->subplanNames, proposed_name);
9077 : 6495 : return proposed_name;
9078 : : }
9079 : :
9080 : 3666 : pfree(proposed_name);
9081 [ + + ]: 10161 : }
9082 [ - + ]: 8108 : }
|