/*-------------------------------------------------------------------------
 *
 * nodeModifyTable.c
 *	  routines to handle ModifyTable nodes.
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeModifyTable.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *		ExecInitModifyTable - initialize the ModifyTable node
 *		ExecModifyTable - retrieve the next tuple from the node
 *		ExecEndModifyTable - shut down the ModifyTable node
 *		ExecReScanModifyTable - rescan the ModifyTable node
 *
 *	 NOTES
 *		The ModifyTable node receives input from its outerPlan, which is
 *		the data to insert for INSERT cases, the changed columns' new
 *		values plus row-locating info for UPDATE and MERGE cases, or just the
 *		row-locating info for DELETE cases.
 *
 *		The relation to modify can be an ordinary table, a foreign table, or a
 *		view.  If it's a view, either it has sufficient INSTEAD OF triggers or
 *		this node executes only MERGE ... DO NOTHING.  If the original MERGE
 *		targeted a view not in one of those two categories, earlier processing
 *		already pointed the ModifyTable result relation to an underlying
 *		relation of that other view.  This node does process
 *		ri_WithCheckOptions, which may have expressions from those other,
 *		automatically updatable views.
 *
 *		MERGE runs a join between the source relation and the target table.
 *		If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
 *		is an outer join that might output tuples without a matching target
 *		tuple.  In this case, any tuples lacking a matching target will have
 *		NULL row-locating info, and only INSERT can be run.  But for matched
 *		target tuples, the row-locating info is used to determine the tuple to
 *		UPDATE or DELETE.  When all clauses are WHEN MATCHED or WHEN NOT
 *		MATCHED BY SOURCE, all tuples produced by the join will include a
 *		matching target tuple, so all tuples contain row-locating info.
 *
 *		If the query specifies RETURNING, then the ModifyTable returns a
 *		RETURNING tuple after completing each row insert, update, or delete.
 *		It must be called again to continue the operation.  Without RETURNING,
 *		we just loop within the node until all the work is done, then
 *		return NULL.  This avoids useless call/return overhead.
 */
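
/*
 * Illustrative example (a hypothetical query, not from the original
 * sources): for
 *
 *		MERGE INTO target t USING source s ON t.id = s.id
 *			WHEN MATCHED THEN UPDATE SET val = s.val
 *			WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val);
 *
 * the subplan is an outer join of source and target, so a source row with
 * no matching target row arrives here with NULL row-locating junk columns
 * and can only fire the INSERT action.
 */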

#include "postgres.h"

#include "access/htup_details.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "commands/trigger.h"
#include "executor/execPartition.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "rewrite/rewriteHandler.h"
#include "rewrite/rewriteManip.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/injection_point.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"


typedef struct MTTargetRelLookup
{
    Oid         relationOid;    /* hash key, must be first */
    int         relationIndex;  /* rel's index in resultRelInfo[] array */
} MTTargetRelLookup;

/*
 * Context struct for a ModifyTable operation, containing basic execution
 * state and some output variables populated by ExecUpdateAct() and
 * ExecDeleteAct() to report the result of their actions to callers.
 */
typedef struct ModifyTableContext
{
    /* Operation state */
    ModifyTableState *mtstate;
    EPQState   *epqstate;
    EState     *estate;

    /*
     * Slot containing tuple obtained from ModifyTable's subplan.  Used to
     * access "junk" columns that are not going to be stored.
     */
    TupleTableSlot *planSlot;

    /*
     * Information about the changes that were made concurrently to a tuple
     * being updated or deleted
     */
    TM_FailureData tmfd;

    /*
     * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
     * clause that refers to OLD columns (converted to the root's tuple
     * descriptor).
     */
    TupleTableSlot *cpDeletedSlot;

    /*
     * The tuple projected by the INSERT's RETURNING clause, when doing a
     * cross-partition UPDATE
     */
    TupleTableSlot *cpUpdateReturningSlot;
} ModifyTableContext;

/*
 * Context struct containing output data specific to UPDATE operations.
 */
typedef struct UpdateContext
{
    bool        crossPartUpdate;    /* was it a cross-partition update? */
    TU_UpdateIndexes updateIndexes; /* Which index updates are required? */

    /*
     * Lock mode to acquire on the latest tuple version before performing
     * EvalPlanQual on it
     */
    LockTupleMode lockmode;
} UpdateContext;


static void ExecBatchInsert(ModifyTableState *mtstate,
                            ResultRelInfo *resultRelInfo,
                            TupleTableSlot **slots,
                            TupleTableSlot **planSlots,
                            int numSlots,
                            EState *estate,
                            bool canSetTag);
static void ExecPendingInserts(EState *estate);
static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
                                               ResultRelInfo *sourcePartInfo,
                                               ResultRelInfo *destPartInfo,
                                               ItemPointer tupleid,
                                               TupleTableSlot *oldslot,
                                               TupleTableSlot *newslot);
static bool ExecOnConflictUpdate(ModifyTableContext *context,
                                 ResultRelInfo *resultRelInfo,
                                 ItemPointer conflictTid,
                                 TupleTableSlot *excludedSlot,
                                 bool canSetTag,
                                 TupleTableSlot **returning);
static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
                                               EState *estate,
                                               PartitionTupleRouting *proute,
                                               ResultRelInfo *targetRelInfo,
                                               TupleTableSlot *slot,
                                               ResultRelInfo **partRelInfo);

static TupleTableSlot *ExecMerge(ModifyTableContext *context,
                                 ResultRelInfo *resultRelInfo,
                                 ItemPointer tupleid,
                                 HeapTuple oldtuple,
                                 bool canSetTag);
static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
                                        ResultRelInfo *resultRelInfo,
                                        ItemPointer tupleid,
                                        HeapTuple oldtuple,
                                        bool canSetTag,
                                        bool *matched);
static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
                                           ResultRelInfo *resultRelInfo,
                                           bool canSetTag);


/*
 * Verify that the tuples to be produced by INSERT match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 *
 * We used to use this for UPDATE as well, but now the equivalent checks
 * are done in ExecBuildUpdateProjection.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
    TupleDesc   resultDesc = RelationGetDescr(resultRel);
    int         attno = 0;
    ListCell   *lc;

    foreach(lc, targetList)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(lc);
        Form_pg_attribute attr;

        Assert(!tle->resjunk);  /* caller removed junk items already */

        if (attno >= resultDesc->natts)
            ereport(ERROR,
                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                     errmsg("table row type and query-specified row type do not match"),
                     errdetail("Query has too many columns.")));
        attr = TupleDescAttr(resultDesc, attno);
        attno++;

        /*
         * Special cases here should match planner's expand_insert_targetlist.
         */
        if (attr->attisdropped)
        {
            /*
             * For a dropped column, we can't check atttypid (it's likely 0).
             * In any case the planner has most likely inserted an INT4 null.
             * What we insist on is just *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a dropped column at ordinal position %d.",
                                   attno)));
        }
        else if (attr->attgenerated)
        {
            /*
             * For a generated column, the planner will have inserted a null
             * of the column's base type (to avoid possibly failing on domain
             * not-null constraints).  It doesn't seem worth insisting on that
             * exact type though, since a null value is type-independent.  As
             * above, just insist on *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a generated column at ordinal position %d.",
                                   attno)));
        }
        else
        {
            /* Normal case: demand type match */
            if (exprType((Node *) tle->expr) != attr->atttypid)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Table has type %s at ordinal position %d, but query expects %s.",
                                   format_type_be(attr->atttypid),
                                   attno,
                                   format_type_be(exprType((Node *) tle->expr)))));
        }
    }
    if (attno != resultDesc->natts)
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("table row type and query-specified row type do not match"),
                 errdetail("Query has too few columns.")));
}

/*
 * ExecProcessReturning --- evaluate a RETURNING list
 *
 * context: context for the ModifyTable operation
 * resultRelInfo: current result rel
 * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
 * oldSlot: slot holding old tuple deleted or updated
 * newSlot: slot holding new tuple inserted or updated
 * planSlot: slot holding tuple returned by top subplan node
 *
 * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
 * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
 * modify is disabled if the RETURNING list refers to any OLD/NEW values).
 *
 * Returns a slot holding the result tuple
 */
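
/*
 * Hypothetical usage, for illustration only: a query such as
 *
 *		UPDATE t SET val = val + 1 RETURNING old.val, new.val;
 *
 * reaches here with both oldSlot and newSlot set, and the projection below
 * evaluates the OLD and NEW column references against them.
 */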
static TupleTableSlot *
ExecProcessReturning(ModifyTableContext *context,
                     ResultRelInfo *resultRelInfo,
                     CmdType cmdType,
                     TupleTableSlot *oldSlot,
                     TupleTableSlot *newSlot,
                     TupleTableSlot *planSlot)
{
    EState     *estate = context->estate;
    ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
    ExprContext *econtext = projectReturning->pi_exprContext;

    /* Make tuple and any needed join variables available to ExecProject */
    switch (cmdType)
    {
        case CMD_INSERT:
        case CMD_UPDATE:
            /* return new tuple by default */
            if (newSlot)
                econtext->ecxt_scantuple = newSlot;
            break;

        case CMD_DELETE:
            /* return old tuple by default */
            if (oldSlot)
                econtext->ecxt_scantuple = oldSlot;
            break;

        default:
            elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
    }
    econtext->ecxt_outertuple = planSlot;

    /* Make old/new tuples available to ExecProject, if required */
    if (oldSlot)
        econtext->ecxt_oldtuple = oldSlot;
    else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
        econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
    else
        econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */

    if (newSlot)
        econtext->ecxt_newtuple = newSlot;
    else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
        econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
    else
        econtext->ecxt_newtuple = NULL; /* No references to NEW columns */

    /*
     * Tell ExecProject whether or not the OLD/NEW rows actually exist.  This
     * information is required to evaluate ReturningExpr nodes and also in
     * ExecEvalSysVar() and ExecEvalWholeRowVar().
     */
    if (oldSlot == NULL)
        projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
    else
        projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;

    if (newSlot == NULL)
        projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
    else
        projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;

    /* Compute the RETURNING expressions */
    return ExecProject(projectReturning);
}

/*
 * ExecCheckTupleVisible -- verify tuple is visible
 *
 * It would not be consistent with guarantees of the higher isolation levels
 * to proceed with avoiding insertion (taking speculative insertion's
 * alternative path) on the basis of another tuple that is not visible to the
 * MVCC snapshot.  Check for the need to raise a serialization failure, and
 * do so as necessary.
 */
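
/*
 * For example (a hypothetical scenario): under REPEATABLE READ, an
 * INSERT ... ON CONFLICT DO NOTHING that would skip insertion because of a
 * conflicting tuple committed after our snapshot was taken must raise a
 * serialization failure rather than silently do nothing.
 */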
static void
ExecCheckTupleVisible(EState *estate,
                      Relation rel,
                      TupleTableSlot *slot)
{
    if (!IsolationUsesXactSnapshot())
        return;

    if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
    {
        Datum       xminDatum;
        TransactionId xmin;
        bool        isnull;

        xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
        Assert(!isnull);
        xmin = DatumGetTransactionId(xminDatum);

        /*
         * We should not raise a serialization failure if the conflict is
         * against a tuple inserted by our own transaction, even if it's not
         * visible to our snapshot.  (This would happen, for example, if
         * conflicting keys are proposed for insertion in a single command.)
         */
        if (!TransactionIdIsCurrentTransactionId(xmin))
            ereport(ERROR,
                    (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                     errmsg("could not serialize access due to concurrent update")));
    }
}

/*
 * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
 */
static void
ExecCheckTIDVisible(EState *estate,
                    ResultRelInfo *relinfo,
                    ItemPointer tid,
                    TupleTableSlot *tempSlot)
{
    Relation    rel = relinfo->ri_RelationDesc;

    /* Redundantly check isolation level */
    if (!IsolationUsesXactSnapshot())
        return;

    if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
        elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
    ExecCheckTupleVisible(estate, rel, tempSlot);
    ExecClearTuple(tempSlot);
}

/*
 * Initialize generated columns handling for a tuple
 *
 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
 * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
 * This is used only for stored generated columns.
 *
 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
 * This is used by both stored and virtual generated columns.
 *
 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
 * cross-partition UPDATEs, since a partition might be the target of both
 * UPDATE and INSERT actions.
 */
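
/*
 * Example (a hypothetical table, for illustration): given
 *
 *		CREATE TABLE t (a int, b int GENERATED ALWAYS AS (a + 1) STORED, c int);
 *
 * an "UPDATE t SET c = 0" need not recompute b, since b depends only on a;
 * but if a BEFORE ROW UPDATE trigger exists, b must be recomputed anyway.
 */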
void
ExecInitGenerated(ResultRelInfo *resultRelInfo,
                  EState *estate,
                  CmdType cmdtype)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    int         natts = tupdesc->natts;
    ExprState **ri_GeneratedExprs;
    int         ri_NumGeneratedNeeded;
    Bitmapset  *updatedCols;
    MemoryContext oldContext;

    /* Nothing to do if no generated columns */
    if (!(tupdesc->constr &&
          (tupdesc->constr->has_generated_stored ||
           tupdesc->constr->has_generated_virtual)))
        return;

    /*
     * In an UPDATE, we can skip computing any generated columns that do not
     * depend on any UPDATE target column.  But if there is a BEFORE ROW
     * UPDATE trigger, we cannot skip because the trigger might change more
     * columns.
     */
    if (cmdtype == CMD_UPDATE &&
        !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
        updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
    else
        updatedCols = NULL;

    /*
     * Make sure these data structures are built in the per-query memory
     * context so they'll survive throughout the query.
     */
    oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

    ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
    ri_NumGeneratedNeeded = 0;

    for (int i = 0; i < natts; i++)
    {
        char        attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;

        if (attgenerated)
        {
            Expr       *expr;

            /* Fetch the GENERATED AS expression tree */
            expr = (Expr *) build_column_default(rel, i + 1);
            if (expr == NULL)
                elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
                     i + 1, RelationGetRelationName(rel));

            /*
             * If it's an update with a known set of update target columns,
             * see if we can skip the computation.
             */
            if (updatedCols)
            {
                Bitmapset  *attrs_used = NULL;

                pull_varattnos((Node *) expr, 1, &attrs_used);

                if (!bms_overlap(updatedCols, attrs_used))
                    continue;   /* need not update this column */
            }

            /* No luck, so prepare the expression for execution */
            if (attgenerated == ATTRIBUTE_GENERATED_STORED)
            {
                ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
                ri_NumGeneratedNeeded++;
            }

            /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
            if (cmdtype == CMD_UPDATE)
                resultRelInfo->ri_extraUpdatedCols =
                    bms_add_member(resultRelInfo->ri_extraUpdatedCols,
                                   i + 1 - FirstLowInvalidHeapAttributeNumber);
        }
    }

    if (ri_NumGeneratedNeeded == 0)
    {
        /* didn't need it after all */
        pfree(ri_GeneratedExprs);
        ri_GeneratedExprs = NULL;
    }

    /* Save in appropriate set of fields */
    if (cmdtype == CMD_UPDATE)
    {
        /* Don't call twice */
        Assert(resultRelInfo->ri_GeneratedExprsU == NULL);

        resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
        resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;

        resultRelInfo->ri_extraUpdatedCols_valid = true;
    }
    else
    {
        /* Don't call twice */
        Assert(resultRelInfo->ri_GeneratedExprsI == NULL);

        resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
        resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
    }

    MemoryContextSwitchTo(oldContext);
}

/*
 * Compute stored generated columns for a tuple
 */
void
ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
                           EState *estate, TupleTableSlot *slot,
                           CmdType cmdtype)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    int         natts = tupdesc->natts;
    ExprContext *econtext = GetPerTupleExprContext(estate);
    ExprState **ri_GeneratedExprs;
    MemoryContext oldContext;
    Datum      *values;
    bool       *nulls;

    /* We should not be called unless this is true */
    Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);

    /*
     * Initialize the expressions if we didn't already, and check whether we
     * can exit early because nothing needs to be computed.
     */
    if (cmdtype == CMD_UPDATE)
    {
        if (resultRelInfo->ri_GeneratedExprsU == NULL)
            ExecInitGenerated(resultRelInfo, estate, cmdtype);
        if (resultRelInfo->ri_NumGeneratedNeededU == 0)
            return;
        ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
    }
    else
    {
        if (resultRelInfo->ri_GeneratedExprsI == NULL)
            ExecInitGenerated(resultRelInfo, estate, cmdtype);
        /* Early exit is impossible given the prior Assert */
        Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
        ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
    }

    oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

    values = palloc_array(Datum, natts);
    nulls = palloc_array(bool, natts);

    slot_getallattrs(slot);
    memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);

    for (int i = 0; i < natts; i++)
    {
        CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);

        if (ri_GeneratedExprs[i])
        {
            Datum       val;
            bool        isnull;

            Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);

            econtext->ecxt_scantuple = slot;

            val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);

            /*
             * We must make a copy of val as we have no guarantees about where
             * memory for a pass-by-reference Datum is located.
             */
            if (!isnull)
                val = datumCopy(val, attr->attbyval, attr->attlen);

            values[i] = val;
            nulls[i] = isnull;
        }
        else
        {
            if (!nulls[i])
                values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
        }
    }

    ExecClearTuple(slot);
    memcpy(slot->tts_values, values, sizeof(*values) * natts);
    memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
    ExecStoreVirtualTuple(slot);
    ExecMaterializeSlot(slot);

    MemoryContextSwitchTo(oldContext);
}

/*
 * ExecInitInsertProjection
 *		Do one-time initialization of projection data for INSERT tuples.
 *
 * INSERT queries may need a projection to filter out junk attrs in the tlist.
 *
 * This is also a convenient place to verify that the
 * output of an INSERT matches the target table.
 */
static void
ExecInitInsertProjection(ModifyTableState *mtstate,
                         ResultRelInfo *resultRelInfo)
{
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    Plan       *subplan = outerPlan(node);
    EState     *estate = mtstate->ps.state;
    List       *insertTargetList = NIL;
    bool        need_projection = false;
    ListCell   *l;

    /* Extract non-junk columns of the subplan's result tlist. */
    foreach(l, subplan->targetlist)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(l);

        if (!tle->resjunk)
            insertTargetList = lappend(insertTargetList, tle);
        else
            need_projection = true;
    }

    /*
     * The junk-free list must produce a tuple suitable for the result
     * relation.
     */
    ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);

    /* We'll need a slot matching the table's format. */
    resultRelInfo->ri_newTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);

    /* Build ProjectionInfo if needed (it probably isn't). */
    if (need_projection)
    {
        TupleDesc   relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);

        /* need an expression context to do the projection */
        if (mtstate->ps.ps_ExprContext == NULL)
            ExecAssignExprContext(estate, &mtstate->ps);

        resultRelInfo->ri_projectNew =
            ExecBuildProjectionInfo(insertTargetList,
                                    mtstate->ps.ps_ExprContext,
                                    resultRelInfo->ri_newTupleSlot,
                                    &mtstate->ps,
                                    relDesc);
    }

    resultRelInfo->ri_projectNewInfoValid = true;
}

/*
 * ExecInitUpdateProjection
 *		Do one-time initialization of projection data for UPDATE tuples.
 *
 * UPDATE always needs a projection, because (1) there are always some junk
 * attrs, and (2) we may need to merge values of not-updated columns from
 * the old tuple into the final tuple.  In UPDATE, the tuple arriving from
 * the subplan contains only new values for the changed columns, plus row
 * identity info in the junk attrs.
 *
 * This is "one-time" for any given result rel, but we might touch more than
 * one result rel in the course of an inherited UPDATE, and each one needs
 * its own projection due to possible column order variation.
 *
 * This is also a convenient place to verify that the output of an UPDATE
 * matches the target table (ExecBuildUpdateProjection does that).
 */
static void
ExecInitUpdateProjection(ModifyTableState *mtstate,
                         ResultRelInfo *resultRelInfo)
{
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    Plan       *subplan = outerPlan(node);
    EState     *estate = mtstate->ps.state;
    TupleDesc   relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
    int         whichrel;
    List       *updateColnos;

    /*
     * Usually, mt_lastResultIndex matches the target rel.  If it happens not
     * to, we can get the index the hard way with an integer division.
     */
    whichrel = mtstate->mt_lastResultIndex;
    if (resultRelInfo != mtstate->resultRelInfo + whichrel)
    {
        whichrel = resultRelInfo - mtstate->resultRelInfo;
        Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
    }

    updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);

    /*
     * For UPDATE, we use the old tuple to fill up missing values in the tuple
     * produced by the subplan to get the new tuple.  We need two slots, both
     * matching the table's desired format.
     */
    resultRelInfo->ri_oldTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);
    resultRelInfo->ri_newTupleSlot =
        table_slot_create(resultRelInfo->ri_RelationDesc,
                          &estate->es_tupleTable);

    /* need an expression context to do the projection */
    if (mtstate->ps.ps_ExprContext == NULL)
        ExecAssignExprContext(estate, &mtstate->ps);

    resultRelInfo->ri_projectNew =
        ExecBuildUpdateProjection(subplan->targetlist,
                                  false,    /* subplan did the evaluation */
                                  updateColnos,
                                  relDesc,
                                  mtstate->ps.ps_ExprContext,
                                  resultRelInfo->ri_newTupleSlot,
                                  &mtstate->ps);

    resultRelInfo->ri_projectNewInfoValid = true;
}

/*
 * ExecGetInsertNewTuple
 *		This prepares a "new" tuple ready to be inserted into the given result
 *		relation, by removing any junk columns of the plan's output tuple
 *		and (if necessary) coercing the tuple to the right tuple format.
 */
static TupleTableSlot *
ExecGetInsertNewTuple(ResultRelInfo *relinfo,
                      TupleTableSlot *planSlot)
{
    ProjectionInfo *newProj = relinfo->ri_projectNew;
    ExprContext *econtext;

    /*
     * If there's no projection to be done, just make sure the slot is of the
     * right type for the target rel.  If the planSlot is the right type we
     * can use it as-is, else copy the data into ri_newTupleSlot.
     */
    if (newProj == NULL)
    {
        if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
        {
            ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
            return relinfo->ri_newTupleSlot;
        }
        else
            return planSlot;
    }

    /*
     * Else project; since the projection output slot is ri_newTupleSlot, this
     * will also fix any slot-type problem.
     *
     * Note: currently, this is dead code, because INSERT cases don't receive
     * any junk columns so there's never a projection to be done.
     */
    econtext = newProj->pi_exprContext;
    econtext->ecxt_outertuple = planSlot;
    return ExecProject(newProj);
}

/*
 * ExecGetUpdateNewTuple
 *		This prepares a "new" tuple by combining an UPDATE subplan's output
 *		tuple (which contains values of changed columns) with unchanged
 *		columns taken from the old tuple.
 *
 * The subplan tuple might also contain junk columns, which are ignored.
 * Note that the projection also ensures we have a slot of the right type.
 */
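
/*
 * For instance (a hypothetical case): for "UPDATE t SET b = 5", the subplan
 * tuple carries only the new value of b plus row-identity junk columns; the
 * projection below merges it with the unchanged columns of oldSlot.
 */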
TupleTableSlot *
ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
                      TupleTableSlot *planSlot,
                      TupleTableSlot *oldSlot)
{
    ProjectionInfo *newProj = relinfo->ri_projectNew;
    ExprContext *econtext;

    /* Use a few extra Asserts to protect against outside callers */
    Assert(relinfo->ri_projectNewInfoValid);
    Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
    Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));

    econtext = newProj->pi_exprContext;
    econtext->ecxt_outertuple = planSlot;
    econtext->ecxt_scantuple = oldSlot;
    return ExecProject(newProj);
}

/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		For INSERT, we have to insert the tuple into the target relation
 *		(or partition thereof) and insert appropriate tuples into the index
 *		relations.
 *
 *		slot contains the new tuple value to be stored.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 *		*inserted_tuple is the tuple that's effectively inserted;
 *		*insert_destrel is the relation where it was inserted.
 *		These are only set on success.
 *
 *		This may change the currently active tuple conversion map in
 *		mtstate->mt_transition_capture, so the callers must take care to
 *		save the previous value to avoid losing track of it.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecInsert(ModifyTableContext *context,
           ResultRelInfo *resultRelInfo,
           TupleTableSlot *slot,
           bool canSetTag,
           TupleTableSlot **inserted_tuple,
           ResultRelInfo **insert_destrel)
{
    ModifyTableState *mtstate = context->mtstate;
    EState     *estate = context->estate;
    Relation    resultRelationDesc;
    List       *recheckIndexes = NIL;
    TupleTableSlot *planSlot = context->planSlot;
    TupleTableSlot *result = NULL;
    TransitionCaptureState *ar_insert_trig_tcs;
    ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
    OnConflictAction onconflict = node->onConflictAction;
    PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
    MemoryContext oldContext;

    /*
     * If the input result relation is a partitioned table, find the leaf
     * partition to insert the tuple into.
     */
    if (proute)
    {
        ResultRelInfo *partRelInfo;

        slot = ExecPrepareTupleRouting(mtstate, estate, proute,
                                       resultRelInfo, slot,
                                       &partRelInfo);
        resultRelInfo = partRelInfo;
    }

    ExecMaterializeSlot(slot);

    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /*
     * Open the table's indexes, if we have not done so already, so that we
     * can add new index entries for the inserted tuple.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        resultRelInfo->ri_IndexRelationDescs == NULL)
        ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);

    /*
     * BEFORE ROW INSERT Triggers.
     *
     * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
     * INSERT ... ON CONFLICT statement.  We cannot check for constraint
     * violations before firing these triggers, because they can change the
     * values to insert.  Also, they can run arbitrary user-defined code with
     * side-effects that we can't cancel by just not inserting the tuple.
     */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->trig_insert_before_row)
    {
        /* Flush any pending inserts, so rows are visible to the triggers */
        if (estate->es_insert_pending_result_relations != NIL)
            ExecPendingInserts(estate);

        if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
            return NULL;        /* "do nothing" */
    }

    /* INSTEAD OF ROW INSERT Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
    {
        if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
            return NULL;        /* "do nothing" */
    }
    else if (resultRelInfo->ri_FdwRoutine)
    {
        /*
         * GENERATED expressions might reference the tableoid column, so
         * (re-)initialize tts_tableOid before evaluating them.
         */
        slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);

        /*
         * Compute stored generated columns
         */
        if (resultRelationDesc->rd_att->constr &&
            resultRelationDesc->rd_att->constr->has_generated_stored)
            ExecComputeStoredGenerated(resultRelInfo, estate, slot,
                                       CMD_INSERT);

        /*
         * If the FDW supports batching, and batching is requested, accumulate
         * rows and insert them in batches.  Otherwise use the per-row inserts.
         */
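        /*
         * (For example, a postgres_fdw foreign table configured with the
         * batch_size option -- a hypothetical setup, for illustration --
         * would arrive here with ri_BatchSize set to that value.)
         */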
        if (resultRelInfo->ri_BatchSize > 1)
        {
            bool        flushed = false;

            /*
             * When we've reached the desired batch size, perform the
             * insertion.
             */
            if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
            {
                ExecBatchInsert(mtstate, resultRelInfo,
                                resultRelInfo->ri_Slots,
                                resultRelInfo->ri_PlanSlots,
                                resultRelInfo->ri_NumSlots,
                                estate, canSetTag);
                flushed = true;
            }

            oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

            if (resultRelInfo->ri_Slots == NULL)
            {
                resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
                resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
            }

            /*
             * Initialize the batch slots.  We don't know how many slots will
             * be needed, so we initialize them as the batch grows, and we
             * keep them across batches.  To mitigate an inefficiency in how
             * resource owner handles objects with many references (as with
             * many slots all referencing the same tuple descriptor) we copy
             * the appropriate tuple descriptor for each slot.
             */
            if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
            {
                TupleDesc   tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
                TupleDesc   plan_tdesc =
                    CreateTupleDescCopy(planSlot->tts_tupleDescriptor);

                resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
                    MakeSingleTupleTableSlot(tdesc, slot->tts_ops);

                resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
                    MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);

                /* remember how many batch slots we initialized */
                resultRelInfo->ri_NumSlotsInitialized++;
            }

            ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
                         slot);

            ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
                         planSlot);

            /*
             * If these are the first tuples stored in the buffers, add the
             * target rel and the mtstate to the
             * es_insert_pending_result_relations and
             * es_insert_pending_modifytables lists respectively, except in
             * the case where flushing was done above, in which case they
             * would already have been added to the lists, so no need to do
             * this.
             */
            if (resultRelInfo->ri_NumSlots == 0 && !flushed)
            {
                Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
                                        resultRelInfo));
                estate->es_insert_pending_result_relations =
                    lappend(estate->es_insert_pending_result_relations,
                            resultRelInfo);
                estate->es_insert_pending_modifytables =
                    lappend(estate->es_insert_pending_modifytables, mtstate);
            }
            Assert(list_member_ptr(estate->es_insert_pending_result_relations,
                                   resultRelInfo));

            resultRelInfo->ri_NumSlots++;

            MemoryContextSwitchTo(oldContext);

            return NULL;
        }

        /*
         * insert into foreign table: let the FDW do it
         */
        slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
                                                               resultRelInfo,
                                                               slot,
                                                               planSlot);

        if (slot == NULL)       /* "do nothing" */
            return NULL;

        /*
         * AFTER ROW Triggers or RETURNING expressions might reference the
         * tableoid column, so (re-)initialize tts_tableOid before evaluating
         * them.  (This covers the case where the FDW replaced the slot.)
         */
        slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
    }
    else
    {
        WCOKind     wco_kind;

        /*
         * Constraints and GENERATED expressions might reference the tableoid
         * column, so (re-)initialize tts_tableOid before evaluating them.
         */
        slot->tts_tableOid = RelationGetRelid(resultRelationDesc);

        /*
         * Compute stored generated columns
         */
        if (resultRelationDesc->rd_att->constr &&
            resultRelationDesc->rd_att->constr->has_generated_stored)
            ExecComputeStoredGenerated(resultRelInfo, estate, slot,
                                       CMD_INSERT);

        /*
         * Check any RLS WITH CHECK policies.
         *
         * Normally we should check INSERT policies.  But if the insert is the
         * result of a partition key update that moved the tuple to a new
         * partition, we should instead check UPDATE policies, because we are
         * executing policies defined on the target table, and not those
         * defined on the child partitions.
         *
         * If we're running MERGE, we refer to the action that we're executing
         * to know if we're doing an INSERT or UPDATE to a partition table.
         */
        if (mtstate->operation == CMD_UPDATE)
            wco_kind = WCO_RLS_UPDATE_CHECK;
        else if (mtstate->operation == CMD_MERGE)
            wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
                WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
        else
            wco_kind = WCO_RLS_INSERT_CHECK;

        /*
         * ExecWithCheckOptions() will skip any WCOs which are not of the kind
         * we are looking for at this point.
         */
        if (resultRelInfo->ri_WithCheckOptions != NIL)
            ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);

        /*
         * Check the constraints of the tuple.
         */
        if (resultRelationDesc->rd_att->constr)
            ExecConstraints(resultRelInfo, slot, estate);

        /*
         * Also check the tuple against the partition constraint, if there is
         * one; except that if we got here via tuple-routing, we don't need to
         * if there's no BR trigger defined on the partition.
         */
        if (resultRelationDesc->rd_rel->relispartition &&
            (resultRelInfo->ri_RootResultRelInfo == NULL ||
             (resultRelInfo->ri_TrigDesc &&
              resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
            ExecPartitionCheck(resultRelInfo, slot, estate, true);

        if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
        {
            /* Perform a speculative insertion. */
            uint32      specToken;
            ItemPointerData conflictTid;
            ItemPointerData invalidItemPtr;
            bool        specConflict;
            List       *arbiterIndexes;

            ItemPointerSetInvalid(&invalidItemPtr);
            arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;

            /*
             * Do a non-conclusive check for conflicts first.
             *
             * We're not holding any locks yet, so this doesn't guarantee that
             * the later insert won't conflict.  But it avoids leaving behind
             * a lot of canceled speculative insertions, if you run a lot of
             * INSERT ON CONFLICT statements that do conflict.
             *
             * We loop back here if we find a conflict below, either during
             * the pre-check, or when we re-check after inserting the tuple
             * speculatively.  Better allow interrupts in case some bug makes
             * this an infinite loop.
             */
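            /*
             * Hypothetical illustration: two sessions concurrently running
             *
             *		INSERT INTO t VALUES (1) ON CONFLICT (id) DO NOTHING;
             *
             * can both pass this pre-check; the loser of the subsequent
             * speculative insertion loops back to vlock and then finds the
             * winner's committed tuple here.
             */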
1133 : : vlock:
1134 [ # # ]: 0 : CHECK_FOR_INTERRUPTS();
1135 : 0 : specConflict = false;
1136 [ # # # # ]: 0 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1137 : : &conflictTid, &invalidItemPtr,
1138 : 0 : arbiterIndexes))
1139 : : {
1140 : : /* committed conflict tuple found */
1141 [ # # ]: 0 : if (onconflict == ONCONFLICT_UPDATE)
1142 : : {
1143 : : /*
1144 : : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1145 : : * part. Be prepared to retry if the UPDATE fails because
1146 : : * of another concurrent UPDATE/DELETE to the conflict
1147 : : * tuple.
1148 : : */
1149 : 0 : TupleTableSlot *returning = NULL;
1150 : :
1151 [ # # # # ]: 0 : if (ExecOnConflictUpdate(context, resultRelInfo,
1152 : 0 : &conflictTid, slot, canSetTag,
1153 : : &returning))
1154 : : {
1155 [ # # ]: 0 : InstrCountTuples2(&mtstate->ps, 1);
1156 : 0 : return returning;
1157 : : }
1158 : : else
1159 : 0 : goto vlock;
1160 [ # # ]: 0 : }
1161 : : else
1162 : : {
1163 : : /*
1164 : : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1165 : : * verify that the tuple is visible to the executor's MVCC
1166 : : * snapshot at higher isolation levels.
1167 : : *
1168 : : * Using ExecGetReturningSlot() to store the tuple for the
1169 : : * recheck isn't that pretty, but we can't trivially use
1170 : : * the input slot, because it might not be of a compatible
1171 : : * type. As there's no conflicting usage of
1172 : : * ExecGetReturningSlot() in the DO NOTHING case...
1173 : : */
1174 [ # # ]: 0 : Assert(onconflict == ONCONFLICT_NOTHING);
1175 : 0 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1176 : 0 : ExecGetReturningSlot(estate, resultRelInfo));
1177 [ # # ]: 0 : InstrCountTuples2(&mtstate->ps, 1);
1178 : 0 : return NULL;
1179 : : }
1180 : : }
1181 : :
1182 : : /*
1183 : : * Before we start insertion proper, acquire our "speculative
1184 : : * insertion lock". Others can use that to wait for us to decide
1185 : : * if we're going to go ahead with the insertion, instead of
1186 : : * waiting for the whole transaction to complete.
1187 : : */
1188 : : INJECTION_POINT("exec-insert-before-insert-speculative", NULL);
1189 : 0 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1190 : :
1191 : : /* insert the tuple, with the speculative token */
1192 : 0 : table_tuple_insert_speculative(resultRelationDesc, slot,
1193 : 0 : estate->es_output_cid,
1194 : : 0,
1195 : : NULL,
1196 : 0 : specToken);
1197 : :
1198 : : /* insert index entries for tuple */
1199 : 0 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1200 : 0 : slot, estate, false, true,
1201 : : &specConflict,
1202 : 0 : arbiterIndexes,
1203 : : false);
1204 : :
1205 : : /* adjust the tuple's state accordingly */
1206 : 0 : table_tuple_complete_speculative(resultRelationDesc, slot,
1207 : 0 : specToken, !specConflict);
1208 : :
1209 : : /*
1210 : : * Wake up anyone waiting for our decision. They will re-check
1211 : : * the tuple, see that it's no longer speculative, and wait on our
1212 : : * XID as if this was a regularly inserted tuple all along. Or if
1213 : : * we killed the tuple, they will see it's dead, and proceed as if
1214 : : * the tuple never existed.
1215 : : */
1216 : 0 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1217 : :
1218 : : /*
1219 : : * If there was a conflict, start from the beginning. We'll do
1220 : : * the pre-check again, which will now find the conflicting tuple
1221 : : * (unless it aborts before we get there).
1222 : : */
1223 [ # # ]: 0 : if (specConflict)
1224 : : {
1225 : 0 : list_free(recheckIndexes);
1226 : 0 : goto vlock;
1227 : : }
1228 : :
1229 : : /* Since there was no insertion conflict, we're done */
1230 [ # # ]: 0 : }
1231 : : else
1232 : : {
1233 : : /* insert the tuple normally */
1234 : 0 : table_tuple_insert(resultRelationDesc, slot,
1235 : 0 : estate->es_output_cid,
1236 : : 0, NULL);
1237 : :
1238 : : /* insert index entries for tuple */
1239 [ # # ]: 0 : if (resultRelInfo->ri_NumIndices > 0)
1240 : 0 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1241 : 0 : slot, estate, false,
1242 : : false, NULL, NIL,
1243 : : false);
1244 : : }
1245 [ # # ]: 0 : }
1246 : :
1247 [ # # ]: 0 : if (canSetTag)
1248 : 0 : (estate->es_processed)++;
1249 : :
1250 : : /*
1251 : : * If this insert is the result of a partition key update that moved the
1252 : : * tuple to a new partition, put this row into the transition NEW TABLE,
1253 : : * if there is one. We need to do this separately for DELETE and INSERT
1254 : : * because they happen on different tables.
1255 : : */
1256 : 0 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1257 [ # # ]: 0 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1258 [ # # # # ]: 0 : && mtstate->mt_transition_capture->tcs_update_new_table)
1259 : : {
1260 : 0 : ExecARUpdateTriggers(estate, resultRelInfo,
1261 : : NULL, NULL,
1262 : : NULL,
1263 : : NULL,
1264 : 0 : slot,
1265 : : NULL,
1266 : 0 : mtstate->mt_transition_capture,
1267 : : false);
1268 : :
1269 : : /*
1270 : : * We've already captured the NEW TABLE row, so make sure any AR
1271 : : * INSERT trigger fired below doesn't capture it again.
1272 : : */
1273 : 0 : ar_insert_trig_tcs = NULL;
1274 : 0 : }
1275 : :
1276 : : /* AFTER ROW INSERT Triggers */
1277 : 0 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1278 : 0 : ar_insert_trig_tcs);
1279 : :
1280 : 0 : list_free(recheckIndexes);
1281 : :
1282 : : /*
1283 : : * Check any WITH CHECK OPTION constraints from parent views. We are
1284 : : * required to do this after testing all constraints and uniqueness
1285 : : * violations per the SQL spec, so we do it after actually inserting the
1286 : : * record into the heap and all indexes.
1287 : : *
1288 : : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1289 : : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1290 : : *
1291 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1292 : : * are looking for at this point.
1293 : : */
1294 [ # # ]: 0 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1295 : 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1296 : :
1297 : : /* Process RETURNING if present */
1298 [ # # ]: 0 : if (resultRelInfo->ri_projectReturning)
1299 : : {
1300 : 0 : TupleTableSlot *oldSlot = NULL;
1301 : :
1302 : : /*
1303 : : * If this is part of a cross-partition UPDATE, and the RETURNING list
1304 : : * refers to any OLD columns, ExecDelete() will have saved the tuple
1305 : : * deleted from the original partition, which we must use here to
1306 : : * compute the OLD column values. Otherwise, all OLD column values
1307 : : * will be NULL.
1308 : : */
1309 [ # # ]: 0 : if (context->cpDeletedSlot)
1310 : : {
1311 : 0 : TupleConversionMap *tupconv_map;
1312 : :
1313 : : /*
1314 : : * Convert the OLD tuple to the new partition's format/slot, if
1315 : : * needed. Note that ExecDelete() already converted it to the
1316 : : * root's partition's format/slot.
1317 : : */
1318 : 0 : oldSlot = context->cpDeletedSlot;
1319 : 0 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1320 [ # # ]: 0 : if (tupconv_map != NULL)
1321 : : {
1322 : 0 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1323 : 0 : oldSlot,
1324 : 0 : ExecGetReturningSlot(estate,
1325 : 0 : resultRelInfo));
1326 : :
1327 : 0 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1328 : 0 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1329 : 0 : }
1330 : 0 : }
1331 : :
1332 : 0 : result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1333 : 0 : oldSlot, slot, planSlot);
1334 : :
1335 : : /*
1336 : : * For a cross-partition UPDATE, release the old tuple, first making
1337 : : * sure that the result slot has a local copy of any pass-by-reference
1338 : : * values.
1339 : : */
1340 [ # # ]: 0 : if (context->cpDeletedSlot)
1341 : : {
1342 : 0 : ExecMaterializeSlot(result);
1343 : 0 : ExecClearTuple(oldSlot);
1344 [ # # ]: 0 : if (context->cpDeletedSlot != oldSlot)
1345 : 0 : ExecClearTuple(context->cpDeletedSlot);
1346 : 0 : context->cpDeletedSlot = NULL;
1347 : 0 : }
1348 : 0 : }
1349 : :
1350 [ # # ]: 0 : if (inserted_tuple)
1351 : 0 : *inserted_tuple = slot;
1352 [ # # ]: 0 : if (insert_destrel)
1353 : 0 : *insert_destrel = resultRelInfo;
1354 : :
1355 : 0 : return result;
1356 : 0 : }
1357 : :
1358 : : /* ----------------------------------------------------------------
1359 : : * ExecBatchInsert
1360 : : *
1361 : : * Insert multiple tuples in an efficient way.
1362 : : * Currently, this handles inserting into a foreign table without
1363 : : * RETURNING clause.
1364 : : * ----------------------------------------------------------------
1365 : : */
1366 : : static void
1367 : 0 : ExecBatchInsert(ModifyTableState *mtstate,
1368 : : ResultRelInfo *resultRelInfo,
1369 : : TupleTableSlot **slots,
1370 : : TupleTableSlot **planSlots,
1371 : : int numSlots,
1372 : : EState *estate,
1373 : : bool canSetTag)
1374 : : {
1375 : 0 : int i;
1376 : 0 : int numInserted = numSlots;
1377 : 0 : TupleTableSlot *slot = NULL;
1378 : 0 : TupleTableSlot **rslots;
1379 : :
1380 : : /*
1381 : : * insert into foreign table: let the FDW do it
1382 : : */
1383 : 0 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1384 : 0 : resultRelInfo,
1385 : 0 : slots,
1386 : 0 : planSlots,
1387 : : &numInserted);
1388 : :
1389 [ # # ]: 0 : for (i = 0; i < numInserted; i++)
1390 : : {
1391 : 0 : slot = rslots[i];
1392 : :
1393 : : /*
1394 : : * AFTER ROW Triggers might reference the tableoid column, so
1395 : : * (re-)initialize tts_tableOid before evaluating them.
1396 : : */
1397 : 0 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1398 : :
1399 : : /* AFTER ROW INSERT Triggers */
1400 : 0 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1401 : 0 : mtstate->mt_transition_capture);
1402 : :
1403 : : /*
1404 : : * Check any WITH CHECK OPTION constraints from parent views. See the
1405 : : * comment in ExecInsert.
1406 : : */
1407 [ # # ]: 0 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1408 : 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1409 : 0 : }
1410 : :
1411 [ # # # # ]: 0 : if (canSetTag && numInserted > 0)
1412 : 0 : estate->es_processed += numInserted;
1413 : :
1414 : : /* Clean up all the slots, ready for the next batch */
1415 [ # # ]: 0 : for (i = 0; i < numSlots; i++)
1416 : : {
1417 : 0 : ExecClearTuple(slots[i]);
1418 : 0 : ExecClearTuple(planSlots[i]);
1419 : 0 : }
1420 : 0 : resultRelInfo->ri_NumSlots = 0;
1421 : 0 : }
1422 : :
1423 : : /*
1424 : : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1425 : : */
1426 : : static void
1427 : 0 : ExecPendingInserts(EState *estate)
1428 : : {
1429 : 0 : ListCell *l1,
1430 : : *l2;
1431 : :
1432 [ # # # # : 0 : forboth(l1, estate->es_insert_pending_result_relations,
# # # # #
# # # ]
1433 : : l2, estate->es_insert_pending_modifytables)
1434 : : {
1435 : 0 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1436 : 0 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1437 : :
1438 [ # # ]: 0 : Assert(mtstate);
1439 : 0 : ExecBatchInsert(mtstate, resultRelInfo,
1440 : 0 : resultRelInfo->ri_Slots,
1441 : 0 : resultRelInfo->ri_PlanSlots,
1442 : 0 : resultRelInfo->ri_NumSlots,
1443 : 0 : estate, mtstate->canSetTag);
1444 : 0 : }
1445 : :
1446 : 0 : list_free(estate->es_insert_pending_result_relations);
1447 : 0 : list_free(estate->es_insert_pending_modifytables);
1448 : 0 : estate->es_insert_pending_result_relations = NIL;
1449 : 0 : estate->es_insert_pending_modifytables = NIL;
1450 : 0 : }
1451 : :
1452 : : /*
1453 : : * ExecDeletePrologue -- subroutine for ExecDelete
1454 : : *
1455 : : * Prepare executor state for DELETE. Actually, the only thing we have to do
1456 : : * here is execute BEFORE ROW triggers. We return false if one of them makes
1457 : : * the delete a no-op; otherwise, return true.
1458 : : */
1459 : : static bool
1460 : 0 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1461 : : ItemPointer tupleid, HeapTuple oldtuple,
1462 : : TupleTableSlot **epqreturnslot, TM_Result *result)
1463 : : {
1464 [ # # ]: 0 : if (result)
1465 : 0 : *result = TM_Ok;
1466 : :
1467 : : /* BEFORE ROW DELETE triggers */
1468 [ # # # # ]: 0 : if (resultRelInfo->ri_TrigDesc &&
1469 : 0 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1470 : : {
1471 : : /* Flush any pending inserts, so rows are visible to the triggers */
1472 [ # # ]: 0 : if (context->estate->es_insert_pending_result_relations != NIL)
1473 : 0 : ExecPendingInserts(context->estate);
1474 : :
1475 : 0 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1476 : 0 : resultRelInfo, tupleid, oldtuple,
1477 : 0 : epqreturnslot, result, &context->tmfd,
1478 : 0 : context->mtstate->operation == CMD_MERGE);
1479 : : }
1480 : :
1481 : 0 : return true;
1482 : 0 : }
1483 : :
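The prologue's contract is simply "false means the row operation became a no-op". A hedged standalone sketch of that veto pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a BEFORE ROW trigger: returns false to suppress the delete. */
static bool
before_delete_hook(int row)
{
    return row % 2 == 0;        /* this toy trigger vetoes odd rows */
}

/* Like ExecDeletePrologue plus its caller: a vetoed row is silently skipped. */
static void
delete_row(int row)
{
    if (!before_delete_hook(row))
    {
        printf("row %d: delete suppressed by trigger\n", row);
        return;                 /* a no-op, not an error */
    }
    printf("row %d: deleted\n", row);
}

int
main(void)
{
    for (int row = 1; row <= 4; row++)
        delete_row(row);
    return 0;
}
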
1484 : : /*
1485 : : * ExecDeleteAct -- subroutine for ExecDelete
1486 : : *
1487 : : * Actually delete the tuple from a plain table.
1488 : : *
1489 : : * Caller is in charge of doing EvalPlanQual as necessary
1490 : : */
1491 : : static TM_Result
1492 : 0 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1493 : : ItemPointer tupleid, bool changingPart)
1494 : : {
1495 : 0 : EState *estate = context->estate;
1496 : :
1497 : 0 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1498 : 0 : estate->es_output_cid,
1499 : 0 : estate->es_snapshot,
1500 : 0 : estate->es_crosscheck_snapshot,
1501 : : true /* wait for commit */ ,
1502 : 0 : &context->tmfd,
1503 : 0 : changingPart);
1504 : 0 : }
1505 : :
1506 : : /*
1507 : : * ExecDeleteEpilogue -- subroutine for ExecDelete
1508 : : *
1509 : : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1510 : : * including the UPDATE triggers if the deletion is being done as part of a
1511 : : * cross-partition tuple move.
1512 : : */
1513 : : static void
1514 : 0 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1515 : : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1516 : : {
1517 : 0 : ModifyTableState *mtstate = context->mtstate;
1518 : 0 : EState *estate = context->estate;
1519 : 0 : TransitionCaptureState *ar_delete_trig_tcs;
1520 : :
1521 : : /*
1522 : : * If this delete is the result of a partition key update that moved the
1523 : : * tuple to a new partition, put this row into the transition OLD TABLE,
1524 : : * if there is one. We need to do this separately for DELETE and INSERT
1525 : : * because they happen on different tables.
1526 : : */
1527 : 0 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1528 [ # # # # : 0 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
# # ]
1529 : 0 : mtstate->mt_transition_capture->tcs_update_old_table)
1530 : : {
1531 : 0 : ExecARUpdateTriggers(estate, resultRelInfo,
1532 : : NULL, NULL,
1533 : 0 : tupleid, oldtuple,
1534 : 0 : NULL, NULL, mtstate->mt_transition_capture,
1535 : : false);
1536 : :
1537 : : /*
1538 : : * We've already captured the OLD TABLE row, so make sure any AR
1539 : : * DELETE trigger fired below doesn't capture it again.
1540 : : */
1541 : 0 : ar_delete_trig_tcs = NULL;
1542 : 0 : }
1543 : :
1544 : : /* AFTER ROW DELETE Triggers */
1545 : 0 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1546 : 0 : ar_delete_trig_tcs, changingPart);
1547 : 0 : }
1548 : :
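The capture-once logic above can be modeled in isolation. In this hypothetical sketch (not PostgreSQL code), a delete that implements a cross-partition update records the OLD row through the update path and then passes a NULL capture state to the delete path, so the row lands in a transition table exactly once:

#include <stddef.h>
#include <stdio.h>

typedef struct Capture
{
    int         old_rows[8];
    int         n;
} Capture;

static void
capture_old_row(Capture *cap, int row)
{
    if (cap)
        cap->old_rows[cap->n++] = row;
}

/*
 * Like ExecDeleteEpilogue: if the delete is part of a cross-partition
 * update, record the OLD row via the update path, then hand a NULL
 * capture state to the delete path so the row is not captured twice.
 */
static void
delete_epilogue(Capture *cap, int row, int part_of_update)
{
    Capture    *delete_cap = cap;

    if (part_of_update && cap)
    {
        capture_old_row(cap, row);      /* UPDATE's OLD TABLE gets the row */
        delete_cap = NULL;              /* suppress the second capture */
    }
    capture_old_row(delete_cap, row);   /* AFTER ROW DELETE path */
}

int
main(void)
{
    Capture     cap = {0};

    delete_epilogue(&cap, 42, 1);       /* cross-partition update: one capture */
    delete_epilogue(&cap, 43, 0);       /* plain delete: one capture */
    printf("captured %d rows\n", cap.n);    /* prints 2, not 3 */
    return 0;
}
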
1549 : : /* ----------------------------------------------------------------
1550 : : * ExecDelete
1551 : : *
1552 : : * DELETE is like UPDATE, except that we delete the tuple and no
1553 : : * index modifications are needed.
1554 : : *
1555 : : * When deleting from a table, tupleid identifies the tuple to delete and
1556 : : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1557 : : * oldtuple is passed to the triggers and identifies what to delete, and
1558 : : * tupleid is invalid. When deleting from a foreign table, tupleid is
1559 : : * invalid; the FDW has to figure out which row to delete using data from
1560 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
1561 : : * NULL when the foreign table has no relevant triggers. We use
1562 : : * tupleDeleted to indicate whether the tuple is actually deleted;
1563 : : * callers can use it to decide whether to continue the operation. When
1564 : : * this DELETE is a part of an UPDATE of partition-key, then the slot
1565 : : * returned by EvalPlanQual() is passed back using output parameter
1566 : : * epqreturnslot.
1567 : : *
1568 : : * Returns RETURNING result if any, otherwise NULL.
1569 : : * ----------------------------------------------------------------
1570 : : */
1571 : : static TupleTableSlot *
1572 : 0 : ExecDelete(ModifyTableContext *context,
1573 : : ResultRelInfo *resultRelInfo,
1574 : : ItemPointer tupleid,
1575 : : HeapTuple oldtuple,
1576 : : bool processReturning,
1577 : : bool changingPart,
1578 : : bool canSetTag,
1579 : : TM_Result *tmresult,
1580 : : bool *tupleDeleted,
1581 : : TupleTableSlot **epqreturnslot)
1582 : : {
1583 : 0 : EState *estate = context->estate;
1584 : 0 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1585 : 0 : TupleTableSlot *slot = NULL;
1586 : 0 : TM_Result result;
1587 : 0 : bool saveOld;
1588 : :
1589 [ # # ]: 0 : if (tupleDeleted)
1590 : 0 : *tupleDeleted = false;
1591 : :
1592 : : /*
1593 : : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1594 : : * done if it says we are.
1595 : : */
1596 [ # # # # ]: 0 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1597 : 0 : epqreturnslot, tmresult))
1598 : 0 : return NULL;
1599 : :
1600 : : /* INSTEAD OF ROW DELETE Triggers */
1601 [ # # # # ]: 0 : if (resultRelInfo->ri_TrigDesc &&
1602 : 0 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1603 : : {
1604 : 0 : bool dodelete;
1605 : :
1606 [ # # ]: 0 : Assert(oldtuple != NULL);
1607 : 0 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1608 : :
1609 [ # # ]: 0 : if (!dodelete) /* "do nothing" */
1610 : 0 : return NULL;
1611 [ # # ]: 0 : }
1612 [ # # ]: 0 : else if (resultRelInfo->ri_FdwRoutine)
1613 : : {
1614 : : /*
1615 : : * delete from foreign table: let the FDW do it
1616 : : *
1617 : : * We offer the returning slot as a place to store RETURNING data,
1618 : : * although the FDW can return some other slot if it wants.
1619 : : */
1620 : 0 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1621 : 0 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1622 : 0 : resultRelInfo,
1623 : 0 : slot,
1624 : 0 : context->planSlot);
1625 : :
1626 [ # # ]: 0 : if (slot == NULL) /* "do nothing" */
1627 : 0 : return NULL;
1628 : :
1629 : : /*
1630 : : * RETURNING expressions might reference the tableoid column, so
1631 : : * (re)initialize tts_tableOid before evaluating them.
1632 : : */
1633 [ # # ]: 0 : if (TTS_EMPTY(slot))
1634 : 0 : ExecStoreAllNullTuple(slot);
1635 : :
1636 : 0 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1637 : 0 : }
1638 : : else
1639 : 0 : {
1640 : : /*
1641 : : * delete the tuple
1642 : : *
1643 : : * Note: if context->estate->es_crosscheck_snapshot isn't
1644 : : * InvalidSnapshot, we check that the row to be deleted is visible to
1645 : : * that snapshot, and throw a can't-serialize error if not. This is a
1646 : : * special-case behavior needed for referential integrity updates in
1647 : : * transaction-snapshot mode transactions.
1648 : : */
1649 : : ldelete:
1650 : 0 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1651 : :
1652 [ # # ]: 0 : if (tmresult)
1653 : 0 : *tmresult = result;
1654 : :
1655 [ # # # # : 0 : switch (result)
# ]
1656 : : {
1657 : : case TM_SelfModified:
1658 : :
1659 : : /*
1660 : : * The target tuple was already updated or deleted by the
1661 : : * current command, or by a later command in the current
1662 : : * transaction. The former case is possible in a join DELETE
1663 : : * where multiple tuples join to the same target tuple. This
1664 : : * is somewhat questionable, but Postgres has always allowed
1665 : : * it: we just ignore additional deletion attempts.
1666 : : *
1667 : : * The latter case arises if the tuple is modified by a
1668 : : * command in a BEFORE trigger, or perhaps by a command in a
1669 : : * volatile function used in the query. In such situations we
1670 : : * should not ignore the deletion, but it is equally unsafe to
1671 : : * proceed. We don't want to discard the original DELETE
1672 : : * while keeping the triggered actions based on its deletion;
1673 : : * and it would be no better to allow the original DELETE
1674 : : * while discarding updates that it triggered. The row update
1675 : : * carries some information that might be important according
1676 : : * to business rules; so throwing an error is the only safe
1677 : : * course.
1678 : : *
1679 : : * If a trigger actually intends this type of interaction, it
1680 : : * can re-execute the DELETE and then return NULL to cancel
1681 : : * the outer delete.
1682 : : */
1683 [ # # ]: 0 : if (context->tmfd.cmax != estate->es_output_cid)
1684 [ # # # # ]: 0 : ereport(ERROR,
1685 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1686 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1687 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1688 : :
1689 : : /* Else, already deleted by self; nothing to do */
1690 : 0 : return NULL;
1691 : :
1692 : : case TM_Ok:
1693 : : break;
1694 : :
1695 : : case TM_Updated:
1696 : : {
1697 : 0 : TupleTableSlot *inputslot;
1698 : 0 : TupleTableSlot *epqslot;
1699 : :
1700 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
1701 [ # # # # ]: 0 : ereport(ERROR,
1702 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1703 : : errmsg("could not serialize access due to concurrent update")));
1704 : :
1705 : : /*
1706 : : * Already know that we're going to need to do EPQ, so
1707 : : * fetch tuple directly into the right slot.
1708 : : */
1709 : 0 : EvalPlanQualBegin(context->epqstate);
1710 : 0 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1711 : 0 : resultRelInfo->ri_RangeTableIndex);
1712 : :
1713 : 0 : result = table_tuple_lock(resultRelationDesc, tupleid,
1714 : 0 : estate->es_snapshot,
1715 : 0 : inputslot, estate->es_output_cid,
1716 : : LockTupleExclusive, LockWaitBlock,
1717 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1718 : 0 : &context->tmfd);
1719 : :
1720 [ # # # # ]: 0 : switch (result)
1721 : : {
1722 : : case TM_Ok:
1723 [ # # ]: 0 : Assert(context->tmfd.traversed);
1724 : 0 : epqslot = EvalPlanQual(context->epqstate,
1725 : 0 : resultRelationDesc,
1726 : 0 : resultRelInfo->ri_RangeTableIndex,
1727 : 0 : inputslot);
1728 [ # # # # ]: 0 : if (TupIsNull(epqslot))
1729 : : /* Tuple not passing quals anymore, exiting... */
1730 : 0 : return NULL;
1731 : :
1732 : : /*
1733 : : * If requested, skip delete and pass back the
1734 : : * updated row.
1735 : : */
1736 [ # # ]: 0 : if (epqreturnslot)
1737 : : {
1738 : 0 : *epqreturnslot = epqslot;
1739 : 0 : return NULL;
1740 : : }
1741 : : else
1742 : 0 : goto ldelete;
1743 : :
1744 : : case TM_SelfModified:
1745 : :
1746 : : /*
1747 : : * This can be reached when following an update
1748 : : * chain from a tuple updated by another session,
1749 : : * reaching a tuple that was already updated in
1750 : : * this transaction. If previously updated by this
1751 : : * command, ignore the delete, otherwise error
1752 : : * out.
1753 : : *
1754 : : * See also TM_SelfModified response to
1755 : : * table_tuple_delete() above.
1756 : : */
1757 [ # # ]: 0 : if (context->tmfd.cmax != estate->es_output_cid)
1758 [ # # # # ]: 0 : ereport(ERROR,
1759 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1760 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1761 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1762 : 0 : return NULL;
1763 : :
1764 : : case TM_Deleted:
1765 : : /* tuple already deleted; nothing to do */
1766 : 0 : return NULL;
1767 : :
1768 : : default:
1769 : :
1770 : : /*
1771 : : * TM_Invisible should be impossible because we're
1772 : : * waiting for updated row versions, and would
1773 : : * already have errored out if the first version
1774 : : * is invisible.
1775 : : *
1776 : : * TM_Updated should be impossible, because we're
1777 : : * locking the latest version via
1778 : : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1779 : : */
1780 [ # # # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1781 : : result);
1782 : 0 : return NULL;
1783 : : }
1784 : :
1785 : : Assert(false);
1786 : : break;
1787 [ # # ]: 0 : }
1788 : :
1789 : : case TM_Deleted:
1790 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
1791 [ # # # # ]: 0 : ereport(ERROR,
1792 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1793 : : errmsg("could not serialize access due to concurrent delete")));
1794 : : /* tuple already deleted; nothing to do */
1795 : 0 : return NULL;
1796 : :
1797 : : default:
1798 [ # # # # ]: 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1799 : : result);
1800 : 0 : return NULL;
1801 : : }
1802 : :
1803 : : /*
1804 : : * Note: Normally one would think that we have to delete index tuples
1805 : : * associated with the heap tuple now...
1806 : : *
1807 : : * ... but in POSTGRES, we have no need to do this because VACUUM will
1808 : : * take care of it later. We can't delete index tuples immediately
1809 : : * anyway, since the tuple is still visible to other transactions.
1810 : : */
1811 : : }
1812 : :
1813 [ # # ]: 0 : if (canSetTag)
1814 : 0 : (estate->es_processed)++;
1815 : :
1816 : : /* Tell caller that the delete actually happened. */
1817 [ # # ]: 0 : if (tupleDeleted)
1818 : 0 : *tupleDeleted = true;
1819 : :
1820 : 0 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1821 : :
1822 : : /*
1823 : : * Process RETURNING if present and if requested.
1824 : : *
1825 : : * If this is part of a cross-partition UPDATE, and the RETURNING list
1826 : : * refers to any OLD column values, save the old tuple here for later
1827 : : * processing of the RETURNING list by ExecInsert().
1828 : : */
1829 [ # # # # ]: 0 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1830 : 0 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1831 : :
1832 [ # # # # : 0 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
# # ]
1833 : : {
1834 : : /*
1835 : : * We have to put the target tuple into a slot, which means we must
1836 : : * first fetch it. We can use the trigger tuple slot.
1837 : : */
1838 : 0 : TupleTableSlot *rslot;
1839 : :
1840 [ # # ]: 0 : if (resultRelInfo->ri_FdwRoutine)
1841 : : {
1842 : : /* FDW must have provided a slot containing the deleted row */
1843 [ # # ]: 0 : Assert(!TupIsNull(slot));
1844 : 0 : }
1845 : : else
1846 : : {
1847 : 0 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1848 [ # # ]: 0 : if (oldtuple != NULL)
1849 : : {
1850 : 0 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1851 : 0 : }
1852 : : else
1853 : : {
1854 [ # # # # ]: 0 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1855 : 0 : SnapshotAny, slot))
1856 [ # # # # ]: 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1857 : : }
1858 : : }
1859 : :
1860 : : /*
1861 : : * If required, save the old tuple for later processing of the
1862 : : * RETURNING list by ExecInsert().
1863 : : */
1864 [ # # ]: 0 : if (saveOld)
1865 : : {
1866 : 0 : TupleConversionMap *tupconv_map;
1867 : :
1868 : : /*
1869 : : * Convert the tuple into the root partition's format/slot, if
1870 : : * needed. ExecInsert() will then convert it to the new
1871 : : * partition's format/slot, if necessary.
1872 : : */
1873 : 0 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1874 [ # # ]: 0 : if (tupconv_map != NULL)
1875 : : {
1876 : 0 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1877 : 0 : TupleTableSlot *oldSlot = slot;
1878 : :
1879 : 0 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1880 : 0 : slot,
1881 : 0 : ExecGetReturningSlot(estate,
1882 : 0 : rootRelInfo));
1883 : :
1884 : 0 : slot->tts_tableOid = oldSlot->tts_tableOid;
1885 : 0 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1886 : 0 : }
1887 : :
1888 : 0 : context->cpDeletedSlot = slot;
1889 : :
1890 : 0 : return NULL;
1891 : 0 : }
1892 : :
1893 : 0 : rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1894 : 0 : slot, NULL, context->planSlot);
1895 : :
1896 : : /*
1897 : : * Before releasing the target tuple again, make sure rslot has a
1898 : : * local copy of any pass-by-reference values.
1899 : : */
1900 : 0 : ExecMaterializeSlot(rslot);
1901 : :
1902 : 0 : ExecClearTuple(slot);
1903 : :
1904 : 0 : return rslot;
1905 : 0 : }
1906 : :
1907 : 0 : return NULL;
1908 : 0 : }
1909 : :
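The heart of ExecDelete's plain-table path is the retry loop around the TM_Result codes. As a rough standalone model (toy types and functions, not the real table AM API): a concurrent update surfaces as TM_Updated, the latest row version is fetched and re-qualified, and the delete is retried only if the new version still matches.

#include <stdbool.h>
#include <stdio.h>

typedef enum { TOY_OK, TOY_SELF_MODIFIED, TOY_UPDATED, TOY_DELETED } ToyResult;

/* Stand-in for table_tuple_delete: the first attempt sees a concurrent update. */
static ToyResult
try_delete(int *version, int attempt)
{
    if (attempt == 0)
    {
        (*version)++;           /* a concurrent transaction updated the row */
        return TOY_UPDATED;
    }
    return TOY_OK;
}

/* Stand-in for re-evaluating the query's quals against the new version. */
static bool
still_passes_quals(int version)
{
    return version < 10;
}

int
main(void)
{
    int         version = 1;

    for (int attempt = 0;; attempt++)   /* like the "goto ldelete" loop */
    {
        switch (try_delete(&version, attempt))
        {
            case TOY_OK:
                printf("deleted version %d\n", version);
                return 0;
            case TOY_UPDATED:
                if (!still_passes_quals(version))
                {
                    printf("new version fails quals; giving up\n");
                    return 0;   /* row no longer matches: nothing to do */
                }
                printf("retrying delete against version %d\n", version);
                continue;       /* retry, like EvalPlanQual + goto ldelete */
            case TOY_SELF_MODIFIED:
            case TOY_DELETED:
                return 0;       /* already gone: nothing to do */
        }
    }
}
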
1910 : : /*
1911 : : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1912 : : *
1913 : : * This works by first deleting the old tuple from the current partition,
1914 : : * followed by inserting the new tuple into the root parent table, that is,
1915 : : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1916 : : * correct partition.
1917 : : *
1918 : : * Returns true if the tuple has been successfully moved, or if it's found
1919 : : * that the tuple was concurrently deleted so there's nothing more to do
1920 : : * for the caller.
1921 : : *
1922 : : * False is returned if the tuple we're trying to move is found to have been
1923 : : * concurrently updated. In that case, the caller must check if the updated
1924 : : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1925 : : * this function again or perform a regular update accordingly. For MERGE,
1926 : : * the updated tuple is not returned in *retry_slot; it has its own retry
1927 : : * logic.
1928 : : */
1929 : : static bool
1930 : 0 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1931 : : ResultRelInfo *resultRelInfo,
1932 : : ItemPointer tupleid, HeapTuple oldtuple,
1933 : : TupleTableSlot *slot,
1934 : : bool canSetTag,
1935 : : UpdateContext *updateCxt,
1936 : : TM_Result *tmresult,
1937 : : TupleTableSlot **retry_slot,
1938 : : TupleTableSlot **inserted_tuple,
1939 : : ResultRelInfo **insert_destrel)
1940 : : {
1941 : 0 : ModifyTableState *mtstate = context->mtstate;
1942 : 0 : EState *estate = mtstate->ps.state;
1943 : 0 : TupleConversionMap *tupconv_map;
1944 : 0 : bool tuple_deleted;
1945 : 0 : TupleTableSlot *epqslot = NULL;
1946 : :
1947 : 0 : context->cpDeletedSlot = NULL;
1948 : 0 : context->cpUpdateReturningSlot = NULL;
1949 : 0 : *retry_slot = NULL;
1950 : :
1951 : : /*
1952 : : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1953 : : * to migrate to a different partition. Maybe this can be implemented
1954 : : * some day, but it seems a fringe feature with little redeeming value.
1955 : : */
1956 [ # # ]: 0 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1957 [ # # # # ]: 0 : ereport(ERROR,
1958 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1959 : : errmsg("invalid ON UPDATE specification"),
1960 : : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1961 : :
1962 : : /*
1963 : : * When an UPDATE is run directly on a leaf partition, simply fail with a
1964 : : * partition constraint violation error.
1965 : : */
1966 [ # # ]: 0 : if (resultRelInfo == mtstate->rootResultRelInfo)
1967 : 0 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1968 : :
1969 : : /* Initialize tuple routing info if not already done. */
1970 [ # # ]: 0 : if (mtstate->mt_partition_tuple_routing == NULL)
1971 : : {
1972 : 0 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1973 : 0 : MemoryContext oldcxt;
1974 : :
1975 : : /* Things built here have to last for the query duration. */
1976 : 0 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1977 : :
1978 : 0 : mtstate->mt_partition_tuple_routing =
1979 : 0 : ExecSetupPartitionTupleRouting(estate, rootRel);
1980 : :
1981 : : /*
1982 : : * Before a partition's tuple can be re-routed, it must first be
1983 : : * converted to the root's format, so we'll need a slot for storing
1984 : : * such tuples.
1985 : : */
1986 [ # # ]: 0 : Assert(mtstate->mt_root_tuple_slot == NULL);
1987 : 0 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1988 : :
1989 : 0 : MemoryContextSwitchTo(oldcxt);
1990 : 0 : }
1991 : :
1992 : : /*
1993 : : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1994 : : * We want to return rows from INSERT.
1995 : : */
1996 : 0 : ExecDelete(context, resultRelInfo,
1997 : 0 : tupleid, oldtuple,
1998 : : false, /* processReturning */
1999 : : true, /* changingPart */
2000 : : false, /* canSetTag */
2001 : 0 : tmresult, &tuple_deleted, &epqslot);
2002 : :
2003 : : /*
2004 : : * If for some reason the DELETE didn't happen (e.g. a trigger prevented it, or
2005 : : * it was already deleted by self, or it was concurrently deleted by
2006 : : * another transaction), then we should skip the insert as well;
2007 : : * otherwise, an UPDATE could cause an increase in the total number of
2008 : : * rows across all partitions, which is clearly wrong.
2009 : : *
2010 : : * For a normal UPDATE, the case where the tuple has been the subject of a
2011 : : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2012 : : * machinery, but for an UPDATE that we've translated into a DELETE from
2013 : : * this partition and an INSERT into some other partition, that's not
2014 : : * available, because CTID chains can't span relation boundaries. We
2015 : : * mimic the semantics to a limited extent by skipping the INSERT if the
2016 : : * DELETE fails to find a tuple. This ensures that two concurrent
2017 : : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2018 : : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2019 : : * it.
2020 : : */
2021 [ # # ]: 0 : if (!tuple_deleted)
2022 : : {
2023 : : /*
2024 : : * epqslot will typically be NULL. But when ExecDelete() finds that
2025 : : * another transaction has concurrently updated the same row, it
2026 : : * re-fetches the row, skips the delete, and epqslot is set to the
2027 : : * re-fetched tuple slot. In that case, we need to do all the checks
2028 : : * again. For MERGE, we leave everything to the caller (it must do
2029 : : * additional rechecking, and might end up executing a different
2030 : : * action entirely).
2031 : : */
2032 [ # # ]: 0 : if (mtstate->operation == CMD_MERGE)
2033 : 0 : return *tmresult == TM_Ok;
2034 [ # # # # ]: 0 : else if (TupIsNull(epqslot))
2035 : 0 : return true;
2036 : : else
2037 : : {
2038 : : /* Fetch the most recent version of old tuple. */
2039 : 0 : TupleTableSlot *oldSlot;
2040 : :
2041 : : /* ... but first, make sure ri_oldTupleSlot is initialized. */
2042 [ # # ]: 0 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2043 : 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
2044 : 0 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2045 [ # # # # ]: 0 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2046 : 0 : tupleid,
2047 : : SnapshotAny,
2048 : 0 : oldSlot))
2049 [ # # # # ]: 0 : elog(ERROR, "failed to fetch tuple being updated");
2050 : : /* and project the new tuple to retry the UPDATE with */
2051 : 0 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2052 : 0 : oldSlot);
2053 : 0 : return false;
2054 : 0 : }
2055 : : }
2056 : :
2057 : : /*
2058 : : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2059 : : * convert the tuple into the root's tuple descriptor if needed, since
2060 : : * ExecInsert() starts its search from the root.
2061 : : */
2062 : 0 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2063 [ # # ]: 0 : if (tupconv_map != NULL)
2064 : 0 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2065 : 0 : slot,
2066 : 0 : mtstate->mt_root_tuple_slot);
2067 : :
2068 : : /* Tuple routing starts from the root table. */
2069 : 0 : context->cpUpdateReturningSlot =
2070 : 0 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2071 : 0 : inserted_tuple, insert_destrel);
2072 : :
2073 : : /*
2074 : : * Reset the transition state that may possibly have been written by
2075 : : * INSERT.
2076 : : */
2077 [ # # ]: 0 : if (mtstate->mt_transition_capture)
2078 : 0 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2079 : :
2080 : : /* We're done moving. */
2081 : 0 : return true;
2082 : 0 : }
2083 : :
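The invariant that makes row movement safe is "no delete, no insert". A minimal sketch of that rule under invented names:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for ExecDelete's tupleDeleted out-parameter. */
static bool
delete_from_partition(int row, bool concurrently_deleted, bool *deleted)
{
    *deleted = !concurrently_deleted;
    return *deleted;
}

/*
 * Like ExecCrossPartitionUpdate: DELETE from the source partition, and only
 * if that actually removed a row, INSERT into the destination via the root.
 */
static void
move_row(int row, bool concurrently_deleted)
{
    bool        deleted;

    delete_from_partition(row, concurrently_deleted, &deleted);
    if (!deleted)
    {
        printf("row %d: delete didn't happen, skipping insert\n", row);
        return;                 /* otherwise the total row count could grow */
    }
    printf("row %d: re-inserted through the root table\n", row);
}

int
main(void)
{
    move_row(1, false);         /* normal move: delete + insert */
    move_row(2, true);          /* concurrent delete: no insert */
    return 0;
}
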
2084 : : /*
2085 : : * ExecUpdatePrologue -- subroutine for ExecUpdate
2086 : : *
2087 : : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2088 : : * triggers. We return false if one of them makes the update a no-op;
2089 : : * otherwise, return true.
2090 : : */
2091 : : static bool
2092 : 0 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2093 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2094 : : TM_Result *result)
2095 : : {
2096 : 0 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2097 : :
2098 [ # # ]: 0 : if (result)
2099 : 0 : *result = TM_Ok;
2100 : :
2101 : 0 : ExecMaterializeSlot(slot);
2102 : :
2103 : : /*
2104 : : * Open the table's indexes, if we have not done so already, so that we
2105 : : * can add new index entries for the updated tuple.
2106 : : */
2107 [ # # # # ]: 0 : if (resultRelationDesc->rd_rel->relhasindex &&
2108 : 0 : resultRelInfo->ri_IndexRelationDescs == NULL)
2109 : 0 : ExecOpenIndices(resultRelInfo, false);
2110 : :
2111 : : /* BEFORE ROW UPDATE triggers */
2112 [ # # # # ]: 0 : if (resultRelInfo->ri_TrigDesc &&
2113 : 0 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2114 : : {
2115 : : /* Flush any pending inserts, so rows are visible to the triggers */
2116 [ # # ]: 0 : if (context->estate->es_insert_pending_result_relations != NIL)
2117 : 0 : ExecPendingInserts(context->estate);
2118 : :
2119 : 0 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2120 : 0 : resultRelInfo, tupleid, oldtuple, slot,
2121 : 0 : result, &context->tmfd,
2122 : 0 : context->mtstate->operation == CMD_MERGE);
2123 : : }
2124 : :
2125 : 0 : return true;
2126 : 0 : }
2127 : :
2128 : : /*
2129 : : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2130 : : *
2131 : : * Apply the final modifications to the tuple slot before the update.
2132 : : * (This is split out because we also need it in the foreign-table code path.)
2133 : : */
2134 : : static void
2135 : 0 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2136 : : TupleTableSlot *slot,
2137 : : EState *estate)
2138 : : {
2139 : 0 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2140 : :
2141 : : /*
2142 : : * Constraints and GENERATED expressions might reference the tableoid
2143 : : * column, so (re-)initialize tts_tableOid before evaluating them.
2144 : : */
2145 : 0 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2146 : :
2147 : : /*
2148 : : * Compute stored generated columns
2149 : : */
2150 [ # # # # ]: 0 : if (resultRelationDesc->rd_att->constr &&
2151 : 0 : resultRelationDesc->rd_att->constr->has_generated_stored)
2152 : 0 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2153 : : CMD_UPDATE);
2154 : 0 : }
2155 : :
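Stored generated columns are recomputed from the row's base columns every time a new row version is formed, which is what ExecUpdatePrepareSlot arranges. A toy standalone illustration (hypothetical struct, not the executor's slot machinery):

#include <stdio.h>

typedef struct Row
{
    int         a;
    int         b;
    int         a_plus_b;       /* stand-in for a stored GENERATED column */
} Row;

/* Like ExecComputeStoredGenerated: derive the stored column from the rest. */
static void
compute_generated(Row *row)
{
    row->a_plus_b = row->a + row->b;
}

int
main(void)
{
    Row         row = {.a = 1, .b = 2};

    compute_generated(&row);    /* must run before every insert/update */
    row.b = 40;                 /* an UPDATE changes a base column... */
    compute_generated(&row);    /* ...so the derived value is recomputed */
    printf("a_plus_b = %d\n", row.a_plus_b);    /* prints 41 */
    return 0;
}
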
2156 : : /*
2157 : : * ExecUpdateAct -- subroutine for ExecUpdate
2158 : : *
2159 : : * Actually update the tuple, when operating on a plain table. If the
2160 : : * table is a partition, and the command was called referencing an ancestor
2161 : : * partitioned table, this routine migrates the resulting tuple to another
2162 : : * partition.
2163 : : *
2164 : : * The caller is in charge of keeping indexes current as necessary. The
2165 : : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2166 : : * be concurrently updated. However, in case of a cross-partition update,
2167 : : * this routine does it.
2168 : : */
2169 : : static TM_Result
2170 : 0 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2171 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2172 : : bool canSetTag, UpdateContext *updateCxt)
2173 : : {
2174 : 0 : EState *estate = context->estate;
2175 : 0 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2176 : 0 : bool partition_constraint_failed;
2177 : 0 : TM_Result result;
2178 : :
2179 : 0 : updateCxt->crossPartUpdate = false;
2180 : :
2181 : : /*
2182 : : * If we move the tuple to a new partition, we loop back here to recompute
2183 : : * GENERATED values (which are allowed to be different across partitions)
2184 : : * and recheck any RLS policies and constraints. We do not fire any
2185 : : * BEFORE triggers of the new partition, however.
2186 : : */
2187 : : lreplace:
2188 : : /* Fill in GENERATEd columns */
2189 : 0 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2190 : :
2191 : : /* ensure slot is independent, consider e.g. EPQ */
2192 : 0 : ExecMaterializeSlot(slot);
2193 : :
2194 : : /*
2195 : : * If partition constraint fails, this row might get moved to another
2196 : : * partition, in which case we should check the RLS CHECK policy just
2197 : : * before inserting into the new partition, rather than doing it here.
2198 : : * This is because a trigger on that partition might again change the row.
2199 : : * So skip the WCO checks if the partition constraint fails.
2200 : : */
2201 : 0 : partition_constraint_failed =
2202 [ # # ]: 0 : resultRelationDesc->rd_rel->relispartition &&
2203 : 0 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2204 : :
2205 : : /* Check any RLS UPDATE WITH CHECK policies */
2206 [ # # # # ]: 0 : if (!partition_constraint_failed &&
2207 : 0 : resultRelInfo->ri_WithCheckOptions != NIL)
2208 : : {
2209 : : /*
2210 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2211 : : * we are looking for at this point.
2212 : : */
2213 : 0 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2214 : 0 : resultRelInfo, slot, estate);
2215 : 0 : }
2216 : :
2217 : : /*
2218 : : * If a partition check failed, try to move the row into the right
2219 : : * partition.
2220 : : */
2221 [ # # ]: 0 : if (partition_constraint_failed)
2222 : : {
2223 : 0 : TupleTableSlot *inserted_tuple,
2224 : : *retry_slot;
2225 : 0 : ResultRelInfo *insert_destrel = NULL;
2226 : :
2227 : : /*
2228 : : * ExecCrossPartitionUpdate will first DELETE the row from the
2229 : : * partition it's currently in and then insert it back into the root
2230 : : * table, which will re-route it to the correct partition. However,
2231 : : * if the tuple has been concurrently updated, a retry is needed.
2232 : : */
2233 [ # # # # ]: 0 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2234 : 0 : tupleid, oldtuple, slot,
2235 : 0 : canSetTag, updateCxt,
2236 : : &result,
2237 : : &retry_slot,
2238 : : &inserted_tuple,
2239 : : &insert_destrel))
2240 : : {
2241 : : /* success! */
2242 : 0 : updateCxt->crossPartUpdate = true;
2243 : :
2244 : : /*
2245 : : * If the partitioned table being updated is referenced in foreign
2246 : : * keys, queue up trigger events to check that none of them were
2247 : : * violated. No special treatment is needed in
2248 : : * non-cross-partition update situations, because the leaf
2249 : : * partition's AR update triggers will take care of that. During
2250 : : * cross-partition updates implemented as delete on the source
2251 : : * partition followed by insert on the destination partition,
2252 : : * AR-UPDATE triggers of the root table (that is, the table
2253 : : * mentioned in the query) must be fired.
2254 : : *
2255 : : * NULL insert_destrel means that the move failed to occur, that
2256 : : * is, the update failed, so there is no need to do anything in that case.
2257 : : */
2258 [ # # ]: 0 : if (insert_destrel &&
2259 [ # # # # ]: 0 : resultRelInfo->ri_TrigDesc &&
2260 : 0 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2261 : 0 : ExecCrossPartitionUpdateForeignKey(context,
2262 : 0 : resultRelInfo,
2263 : 0 : insert_destrel,
2264 : 0 : tupleid, slot,
2265 : 0 : inserted_tuple);
2266 : :
2267 : 0 : return TM_Ok;
2268 : : }
2269 : :
2270 : : /*
2271 : : * No luck, a retry is needed. If running MERGE, we do not retry
2272 : : * here; instead we let MERGE apply its own retry rules.
2273 : : */
2274 [ # # ]: 0 : if (context->mtstate->operation == CMD_MERGE)
2275 : 0 : return result;
2276 : :
2277 : : /*
2278 : : * ExecCrossPartitionUpdate installed an updated version of the new
2279 : : * tuple in the retry slot; start over.
2280 : : */
2281 : 0 : slot = retry_slot;
2282 : 0 : goto lreplace;
2283 [ # # ]: 0 : }
2284 : :
2285 : : /*
2286 : : * Check the constraints of the tuple. We've already checked the
2287 : : * partition constraint above; however, we must still ensure the tuple
2288 : : * passes all other constraints, so we will call ExecConstraints() and
2289 : : * have it validate all remaining checks.
2290 : : */
2291 [ # # ]: 0 : if (resultRelationDesc->rd_att->constr)
2292 : 0 : ExecConstraints(resultRelInfo, slot, estate);
2293 : :
2294 : : /*
2295 : : * replace the heap tuple
2296 : : *
2297 : : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2298 : : * the row to be updated is visible to that snapshot, and throw a
2299 : : * can't-serialize error if not. This is a special-case behavior needed
2300 : : * for referential integrity updates in transaction-snapshot mode
2301 : : * transactions.
2302 : : */
2303 : 0 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2304 : 0 : estate->es_output_cid,
2305 : 0 : estate->es_snapshot,
2306 : 0 : estate->es_crosscheck_snapshot,
2307 : : true /* wait for commit */ ,
2308 : 0 : &context->tmfd, &updateCxt->lockmode,
2309 : 0 : &updateCxt->updateIndexes);
2310 : :
2311 : 0 : return result;
2312 : 0 : }
2313 : :
2314 : : /*
2315 : : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2316 : : *
2317 : : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2318 : : * returns indicating that the tuple was updated.
2319 : : */
2320 : : static void
2321 : 0 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2322 : : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2323 : : HeapTuple oldtuple, TupleTableSlot *slot)
2324 : : {
2325 : 0 : ModifyTableState *mtstate = context->mtstate;
2326 : 0 : List *recheckIndexes = NIL;
2327 : :
2328 : : /* insert index entries for tuple if necessary */
2329 [ # # # # ]: 0 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2330 : 0 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2331 : 0 : slot, context->estate,
2332 : : true, false,
2333 : : NULL, NIL,
2334 : 0 : (updateCxt->updateIndexes == TU_Summarizing));
2335 : :
2336 : : /* AFTER ROW UPDATE Triggers */
2337 : 0 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2338 : : NULL, NULL,
2339 : 0 : tupleid, oldtuple, slot,
2340 : 0 : recheckIndexes,
2341 [ # # ]: 0 : mtstate->operation == CMD_INSERT ?
2342 : 0 : mtstate->mt_oc_transition_capture :
2343 : 0 : mtstate->mt_transition_capture,
2344 : : false);
2345 : :
2346 : 0 : list_free(recheckIndexes);
2347 : :
2348 : : /*
2349 : : * Check any WITH CHECK OPTION constraints from parent views. We are
2350 : : * required to do this after testing all constraints and uniqueness
2351 : : * violations per the SQL spec, so we do it after actually updating the
2352 : : * record in the heap and all indexes.
2353 : : *
2354 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2355 : : * are looking for at this point.
2356 : : */
2357 [ # # ]: 0 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2358 : 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2359 : 0 : slot, context->estate);
2360 : 0 : }
2361 : :
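The updateIndexes value consulted above is a three-way decision. A hedged sketch of that dispatch, using toy enum values modeled on TU_None/TU_All/TU_Summarizing (the heap reports no index work needed for e.g. HOT updates, and summarizing-only when just indexes such as BRIN need entries; treat the specifics here as illustrative):

#include <stdio.h>

/* Toy values modeled loosely on the TU_UpdateIndexes enum used above. */
typedef enum { TOY_TU_NONE, TOY_TU_ALL, TOY_TU_SUMMARIZING } ToyUpdateIndexes;

static void
update_indexes(ToyUpdateIndexes mode)
{
    switch (mode)
    {
        case TOY_TU_NONE:
            /* e.g. a HOT update: no new index entries needed at all */
            printf("no index maintenance\n");
            break;
        case TOY_TU_SUMMARIZING:
            /* only summarizing indexes (e.g. BRIN-style) need entries */
            printf("update summarizing indexes only\n");
            break;
        case TOY_TU_ALL:
            printf("insert entries into all indexes\n");
            break;
    }
}

int
main(void)
{
    update_indexes(TOY_TU_NONE);
    update_indexes(TOY_TU_SUMMARIZING);
    update_indexes(TOY_TU_ALL);
    return 0;
}
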
2362 : : /*
2363 : : * Queues up an update event using the target root partitioned table's
2364 : : * trigger to check that a cross-partition update hasn't broken any foreign
2365 : : * keys pointing into it.
2366 : : */
2367 : : static void
2368 : 0 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2369 : : ResultRelInfo *sourcePartInfo,
2370 : : ResultRelInfo *destPartInfo,
2371 : : ItemPointer tupleid,
2372 : : TupleTableSlot *oldslot,
2373 : : TupleTableSlot *newslot)
2374 : : {
2375 : 0 : ListCell *lc;
2376 : 0 : ResultRelInfo *rootRelInfo;
2377 : 0 : List *ancestorRels;
2378 : :
2379 : 0 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2380 : 0 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2381 : :
2382 : : /*
2383 : : * For any foreign keys that point directly into a non-root ancestor of
2384 : : * the source partition, we can in theory fire an update event to enforce
2385 : : * those constraints using their triggers, if we could tell that both the
2386 : : * source and the destination partitions are under the same ancestor. But
2387 : : * for now, we simply report an error that those cannot be enforced.
2388 : : */
2389 [ # # # # : 0 : foreach(lc, ancestorRels)
# # ]
2390 : : {
2391 : 0 : ResultRelInfo *rInfo = lfirst(lc);
2392 : 0 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2393 : 0 : bool has_noncloned_fkey = false;
2394 : :
2395 : : /* Root ancestor's triggers will be processed. */
2396 [ # # ]: 0 : if (rInfo == rootRelInfo)
2397 : 0 : continue;
2398 : :
2399 [ # # # # ]: 0 : if (trigdesc && trigdesc->trig_update_after_row)
2400 : : {
2401 [ # # ]: 0 : for (int i = 0; i < trigdesc->numtriggers; i++)
2402 : : {
2403 : 0 : Trigger *trig = &trigdesc->triggers[i];
2404 : :
2405 [ # # # # ]: 0 : if (!trig->tgisclone &&
2406 : 0 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2407 : : {
2408 : 0 : has_noncloned_fkey = true;
2409 : 0 : break;
2410 : : }
2411 [ # # ]: 0 : }
2412 : 0 : }
2413 : :
2414 [ # # ]: 0 : if (has_noncloned_fkey)
2415 [ # # # # ]: 0 : ereport(ERROR,
2416 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2417 : : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2418 : : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2419 : : RelationGetRelationName(rInfo->ri_RelationDesc),
2420 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2421 : : errhint("Consider defining the foreign key on table \"%s\".",
2422 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2423 [ # # # ]: 0 : }
2424 : :
2425 : : /* Perform the root table's triggers. */
2426 : 0 : ExecARUpdateTriggers(context->estate,
2427 : 0 : rootRelInfo, sourcePartInfo, destPartInfo,
2428 : 0 : tupleid, NULL, newslot, NIL, NULL, true);
2429 : 0 : }
2430 : :
2431 : : /* ----------------------------------------------------------------
2432 : : * ExecUpdate
2433 : : *
2434 : : * note: we can't run UPDATE queries with transactions
2435 : : * off because UPDATEs are actually INSERTs and our
2436 : : * scan will mistakenly loop forever, updating the tuple
2437 : : * it just inserted. This should be fixed but until it
2438 : : * is, we don't want to get stuck in an infinite loop
2439 : : * that corrupts your database.
2440 : : *
2441 : : * When updating a table, tupleid identifies the tuple to update and
2442 : : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2443 : : * oldtuple is passed to the triggers and identifies what to update, and
2444 : : * tupleid is invalid. When updating a foreign table, tupleid is
2445 : : * invalid; the FDW has to figure out which row to update using data from
2446 : : * the planSlot. oldtuple is passed to foreign table triggers; it is
2447 : : * NULL when the foreign table has no relevant triggers.
2448 : : *
2449 : : * oldSlot contains the old tuple value.
2450 : : * slot contains the new tuple value to be stored.
2451 : : * planSlot is the output of the ModifyTable's subplan; we use it
2452 : : * to access values from other input tables (for RETURNING),
2453 : : * row-ID junk columns, etc.
2454 : : *
2455 : : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2456 : : * had identified the tuple to update, it will identify the tuple
2457 : : * actually updated after EvalPlanQual.
2458 : : * ----------------------------------------------------------------
2459 : : */
2460 : : static TupleTableSlot *
2461 : 0 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2462 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2463 : : TupleTableSlot *slot, bool canSetTag)
2464 : : {
2465 : 0 : EState *estate = context->estate;
2466 : 0 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2467 : 0 : UpdateContext updateCxt = {0};
2468 : 0 : TM_Result result;
2469 : :
2470 : : /*
2471 : : * abort the operation if not running transactions
2472 : : */
2473 [ # # ]: 0 : if (IsBootstrapProcessingMode())
2474 [ # # # # ]: 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2475 : :
2476 : : /*
2477 : : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2478 : : * done if it says we are.
2479 : : */
2480 [ # # ]: 0 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2481 : 0 : return NULL;
2482 : :
2483 : : /* INSTEAD OF ROW UPDATE Triggers */
2484 [ # # # # ]: 0 : if (resultRelInfo->ri_TrigDesc &&
2485 : 0 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2486 : : {
2487 [ # # # # ]: 0 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2488 : 0 : oldtuple, slot))
2489 : 0 : return NULL; /* "do nothing" */
2490 : 0 : }
2491 [ # # ]: 0 : else if (resultRelInfo->ri_FdwRoutine)
2492 : : {
2493 : : /* Fill in GENERATEd columns */
2494 : 0 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2495 : :
2496 : : /*
2497 : : * update in foreign table: let the FDW do it
2498 : : */
2499 : 0 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2500 : 0 : resultRelInfo,
2501 : 0 : slot,
2502 : 0 : context->planSlot);
2503 : :
2504 [ # # ]: 0 : if (slot == NULL) /* "do nothing" */
2505 : 0 : return NULL;
2506 : :
2507 : : /*
2508 : : * AFTER ROW Triggers or RETURNING expressions might reference the
2509 : : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2510 : : * them. (This covers the case where the FDW replaced the slot.)
2511 : : */
2512 : 0 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2513 : 0 : }
2514 : : else
2515 : : {
2516 : 0 : ItemPointerData lockedtid;
2517 : :
2518 : : /*
2519 : : * If we generate a new candidate tuple after EvalPlanQual testing, we
2520 : : * must loop back here to try again. (We don't need to redo triggers,
2521 : : * however. If there are any BEFORE triggers then trigger.c will have
2522 : : * done table_tuple_lock to lock the correct tuple, so there's no need
2523 : : * to do them again.)
2524 : : */
2525 : : redo_act:
2526 : 0 : lockedtid = *tupleid;
2527 : 0 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2528 : 0 : canSetTag, &updateCxt);
2529 : :
2530 : : /*
2531 : : * If ExecUpdateAct reports that a cross-partition update was done,
2532 : : * then the RETURNING tuple (if any) has been projected and there's
2533 : : * nothing else for us to do.
2534 : : */
2535 [ # # ]: 0 : if (updateCxt.crossPartUpdate)
2536 : 0 : return context->cpUpdateReturningSlot;
2537 : :
2538 [ # # # # : 0 : switch (result)
# ]
2539 : : {
2540 : : case TM_SelfModified:
2541 : :
2542 : : /*
2543 : : * The target tuple was already updated or deleted by the
2544 : : * current command, or by a later command in the current
2545 : : * transaction. The former case is possible in a join UPDATE
2546 : : * where multiple tuples join to the same target tuple. This
2547 : : * is pretty questionable, but Postgres has always allowed it:
2548 : : * we just execute the first update action and ignore
2549 : : * additional update attempts.
2550 : : *
2551 : : * The latter case arises if the tuple is modified by a
2552 : : * command in a BEFORE trigger, or perhaps by a command in a
2553 : : * volatile function used in the query. In such situations we
2554 : : * should not ignore the update, but it is equally unsafe to
2555 : : * proceed. We don't want to discard the original UPDATE
2556 : : * while keeping the triggered actions based on it; and we
2557 : : * have no principled way to merge this update with the
2558 : : * previous ones. So throwing an error is the only safe
2559 : : * course.
2560 : : *
2561 : : * If a trigger actually intends this type of interaction, it
2562 : : * can re-execute the UPDATE (assuming it can figure out how)
2563 : : * and then return NULL to cancel the outer update.
2564 : : */
2565 [ # # ]: 0 : if (context->tmfd.cmax != estate->es_output_cid)
2566 [ # # # # ]: 0 : ereport(ERROR,
2567 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2568 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2569 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2570 : :
2571 : : /* Else, already updated by self; nothing to do */
2572 : 0 : return NULL;
2573 : :
2574 : : case TM_Ok:
2575 : : break;
2576 : :
2577 : : case TM_Updated:
2578 : : {
2579 : 0 : TupleTableSlot *inputslot;
2580 : 0 : TupleTableSlot *epqslot;
2581 : :
2582 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2583 [ # # # # ]: 0 : ereport(ERROR,
2584 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2585 : : errmsg("could not serialize access due to concurrent update")));
2586 : :
2587 : : /*
2588 : : * Already know that we're going to need to do EPQ, so
2589 : : * fetch tuple directly into the right slot.
2590 : : */
2591 : 0 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2592 : 0 : resultRelInfo->ri_RangeTableIndex);
2593 : :
2594 : 0 : result = table_tuple_lock(resultRelationDesc, tupleid,
2595 : 0 : estate->es_snapshot,
2596 : 0 : inputslot, estate->es_output_cid,
2597 : 0 : updateCxt.lockmode, LockWaitBlock,
2598 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2599 : 0 : &context->tmfd);
2600 : :
2601 [ # # # # ]: 0 : switch (result)
2602 : : {
2603 : : case TM_Ok:
2604 [ # # ]: 0 : Assert(context->tmfd.traversed);
2605 : :
2606 : 0 : epqslot = EvalPlanQual(context->epqstate,
2607 : 0 : resultRelationDesc,
2608 : 0 : resultRelInfo->ri_RangeTableIndex,
2609 : 0 : inputslot);
2610 [ # # # # ]: 0 : if (TupIsNull(epqslot))
2611 : : /* Tuple not passing quals anymore, exiting... */
2612 : 0 : return NULL;
2613 : :
2614 : : /* Make sure ri_oldTupleSlot is initialized. */
2615 [ # # ]: 0 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2616 : 0 : ExecInitUpdateProjection(context->mtstate,
2617 : 0 : resultRelInfo);
2618 : :
2619 [ # # ]: 0 : if (resultRelInfo->ri_needLockTagTuple)
2620 : : {
2621 : 0 : UnlockTuple(resultRelationDesc,
2622 : : &lockedtid, InplaceUpdateTupleLock);
2623 : 0 : LockTuple(resultRelationDesc,
2624 : 0 : tupleid, InplaceUpdateTupleLock);
2625 : 0 : }
2626 : :
2627 : : /* Fetch the most recent version of old tuple. */
2628 : 0 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2629 [ # # # # ]: 0 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2630 : 0 : tupleid,
2631 : : SnapshotAny,
2632 : 0 : oldSlot))
2633 [ # # # # ]: 0 : elog(ERROR, "failed to fetch tuple being updated");
2634 : 0 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2635 : 0 : epqslot, oldSlot);
2636 : 0 : goto redo_act;
2637 : :
2638 : : case TM_Deleted:
2639 : : /* tuple already deleted; nothing to do */
2640 : 0 : return NULL;
2641 : :
2642 : : case TM_SelfModified:
2643 : :
2644 : : /*
2645 : : * This can be reached when following an update
2646 : : * chain from a tuple updated by another session,
2647 : : * reaching a tuple that was already updated in
2648 : : * this transaction. If previously modified by
2649 : : * this command, ignore the redundant update,
2650 : : * otherwise error out.
2651 : : *
2652 : : * See also TM_SelfModified response to
2653 : : * table_tuple_update() above.
2654 : : */
2655 [ # # ]: 0 : if (context->tmfd.cmax != estate->es_output_cid)
2656 [ # # # # ]: 0 : ereport(ERROR,
2657 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2658 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2659 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2660 : 0 : return NULL;
2661 : :
2662 : : default:
2663 : : /* see table_tuple_lock call in ExecDelete() */
2664 [ # # # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2665 : : result);
2666 : 0 : return NULL;
2667 : : }
2668 [ # # ]: 0 : }
2669 : :
2670 : : break;
2671 : :
2672 : : case TM_Deleted:
2673 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2674 [ # # # # ]: 0 : ereport(ERROR,
2675 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2676 : : errmsg("could not serialize access due to concurrent delete")));
2677 : : /* tuple already deleted; nothing to do */
2678 : 0 : return NULL;
2679 : :
2680 : : default:
2681 [ # # # # ]: 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2682 : : result);
2683 : 0 : return NULL;
2684 : : }
2685 [ # # ]: 0 : }
2686 : :
2687 [ # # ]: 0 : if (canSetTag)
2688 : 0 : (estate->es_processed)++;
2689 : :
2690 : 0 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2691 : 0 : slot);
2692 : :
2693 : : /* Process RETURNING if present */
2694 [ # # ]: 0 : if (resultRelInfo->ri_projectReturning)
2695 : 0 : return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2696 : 0 : oldSlot, slot, context->planSlot);
2697 : :
2698 : 0 : return NULL;
2699 : 0 : }
2700 : :
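The isolation-level fork in the TM_Updated/TM_Deleted cases above is worth isolating: under a transaction snapshot the only safe response to a concurrent change is a serialization failure, while READ COMMITTED follows the update chain and retries. A standalone model (toy names, not the real IsolationUsesXactSnapshot machinery):

#include <stdio.h>
#include <stdlib.h>

typedef enum { TOY_READ_COMMITTED, TOY_REPEATABLE_READ } ToyIsolation;

/*
 * What ExecUpdate does when table_tuple_update reports TM_Updated:
 * error out under a transaction snapshot, retry under READ COMMITTED.
 */
static void
handle_concurrent_update(ToyIsolation level)
{
    if (level == TOY_REPEATABLE_READ)
    {
        /* analogue of the serialization-failure ereport above */
        fprintf(stderr, "could not serialize access due to concurrent update\n");
        exit(1);
    }
    /* READ COMMITTED: lock the newest version, re-check quals, redo */
    printf("READ COMMITTED: following update chain and retrying\n");
}

int
main(void)
{
    handle_concurrent_update(TOY_READ_COMMITTED);
    handle_concurrent_update(TOY_REPEATABLE_READ);  /* exits with an error */
    return 0;
}
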
2701 : : /*
2702 : : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2703 : : *
2704 : : * Try to lock tuple for update as part of speculative insertion. If
2705 : : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2706 : : * (but still lock row, even though it may not satisfy estate's
2707 : : * snapshot).
2708 : : *
2709 : : * Returns true if we're done (with or without an update), or false if
2710 : : * the caller must retry the INSERT from scratch.
2711 : : */
2712 : : static bool
2713 : 0 : ExecOnConflictUpdate(ModifyTableContext *context,
2714 : : ResultRelInfo *resultRelInfo,
2715 : : ItemPointer conflictTid,
2716 : : TupleTableSlot *excludedSlot,
2717 : : bool canSetTag,
2718 : : TupleTableSlot **returning)
2719 : : {
2720 : 0 : ModifyTableState *mtstate = context->mtstate;
2721 : 0 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2722 : 0 : Relation relation = resultRelInfo->ri_RelationDesc;
2723 : 0 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2724 : 0 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2725 : 0 : TM_FailureData tmfd;
2726 : 0 : LockTupleMode lockmode;
2727 : 0 : TM_Result test;
2728 : 0 : Datum xminDatum;
2729 : 0 : TransactionId xmin;
2730 : 0 : bool isnull;
2731 : :
2732 : : /*
2733 : : * Parse analysis should have blocked ON CONFLICT for all system
2734 : : * relations, which includes these. There's no fundamental obstacle to
2735 : : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2736 : : * ExecUpdate() caller.
2737 : : */
2738 [ # # ]: 0 : Assert(!resultRelInfo->ri_needLockTagTuple);
2739 : :
2740 : : /* Determine lock mode to use */
2741 : 0 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2742 : :
2743 : : /*
2744 : : * Lock tuple for update. Don't follow updates when tuple cannot be
2745 : : * locked without doing so. A row locking conflict here means our
2746 : : * previous conclusion that the tuple is conclusively committed is not
2747 : : * true anymore.
2748 : : */
2749 : 0 : test = table_tuple_lock(relation, conflictTid,
2750 : 0 : context->estate->es_snapshot,
2751 : 0 : existing, context->estate->es_output_cid,
2752 : 0 : lockmode, LockWaitBlock, 0,
2753 : : &tmfd);
2754 [ # # # # : 0 : switch (test)
# # ]
2755 : : {
2756 : : case TM_Ok:
2757 : : /* success! */
2758 : : break;
2759 : :
2760 : : case TM_Invisible:
2761 : :
2762 : : /*
2763 : : * This can occur when a just-inserted tuple is updated again in
2764 : : * the same command, e.g. because multiple rows with the same
2765 : : * conflicting key values are inserted.
2766 : : *
2767 : : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2768 : : * case. We do not want to proceed because it would lead to the
2769 : : * same row being updated a second time in some unspecified order,
2770 : : * and in contrast to plain UPDATEs there's no historical behavior
2771 : : * to break.
2772 : : *
2773 : : * It is the user's responsibility to prevent this situation from
2774 : : * occurring. These problems are why the SQL standard similarly
2775 : : * specifies that for SQL MERGE, an exception must be raised in
2776 : : * the event of an attempt to update the same row twice.
2777 : : */
2778 : 0 : xminDatum = slot_getsysattr(existing,
2779 : : MinTransactionIdAttributeNumber,
2780 : : &isnull);
2781 [ # # ]: 0 : Assert(!isnull);
2782 : 0 : xmin = DatumGetTransactionId(xminDatum);
2783 : :
2784 [ # # ]: 0 : if (TransactionIdIsCurrentTransactionId(xmin))
2785 [ # # # # ]: 0 : ereport(ERROR,
2786 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2787 : : /* translator: %s is a SQL command name */
2788 : : errmsg("%s command cannot affect row a second time",
2789 : : "ON CONFLICT DO UPDATE"),
2790 : : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2791 : :
2792 : : /* This shouldn't happen */
2793 [ # # # # ]: 0 : elog(ERROR, "attempted to lock invisible tuple");
2794 : 0 : break;
2795 : :
2796 : : case TM_SelfModified:
2797 : :
2798 : : /*
2799 : : * This state should never be reached. As a dirty snapshot is used
2800 : : * to find conflicting tuples, speculative insertion wouldn't have
2801 : : * seen this row to conflict with.
2802 : : */
2803 [ # # # # ]: 0 : elog(ERROR, "unexpected self-updated tuple");
2804 : 0 : break;
2805 : :
2806 : : case TM_Updated:
2807 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2808 [ # # # # ]: 0 : ereport(ERROR,
2809 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2810 : : errmsg("could not serialize access due to concurrent update")));
2811 : :
2812 : : /*
2813 : : * Tell caller to try again from the very start.
2814 : : *
2815 : : * It does not make sense to use the usual EvalPlanQual() style
2816 : : * loop here, as the new version of the row might not conflict
2817 : : * anymore, or the conflicting tuple has actually been deleted.
2818 : : */
2819 : 0 : ExecClearTuple(existing);
2820 : 0 : return false;
2821 : :
2822 : : case TM_Deleted:
2823 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2824 [ # # # # ]: 0 : ereport(ERROR,
2825 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2826 : : errmsg("could not serialize access due to concurrent delete")));
2827 : :
2828 : : /* see TM_Updated case */
2829 : 0 : ExecClearTuple(existing);
2830 : 0 : return false;
2831 : :
2832 : : default:
2833 [ # # # # ]: 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2834 : 0 : }
2835 : :
2836 : : /* Success, the tuple is locked. */
2837 : :
2838 : : /*
2839 : : * Verify that the tuple is visible to our MVCC snapshot if the current
2840 : : * isolation level mandates that.
2841 : : *
2842 : : * It's not sufficient to rely on the check within ExecUpdate(), as e.g.
2843 : : * an ON CONFLICT ... WHERE clause may prevent us from reaching it.
2844 : : *
2845 : : * This means we only ever continue when a new command in the current
2846 : : * transaction could see the row, even though in READ COMMITTED mode the
2847 : : * tuple will not be visible according to the current statement's
2848 : : * snapshot. This is in line with the way UPDATE deals with newer tuple
2849 : : * versions.
2850 : : */
2851 : 0 : ExecCheckTupleVisible(context->estate, relation, existing);
2852 : :
2853 : : /*
2854 : : * Make tuple and any needed join variables available to ExecQual and
2855 : : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2856 : : * the target's existing tuple is installed in the scantuple. EXCLUDED
2857 : : * has been made to reference INNER_VAR in setrefs.c, but there is no
2858 : : * other redirection.
2859 : : */
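 : : /*
 : : * For example, in a hypothetical
 : : *
 : : * INSERT INTO t VALUES (1, 'a')
 : : * ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v
 : : * WHERE t.v IS DISTINCT FROM EXCLUDED.v;
 : : *
 : : * references to "t" read the existing row via the scantuple, while
 : : * references to "EXCLUDED" read the proposed row via the inner tuple.
 : : */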
2860 : 0 : econtext->ecxt_scantuple = existing;
2861 : 0 : econtext->ecxt_innertuple = excludedSlot;
2862 : 0 : econtext->ecxt_outertuple = NULL;
2863 : :
2864 [ # # ]: 0 : if (!ExecQual(onConflictSetWhere, econtext))
2865 : : {
2866 : 0 : ExecClearTuple(existing); /* see return below */
2867 [ # # ]: 0 : InstrCountFiltered1(&mtstate->ps, 1);
2868 : 0 : return true; /* done with the tuple */
2869 : : }
2870 : :
2871 [ # # ]: 0 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2872 : : {
2873 : : /*
2874 : : * Check target's existing tuple against UPDATE-applicable USING
2875 : : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2876 : : *
2877 : : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2878 : : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2879 : : * but that's almost the extent of its special handling for ON
2880 : : * CONFLICT DO UPDATE.
2881 : : *
2882 : : * The rewriter will also have associated UPDATE applicable straight
2883 : : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2884 : : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2885 : : * kinds, so there is no danger of spurious over-enforcement in the
2886 : : * INSERT or UPDATE path.
2887 : : */
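 : : /*
 : : * A sketch of what this enforces, assuming a hypothetical policy
 : : *
 : : * CREATE POLICY p ON t FOR UPDATE USING (owner = current_user);
 : : *
 : : * If the conflicting row's "owner" is another user, the check here
 : : * raises an error rather than silently taking the DO UPDATE path
 : : * against a row the USING qual would hide from an ordinary UPDATE.
 : : */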
2888 : 0 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2889 : 0 : existing,
2890 : 0 : mtstate->ps.state);
2891 : 0 : }
2892 : :
2893 : : /* Project the new tuple version */
2894 : 0 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2895 : :
2896 : : /*
2897 : : * Note that it is possible that the target tuple has been modified in
2898 : : * this session, after the above table_tuple_lock. We choose to not error
2899 : : * out in that case, in line with ExecUpdate's treatment of similar cases.
2900 : : * This can happen if an UPDATE is triggered from within ExecQual(),
2901 : : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2902 : : * wCTE in the ON CONFLICT's SET.
2903 : : */
2904 : :
2905 : : /* Execute UPDATE with projection */
2906 : 0 : *returning = ExecUpdate(context, resultRelInfo,
2907 : 0 : conflictTid, NULL, existing,
2908 : 0 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2909 : 0 : canSetTag);
2910 : :
2911 : : /*
2912 : : * Clear out existing tuple, as there might not be another conflict among
 2913 : : * the next input rows; we don't want to hold resources until the end of
 2914 : : * the query. First, though, make sure that the returning slot, if any, has a
2915 : : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2916 : : * columns.
2917 : : */
2918 [ # # # # ]: 0 : if (*returning != NULL &&
2919 : 0 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2920 : 0 : ExecMaterializeSlot(*returning);
2921 : :
2922 : 0 : ExecClearTuple(existing);
2923 : :
2924 : 0 : return true;
2925 : 0 : }
2926 : :
2927 : : /*
2928 : : * Perform MERGE.
2929 : : */
2930 : : static TupleTableSlot *
2931 : 0 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2932 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2933 : : {
2934 : 0 : TupleTableSlot *rslot = NULL;
2935 : 0 : bool matched;
2936 : :
2937 : : /*-----
2938 : : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2939 : : * valid, depending on whether the result relation is a table or a view.
2940 : : * We execute the first action for which the additional WHEN MATCHED AND
2941 : : * quals pass. If an action without quals is found, that action is
2942 : : * executed.
2943 : : *
2944 : : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2945 : : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2946 : : * in sequence until one passes. This is almost identical to the WHEN
2947 : : * MATCHED case, and both cases are handled by ExecMergeMatched().
2948 : : *
2949 : : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2950 : : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2951 : : * TARGET] actions in sequence until one passes.
2952 : : *
2953 : : * Things get interesting in case of concurrent update/delete of the
2954 : : * target tuple. Such concurrent update/delete is detected while we are
2955 : : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2956 : : *
2957 : : * A concurrent update can:
2958 : : *
2959 : : * 1. modify the target tuple so that the results from checking any
2960 : : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2961 : : * SOURCE actions potentially change, but the result from the join
2962 : : * quals does not change.
2963 : : *
2964 : : * In this case, we are still dealing with the same kind of match
2965 : : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2966 : : * actions from the start and choose the first one that satisfies the
2967 : : * new target tuple.
2968 : : *
2969 : : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2970 : : * quals no longer pass and hence the source and target tuples no
2971 : : * longer match.
2972 : : *
2973 : : * In this case, we are now dealing with a NOT MATCHED case, and we
2974 : : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2975 : : * TARGET] actions. First ExecMergeMatched() processes the list of
2976 : : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2977 : : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2978 : : * TARGET] actions in sequence until one passes. Thus we may execute
2979 : : * two actions; one of each kind.
2980 : : *
2981 : : * Thus we support concurrent updates that turn MATCHED candidate rows
2982 : : * into NOT MATCHED rows. However, we do not attempt to support cases
2983 : : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2984 : : * cause a target row to match a different source row.
2985 : : *
2986 : : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2987 : : * [BY TARGET].
2988 : : *
2989 : : * ExecMergeMatched() takes care of following the update chain and
2990 : : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2991 : : * action, as long as the target tuple still exists. If the target tuple
2992 : : * gets deleted or a concurrent update causes the join quals to fail, it
2993 : : * returns a matched status of false and we call ExecMergeNotMatched().
2994 : : * Given that ExecMergeMatched() always makes progress by following the
2995 : : * update chain and we never switch from ExecMergeNotMatched() to
2996 : : * ExecMergeMatched(), there is no risk of a livelock.
2997 : : */
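 : : /*
 : : * As a concrete sketch, assuming hypothetical tables target(k, v)
 : : * and source(k, v), a statement exercising all three action lists:
 : : *
 : : * MERGE INTO target t USING source s ON t.k = s.k
 : : * WHEN MATCHED THEN UPDATE SET v = s.v
 : : * WHEN NOT MATCHED BY SOURCE THEN DELETE
 : : * WHEN NOT MATCHED THEN INSERT (k, v) VALUES (s.k, s.v);
 : : *
 : : * Join rows carrying a target tuple go through ExecMergeMatched();
 : : * rows without one fall through to ExecMergeNotMatched() below.
 : : */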
2998 [ # # ]: 0 : matched = tupleid != NULL || oldtuple != NULL;
2999 [ # # ]: 0 : if (matched)
3000 : 0 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3001 : 0 : canSetTag, &matched);
3002 : :
3003 : : /*
3004 : : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3005 : : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3006 : : * "matched" to false, indicating that it no longer matches).
3007 : : */
3008 [ # # ]: 0 : if (!matched)
3009 : : {
3010 : : /*
3011 : : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3012 : : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3013 : : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3014 : : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3015 : : * SOURCE action, and computed the row to return. If so, we cannot
3016 : : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3017 : : * pending (to be processed on the next call to ExecModifyTable()).
3018 : : * Otherwise, just process the action now.
3019 : : */
3020 [ # # ]: 0 : if (rslot == NULL)
3021 : 0 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3022 : : else
3023 : 0 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3024 : 0 : }
3025 : :
3026 : 0 : return rslot;
3027 : 0 : }
3028 : :
3029 : : /*
3030 : : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3031 : : * action, depending on whether the join quals are satisfied. If the target
3032 : : * relation is a table, the current target tuple is identified by tupleid.
3033 : : * Otherwise, if the target relation is a view, oldtuple is the current target
3034 : : * tuple from the view.
3035 : : *
3036 : : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3037 : : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3038 : : * action do not pass, we check the second, then the third and so on. If we
3039 : : * reach the end without finding a qualifying action, we return NULL.
3040 : : * Otherwise, we execute the qualifying action and return its RETURNING
3041 : : * result, if any, or NULL.
3042 : : *
3043 : : * On entry, "*matched" is assumed to be true. If a concurrent update or
3044 : : * delete is detected that causes the join quals to no longer pass, we set it
3045 : : * to false, indicating that the caller should process any NOT MATCHED [BY
3046 : : * TARGET] actions.
3047 : : *
3048 : : * After a concurrent update, we restart from the first action to look for a
3049 : : * new qualifying action to execute. If the join quals originally passed, and
3050 : : * the concurrent update caused them to no longer pass, then we switch from
3051 : : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3052 : : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3053 : : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3054 : : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3055 : : */
3056 : : static TupleTableSlot *
3057 : 0 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3058 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3059 : : bool *matched)
3060 : : {
3061 : 0 : ModifyTableState *mtstate = context->mtstate;
3062 : 0 : List **mergeActions = resultRelInfo->ri_MergeActions;
3063 : 0 : ItemPointerData lockedtid;
3064 : 0 : List *actionStates;
3065 : 0 : TupleTableSlot *newslot = NULL;
3066 : 0 : TupleTableSlot *rslot = NULL;
3067 : 0 : EState *estate = context->estate;
3068 : 0 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3069 : 0 : bool isNull;
3070 : 0 : EPQState *epqstate = &mtstate->mt_epqstate;
3071 : 0 : ListCell *l;
3072 : :
3073 : : /* Expect matched to be true on entry */
3074 [ # # ]: 0 : Assert(*matched);
3075 : :
3076 : : /*
3077 : : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3078 : : * are done.
3079 : : */
3080 [ # # # # ]: 0 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3081 : 0 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3082 : 0 : return NULL;
3083 : :
3084 : : /*
3085 : : * Make tuple and any needed join variables available to ExecQual and
3086 : : * ExecProject. The target's existing tuple is installed in the scantuple.
3087 : : * This target relation's slot is required only in the case of a MATCHED
3088 : : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3089 : : */
3090 : 0 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3091 : 0 : econtext->ecxt_innertuple = context->planSlot;
3092 : 0 : econtext->ecxt_outertuple = NULL;
3093 : :
3094 : : /*
3095 : : * This routine is only invoked for matched target rows, so we should
3096 : : * either have the tupleid of the target row, or an old tuple from the
3097 : : * target wholerow junk attr.
3098 : : */
3099 [ # # # # ]: 0 : Assert(tupleid != NULL || oldtuple != NULL);
3100 : 0 : ItemPointerSetInvalid(&lockedtid);
3101 [ # # ]: 0 : if (oldtuple != NULL)
3102 : : {
3103 [ # # ]: 0 : Assert(!resultRelInfo->ri_needLockTagTuple);
3104 : 0 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3105 : : false);
3106 : 0 : }
3107 : : else
3108 : : {
3109 [ # # ]: 0 : if (resultRelInfo->ri_needLockTagTuple)
3110 : : {
3111 : : /*
3112 : : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3113 : : * that don't match mas_whenqual. MERGE on system catalogs is a
3114 : : * minor use case, so don't bother optimizing those.
3115 : : */
3116 : 0 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3117 : : InplaceUpdateTupleLock);
3118 : 0 : lockedtid = *tupleid;
3119 : 0 : }
3120 [ # # # # ]: 0 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3121 : 0 : tupleid,
3122 : : SnapshotAny,
3123 : 0 : resultRelInfo->ri_oldTupleSlot))
3124 [ # # # # ]: 0 : elog(ERROR, "failed to fetch the target tuple");
3125 : : }
3126 : :
3127 : : /*
3128 : : * Test the join condition. If it's satisfied, perform a MATCHED action.
3129 : : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3130 : : *
3131 : : * Note that this join condition will be NULL if there are no NOT MATCHED
3132 : : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3133 : : * need only consider MATCHED actions here.
3134 : : */
3135 [ # # ]: 0 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3136 : 0 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3137 : : else
3138 : 0 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3139 : :
3140 : : lmerge_matched:
3141 : :
3142 [ # # # # # # # # # # ]: 0 : foreach(l, actionStates)
3143 : : {
3144 : 0 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3145 : 0 : CmdType commandType = relaction->mas_action->commandType;
3146 : 0 : TM_Result result;
3147 : 0 : UpdateContext updateCxt = {0};
3148 : :
3149 : : /*
3150 : : * Test condition, if any.
3151 : : *
3152 : : * In the absence of any condition, we perform the action
3153 : : * unconditionally (no need to check separately since ExecQual() will
3154 : : * return true if there are no conditions to evaluate).
3155 : : */
3156 [ # # ]: 0 : if (!ExecQual(relaction->mas_whenqual, econtext))
3157 : 0 : continue;
3158 : :
3159 : : /*
3160 : : * Check if the existing target tuple meets the USING checks of
3161 : : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3162 : : * error.
3163 : : *
3164 : : * The WITH CHECK quals for UPDATE RLS policies are applied in
3165 : : * ExecUpdateAct() and hence we need not do anything special to handle
3166 : : * them.
3167 : : *
3168 : : * NOTE: We must do this after WHEN quals are evaluated, so that we
3169 : : * check policies only when they matter.
3170 : : */
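 : : /*
 : : * For instance, under a hypothetical policy
 : : *
 : : * CREATE POLICY p ON target FOR UPDATE USING (owner = current_user);
 : : *
 : : * a WHEN MATCHED ... UPDATE action that reaches a row owned by
 : : * another user errors out here instead of silently skipping it.
 : : */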
3171 [ # # # # ]: 0 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3172 : : {
3173 : 0 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3174 : : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3175 : 0 : resultRelInfo,
3176 : 0 : resultRelInfo->ri_oldTupleSlot,
3177 : 0 : context->mtstate->ps.state);
3178 : 0 : }
3179 : :
3180 : : /* Perform stated action */
3181 [ # # # # ]: 0 : switch (commandType)
3182 : : {
3183 : : case CMD_UPDATE:
3184 : :
3185 : : /*
3186 : : * Project the output tuple, and use that to update the table.
3187 : : * We don't need to filter out junk attributes, because the
3188 : : * UPDATE action's targetlist doesn't have any.
3189 : : */
3190 : 0 : newslot = ExecProject(relaction->mas_proj);
3191 : :
3192 : 0 : mtstate->mt_merge_action = relaction;
3193 [ # # # # ]: 0 : if (!ExecUpdatePrologue(context, resultRelInfo,
3194 : 0 : tupleid, NULL, newslot, &result))
3195 : : {
3196 [ # # ]: 0 : if (result == TM_Ok)
3197 : 0 : goto out; /* "do nothing" */
3198 : :
3199 : 0 : break; /* concurrent update/delete */
3200 : : }
3201 : :
3202 : : /* INSTEAD OF ROW UPDATE Triggers */
3203 [ # # # # ]: 0 : if (resultRelInfo->ri_TrigDesc &&
3204 : 0 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3205 : : {
3206 [ # # # # ]: 0 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3207 : 0 : oldtuple, newslot))
3208 : 0 : goto out; /* "do nothing" */
3209 : 0 : }
3210 : : else
3211 : : {
3212 : : /* checked ri_needLockTagTuple above */
3213 [ # # ]: 0 : Assert(oldtuple == NULL);
3214 : :
3215 : 0 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3216 : 0 : NULL, newslot, canSetTag,
3217 : : &updateCxt);
3218 : :
3219 : : /*
3220 : : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3221 : : * cross-partition update was done, then there's nothing
3222 : : * else for us to do --- the UPDATE has been turned into a
3223 : : * DELETE and an INSERT, and we must not perform any of
3224 : : * the usual post-update tasks. Also, the RETURNING tuple
3225 : : * (if any) has been projected, so we can just return
3226 : : * that.
3227 : : */
3228 [ # # ]: 0 : if (updateCxt.crossPartUpdate)
3229 : : {
3230 : 0 : mtstate->mt_merge_updated += 1;
3231 : 0 : rslot = context->cpUpdateReturningSlot;
3232 : 0 : goto out;
3233 : : }
3234 : : }
3235 : :
3236 [ # # ]: 0 : if (result == TM_Ok)
3237 : : {
3238 : 0 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3239 : 0 : tupleid, NULL, newslot);
3240 : 0 : mtstate->mt_merge_updated += 1;
3241 : 0 : }
3242 : 0 : break;
3243 : :
3244 : : case CMD_DELETE:
3245 : 0 : mtstate->mt_merge_action = relaction;
3246 [ # # ]: 0 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3247 : : NULL, NULL, &result))
3248 : : {
3249 [ # # ]: 0 : if (result == TM_Ok)
3250 : 0 : goto out; /* "do nothing" */
3251 : :
3252 : 0 : break; /* concurrent update/delete */
3253 : : }
3254 : :
3255 : : /* INSTEAD OF ROW DELETE Triggers */
3256 [ # # # # ]: 0 : if (resultRelInfo->ri_TrigDesc &&
3257 : 0 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3258 : : {
3259 [ # # # # ]: 0 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3260 : 0 : oldtuple))
3261 : 0 : goto out; /* "do nothing" */
3262 : 0 : }
3263 : : else
3264 : : {
3265 : : /* checked ri_needLockTagTuple above */
3266 [ # # ]: 0 : Assert(oldtuple == NULL);
3267 : :
3268 : 0 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3269 : : false);
3270 : : }
3271 : :
3272 [ # # ]: 0 : if (result == TM_Ok)
3273 : : {
3274 : 0 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3275 : : false);
3276 : 0 : mtstate->mt_merge_deleted += 1;
3277 : 0 : }
3278 : 0 : break;
3279 : :
3280 : : case CMD_NOTHING:
3281 : : /* Doing nothing is always OK */
3282 : 0 : result = TM_Ok;
3283 : 0 : break;
3284 : :
3285 : : default:
3286 [ # # # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3287 : 0 : }
3288 : :
3289 [ # # # # # # ]: 0 : switch (result)
3290 : : {
3291 : : case TM_Ok:
3292 : : /* all good; perform final actions */
3293 [ # # # # ]: 0 : if (canSetTag && commandType != CMD_NOTHING)
3294 : 0 : (estate->es_processed)++;
3295 : :
3296 : 0 : break;
3297 : :
3298 : : case TM_SelfModified:
3299 : :
3300 : : /*
3301 : : * The target tuple was already updated or deleted by the
3302 : : * current command, or by a later command in the current
3303 : : * transaction. The former case is explicitly disallowed by
3304 : : * the SQL standard for MERGE, which insists that the MERGE
3305 : : * join condition should not join a target row to more than
3306 : : * one source row.
3307 : : *
3308 : : * The latter case arises if the tuple is modified by a
3309 : : * command in a BEFORE trigger, or perhaps by a command in a
3310 : : * volatile function used in the query. In such situations we
3311 : : * should not ignore the MERGE action, but it is equally
3312 : : * unsafe to proceed. We don't want to discard the original
3313 : : * MERGE action while keeping the triggered actions based on
3314 : : * it; and it would be no better to allow the original MERGE
3315 : : * action while discarding the updates that it triggered. So
3316 : : * throwing an error is the only safe course.
3317 : : */
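 : : /*
 : : * The former case, sketched with a hypothetical target t(k, v) and
 : : * a source producing duplicate join keys:
 : : *
 : : * MERGE INTO t USING (VALUES (1, 'a'), (1, 'b')) AS s(k, v)
 : : * ON t.k = s.k
 : : * WHEN MATCHED THEN UPDATE SET v = s.v;
 : : *
 : : * If t has a row with k = 1, the second source row attempts to
 : : * update it again and fails with the error below.
 : : */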
3318 [ # # ]: 0 : if (context->tmfd.cmax != estate->es_output_cid)
3319 [ # # # # ]: 0 : ereport(ERROR,
3320 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3321 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3322 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3323 : :
3324 [ # # ]: 0 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3325 [ # # # # ]: 0 : ereport(ERROR,
3326 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3327 : : /* translator: %s is a SQL command name */
3328 : : errmsg("%s command cannot affect row a second time",
3329 : : "MERGE"),
3330 : : errhint("Ensure that not more than one source row matches any one target row.")));
3331 : :
3332 : : /* This shouldn't happen */
3333 [ # # # # ]: 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3334 : 0 : break;
3335 : :
3336 : : case TM_Deleted:
3337 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
3338 [ # # # # ]: 0 : ereport(ERROR,
3339 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3340 : : errmsg("could not serialize access due to concurrent delete")));
3341 : :
3342 : : /*
3343 : : * If the tuple was already deleted, set matched to false to
3344 : : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3345 : : */
3346 : 0 : *matched = false;
3347 : 0 : goto out;
3348 : :
3349 : : case TM_Updated:
3350 : : {
3351 : 0 : bool was_matched;
3352 : 0 : Relation resultRelationDesc;
3353 : 0 : TupleTableSlot *epqslot,
3354 : : *inputslot;
3355 : 0 : LockTupleMode lockmode;
3356 : :
3357 : : /*
3358 : : * The target tuple was concurrently updated by some other
3359 : : * transaction. If we are currently processing a MATCHED
3360 : : * action, use EvalPlanQual() with the new version of the
3361 : : * tuple and recheck the join qual, to detect a change
3362 : : * from the MATCHED to the NOT MATCHED cases. If we are
3363 : : * already processing a NOT MATCHED BY SOURCE action, we
3364 : : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3365 : : * MATCHED).
3366 : : */
3367 : 0 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3368 : 0 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3369 : 0 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3370 : :
3371 [ # # ]: 0 : if (was_matched)
3372 : 0 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3373 : 0 : resultRelInfo->ri_RangeTableIndex);
3374 : : else
3375 : 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3376 : :
3377 : 0 : result = table_tuple_lock(resultRelationDesc, tupleid,
3378 : 0 : estate->es_snapshot,
3379 : 0 : inputslot, estate->es_output_cid,
3380 : 0 : lockmode, LockWaitBlock,
3381 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3382 : 0 : &context->tmfd);
3383 [ # # # # ]: 0 : switch (result)
3384 : : {
3385 : : case TM_Ok:
3386 : :
3387 : : /*
3388 : : * If the tuple was updated and migrated to
3389 : : * another partition concurrently, the current
3390 : : * MERGE implementation can't follow. There's
3391 : : * probably a better way to handle this case, but
3392 : : * it'd require recognizing the relation to which
3393 : : * the tuple moved, and setting our current
3394 : : * resultRelInfo to that.
3395 : : */
3396 [ # # ]: 0 : if (ItemPointerIndicatesMovedPartitions(tupleid))
3397 [ # # # # ]: 0 : ereport(ERROR,
3398 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3399 : : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3400 : :
3401 : : /*
3402 : : * If this was a MATCHED case, use EvalPlanQual()
3403 : : * to recheck the join condition.
3404 : : */
3405 [ # # ]: 0 : if (was_matched)
3406 : : {
3407 : 0 : epqslot = EvalPlanQual(epqstate,
3408 : 0 : resultRelationDesc,
3409 : 0 : resultRelInfo->ri_RangeTableIndex,
3410 : 0 : inputslot);
3411 : :
3412 : : /*
3413 : : * If the subplan didn't return a tuple, then
3414 : : * we must be dealing with an inner join for
3415 : : * which the join condition no longer matches.
3416 : : * This can only happen if there are no NOT
3417 : : * MATCHED actions, and so there is nothing
3418 : : * more to do.
3419 : : */
3420 [ # # # # ]: 0 : if (TupIsNull(epqslot))
3421 : 0 : goto out;
3422 : :
3423 : : /*
3424 : : * If we got a NULL ctid from the subplan, the
3425 : : * join quals no longer pass and we switch to
3426 : : * the NOT MATCHED BY SOURCE case.
3427 : : */
3428 : 0 : (void) ExecGetJunkAttribute(epqslot,
3429 : 0 : resultRelInfo->ri_RowIdAttNo,
3430 : : &isNull);
3431 [ # # ]: 0 : if (isNull)
3432 : 0 : *matched = false;
3433 : :
3434 : : /*
3435 : : * Otherwise, recheck the join quals to see if
3436 : : * we need to switch to the NOT MATCHED BY
3437 : : * SOURCE case.
3438 : : */
3439 [ # # ]: 0 : if (resultRelInfo->ri_needLockTagTuple)
3440 : : {
3441 [ # # ]: 0 : if (ItemPointerIsValid(&lockedtid))
3442 : 0 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3443 : : InplaceUpdateTupleLock);
3444 : 0 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3445 : : InplaceUpdateTupleLock);
3446 : 0 : lockedtid = *tupleid;
3447 : 0 : }
3448 : :
3449 [ # # # # ]: 0 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3450 : 0 : tupleid,
3451 : : SnapshotAny,
3452 : 0 : resultRelInfo->ri_oldTupleSlot))
3453 [ # # # # ]: 0 : elog(ERROR, "failed to fetch the target tuple");
3454 : :
3455 [ # # ]: 0 : if (*matched)
3456 : 0 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3457 : 0 : econtext);
3458 : :
3459 : : /* Switch lists, if necessary */
3460 [ # # ]: 0 : if (!*matched)
3461 : : {
3462 : 0 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3463 : :
3464 : : /*
3465 : : * If we have both NOT MATCHED BY SOURCE
3466 : : * and NOT MATCHED BY TARGET actions (a
3467 : : * full join between the source and target
3468 : : * relations), the single previously
3469 : : * matched tuple from the outer plan node
3470 : : * is treated as two not matched tuples,
3471 : : * in the same way as if they had not
3472 : : * matched to start with. Therefore, we
3473 : : * must adjust the outer plan node's tuple
3474 : : * count, if we're instrumenting the
3475 : : * query, to get the correct "skipped" row
3476 : : * count --- see show_modifytable_info().
3477 : : */
3478 [ # # ]: 0 : if (outerPlanState(mtstate)->instrument &&
3479 [ # # # # ]: 0 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] &&
3480 : 0 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET])
3481 : 0 : InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3482 : 0 : }
3483 : 0 : }
3484 : :
3485 : : /*
3486 : : * Loop back and process the MATCHED or NOT
3487 : : * MATCHED BY SOURCE actions from the start.
3488 : : */
3489 : 0 : goto lmerge_matched;
3490 : :
3491 : : case TM_Deleted:
3492 : :
3493 : : /*
3494 : : * tuple already deleted; tell caller to run NOT
3495 : : * MATCHED [BY TARGET] actions
3496 : : */
3497 : 0 : *matched = false;
3498 : 0 : goto out;
3499 : :
3500 : : case TM_SelfModified:
3501 : :
3502 : : /*
3503 : : * This can be reached when following an update
3504 : : * chain from a tuple updated by another session,
3505 : : * reaching a tuple that was already updated or
3506 : : * deleted by the current command, or by a later
3507 : : * command in the current transaction. As above,
3508 : : * this should always be treated as an error.
3509 : : */
3510 [ # # ]: 0 : if (context->tmfd.cmax != estate->es_output_cid)
3511 [ # # # # ]: 0 : ereport(ERROR,
3512 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3513 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3514 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3515 : :
3516 [ # # ]: 0 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3517 [ # # # # ]: 0 : ereport(ERROR,
3518 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3519 : : /* translator: %s is a SQL command name */
3520 : : errmsg("%s command cannot affect row a second time",
3521 : : "MERGE"),
3522 : : errhint("Ensure that not more than one source row matches any one target row.")));
3523 : :
3524 : : /* This shouldn't happen */
3525 [ # # # # ]: 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3526 : 0 : goto out;
3527 : :
3528 : : default:
3529 : : /* see table_tuple_lock call in ExecDelete() */
3530 [ # # # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3531 : : result);
3532 : 0 : goto out;
3533 : : }
3534 : 0 : }
3535 : :
3536 : : case TM_Invisible:
3537 : : case TM_WouldBlock:
3538 : : case TM_BeingModified:
3539 : : /* these should not occur */
3540 [ # # # # ]: 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3541 : 0 : break;
3542 : : }
3543 : :
3544 : : /* Process RETURNING if present */
3545 [ # # ]: 0 : if (resultRelInfo->ri_projectReturning)
3546 : : {
3547 [ # # # # ]: 0 : switch (commandType)
3548 : : {
3549 : : case CMD_UPDATE:
3550 : 0 : rslot = ExecProcessReturning(context,
3551 : 0 : resultRelInfo,
3552 : : CMD_UPDATE,
3553 : 0 : resultRelInfo->ri_oldTupleSlot,
3554 : 0 : newslot,
3555 : 0 : context->planSlot);
3556 : 0 : break;
3557 : :
3558 : : case CMD_DELETE:
3559 : 0 : rslot = ExecProcessReturning(context,
3560 : 0 : resultRelInfo,
3561 : : CMD_DELETE,
3562 : 0 : resultRelInfo->ri_oldTupleSlot,
3563 : : NULL,
3564 : 0 : context->planSlot);
3565 : 0 : break;
3566 : :
3567 : : case CMD_NOTHING:
3568 : : break;
3569 : :
3570 : : default:
3571 [ # # # # ]: 0 : elog(ERROR, "unrecognized commandType: %d",
3572 : : (int) commandType);
3573 : 0 : }
3574 : 0 : }
3575 : :
3576 : : /*
3577 : : * We've activated one of the WHEN clauses, so we don't search
 3578 : : * further. This is required behavior, not an optimization.
3579 : : */
3580 : 0 : break;
3581 [ # # ]: 0 : }
3582 : :
3583 : : /*
3584 : : * Successfully executed an action or no qualifying action was found.
3585 : : */
3586 : : out:
3587 [ # # ]: 0 : if (ItemPointerIsValid(&lockedtid))
3588 : 0 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3589 : : InplaceUpdateTupleLock);
3590 : 0 : return rslot;
3591 : 0 : }
3592 : :
3593 : : /*
3594 : : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3595 : : */
3596 : : static TupleTableSlot *
3597 : 0 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3598 : : bool canSetTag)
3599 : : {
3600 : 0 : ModifyTableState *mtstate = context->mtstate;
3601 : 0 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3602 : 0 : List *actionStates;
3603 : 0 : TupleTableSlot *rslot = NULL;
3604 : 0 : ListCell *l;
3605 : :
3606 : : /*
3607 : : * For INSERT actions, the root relation's merge action is OK since the
3608 : : * INSERT's targetlist and the WHEN conditions can only refer to the
3609 : : * source relation and hence it does not matter which result relation we
3610 : : * work with.
3611 : : *
3612 : : * XXX does this mean that we can avoid creating copies of actionStates on
3613 : : * partitioned tables, for not-matched actions?
3614 : : */
3615 : 0 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3616 : :
3617 : : /*
3618 : : * Make source tuple available to ExecQual and ExecProject. We don't need
3619 : : * the target tuple, since the WHEN quals and targetlist can't refer to
3620 : : * the target columns.
3621 : : */
3622 : 0 : econtext->ecxt_scantuple = NULL;
3623 : 0 : econtext->ecxt_innertuple = context->planSlot;
3624 : 0 : econtext->ecxt_outertuple = NULL;
3625 : :
3626 [ # # # # # # ]: 0 : foreach(l, actionStates)
3627 : : {
3628 : 0 : MergeActionState *action = (MergeActionState *) lfirst(l);
3629 : 0 : CmdType commandType = action->mas_action->commandType;
3630 : 0 : TupleTableSlot *newslot;
3631 : :
3632 : : /*
3633 : : * Test condition, if any.
3634 : : *
3635 : : * In the absence of any condition, we perform the action
3636 : : * unconditionally (no need to check separately since ExecQual() will
3637 : : * return true if there are no conditions to evaluate).
3638 : : */
3639 [ # # ]: 0 : if (!ExecQual(action->mas_whenqual, econtext))
3640 : 0 : continue;
3641 : :
3642 : : /* Perform stated action */
3643 [ # # # ]: 0 : switch (commandType)
3644 : : {
3645 : : case CMD_INSERT:
3646 : :
3647 : : /*
3648 : : * Project the tuple. In case of a partitioned table, the
3649 : : * projection was already built to use the root's descriptor,
3650 : : * so we don't need to map the tuple here.
3651 : : */
3652 : 0 : newslot = ExecProject(action->mas_proj);
3653 : 0 : mtstate->mt_merge_action = action;
3654 : :
3655 : 0 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3656 : 0 : newslot, canSetTag, NULL, NULL);
3657 : 0 : mtstate->mt_merge_inserted += 1;
3658 : 0 : break;
3659 : : case CMD_NOTHING:
3660 : : /* Do nothing */
3661 : : break;
3662 : : default:
3663 [ # # # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3664 : 0 : }
3665 : :
3666 : : /*
3667 : : * We've activated one of the WHEN clauses, so we don't search
 3668 : : * further. This is required behavior, not an optimization.
3669 : : */
3670 : 0 : break;
3671 [ # # ]: 0 : }
3672 : :
3673 : 0 : return rslot;
3674 : 0 : }
3675 : :
3676 : : /*
3677 : : * Initialize state for execution of MERGE.
3678 : : */
3679 : : void
3680 : 0 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3681 : : {
3682 : 0 : List *mergeActionLists = mtstate->mt_mergeActionLists;
3683 : 0 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
3684 : 0 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3685 : 0 : ResultRelInfo *resultRelInfo;
3686 : 0 : ExprContext *econtext;
3687 : 0 : ListCell *lc;
3688 : 0 : int i;
3689 : :
3690 [ # # ]: 0 : if (mergeActionLists == NIL)
3691 : 0 : return;
3692 : :
3693 : 0 : mtstate->mt_merge_subcommands = 0;
3694 : :
3695 [ # # ]: 0 : if (mtstate->ps.ps_ExprContext == NULL)
3696 : 0 : ExecAssignExprContext(estate, &mtstate->ps);
3697 : 0 : econtext = mtstate->ps.ps_ExprContext;
3698 : :
3699 : : /*
 3700 : : * Create a MergeActionState for each action on the mergeActionList and
 3701 : : * add it to the appropriate list of actions for its match kind.
3702 : : *
3703 : : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3704 : : * anything here, do so there too.
3705 : : */
3706 : 0 : i = 0;
3707 [ # # # # # # ]: 0 : foreach(lc, mergeActionLists)
3708 : : {
3709 : 0 : List *mergeActionList = lfirst(lc);
3710 : 0 : Node *joinCondition;
3711 : 0 : TupleDesc relationDesc;
3712 : 0 : ListCell *l;
3713 : :
3714 : 0 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
3715 : 0 : resultRelInfo = mtstate->resultRelInfo + i;
3716 : 0 : i++;
3717 : 0 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3718 : :
3719 : : /* initialize slots for MERGE fetches from this rel */
3720 [ # # ]: 0 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3721 : 0 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3722 : :
3723 : : /* initialize state for join condition checking */
3724 : 0 : resultRelInfo->ri_MergeJoinCondition =
3725 : 0 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3726 : :
3727 [ # # # # # # ]: 0 : foreach(l, mergeActionList)
3728 : : {
3729 : 0 : MergeAction *action = (MergeAction *) lfirst(l);
3730 : 0 : MergeActionState *action_state;
3731 : 0 : TupleTableSlot *tgtslot;
3732 : 0 : TupleDesc tgtdesc;
3733 : :
3734 : : /*
3735 : : * Build action merge state for this rel. (For partitions,
3736 : : * equivalent code exists in ExecInitPartitionInfo.)
3737 : : */
3738 : 0 : action_state = makeNode(MergeActionState);
3739 : 0 : action_state->mas_action = action;
3740 : 0 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3741 : 0 : &mtstate->ps);
3742 : :
3743 : : /*
3744 : : * We create three lists - one for each MergeMatchKind - and stick
3745 : : * the MergeActionState into the appropriate list.
3746 : : */
3747 : 0 : resultRelInfo->ri_MergeActions[action->matchKind] =
3748 : 0 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3749 : 0 : action_state);
3750 : :
3751 [ # # # # # ]: 0 : switch (action->commandType)
3752 : : {
3753 : : case CMD_INSERT:
3754 : : /* INSERT actions always use rootRelInfo */
3755 : 0 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3756 : 0 : action->targetList);
3757 : :
3758 : : /*
3759 : : * If the MERGE targets a partitioned table, any INSERT
3760 : : * actions must be routed through it, not the child
3761 : : * relations. Initialize the routing struct and the root
3762 : : * table's "new" tuple slot for that, if not already done.
3763 : : * The projection we prepare, for all relations, uses the
3764 : : * root relation descriptor, and targets the plan's root
3765 : : * slot. (This is consistent with the fact that we
3766 : : * checked the plan output to match the root relation,
3767 : : * above.)
3768 : : */
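 : : /*
 : : * E.g. for a hypothetical "MERGE INTO parted p USING s ON p.k = s.k
 : : * WHEN NOT MATCHED THEN INSERT VALUES (s.k, s.v)", each inserted
 : : * row is routed through the partitioned parent to the matching leaf
 : : * partition using the routing state initialized here.
 : : */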
3769 [ # # ]: 0 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3770 : : RELKIND_PARTITIONED_TABLE)
3771 : : {
3772 [ # # ]: 0 : if (mtstate->mt_partition_tuple_routing == NULL)
3773 : : {
3774 : : /*
3775 : : * Initialize planstate for routing if not already
3776 : : * done.
3777 : : *
3778 : : * Note that the slot is managed as a standalone
3779 : : * slot belonging to ModifyTableState, so we pass
3780 : : * NULL for the 2nd argument.
3781 : : */
3782 : 0 : mtstate->mt_root_tuple_slot =
3783 : 0 : table_slot_create(rootRelInfo->ri_RelationDesc,
3784 : : NULL);
3785 : 0 : mtstate->mt_partition_tuple_routing =
3786 : 0 : ExecSetupPartitionTupleRouting(estate,
3787 : 0 : rootRelInfo->ri_RelationDesc);
3788 : 0 : }
3789 : 0 : tgtslot = mtstate->mt_root_tuple_slot;
3790 : 0 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3791 : 0 : }
3792 : : else
3793 : : {
3794 : : /*
3795 : : * If the MERGE targets an inherited table, we insert
3796 : : * into the root table, so we must initialize its
3797 : : * "new" tuple slot, if not already done, and use its
3798 : : * relation descriptor for the projection.
3799 : : *
3800 : : * For non-inherited tables, rootRelInfo and
3801 : : * resultRelInfo are the same, and the "new" tuple
3802 : : * slot will already have been initialized.
3803 : : */
3804 [ # # ]: 0 : if (rootRelInfo->ri_newTupleSlot == NULL)
3805 : 0 : rootRelInfo->ri_newTupleSlot =
3806 : 0 : table_slot_create(rootRelInfo->ri_RelationDesc,
3807 : 0 : &estate->es_tupleTable);
3808 : :
3809 : 0 : tgtslot = rootRelInfo->ri_newTupleSlot;
3810 : 0 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3811 : : }
3812 : :
3813 : 0 : action_state->mas_proj =
3814 : 0 : ExecBuildProjectionInfo(action->targetList, econtext,
3815 : 0 : tgtslot,
3816 : 0 : &mtstate->ps,
3817 : 0 : tgtdesc);
3818 : :
3819 : 0 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3820 : 0 : break;
3821 : : case CMD_UPDATE:
3822 : 0 : action_state->mas_proj =
3823 : 0 : ExecBuildUpdateProjection(action->targetList,
3824 : : true,
3825 : 0 : action->updateColnos,
3826 : 0 : relationDesc,
3827 : 0 : econtext,
3828 : 0 : resultRelInfo->ri_newTupleSlot,
3829 : 0 : &mtstate->ps);
3830 : 0 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3831 : 0 : break;
3832 : : case CMD_DELETE:
3833 : 0 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3834 : 0 : break;
3835 : : case CMD_NOTHING:
3836 : : break;
3837 : : default:
3838 [ # # # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3839 : 0 : break;
3840 : : }
3841 : 0 : }
3842 : 0 : }
3843 : :
3844 : : /*
3845 : : * If the MERGE targets an inherited table, any INSERT actions will use
3846 : : * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
3847 : : * Therefore we must initialize its WITH CHECK OPTION constraints and
3848 : : * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
3849 : : * entries.
3850 : : *
3851 : : * Note that the planner does not build a withCheckOptionList or
3852 : : * returningList for the root relation, but as in ExecInitPartitionInfo,
3853 : : * we can use the first resultRelInfo entry as a reference to calculate
3854 : : * the attno's for the root table.
3855 : : */
3856 [ # # ]: 0 : if (rootRelInfo != mtstate->resultRelInfo &&
3857 [ # # # # ]: 0 : rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
3858 : 0 : (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
3859 : : {
3860 : 0 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3861 : 0 : Relation rootRelation = rootRelInfo->ri_RelationDesc;
3862 : 0 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
3863 : 0 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
3864 : 0 : AttrMap *part_attmap = NULL;
3865 : 0 : bool found_whole_row;
3866 : :
3867 [ # # ]: 0 : if (node->withCheckOptionLists != NIL)
3868 : : {
3869 : 0 : List *wcoList;
3870 : 0 : List *wcoExprs = NIL;
3871 : :
3872 : : /* There should be as many WCO lists as result rels */
3873 [ # # ]: 0 : Assert(list_length(node->withCheckOptionLists) ==
3874 : : list_length(node->resultRelations));
3875 : :
3876 : : /*
3877 : : * Use the first WCO list as a reference. In the most common case,
3878 : : * this will be for the same relation as rootRelInfo, and so there
3879 : : * will be no need to adjust its attno's.
3880 : : */
3881 : 0 : wcoList = linitial(node->withCheckOptionLists);
3882 [ # # ]: 0 : if (rootRelation != firstResultRel)
3883 : : {
3884 : : /* Convert any Vars in it to contain the root's attno's */
3885 : 0 : part_attmap =
3886 : 0 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3887 : 0 : RelationGetDescr(firstResultRel),
3888 : : false);
3889 : :
3890 : 0 : wcoList = (List *)
3891 : 0 : map_variable_attnos((Node *) wcoList,
3892 : 0 : firstVarno, 0,
3893 : 0 : part_attmap,
3894 : 0 : RelationGetForm(rootRelation)->reltype,
3895 : : &found_whole_row);
3896 : 0 : }
3897 : :
3898 [ # # # # # # ]: 0 : foreach(lc, wcoList)
3899 : : {
3900 : 0 : WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
3901 : 0 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
3902 : 0 : &mtstate->ps);
3903 : :
3904 : 0 : wcoExprs = lappend(wcoExprs, wcoExpr);
3905 : 0 : }
3906 : :
3907 : 0 : rootRelInfo->ri_WithCheckOptions = wcoList;
3908 : 0 : rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
3909 : 0 : }
3910 : :
3911 [ # # ]: 0 : if (node->returningLists != NIL)
3912 : : {
3913 : 0 : List *returningList;
3914 : :
3915 : : /* There should be as many returning lists as result rels */
3916 [ # # ]: 0 : Assert(list_length(node->returningLists) ==
3917 : : list_length(node->resultRelations));
3918 : :
3919 : : /*
3920 : : * Use the first returning list as a reference. In the most common
3921 : : * case, this will be for the same relation as rootRelInfo, and so
3922 : : * there will be no need to adjust its attno's.
3923 : : */
3924 : 0 : returningList = linitial(node->returningLists);
3925 [ # # ]: 0 : if (rootRelation != firstResultRel)
3926 : : {
3927 : : /* Convert any Vars in it to contain the root's attno's */
3928 [ # # ]: 0 : if (part_attmap == NULL)
3929 : 0 : part_attmap =
3930 : 0 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3931 : 0 : RelationGetDescr(firstResultRel),
3932 : : false);
3933 : :
3934 : 0 : returningList = (List *)
3935 : 0 : map_variable_attnos((Node *) returningList,
3936 : 0 : firstVarno, 0,
3937 : 0 : part_attmap,
3938 : 0 : RelationGetForm(rootRelation)->reltype,
3939 : : &found_whole_row);
3940 : 0 : }
3941 : 0 : rootRelInfo->ri_returningList = returningList;
3942 : :
3943 : : /* Initialize the RETURNING projection */
3944 : 0 : rootRelInfo->ri_projectReturning =
3945 : 0 : ExecBuildProjectionInfo(returningList, econtext,
3946 : 0 : mtstate->ps.ps_ResultTupleSlot,
3947 : 0 : &mtstate->ps,
3948 : 0 : RelationGetDescr(rootRelation));
3949 : 0 : }
3950 : 0 : }
3951 [ # # ]: 0 : }
3952 : :
3953 : : /*
3954 : : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3955 : : *
3956 : : * We mark 'projectNewInfoValid' even though the projections themselves
3957 : : * are not initialized here.
3958 : : */
3959 : : void
3960 : 0 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3961 : : ResultRelInfo *resultRelInfo)
3962 : : {
3963 : 0 : EState *estate = mtstate->ps.state;
3964 : :
3965 [ # # ]: 0 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3966 : :
3967 : 0 : resultRelInfo->ri_oldTupleSlot =
3968 : 0 : table_slot_create(resultRelInfo->ri_RelationDesc,
3969 : 0 : &estate->es_tupleTable);
3970 : 0 : resultRelInfo->ri_newTupleSlot =
3971 : 0 : table_slot_create(resultRelInfo->ri_RelationDesc,
3972 : 0 : &estate->es_tupleTable);
3973 : 0 : resultRelInfo->ri_projectNewInfoValid = true;
3974 : 0 : }
3975 : :
3976 : : /*
3977 : : * Process BEFORE EACH STATEMENT triggers
3978 : : */
3979 : : static void
3980 : 0 : fireBSTriggers(ModifyTableState *node)
3981 : : {
3982 : 0 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3983 : 0 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3984 : :
3985 [ # # # # # ]: 0 : switch (node->operation)
3986 : : {
3987 : : case CMD_INSERT:
3988 : 0 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3989 [ # # ]: 0 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3990 : 0 : ExecBSUpdateTriggers(node->ps.state,
3991 : 0 : resultRelInfo);
3992 : 0 : break;
3993 : : case CMD_UPDATE:
3994 : 0 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3995 : 0 : break;
3996 : : case CMD_DELETE:
3997 : 0 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3998 : 0 : break;
3999 : : case CMD_MERGE:
4000 [ # # ]: 0 : if (node->mt_merge_subcommands & MERGE_INSERT)
4001 : 0 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4002 [ # # ]: 0 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4003 : 0 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4004 [ # # ]: 0 : if (node->mt_merge_subcommands & MERGE_DELETE)
4005 : 0 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4006 : 0 : break;
4007 : : default:
4008 [ # # # # ]: 0 : elog(ERROR, "unknown operation");
4009 : 0 : break;
4010 : : }
4011 : 0 : }
4012 : :
4013 : : /*
4014 : : * Process AFTER EACH STATEMENT triggers
4015 : : */
4016 : : static void
4017 : 0 : fireASTriggers(ModifyTableState *node)
4018 : : {
4019 : 0 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4020 : 0 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4021 : :
4022 [ # # # # # ]: 0 : switch (node->operation)
4023 : : {
4024 : : case CMD_INSERT:
4025 [ # # ]: 0 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4026 : 0 : ExecASUpdateTriggers(node->ps.state,
4027 : 0 : resultRelInfo,
4028 : 0 : node->mt_oc_transition_capture);
4029 : 0 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4030 : 0 : node->mt_transition_capture);
4031 : 0 : break;
4032 : : case CMD_UPDATE:
4033 : 0 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4034 : 0 : node->mt_transition_capture);
4035 : 0 : break;
4036 : : case CMD_DELETE:
4037 : 0 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4038 : 0 : node->mt_transition_capture);
4039 : 0 : break;
4040 : : case CMD_MERGE:
4041 [ # # ]: 0 : if (node->mt_merge_subcommands & MERGE_DELETE)
4042 : 0 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4043 : 0 : node->mt_transition_capture);
4044 [ # # ]: 0 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4045 : 0 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4046 : 0 : node->mt_transition_capture);
4047 [ # # ]: 0 : if (node->mt_merge_subcommands & MERGE_INSERT)
4048 : 0 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4049 : 0 : node->mt_transition_capture);
4050 : 0 : break;
4051 : : default:
4052 [ # # # # ]: 0 : elog(ERROR, "unknown operation");
4053 : 0 : break;
4054 : : }
4055 : 0 : }
4056 : :
4057 : : /*
4058 : : * Set up the state needed for collecting transition tuples for AFTER
4059 : : * triggers.
4060 : : */
4061 : : static void
4062 : 0 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4063 : : {
4064 : 0 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
4065 : 0 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4066 : :
4067 : : /* Check for transition tables on the directly targeted relation. */
4068 : 0 : mtstate->mt_transition_capture =
4069 : 0 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4070 : 0 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4071 : 0 : mtstate->operation);
4072 [ # # # # ]: 0 : if (plan->operation == CMD_INSERT &&
4073 : 0 : plan->onConflictAction == ONCONFLICT_UPDATE)
4074 : 0 : mtstate->mt_oc_transition_capture =
4075 : 0 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4076 : 0 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4077 : : CMD_UPDATE);
4078 : 0 : }
4079 : :
4080 : : /*
4081 : : * ExecPrepareTupleRouting --- prepare for routing one tuple
4082 : : *
4083 : : * Determine the partition in which the tuple in slot is to be inserted,
4084 : : * and return its ResultRelInfo in *partRelInfo. The return value is
4085 : : * a slot holding the tuple of the partition rowtype.
4086 : : *
4087 : : * This also sets the transition table information in mtstate based on the
4088 : : * selected partition.
4089 : : */
4090 : : static TupleTableSlot *
4091 : 0 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4092 : : EState *estate,
4093 : : PartitionTupleRouting *proute,
4094 : : ResultRelInfo *targetRelInfo,
4095 : : TupleTableSlot *slot,
4096 : : ResultRelInfo **partRelInfo)
4097 : : {
4098 : 0 : ResultRelInfo *partrel;
4099 : 0 : TupleConversionMap *map;
4100 : :
4101 : : /*
4102 : : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
4103 : : * not find a valid partition for the tuple in 'slot' then an error is
4104 : : * raised. An error may also be raised if the found partition is not a
 4105 : : * valid target for INSERTs. This check is required since an UPDATE
 4106 : : * that moves a row to another partition becomes a DELETE+INSERT.
4107 : : */
4108 : 0 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4109 : :
4110 : : /*
4111 : : * If we're capturing transition tuples, we might need to convert from the
 4112 : : * partition rowtype to the root partitioned table's rowtype. But if there
4113 : : * are no BEFORE triggers on the partition that could change the tuple, we
4114 : : * can just remember the original unconverted tuple to avoid a needless
4115 : : * round trip conversion.
4116 : : */
4117 [ # # ]: 0 : if (mtstate->mt_transition_capture != NULL)
4118 : : {
4119 : 0 : bool has_before_insert_row_trig;
4120 : :
4121 [ # # ]: 0 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4122 : 0 : partrel->ri_TrigDesc->trig_insert_before_row);
4123 : :
4124 : 0 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4125 [ # # ]: 0 : !has_before_insert_row_trig ? slot : NULL;
4126 : 0 : }
4127 : :
4128 : : /*
4129 : : * Convert the tuple, if necessary.
4130 : : */
4131 : 0 : map = ExecGetRootToChildMap(partrel, estate);
4132 [ # # ]: 0 : if (map != NULL)
4133 : : {
4134 : 0 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4135 : :
4136 : 0 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4137 : 0 : }
4138 : :
4139 : 0 : *partRelInfo = partrel;
4140 : 0 : return slot;
4141 : 0 : }
4142 : :
4143 : : /* ----------------------------------------------------------------
4144 : : * ExecModifyTable
4145 : : *
4146 : : * Perform table modifications as required, and return RETURNING results
4147 : : * if needed.
4148 : : * ----------------------------------------------------------------
4149 : : */
4150 : : static TupleTableSlot *
4151 : 0 : ExecModifyTable(PlanState *pstate)
4152 : : {
4153 : 0 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4154 : 0 : ModifyTableContext context;
4155 : 0 : EState *estate = node->ps.state;
4156 : 0 : CmdType operation = node->operation;
4157 : 0 : ResultRelInfo *resultRelInfo;
4158 : 0 : PlanState *subplanstate;
4159 : 0 : TupleTableSlot *slot;
4160 : 0 : TupleTableSlot *oldSlot;
4161 : 0 : ItemPointerData tuple_ctid;
4162 : 0 : HeapTupleData oldtupdata;
4163 : 0 : HeapTuple oldtuple;
4164 : 0 : ItemPointer tupleid;
4165 : 0 : bool tuplock;
4166 : :
4167 [ # # ]: 0 : CHECK_FOR_INTERRUPTS();
4168 : :
4169 : : /*
4170 : : * This should NOT get called during EvalPlanQual; we should have passed a
4171 : : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4172 : : * Assert because this condition is easy to miss in testing. (Note:
4173 : : * although ModifyTable should not get executed within an EvalPlanQual
4174 : : * operation, we do have to allow it to be initialized and shut down in
4175 : : * case it is within a CTE subplan. Hence this test must be here, not in
4176 : : * ExecInitModifyTable.)
4177 : : */
4178 [ # # ]: 0 : if (estate->es_epq_active != NULL)
4179 [ # # # # ]: 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4180 : :
4181 : : /*
4182 : : * If we've already completed processing, don't try to do more. We need
4183 : : * this test because ExecPostprocessPlan might call us an extra time, and
4184 : : * our subplan's nodes aren't necessarily robust against being called
4185 : : * extra times.
4186 : : */
4187 [ # # ]: 0 : if (node->mt_done)
4188 : 0 : return NULL;
4189 : :
4190 : : /*
4191 : : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4192 : : */
4193 [ # # ]: 0 : if (node->fireBSTriggers)
4194 : : {
4195 : 0 : fireBSTriggers(node);
4196 : 0 : node->fireBSTriggers = false;
4197 : 0 : }
4198 : :
4199 : : /* Preload local variables */
4200 : 0 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4201 : 0 : subplanstate = outerPlanState(node);
4202 : :
4203 : : /* Set global context */
4204 : 0 : context.mtstate = node;
4205 : 0 : context.epqstate = &node->mt_epqstate;
4206 : 0 : context.estate = estate;
4207 : :
4208 : : /*
4209 : : * Fetch rows from subplan, and execute the required table modification
4210 : : * for each row.
4211 : : */
4212 : 0 : for (;;)
4213 : : {
4214 : : /*
4215 : : * Reset the per-output-tuple exprcontext. This is needed because
4216 : : * triggers expect to use that context as workspace. It's a bit ugly
4217 : : * to do this below the top level of the plan, however. We might need
4218 : : * to rethink this later.
4219 : : */
4220 [ # # ]: 0 : ResetPerTupleExprContext(estate);
4221 : :
4222 : : /*
4223 : : * Reset per-tuple memory context used for processing on conflict and
4224 : : * returning clauses, to free any expression evaluation storage
4225 : : * allocated in the previous cycle.
4226 : : */
4227 [ # # ]: 0 : if (pstate->ps_ExprContext)
4228 : 0 : ResetExprContext(pstate->ps_ExprContext);
4229 : :
4230 : : /*
4231 : : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4232 : : * to execute, do so now --- see the comments in ExecMerge().
4233 : : */
4234 [ # # ]: 0 : if (node->mt_merge_pending_not_matched != NULL)
4235 : : {
4236 : 0 : context.planSlot = node->mt_merge_pending_not_matched;
4237 : 0 : context.cpDeletedSlot = NULL;
4238 : :
4239 : 0 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4240 : 0 : node->canSetTag);
4241 : :
4242 : : /* Clear the pending action */
4243 : 0 : node->mt_merge_pending_not_matched = NULL;
4244 : :
4245 : : /*
4246 : : * If we got a RETURNING result, return it to the caller. We'll
4247 : : * continue the work on next call.
4248 : : */
4249 [ # # ]: 0 : if (slot)
4250 : 0 : return slot;
4251 : :
4252 : 0 : continue; /* continue with the next tuple */
4253 : : }
4254 : :
4255 : : /* Fetch the next row from subplan */
4256 : 0 : context.planSlot = ExecProcNode(subplanstate);
4257 : 0 : context.cpDeletedSlot = NULL;
4258 : :
4259 : : /* No more tuples to process? */
4260 [ # # # # ]: 0 : if (TupIsNull(context.planSlot))
4261 : 0 : break;
4262 : :
4263 : : /*
4264 : : * When there are multiple result relations, each tuple contains a
4265 : : * junk column that gives the OID of the rel from which it came.
4266 : : * Extract it and select the correct result relation.
4267 : : */
4268 [ # # ]: 0 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4269 : : {
4270 : 0 : Datum datum;
4271 : 0 : bool isNull;
4272 : 0 : Oid resultoid;
4273 : :
4274 : 0 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4275 : : &isNull);
4276 [ # # ]: 0 : if (isNull)
4277 : : {
4278 : : /*
4279 : : * For commands other than MERGE, any tuples having InvalidOid
 4280 : : * for tableoid are errors. For MERGE, such tuples may need to be
 4281 : : * handled by WHEN NOT MATCHED clauses, if any, so do that.
4282 : : *
4283 : : * Note that we use the node's toplevel resultRelInfo, not any
4284 : : * specific partition's.
4285 : : */
4286 [ # # ]: 0 : if (operation == CMD_MERGE)
4287 : : {
4288 : 0 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4289 : :
4290 : 0 : slot = ExecMerge(&context, node->resultRelInfo,
4291 : 0 : NULL, NULL, node->canSetTag);
4292 : :
4293 : : /*
4294 : : * If we got a RETURNING result, return it to the caller.
4295 : : * We'll continue the work on next call.
4296 : : */
4297 [ # # ]: 0 : if (slot)
4298 : 0 : return slot;
4299 : :
4300 : 0 : continue; /* continue with the next tuple */
4301 : : }
4302 : :
4303 [ # # # # ]: 0 : elog(ERROR, "tableoid is NULL");
4304 : 0 : }
4305 : 0 : resultoid = DatumGetObjectId(datum);
4306 : :
4307 : : /* If it's not the same as last time, we need to locate the rel */
4308 [ # # ]: 0 : if (resultoid != node->mt_lastResultOid)
4309 : 0 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4310 : : false, true);
4311 [ # # # ]: 0 : }
4312 : :
4313 : : /*
4314 : : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4315 : : * here is compute the RETURNING expressions.
4316 : : */
4317 [ # # ]: 0 : if (resultRelInfo->ri_usesFdwDirectModify)
4318 : : {
4319 [ # # ]: 0 : Assert(resultRelInfo->ri_projectReturning);
4320 : :
4321 : : /*
4322 : : * A scan slot containing the data that was actually inserted,
4323 : : * updated or deleted has already been made available to
4324 : : * ExecProcessReturning by IterateDirectModify, so no need to
4325 : : * provide it here. The individual old and new slots are not
4326 : : * needed, since direct-modify is disabled if the RETURNING list
4327 : : * refers to OLD/NEW values.
4328 : : */
4329 [ # # ]: 0 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4330 : : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4331 : :
4332 : 0 : slot = ExecProcessReturning(&context, resultRelInfo, operation,
4333 : 0 : NULL, NULL, context.planSlot);
4334 : :
4335 : 0 : return slot;
4336 : : }
4337 : :
4338 : 0 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4339 : 0 : slot = context.planSlot;
4340 : :
4341 : 0 : tupleid = NULL;
4342 : 0 : oldtuple = NULL;
4343 : :
4344 : : /*
4345 : : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4346 : : * to be updated/deleted/merged. For a heap relation, that's a TID;
4347 : : * otherwise we may have a wholerow junk attr that carries the old
4348 : : * tuple in toto. Keep this in step with the part of
4349 : : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4350 : : */
4351 [ # # # # : 0 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
# # ]
4352 : 0 : operation == CMD_MERGE)
4353 : : {
4354 : 0 : char relkind;
4355 : 0 : Datum datum;
4356 : 0 : bool isNull;
4357 : :
4358 : 0 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4359 [ # # ]: 0 : if (relkind == RELKIND_RELATION ||
4360 [ # # # # ]: 0 : relkind == RELKIND_MATVIEW ||
4361 : 0 : relkind == RELKIND_PARTITIONED_TABLE)
4362 : : {
4363 : : /*
4364 : : * ri_RowIdAttNo refers to a ctid attribute. See the comment
4365 : : * in ExecInitModifyTable().
4366 : : */
4367 [ # # # # ]: 0 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo) ||
4368 : : relkind == RELKIND_PARTITIONED_TABLE);
4369 : 0 : datum = ExecGetJunkAttribute(slot,
4370 : 0 : resultRelInfo->ri_RowIdAttNo,
4371 : : &isNull);
4372 : :
4373 : : /*
4374 : : * For commands other than MERGE, any tuples having a null row
4375 : : * identifier are errors. For MERGE, such tuples may need to be
4376 : : * handled by WHEN NOT MATCHED actions, if any, so do that.
4377 : : *
4378 : : * Note that we use the node's toplevel resultRelInfo, not any
4379 : : * specific partition's.
4380 : : */
4381 [ # # ]: 0 : if (isNull)
4382 : : {
4383 [ # # ]: 0 : if (operation == CMD_MERGE)
4384 : : {
4385 : 0 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4386 : :
4387 : 0 : slot = ExecMerge(&context, node->resultRelInfo,
4388 : 0 : NULL, NULL, node->canSetTag);
4389 : :
4390 : : /*
4391 : : * If we got a RETURNING result, return it to the
4392 : : * caller. We'll continue the work on next call.
4393 : : */
4394 [ # # ]: 0 : if (slot)
4395 : 0 : return slot;
4396 : :
4397 : 0 : continue; /* continue with the next tuple */
4398 : : }
4399 : :
4400 [ # # # # ]: 0 : elog(ERROR, "ctid is NULL");
4401 : 0 : }
4402 : :
4403 : 0 : tupleid = (ItemPointer) DatumGetPointer(datum);
4404 : 0 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4405 : 0 : tupleid = &tuple_ctid;
4406 : 0 : }
4407 : :
4408 : : /*
4409 : : * Use the wholerow attribute, when available, to reconstruct the
4410 : : * old relation tuple. The old tuple serves one or both of two
4411 : : * purposes: 1) it is the OLD tuple for row triggers, 2) it
4412 : : * supplies values for any unchanged columns of the NEW tuple in
4413 : : * an UPDATE, because the subplan does not produce all the columns
4414 : : * of the target table.
4415 : : *
4416 : : * Note that the wholerow attribute does not carry system columns,
4417 : : * so foreign table triggers miss seeing those, except that we
4418 : : * know enough here to set t_tableOid. Quite separately from
4419 : : * this, the FDW may fetch its own junk attrs to identify the row.
4420 : : *
4421 : : * Other relevant relkinds, currently limited to views, always
4422 : : * have a wholerow attribute.
4423 : : */
4424 [ # # ]: 0 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4425 : : {
4426 : 0 : datum = ExecGetJunkAttribute(slot,
4427 : 0 : resultRelInfo->ri_RowIdAttNo,
4428 : : &isNull);
4429 : :
4430 : : /*
4431 : : * For commands other than MERGE, any tuples having a null row
4432 : : * identifier are errors. For MERGE, such tuples may need to be
4433 : : * handled by WHEN NOT MATCHED actions, if any, so do that.
4434 : : *
4435 : : * Note that we use the node's toplevel resultRelInfo, not any
4436 : : * specific partition's.
4437 : : */
4438 [ # # ]: 0 : if (isNull)
4439 : : {
4440 [ # # ]: 0 : if (operation == CMD_MERGE)
4441 : : {
4442 : 0 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4443 : :
4444 : 0 : slot = ExecMerge(&context, node->resultRelInfo,
4445 : 0 : NULL, NULL, node->canSetTag);
4446 : :
4447 : : /*
4448 : : * If we got a RETURNING result, return it to the
4449 : : * caller. We'll continue the work on next call.
4450 : : */
4451 [ # # ]: 0 : if (slot)
4452 : 0 : return slot;
4453 : :
4454 : 0 : continue; /* continue with the next tuple */
4455 : : }
4456 : :
4457 [ # # # # ]: 0 : elog(ERROR, "wholerow is NULL");
4458 : 0 : }
4459 : :
4460 : 0 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4461 : 0 : oldtupdata.t_len =
4462 : 0 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4463 : 0 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4464 : : /* Historically, view triggers see invalid t_tableOid. */
4465 : 0 : oldtupdata.t_tableOid =
4466 [ # # ]: 0 : (relkind == RELKIND_VIEW) ? InvalidOid :
4467 : 0 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4468 : :
4469 : 0 : oldtuple = &oldtupdata;
4470 : 0 : }
4471 : : else
4472 : : {
4473 : : /* Only foreign tables are allowed to omit a row-ID attr */
4474 [ # # ]: 0 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4475 : : }
4476 [ # # ]: 0 : }
4477 : :
4478 [ # # # # : 0 : switch (operation)
# ]
4479 : : {
4480 : : case CMD_INSERT:
4481 : : /* Initialize projection info if first time for this table */
4482 [ # # ]: 0 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4483 : 0 : ExecInitInsertProjection(node, resultRelInfo);
4484 : 0 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4485 : 0 : slot = ExecInsert(&context, resultRelInfo, slot,
4486 : 0 : node->canSetTag, NULL, NULL);
4487 : 0 : break;
4488 : :
4489 : : case CMD_UPDATE:
4490 : 0 : tuplock = false;
4491 : :
4492 : : /* Initialize projection info if first time for this table */
4493 [ # # ]: 0 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4494 : 0 : ExecInitUpdateProjection(node, resultRelInfo);
4495 : :
4496 : : /*
4497 : : * Make the new tuple by combining plan's output tuple with
4498 : : * the old tuple being updated.
4499 : : */
4500 : 0 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4501 [ # # ]: 0 : if (oldtuple != NULL)
4502 : : {
4503 [ # # ]: 0 : Assert(!resultRelInfo->ri_needLockTagTuple);
4504 : : /* Use the wholerow junk attr as the old tuple. */
4505 : 0 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4506 : 0 : }
4507 : : else
4508 : : {
4509 : : /* Fetch the most recent version of old tuple. */
4510 : 0 : Relation relation = resultRelInfo->ri_RelationDesc;
4511 : :
4512 [ # # ]: 0 : if (resultRelInfo->ri_needLockTagTuple)
4513 : : {
4514 : 0 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4515 : 0 : tuplock = true;
4516 : 0 : }
4517 [ # # # # ]: 0 : if (!table_tuple_fetch_row_version(relation, tupleid,
4518 : : SnapshotAny,
4519 : 0 : oldSlot))
4520 [ # # # # ]: 0 : elog(ERROR, "failed to fetch tuple being updated");
4521 : 0 : }
4522 : 0 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4523 : 0 : oldSlot);
4524 : :
4525 : : /* Now apply the update. */
4526 : 0 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4527 : 0 : oldSlot, slot, node->canSetTag);
4528 [ # # ]: 0 : if (tuplock)
4529 : 0 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4530 : : InplaceUpdateTupleLock);
4531 : 0 : break;
4532 : :
4533 : : case CMD_DELETE:
4534 : 0 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4535 : 0 : true, false, node->canSetTag, NULL, NULL, NULL);
4536 : 0 : break;
4537 : :
4538 : : case CMD_MERGE:
4539 : 0 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4540 : 0 : node->canSetTag);
4541 : 0 : break;
4542 : :
4543 : : default:
4544 [ # # # # ]: 0 : elog(ERROR, "unknown operation");
4545 : 0 : break;
4546 : : }
4547 : :
4548 : : /*
4549 : : * If we got a RETURNING result, return it to the caller. We'll
4550 : : * continue the work on next call.
4551 : : */
4552 [ # # ]: 0 : if (slot)
4553 : 0 : return slot;
4554 : : }
4555 : :
4556 : : /*
4557 : : * Insert remaining tuples for batch insert.
4558 : : */
4559 [ # # ]: 0 : if (estate->es_insert_pending_result_relations != NIL)
4560 : 0 : ExecPendingInserts(estate);
4561 : :
4562 : : /*
4563 : : * We're done, but fire AFTER STATEMENT triggers before exiting.
4564 : : */
4565 : 0 : fireASTriggers(node);
4566 : :
4567 : 0 : node->mt_done = true;
4568 : :
4569 : 0 : return NULL;
4570 : 0 : }
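
As a worked illustration of the RETURNING contract above: with a RETURNING clause, each call to the node completes at most one row modification, so a caller must keep pulling tuples until it gets NULL. A minimal sketch in C, assuming a hypothetical consume_returning() callback that is not part of PostgreSQL:

	/*
	 * Hedged sketch only: drive a ModifyTable node to completion the way
	 * a caller of ExecProcNode would.  consume_returning() is hypothetical.
	 */
	static void
	drain_modify_table(ModifyTableState *mtstate)
	{
		TupleTableSlot *slot;

		/* With RETURNING, each call completes one insert/update/delete */
		while ((slot = ExecProcNode(&mtstate->ps)) != NULL)
			consume_returning(slot);

		/* NULL means all rows are done and AFTER STATEMENT triggers fired */
	}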
4571 : :
4572 : : /*
4573 : : * ExecLookupResultRelByOid
4574 : : * If the table with given OID is among the result relations to be
4575 : : * updated by the given ModifyTable node, return its ResultRelInfo.
4576 : : *
4577 : : * If not found, return NULL if missing_ok, else raise error.
4578 : : *
4579 : : * If update_cache is true, then upon successful lookup, update the node's
4580 : : * one-element cache. ONLY ExecModifyTable may pass true for this.
4581 : : */
4582 : : ResultRelInfo *
4583 : 0 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4584 : : bool missing_ok, bool update_cache)
4585 : : {
4586 [ # # ]: 0 : if (node->mt_resultOidHash)
4587 : : {
4588 : : /* Use the pre-built hash table to locate the rel */
4589 : 0 : MTTargetRelLookup *mtlookup;
4590 : :
4591 : 0 : mtlookup = (MTTargetRelLookup *)
4592 : 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4593 [ # # ]: 0 : if (mtlookup)
4594 : : {
4595 [ # # ]: 0 : if (update_cache)
4596 : : {
4597 : 0 : node->mt_lastResultOid = resultoid;
4598 : 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4599 : 0 : }
4600 : 0 : return node->resultRelInfo + mtlookup->relationIndex;
4601 : : }
4602 [ # # ]: 0 : }
4603 : : else
4604 : : {
4605 : : /* With few target rels, just search the ResultRelInfo array */
4606 [ # # # # ]: 0 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4607 : : {
4608 : 0 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4609 : :
4610 [ # # ]: 0 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4611 : : {
4612 [ # # ]: 0 : if (update_cache)
4613 : : {
4614 : 0 : node->mt_lastResultOid = resultoid;
4615 : 0 : node->mt_lastResultIndex = ndx;
4616 : 0 : }
4617 : 0 : return rInfo;
4618 : : }
4619 [ # # ]: 0 : }
4620 : : }
4621 : :
4622 [ # # ]: 0 : if (!missing_ok)
4623 [ # # # # ]: 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4624 : 0 : return NULL;
4625 : 0 : }
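
For illustration, a hedged sketch of using this lookup from code other than ExecModifyTable; part_oid is a placeholder OID. Passing update_cache = false honors the rule above that only ExecModifyTable may update the one-element cache, since the cached index must stay in step with the resultRelInfo that ExecModifyTable preloads:

	ResultRelInfo *rri;

	/* part_oid is hypothetical, e.g. taken from a tuple's tableoid */
	rri = ExecLookupResultRelByOid(mtstate, part_oid,
								   true,	/* missing_ok */
								   false);	/* update_cache */
	if (rri == NULL)
	{
		/* part_oid is not among this ModifyTable's result relations */
	}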
4626 : :
4627 : : /* ----------------------------------------------------------------
4628 : : * ExecInitModifyTable
4629 : : * ----------------------------------------------------------------
4630 : : */
4631 : : ModifyTableState *
4632 : 0 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4633 : : {
4634 : 0 : ModifyTableState *mtstate;
4635 : 0 : Plan *subplan = outerPlan(node);
4636 : 0 : CmdType operation = node->operation;
4637 : 0 : int total_nrels = list_length(node->resultRelations);
4638 : 0 : int nrels;
4639 : 0 : List *resultRelations = NIL;
4640 : 0 : List *withCheckOptionLists = NIL;
4641 : 0 : List *returningLists = NIL;
4642 : 0 : List *updateColnosLists = NIL;
4643 : 0 : List *mergeActionLists = NIL;
4644 : 0 : List *mergeJoinConditions = NIL;
4645 : 0 : ResultRelInfo *resultRelInfo;
4646 : 0 : List *arowmarks;
4647 : 0 : ListCell *l;
4648 : 0 : int i;
4649 : 0 : Relation rel;
4650 : :
4651 : : /* check for unsupported flags */
4652 [ # # ]: 0 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4653 : :
4654 : : /*
4655 : : * Only consider unpruned relations for initializing their ResultRelInfo
4656 : : * struct and other fields such as withCheckOptions, etc.
4657 : : *
4658 : : * Note: We must avoid pruning every result relation. This is important
4659 : : * for MERGE, since even if every result relation is pruned from the
4660 : : * subplan, there might still be NOT MATCHED rows, for which there may be
4661 : : * INSERT actions to perform. To allow these actions to be found, at
4662 : : * least one result relation must be kept. Also, when inserting into a
4663 : : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4664 : : * as a reference for building the ResultRelInfo of the target partition.
4665 : : * In either case, it doesn't matter which result relation is kept, so we
4666 : : * just keep the first one, if all others have been pruned. See also
4667 : : * ExecDoInitialPruning(), which ensures that this first result relation
4668 : : * has been locked.
4669 : : */
4670 : 0 : i = 0;
4671 [ # # # # : 0 : foreach(l, node->resultRelations)
# # ]
4672 : : {
4673 : 0 : Index rti = lfirst_int(l);
4674 : 0 : bool keep_rel;
4675 : :
4676 : 0 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4677 [ # # # # : 0 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
# # ]
4678 : : {
4679 : : /* all result relations pruned; keep the first one */
4680 : 0 : keep_rel = true;
4681 : 0 : rti = linitial_int(node->resultRelations);
4682 : 0 : i = 0;
4683 : 0 : }
4684 : :
4685 [ # # ]: 0 : if (keep_rel)
4686 : : {
4687 : 0 : resultRelations = lappend_int(resultRelations, rti);
4688 [ # # ]: 0 : if (node->withCheckOptionLists)
4689 : : {
4690 : 0 : List *withCheckOptions = list_nth_node(List,
4691 : : node->withCheckOptionLists,
4692 : : i);
4693 : :
4694 : 0 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4695 : 0 : }
4696 [ # # ]: 0 : if (node->returningLists)
4697 : : {
4698 : 0 : List *returningList = list_nth_node(List,
4699 : : node->returningLists,
4700 : : i);
4701 : :
4702 : 0 : returningLists = lappend(returningLists, returningList);
4703 : 0 : }
4704 [ # # ]: 0 : if (node->updateColnosLists)
4705 : : {
4706 : 0 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4707 : :
4708 : 0 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4709 : 0 : }
4710 [ # # ]: 0 : if (node->mergeActionLists)
4711 : : {
4712 : 0 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4713 : :
4714 : 0 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4715 : 0 : }
4716 [ # # ]: 0 : if (node->mergeJoinConditions)
4717 : : {
4718 : 0 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4719 : :
4720 : 0 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4721 : 0 : }
4722 : 0 : }
4723 : 0 : i++;
4724 : 0 : }
4725 : 0 : nrels = list_length(resultRelations);
4726 [ # # ]: 0 : Assert(nrels > 0);
4727 : :
4728 : : /*
4729 : : * create state structure
4730 : : */
4731 : 0 : mtstate = makeNode(ModifyTableState);
4732 : 0 : mtstate->ps.plan = (Plan *) node;
4733 : 0 : mtstate->ps.state = estate;
4734 : 0 : mtstate->ps.ExecProcNode = ExecModifyTable;
4735 : :
4736 : 0 : mtstate->operation = operation;
4737 : 0 : mtstate->canSetTag = node->canSetTag;
4738 : 0 : mtstate->mt_done = false;
4739 : :
4740 : 0 : mtstate->mt_nrels = nrels;
4741 : 0 : mtstate->resultRelInfo = palloc_array(ResultRelInfo, nrels);
4742 : :
4743 : 0 : mtstate->mt_merge_pending_not_matched = NULL;
4744 : 0 : mtstate->mt_merge_inserted = 0;
4745 : 0 : mtstate->mt_merge_updated = 0;
4746 : 0 : mtstate->mt_merge_deleted = 0;
4747 : 0 : mtstate->mt_updateColnosLists = updateColnosLists;
4748 : 0 : mtstate->mt_mergeActionLists = mergeActionLists;
4749 : 0 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4750 : :
4751 : : /*----------
4752 : : * Resolve the target relation. This is the same as:
4753 : : *
4754 : : * - the relation for which we will fire FOR STATEMENT triggers,
4755 : : * - the relation into whose tuple format all captured transition tuples
4756 : : * must be converted, and
4757 : : * - the root partitioned table used for tuple routing.
4758 : : *
4759 : : * If it's a partitioned or inherited table, the root partition or
4760 : : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4761 : : * given explicitly in node->rootRelation. Otherwise, the target relation
4762 : : * is the sole relation in the node->resultRelations list and, since it can
4763 : : * never be pruned, also in the resultRelations list constructed above.
4764 : : *----------
4765 : : */
4766 [ # # ]: 0 : if (node->rootRelation > 0)
4767 : : {
4768 [ # # ]: 0 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
4769 : 0 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4770 : 0 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4771 : 0 : node->rootRelation);
4772 : 0 : }
4773 : : else
4774 : : {
4775 [ # # ]: 0 : Assert(list_length(node->resultRelations) == 1);
4776 [ # # ]: 0 : Assert(list_length(resultRelations) == 1);
4777 : 0 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4778 : 0 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4779 : 0 : linitial_int(resultRelations));
4780 : : }
4781 : :
4782 : : /* set up epqstate with dummy subplan data for the moment */
4783 : 0 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4784 : 0 : node->epqParam, resultRelations);
4785 : 0 : mtstate->fireBSTriggers = true;
4786 : :
4787 : : /*
4788 : : * Build state for collecting transition tuples. This requires having a
4789 : : * valid trigger query context, so skip it in explain-only mode.
4790 : : */
4791 [ # # ]: 0 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4792 : 0 : ExecSetupTransitionCaptureState(mtstate, estate);
4793 : :
4794 : : /*
4795 : : * Open all the result relations and initialize the ResultRelInfo structs.
4796 : : * (But the root relation was initialized above, if it's part of the array.)
4797 : : * We must do this before initializing the subplan, because direct-modify
4798 : : * FDWs expect their ResultRelInfos to be available.
4799 : : */
4800 : 0 : resultRelInfo = mtstate->resultRelInfo;
4801 : 0 : i = 0;
4802 [ # # # # : 0 : foreach(l, resultRelations)
# # ]
4803 : : {
4804 : 0 : Index resultRelation = lfirst_int(l);
4805 : 0 : List *mergeActions = NIL;
4806 : :
4807 [ # # ]: 0 : if (mergeActionLists)
4808 : 0 : mergeActions = list_nth(mergeActionLists, i);
4809 : :
4810 [ # # ]: 0 : if (resultRelInfo != mtstate->rootResultRelInfo)
4811 : : {
4812 : 0 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4813 : :
4814 : : /*
4815 : : * For child result relations, store the root result relation
4816 : : * pointer. We do so for the convenience of places that want to
4817 : : * look at the query's original target relation but don't have the
4818 : : * mtstate handy.
4819 : : */
4820 : 0 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4821 : 0 : }
4822 : :
4823 : : /* Initialize the usesFdwDirectModify flag */
4824 : 0 : resultRelInfo->ri_usesFdwDirectModify =
4825 : 0 : bms_is_member(i, node->fdwDirectModifyPlans);
4826 : :
4827 : : /*
4828 : : * Verify result relation is a valid target for the current operation
4829 : : */
4830 : 0 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
4831 : 0 : mergeActions);
4832 : :
4833 : 0 : resultRelInfo++;
4834 : 0 : i++;
4835 : 0 : }
4836 : :
4837 : : /*
4838 : : * Now we may initialize the subplan.
4839 : : */
4840 : 0 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4841 : :
4842 : : /*
4843 : : * Do additional per-result-relation initialization.
4844 : : */
4845 [ # # ]: 0 : for (i = 0; i < nrels; i++)
4846 : : {
4847 : 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4848 : :
4849 : : /* Let FDWs init themselves for foreign-table result rels */
4850 [ # # ]: 0 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4851 [ # # # # ]: 0 : resultRelInfo->ri_FdwRoutine != NULL &&
4852 : 0 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4853 : : {
4854 : 0 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4855 : :
4856 : 0 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4857 : 0 : resultRelInfo,
4858 : 0 : fdw_private,
4859 : 0 : i,
4860 : 0 : eflags);
4861 : 0 : }
4862 : :
4863 : : /*
4864 : : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4865 : : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4866 : : * tables, the FDW might have created additional junk attr(s), but
4867 : : * those are no concern of ours.
4868 : : */
4869 [ # # # # : 0 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
# # ]
4870 : 0 : operation == CMD_MERGE)
4871 : : {
4872 : 0 : char relkind;
4873 : :
4874 : 0 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4875 [ # # ]: 0 : if (relkind == RELKIND_RELATION ||
4876 [ # # # # ]: 0 : relkind == RELKIND_MATVIEW ||
4877 : 0 : relkind == RELKIND_PARTITIONED_TABLE)
4878 : : {
4879 : 0 : resultRelInfo->ri_RowIdAttNo =
4880 : 0 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4881 : :
4882 : : /*
4883 : : * For heap relations, a ctid junk attribute must be present.
4884 : : * Partitioned tables should only appear here when all leaf
4885 : : * partitions were pruned, in which case no rows can be
4886 : : * produced and ctid is not needed.
4887 : : */
4888 [ # # ]: 0 : if (relkind == RELKIND_PARTITIONED_TABLE)
4889 [ # # ]: 0 : Assert(nrels == 1);
4890 [ # # ]: 0 : else if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4891 [ # # # # ]: 0 : elog(ERROR, "could not find junk ctid column");
4892 : 0 : }
4893 [ # # ]: 0 : else if (relkind == RELKIND_FOREIGN_TABLE)
4894 : : {
4895 : : /*
4896 : : * We don't support MERGE with foreign tables for now. (It's
4897 : : * problematic because the implementation uses CTID.)
4898 : : */
4899 [ # # ]: 0 : Assert(operation != CMD_MERGE);
4900 : :
4901 : : /*
4902 : : * When there is a row-level trigger, there should be a
4903 : : * wholerow attribute. We also require it to be present in
4904 : : * UPDATE and MERGE, so we can get the values of unchanged
4905 : : * columns.
4906 : : */
4907 : 0 : resultRelInfo->ri_RowIdAttNo =
4908 : 0 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4909 : : "wholerow");
4910 [ # # # # ]: 0 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4911 : 0 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4912 [ # # # # ]: 0 : elog(ERROR, "could not find junk wholerow column");
4913 : 0 : }
4914 : : else
4915 : : {
4916 : : /* Other valid target relkinds must provide wholerow */
4917 : 0 : resultRelInfo->ri_RowIdAttNo =
4918 : 0 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4919 : : "wholerow");
4920 [ # # ]: 0 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4921 [ # # # # ]: 0 : elog(ERROR, "could not find junk wholerow column");
4922 : : }
4923 : 0 : }
4924 : 0 : }
4925 : :
4926 : : /*
4927 : : * If this is an inherited update/delete/merge, there will be a junk
4928 : : * attribute named "tableoid" present in the subplan's targetlist. It
4929 : : * will be used to identify the result relation for a given tuple to be
4930 : : * updated/deleted/merged.
4931 : : */
4932 : 0 : mtstate->mt_resultOidAttno =
4933 : 0 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4934 [ # # # # ]: 0 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
4935 : 0 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4936 : 0 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4937 : :
4938 : : /* Get the root target relation */
4939 : 0 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4940 : :
4941 : : /*
4942 : : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4943 : : * or MERGE might need this too, but only if it actually moves tuples
4944 : : * between partitions; in that case setup is done by
4945 : : * ExecCrossPartitionUpdate.
4946 : : */
4947 [ # # # # ]: 0 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4948 : 0 : operation == CMD_INSERT)
4949 : 0 : mtstate->mt_partition_tuple_routing =
4950 : 0 : ExecSetupPartitionTupleRouting(estate, rel);
4951 : :
4952 : : /*
4953 : : * Initialize any WITH CHECK OPTION constraints if needed.
4954 : : */
4955 : 0 : resultRelInfo = mtstate->resultRelInfo;
4956 [ # # # # : 0 : foreach(l, withCheckOptionLists)
# # ]
4957 : : {
4958 : 0 : List *wcoList = (List *) lfirst(l);
4959 : 0 : List *wcoExprs = NIL;
4960 : 0 : ListCell *ll;
4961 : :
4962 [ # # # # : 0 : foreach(ll, wcoList)
# # ]
4963 : : {
4964 : 0 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4965 : 0 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4966 : 0 : &mtstate->ps);
4967 : :
4968 : 0 : wcoExprs = lappend(wcoExprs, wcoExpr);
4969 : 0 : }
4970 : :
4971 : 0 : resultRelInfo->ri_WithCheckOptions = wcoList;
4972 : 0 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4973 : 0 : resultRelInfo++;
4974 : 0 : }
4975 : :
4976 : : /*
4977 : : * Initialize RETURNING projections if needed.
4978 : : */
4979 [ # # ]: 0 : if (returningLists)
4980 : : {
4981 : 0 : TupleTableSlot *slot;
4982 : 0 : ExprContext *econtext;
4983 : :
4984 : : /*
4985 : : * Initialize result tuple slot and assign its rowtype using the plan
4986 : : * node's declared targetlist, which the planner set up to be the same
4987 : : * as the first (before runtime pruning) RETURNING list. We assume
4988 : : * all the result rels will produce compatible output.
4989 : : */
4990 : 0 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4991 : 0 : slot = mtstate->ps.ps_ResultTupleSlot;
4992 : :
4993 : : /* Need an econtext too */
4994 [ # # ]: 0 : if (mtstate->ps.ps_ExprContext == NULL)
4995 : 0 : ExecAssignExprContext(estate, &mtstate->ps);
4996 : 0 : econtext = mtstate->ps.ps_ExprContext;
4997 : :
4998 : : /*
4999 : : * Build a projection for each result rel.
5000 : : */
5001 : 0 : resultRelInfo = mtstate->resultRelInfo;
5002 [ # # # # : 0 : foreach(l, returningLists)
# # ]
5003 : : {
5004 : 0 : List *rlist = (List *) lfirst(l);
5005 : :
5006 : 0 : resultRelInfo->ri_returningList = rlist;
5007 : 0 : resultRelInfo->ri_projectReturning =
5008 : 0 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
5009 : 0 : resultRelInfo->ri_RelationDesc->rd_att);
5010 : 0 : resultRelInfo++;
5011 : 0 : }
5012 : 0 : }
5013 : : else
5014 : : {
5015 : : /*
5016 : : * We still must construct a dummy result tuple type, because InitPlan
5017 : : * expects one (maybe should change that?).
5018 : : */
5019 : 0 : ExecInitResultTypeTL(&mtstate->ps);
5020 : :
5021 : 0 : mtstate->ps.ps_ExprContext = NULL;
5022 : : }
5023 : :
5024 : : /* Set the list of arbiter indexes if needed for ON CONFLICT */
5025 : 0 : resultRelInfo = mtstate->resultRelInfo;
5026 [ # # ]: 0 : if (node->onConflictAction != ONCONFLICT_NONE)
5027 : : {
5028 : : /* insert may only have one relation, inheritance is not expanded */
5029 [ # # ]: 0 : Assert(total_nrels == 1);
5030 : 0 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5031 : 0 : }
5032 : :
5033 : : /*
5034 : : * If needed, initialize the target list, projection and qual for ON CONFLICT
5035 : : * DO UPDATE.
5036 : : */
5037 [ # # ]: 0 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5038 : : {
5039 : 0 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
5040 : 0 : ExprContext *econtext;
5041 : 0 : TupleDesc relationDesc;
5042 : :
5043 : : /* already exists if created by RETURNING processing above */
5044 [ # # ]: 0 : if (mtstate->ps.ps_ExprContext == NULL)
5045 : 0 : ExecAssignExprContext(estate, &mtstate->ps);
5046 : :
5047 : 0 : econtext = mtstate->ps.ps_ExprContext;
5048 : 0 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5049 : :
5050 : : /* create state for DO UPDATE SET operation */
5051 : 0 : resultRelInfo->ri_onConflict = onconfl;
5052 : :
5053 : : /* initialize slot for the existing tuple */
5054 : 0 : onconfl->oc_Existing =
5055 : 0 : table_slot_create(resultRelInfo->ri_RelationDesc,
5056 : 0 : &mtstate->ps.state->es_tupleTable);
5057 : :
5058 : : /*
5059 : : * Create the tuple slot for the UPDATE SET projection. We want a slot
5060 : : * of the table's type here, because the slot will be used to insert
5061 : : * into the table, and for RETURNING processing - which may access
5062 : : * system attributes.
5063 : : */
5064 : 0 : onconfl->oc_ProjSlot =
5065 : 0 : table_slot_create(resultRelInfo->ri_RelationDesc,
5066 : 0 : &mtstate->ps.state->es_tupleTable);
5067 : :
5068 : : /* build UPDATE SET projection state */
5069 : 0 : onconfl->oc_ProjInfo =
5070 : 0 : ExecBuildUpdateProjection(node->onConflictSet,
5071 : : true,
5072 : 0 : node->onConflictCols,
5073 : 0 : relationDesc,
5074 : 0 : econtext,
5075 : 0 : onconfl->oc_ProjSlot,
5076 : 0 : &mtstate->ps);
5077 : :
5078 : : /* initialize state to evaluate the WHERE clause, if any */
5079 [ # # ]: 0 : if (node->onConflictWhere)
5080 : : {
5081 : 0 : ExprState *qualexpr;
5082 : :
5083 : 0 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5084 : 0 : &mtstate->ps);
5085 : 0 : onconfl->oc_WhereClause = qualexpr;
5086 : 0 : }
5087 : 0 : }
5088 : :
5089 : : /*
5090 : : * If we have any secondary relations in an UPDATE or DELETE, they need to
5091 : : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5092 : : * EvalPlanQual mechanism needs to be told about them. This also goes for
5093 : : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5094 : : */
5095 : 0 : arowmarks = NIL;
5096 [ # # # # : 0 : foreach(l, node->rowMarks)
# # ]
5097 : : {
5098 : 0 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
5099 : 0 : RangeTblEntry *rte = exec_rt_fetch(rc->rti, estate);
5100 : 0 : ExecRowMark *erm;
5101 : 0 : ExecAuxRowMark *aerm;
5102 : :
5103 : : /* ignore "parent" rowmarks; they are irrelevant at runtime */
5104 [ # # ]: 0 : if (rc->isParent)
5105 : 0 : continue;
5106 : :
5107 : : /*
5108 : : * Also ignore rowmarks belonging to child tables that have been
5109 : : * pruned in ExecDoInitialPruning().
5110 : : */
5111 [ # # # # ]: 0 : if (rte->rtekind == RTE_RELATION &&
5112 : 0 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5113 : 0 : continue;
5114 : :
5115 : : /* Find ExecRowMark and build ExecAuxRowMark */
5116 : 0 : erm = ExecFindRowMark(estate, rc->rti, false);
5117 : 0 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5118 : 0 : arowmarks = lappend(arowmarks, aerm);
5119 [ # # # ]: 0 : }
5120 : :
5121 : : /* For a MERGE command, initialize its state */
5122 [ # # ]: 0 : if (mtstate->operation == CMD_MERGE)
5123 : 0 : ExecInitMerge(mtstate, estate);
5124 : :
5125 : 0 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5126 : :
5127 : : /*
5128 : : * If there are a lot of result relations, use a hash table to speed the
5129 : : * lookups. If there are not a lot, a simple linear search is faster.
5130 : : *
5131 : : * It's not clear where the threshold is, but try 64 for starters. In a
5132 : : * debugging build, use a small threshold so that we get some test
5133 : : * coverage of both code paths.
5134 : : */
5135 : : #ifdef USE_ASSERT_CHECKING
5136 : : #define MT_NRELS_HASH 4
5137 : : #else
5138 : : #define MT_NRELS_HASH 64
5139 : : #endif
5140 [ # # ]: 0 : if (nrels >= MT_NRELS_HASH)
5141 : : {
5142 : 0 : HASHCTL hash_ctl;
5143 : :
5144 : 0 : hash_ctl.keysize = sizeof(Oid);
5145 : 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5146 : 0 : hash_ctl.hcxt = CurrentMemoryContext;
5147 : 0 : mtstate->mt_resultOidHash =
5148 : 0 : hash_create("ModifyTable target hash",
5149 : 0 : nrels, &hash_ctl,
5150 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5151 [ # # ]: 0 : for (i = 0; i < nrels; i++)
5152 : : {
5153 : 0 : Oid hashkey;
5154 : 0 : MTTargetRelLookup *mtlookup;
5155 : 0 : bool found;
5156 : :
5157 : 0 : resultRelInfo = &mtstate->resultRelInfo[i];
5158 : 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5159 : 0 : mtlookup = (MTTargetRelLookup *)
5160 : 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5161 : : HASH_ENTER, &found);
5162 [ # # ]: 0 : Assert(!found);
5163 : 0 : mtlookup->relationIndex = i;
5164 : 0 : }
5165 : 0 : }
5166 : : else
5167 : 0 : mtstate->mt_resultOidHash = NULL;
5168 : :
5169 : : /*
5170 : : * Determine if the FDW supports batch insert and determine the batch size
5171 : : * (an FDW may support batching, but it may be disabled for the
5172 : : * server/table).
5173 : : *
5174 : : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5175 : : * remains set to 0.
5176 : : */
5177 [ # # ]: 0 : if (operation == CMD_INSERT)
5178 : : {
5179 : : /* insert may only have one relation, inheritance is not expanded */
5180 [ # # ]: 0 : Assert(total_nrels == 1);
5181 : 0 : resultRelInfo = mtstate->resultRelInfo;
5182 [ # # ]: 0 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5183 [ # # ]: 0 : resultRelInfo->ri_FdwRoutine != NULL &&
5184 [ # # # # ]: 0 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5185 : 0 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5186 : : {
5187 : 0 : resultRelInfo->ri_BatchSize =
5188 : 0 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5189 [ # # ]: 0 : Assert(resultRelInfo->ri_BatchSize >= 1);
5190 : 0 : }
5191 : : else
5192 : 0 : resultRelInfo->ri_BatchSize = 1;
5193 : 0 : }
5194 : :
5195 : : /*
5196 : : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5197 : : * to estate->es_auxmodifytables so that it will be run to completion by
5198 : : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5199 : : * ModifyTable node too, but there's no need.) Note the use of lcons not
5200 : : * lappend: we need later-initialized ModifyTable nodes to be shut down
5201 : : * before earlier ones. This ensures that we don't throw away RETURNING
5202 : : * rows that need to be seen by a later CTE subplan.
5203 : : */
5204 [ # # ]: 0 : if (!mtstate->canSetTag)
5205 : 0 : estate->es_auxmodifytables = lcons(mtstate,
5206 : 0 : estate->es_auxmodifytables);
5207 : :
5208 : 0 : return mtstate;
5209 : 0 : }
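
The INSERT batch-size negotiation near the end of ExecInitModifyTable relies on a pair of FdwRoutine callbacks. A rough sketch of the FDW side, loosely modeled on postgres_fdw; the option-lookup helper is hypothetical:

	/*
	 * Sketch only.  Both callbacks must be provided, otherwise
	 * ExecInitModifyTable leaves ri_BatchSize at 1 (no batching).
	 */
	static int
	myGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
	{
		int			batch_size = lookup_batch_size_option(resultRelInfo);

		return Max(batch_size, 1);	/* returning 1 disables batching */
	}

	static TupleTableSlot **
	myExecForeignBatchInsert(EState *estate, ResultRelInfo *resultRelInfo,
							 TupleTableSlot **slots, TupleTableSlot **planSlots,
							 int *numSlots)
	{
		/* ship *numSlots rows to the remote side in a single round trip */
		return slots;
	}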
5210 : :
5211 : : /* ----------------------------------------------------------------
5212 : : * ExecEndModifyTable
5213 : : *
5214 : : * Shuts down the plan.
5215 : : *
5216 : : * Returns nothing of interest.
5217 : : * ----------------------------------------------------------------
5218 : : */
5219 : : void
5220 : 0 : ExecEndModifyTable(ModifyTableState *node)
5221 : : {
5222 : 0 : int i;
5223 : :
5224 : : /*
5225 : : * Allow any FDWs to shut down
5226 : : */
5227 [ # # ]: 0 : for (i = 0; i < node->mt_nrels; i++)
5228 : : {
5229 : 0 : int j;
5230 : 0 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5231 : :
5232 [ # # ]: 0 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5233 [ # # # # ]: 0 : resultRelInfo->ri_FdwRoutine != NULL &&
5234 : 0 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5235 : 0 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5236 : 0 : resultRelInfo);
5237 : :
5238 : : /*
5239 : : * Clean up the initialized batch slots. This only matters for FDWs
5240 : : * with batching, but the other cases will have ri_NumSlotsInitialized
5241 : : * == 0.
5242 : : */
5243 [ # # ]: 0 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5244 : : {
5245 : 0 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5246 : 0 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5247 : 0 : }
5248 : 0 : }
5249 : :
5250 : : /*
5251 : : * Close all the partitioned tables, leaf partitions, and their indices
5252 : : * and release the slot used for tuple routing, if set.
5253 : : */
5254 [ # # ]: 0 : if (node->mt_partition_tuple_routing)
5255 : : {
5256 : 0 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5257 : :
5258 [ # # ]: 0 : if (node->mt_root_tuple_slot)
5259 : 0 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5260 : 0 : }
5261 : :
5262 : : /*
5263 : : * Terminate EPQ execution if active
5264 : : */
5265 : 0 : EvalPlanQualEnd(&node->mt_epqstate);
5266 : :
5267 : : /*
5268 : : * shut down subplan
5269 : : */
5270 : 0 : ExecEndNode(outerPlanState(node));
5271 : 0 : }
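
Correspondingly, an FDW that set up per-relation state in BeginForeignModify would tear it down in its EndForeignModify callback, which the loop above invokes. A hedged sketch; the state type and cleanup helper are hypothetical:

	static void
	myEndForeignModify(EState *estate, ResultRelInfo *resultRelInfo)
	{
		MyFdwModifyState *fmstate = resultRelInfo->ri_FdwState;	/* hypothetical */

		if (fmstate != NULL)
			release_remote_resources(fmstate);	/* hypothetical cleanup */
	}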
5272 : :
5273 : : void
5274 : 0 : ExecReScanModifyTable(ModifyTableState *node)
5275 : : {
5276 : : /*
5277 : : * Currently, we don't need to support rescan on ModifyTable nodes. The
5278 : : * semantics of that would be a bit debatable anyway.
5279 : : */
5280 [ # # # # ]: 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5281 : 0 : }