Branch data Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * heapam.c
4 : : * heap access method code
5 : : *
6 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/access/heap/heapam.c
12 : : *
13 : : *
14 : : * INTERFACE ROUTINES
15 : : * heap_beginscan - begin relation scan
16 : : * heap_rescan - restart a relation scan
17 : : * heap_endscan - end relation scan
18 : : * heap_getnext - retrieve next tuple in scan
19 : : * heap_fetch - retrieve tuple with given tid
20 : : * heap_insert - insert tuple into a relation
21 : : * heap_multi_insert - insert multiple tuples into a relation
22 : : * heap_delete - delete a tuple from a relation
23 : : * heap_update - replace a tuple in a relation with another tuple
24 : : *
25 : : * NOTES
26 : : * This file contains the heap_ routines which implement
27 : : * the POSTGRES heap access method used for all POSTGRES
28 : : * relations.
29 : : *
30 : : *-------------------------------------------------------------------------
31 : : */
32 : : #include "postgres.h"
33 : :
34 : : #include "access/heapam.h"
35 : : #include "access/heaptoast.h"
36 : : #include "access/hio.h"
37 : : #include "access/multixact.h"
38 : : #include "access/subtrans.h"
39 : : #include "access/syncscan.h"
40 : : #include "access/valid.h"
41 : : #include "access/visibilitymap.h"
42 : : #include "access/xloginsert.h"
43 : : #include "catalog/pg_database.h"
44 : : #include "catalog/pg_database_d.h"
45 : : #include "commands/vacuum.h"
46 : : #include "pgstat.h"
47 : : #include "port/pg_bitutils.h"
48 : : #include "storage/lmgr.h"
49 : : #include "storage/predicate.h"
50 : : #include "storage/procarray.h"
51 : : #include "utils/datum.h"
52 : : #include "utils/injection_point.h"
53 : : #include "utils/inval.h"
54 : : #include "utils/spccache.h"
55 : : #include "utils/syscache.h"
56 : :
57 : :
58 : : static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
59 : : TransactionId xid, CommandId cid, int options);
60 : : static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
61 : : Buffer newbuf, HeapTuple oldtup,
62 : : HeapTuple newtup, HeapTuple old_key_tuple,
63 : : bool all_visible_cleared, bool new_all_visible_cleared);
64 : : #ifdef USE_ASSERT_CHECKING
65 : : static void check_lock_if_inplace_updateable_rel(Relation relation,
66 : : const ItemPointerData *otid,
67 : : HeapTuple newtup);
68 : : static void check_inplace_rel_lock(HeapTuple oldtup);
69 : : #endif
70 : : static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
71 : : Bitmapset *interesting_cols,
72 : : Bitmapset *external_cols,
73 : : HeapTuple oldtup, HeapTuple newtup,
74 : : bool *has_external);
75 : : static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
76 : : LockTupleMode mode, LockWaitPolicy wait_policy,
77 : : bool *have_tuple_lock);
78 : : static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
79 : : BlockNumber block,
80 : : ScanDirection dir);
81 : : static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
82 : : ScanDirection dir);
83 : : static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
84 : : uint16 old_infomask2, TransactionId add_to_xmax,
85 : : LockTupleMode mode, bool is_update,
86 : : TransactionId *result_xmax, uint16 *result_infomask,
87 : : uint16 *result_infomask2);
88 : : static TM_Result heap_lock_updated_tuple(Relation rel,
89 : : uint16 prior_infomask,
90 : : TransactionId prior_raw_xmax,
91 : : const ItemPointerData *prior_ctid,
92 : : TransactionId xid,
93 : : LockTupleMode mode);
94 : : static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
95 : : uint16 *new_infomask2);
96 : : static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
97 : : uint16 t_infomask);
98 : : static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
99 : : LockTupleMode lockmode, bool *current_is_member);
100 : : static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
101 : : Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
102 : : int *remaining);
103 : : static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
104 : : uint16 infomask, Relation rel, int *remaining,
105 : : bool logLockFailure);
106 : : static void index_delete_sort(TM_IndexDeleteOp *delstate);
107 : : static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
108 : : static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
109 : : static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
110 : : bool *copy);
111 : :
112 : :
113 : : /*
114 : : * Each tuple lock mode has a corresponding heavyweight lock, and one or two
115 : : * corresponding MultiXactStatuses (one to merely lock tuples, another one to
116 : : * update them). This table (and the macros below) helps us determine the
117 : : * heavyweight lock mode and MultiXactStatus values to use for any particular
118 : : * tuple lock strength.
119 : : *
120 : : * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
121 : : *
122 : : * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
123 : : * instead.
124 : : */
125 : : static const struct
126 : : {
127 : : LOCKMODE hwlock;
128 : : int lockstatus;
129 : : int updstatus;
130 : : }
131 : :
132 : : tupleLockExtraInfo[MaxLockTupleMode + 1] =
133 : : {
134 : : { /* LockTupleKeyShare */
135 : : AccessShareLock,
136 : : MultiXactStatusForKeyShare,
137 : : -1 /* KeyShare does not allow updating tuples */
138 : : },
139 : : { /* LockTupleShare */
140 : : RowShareLock,
141 : : MultiXactStatusForShare,
142 : : -1 /* Share does not allow updating tuples */
143 : : },
144 : : { /* LockTupleNoKeyExclusive */
145 : : ExclusiveLock,
146 : : MultiXactStatusForNoKeyUpdate,
147 : : MultiXactStatusNoKeyUpdate
148 : : },
149 : : { /* LockTupleExclusive */
150 : : AccessExclusiveLock,
151 : : MultiXactStatusForUpdate,
152 : : MultiXactStatusUpdate
153 : : }
154 : : };
155 : :
156 : : /* Get the LOCKMODE for a given MultiXactStatus */
157 : : #define LOCKMODE_from_mxstatus(status) \
158 : : (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
159 : :
160 : : /*
161 : : * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
162 : : * This is more readable than having every caller translate it to lock.h's
163 : : * LOCKMODE.
164 : : */
165 : : #define LockTupleTuplock(rel, tup, mode) \
166 : : LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
167 : : #define UnlockTupleTuplock(rel, tup, mode) \
168 : : UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
169 : : #define ConditionalLockTupleTuplock(rel, tup, mode, log) \
170 : : ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
171 : :
172 : : #ifdef USE_PREFETCH
173 : : /*
174 : : * heap_index_delete_tuples and index_delete_prefetch_buffer use this
175 : : * structure to coordinate prefetching activity
176 : : */
177 : : typedef struct
178 : : {
179 : : BlockNumber cur_hblkno;
180 : : int next_item;
181 : : int ndeltids;
182 : : TM_IndexDelete *deltids;
183 : : } IndexDeletePrefetchState;
184 : : #endif
185 : :
186 : : /* heap_index_delete_tuples bottom-up index deletion costing constants */
187 : : #define BOTTOMUP_MAX_NBLOCKS 6
188 : : #define BOTTOMUP_TOLERANCE_NBLOCKS 3
189 : :
190 : : /*
191 : : * heap_index_delete_tuples uses this when determining which heap blocks it
192 : : * must visit to help its bottom-up index deletion caller
193 : : */
194 : : typedef struct IndexDeleteCounts
195 : : {
196 : : int16 npromisingtids; /* Number of "promising" TIDs in group */
197 : : int16 ntids; /* Number of TIDs in group */
198 : : int16 ifirsttid; /* Offset to group's first deltid */
199 : : } IndexDeleteCounts;
200 : :
201 : : /*
202 : : * This table maps tuple lock strength values for each particular
203 : : * MultiXactStatus value.
204 : : */
205 : : static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
206 : : {
207 : : LockTupleKeyShare, /* ForKeyShare */
208 : : LockTupleShare, /* ForShare */
209 : : LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
210 : : LockTupleExclusive, /* ForUpdate */
211 : : LockTupleNoKeyExclusive, /* NoKeyUpdate */
212 : : LockTupleExclusive /* Update */
213 : : };
214 : :
215 : : /* Get the LockTupleMode for a given MultiXactStatus */
216 : : #define TUPLOCK_from_mxstatus(status) \
217 : : (MultiXactStatusLock[(status)])
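
/*
 * For illustration (editor's addition, not part of heapam.c): the two
 * lookup tables compose.  A FOR SHARE tuple lock is recorded in a
 * multixact as MultiXactStatusForShare, so
 *
 *     TUPLOCK_from_mxstatus(MultiXactStatusForShare) == LockTupleShare
 *     LOCKMODE_from_mxstatus(MultiXactStatusForShare) == RowShareLock
 *
 * i.e. LOCKMODE_from_mxstatus() recovers the LockTupleMode from
 * MultiXactStatusLock[] and then fetches the heavyweight lock from
 * tupleLockExtraInfo[].
 */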
218 : :
219 : : /*
220 : : * Check that we have a valid snapshot if we might need TOAST access.
221 : : */
222 : : static inline void
223 : 2068295 : AssertHasSnapshotForToast(Relation rel)
224 : : {
225 : : #ifdef USE_ASSERT_CHECKING
226 : :
227 : : /* bootstrap mode in particular breaks this rule */
228 [ + + ]: 2068295 : if (!IsNormalProcessingMode())
229 : 11794 : return;
230 : :
231 : : /* if the relation doesn't have a TOAST table, we are good */
232 [ + + ]: 2056501 : if (!OidIsValid(rel->rd_rel->reltoastrelid))
233 : 1284809 : return;
234 : :
235 [ + - ]: 771692 : Assert(HaveRegisteredOrActiveSnapshot());
236 : :
237 : : #endif /* USE_ASSERT_CHECKING */
238 : 2068295 : }
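
/*
 * For illustration (editor's sketch, not part of heapam.c): a caller
 * inserting into a relation that has a TOAST table satisfies the assertion
 * above by having a snapshot set up first, e.g. (hypothetical sequence,
 * assuming rel is open and tup is a formed tuple):
 *
 *     PushActiveSnapshot(GetTransactionSnapshot());
 *     heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);
 *     PopActiveSnapshot();
 *
 * Any registered snapshot also suffices; the assertion only requires
 * HaveRegisteredOrActiveSnapshot().
 */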
239 : :
240 : : /* ----------------------------------------------------------------
241 : : * heap support routines
242 : : * ----------------------------------------------------------------
243 : : */
244 : :
245 : : /*
246 : : * Streaming read API callback for parallel sequential scans. Returns the next
247 : : * block the caller wants from the read stream or InvalidBlockNumber when done.
248 : : */
249 : : static BlockNumber
250 : 30515 : heap_scan_stream_read_next_parallel(ReadStream *stream,
251 : : void *callback_private_data,
252 : : void *per_buffer_data)
253 : : {
254 : 30515 : HeapScanDesc scan = (HeapScanDesc) callback_private_data;
255 : :
256 [ + - ]: 30515 : Assert(ScanDirectionIsForward(scan->rs_dir));
257 [ + - ]: 30515 : Assert(scan->rs_base.rs_parallel);
258 : :
259 [ + + ]: 30515 : if (unlikely(!scan->rs_inited))
260 : : {
261 : : /* parallel scan */
262 : 1038 : table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
263 : 519 : scan->rs_parallelworkerdata,
264 : 519 : (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel,
265 : 519 : scan->rs_startblock,
266 : 519 : scan->rs_numblocks);
267 : :
268 : : /* may return InvalidBlockNumber if there are no more blocks */
269 : 1038 : scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
270 : 519 : scan->rs_parallelworkerdata,
271 : 519 : (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
272 : 519 : scan->rs_inited = true;
273 : 519 : }
274 : : else
275 : : {
276 : 59992 : scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
277 : 29996 : scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
278 : 29996 : scan->rs_base.rs_parallel);
279 : : }
280 : :
281 : 61030 : return scan->rs_prefetch_block;
282 : 30515 : }
283 : :
284 : : /*
285 : : * Streaming read API callback for serial sequential and TID range scans.
286 : : * Returns the next block the caller wants from the read stream or
287 : : * InvalidBlockNumber when done.
288 : : */
289 : : static BlockNumber
290 : 1133424 : heap_scan_stream_read_next_serial(ReadStream *stream,
291 : : void *callback_private_data,
292 : : void *per_buffer_data)
293 : : {
294 : 1133424 : HeapScanDesc scan = (HeapScanDesc) callback_private_data;
295 : :
296 [ + + ]: 1133424 : if (unlikely(!scan->rs_inited))
297 : : {
298 : 344920 : scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
299 : 344920 : scan->rs_inited = true;
300 : 344920 : }
301 : : else
302 : 1577008 : scan->rs_prefetch_block = heapgettup_advance_block(scan,
303 : 788504 : scan->rs_prefetch_block,
304 : 788504 : scan->rs_dir);
305 : :
306 : 2266848 : return scan->rs_prefetch_block;
307 : 1133424 : }
308 : :
309 : : /*
310 : : * Read stream API callback for bitmap heap scans.
311 : : * Returns the next block the caller wants from the read stream or
312 : : * InvalidBlockNumber when done.
313 : : */
314 : : static BlockNumber
315 : 44484 : bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
316 : : void *per_buffer_data)
317 : : {
318 : 44484 : TBMIterateResult *tbmres = per_buffer_data;
319 : 44484 : BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) private_data;
320 : 44484 : HeapScanDesc hscan = (HeapScanDesc) bscan;
321 : 44484 : TableScanDesc sscan = &hscan->rs_base;
322 : :
323 : 44484 : for (;;)
324 : : {
325 [ + - ]: 44484 : CHECK_FOR_INTERRUPTS();
326 : :
327 : : /* no more entries in the bitmap */
328 [ + + ]: 44484 : if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
329 : 2009 : return InvalidBlockNumber;
330 : :
331 : : /*
332 : : * Ignore any claimed entries past what we think is the end of the
333 : : * relation. It may have been extended after the start of our scan (we
334 : : * only hold an AccessShareLock, and it could be inserts from this
335 : : * backend). We forgo this optimization in SERIALIZABLE
336 : : * isolation, though, as we need to examine all invisible tuples
337 : : * reachable by the index.
338 : : */
339 [ + - + - ]: 42475 : if (!IsolationIsSerializable() &&
340 : 42475 : tbmres->blockno >= hscan->rs_nblocks)
341 : 0 : continue;
342 : :
343 : 42475 : return tbmres->blockno;
344 : : }
345 : :
346 : : /* not reachable */
347 : : Assert(false);
348 : 44484 : }
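
/*
 * Editor's note (not part of heapam.c): this callback depends on the read
 * stream having been created with per-buffer data of
 * sizeof(TBMIterateResult) (see heap_beginscan below), so every buffer the
 * stream returns carries the bitmap entry that selected its block.
 */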
349 : :
350 : : /* ----------------
351 : : * initscan - scan code common to heap_beginscan and heap_rescan
352 : : * ----------------
353 : : */
354 : : static void
355 : 348747 : initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
356 : : {
357 : 348747 : ParallelBlockTableScanDesc bpscan = NULL;
358 : 348747 : bool allow_strat;
359 : 348747 : bool allow_sync;
360 : :
361 : : /*
362 : : * Determine the number of blocks we have to scan.
363 : : *
364 : : * It is sufficient to do this once at scan start, since any tuples added
365 : : * while the scan is in progress will be invisible to my snapshot anyway.
366 : : * (That is not true when using a non-MVCC snapshot. However, we couldn't
367 : : * guarantee to return tuples added after scan start anyway, since they
368 : : * might go into pages we already scanned. To guarantee consistent
369 : : * results for a non-MVCC snapshot, the caller must hold some higher-level
370 : : * lock that ensures the interesting tuple(s) won't change.)
371 : : */
372 [ + + ]: 348747 : if (scan->rs_base.rs_parallel != NULL)
373 : : {
374 : 717 : bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
375 : 717 : scan->rs_nblocks = bpscan->phs_nblocks;
376 : 717 : }
377 : : else
378 : 348030 : scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
379 : :
380 : : /*
381 : : * If the table is large relative to NBuffers, use a bulk-read access
382 : : * strategy and enable synchronized scanning (see syncscan.c). Although
383 : : * the thresholds for these features could be different, we make them the
384 : : * same so that there are only two behaviors to tune rather than four.
385 : : * (However, some callers need to be able to disable one or both of these
386 : : * behaviors, independently of the size of the table; also there is a GUC
387 : : * variable that can disable synchronized scanning.)
388 : : *
389 : : * Note that table_block_parallelscan_initialize has a very similar test;
390 : : * if you change this, consider changing that one, too.
391 : : */
392 [ + + + - ]: 348747 : if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
393 : 346414 : scan->rs_nblocks > NBuffers / 4)
394 : : {
395 : 0 : allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
396 : 0 : allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
397 : 0 : }
398 : : else
399 : 348747 : allow_strat = allow_sync = false;
400 : :
401 [ - + ]: 348747 : if (allow_strat)
402 : : {
403 : : /* During a rescan, keep the previous strategy object. */
404 [ # # ]: 0 : if (scan->rs_strategy == NULL)
405 : 0 : scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
406 : 0 : }
407 : : else
408 : : {
409 [ + - ]: 348747 : if (scan->rs_strategy != NULL)
410 : 0 : FreeAccessStrategy(scan->rs_strategy);
411 : 348747 : scan->rs_strategy = NULL;
412 : : }
413 : :
414 [ + + ]: 348747 : if (scan->rs_base.rs_parallel != NULL)
415 : : {
416 : : /* For parallel scan, believe whatever ParallelTableScanDesc says. */
417 [ - + ]: 717 : if (scan->rs_base.rs_parallel->phs_syncscan)
418 : 0 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
419 : : else
420 : 717 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
421 : :
422 : : /*
423 : : * If not rescanning, initialize the startblock. Finding the actual
424 : : * start location is done in table_block_parallelscan_startblock_init,
425 : : * based on whether an alternative start location has been set with
426 : : * heap_setscanlimits, or using the syncscan location, when syncscan
427 : : * is enabled.
428 : : */
429 [ + + ]: 717 : if (!keep_startblock)
430 : 679 : scan->rs_startblock = InvalidBlockNumber;
431 : 717 : }
432 : : else
433 : : {
434 [ + + ]: 348030 : if (keep_startblock)
435 : : {
436 : : /*
437 : : * When rescanning, we want to keep the previous startblock
438 : : * setting, so that rewinding a cursor doesn't generate surprising
439 : : * results. Reset the active syncscan setting, though.
440 : : */
441 [ - + # # ]: 2660 : if (allow_sync && synchronize_seqscans)
442 : 0 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
443 : : else
444 : 2660 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
445 : 2660 : }
446 [ - + # # ]: 345370 : else if (allow_sync && synchronize_seqscans)
447 : : {
448 : 0 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
449 : 0 : scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
450 : 0 : }
451 : : else
452 : : {
453 : 345370 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
454 : 345370 : scan->rs_startblock = 0;
455 : : }
456 : : }
457 : :
458 : 348747 : scan->rs_numblocks = InvalidBlockNumber;
459 : 348747 : scan->rs_inited = false;
460 : 348747 : scan->rs_ctup.t_data = NULL;
461 : 348747 : ItemPointerSetInvalid(&scan->rs_ctup.t_self);
462 : 348747 : scan->rs_cbuf = InvalidBuffer;
463 : 348747 : scan->rs_cblock = InvalidBlockNumber;
464 : 348747 : scan->rs_ntuples = 0;
465 : 348747 : scan->rs_cindex = 0;
466 : :
467 : : /*
468 : : * Initialize to ForwardScanDirection because it is most common and
469 : : * because heap scans go forward before going backward (e.g. CURSORs).
470 : : */
471 : 348747 : scan->rs_dir = ForwardScanDirection;
472 : 348747 : scan->rs_prefetch_block = InvalidBlockNumber;
473 : :
474 : : /* page-at-a-time fields are always invalid when not rs_inited */
475 : :
476 : : /*
477 : : * copy the scan key, if appropriate
478 : : */
479 [ + + - + ]: 348747 : if (key != NULL && scan->rs_base.rs_nkeys > 0)
480 : 14398 : memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
481 : :
482 : : /*
483 : : * Currently, we only have a stats counter for sequential heap scans (but
484 : : * e.g for bitmap scans the underlying bitmap index scans will be counted,
485 : : * and for sample scans we update stats for tuple fetches).
486 : : */
487 [ + + ]: 348747 : if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
488 [ + + + - : 345265 : pgstat_count_heap_scan(scan->rs_base.rs_rd);
+ - ]
489 : 348747 : }
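
/*
 * Worked example (editor's addition, not part of heapam.c) for the
 * NBuffers / 4 threshold above: with shared_buffers = 128MB (NBuffers =
 * 16384 eight-kilobyte pages), any non-local relation larger than 4096
 * blocks (32MB) qualifies for the BAS_BULKREAD strategy and synchronized
 * scanning, still subject to the SO_ALLOW_STRAT / SO_ALLOW_SYNC flags and
 * the synchronize_seqscans GUC.
 */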
490 : :
491 : : /*
492 : : * heap_setscanlimits - restrict range of a heapscan
493 : : *
494 : : * startBlk is the page to start at
495 : : * numBlks is number of pages to scan (InvalidBlockNumber means "all")
496 : : */
497 : : void
498 : 417 : heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
499 : : {
500 : 417 : HeapScanDesc scan = (HeapScanDesc) sscan;
501 : :
502 [ + - ]: 417 : Assert(!scan->rs_inited); /* else too late to change */
503 : : /* else rs_startblock is significant */
504 [ + - ]: 417 : Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
505 : :
506 : : /* Check startBlk is valid (but allow case of zero blocks...) */
507 [ + + + - ]: 417 : Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
508 : :
509 : 417 : scan->rs_startblock = startBlk;
510 : 417 : scan->rs_numblocks = numBlks;
511 : 417 : }
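
/*
 * For illustration (editor's addition, not part of heapam.c):
 * heap_setscanlimits(sscan, 10, 5) restricts the scan to blocks 10..14.
 * Per the assertions above, it must be called before the first tuple is
 * fetched and only with SO_ALLOW_SYNC clear, since a synchronized scan
 * would not start at the requested block.
 */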
512 : :
513 : : /*
514 : : * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
515 : : * multiple times, with constant arguments for all_visible,
516 : : * check_serializable.
517 : : */
518 : : pg_attribute_always_inline
519 : : static int
520 : 797940 : page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
521 : : Page page, Buffer buffer,
522 : : BlockNumber block, int lines,
523 : : bool all_visible, bool check_serializable)
524 : : {
525 : 797940 : Oid relid = RelationGetRelid(scan->rs_base.rs_rd);
526 : 797940 : int ntup = 0;
527 : 797940 : int nvis = 0;
528 : 797940 : BatchMVCCState batchmvcc;
529 : :
530 : : /* page at a time should have been disabled otherwise */
531 [ - + # # ]: 797940 : Assert(IsMVCCSnapshot(snapshot));
532 : :
533 : : /* first find all tuples on the page */
534 [ + + ]: 55236919 : for (OffsetNumber lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
535 : : {
536 : 54438979 : ItemId lpp = PageGetItemId(page, lineoff);
537 : 54438979 : HeapTuple tup;
538 : :
539 [ + + ]: 54438979 : if (unlikely(!ItemIdIsNormal(lpp)))
540 : 8943522 : continue;
541 : :
542 : : /*
543 : : * If the page is not all-visible or we need to check serializability,
544 : : * maintain enough state to be able to refind the tuple efficiently,
545 : : * without first needing to re-fetch the item pointer and, via that, the
546 : : * tuple.
547 : : */
548 [ + + - + ]: 45495457 : if (!all_visible || check_serializable)
549 : : {
550 : 11728552 : tup = &batchmvcc.tuples[ntup];
551 : :
552 : 11728552 : tup->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
553 : 11728552 : tup->t_len = ItemIdGetLength(lpp);
554 : 11728552 : tup->t_tableOid = relid;
555 : 11728552 : ItemPointerSet(&(tup->t_self), block, lineoff);
556 : 11728552 : }
557 : :
558 : : /*
559 : : * If the page is all visible, these fields otherwise won't be
560 : : * populated in the loop below.
561 : : */
562 [ + + ]: 45495457 : if (all_visible)
563 : : {
564 [ + - ]: 33766905 : if (check_serializable)
565 : : {
566 : 0 : batchmvcc.visible[ntup] = true;
567 : 0 : }
568 : 33766905 : scan->rs_vistuples[ntup] = lineoff;
569 : 33766905 : }
570 : :
571 : 45495457 : ntup++;
572 [ - + + ]: 54438979 : }
573 : :
574 [ + - ]: 797940 : Assert(ntup <= MaxHeapTuplesPerPage);
575 : :
576 : : /*
577 : : * Unless the page is all visible, test visibility for all tuples in one go.
578 : : * That is considerably more efficient than calling
579 : : * HeapTupleSatisfiesMVCC() one-by-one.
580 : : */
581 [ + + ]: 797940 : if (all_visible)
582 : 407796 : nvis = ntup;
583 : : else
584 : 780288 : nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer,
585 : 390144 : ntup,
586 : : &batchmvcc,
587 : 390144 : scan->rs_vistuples);
588 : :
589 : : /*
590 : : * So far we don't have batch API for testing serializabilty, so do so
591 : : * one-by-one.
592 : : */
593 [ + + ]: 797940 : if (check_serializable)
594 : : {
595 [ + + ]: 101 : for (int i = 0; i < ntup; i++)
596 : : {
597 : 170 : HeapCheckForSerializableConflictOut(batchmvcc.visible[i],
598 : 85 : scan->rs_base.rs_rd,
599 : 85 : &batchmvcc.tuples[i],
600 : 85 : buffer, snapshot);
601 : 85 : }
602 : 16 : }
603 : :
604 : 1595880 : return nvis;
605 : 797940 : }
606 : :
607 : : /*
608 : : * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
609 : : *
610 : : * Preparation currently consists of 1. prune the scan's rs_cbuf page, and 2.
611 : : * fill the rs_vistuples[] array with the OffsetNumbers of visible tuples.
612 : : */
613 : : void
614 : 797940 : heap_prepare_pagescan(TableScanDesc sscan)
615 : : {
616 : 797940 : HeapScanDesc scan = (HeapScanDesc) sscan;
617 : 797940 : Buffer buffer = scan->rs_cbuf;
618 : 797940 : BlockNumber block = scan->rs_cblock;
619 : 797940 : Snapshot snapshot;
620 : 797940 : Page page;
621 : 797940 : int lines;
622 : 797940 : bool all_visible;
623 : 797940 : bool check_serializable;
624 : :
625 [ + - ]: 797940 : Assert(BufferGetBlockNumber(buffer) == block);
626 : :
627 : : /* ensure we're not accidentally being used when not in pagemode */
628 [ + - ]: 797940 : Assert(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE);
629 : 797940 : snapshot = scan->rs_base.rs_snapshot;
630 : :
631 : : /*
632 : : * Prune and repair fragmentation for the whole page, if possible.
633 : : */
634 : 797940 : heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
635 : :
636 : : /*
637 : : * We must hold share lock on the buffer content while examining tuple
638 : : * visibility. Afterwards, however, the tuples we have found to be
639 : : * visible are guaranteed good as long as we hold the buffer pin.
640 : : */
641 : 797940 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
642 : :
643 : 797940 : page = BufferGetPage(buffer);
644 : 797940 : lines = PageGetMaxOffsetNumber(page);
645 : :
646 : : /*
647 : : * If the all-visible flag indicates that all tuples on the page are
648 : : * visible to everyone, we can skip the per-tuple visibility tests.
649 : : *
650 : : * Note: In hot standby, a tuple that's already visible to all
651 : : * transactions on the primary might still be invisible to a read-only
652 : : * transaction in the standby. We partly handle this problem by tracking
653 : : * the minimum xmin of visible tuples as the cut-off XID while marking a
654 : : * page all-visible on the primary and WAL-logging that along with the
655 : : * visibility map SET operation. In hot standby, we wait for (or abort)
656 : : * all transactions that potentially might not see one or more tuples on
657 : : * the page. That's how index-only scans work fine in hot standby. A
658 : : * crucial difference between index-only scans and heap scans is that the
659 : : * index-only scan completely relies on the visibility map whereas heap
660 : : * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
661 : : * the page-level flag can be trusted in the same way, because it might
662 : : * get propagated somehow without being explicitly WAL-logged, e.g. via a
663 : : * full page write. Until we can prove that beyond doubt, let's check each
664 : : * tuple for visibility the hard way.
665 : : */
666 [ + + ]: 797940 : all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
667 : 797940 : check_serializable =
668 : 797940 : CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);
669 : :
670 : : /*
671 : : * We call page_collect_tuples() with constant arguments, to get the
672 : : * compiler to constant fold the constant arguments. Separate calls with
673 : : * constant arguments, rather than variables, are needed on several
674 : : * compilers to actually perform constant folding.
675 : : */
676 [ + + ]: 797940 : if (likely(all_visible))
677 : : {
678 [ + - ]: 407796 : if (likely(!check_serializable))
679 : 815592 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
680 : 407796 : block, lines, true, false);
681 : : else
682 : 0 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
683 : 0 : block, lines, true, true);
684 : 407796 : }
685 : : else
686 : : {
687 [ + + ]: 390144 : if (likely(!check_serializable))
688 : 780256 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
689 : 390128 : block, lines, false, false);
690 : : else
691 : 32 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
692 : 16 : block, lines, false, true);
693 : : }
694 : :
695 : 797940 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
696 : 797940 : }
697 : :
698 : : /*
699 : : * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
700 : : *
701 : : * Read the next block of the scan relation from the read stream and save it
702 : : * in the scan descriptor. It is already pinned.
703 : : */
704 : : static inline void
705 : 855599 : heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
706 : : {
707 [ + - ]: 855599 : Assert(scan->rs_read_stream);
708 : :
709 : : /* release previous scan buffer, if any */
710 [ + + ]: 855599 : if (BufferIsValid(scan->rs_cbuf))
711 : : {
712 : 510160 : ReleaseBuffer(scan->rs_cbuf);
713 : 510160 : scan->rs_cbuf = InvalidBuffer;
714 : 510160 : }
715 : :
716 : : /*
717 : : * Be sure to check for interrupts at least once per page. Checks at
718 : : * higher code levels won't be able to stop a seqscan that encounters many
719 : : * pages' worth of consecutive dead tuples.
720 : : */
721 [ + - ]: 855599 : CHECK_FOR_INTERRUPTS();
722 : :
723 : : /*
724 : : * If the scan direction is changing, reset the prefetch block to the
725 : : * current block. Otherwise, we will incorrectly prefetch the blocks
726 : : * between the prefetch block and the current block again before
727 : : * prefetching blocks in the new, correct scan direction.
728 : : */
729 [ + + ]: 855599 : if (unlikely(scan->rs_dir != dir))
730 : : {
731 : 25 : scan->rs_prefetch_block = scan->rs_cblock;
732 : 25 : read_stream_reset(scan->rs_read_stream);
733 : 25 : }
734 : :
735 : 855599 : scan->rs_dir = dir;
736 : :
737 : 855599 : scan->rs_cbuf = read_stream_next_buffer(scan->rs_read_stream, NULL);
738 [ + + ]: 855599 : if (BufferIsValid(scan->rs_cbuf))
739 : 818392 : scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
740 : 855599 : }
741 : :
742 : : /*
743 : : * heapgettup_initial_block - return the first BlockNumber to scan
744 : : *
745 : : * Returns InvalidBlockNumber when there are no blocks to scan. This can
746 : : * occur with empty tables, and in parallel scans when parallel workers claim
747 : : * all of the pages before we get a chance to fetch our first one.
748 : : */
749 : : static pg_noinline BlockNumber
750 : 344920 : heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
751 : : {
752 [ + - ]: 344920 : Assert(!scan->rs_inited);
753 [ + - ]: 344920 : Assert(scan->rs_base.rs_parallel == NULL);
754 : :
755 : : /* When there are no pages to scan, return InvalidBlockNumber */
756 [ + + + + ]: 344920 : if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
757 : 13136 : return InvalidBlockNumber;
758 : :
759 [ + + ]: 331784 : if (ScanDirectionIsForward(dir))
760 : : {
761 : 331774 : return scan->rs_startblock;
762 : : }
763 : : else
764 : : {
765 : : /*
766 : : * Disable reporting to syncscan logic in a backwards scan; it's not
767 : : * very likely anyone else is doing the same thing at the same time,
768 : : * and much more likely that we'll just bollix things for forward
769 : : * scanners.
770 : : */
771 : 10 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
772 : :
773 : : /*
774 : : * Start from last page of the scan. Ensure we take into account
775 : : * rs_numblocks if it's been adjusted by heap_setscanlimits().
776 : : */
777 [ + + ]: 10 : if (scan->rs_numblocks != InvalidBlockNumber)
778 : 1 : return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
779 : :
780 [ - + ]: 9 : if (scan->rs_startblock > 0)
781 : 0 : return scan->rs_startblock - 1;
782 : :
783 : 9 : return scan->rs_nblocks - 1;
784 : : }
785 : 344920 : }
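
/*
 * Worked example (editor's addition, not part of heapam.c) for the
 * backward branch above: with rs_startblock = 4, rs_numblocks = 3 and
 * rs_nblocks = 10, a backward scan starts at block (4 + 3 - 1) % 10 = 6,
 * the last block of the limited range; without a limit it starts at
 * rs_startblock - 1, or at rs_nblocks - 1 when rs_startblock is 0.
 */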
786 : :
787 : :
788 : : /*
789 : : * heapgettup_start_page - helper function for heapgettup()
790 : : *
791 : : * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
792 : : * to the number of tuples on this page. Also set *lineoff to the first
793 : : * offset to scan with forward scans getting the first offset and backward
794 : : * getting the final offset on the page.
795 : : */
796 : : static Page
797 : 21862 : heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
798 : : OffsetNumber *lineoff)
799 : : {
800 : 21862 : Page page;
801 : :
802 [ + - ]: 21862 : Assert(scan->rs_inited);
803 [ + - ]: 21862 : Assert(BufferIsValid(scan->rs_cbuf));
804 : :
805 : : /* Caller is responsible for ensuring buffer is locked if needed */
806 : 21862 : page = BufferGetPage(scan->rs_cbuf);
807 : :
808 : 21862 : *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
809 : :
810 [ + - ]: 21862 : if (ScanDirectionIsForward(dir))
811 : 21862 : *lineoff = FirstOffsetNumber;
812 : : else
813 : 0 : *lineoff = (OffsetNumber) (*linesleft);
814 : :
815 : : /* lineoff now references the physically previous or next tid */
816 : 43724 : return page;
817 : 21862 : }
818 : :
819 : :
820 : : /*
821 : : * heapgettup_continue_page - helper function for heapgettup()
822 : : *
823 : : * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
824 : : * to the number of tuples left to scan on this page. Also set *lineoff to
825 : : * the next offset to scan according to the ScanDirection in 'dir'.
826 : : */
827 : : static inline Page
828 : 1738132 : heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
829 : : OffsetNumber *lineoff)
830 : : {
831 : 1738132 : Page page;
832 : :
833 [ + - ]: 1738132 : Assert(scan->rs_inited);
834 [ + - ]: 1738132 : Assert(BufferIsValid(scan->rs_cbuf));
835 : :
836 : : /* Caller is responsible for ensuring buffer is locked if needed */
837 : 1738132 : page = BufferGetPage(scan->rs_cbuf);
838 : :
839 [ + - ]: 1738132 : if (ScanDirectionIsForward(dir))
840 : : {
841 : 1738132 : *lineoff = OffsetNumberNext(scan->rs_coffset);
842 : 1738132 : *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
843 : 1738132 : }
844 : : else
845 : : {
846 : : /*
847 : : * The previously returned tuple may have been vacuumed since the
848 : : * previous scan when we use a non-MVCC snapshot, so we must
849 : : * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
850 : : */
851 [ # # ]: 0 : *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
852 : 0 : *linesleft = *lineoff;
853 : : }
854 : :
855 : : /* lineoff now references the physically previous or next tid */
856 : 3476264 : return page;
857 : 1738132 : }
858 : :
859 : : /*
860 : : * heapgettup_advance_block - helper for heap_fetch_next_buffer()
861 : : *
862 : : * Given the current block number, the scan direction, and various information
863 : : * contained in the scan descriptor, calculate the BlockNumber to scan next
864 : : * and return it. If there are no further blocks to scan, return
865 : : * InvalidBlockNumber to indicate this fact to the caller.
866 : : *
867 : : * This should not be called to determine the initial block number -- only for
868 : : * subsequent blocks.
869 : : *
870 : : * This also adjusts rs_numblocks when a limit has been imposed by
871 : : * heap_setscanlimits().
872 : : */
873 : : static inline BlockNumber
874 : 788504 : heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
875 : : {
876 [ + - ]: 788504 : Assert(scan->rs_base.rs_parallel == NULL);
877 : :
878 [ + + ]: 788504 : if (likely(ScanDirectionIsForward(dir)))
879 : : {
880 : 788485 : block++;
881 : :
882 : : /* wrap back to the start of the heap */
883 [ + + ]: 788485 : if (block >= scan->rs_nblocks)
884 : 328737 : block = 0;
885 : :
886 : : /*
887 : : * Report our new scan position for synchronization purposes. We don't
888 : : * do that when moving backwards, however. That would just mess up any
889 : : * other forward-moving scanners.
890 : : *
891 : : * Note: we do this before checking for end of scan so that the final
892 : : * state of the position hint is back at the start of the rel. That's
893 : : * not strictly necessary, but otherwise when you run the same query
894 : : * multiple times the starting position would shift a little bit
895 : : * backwards on every invocation, which is confusing. We don't
896 : : * guarantee any specific ordering in general, though.
897 : : */
898 [ + - ]: 788485 : if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
899 : 0 : ss_report_location(scan->rs_base.rs_rd, block);
900 : :
901 : : /* we're done if we're back at where we started */
902 [ + + ]: 788485 : if (block == scan->rs_startblock)
903 : 328728 : return InvalidBlockNumber;
904 : :
905 : : /* check if the limit imposed by heap_setscanlimits() is met */
906 [ + + ]: 459757 : if (scan->rs_numblocks != InvalidBlockNumber)
907 : : {
908 [ + + ]: 338 : if (--scan->rs_numblocks == 0)
909 : 44 : return InvalidBlockNumber;
910 : 294 : }
911 : :
912 : 459713 : return block;
913 : : }
914 : : else
915 : : {
916 : : /* we're done if the last block is the start position */
917 [ + - ]: 19 : if (block == scan->rs_startblock)
918 : 19 : return InvalidBlockNumber;
919 : :
920 : : /* check if the limit imposed by heap_setscanlimits() is met */
921 [ # # ]: 0 : if (scan->rs_numblocks != InvalidBlockNumber)
922 : : {
923 [ # # ]: 0 : if (--scan->rs_numblocks == 0)
924 : 0 : return InvalidBlockNumber;
925 : 0 : }
926 : :
927 : : /* wrap to the end of the heap when the last page was page 0 */
928 [ # # ]: 0 : if (block == 0)
929 : 0 : block = scan->rs_nblocks;
930 : :
931 : 0 : block--;
932 : :
933 : 0 : return block;
934 : : }
935 : 788504 : }
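
/*
 * Worked example (editor's addition, not part of heapam.c): with
 * rs_nblocks = 8 and rs_startblock = 5 (as a synchronized scan might
 * choose), a forward scan visits blocks 5, 6, 7, 0, 1, 2, 3, 4; the next
 * call advances 4 to 5, which equals rs_startblock, so InvalidBlockNumber
 * is returned and the scan ends.
 */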
936 : :
937 : : /* ----------------
938 : : * heapgettup - fetch next heap tuple
939 : : *
940 : : * Initialize the scan if not already done; then advance to the next
941 : : * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
942 : : * or set scan->rs_ctup.t_data = NULL if no more tuples.
943 : : *
944 : : * Note: the reason nkeys/key are passed separately, even though they are
945 : : * kept in the scan descriptor, is that the caller may not want us to check
946 : : * the scankeys.
947 : : *
948 : : * Note: when we fall off the end of the scan in either direction, we
949 : : * reset rs_inited. This means that a further request with the same
950 : : * scan direction will restart the scan, which is a bit odd, but a
951 : : * request with the opposite scan direction will start a fresh scan
952 : : * in the proper direction. The latter is required behavior for cursors,
953 : : * while the former case is generally undefined behavior in Postgres
954 : : * so we don't care too much.
955 : : * ----------------
956 : : */
957 : : static void
958 : 1742010 : heapgettup(HeapScanDesc scan,
959 : : ScanDirection dir,
960 : : int nkeys,
961 : : ScanKey key)
962 : : {
963 : 1742010 : HeapTuple tuple = &(scan->rs_ctup);
964 : 1742010 : Page page;
965 : 1742010 : OffsetNumber lineoff;
966 : 1742010 : int linesleft;
967 : :
968 [ + + ]: 1742010 : if (likely(scan->rs_inited))
969 : : {
970 : : /* continue from previously returned page/tuple */
971 : 1738132 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
972 : 1738132 : page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
973 : 1738132 : goto continue_page;
974 : : }
975 : :
976 : : /*
977 : : * advance the scan until we find a qualifying tuple or run out of stuff
978 : : * to scan
979 : : */
980 : 25739 : while (true)
981 : : {
982 : 25739 : heap_fetch_next_buffer(scan, dir);
983 : :
984 : : /* did we run out of blocks to scan? */
985 [ + + ]: 25739 : if (!BufferIsValid(scan->rs_cbuf))
986 : 3877 : break;
987 : :
988 [ - + ]: 21862 : Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
989 : :
990 : 21862 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
991 : 21862 : page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
992 : : continue_page:
993 : :
994 : : /*
995 : : * Only continue scanning the page while we have lines left.
996 : : *
997 : : * Note that this protects us from accessing line pointers past
998 : : * PageGetMaxOffsetNumber(), both for forward scans when we resume the
999 : : * table scan, and for when we start scanning a new page.
1000 : : */
1001 [ + + ]: 1765961 : for (; linesleft > 0; linesleft--, lineoff += dir)
1002 : : {
1003 : 1744100 : bool visible;
1004 : 1744100 : ItemId lpp = PageGetItemId(page, lineoff);
1005 : :
1006 [ + + ]: 1744100 : if (!ItemIdIsNormal(lpp))
1007 : 5967 : continue;
1008 : :
1009 : 1738133 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1010 : 1738133 : tuple->t_len = ItemIdGetLength(lpp);
1011 : 1738133 : ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
1012 : :
1013 : 3476266 : visible = HeapTupleSatisfiesVisibility(tuple,
1014 : 1738133 : scan->rs_base.rs_snapshot,
1015 : 1738133 : scan->rs_cbuf);
1016 : :
1017 : 3476266 : HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
1018 : 1738133 : tuple, scan->rs_cbuf,
1019 : 1738133 : scan->rs_base.rs_snapshot);
1020 : :
1021 : : /* skip tuples not visible to this snapshot */
1022 [ + - ]: 1738133 : if (!visible)
1023 : 0 : continue;
1024 : :
1025 : : /* skip any tuples that don't match the scan key */
1026 [ - + # # ]: 1738133 : if (key != NULL &&
1027 : 0 : !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
1028 : 0 : nkeys, key))
1029 : 0 : continue;
1030 : :
1031 : 1738133 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1032 : 1738133 : scan->rs_coffset = lineoff;
1033 : 1738133 : return;
1034 [ + + ]: 1744100 : }
1035 : :
1036 : : /*
1037 : : * if we get here, it means we've exhausted the items on this page and
1038 : : * it's time to move to the next.
1039 : : */
1040 : 21861 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1041 : : }
1042 : :
1043 : : /* end of scan */
1044 [ + - ]: 3877 : if (BufferIsValid(scan->rs_cbuf))
1045 : 0 : ReleaseBuffer(scan->rs_cbuf);
1046 : :
1047 : 3877 : scan->rs_cbuf = InvalidBuffer;
1048 : 3877 : scan->rs_cblock = InvalidBlockNumber;
1049 : 3877 : scan->rs_prefetch_block = InvalidBlockNumber;
1050 : 3877 : tuple->t_data = NULL;
1051 : 3877 : scan->rs_inited = false;
1052 [ - + ]: 1742010 : }
1053 : :
1054 : : /* ----------------
1055 : : * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
1056 : : *
1057 : : * Same API as heapgettup, but used in page-at-a-time mode
1058 : : *
1059 : : * The internal logic is much the same as heapgettup's too, but there are some
1060 : : * differences: we do not take the buffer content lock (that only needs to
1061 : : * happen inside heap_prepare_pagescan), and we iterate through just the
1062 : : * tuples listed in rs_vistuples[] rather than all tuples on the page. Notice
1063 : : * that lineindex is 0-based, where the corresponding loop variable lineoff in
1064 : : * heapgettup is 1-based.
1065 : : * ----------------
1066 : : */
1067 : : static void
1068 : 16173616 : heapgettup_pagemode(HeapScanDesc scan,
1069 : : ScanDirection dir,
1070 : : int nkeys,
1071 : : ScanKey key)
1072 : : {
1073 : 16173616 : HeapTuple tuple = &(scan->rs_ctup);
1074 : 16173616 : Page page;
1075 : 16173616 : uint32 lineindex;
1076 : 16173616 : uint32 linesleft;
1077 : :
1078 [ + + ]: 16173616 : if (likely(scan->rs_inited))
1079 : : {
1080 : : /* continue from previously returned page/tuple */
1081 : 15832055 : page = BufferGetPage(scan->rs_cbuf);
1082 : :
1083 : 15832055 : lineindex = scan->rs_cindex + dir;
1084 [ + + ]: 15832055 : if (ScanDirectionIsForward(dir))
1085 : 15831946 : linesleft = scan->rs_ntuples - lineindex;
1086 : : else
1087 : 109 : linesleft = scan->rs_cindex;
1088 : : /* lineindex now references the next or previous visible tid */
1089 : :
1090 : 15832055 : goto continue_page;
1091 : : }
1092 : :
1093 : : /*
1094 : : * advance the scan until we find a qualifying tuple or run out of stuff
1095 : : * to scan
1096 : : */
1097 : 829858 : while (true)
1098 : : {
1099 : 829858 : heap_fetch_next_buffer(scan, dir);
1100 : :
1101 : : /* did we run out of blocks to scan? */
1102 [ + + ]: 829858 : if (!BufferIsValid(scan->rs_cbuf))
1103 : 33328 : break;
1104 : :
1105 [ - + ]: 796530 : Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
1106 : :
1107 : : /* prune the page and determine visible tuple offsets */
1108 : 796530 : heap_prepare_pagescan((TableScanDesc) scan);
1109 : 796530 : page = BufferGetPage(scan->rs_cbuf);
1110 : 796530 : linesleft = scan->rs_ntuples;
1111 [ + + ]: 796530 : lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
1112 : :
1113 : : /* block is the same for all tuples, set it once outside the loop */
1114 : 796530 : ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);
1115 : :
1116 : : /* lineindex now references the next or previous visible tid */
1117 : : continue_page:
1118 : :
1119 [ + + ]: 18522755 : for (; linesleft > 0; linesleft--, lineindex += dir)
1120 : : {
1121 : 18034458 : ItemId lpp;
1122 : 18034458 : OffsetNumber lineoff;
1123 : :
1124 [ + - ]: 18034458 : Assert(lineindex < scan->rs_ntuples);
1125 : 18034458 : lineoff = scan->rs_vistuples[lineindex];
1126 : 18034458 : lpp = PageGetItemId(page, lineoff);
1127 [ + - ]: 18034458 : Assert(ItemIdIsNormal(lpp));
1128 : :
1129 : 18034458 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1130 : 18034458 : tuple->t_len = ItemIdGetLength(lpp);
1131 : 18034458 : ItemPointerSetOffsetNumber(&tuple->t_self, lineoff);
1132 : :
1133 : : /* skip any tuples that don't match the scan key */
1134 [ + + + + ]: 18034458 : if (key != NULL &&
1135 : 3813470 : !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
1136 : 1906735 : nkeys, key))
1137 : 1894172 : continue;
1138 : :
1139 : 16140286 : scan->rs_cindex = lineindex;
1140 : 16140286 : return;
1141 [ + + ]: 18034458 : }
1142 : : }
1143 : :
1144 : : /* end of scan */
1145 [ + - ]: 33328 : if (BufferIsValid(scan->rs_cbuf))
1146 : 0 : ReleaseBuffer(scan->rs_cbuf);
1147 : 33328 : scan->rs_cbuf = InvalidBuffer;
1148 : 33328 : scan->rs_cblock = InvalidBlockNumber;
1149 : 33328 : scan->rs_prefetch_block = InvalidBlockNumber;
1150 : 33328 : tuple->t_data = NULL;
1151 : 33328 : scan->rs_inited = false;
1152 [ - + ]: 16173616 : }
1153 : :
1154 : :
1155 : : /* ----------------------------------------------------------------
1156 : : * heap access method interface
1157 : : * ----------------------------------------------------------------
1158 : : */
1159 : :
1160 : :
1161 : : TableScanDesc
1162 : 346049 : heap_beginscan(Relation relation, Snapshot snapshot,
1163 : : int nkeys, ScanKey key,
1164 : : ParallelTableScanDesc parallel_scan,
1165 : : uint32 flags)
1166 : : {
1167 : 346049 : HeapScanDesc scan;
1168 : :
1169 : : /*
1170 : : * increment relation ref count while scanning relation
1171 : : *
1172 : : * This is just to make really sure the relcache entry won't go away while
1173 : : * the scan has a pointer to it. Caller should be holding the rel open
1174 : : * anyway, so this is redundant in all normal scenarios...
1175 : : */
1176 : 346049 : RelationIncrementReferenceCount(relation);
1177 : :
1178 : : /*
1179 : : * allocate and initialize scan descriptor
1180 : : */
1181 [ + + ]: 346049 : if (flags & SO_TYPE_BITMAPSCAN)
1182 : : {
1183 : 1942 : BitmapHeapScanDesc bscan = palloc_object(BitmapHeapScanDescData);
1184 : :
1185 : : /*
1186 : : * Bitmap Heap scans do not have any fields that a normal Heap Scan
1187 : : * does not have, so no special initialization is required here.
1188 : : */
1189 : 1942 : scan = (HeapScanDesc) bscan;
1190 : 1942 : }
1191 : : else
1192 : 344107 : scan = (HeapScanDesc) palloc_object(HeapScanDescData);
1193 : :
1194 : 346049 : scan->rs_base.rs_rd = relation;
1195 : 346049 : scan->rs_base.rs_snapshot = snapshot;
1196 : 346049 : scan->rs_base.rs_nkeys = nkeys;
1197 : 346049 : scan->rs_base.rs_flags = flags;
1198 : 346049 : scan->rs_base.rs_parallel = parallel_scan;
1199 : 346049 : scan->rs_strategy = NULL; /* set in initscan */
1200 : 346049 : scan->rs_cbuf = InvalidBuffer;
1201 : :
1202 : : /*
1203 : : * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
1204 : : */
1205 [ + + + + : 346049 : if (!(snapshot && IsMVCCSnapshot(snapshot)))
- + ]
1206 : 4878 : scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1207 : :
1208 : : /* Check that a historic snapshot is not used for non-catalog tables */
1209 [ + + ]: 346049 : if (snapshot &&
1210 [ + - ]: 345049 : IsHistoricMVCCSnapshot(snapshot) &&
1211 [ # # # # : 0 : !RelationIsAccessibleInLogicalDecoding(relation))
# # # # #
# # # ]
1212 : : {
1213 [ # # # # ]: 0 : ereport(ERROR,
1214 : : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
1215 : : errmsg("cannot query non-catalog table \"%s\" during logical decoding",
1216 : : RelationGetRelationName(relation))));
1217 : 0 : }
1218 : :
1219 : : /*
1220 : : * For seqscan and sample scans in a serializable transaction, acquire a
1221 : : * predicate lock on the entire relation. This is required not only to
1222 : : * lock all the matching tuples, but also to conflict with new insertions
1223 : : * into the table. In an indexscan, we take page locks on the index pages
1224 : : * covering the range specified in the scan qual, but in a heap scan there
1225 : : * is nothing more fine-grained to lock. A bitmap scan is a different
1226 : : * story, there we have already scanned the index and locked the index
1227 : : * pages covering the predicate. But in that case we still have to lock
1228 : : * any matching heap tuples. For sample scan we could optimize the locking
1229 : : * to be at least page-level granularity, but we'd need to add per-tuple
1230 : : * locking for that.
1231 : : */
1232 [ + + ]: 346049 : if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
1233 : : {
1234 : : /*
1235 : : * Ensure a missing snapshot is noticed reliably, even if the
1236 : : * isolation mode means predicate locking isn't performed (and
1237 : : * therefore the snapshot isn't used here).
1238 : : */
1239 [ + - ]: 342698 : Assert(snapshot);
1240 : 342698 : PredicateLockRelation(relation, snapshot);
1241 : 342698 : }
1242 : :
1243 : : /* we only need to set this up once */
1244 : 346049 : scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1245 : :
1246 : : /*
1247 : : * Allocate memory to keep track of page allocation for parallel workers
1248 : : * when doing a parallel scan.
1249 : : */
1250 [ + + ]: 346049 : if (parallel_scan != NULL)
1251 : 679 : scan->rs_parallelworkerdata = palloc_object(ParallelBlockTableScanWorkerData);
1252 : : else
1253 : 345370 : scan->rs_parallelworkerdata = NULL;
1254 : :
1255 : : /*
1256 : : * we do this here instead of in initscan() because heap_rescan also calls
1257 : : * initscan() and we don't want to allocate memory again
1258 : : */
1259 [ + + ]: 346049 : if (nkeys > 0)
1260 : 14398 : scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
1261 : : else
1262 : 331651 : scan->rs_base.rs_key = NULL;
1263 : :
1264 : 346049 : initscan(scan, key, false);
1265 : :
1266 : 346049 : scan->rs_read_stream = NULL;
1267 : :
1268 : : /*
1269 : : * Set up a read stream for sequential scans and TID range scans. This
1270 : : * should be done after initscan() because initscan() allocates the
1271 : : * BufferAccessStrategy object passed to the read stream API.
1272 : : */
1273 [ + + + + ]: 346049 : if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
1274 : 3372 : scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
1275 : : {
1276 : 343005 : ReadStreamBlockNumberCB cb;
1277 : :
1278 [ + + ]: 343005 : if (scan->rs_base.rs_parallel)
1279 : 679 : cb = heap_scan_stream_read_next_parallel;
1280 : : else
1281 : 342326 : cb = heap_scan_stream_read_next_serial;
1282 : :
1283 : : /* ---
1284 : : * It is safe to use batchmode as the only locks taken by `cb`
1285 : : * are never taken while waiting for IO:
1286 : : * - SyncScanLock is used in the non-parallel case
1287 : : * - in the parallel case, only spinlocks and atomics are used
1288 : : * ---
1289 : : */
1290 : 343005 : scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
1291 : : READ_STREAM_USE_BATCHING,
1292 : 343005 : scan->rs_strategy,
1293 : 343005 : scan->rs_base.rs_rd,
1294 : : MAIN_FORKNUM,
1295 : 343005 : cb,
1296 : 343005 : scan,
1297 : : 0);
1298 : 343005 : }
1299 [ + + ]: 3044 : else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
1300 : : {
1301 : 1942 : scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT |
1302 : : READ_STREAM_USE_BATCHING,
1303 : 1942 : scan->rs_strategy,
1304 : 1942 : scan->rs_base.rs_rd,
1305 : : MAIN_FORKNUM,
1306 : : bitmapheap_stream_read_next,
1307 : 1942 : scan,
1308 : : sizeof(TBMIterateResult));
1309 : 1942 : }
1310 : :
1311 : :
1312 : 692098 : return (TableScanDesc) scan;
1313 : 346049 : }
1314 : :
1315 : : void
1316 : 2698 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
1317 : : bool allow_strat, bool allow_sync, bool allow_pagemode)
1318 : : {
1319 : 2698 : HeapScanDesc scan = (HeapScanDesc) sscan;
1320 : :
1321 [ + + ]: 2698 : if (set_params)
1322 : : {
1323 [ + - ]: 4 : if (allow_strat)
1324 : 4 : scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
1325 : : else
1326 : 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
1327 : :
1328 [ + + ]: 4 : if (allow_sync)
1329 : 2 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
1330 : : else
1331 : 2 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
1332 : :
1333 [ + - + - : 4 : if (allow_pagemode && scan->rs_base.rs_snapshot &&
# # ]
1334 [ - + ]: 4 : IsMVCCSnapshot(scan->rs_base.rs_snapshot))
1335 : 4 : scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
1336 : : else
1337 : 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1338 : 4 : }
1339 : :
1340 : : /*
1341 : : * unpin scan buffers
1342 : : */
1343 [ + + ]: 2698 : if (BufferIsValid(scan->rs_cbuf))
1344 : : {
1345 : 401 : ReleaseBuffer(scan->rs_cbuf);
1346 : 401 : scan->rs_cbuf = InvalidBuffer;
1347 : 401 : }
1348 : :
1349 : : /*
1350 : : * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
1351 : : * additional data vs a normal HeapScan
1352 : : */
1353 : :
1354 : : /*
1355 : : * The read stream is reset on rescan. This must be done before
1356 : : * initscan(), as some state referred to by read_stream_reset() is reset
1357 : : * in initscan().
1358 : : */
1359 [ + + ]: 2698 : if (scan->rs_read_stream)
1360 : 2693 : read_stream_reset(scan->rs_read_stream);
1361 : :
1362 : : /*
1363 : : * reinitialize scan descriptor
1364 : : */
1365 : 2698 : initscan(scan, key, true);
1366 : 2698 : }
1367 : :
1368 : : void
1369 : 345325 : heap_endscan(TableScanDesc sscan)
1370 : : {
1371 : 345325 : HeapScanDesc scan = (HeapScanDesc) sscan;
1372 : :
1373 : : /* Note: no locking manipulations needed */
1374 : :
1375 : : /*
1376 : : * unpin scan buffers
1377 : : */
1378 [ + + ]: 345325 : if (BufferIsValid(scan->rs_cbuf))
1379 : 307321 : ReleaseBuffer(scan->rs_cbuf);
1380 : :
1381 : : /*
1382 : : * Must free the read stream before freeing the BufferAccessStrategy.
1383 : : */
1384 [ + + ]: 345325 : if (scan->rs_read_stream)
1385 : 344237 : read_stream_end(scan->rs_read_stream);
1386 : :
1387 : : /*
1388 : : * decrement relation reference count and free scan descriptor storage
1389 : : */
1390 : 345325 : RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1391 : :
1392 [ + + ]: 345325 : if (scan->rs_base.rs_key)
1393 : 14390 : pfree(scan->rs_base.rs_key);
1394 : :
1395 [ + - ]: 345325 : if (scan->rs_strategy != NULL)
1396 : 0 : FreeAccessStrategy(scan->rs_strategy);
1397 : :
1398 [ + + ]: 345325 : if (scan->rs_parallelworkerdata != NULL)
1399 : 679 : pfree(scan->rs_parallelworkerdata);
1400 : :
1401 [ + + ]: 345325 : if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1402 : 6743 : UnregisterSnapshot(scan->rs_base.rs_snapshot);
1403 : :
1404 : 345325 : pfree(scan);
1405 : 345325 : }
1406 : :
1407 : : HeapTuple
1408 : 1670031 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
1409 : : {
1410 : 1670031 : HeapScanDesc scan = (HeapScanDesc) sscan;
1411 : :
1412 : : /*
1413 : : * This is still widely used directly, without going through table AM, so
1414 : : * add a safety check. It's possible we should, at a later point,
1415 : : * downgrade this to an assert. The reason for checking the AM routine,
1416 : : * rather than the AM oid, is that this allows to write regression tests
1417 : : * that create another AM reusing the heap handler.
1418 : : */
1419 [ + - ]: 1670031 : if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1420 [ # # # # ]: 0 : ereport(ERROR,
1421 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1422 : : errmsg_internal("only heap AM is supported")));
1423 : :
1424 : : /*
1425 : : * We don't expect direct calls to heap_getnext with valid CheckXidAlive
1426 : : * for catalog or regular tables. See detailed comments in xact.c where
1427 : : * these variables are declared. Normally we have such a check at tableam
1428 : : * level API but this is called from many places so we need to ensure it
1429 : : * here.
1430 : : */
1431 [ + - + - ]: 1670031 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1432 [ # # # # ]: 0 : elog(ERROR, "unexpected heap_getnext call during logical decoding");
1433 : :
1434 : : /* Note: no locking manipulations needed */
1435 : :
1436 [ + + ]: 1670031 : if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1437 : 86758 : heapgettup_pagemode(scan, direction,
1438 : 43379 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1439 : : else
1440 : 3253304 : heapgettup(scan, direction,
1441 : 1626652 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1442 : :
1443 [ + + ]: 1670031 : if (scan->rs_ctup.t_data == NULL)
1444 : 10042 : return NULL;
1445 : :
1446 : : /*
1447 : : * if we get here it means we have a new current scan tuple, so point to
1448 : : * the proper return buffer and return the tuple.
1449 : : */
1450 : :
1451 [ + - + - : 1659989 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
# # ]
1452 : :
1453 : 1659989 : return &scan->rs_ctup;
1454 : 1670031 : }
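
/*
 * Editor's illustration - not part of heapam.c. A minimal sketch of the
 * classic heap_getnext() loop, assuming the usual backend headers, that the
 * caller holds a suitable relation lock, and that "snapshot" stays
 * registered for the scan's lifetime. process_tuple() is a hypothetical
 * callback.
 */
static void
scan_all_tuples(Relation rel, Snapshot snapshot)
{
	TableScanDesc scan;
	HeapTuple	tuple;

	scan = table_beginscan(rel, snapshot, 0, NULL);
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* tuple points into a shared buffer; don't pfree or retain it */
		process_tuple(tuple);
	}
	table_endscan(scan);
}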
1455 : :
1456 : : bool
1457 : 16244684 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
1458 : : {
1459 : 16244684 : HeapScanDesc scan = (HeapScanDesc) sscan;
1460 : :
1461 : : /* Note: no locking manipulations needed */
1462 : :
1463 [ + + ]: 16244684 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1464 : 16129326 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1465 : : else
1466 : 115358 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1467 : :
1468 [ + + ]: 16244684 : if (scan->rs_ctup.t_data == NULL)
1469 : : {
1470 : 27129 : ExecClearTuple(slot);
1471 : 27129 : return false;
1472 : : }
1473 : :
1474 : : /*
1475 : : * if we get here it means we have a new current scan tuple, so point to
1476 : : * the proper return buffer and return the tuple.
1477 : : */
1478 : :
1479 [ + - + - : 16217555 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
# # ]
1480 : :
1481 : 32435110 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1482 : 16217555 : scan->rs_cbuf);
1483 : 16217555 : return true;
1484 : 16244684 : }
1485 : :
1486 : : void
1487 : 343 : heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
1488 : : ItemPointer maxtid)
1489 : : {
1490 : 343 : HeapScanDesc scan = (HeapScanDesc) sscan;
1491 : 343 : BlockNumber startBlk;
1492 : 343 : BlockNumber numBlks;
1493 : 343 : ItemPointerData highestItem;
1494 : 343 : ItemPointerData lowestItem;
1495 : :
1496 : : /*
1497 : : * For relations without any pages, we can simply leave the TID range
1498 : : * unset. There will be no tuples to scan, therefore no tuples outside
1499 : : * the given TID range.
1500 : : */
1501 [ + + ]: 343 : if (scan->rs_nblocks == 0)
1502 : 2 : return;
1503 : :
1504 : : /*
1505 : : * Set up some ItemPointers which point to the first and last possible
1506 : : * tuples in the heap.
1507 : : */
1508 : 341 : ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
1509 : 341 : ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
1510 : :
1511 : : /*
1512 : : * If the given maximum TID is below the highest possible TID in the
1513 : : * relation, then restrict the range to that, otherwise we scan to the end
1514 : : * of the relation.
1515 : : */
1516 [ + + ]: 341 : if (ItemPointerCompare(maxtid, &highestItem) < 0)
1517 : 42 : ItemPointerCopy(maxtid, &highestItem);
1518 : :
1519 : : /*
1520 : : * If the given minimum TID is above the lowest possible TID in the
1521 : : * relation, then restrict the range to only scan for TIDs above that.
1522 : : */
1523 [ + + ]: 341 : if (ItemPointerCompare(mintid, &lowestItem) > 0)
1524 : 303 : ItemPointerCopy(mintid, &lowestItem);
1525 : :
1526 : : /*
1527 : : * Check for an empty range, and protect against a would-be negative
1528 : : * result from the numBlks calculation below.
1529 : : */
1530 [ + + ]: 341 : if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
1531 : : {
1532 : : /* Set an empty range of blocks to scan */
1533 : 6 : heap_setscanlimits(sscan, 0, 0);
1534 : 6 : return;
1535 : : }
1536 : :
1537 : : /*
1538 : : * Calculate the first block and the number of blocks we must scan. We
1539 : : * could be more aggressive here and perform some more validation to try
1540 : : * to further narrow the scope of blocks to scan, by checking whether
1541 : : * lowestItem has an offset above MaxOffsetNumber. In this case, we could
1542 : : * advance startBlk by one. Likewise, if highestItem has an offset of 0
1543 : : * we could scan one fewer blocks. However, such an optimization does not
1544 : : * seem worth troubling over, currently.
1545 : : */
1546 : 335 : startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
1547 : :
1548 : 1005 : numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
1549 : 670 : ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
1550 : :
1551 : : /* Set the start block and number of blocks to scan */
1552 : 335 : heap_setscanlimits(sscan, startBlk, numBlks);
1553 : :
1554 : : /* Finally, set the TID range in sscan */
1555 : 335 : ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
1556 : 335 : ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
1557 [ - + ]: 343 : }
1558 : :
1559 : : bool
1560 : 878 : heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
1561 : : TupleTableSlot *slot)
1562 : : {
1563 : 878 : HeapScanDesc scan = (HeapScanDesc) sscan;
1564 : 878 : ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
1565 : 878 : ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
1566 : :
1567 : : /* Note: no locking manipulations needed */
1568 : 878 : for (;;)
1569 : : {
1570 [ + - ]: 911 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1571 : 911 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1572 : : else
1573 : 0 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1574 : :
1575 [ + + ]: 911 : if (scan->rs_ctup.t_data == NULL)
1576 : : {
1577 : 34 : ExecClearTuple(slot);
1578 : 34 : return false;
1579 : : }
1580 : :
1581 : : /*
1582 : : * heap_set_tidrange will have used heap_setscanlimits to limit the
1583 : : * range of pages we scan to only ones that can contain the TID range
1584 : : * we're scanning for. Here we must filter out any tuples from these
1585 : : * pages that are outside of that range.
1586 : : */
1587 [ + + ]: 877 : if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1588 : : {
1589 : 31 : ExecClearTuple(slot);
1590 : :
1591 : : /*
1592 : : * When scanning backwards, the TIDs will be in descending order.
1593 : : * Future tuples in this direction will be lower still, so we can
1594 : : * just return false to indicate there will be no more tuples.
1595 : : */
1596 [ - + ]: 31 : if (ScanDirectionIsBackward(direction))
1597 : 0 : return false;
1598 : :
1599 : 31 : continue;
1600 : : }
1601 : :
1602 : : /*
1603 : : * Likewise for the final page, we must filter out TIDs greater than
1604 : : * maxtid.
1605 : : */
1606 [ + + ]: 846 : if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1607 : : {
1608 : 20 : ExecClearTuple(slot);
1609 : :
1610 : : /*
1611 : : * When scanning forward, the TIDs will be in ascending order.
1612 : : * Future tuples in this direction will be higher still, so we can
1613 : : * just return false to indicate there will be no more tuples.
1614 : : */
1615 [ + + ]: 20 : if (ScanDirectionIsForward(direction))
1616 : 18 : return false;
1617 : 2 : continue;
1618 : : }
1619 : :
1620 : 826 : break;
1621 : : }
1622 : :
1623 : : /*
1624 : : * if we get here it means we have a new current scan tuple, so point to
1625 : : * the proper return buffer and return the tuple.
1626 : : */
1627 [ + - + - : 826 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
# # ]
1628 : :
1629 : 826 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1630 : 826 : return true;
1631 : 878 : }
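
/*
 * Editor's illustration - not part of heapam.c. A hedged sketch of driving
 * the two TID-range routines above, assuming "sscan" was opened with the
 * SO_TYPE_TIDRANGESCAN flag (e.g. via table_beginscan_tidrange()) and that
 * "slot" can hold buffer heap tuples. consume_slot() is a hypothetical
 * callback.
 */
static void
scan_tid_range(TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid,
			   TupleTableSlot *slot)
{
	/* Restrict the scan to the block range implied by [mintid, maxtid]. */
	heap_set_tidrange(sscan, mintid, maxtid);

	/* Tuples outside the TID range are filtered out for us. */
	while (heap_getnextslot_tidrange(sscan, ForwardScanDirection, slot))
		consume_slot(slot);
}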
1632 : :
1633 : : /*
1634 : : * heap_fetch - retrieve tuple with given tid
1635 : : *
1636 : : * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1637 : : * the tuple, fill in the remaining fields of *tuple, and check the tuple
1638 : : * against the specified snapshot.
1639 : : *
1640 : : * If successful (tuple found and passes snapshot time qual), then *userbuf
1641 : : * is set to the buffer holding the tuple and true is returned. The caller
1642 : : * must unpin the buffer when done with the tuple.
1643 : : *
1644 : : * If the tuple is not found (ie, item number references a deleted slot),
1645 : : * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
1646 : : * and false is returned.
1647 : : *
1648 : : * If the tuple is found but fails the time qual check, then the behavior
1649 : : * depends on the keep_buf parameter. If keep_buf is false, the results
1650 : : * are the same as for the tuple-not-found case. If keep_buf is true,
1651 : : * then tuple->t_data and *userbuf are returned as for the success case,
1652 : : * and again the caller must unpin the buffer; but false is returned.
1653 : : *
1654 : : * heap_fetch does not follow HOT chains: only the exact TID requested will
1655 : : * be fetched.
1656 : : *
1657 : : * It is somewhat inconsistent that we ereport() on invalid block number but
1658 : : * return false on invalid item number. There are a couple of reasons though.
1659 : : * One is that the caller can relatively easily check the block number for
1660 : : * validity, but cannot check the item number without reading the page
1661 : : * himself. Another is that when we are following a t_ctid link, we can be
1662 : : * reasonably confident that the page number is valid (since VACUUM shouldn't
1663 : : * truncate off the destination page without having killed the referencing
1664 : : * tuple first), but the item number might well not be good.
1665 : : */
1666 : : bool
1667 : 411927 : heap_fetch(Relation relation,
1668 : : Snapshot snapshot,
1669 : : HeapTuple tuple,
1670 : : Buffer *userbuf,
1671 : : bool keep_buf)
1672 : : {
1673 : 411927 : ItemPointer tid = &(tuple->t_self);
1674 : 411927 : ItemId lp;
1675 : 411927 : Buffer buffer;
1676 : 411927 : Page page;
1677 : 411927 : OffsetNumber offnum;
1678 : 411927 : bool valid;
1679 : :
1680 : : /*
1681 : : * Fetch and pin the appropriate page of the relation.
1682 : : */
1683 : 411927 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1684 : :
1685 : : /*
1686 : : * Need share lock on buffer to examine tuple commit status.
1687 : : */
1688 : 411927 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
1689 : 411927 : page = BufferGetPage(buffer);
1690 : :
1691 : : /*
1692 : : * We'd better check for an out-of-range offnum in case a VACUUM has run
1693 : : * since the TID was obtained.
1694 : : */
1695 : 411927 : offnum = ItemPointerGetOffsetNumber(tid);
1696 [ + - + + ]: 411927 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1697 : : {
1698 : 1 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1699 : 1 : ReleaseBuffer(buffer);
1700 : 1 : *userbuf = InvalidBuffer;
1701 : 1 : tuple->t_data = NULL;
1702 : 1 : return false;
1703 : : }
1704 : :
1705 : : /*
1706 : : * get the item line pointer corresponding to the requested tid
1707 : : */
1708 : 411926 : lp = PageGetItemId(page, offnum);
1709 : :
1710 : : /*
1711 : : * Must check for deleted tuple.
1712 : : */
1713 [ + - ]: 411926 : if (!ItemIdIsNormal(lp))
1714 : : {
1715 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1716 : 0 : ReleaseBuffer(buffer);
1717 : 0 : *userbuf = InvalidBuffer;
1718 : 0 : tuple->t_data = NULL;
1719 : 0 : return false;
1720 : : }
1721 : :
1722 : : /*
1723 : : * fill in *tuple fields
1724 : : */
1725 : 411926 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1726 : 411926 : tuple->t_len = ItemIdGetLength(lp);
1727 : 411926 : tuple->t_tableOid = RelationGetRelid(relation);
1728 : :
1729 : : /*
1730 : : * check tuple visibility, then release lock
1731 : : */
1732 : 411926 : valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1733 : :
1734 [ + + ]: 411926 : if (valid)
1735 : 823842 : PredicateLockTID(relation, &(tuple->t_self), snapshot,
1736 : 411921 : HeapTupleHeaderGetXmin(tuple->t_data));
1737 : :
1738 : 411926 : HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1739 : :
1740 : 411926 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1741 : :
1742 [ + + ]: 411926 : if (valid)
1743 : : {
1744 : : /*
1745 : : * All checks passed, so return the tuple as valid. Caller is now
1746 : : * responsible for releasing the buffer.
1747 : : */
1748 : 411921 : *userbuf = buffer;
1749 : :
1750 : 411921 : return true;
1751 : : }
1752 : :
1753 : : /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1754 [ - + ]: 5 : if (keep_buf)
1755 : 0 : *userbuf = buffer;
1756 : : else
1757 : : {
1758 : 5 : ReleaseBuffer(buffer);
1759 : 5 : *userbuf = InvalidBuffer;
1760 : 5 : tuple->t_data = NULL;
1761 : : }
1762 : :
1763 : 5 : return false;
1764 : 411927 : }
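
/*
 * Editor's illustration - not part of heapam.c. Minimal heap_fetch() usage
 * honoring the contract documented above: on success the caller owns a pin
 * on the returned buffer and must release it. A sketch, assuming "tid" came
 * from an index entry or a t_ctid link.
 */
static bool
fetch_and_inspect(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buf;

	tuple.t_self = *tid;		/* t_self carries the TID to fetch */
	if (!heap_fetch(rel, snapshot, &tuple, &buf, false))
		return false;			/* deleted slot, or failed the snapshot */

	/* ... inspect tuple.t_data / tuple.t_len while the pin is held ... */

	ReleaseBuffer(buf);
	return true;
}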
1765 : :
1766 : : /*
1767 : : * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1768 : : *
1769 : : * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1770 : : * of a HOT chain), and buffer is the buffer holding this tuple. We search
1771 : : * for the first chain member satisfying the given snapshot. If one is
1772 : : * found, we update *tid to reference that tuple's offset number, and
1773 : : * return true. If no match, return false without modifying *tid.
1774 : : *
1775 : : * heapTuple is a caller-supplied buffer. When a match is found, we return
1776 : : * the tuple here, in addition to updating *tid. If no match is found, the
1777 : : * contents of this buffer on return are undefined.
1778 : : *
1779 : : * If all_dead is not NULL, we check non-visible tuples to see if they are
1780 : : * globally dead; *all_dead is set true if all members of the HOT chain
1781 : : * are vacuumable, false if not.
1782 : : *
1783 : : * Unlike heap_fetch, the caller must already have pin and (at least) share
1784 : : * lock on the buffer; it is still pinned/locked at exit.
1785 : : */
1786 : : bool
1787 : 4375432 : heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1788 : : Snapshot snapshot, HeapTuple heapTuple,
1789 : : bool *all_dead, bool first_call)
1790 : : {
1791 : 4375432 : Page page = BufferGetPage(buffer);
1792 : 4375432 : TransactionId prev_xmax = InvalidTransactionId;
1793 : 4375432 : BlockNumber blkno;
1794 : 4375432 : OffsetNumber offnum;
1795 : 4375432 : bool at_chain_start;
1796 : 4375432 : bool valid;
1797 : 4375432 : bool skip;
1798 : 4375432 : GlobalVisState *vistest = NULL;
1799 : :
1800 : : /* If this is not the first call, the previous call returned a (live!) tuple */
1801 [ + + ]: 4375432 : if (all_dead)
1802 : 3810425 : *all_dead = first_call;
1803 : :
1804 : 4375432 : blkno = ItemPointerGetBlockNumber(tid);
1805 : 4375432 : offnum = ItemPointerGetOffsetNumber(tid);
1806 : 4375432 : at_chain_start = first_call;
1807 : 4375432 : skip = !first_call;
1808 : :
1809 : : /* XXX: we should assert that a snapshot is pushed or registered */
1810 [ + - ]: 4375432 : Assert(TransactionIdIsValid(RecentXmin));
1811 [ + - ]: 4375432 : Assert(BufferGetBlockNumber(buffer) == blkno);
1812 : :
1813 : : /* Scan through possible multiple members of HOT-chain */
1814 : 4503461 : for (;;)
1815 : : {
1816 : 4539397 : ItemId lp;
1817 : :
1818 : : /* check for bogus TID */
1819 [ + - - + ]: 4539397 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1820 : 0 : break;
1821 : :
1822 : 4539397 : lp = PageGetItemId(page, offnum);
1823 : :
1824 : : /* check for unused, dead, or redirected items */
1825 [ + + ]: 4539397 : if (!ItemIdIsNormal(lp))
1826 : : {
1827 : : /* We should only see a redirect at start of chain */
1828 [ + + - + ]: 106972 : if (ItemIdIsRedirected(lp) && at_chain_start)
1829 : : {
1830 : : /* Follow the redirect */
1831 : 35936 : offnum = ItemIdGetRedirect(lp);
1832 : 35936 : at_chain_start = false;
1833 : 35936 : continue;
1834 : : }
1835 : : /* else must be end of chain */
1836 : 71036 : break;
1837 : : }
1838 : :
1839 : : /*
1840 : : * Update heapTuple to point to the element of the HOT chain we're
1841 : : * currently investigating. Having t_self set correctly is important
1842 : : * because the SSI checks and the *Satisfies routine for historical
1843 : : * MVCC snapshots need the correct tid to decide about the visibility.
1844 : : */
1845 : 4432425 : heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1846 : 4432425 : heapTuple->t_len = ItemIdGetLength(lp);
1847 : 4432425 : heapTuple->t_tableOid = RelationGetRelid(relation);
1848 : 4432425 : ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1849 : :
1850 : : /*
1851 : : * Shouldn't see a HEAP_ONLY tuple at chain start.
1852 : : */
1853 [ + + + - ]: 4432425 : if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1854 : 0 : break;
1855 : :
1856 : : /*
1857 : : * The xmin should match the previous xmax value, else chain is
1858 : : * broken.
1859 : : */
1860 [ + + + - ]: 4432425 : if (TransactionIdIsValid(prev_xmax) &&
1861 : 128029 : !TransactionIdEquals(prev_xmax,
1862 : : HeapTupleHeaderGetXmin(heapTuple->t_data)))
1863 : 0 : break;
1864 : :
1865 : : /*
1866 : : * When first_call is true (and thus, skip is initially false) we'll
1867 : : * return the first tuple we find. But on later passes, heapTuple
1868 : : * will initially be pointing to the tuple we returned last time.
1869 : : * Returning it again would be incorrect (and would loop forever), so
1870 : : * we skip it and return the next match we find.
1871 : : */
1872 [ + + ]: 4432425 : if (!skip)
1873 : : {
1874 : : /* If it's visible per the snapshot, we must return it */
1875 : 4428588 : valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1876 : 8857176 : HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1877 : 4428588 : buffer, snapshot);
1878 : :
1879 [ + + ]: 4428588 : if (valid)
1880 : : {
1881 : 2327933 : ItemPointerSetOffsetNumber(tid, offnum);
1882 : 4655866 : PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1883 : 2327933 : HeapTupleHeaderGetXmin(heapTuple->t_data));
1884 [ + + ]: 2327933 : if (all_dead)
1885 : 1822936 : *all_dead = false;
1886 : 2327933 : return true;
1887 : : }
1888 : 2100655 : }
1889 : 2104492 : skip = false;
1890 : :
1891 : : /*
1892 : : * If we can't see it, maybe no one else can either. At caller
1893 : : * request, check whether all chain members are dead to all
1894 : : * transactions.
1895 : : *
1896 : : * Note: if you change the criterion here for what is "dead", fix the
1897 : : * planner's get_actual_variable_range() function to match.
1898 : : */
1899 [ + + + + ]: 2104492 : if (all_dead && *all_dead)
1900 : : {
1901 [ + + ]: 2027116 : if (!vistest)
1902 : 2004370 : vistest = GlobalVisTestFor(relation);
1903 : :
1904 [ + + ]: 2027116 : if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1905 : 1961516 : *all_dead = false;
1906 : 2027116 : }
1907 : :
1908 : : /*
1909 : : * Check to see if HOT chain continues past this tuple; if so fetch
1910 : : * the next offnum and loop around.
1911 : : */
1912 [ + + ]: 2104492 : if (HeapTupleIsHotUpdated(heapTuple))
1913 : : {
1914 [ - + ]: 128029 : Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1915 : : blkno);
1916 : 128029 : offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1917 : 128029 : at_chain_start = false;
1918 : 128029 : prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1919 : 128029 : }
1920 : : else
1921 : 1976463 : break; /* end of chain */
1922 [ + + + + ]: 4539397 : }
1923 : :
1924 : 2047499 : return false;
1925 : 4375432 : }
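
/*
 * Editor's illustration - not part of heapam.c. A sketch of walking a HOT
 * chain with heap_hot_search_buffer(), toggling first_call exactly as the
 * comment above describes. With an MVCC snapshot at most one member is
 * visible; a non-MVCC snapshot such as SnapshotAny can yield several. The
 * caller must pin and (at least) share-lock the buffer, per the contract.
 */
static int
count_visible_chain_members(Relation rel, Snapshot snapshot,
							ItemPointerData tid)
{
	Buffer		buf;
	HeapTupleData heapTuple;
	bool		first_call = true;
	int			nvisible = 0;

	buf = ReadBuffer(rel, ItemPointerGetBlockNumber(&tid));
	LockBuffer(buf, BUFFER_LOCK_SHARE);

	while (heap_hot_search_buffer(&tid, rel, buf, snapshot, &heapTuple,
								  NULL, first_call))
	{
		nvisible++;
		first_call = false;		/* skip the member just returned */
	}

	UnlockReleaseBuffer(buf);
	return nvisible;
}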
1926 : :
1927 : : /*
1928 : : * heap_get_latest_tid - get the latest tid of a specified tuple
1929 : : *
1930 : : * Actually, this gets the latest version that is visible according to the
1931 : : * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
1932 : : * possibly uncommitted version.
1933 : : *
1934 : : * *tid is both an input and an output parameter: it is updated to
1935 : : * show the latest version of the row. Note that it will not be changed
1936 : : * if no version of the row passes the snapshot test.
1937 : : */
1938 : : void
1939 : 49 : heap_get_latest_tid(TableScanDesc sscan,
1940 : : ItemPointer tid)
1941 : : {
1942 : 49 : Relation relation = sscan->rs_rd;
1943 : 49 : Snapshot snapshot = sscan->rs_snapshot;
1944 : 49 : ItemPointerData ctid;
1945 : 49 : TransactionId priorXmax;
1946 : :
1947 : : /*
1948 : : * table_tuple_get_latest_tid() verified that the passed-in tid is valid.
1949 : : * The t_ctid links, however, are merely assumed to be valid - there
1950 : : * shouldn't be invalid ones in the table.
1951 : : */
1952 [ + - ]: 49 : Assert(ItemPointerIsValid(tid));
1953 : :
1954 : : /*
1955 : : * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1956 : : * need to examine, and *tid is the TID we will return if ctid turns out
1957 : : * to be bogus.
1958 : : *
1959 : : * Note that we will loop until we reach the end of the t_ctid chain.
1960 : : * Depending on the snapshot passed, there might be at most one visible
1961 : : * version of the row, but we don't try to optimize for that.
1962 : : */
1963 : 49 : ctid = *tid;
1964 : 49 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1965 : 64 : for (;;)
1966 : : {
1967 : 64 : Buffer buffer;
1968 : 64 : Page page;
1969 : 64 : OffsetNumber offnum;
1970 : 64 : ItemId lp;
1971 : 64 : HeapTupleData tp;
1972 : 64 : bool valid;
1973 : :
1974 : : /*
1975 : : * Read, pin, and lock the page.
1976 : : */
1977 : 64 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1978 : 64 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
1979 : 64 : page = BufferGetPage(buffer);
1980 : :
1981 : : /*
1982 : : * Check for bogus item number. This is not treated as an error
1983 : : * condition because it can happen while following a t_ctid link. We
1984 : : * just assume that the prior tid is OK and return it unchanged.
1985 : : */
1986 : 64 : offnum = ItemPointerGetOffsetNumber(&ctid);
1987 [ + - - + ]: 64 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1988 : : {
1989 : 0 : UnlockReleaseBuffer(buffer);
1990 : 0 : break;
1991 : : }
1992 : 64 : lp = PageGetItemId(page, offnum);
1993 [ + - ]: 64 : if (!ItemIdIsNormal(lp))
1994 : : {
1995 : 0 : UnlockReleaseBuffer(buffer);
1996 : 0 : break;
1997 : : }
1998 : :
1999 : : /* OK to access the tuple */
2000 : 64 : tp.t_self = ctid;
2001 : 64 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2002 : 64 : tp.t_len = ItemIdGetLength(lp);
2003 : 64 : tp.t_tableOid = RelationGetRelid(relation);
2004 : :
2005 : : /*
2006 : : * After following a t_ctid link, we might arrive at an unrelated
2007 : : * tuple. Check for XMIN match.
2008 : : */
2009 [ + + + - ]: 64 : if (TransactionIdIsValid(priorXmax) &&
2010 : 15 : !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2011 : : {
2012 : 0 : UnlockReleaseBuffer(buffer);
2013 : 0 : break;
2014 : : }
2015 : :
2016 : : /*
2017 : : * Check tuple visibility; if visible, set it as the new result
2018 : : * candidate.
2019 : : */
2020 : 64 : valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2021 : 64 : HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2022 [ + + ]: 64 : if (valid)
2023 : 45 : *tid = ctid;
2024 : :
2025 : : /*
2026 : : * If there's a valid t_ctid link, follow it, else we're done.
2027 : : */
2028 [ + + ]: 64 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2029 [ + + ]: 27 : HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2030 [ + - + + ]: 19 : HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
2031 : 19 : ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2032 : : {
2033 : 49 : UnlockReleaseBuffer(buffer);
2034 : 49 : break;
2035 : : }
2036 : :
2037 : 15 : ctid = tp.t_data->t_ctid;
2038 : 15 : priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2039 : 15 : UnlockReleaseBuffer(buffer);
2040 [ - + + ]: 64 : } /* end of loop */
2041 : 49 : }
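
/*
 * Editor's illustration - not part of heapam.c. A hedged sketch of the
 * SnapshotDirty usage suggested in the header comment above: chase a row to
 * its very latest, possibly uncommitted, version. Assumes "tid" is a valid
 * TID in "rel"; callers normally go through table_tuple_get_latest_tid()
 * rather than calling in here directly.
 */
static ItemPointerData
very_latest_tid(Relation rel, ItemPointerData tid)
{
	SnapshotData SnapshotDirty;
	TableScanDesc scan;

	InitDirtySnapshot(SnapshotDirty);
	scan = table_beginscan(rel, &SnapshotDirty, 0, NULL);
	heap_get_latest_tid(scan, &tid);	/* updates tid in place, if at all */
	table_endscan(scan);
	return tid;
}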
2042 : :
2043 : :
2044 : : /*
2045 : : * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
2046 : : *
2047 : : * This is called after we have waited for the XMAX transaction to terminate.
2048 : : * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
2049 : : * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
2050 : : * hint bit if possible --- but beware that that may not yet be possible,
2051 : : * if the transaction committed asynchronously.
2052 : : *
2053 : : * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
2054 : : * even if it commits.
2055 : : *
2056 : : * Hence callers should look only at XMAX_INVALID.
2057 : : *
2058 : : * Note this is not allowed for tuples whose xmax is a multixact.
2059 : : */
2060 : : static void
2061 : 0 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
2062 : : {
2063 [ # # ]: 0 : Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
2064 [ # # ]: 0 : Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
2065 : :
2066 [ # # ]: 0 : if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
2067 : : {
2068 [ # # # # ]: 0 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
2069 : 0 : TransactionIdDidCommit(xid))
2070 : 0 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
2071 : 0 : xid);
2072 : : else
2073 : 0 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
2074 : : InvalidTransactionId);
2075 : 0 : }
2076 : 0 : }
2077 : :
2078 : :
2079 : : /*
2080 : : * GetBulkInsertState - prepare status object for a bulk insert
2081 : : */
2082 : : BulkInsertState
2083 : 664 : GetBulkInsertState(void)
2084 : : {
2085 : 664 : BulkInsertState bistate;
2086 : :
2087 : 664 : bistate = (BulkInsertState) palloc_object(BulkInsertStateData);
2088 : 664 : bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2089 : 664 : bistate->current_buf = InvalidBuffer;
2090 : 664 : bistate->next_free = InvalidBlockNumber;
2091 : 664 : bistate->last_free = InvalidBlockNumber;
2092 : 664 : bistate->already_extended_by = 0;
2093 : 1328 : return bistate;
2094 : 664 : }
2095 : :
2096 : : /*
2097 : : * FreeBulkInsertState - clean up after finishing a bulk insert
2098 : : */
2099 : : void
2100 : 621 : FreeBulkInsertState(BulkInsertState bistate)
2101 : : {
2102 [ + + ]: 621 : if (bistate->current_buf != InvalidBuffer)
2103 : 496 : ReleaseBuffer(bistate->current_buf);
2104 : 621 : FreeAccessStrategy(bistate->strategy);
2105 : 621 : pfree(bistate);
2106 : 621 : }
2107 : :
2108 : : /*
2109 : : * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
2110 : : */
2111 : : void
2112 : 10021 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
2113 : : {
2114 [ + + ]: 10021 : if (bistate->current_buf != InvalidBuffer)
2115 : 10007 : ReleaseBuffer(bistate->current_buf);
2116 : 10021 : bistate->current_buf = InvalidBuffer;
2117 : :
2118 : : /*
2119 : : * Despite the name, we also reset bulk relation extension state.
2120 : : * Otherwise we can end up erroring out due to looking for free space in
2121 : : * ->next_free of one partition, even though ->next_free was set when
2122 : : * extending another partition. It could obviously also be bad for
2123 : : * efficiency to look at existing blocks at offsets from another
2124 : : * partition, even if we don't error out.
2125 : : */
2126 : 10021 : bistate->next_free = InvalidBlockNumber;
2127 : 10021 : bistate->last_free = InvalidBlockNumber;
2128 : 10021 : }
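
/*
 * Editor's illustration - not part of heapam.c. The typical bulk-insert
 * pattern built from the three routines above, roughly what COPY-like code
 * paths do. A sketch; make_next_tuple() is a hypothetical tuple source.
 */
static void
bulk_load(Relation rel, int ntuples)
{
	BulkInsertState bistate = GetBulkInsertState();
	CommandId	cid = GetCurrentCommandId(true);

	for (int i = 0; i < ntuples; i++)
	{
		HeapTuple	tup = make_next_tuple(i);

		/* bistate keeps the current buffer pinned across insertions */
		heap_insert(rel, tup, cid, 0, bistate);
		heap_freetuple(tup);
	}

	FreeBulkInsertState(bistate);	/* drops the pin and the strategy */
}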
2129 : :
2130 : :
2131 : : /*
2132 : : * heap_insert - insert tuple into a heap
2133 : : *
2134 : : * The new tuple is stamped with current transaction ID and the specified
2135 : : * command ID.
2136 : : *
2137 : : * See table_tuple_insert for comments about most of the input flags, except
2138 : : * that this routine directly takes a tuple rather than a slot.
2139 : : *
2140 : : * There are corresponding HEAP_INSERT_ options for all the TABLE_INSERT_
2141 : : * options, and additionally there is HEAP_INSERT_SPECULATIVE, which is used
2142 : : * to implement table_tuple_insert_speculative().
2143 : : *
2144 : : * On return the header fields of *tup are updated to match the stored tuple;
2145 : : * in particular tup->t_self receives the actual TID where the tuple was
2146 : : * stored. But note that any toasting of fields within the tuple data is NOT
2147 : : * reflected into *tup.
2148 : : */
2149 : : void
2150 : 1680027 : heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2151 : : int options, BulkInsertState bistate)
2152 : : {
2153 : 1680027 : TransactionId xid = GetCurrentTransactionId();
2154 : 1680027 : HeapTuple heaptup;
2155 : 1680027 : Buffer buffer;
2156 : 1680027 : Buffer vmbuffer = InvalidBuffer;
2157 : 1680027 : bool all_visible_cleared = false;
2158 : :
2159 : : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2160 [ + - ]: 1680027 : Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
2161 : : RelationGetNumberOfAttributes(relation));
2162 : :
2163 : 1680027 : AssertHasSnapshotForToast(relation);
2164 : :
2165 : : /*
2166 : : * Fill in tuple header fields and toast the tuple if necessary.
2167 : : *
2168 : : * Note: below this point, heaptup is the data we actually intend to store
2169 : : * into the relation; tup is the caller's original untoasted data.
2170 : : */
2171 : 1680027 : heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2172 : :
2173 : : /*
2174 : : * Find buffer to insert this tuple into. If the page is all visible,
2175 : : * this will also pin the requisite visibility map page.
2176 : : */
2177 : 3360054 : buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2178 : 1680027 : InvalidBuffer, options, bistate,
2179 : : &vmbuffer, NULL,
2180 : : 0);
2181 : :
2182 : : /*
2183 : : * We're about to do the actual insert -- but check for conflict first, to
2184 : : * avoid possibly having to roll back work we've just done.
2185 : : *
2186 : : * This is safe without a recheck as long as there is no possibility of
2187 : : * another process scanning the page between this check and the insert
2188 : : * being visible to the scan (i.e., an exclusive buffer content lock is
2189 : : * continuously held from this point until the tuple insert is visible).
2190 : : *
2191 : : * For a heap insert, we only need to check for table-level SSI locks. Our
2192 : : * new tuple can't possibly conflict with existing tuple locks, and heap
2193 : : * page locks are only consolidated versions of tuple locks; they do not
2194 : : * lock "gaps" as index page locks do. So we don't need to specify a
2195 : : * buffer when making the call, which makes for a faster check.
2196 : : */
2197 : 1680027 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2198 : :
2199 : : /* NO EREPORT(ERROR) from here till changes are logged */
2200 : 1680027 : START_CRIT_SECTION();
2201 : :
2202 : 3360054 : RelationPutHeapTuple(relation, buffer, heaptup,
2203 : 1680027 : (options & HEAP_INSERT_SPECULATIVE) != 0);
2204 : :
2205 [ + + ]: 1680027 : if (PageIsAllVisible(BufferGetPage(buffer)))
2206 : : {
2207 : 340 : all_visible_cleared = true;
2208 : 340 : PageClearAllVisible(BufferGetPage(buffer));
2209 : 680 : visibilitymap_clear(relation,
2210 : 340 : ItemPointerGetBlockNumber(&(heaptup->t_self)),
2211 : 340 : vmbuffer, VISIBILITYMAP_VALID_BITS);
2212 : 340 : }
2213 : :
2214 : : /*
2215 : : * XXX Should we set PageSetPrunable on this page?
2216 : : *
2217 : : * The inserting transaction may eventually abort, thus making this tuple
2218 : : * DEAD and hence available for pruning. Though we don't want to optimize
2219 : : * for aborts, if no other tuple on this page is UPDATEd/DELETEd, the
2220 : : * aborted tuple will never be pruned until the next vacuum is triggered.
2221 : : *
2222 : : * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2223 : : */
2224 : :
2225 : 1680027 : MarkBufferDirty(buffer);
2226 : :
2227 : : /* XLOG stuff */
2228 [ + + + + : 1680027 : if (RelationNeedsWAL(relation))
+ + + + ]
2229 : : {
2230 : 1138311 : xl_heap_insert xlrec;
2231 : 1138311 : xl_heap_header xlhdr;
2232 : 1138311 : XLogRecPtr recptr;
2233 : 1138311 : Page page = BufferGetPage(buffer);
2234 : 1138311 : uint8 info = XLOG_HEAP_INSERT;
2235 : 1138311 : int bufflags = 0;
2236 : :
2237 : : /*
2238 : : * If this is a catalog, we need to transmit combo CIDs to properly
2239 : : * decode, so log that as well.
2240 : : */
2241 [ + - - + : 1138311 : if (RelationIsAccessibleInLogicalDecoding(relation))
# # # # #
# # # # #
# # ]
2242 : 0 : log_heap_new_cid(relation, heaptup);
2243 : :
2244 : : /*
2245 : : * If this is the first and only tuple on the page, we can reinit the
2246 : : * page instead of restoring the whole thing. Set flag, and hide
2247 : : * buffer references from XLogInsert.
2248 : : */
2249 [ + + + + ]: 1138311 : if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2250 : 12296 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2251 : : {
2252 : 12229 : info |= XLOG_HEAP_INIT_PAGE;
2253 : 12229 : bufflags |= REGBUF_WILL_INIT;
2254 : 12229 : }
2255 : :
2256 : 1138311 : xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2257 : 1138311 : xlrec.flags = 0;
2258 [ + + ]: 1138311 : if (all_visible_cleared)
2259 : 339 : xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2260 [ + + ]: 1138311 : if (options & HEAP_INSERT_SPECULATIVE)
2261 : 84 : xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2262 [ + - ]: 1138311 : Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2263 : :
2264 : : /*
2265 : : * For logical decoding, we need the tuple even if we're doing a full
2266 : : * page write, so make sure it's included even if we take a full-page
2267 : : * image. (XXX We could alternatively store a pointer into the FPW).
2268 : : */
2269 [ + - - + : 1138311 : if (RelationIsLogicallyLogged(relation) &&
# # # # #
# # # #
# ]
2270 : 0 : !(options & HEAP_INSERT_NO_LOGICAL))
2271 : : {
2272 : 0 : xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2273 : 0 : bufflags |= REGBUF_KEEP_DATA;
2274 : :
2275 [ # # ]: 0 : if (IsToastRelation(relation))
2276 : 0 : xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
2277 : 0 : }
2278 : :
2279 : 1138311 : XLogBeginInsert();
2280 : 1138311 : XLogRegisterData(&xlrec, SizeOfHeapInsert);
2281 : :
2282 : 1138311 : xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2283 : 1138311 : xlhdr.t_infomask = heaptup->t_data->t_infomask;
2284 : 1138311 : xlhdr.t_hoff = heaptup->t_data->t_hoff;
2285 : :
2286 : : /*
2287 : : * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2288 : : * write the whole page to the xlog, we don't need to store
2289 : : * xl_heap_header in the xlog.
2290 : : */
2291 : 1138311 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2292 : 1138311 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
2293 : : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2294 : 1138311 : XLogRegisterBufData(0,
2295 : 1138311 : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2296 : 1138311 : heaptup->t_len - SizeofHeapTupleHeader);
2297 : :
2298 : : /* filtering by origin on a row level is much more efficient */
2299 : 1138311 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2300 : :
2301 : 1138311 : recptr = XLogInsert(RM_HEAP_ID, info);
2302 : :
2303 : 1138311 : PageSetLSN(page, recptr);
2304 : 1138311 : }
2305 : :
2306 [ + - ]: 1680027 : END_CRIT_SECTION();
2307 : :
2308 : 1680027 : UnlockReleaseBuffer(buffer);
2309 [ + + ]: 1680027 : if (vmbuffer != InvalidBuffer)
2310 : 402 : ReleaseBuffer(vmbuffer);
2311 : :
2312 : : /*
2313 : : * If tuple is cacheable, mark it for invalidation from the caches in case
2314 : : * we abort. Note it is OK to do this after releasing the buffer, because
2315 : : * the heaptup data structure is all in local memory, not in the shared
2316 : : * buffer.
2317 : : */
2318 : 1680027 : CacheInvalidateHeapTuple(relation, heaptup, NULL);
2319 : :
2320 : : /* Note: speculative insertions are counted too, even if aborted later */
2321 : 1680027 : pgstat_count_heap_insert(relation, 1);
2322 : :
2323 : : /*
2324 : : * If heaptup is a private copy, release it. Don't forget to copy t_self
2325 : : * back to the caller's image, too.
2326 : : */
2327 [ + + ]: 1680027 : if (heaptup != tup)
2328 : : {
2329 : 2122 : tup->t_self = heaptup->t_self;
2330 : 2122 : heap_freetuple(heaptup);
2331 : 2122 : }
2332 : 1680027 : }
2333 : :
2334 : : /*
2335 : : * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2336 : : * tuple header fields and toasts the tuple if necessary. Returns a toasted
2337 : : * version of the tuple if it was toasted, or the original tuple if not. Note
2338 : : * that in any case, the header fields are also set in the original tuple.
2339 : : */
2340 : : static HeapTuple
2341 : 1900891 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2342 : : CommandId cid, int options)
2343 : : {
2344 : : /*
2345 : : * To allow parallel inserts, we need to ensure that they are safe to be
2346 : : * performed in workers. We have the infrastructure to allow parallel
2347 : : * inserts in general except for the cases where inserts generate a new
2348 : : * CommandId (e.g., inserts into a table having a foreign key column).
2349 : : */
2350 [ + - ]: 1900891 : if (IsParallelWorker())
2351 [ # # # # ]: 0 : ereport(ERROR,
2352 : : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2353 : : errmsg("cannot insert tuples in a parallel worker")));
2354 : :
2355 : 1900891 : tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2356 : 1900891 : tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2357 : 1900891 : tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2358 : 1900891 : HeapTupleHeaderSetXmin(tup->t_data, xid);
2359 [ + + ]: 1900891 : if (options & HEAP_INSERT_FROZEN)
2360 : 563 : HeapTupleHeaderSetXminFrozen(tup->t_data);
2361 : :
2362 : 1900891 : HeapTupleHeaderSetCmin(tup->t_data, cid);
2363 : 1900891 : HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2364 : 1900891 : tup->t_tableOid = RelationGetRelid(relation);
2365 : :
2366 : : /*
2367 : : * If the new tuple is too big for storage or contains already toasted
2368 : : * out-of-line attributes from some other relation, invoke the toaster.
2369 : : */
2370 [ + + + + ]: 1900891 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
2371 : 1473 : relation->rd_rel->relkind != RELKIND_MATVIEW)
2372 : : {
2373 : : /* toast table entries should never be recursively toasted */
2374 [ + - ]: 1460 : Assert(!HeapTupleHasExternal(tup));
2375 : 1460 : return tup;
2376 : : }
2377 [ + + + + ]: 1899431 : else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2378 : 2129 : return heap_toast_insert_or_update(relation, tup, NULL, options);
2379 : : else
2380 : 1897302 : return tup;
2381 : 1900891 : }
2382 : :
2383 : : /*
2384 : : * Helper for heap_multi_insert() that computes the number of entire pages
2385 : : * that inserting the remaining heaptuples requires. Used to determine how
2386 : : * much the relation needs to be extended by.
2387 : : */
2388 : : static int
2389 : 61216 : heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
2390 : : {
2391 : 61216 : size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2392 : 61216 : int npages = 1;
2393 : :
2394 [ + + ]: 367678 : for (int i = done; i < ntuples; i++)
2395 : : {
2396 : 306462 : size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
2397 : :
2398 [ + + ]: 306462 : if (page_avail < tup_sz)
2399 : : {
2400 : 1929 : npages++;
2401 : 1929 : page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2402 : 1929 : }
2403 : 306462 : page_avail -= tup_sz;
2404 : 306462 : }
2405 : :
2406 : 122432 : return npages;
2407 : 61216 : }
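
/*
 * Editor's worked example - not part of heapam.c. With BLCKSZ = 8192,
 * SizeOfPageHeaderData = 24 and saveFreeSpace = 819 (fillfactor 90),
 * page_avail starts at 7349 bytes. Ten remaining tuples of 736 MAXALIGN'ed
 * bytes each (740 with the 4-byte ItemIdData) fit nine to a page (6660
 * bytes, leaving 689), so the tenth bumps npages to 2. The numbers are
 * illustrative; only the shape of the computation matters.
 */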
2408 : :
2409 : : /*
2410 : : * heap_multi_insert - insert multiple tuples into a heap
2411 : : *
2412 : : * This is like heap_insert(), but inserts multiple tuples in one operation.
2413 : : * That's faster than calling heap_insert() in a loop, because when multiple
2414 : : * tuples can be inserted on a single page, we can write just a single WAL
2415 : : * record covering all of them, and only need to lock/unlock the page once.
2416 : : *
2417 : : * Note: this leaks memory into the current memory context. You can create a
2418 : : * temporary context before calling this, if that's a problem.
2419 : : */
2420 : : void
2421 : 60314 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2422 : : CommandId cid, int options, BulkInsertState bistate)
2423 : : {
2424 : 60314 : TransactionId xid = GetCurrentTransactionId();
2425 : 60314 : HeapTuple *heaptuples;
2426 : 60314 : int i;
2427 : 60314 : int ndone;
2428 : 60314 : PGAlignedBlock scratch;
2429 : 60314 : Page page;
2430 : 60314 : Buffer vmbuffer = InvalidBuffer;
2431 : 60314 : bool needwal;
2432 : 60314 : Size saveFreeSpace;
2433 [ + - - + : 60314 : bool need_tuple_data = RelationIsLogicallyLogged(relation);
# # # # #
# ]
2434 [ + - - + : 60314 : bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
# # # # #
# # # # #
# # # # ]
2435 : 60314 : bool starting_with_empty_page = false;
2436 : 60314 : int npages = 0;
2437 : 60314 : int npages_used = 0;
2438 : :
2439 : : /* currently not needed (thus unsupported) for heap_multi_insert() */
2440 [ + - ]: 60314 : Assert(!(options & HEAP_INSERT_NO_LOGICAL));
2441 : :
2442 : 60314 : AssertHasSnapshotForToast(relation);
2443 : :
2444 [ + + + + : 120593 : needwal = RelationNeedsWAL(relation);
+ + ]
2445 [ + - ]: 60314 : saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2446 : : HEAP_DEFAULT_FILLFACTOR);
2447 : :
2448 : : /* Toast and set header data in all the slots */
2449 : 60314 : heaptuples = palloc(ntuples * sizeof(HeapTuple));
2450 [ + + ]: 281178 : for (i = 0; i < ntuples; i++)
2451 : : {
2452 : 220864 : HeapTuple tuple;
2453 : :
2454 : 220864 : tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2455 : 220864 : slots[i]->tts_tableOid = RelationGetRelid(relation);
2456 : 220864 : tuple->t_tableOid = slots[i]->tts_tableOid;
2457 : 441728 : heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2458 : 220864 : options);
2459 : 220864 : }
2460 : :
2461 : : /*
2462 : : * We're about to do the actual inserts -- but check for conflict first,
2463 : : * to minimize the possibility of having to roll back work we've just
2464 : : * done.
2465 : : *
2466 : : * A check here does not definitively prevent a serialization anomaly;
2467 : : * that check MUST be done at least past the point of acquiring an
2468 : : * exclusive buffer content lock on every buffer that will be affected,
2469 : : * and MAY be done after all inserts are reflected in the buffers and
2470 : : * those locks are released; otherwise there is a race condition. Since
2471 : : * multiple buffers can be locked and unlocked in the loop below, and it
2472 : : * would not be feasible to identify and lock all of those buffers before
2473 : : * the loop, we must do a final check at the end.
2474 : : *
2475 : : * The check here could be omitted with no loss of correctness; it is
2476 : : * present strictly as an optimization.
2477 : : *
2478 : : * For heap inserts, we only need to check for table-level SSI locks. Our
2479 : : * new tuples can't possibly conflict with existing tuple locks, and heap
2480 : : * page locks are only consolidated versions of tuple locks; they do not
2481 : : * lock "gaps" as index page locks do. So we don't need to specify a
2482 : : * buffer when making the call, which makes for a faster check.
2483 : : */
2484 : 60314 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2485 : :
2486 : 60314 : ndone = 0;
2487 [ + + ]: 122570 : while (ndone < ntuples)
2488 : : {
2489 : 62256 : Buffer buffer;
2490 : 62256 : bool all_visible_cleared = false;
2491 : 62256 : bool all_frozen_set = false;
2492 : 62256 : int nthispage;
2493 : :
2494 [ + - ]: 62256 : CHECK_FOR_INTERRUPTS();
2495 : :
2496 : : /*
2497 : : * Compute number of pages needed to fit the to-be-inserted tuples in
2498 : : * the worst case. This will be used to determine how much to extend
2499 : : * the relation by in RelationGetBufferForTuple(), if needed. If we
2500 : : * filled a prior page from scratch, we can just update our last
2501 : : * computation, but if we started with a partially filled page, we
2502 : : * recompute from scratch, since the number of potentially required
2503 : : * pages can vary due to tuples needing to fit onto the page, page
2504 : : * headers, etc.
2505 : : */
2506 [ + + + + ]: 62256 : if (ndone == 0 || !starting_with_empty_page)
2507 : : {
2508 : 122432 : npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
2509 : 61216 : saveFreeSpace);
2510 : 61216 : npages_used = 0;
2511 : 61216 : }
2512 : : else
2513 : 1040 : npages_used++;
2514 : :
2515 : : /*
2516 : : * Find buffer where at least the next tuple will fit. If the page is
2517 : : * all-visible, this will also pin the requisite visibility map page.
2518 : : *
2519 : : * Also pin visibility map page if COPY FREEZE inserts tuples into an
2520 : : * empty page. See all_frozen_set below.
2521 : : */
2522 : 124512 : buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2523 : 62256 : InvalidBuffer, options, bistate,
2524 : : &vmbuffer, NULL,
2525 : 62256 : npages - npages_used);
2526 : 62256 : page = BufferGetPage(buffer);
2527 : :
2528 : 62256 : starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
2529 : :
2530 [ + + + + ]: 62256 : if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
2531 : : {
2532 : 4 : all_frozen_set = true;
2533 : : /* Lock the vmbuffer before entering the critical section */
2534 : 4 : LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
2535 : 4 : }
2536 : :
2537 : : /* NO EREPORT(ERROR) from here till changes are logged */
2538 : 62256 : START_CRIT_SECTION();
2539 : :
2540 : : /*
2541 : : * RelationGetBufferForTuple has ensured that the first tuple fits.
2542 : : * Put that on the page, and then as many other tuples as fit.
2543 : : */
2544 : 62256 : RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2545 : :
2546 : : /*
2547 : : * For logical decoding we need combo CIDs to properly decode the
2548 : : * catalog.
2549 : : */
2550 [ + + + - ]: 62256 : if (needwal && need_cids)
2551 : 0 : log_heap_new_cid(relation, heaptuples[ndone]);
2552 : :
2553 [ + + ]: 220864 : for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2554 : : {
2555 : 160550 : HeapTuple heaptup = heaptuples[ndone + nthispage];
2556 : :
2557 [ + + ]: 160550 : if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2558 : 1942 : break;
2559 : :
2560 : 158608 : RelationPutHeapTuple(relation, buffer, heaptup, false);
2561 : :
2562 : : /*
2563 : : * For logical decoding we need combo CIDs to properly decode the
2564 : : * catalog.
2565 : : */
2566 [ + + + - ]: 158608 : if (needwal && need_cids)
2567 : 0 : log_heap_new_cid(relation, heaptup);
2568 [ - + + ]: 160550 : }
2569 : :
2570 : : /*
2571 : : * If the page is all visible, need to clear that, unless we're only
2572 : : * going to add further frozen rows to it.
2573 : : *
2574 : : * If we're only adding already frozen rows to a previously empty
2575 : : * page, mark it as all-frozen and update the visibility map. We're
2576 : : * already holding a pin on the vmbuffer.
2577 : : */
2578 [ + + - + ]: 62256 : if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
2579 : : {
2580 : 309 : all_visible_cleared = true;
2581 : 309 : PageClearAllVisible(page);
2582 : 618 : visibilitymap_clear(relation,
2583 : 309 : BufferGetBlockNumber(buffer),
2584 : 309 : vmbuffer, VISIBILITYMAP_VALID_BITS);
2585 : 309 : }
2586 [ + + ]: 61947 : else if (all_frozen_set)
2587 : : {
2588 : 4 : PageSetAllVisible(page);
2589 : 8 : visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
2590 : 4 : vmbuffer,
2591 : : VISIBILITYMAP_ALL_VISIBLE |
2592 : : VISIBILITYMAP_ALL_FROZEN,
2593 : 4 : relation->rd_locator);
2594 : 4 : }
2595 : :
2596 : : /*
2597 : : * XXX Should we set PageSetPrunable on this page? See heap_insert().
2598 : : */
2599 : :
2600 : 62256 : MarkBufferDirty(buffer);
2601 : :
2602 : : /* XLOG stuff */
2603 [ + + ]: 62256 : if (needwal)
2604 : : {
2605 : 62179 : XLogRecPtr recptr;
2606 : 62179 : xl_heap_multi_insert *xlrec;
2607 : 62179 : uint8 info = XLOG_HEAP2_MULTI_INSERT;
2608 : 62179 : char *tupledata;
2609 : 62179 : int totaldatalen;
2610 : 62179 : char *scratchptr = scratch.data;
2611 : 62179 : bool init;
2612 : 62179 : int bufflags = 0;
2613 : :
2614 : : /*
2615 : : * If the page was previously empty, we can reinit the page
2616 : : * instead of restoring the whole thing.
2617 : : */
2618 : 62179 : init = starting_with_empty_page;
2619 : :
2620 : : /* allocate xl_heap_multi_insert struct from the scratch area */
2621 : 62179 : xlrec = (xl_heap_multi_insert *) scratchptr;
2622 : 62179 : scratchptr += SizeOfHeapMultiInsert;
2623 : :
2624 : : /*
2625 : : * Allocate the offsets array, unless we're reinitializing the page;
2626 : : * in that case the tuples are stored in order starting at
2627 : : * FirstOffsetNumber, so we don't need to store the offsets
2628 : : * explicitly.
2629 : : */
2630 [ + + ]: 62179 : if (!init)
2631 : 59919 : scratchptr += nthispage * sizeof(OffsetNumber);
2632 : :
2633 : : /* the rest of the scratch space is used for tuple data */
2634 : 62179 : tupledata = scratchptr;
2635 : :
2636 : : /* check that the mutually exclusive flags are not both set */
2637 [ + + + - ]: 62179 : Assert(!(all_visible_cleared && all_frozen_set));
2638 : :
2639 : 62179 : xlrec->flags = 0;
2640 [ + + ]: 62179 : if (all_visible_cleared)
2641 : 309 : xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
2642 : :
2643 : : /*
2644 : : * We don't have to worry about including a conflict xid in the
2645 : : * WAL record, as HEAP_INSERT_FROZEN intentionally violates
2646 : : * visibility rules.
2647 : : */
2648 [ + - ]: 62179 : if (all_frozen_set)
2649 : 0 : xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
2650 : :
2651 : 62179 : xlrec->ntuples = nthispage;
2652 : :
2653 : : /*
2654 : : * Write out an xl_multi_insert_tuple and the tuple data itself
2655 : : * for each tuple.
2656 : : */
2657 [ + + ]: 281901 : for (i = 0; i < nthispage; i++)
2658 : : {
2659 : 219722 : HeapTuple heaptup = heaptuples[ndone + i];
2660 : 219722 : xl_multi_insert_tuple *tuphdr;
2661 : 219722 : int datalen;
2662 : :
2663 [ + + ]: 219722 : if (!init)
2664 : 118442 : xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2665 : : /* xl_multi_insert_tuple needs two-byte alignment. */
2666 : 219722 : tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2667 : 219722 : scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2668 : :
2669 : 219722 : tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2670 : 219722 : tuphdr->t_infomask = heaptup->t_data->t_infomask;
2671 : 219722 : tuphdr->t_hoff = heaptup->t_data->t_hoff;
2672 : :
2673 : : /* write bitmap [+ padding] [+ oid] + data */
2674 : 219722 : datalen = heaptup->t_len - SizeofHeapTupleHeader;
2675 : 219722 : memcpy(scratchptr,
2676 : : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2677 : : datalen);
2678 : 219722 : tuphdr->datalen = datalen;
2679 : 219722 : scratchptr += datalen;
2680 : 219722 : }
2681 : 62179 : totaldatalen = scratchptr - tupledata;
2682 [ + - ]: 62179 : Assert((scratchptr - scratch.data) < BLCKSZ);
2683 : :
2684 [ + - ]: 62179 : if (need_tuple_data)
2685 : 0 : xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2686 : :
2687 : : /*
2688 : : * Signal that this is the last xl_heap_multi_insert record
2689 : : * emitted by this call to heap_multi_insert(). Needed for logical
2690 : : * decoding, so it knows when to clean up temporary data.
2691 : : */
2692 [ + + ]: 62179 : if (ndone + nthispage == ntuples)
2693 : 60271 : xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
2694 : :
2695 [ + + ]: 62179 : if (init)
2696 : : {
2697 : 2260 : info |= XLOG_HEAP_INIT_PAGE;
2698 : 2260 : bufflags |= REGBUF_WILL_INIT;
2699 : 2260 : }
2700 : :
2701 : : /*
2702 : : * If we're doing logical decoding, include the new tuple data
2703 : : * even if we take a full-page image of the page.
2704 : : */
2705 [ + - ]: 62179 : if (need_tuple_data)
2706 : 0 : bufflags |= REGBUF_KEEP_DATA;
2707 : :
2708 : 62179 : XLogBeginInsert();
2709 : 62179 : XLogRegisterData(xlrec, tupledata - scratch.data);
2710 : 62179 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2711 [ + - ]: 62179 : if (all_frozen_set)
2712 : 0 : XLogRegisterBuffer(1, vmbuffer, 0);
2713 : :
2714 : 62179 : XLogRegisterBufData(0, tupledata, totaldatalen);
2715 : :
2716 : : /* filtering by origin on a row level is much more efficient */
2717 : 62179 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2718 : :
2719 : 62179 : recptr = XLogInsert(RM_HEAP2_ID, info);
2720 : :
2721 : 62179 : PageSetLSN(page, recptr);
2722 [ + - ]: 62179 : if (all_frozen_set)
2723 : : {
2724 [ # # ]: 0 : Assert(BufferIsDirty(vmbuffer));
2725 : 0 : PageSetLSN(BufferGetPage(vmbuffer), recptr);
2726 : 0 : }
2727 : 62179 : }
2728 : :
2729 [ + - ]: 62256 : END_CRIT_SECTION();
2730 : :
2731 [ + + ]: 62256 : if (all_frozen_set)
2732 : 4 : LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2733 : :
2734 : 62256 : UnlockReleaseBuffer(buffer);
2735 : 62256 : ndone += nthispage;
2736 : :
2737 : : /*
2738 : : * NB: Only release vmbuffer after inserting all tuples - it's fairly
2739 : : * likely that subsequent heap pages we insert into will use the same
2740 : : * vm page.
2741 : : */
2742 : 62256 : }
2743 : :
2744 : : /* We're done with inserting all tuples, so release the last vmbuffer. */
2745 [ + + ]: 60314 : if (vmbuffer != InvalidBuffer)
2746 : 312 : ReleaseBuffer(vmbuffer);
2747 : :
2748 : : /*
2749 : : * We're done with the actual inserts. Check for conflicts again, to
2750 : : * ensure that all rw-conflicts in to these inserts are detected. Without
2751 : : * this final check, a sequential scan of the heap may have locked the
2752 : : * table after the "before" check, missing one opportunity to detect the
2753 : : * conflict, and then scanned the table before the new tuples were there,
2754 : : * missing the other chance to detect the conflict.
2755 : : *
2756 : : * For heap inserts, we only need to check for table-level SSI locks. Our
2757 : : * new tuples can't possibly conflict with existing tuple locks, and heap
2758 : : * page locks are only consolidated versions of tuple locks; they do not
2759 : : * lock "gaps" as index page locks do. So we don't need to specify a
2760 : : * buffer when making the call.
2761 : : */
2762 : 60314 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2763 : :
2764 : : /*
2765 : : * If tuples are cacheable, mark them for invalidation from the caches in
2766 : : * case we abort. Note it is OK to do this after releasing the buffer,
2767 : : * because the heaptuples data structure is all in local memory, not in
2768 : : * the shared buffer.
2769 : : */
2770 [ + + ]: 60314 : if (IsCatalogRelation(relation))
2771 : : {
2772 [ + + ]: 175587 : for (i = 0; i < ntuples; i++)
2773 : 115491 : CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2774 : 60096 : }
2775 : :
2776 : : /* copy t_self fields back to the caller's slots */
2777 [ + + ]: 281178 : for (i = 0; i < ntuples; i++)
2778 : 220864 : slots[i]->tts_tid = heaptuples[i]->t_self;
2779 : :
2780 : 60314 : pgstat_count_heap_insert(relation, ntuples);
2781 : 60314 : }
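
/*
 * Editor's illustration - not part of heapam.c. A hedged sketch of feeding
 * heap_multi_insert() from an array of slots, loosely modeled on what COPY
 * does. Slot lifetime and tuple construction are simplified; "tuples" is
 * assumed to hold ntuples already-formed heap tuples.
 */
static void
multi_insert_demo(Relation rel, HeapTuple *tuples, int ntuples)
{
	TupleTableSlot **slots = palloc(ntuples * sizeof(TupleTableSlot *));
	CommandId	cid = GetCurrentCommandId(true);

	for (int i = 0; i < ntuples; i++)
	{
		slots[i] = MakeSingleTupleTableSlot(RelationGetDescr(rel),
											&TTSOpsHeapTuple);
		ExecStoreHeapTuple(tuples[i], slots[i], false);
	}

	/* one WAL record per filled page, instead of one record per tuple */
	heap_multi_insert(rel, slots, ntuples, cid, 0, NULL);

	for (int i = 0; i < ntuples; i++)
		ExecDropSingleTupleTableSlot(slots[i]);
	pfree(slots);
}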
2782 : :
2783 : : /*
2784 : : * simple_heap_insert - insert a tuple
2785 : : *
2786 : : * Currently, this routine differs from heap_insert only in supplying
2787 : : * a default command ID and not allowing access to the speedup options.
2788 : : *
2789 : : * This should be used rather than using heap_insert directly in most places
2790 : : * where we are modifying system catalogs.
2791 : : */
2792 : : void
2793 : 57339 : simple_heap_insert(Relation relation, HeapTuple tup)
2794 : : {
2795 : 57339 : heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
2796 : 57339 : }
2797 : :
2798 : : /*
2799 : : * Given infomask/infomask2, compute the bits that must be saved in the
2800 : : * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2801 : : * xl_heap_lock_updated WAL records.
2802 : : *
2803 : : * See fix_infomask_from_infobits.
2804 : : */
2805 : : static uint8
2806 : 314971 : compute_infobits(uint16 infomask, uint16 infomask2)
2807 : : {
2808 : 314971 : return
2809 : 944913 : ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2810 : 629942 : ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2811 : 629942 : ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2812 : : /* note we ignore HEAP_XMAX_SHR_LOCK here */
2813 : 629942 : ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2814 : 314971 : ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2815 : : XLHL_KEYS_UPDATED : 0);
2816 : : }
2817 : :
2818 : : /*
2819 : : * Given two versions of the same t_infomask for a tuple, compare them and
2820 : : * return whether the relevant status for a tuple Xmax has changed. This is
2821 : : * used after a buffer lock has been released and reacquired: we want to ensure
2822 : : * that the tuple state continues to be the same as it was when we
2823 : : * previously examined it.
2824 : : *
2825 : : * Note the Xmax field itself must be compared separately.
2826 : : */
2827 : : static inline bool
2828 : 10 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2829 : : {
2830 : 10 : const uint16 interesting =
2831 : : HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2832 : :
2833 [ - + ]: 10 : if ((new_infomask & interesting) != (old_infomask & interesting))
2834 : 0 : return true;
2835 : :
2836 : 10 : return false;
2837 : 10 : }
2838 : :
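/*
 * Editor's sketch: the recheck idiom that xmax_infomask_changed() supports
 * (a fragment only; "restart" stands for the l1:/l2: labels used by
 * heap_delete and heap_update below):
 *
 *     xwait = HeapTupleHeaderGetRawXmax(tup->t_data);
 *     infomask = tup->t_data->t_infomask;
 *     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *     ... wait for the competing transaction to finish ...
 *     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *     if (xmax_infomask_changed(tup->t_data->t_infomask, infomask) ||
 *         !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tup->t_data),
 *                              xwait))
 *         goto restart;
 */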
2839 : : /*
2840 : : * heap_delete - delete a tuple
2841 : : *
2842 : : * See table_tuple_delete() for an explanation of the parameters, except that
2843 : : * this routine directly takes a tuple rather than a slot.
2844 : : *
2845 : : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2846 : : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2847 : : * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
2848 : : * generated by another transaction).
2849 : : */
2850 : : TM_Result
2851 : 303827 : heap_delete(Relation relation, const ItemPointerData *tid,
2852 : : CommandId cid, Snapshot crosscheck, bool wait,
2853 : : TM_FailureData *tmfd, bool changingPart)
2854 : : {
2855 : 303827 : TM_Result result;
2856 : 303827 : TransactionId xid = GetCurrentTransactionId();
2857 : 303827 : ItemId lp;
2858 : 303827 : HeapTupleData tp;
2859 : 303827 : Page page;
2860 : 303827 : BlockNumber block;
2861 : 303827 : Buffer buffer;
2862 : 303827 : Buffer vmbuffer = InvalidBuffer;
2863 : 303827 : TransactionId new_xmax;
2864 : 303827 : uint16 new_infomask,
2865 : : new_infomask2;
2866 : 303827 : bool have_tuple_lock = false;
2867 : 303827 : bool iscombo;
2868 : 303827 : bool all_visible_cleared = false;
2869 : 303827 : HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2870 : 303827 : bool old_key_copied = false;
2871 : :
2872 [ + - ]: 303827 : Assert(ItemPointerIsValid(tid));
2873 : :
2874 : 303827 : AssertHasSnapshotForToast(relation);
2875 : :
2876 : : /*
2877 : : * Forbid this during a parallel operation, lest it allocate a combo CID.
2878 : : * Other workers might need that combo CID for visibility checks, and we
2879 : : * have no provision for broadcasting it to them.
2880 : : */
2881 [ + - ]: 303827 : if (IsInParallelMode())
2882 [ # # # # ]: 0 : ereport(ERROR,
2883 : : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2884 : : errmsg("cannot delete tuples during a parallel operation")));
2885 : :
2886 : 303827 : block = ItemPointerGetBlockNumber(tid);
2887 : 303827 : buffer = ReadBuffer(relation, block);
2888 : 303827 : page = BufferGetPage(buffer);
2889 : :
2890 : : /*
2891 : : * Before locking the buffer, pin the visibility map page if it appears to
2892 : : * be necessary. Since we haven't got the lock yet, someone else might be
2893 : : * in the middle of changing this, so we'll need to recheck after we have
2894 : : * the lock.
2895 : : */
2896 [ + + ]: 303827 : if (PageIsAllVisible(page))
2897 : 9 : visibilitymap_pin(relation, block, &vmbuffer);
2898 : :
2899 : 303827 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2900 : :
2901 : 303827 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2902 [ + - ]: 303827 : Assert(ItemIdIsNormal(lp));
2903 : :
2904 : 303827 : tp.t_tableOid = RelationGetRelid(relation);
2905 : 303827 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2906 : 303827 : tp.t_len = ItemIdGetLength(lp);
2907 : 303827 : tp.t_self = *tid;
2908 : :
2909 : : l1:
2910 : :
2911 : : /*
2912 : : * If we didn't pin the visibility map page and the page has become all
2913 : : * visible while we were busy locking the buffer, we'll have to unlock and
2914 : : * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2915 : : * unfortunate, but hopefully shouldn't happen often.
2916 : : */
2917 [ + + + - ]: 303827 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2918 : : {
2919 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2920 : 0 : visibilitymap_pin(relation, block, &vmbuffer);
2921 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2922 : 0 : }
2923 : :
2924 : 303827 : result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2925 : :
2926 [ + - ]: 303827 : if (result == TM_Invisible)
2927 : : {
2928 : 0 : UnlockReleaseBuffer(buffer);
2929 [ # # # # ]: 0 : ereport(ERROR,
2930 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2931 : : errmsg("attempted to delete invisible tuple")));
2932 : 0 : }
2933 [ + + - + ]: 303827 : else if (result == TM_BeingModified && wait)
2934 : : {
2935 : 32 : TransactionId xwait;
2936 : 32 : uint16 infomask;
2937 : :
2938 : : /* must copy state data before unlocking buffer */
2939 : 32 : xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2940 : 32 : infomask = tp.t_data->t_infomask;
2941 : :
2942 : : /*
2943 : : * Sleep until concurrent transaction ends -- except when there's a
2944 : : * single locker and it's our own transaction. Note we don't care
2945 : : * which lock mode the locker has, because we need the strongest one.
2946 : : *
2947 : : * Before sleeping, we need to acquire tuple lock to establish our
2948 : : * priority for the tuple (see heap_lock_tuple). LockTuple will
2949 : : * release us when we are next-in-line for the tuple.
2950 : : *
2951 : : * If we are forced to "start over" below, we keep the tuple lock;
2952 : : * this arranges that we stay at the head of the line while rechecking
2953 : : * tuple state.
2954 : : */
2955 [ - + ]: 32 : if (infomask & HEAP_XMAX_IS_MULTI)
2956 : : {
2957 : 0 : bool current_is_member = false;
2958 : :
2959 [ # # ]: 0 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2960 : : LockTupleExclusive, &current_is_member))
2961 : : {
2962 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2963 : :
2964 : : /*
2965 : : * Acquire the lock, if necessary (but skip it when we're
2966 : : * requesting a lock and already have one; avoids deadlock).
2967 : : */
2968 [ # # ]: 0 : if (!current_is_member)
2969 : 0 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2970 : : LockWaitBlock, &have_tuple_lock);
2971 : :
2972 : : /* wait for multixact */
2973 : 0 : MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2974 : 0 : relation, &(tp.t_self), XLTW_Delete,
2975 : : NULL);
2976 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2977 : :
2978 : : /*
2979 : : * If xwait had just locked the tuple then some other xact
2980 : : * could update this tuple before we get to this point. Check
2981 : : * for xmax change, and start over if so.
2982 : : *
2983 : : * We also must start over if we didn't pin the VM page, and
2984 : : * the page has become all visible.
2985 : : */
2986 [ # # ]: 0 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2987 [ # # # # ]: 0 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2988 : 0 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2989 : : xwait))
2990 : 0 : goto l1;
2991 : 0 : }
2992 : :
2993 : : /*
2994 : : * You might think the multixact is necessarily done here, but not
2995 : : * so: it could have surviving members, namely our own xact or
2996 : : * other subxacts of this backend. It is legal for us to delete
2997 : : * the tuple in either case, however (the latter case is
2998 : : * essentially a situation of upgrading our former shared lock to
2999 : : * exclusive). We don't bother changing the on-disk hint bits
3000 : : * since we are about to overwrite the xmax altogether.
3001 : : */
3002 [ # # ]: 0 : }
3003 [ + - ]: 32 : else if (!TransactionIdIsCurrentTransactionId(xwait))
3004 : : {
3005 : : /*
3006 : : * Wait for regular transaction to end; but first, acquire tuple
3007 : : * lock.
3008 : : */
3009 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3010 : 0 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3011 : : LockWaitBlock, &have_tuple_lock);
3012 : 0 : XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3013 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3014 : :
3015 : : /*
3016 : : * xwait is done, but if xwait had just locked the tuple then some
3017 : : * other xact could update this tuple before we get to this point.
3018 : : * Check for xmax change, and start over if so.
3019 : : *
3020 : : * We also must start over if we didn't pin the VM page, and the
3021 : : * page has become all visible.
3022 : : */
3023 [ # # ]: 0 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
3024 [ # # # # ]: 0 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3025 : 0 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3026 : : xwait))
3027 : 0 : goto l1;
3028 : :
3029 : : /* Otherwise check if it committed or aborted */
3030 : 0 : UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3031 : 0 : }
3032 : :
3033 : : /*
3034 : : * We may overwrite if previous xmax aborted, or if it committed but
3035 : : * only locked the tuple without updating it.
3036 : : */
3037 [ + - ]: 32 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3038 [ - + # # ]: 32 : HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
3039 : 0 : HeapTupleHeaderIsOnlyLocked(tp.t_data))
3040 : 32 : result = TM_Ok;
3041 [ # # ]: 0 : else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
3042 : 0 : result = TM_Updated;
3043 : : else
3044 : 0 : result = TM_Deleted;
3045 [ - - + ]: 32 : }
3046 : :
3047 : : /* sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
3048 [ + + ]: 303827 : if (result != TM_Ok)
3049 : : {
3050 [ - + # # : 7 : Assert(result == TM_SelfModified ||
# # # # ]
3051 : : result == TM_Updated ||
3052 : : result == TM_Deleted ||
3053 : : result == TM_BeingModified);
3054 [ + - ]: 7 : Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
3055 [ - + # # ]: 7 : Assert(result != TM_Updated ||
3056 : : !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
3057 : 7 : }
3058 : :
3059 [ - + # # ]: 303827 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
3060 : : {
3061 : : /* Perform additional check for transaction-snapshot mode RI updates */
3062 [ # # ]: 0 : if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3063 : 0 : result = TM_Updated;
3064 : 0 : }
3065 : :
3066 [ + + ]: 303827 : if (result != TM_Ok)
3067 : : {
3068 : 7 : tmfd->ctid = tp.t_data->t_ctid;
3069 : 7 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3070 [ + - ]: 7 : if (result == TM_SelfModified)
3071 : 7 : tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3072 : : else
3073 : 0 : tmfd->cmax = InvalidCommandId;
3074 : 7 : UnlockReleaseBuffer(buffer);
3075 [ + - ]: 7 : if (have_tuple_lock)
3076 : 0 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3077 [ + - ]: 7 : if (vmbuffer != InvalidBuffer)
3078 : 0 : ReleaseBuffer(vmbuffer);
3079 : 7 : return result;
3080 : : }
3081 : :
3082 : : /*
3083 : : * We're about to do the actual delete -- check for conflict first, to
3084 : : * avoid possibly having to roll back work we've just done.
3085 : : *
3086 : : * This is safe without a recheck as long as there is no possibility of
3087 : : * another process scanning the page between this check and the delete
3088 : : * being visible to the scan (i.e., an exclusive buffer content lock is
3089 : : * continuously held from this point until the tuple delete is visible).
3090 : : */
3091 : 303820 : CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
3092 : :
3093 : : /* replace cid with a combo CID if necessary */
3094 : 303820 : HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3095 : :
3096 : : /*
3097 : : * Compute replica identity tuple before entering the critical section so
3098 : : * we don't PANIC upon a memory allocation failure.
3099 : : */
3100 : 303820 : old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3101 : :
3102 : : /*
3103 : : * If this is the first possibly-multixact-able operation in the current
3104 : : * transaction, set my per-backend OldestMemberMXactId setting. We can be
3105 : : * certain that the transaction will never become a member of any older
3106 : : * MultiXactIds than that. (We have to do this even if we end up just
3107 : : * using our own TransactionId below, since some other backend could
3108 : : * incorporate our XID into a MultiXact immediately afterwards.)
3109 : : */
3110 : 303820 : MultiXactIdSetOldestMember();
3111 : :
3112 : 607640 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3113 : 303820 : tp.t_data->t_infomask, tp.t_data->t_infomask2,
3114 : 303820 : xid, LockTupleExclusive, true,
3115 : : &new_xmax, &new_infomask, &new_infomask2);
3116 : :
3117 : 303820 : START_CRIT_SECTION();
3118 : :
3119 : : /*
3120 : : * If this transaction commits, the tuple will become DEAD sooner or
3121 : : * later. Set flag that this page is a candidate for pruning once our xid
3122 : : * falls below the OldestXmin horizon. If the transaction finally aborts,
3123 : : * the subsequent page pruning will be a no-op and the hint will be
3124 : : * cleared.
3125 : : */
3126 [ + - + + : 303820 : PageSetPrunable(page, xid);
+ + ]
3127 : :
3128 [ + + ]: 303820 : if (PageIsAllVisible(page))
3129 : : {
3130 : 9 : all_visible_cleared = true;
3131 : 9 : PageClearAllVisible(page);
3132 : 18 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3133 : 9 : vmbuffer, VISIBILITYMAP_VALID_BITS);
3134 : 9 : }
3135 : :
3136 : : /* store transaction information of xact deleting the tuple */
3137 : 303820 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3138 : 303820 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3139 : 303820 : tp.t_data->t_infomask |= new_infomask;
3140 : 303820 : tp.t_data->t_infomask2 |= new_infomask2;
3141 : 303820 : HeapTupleHeaderClearHotUpdated(tp.t_data);
3142 : 303820 : HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3143 : 303820 : HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3144 : : /* Make sure there is no forward chain link in t_ctid */
3145 : 303820 : tp.t_data->t_ctid = tp.t_self;
3146 : :
3147 : : /* Signal that this is actually a move into another partition */
3148 [ + + ]: 303820 : if (changingPart)
3149 : 145 : HeapTupleHeaderSetMovedPartitions(tp.t_data);
3150 : :
3151 : 303820 : MarkBufferDirty(buffer);
3152 : :
3153 : : /*
3154 : : * XLOG stuff
3155 : : *
3156 : : * NB: heap_abort_speculative() uses the same xlog record and replay
3157 : : * routines.
3158 : : */
3159 [ + + + + : 303820 : if (RelationNeedsWAL(relation))
+ + + + ]
3160 : : {
3161 : 283618 : xl_heap_delete xlrec;
3162 : 283618 : xl_heap_header xlhdr;
3163 : 283618 : XLogRecPtr recptr;
3164 : :
3165 : : /*
3166 : : * For logical decoding we need combo CIDs to properly decode the
3167 : : * catalog.
3168 : : */
3169 [ + - - + : 283618 : if (RelationIsAccessibleInLogicalDecoding(relation))
# # # # #
# # # # #
# # ]
3170 : 0 : log_heap_new_cid(relation, &tp);
3171 : :
3172 : 283618 : xlrec.flags = 0;
3173 [ + + ]: 283618 : if (all_visible_cleared)
3174 : 9 : xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
3175 [ + + ]: 283618 : if (changingPart)
3176 : 145 : xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
3177 : 567236 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3178 : 283618 : tp.t_data->t_infomask2);
3179 : 283618 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3180 : 283618 : xlrec.xmax = new_xmax;
3181 : :
3182 [ + - ]: 283618 : if (old_key_tuple != NULL)
3183 : : {
3184 [ # # ]: 0 : if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3185 : 0 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3186 : : else
3187 : 0 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3188 : 0 : }
3189 : :
3190 : 283618 : XLogBeginInsert();
3191 : 283618 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
3192 : :
3193 : 283618 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3194 : :
3195 : : /*
3196 : : * Log replica identity of the deleted tuple if there is one
3197 : : */
3198 [ - + ]: 283618 : if (old_key_tuple != NULL)
3199 : : {
3200 : 0 : xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3201 : 0 : xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3202 : 0 : xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3203 : :
3204 : 0 : XLogRegisterData(&xlhdr, SizeOfHeapHeader);
3205 : 0 : XLogRegisterData((char *) old_key_tuple->t_data
3206 : 0 : + SizeofHeapTupleHeader,
3207 : 0 : old_key_tuple->t_len
3208 : 0 : - SizeofHeapTupleHeader);
3209 : 0 : }
3210 : :
3211 : : /* filtering by origin on a row level is much more efficient */
3212 : 283618 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3213 : :
3214 : 283618 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3215 : :
3216 : 283618 : PageSetLSN(page, recptr);
3217 : 283618 : }
3218 : :
3219 [ + - ]: 303820 : END_CRIT_SECTION();
3220 : :
3221 : 303820 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3222 : :
3223 [ + + ]: 303820 : if (vmbuffer != InvalidBuffer)
3224 : 9 : ReleaseBuffer(vmbuffer);
3225 : :
3226 : : /*
3227 : : * If the tuple has toasted out-of-line attributes, we need to delete
3228 : : * those items too. We have to do this before releasing the buffer
3229 : : * because we need to look at the contents of the tuple, but it's OK to
3230 : : * release the content lock on the buffer first.
3231 : : */
3232 [ + + + + ]: 303820 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
3233 : 531 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3234 : : {
3235 : : /* toast table entries should never be recursively toasted */
3236 [ + - ]: 528 : Assert(!HeapTupleHasExternal(&tp));
3237 : 528 : }
3238 [ + + ]: 303292 : else if (HeapTupleHasExternal(&tp))
3239 : 79 : heap_toast_delete(relation, &tp, false);
3240 : :
3241 : : /*
3242 : : * Mark tuple for invalidation from system caches at next command
3243 : : * boundary. We have to do this before releasing the buffer because we
3244 : : * need to look at the contents of the tuple.
3245 : : */
3246 : 303820 : CacheInvalidateHeapTuple(relation, &tp, NULL);
3247 : :
3248 : : /* Now we can release the buffer */
3249 : 303820 : ReleaseBuffer(buffer);
3250 : :
3251 : : /*
3252 : : * Release the lmgr tuple lock, if we had it.
3253 : : */
3254 [ + - ]: 303820 : if (have_tuple_lock)
3255 : 0 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3256 : :
3257 : 303820 : pgstat_count_heap_delete(relation);
3258 : :
3259 [ - + # # ]: 303820 : if (old_key_tuple != NULL && old_key_copied)
3260 : 0 : heap_freetuple(old_key_tuple);
3261 : :
3262 : 303820 : return TM_Ok;
3263 : 303827 : }
3264 : :
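/*
 * Editor's sketch (not part of heapam.c): how a caller consumes
 * heap_delete's result.  Setup of "rel" and "tid" is elided; the reaction
 * to each status mirrors simple_heap_delete below.
 */
static void
delete_or_report_sketch(Relation rel, const ItemPointerData *tid)
{
    TM_FailureData tmfd;
    TM_Result   result;

    result = heap_delete(rel, tid, GetCurrentCommandId(true),
                         InvalidSnapshot, true /* wait */ ,
                         &tmfd, false /* changingPart */ );

    /*
     * On failure, tmfd.ctid names the successor version (when one exists),
     * tmfd.xmax the deleting/updating transaction, and tmfd.cmax is valid
     * only for TM_SelfModified.
     */
    if (result != TM_Ok)
        elog(ERROR, "delete failed with status %u (xmax %u)",
             result, tmfd.xmax);
}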
3265 : : /*
3266 : : * simple_heap_delete - delete a tuple
3267 : : *
3268 : : * This routine may be used to delete a tuple when concurrent updates of
3269 : : * the target tuple are not expected (for example, because we have a lock
3270 : : * on the relation associated with the tuple). Any failure is reported
3271 : : * via ereport().
3272 : : */
3273 : : void
3274 : 118843 : simple_heap_delete(Relation relation, const ItemPointerData *tid)
3275 : : {
3276 : 118843 : TM_Result result;
3277 : 118843 : TM_FailureData tmfd;
3278 : :
3279 : 237686 : result = heap_delete(relation, tid,
3280 : 118843 : GetCurrentCommandId(true), InvalidSnapshot,
3281 : : true /* wait for commit */ ,
3282 : : &tmfd, false /* changingPart */ );
3283 [ + - - - : 118843 : switch (result)
- ]
3284 : : {
3285 : : case TM_SelfModified:
3286 : : /* Tuple was already updated in current command? */
3287 [ # # # # ]: 0 : elog(ERROR, "tuple already updated by self");
3288 : 0 : break;
3289 : :
3290 : : case TM_Ok:
3291 : : /* done successfully */
3292 : : break;
3293 : :
3294 : : case TM_Updated:
3295 [ # # # # ]: 0 : elog(ERROR, "tuple concurrently updated");
3296 : 0 : break;
3297 : :
3298 : : case TM_Deleted:
3299 [ # # # # ]: 0 : elog(ERROR, "tuple concurrently deleted");
3300 : 0 : break;
3301 : :
3302 : : default:
3303 [ # # # # ]: 0 : elog(ERROR, "unrecognized heap_delete status: %u", result);
3304 : 0 : break;
3305 : : }
3306 : 118843 : }
3307 : :
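/*
 * Editor's sketch: scan-and-delete, the pattern simple_heap_delete is
 * intended for.  The systable_* routines are from access/genam.h;
 * scan-key construction is elided, and passing InvalidOid with
 * indexOK = false requests a plain sequential scan.
 */
static void
delete_matching_rows_sketch(Relation rel, ScanKey key, int nkeys)
{
    SysScanDesc scan;
    HeapTuple   tup;

    scan = systable_beginscan(rel, InvalidOid, false, NULL, nkeys, key);
    while (HeapTupleIsValid(tup = systable_getnext(scan)))
        simple_heap_delete(rel, &tup->t_self);
    systable_endscan(scan);
}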
3308 : : /*
3309 : : * heap_update - replace a tuple
3310 : : *
3311 : : * See table_tuple_update() for an explanation of the parameters, except that
3312 : : * this routine directly takes a tuple rather than a slot.
3313 : : *
3314 : : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
3315 : : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
3316 : : * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
3317 : : * generated by another transaction).
3318 : : */
3319 : : TM_Result
3320 : 24127 : heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
3321 : : CommandId cid, Snapshot crosscheck, bool wait,
3322 : : TM_FailureData *tmfd, LockTupleMode *lockmode,
3323 : : TU_UpdateIndexes *update_indexes)
3324 : : {
3325 : 24127 : TM_Result result;
3326 : 24127 : TransactionId xid = GetCurrentTransactionId();
3327 : 24127 : Bitmapset *hot_attrs;
3328 : 24127 : Bitmapset *sum_attrs;
3329 : 24127 : Bitmapset *key_attrs;
3330 : 24127 : Bitmapset *id_attrs;
3331 : 24127 : Bitmapset *interesting_attrs;
3332 : 24127 : Bitmapset *modified_attrs;
3333 : 24127 : ItemId lp;
3334 : 24127 : HeapTupleData oldtup;
3335 : 24127 : HeapTuple heaptup;
3336 : 24127 : HeapTuple old_key_tuple = NULL;
3337 : 24127 : bool old_key_copied = false;
3338 : 24127 : Page page;
3339 : 24127 : BlockNumber block;
3340 : 24127 : MultiXactStatus mxact_status;
3341 : 48254 : Buffer buffer,
3342 : : newbuf,
3343 : 24127 : vmbuffer = InvalidBuffer,
3344 : 24127 : vmbuffer_new = InvalidBuffer;
3345 : 24127 : bool need_toast;
3346 : 24127 : Size newtupsize,
3347 : : pagefree;
3348 : 24127 : bool have_tuple_lock = false;
3349 : 24127 : bool iscombo;
3350 : 24127 : bool use_hot_update = false;
3351 : 24127 : bool summarized_update = false;
3352 : 24127 : bool key_intact;
3353 : 24127 : bool all_visible_cleared = false;
3354 : 24127 : bool all_visible_cleared_new = false;
3355 : 24127 : bool checked_lockers;
3356 : 24127 : bool locker_remains;
3357 : 24127 : bool id_has_external = false;
3358 : 24127 : TransactionId xmax_new_tuple,
3359 : : xmax_old_tuple;
3360 : 24127 : uint16 infomask_old_tuple,
3361 : : infomask2_old_tuple,
3362 : : infomask_new_tuple,
3363 : : infomask2_new_tuple;
3364 : :
3365 [ + - ]: 24127 : Assert(ItemPointerIsValid(otid));
3366 : :
3367 : : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
3368 [ + - ]: 24127 : Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
3369 : : RelationGetNumberOfAttributes(relation));
3370 : :
3371 : 24127 : AssertHasSnapshotForToast(relation);
3372 : :
3373 : : /*
3374 : : * Forbid this during a parallel operation, lest it allocate a combo CID.
3375 : : * Other workers might need that combo CID for visibility checks, and we
3376 : : * have no provision for broadcasting it to them.
3377 : : */
3378 [ + - ]: 24127 : if (IsInParallelMode())
3379 [ # # # # ]: 0 : ereport(ERROR,
3380 : : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3381 : : errmsg("cannot update tuples during a parallel operation")));
3382 : :
3383 : : #ifdef USE_ASSERT_CHECKING
3384 : 24127 : check_lock_if_inplace_updateable_rel(relation, otid, newtup);
3385 : : #endif
3386 : :
3387 : : /*
3388 : : * Fetch the list of attributes to be checked for various operations.
3389 : : *
3390 : : * For HOT considerations, this is wasted effort if we fail to update or
3391 : : * have to put the new tuple on a different page. But we must compute the
3392 : : * list before obtaining buffer lock --- in the worst case, if we are
3393 : : * doing an update on one of the relevant system catalogs, we could
3394 : : * deadlock if we try to fetch the list later. In any case, the relcache
3395 : : * caches the data so this is usually pretty cheap.
3396 : : *
3397 : : * We also need columns used by the replica identity and columns that are
3398 : : * considered the "key" of rows in the table.
3399 : : *
3400 : : * Note that we get copies of each bitmap, so we need not worry about
3401 : : * relcache flush happening midway through.
3402 : : */
3403 : 24127 : hot_attrs = RelationGetIndexAttrBitmap(relation,
3404 : : INDEX_ATTR_BITMAP_HOT_BLOCKING);
3405 : 24127 : sum_attrs = RelationGetIndexAttrBitmap(relation,
3406 : : INDEX_ATTR_BITMAP_SUMMARIZED);
3407 : 24127 : key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3408 : 24127 : id_attrs = RelationGetIndexAttrBitmap(relation,
3409 : : INDEX_ATTR_BITMAP_IDENTITY_KEY);
3410 : 24127 : interesting_attrs = NULL;
3411 : 24127 : interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
3412 : 24127 : interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
3413 : 24127 : interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
3414 : 24127 : interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
3415 : :
3416 : 24127 : block = ItemPointerGetBlockNumber(otid);
3417 : : INJECTION_POINT("heap_update-before-pin", NULL);
3418 : 24127 : buffer = ReadBuffer(relation, block);
3419 : 24127 : page = BufferGetPage(buffer);
3420 : :
3421 : : /*
3422 : : * Before locking the buffer, pin the visibility map page if it appears to
3423 : : * be necessary. Since we haven't got the lock yet, someone else might be
3424 : : * in the middle of changing this, so we'll need to recheck after we have
3425 : : * the lock.
3426 : : */
3427 [ + + ]: 24127 : if (PageIsAllVisible(page))
3428 : 243 : visibilitymap_pin(relation, block, &vmbuffer);
3429 : :
3430 : 24127 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3431 : :
3432 : 24127 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3433 : :
3434 : : /*
3435 : : * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
3436 : : * we see LP_NORMAL here. When the otid origin is a syscache, we may have
3437 : : * neither a pin nor a snapshot. Hence, we may see other LP_ states, each
3438 : : * of which indicates concurrent pruning.
3439 : : *
3440 : : * Failing with TM_Updated would be most accurate. However, unlike other
3441 : : * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
3442 : : * LP_DEAD cases. While the distinction between TM_Updated and TM_Deleted
3443 : : * does matter to SQL statements UPDATE and MERGE, those SQL statements
3444 : : * hold a snapshot that ensures LP_NORMAL. Hence, the choice between
3445 : : * TM_Updated and TM_Deleted affects only the wording of error messages.
3446 : : * Settle on TM_Deleted, for two reasons. First, it avoids complicating
3447 : : * the specification of when tmfd->ctid is valid. Second, it creates
3448 : : * error log evidence that we took this branch.
3449 : : *
3450 : : * Since it's possible to see LP_UNUSED at otid, it's also possible to see
3451 : : * LP_NORMAL for a tuple that replaced LP_UNUSED. If it's a tuple for an
3452 : : * unrelated row, we'll fail with "duplicate key value violates unique".
3453 : : * XXX if otid is the live, newer version of the newtup row, we'll discard
3454 : : * changes originating in versions of this catalog row after the version
3455 : : * the caller got from syscache. See syscache-update-pruned.spec.
3456 : : */
3457 [ + - ]: 24127 : if (!ItemIdIsNormal(lp))
3458 : : {
3459 [ # # ]: 0 : Assert(RelationSupportsSysCache(RelationGetRelid(relation)));
3460 : :
3461 : 0 : UnlockReleaseBuffer(buffer);
3462 [ # # ]: 0 : Assert(!have_tuple_lock);
3463 [ # # ]: 0 : if (vmbuffer != InvalidBuffer)
3464 : 0 : ReleaseBuffer(vmbuffer);
3465 : 0 : tmfd->ctid = *otid;
3466 : 0 : tmfd->xmax = InvalidTransactionId;
3467 : 0 : tmfd->cmax = InvalidCommandId;
3468 : 0 : *update_indexes = TU_None;
3469 : :
3470 : 0 : bms_free(hot_attrs);
3471 : 0 : bms_free(sum_attrs);
3472 : 0 : bms_free(key_attrs);
3473 : 0 : bms_free(id_attrs);
3474 : : /* modified_attrs not yet initialized */
3475 : 0 : bms_free(interesting_attrs);
3476 : 0 : return TM_Deleted;
3477 : : }
3478 : :
3479 : : /*
3480 : : * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
3481 : : * properly.
3482 : : */
3483 : 24127 : oldtup.t_tableOid = RelationGetRelid(relation);
3484 : 24127 : oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3485 : 24127 : oldtup.t_len = ItemIdGetLength(lp);
3486 : 24127 : oldtup.t_self = *otid;
3487 : :
3488 : : /* the new tuple is ready, except for this: */
3489 : 24127 : newtup->t_tableOid = RelationGetRelid(relation);
3490 : :
3491 : : /*
3492 : : * Determine columns modified by the update. Additionally, identify
3493 : : * whether any of the unmodified replica identity key attributes in the
3494 : : * old tuple is externally stored or not. This is required because for
3495 : : * such attributes the flattened value won't be WAL logged as part of the
3496 : : * new tuple so we must include it as part of the old_key_tuple. See
3497 : : * ExtractReplicaIdentity.
3498 : : */
3499 : 48254 : modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
3500 : 24127 : id_attrs, &oldtup,
3501 : 24127 : newtup, &id_has_external);
3502 : :
3503 : : /*
3504 : : * If we're not updating any "key" column, we can grab a weaker lock type.
3505 : : * This allows for more concurrency when we are running simultaneously
3506 : : * with foreign key checks.
3507 : : *
3508 : : * Note that if a column gets detoasted while executing the update, but
3509 : : * the value ends up being the same, this test will fail and we will use
3510 : : * the stronger lock. This is acceptable; the important case to optimize
3511 : : * is updates that don't manipulate key columns, not those that
3512 : : * serendipitously arrive at the same key values.
3513 : : */
3514 [ + + ]: 48254 : if (!bms_overlap(modified_attrs, key_attrs))
3515 : : {
3516 : 22984 : *lockmode = LockTupleNoKeyExclusive;
3517 : 22984 : mxact_status = MultiXactStatusNoKeyUpdate;
3518 : 22984 : key_intact = true;
3519 : :
3520 : : /*
3521 : : * If this is the first possibly-multixact-able operation in the
3522 : : * current transaction, set my per-backend OldestMemberMXactId
3523 : : * setting. We can be certain that the transaction will never become a
3524 : : * member of any older MultiXactIds than that. (We have to do this
3525 : : * even if we end up just using our own TransactionId below, since
3526 : : * some other backend could incorporate our XID into a MultiXact
3527 : : * immediately afterwards.)
3528 : : */
3529 : 22984 : MultiXactIdSetOldestMember();
3530 : 22984 : }
3531 : : else
3532 : : {
3533 : 1143 : *lockmode = LockTupleExclusive;
3534 : 1143 : mxact_status = MultiXactStatusUpdate;
3535 : 1143 : key_intact = false;
3536 : : }
3537 : :
3538 : : /*
3539 : : * Note: beyond this point, use oldtup not otid to refer to old tuple.
3540 : : * otid may very well point at newtup->t_self, which we will overwrite
3541 : : * with the new tuple's location, so there's great risk of confusion if we
3542 : : * use otid anymore.
3543 : : */
3544 : :
3545 : : l2:
3546 : 24127 : checked_lockers = false;
3547 : 24127 : locker_remains = false;
3548 : 24127 : result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3549 : :
3550 : : /* see below about the "no wait" case */
3551 [ + + + - ]: 24127 : Assert(result != TM_BeingModified || wait);
3552 : :
3553 [ + - ]: 24127 : if (result == TM_Invisible)
3554 : : {
3555 : 0 : UnlockReleaseBuffer(buffer);
3556 [ # # # # ]: 0 : ereport(ERROR,
3557 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3558 : : errmsg("attempted to update invisible tuple")));
3559 : 0 : }
3560 [ + + - + ]: 24127 : else if (result == TM_BeingModified && wait)
3561 : : {
3562 : 324 : TransactionId xwait;
3563 : 324 : uint16 infomask;
3564 : 324 : bool can_continue = false;
3565 : :
3566 : : /*
3567 : : * XXX note that we don't consider the "no wait" case here. This
3568 : : * isn't a problem currently because no caller uses that case, but it
3569 : : * should be fixed if such a caller is introduced. It wasn't a
3570 : : * problem previously because this code would always wait, but now
3571 : : * that some tuple locks do not conflict with one of the lock modes we
3572 : : * use, it is possible that this case is interesting to handle
3573 : : * specially.
3574 : : *
3575 : : * This may cause failures with third-party code that calls
3576 : : * heap_update directly.
3577 : : */
3578 : :
3579 : : /* must copy state data before unlocking buffer */
3580 : 324 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3581 : 324 : infomask = oldtup.t_data->t_infomask;
3582 : :
3583 : : /*
3584 : : * Now we have to do something about the existing locker. If it's a
3585 : : * multi, sleep on it; we might be awakened before it is completely
3586 : : * gone (or even not sleep at all in some cases); we need to preserve
3587 : : * it as locker, unless it is gone completely.
3588 : : *
3589 : : * If it's not a multi, we need to check for sleeping conditions
3590 : : * before actually going to sleep. If the update doesn't conflict
3591 : : * with the locks, we just continue without sleeping (but making sure
3592 : : * it is preserved).
3593 : : *
3594 : : * Before sleeping, we need to acquire tuple lock to establish our
3595 : : * priority for the tuple (see heap_lock_tuple). LockTuple will
3596 : : * release us when we are next-in-line for the tuple. Note we must
3597 : : * not acquire the tuple lock until we're sure we're going to sleep;
3598 : : * otherwise we're open for race conditions with other transactions
3599 : : * holding the tuple lock which sleep on us.
3600 : : *
3601 : : * If we are forced to "start over" below, we keep the tuple lock;
3602 : : * this arranges that we stay at the head of the line while rechecking
3603 : : * tuple state.
3604 : : */
3605 [ - + ]: 324 : if (infomask & HEAP_XMAX_IS_MULTI)
3606 : : {
3607 : 0 : TransactionId update_xact;
3608 : 0 : int remain;
3609 : 0 : bool current_is_member = false;
3610 : :
3611 [ # # # # ]: 0 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3612 : 0 : *lockmode, &current_is_member))
3613 : : {
3614 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3615 : :
3616 : : /*
3617 : : * Acquire the lock, if necessary (but skip it when we're
3618 : : * requesting a lock and already have one; avoids deadlock).
3619 : : */
3620 [ # # ]: 0 : if (!current_is_member)
3621 : 0 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3622 : : LockWaitBlock, &have_tuple_lock);
3623 : :
3624 : : /* wait for multixact */
3625 : 0 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3626 : 0 : relation, &oldtup.t_self, XLTW_Update,
3627 : : &remain);
3628 : 0 : checked_lockers = true;
3629 : 0 : locker_remains = remain != 0;
3630 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3631 : :
3632 : : /*
3633 : : * If xwait had just locked the tuple then some other xact
3634 : : * could update this tuple before we get to this point. Check
3635 : : * for xmax change, and start over if so.
3636 : : */
3637 : 0 : if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3638 [ # # # # : 0 : infomask) ||
# # ]
3639 : 0 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3640 : : xwait))
3641 : 0 : goto l2;
3642 : 0 : }
3643 : :
3644 : : /*
3645 : : * Note that the multixact may not be done by now. It could have
3646 : : * surviving members: our own xact or other subxacts of this
3647 : : * backend, and also any other concurrent transaction that locked
3648 : : * the tuple with LockTupleKeyShare if we only got
3649 : : * LockTupleNoKeyExclusive. If this is the case, we have to be
3650 : : * careful to mark the updated tuple with the surviving members in
3651 : : * Xmax.
3652 : : *
3653 : : * Note that there could have been another update in the
3654 : : * MultiXact. In that case, we need to check whether it committed
3655 : : * or aborted. If it aborted we are safe to update it again;
3656 : : * otherwise there is an update conflict, and we have to return
3657 : : * TableTuple{Deleted, Updated} below.
3658 : : *
3659 : : * In the LockTupleExclusive case, we still need to preserve the
3660 : : * surviving members: those would include the tuple locks we had
3661 : : * before this one, which are important to keep in case this
3662 : : * subxact aborts.
3663 : : */
3664 [ # # ]: 0 : if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3665 : 0 : update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3666 : : else
3667 : 0 : update_xact = InvalidTransactionId;
3668 : :
3669 : : /*
3670 : : * There was no UPDATE in the MultiXact; or it aborted. No
3671 : : * TransactionIdIsInProgress() call needed here, since we called
3672 : : * MultiXactIdWait() above.
3673 : : */
3674 [ # # # # ]: 0 : if (!TransactionIdIsValid(update_xact) ||
3675 : 0 : TransactionIdDidAbort(update_xact))
3676 : 0 : can_continue = true;
3677 [ # # ]: 0 : }
3678 [ + - ]: 324 : else if (TransactionIdIsCurrentTransactionId(xwait))
3679 : : {
3680 : : /*
3681 : : * The only locker is ourselves; we can avoid grabbing the tuple
3682 : : * lock here, but must preserve our locking information.
3683 : : */
3684 : 324 : checked_lockers = true;
3685 : 324 : locker_remains = true;
3686 : 324 : can_continue = true;
3687 : 324 : }
3688 [ # # # # ]: 0 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3689 : : {
3690 : : /*
3691 : : * If it's just a key-share locker, and we're not changing the key
3692 : : * columns, we don't need to wait for it to end; but we need to
3693 : : * preserve it as locker.
3694 : : */
3695 : 0 : checked_lockers = true;
3696 : 0 : locker_remains = true;
3697 : 0 : can_continue = true;
3698 : 0 : }
3699 : : else
3700 : : {
3701 : : /*
3702 : : * Wait for regular transaction to end; but first, acquire tuple
3703 : : * lock.
3704 : : */
3705 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3706 : 0 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3707 : : LockWaitBlock, &have_tuple_lock);
3708 : 0 : XactLockTableWait(xwait, relation, &oldtup.t_self,
3709 : : XLTW_Update);
3710 : 0 : checked_lockers = true;
3711 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3712 : :
3713 : : /*
3714 : : * xwait is done, but if xwait had just locked the tuple then some
3715 : : * other xact could update this tuple before we get to this point.
3716 : : * Check for xmax change, and start over if so.
3717 : : */
3718 [ # # # # ]: 0 : if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3719 : 0 : !TransactionIdEquals(xwait,
3720 : : HeapTupleHeaderGetRawXmax(oldtup.t_data)))
3721 : 0 : goto l2;
3722 : :
3723 : : /* Otherwise check if it committed or aborted */
3724 : 0 : UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3725 [ # # ]: 0 : if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3726 : 0 : can_continue = true;
3727 : : }
3728 : :
3729 [ + - ]: 324 : if (can_continue)
3730 : 324 : result = TM_Ok;
3731 [ # # ]: 0 : else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
3732 : 0 : result = TM_Updated;
3733 : : else
3734 : 0 : result = TM_Deleted;
3735 [ - - + ]: 324 : }
3736 : :
3737 : : /* Sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
3738 [ + + ]: 24127 : if (result != TM_Ok)
3739 : : {
3740 [ - + # # : 17 : Assert(result == TM_SelfModified ||
# # # # ]
3741 : : result == TM_Updated ||
3742 : : result == TM_Deleted ||
3743 : : result == TM_BeingModified);
3744 [ + - ]: 17 : Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3745 [ - + # # ]: 17 : Assert(result != TM_Updated ||
3746 : : !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3747 : 17 : }
3748 : :
3749 [ - + # # ]: 24127 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
3750 : : {
3751 : : /* Perform additional check for transaction-snapshot mode RI updates */
3752 [ # # ]: 0 : if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3753 : 0 : result = TM_Updated;
3754 : 0 : }
3755 : :
3756 [ + + ]: 24127 : if (result != TM_Ok)
3757 : : {
3758 : 17 : tmfd->ctid = oldtup.t_data->t_ctid;
3759 : 17 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3760 [ + - ]: 17 : if (result == TM_SelfModified)
3761 : 17 : tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3762 : : else
3763 : 0 : tmfd->cmax = InvalidCommandId;
3764 : 17 : UnlockReleaseBuffer(buffer);
3765 [ + - ]: 17 : if (have_tuple_lock)
3766 : 0 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3767 [ + - ]: 17 : if (vmbuffer != InvalidBuffer)
3768 : 0 : ReleaseBuffer(vmbuffer);
3769 : 17 : *update_indexes = TU_None;
3770 : :
3771 : 17 : bms_free(hot_attrs);
3772 : 17 : bms_free(sum_attrs);
3773 : 17 : bms_free(key_attrs);
3774 : 17 : bms_free(id_attrs);
3775 : 17 : bms_free(modified_attrs);
3776 : 17 : bms_free(interesting_attrs);
3777 : 17 : return result;
3778 : : }
3779 : :
3780 : : /*
3781 : : * If we didn't pin the visibility map page and the page has become all
3782 : : * visible while we were busy locking the buffer, or during some
3783 : : * subsequent window during which we had it unlocked, we'll have to unlock
3784 : : * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3785 : : * bit unfortunate, especially since we'll now have to recheck whether the
3786 : : * tuple has been locked or updated under us, but hopefully it won't
3787 : : * happen very often.
3788 : : */
3789 [ + + + - ]: 24110 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3790 : : {
3791 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3792 : 0 : visibilitymap_pin(relation, block, &vmbuffer);
3793 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3794 : 0 : goto l2;
3795 : : }
3796 : :
3797 : : /* Fill in transaction status data */
3798 : :
3799 : : /*
3800 : : * If the tuple we're updating is locked, we need to preserve the locking
3801 : : * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3802 : : */
3803 : 48220 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3804 : 24110 : oldtup.t_data->t_infomask,
3805 : 24110 : oldtup.t_data->t_infomask2,
3806 : 24110 : xid, *lockmode, true,
3807 : : &xmax_old_tuple, &infomask_old_tuple,
3808 : : &infomask2_old_tuple);
3809 : :
3810 : : /*
3811 : : * And also prepare an Xmax value for the new copy of the tuple. If there
3812 : : * was no xmax previously, or there was one but all lockers are now gone,
3813 : : * then use InvalidTransactionId; otherwise, get the xmax from the old
3814 : : * tuple. (In rare cases that might also be InvalidTransactionId and yet
3815 : : * not have the HEAP_XMAX_INVALID bit set; that's fine.)
3816 : : */
3817 [ + + ]: 24110 : if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3818 [ + - + - ]: 648 : HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
3819 [ + - ]: 324 : (checked_lockers && !locker_remains))
3820 : 23786 : xmax_new_tuple = InvalidTransactionId;
3821 : : else
3822 : 324 : xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3823 : :
3824 [ + + ]: 24110 : if (!TransactionIdIsValid(xmax_new_tuple))
3825 : : {
3826 : 23786 : infomask_new_tuple = HEAP_XMAX_INVALID;
3827 : 23786 : infomask2_new_tuple = 0;
3828 : 23786 : }
3829 : : else
3830 : : {
3831 : : /*
3832 : : * If we found a valid Xmax for the new tuple, then the infomask bits
3833 : : * to use on the new tuple depend on what was there on the old one.
3834 : : * Note that since we're doing an update, the only possibility is that
3835 : : * the lockers had FOR KEY SHARE lock.
3836 : : */
3837 [ - + ]: 324 : if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3838 : : {
3839 : 0 : GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3840 : : &infomask2_new_tuple);
3841 : 0 : }
3842 : : else
3843 : : {
3844 : 324 : infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3845 : 324 : infomask2_new_tuple = 0;
3846 : : }
3847 : : }
3848 : :
3849 : : /*
3850 : : * Prepare the new tuple with the appropriate initial values of Xmin and
3851 : : * Xmax, as well as initial infomask bits as computed above.
3852 : : */
3853 : 24110 : newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3854 : 24110 : newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3855 : 24110 : HeapTupleHeaderSetXmin(newtup->t_data, xid);
3856 : 24110 : HeapTupleHeaderSetCmin(newtup->t_data, cid);
3857 : 24110 : newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3858 : 24110 : newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3859 : 24110 : HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3860 : :
3861 : : /*
3862 : : * Replace cid with a combo CID if necessary. Note that we already put
3863 : : * the plain cid into the new tuple.
3864 : : */
3865 : 24110 : HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3866 : :
3867 : : /*
3868 : : * If the toaster needs to be activated, OR if the new tuple will not fit
3869 : : * on the same page as the old, then we need to release the content lock
3870 : : * (but not the pin!) on the old tuple's buffer while we are off doing
3871 : : * TOAST and/or table-file-extension work. We must mark the old tuple to
3872 : : * show that it's locked, else other processes may try to update it
3873 : : * themselves.
3874 : : *
3875 : : * We need to invoke the toaster if there are already any out-of-line
3876 : : * toasted values present, or if the new tuple is over-threshold.
3877 : : */
3878 [ - + # # ]: 24110 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
3879 : 0 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3880 : : {
3881 : : /* toast table entries should never be recursively toasted */
3882 [ # # ]: 0 : Assert(!HeapTupleHasExternal(&oldtup));
3883 [ # # ]: 0 : Assert(!HeapTupleHasExternal(newtup));
3884 : 0 : need_toast = false;
3885 : 0 : }
3886 : : else
3887 [ + + ]: 48193 : need_toast = (HeapTupleHasExternal(&oldtup) ||
3888 [ + + ]: 24083 : HeapTupleHasExternal(newtup) ||
3889 : 24075 : newtup->t_len > TOAST_TUPLE_THRESHOLD);
3890 : :
3891 : 24110 : pagefree = PageGetHeapFreeSpace(page);
3892 : :
3893 : 24110 : newtupsize = MAXALIGN(newtup->t_len);
3894 : :
3895 [ + + + + ]: 24110 : if (need_toast || newtupsize > pagefree)
3896 : : {
3897 : 6650 : TransactionId xmax_lock_old_tuple;
3898 : 6650 : uint16 infomask_lock_old_tuple,
3899 : : infomask2_lock_old_tuple;
3900 : 6650 : bool cleared_all_frozen = false;
3901 : :
3902 : : /*
3903 : : * To prevent concurrent sessions from updating the tuple, we have to
3904 : : * temporarily mark it locked, while we release the page-level lock.
3905 : : *
3906 : : * To satisfy the rule that any xid potentially appearing in a buffer
3907 : : * written out to disk must first be covered by WAL, we unfortunately
3908 : : * have to WAL-log this temporary modification. We can reuse xl_heap_lock for this
3909 : : * purpose. If we crash/error before following through with the
3910 : : * actual update, xmax will be of an aborted transaction, allowing
3911 : : * other sessions to proceed.
3912 : : */
3913 : :
3914 : : /*
3915 : : * Compute xmax / infomask appropriate for locking the tuple. This has
3916 : : * to be done separately from the combo that's going to be used for
3917 : : * updating, because the potentially created multixact would otherwise
3918 : : * be wrong.
3919 : : */
3920 : 13300 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3921 : 6650 : oldtup.t_data->t_infomask,
3922 : 6650 : oldtup.t_data->t_infomask2,
3923 : 6650 : xid, *lockmode, false,
3924 : : &xmax_lock_old_tuple, &infomask_lock_old_tuple,
3925 : : &infomask2_lock_old_tuple);
3926 : :
3927 [ + - ]: 6650 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
3928 : :
3929 : 6650 : START_CRIT_SECTION();
3930 : :
3931 : : /* Clear obsolete visibility flags ... */
3932 : 6650 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3933 : 6650 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3934 : 6650 : HeapTupleClearHotUpdated(&oldtup);
3935 : : /* ... and store info about transaction updating this tuple */
3936 [ + - ]: 6650 : Assert(TransactionIdIsValid(xmax_lock_old_tuple));
3937 : 6650 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
3938 : 6650 : oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3939 : 6650 : oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
3940 : 6650 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3941 : :
3942 : : /* temporarily make it look not-updated, but locked */
3943 : 6650 : oldtup.t_data->t_ctid = oldtup.t_self;
3944 : :
3945 : : /*
3946 : : * Clear all-frozen bit on visibility map if needed. We could
3947 : : * immediately reset ALL_VISIBLE, but given that the WAL logging
3948 : : * overhead would be unchanged, that doesn't seem
3949 : : * worthwhile.
3950 : : */
3951 [ + + + + ]: 6650 : if (PageIsAllVisible(page) &&
3952 : 53 : visibilitymap_clear(relation, block, vmbuffer,
3953 : : VISIBILITYMAP_ALL_FROZEN))
3954 : 9 : cleared_all_frozen = true;
3955 : :
3956 : 6650 : MarkBufferDirty(buffer);
3957 : :
3958 [ + + + + : 6650 : if (RelationNeedsWAL(relation))
+ - + + ]
3959 : : {
3960 : 6642 : xl_heap_lock xlrec;
3961 : 6642 : XLogRecPtr recptr;
3962 : :
3963 : 6642 : XLogBeginInsert();
3964 : 6642 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3965 : :
3966 : 6642 : xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
3967 : 6642 : xlrec.xmax = xmax_lock_old_tuple;
3968 : 13284 : xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3969 : 6642 : oldtup.t_data->t_infomask2);
3970 : 6642 : xlrec.flags =
3971 : 6642 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
3972 : 6642 : XLogRegisterData(&xlrec, SizeOfHeapLock);
3973 : 6642 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
3974 : 6642 : PageSetLSN(page, recptr);
3975 : 6642 : }
3976 : :
3977 [ + - ]: 6650 : END_CRIT_SECTION();
3978 : :
3979 : 6650 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3980 : :
3981 : : /*
3982 : : * Let the toaster do its thing, if needed.
3983 : : *
3984 : : * Note: below this point, heaptup is the data we actually intend to
3985 : : * store into the relation; newtup is the caller's original untoasted
3986 : : * data.
3987 : : */
3988 [ + + ]: 6650 : if (need_toast)
3989 : : {
3990 : : /* Note we always use WAL and FSM during updates */
3991 : 98 : heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
3992 : 98 : newtupsize = MAXALIGN(heaptup->t_len);
3993 : 98 : }
3994 : : else
3995 : 6552 : heaptup = newtup;
3996 : :
3997 : : /*
3998 : : * Now, do we need a new page for the tuple, or not? This is a bit
3999 : : * tricky since someone else could have added tuples to the page while
4000 : : * we weren't looking. We have to recheck the available space after
4001 : : * reacquiring the buffer lock. But don't bother to do that if the
4002 : : * former amount of free space is still not enough; it's unlikely
4003 : : * there's more free now than before.
4004 : : *
4005 : : * What's more, if we need to get a new page, we will need to acquire
4006 : : * buffer locks on both old and new pages. To avoid deadlock against
4007 : : * some other backend trying to get the same two locks in the other
4008 : : * order, we must be consistent about the order we get the locks in.
4009 : : * We use the rule "lock the lower-numbered page of the relation
4010 : : * first". To implement this, we must do RelationGetBufferForTuple
4011 : : * while not holding the lock on the old page, and we must rely on it
4012 : : * to get the locks on both pages in the correct order.
4013 : : *
4014 : : * Another consideration is that we need visibility map page pin(s) if
4015 : : * we will have to clear the all-visible flag on either page. If we
4016 : : * call RelationGetBufferForTuple, we rely on it to acquire any such
4017 : : * pins; but if we don't, we have to handle that here. Hence we need
4018 : : * a loop.
4019 : : */
4020 : 6650 : for (;;)
4021 : : {
4022 [ + + ]: 6650 : if (newtupsize > pagefree)
4023 : : {
4024 : : /* It doesn't fit, must use RelationGetBufferForTuple. */
4025 : 13152 : newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
4026 : 6576 : buffer, 0, NULL,
4027 : : &vmbuffer_new, &vmbuffer,
4028 : : 0);
4029 : : /* We're all done. */
4030 : 6576 : break;
4031 : : }
4032 : : /* Acquire VM page pin if needed and we don't have it. */
4033 [ + - + - ]: 74 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
4034 : 0 : visibilitymap_pin(relation, block, &vmbuffer);
4035 : : /* Re-acquire the lock on the old tuple's page. */
4036 : 74 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
4037 : : /* Re-check using the up-to-date free space */
4038 : 74 : pagefree = PageGetHeapFreeSpace(page);
4039 [ + - - + ]: 148 : if (newtupsize > pagefree ||
4040 [ + - ]: 74 : (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
4041 : : {
4042 : : /*
4043 : : * Rats, it doesn't fit anymore, or somebody just now set the
4044 : : * all-visible flag. We must now unlock and loop to avoid
4045 : : * deadlock. Fortunately, this path should seldom be taken.
4046 : : */
4047 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4048 : 0 : }
4049 : : else
4050 : : {
4051 : : /* We're all done. */
4052 : 74 : newbuf = buffer;
4053 : 74 : break;
4054 : : }
4055 : : }
4056 : 6650 : }
4057 : : else
4058 : : {
4059 : : /* No TOAST work needed, and it'll fit on same page */
4060 : 17460 : newbuf = buffer;
4061 : 17460 : heaptup = newtup;
4062 : : }
4063 : :
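/*
 * Editor's sketch: the deadlock-avoidance rule described above, in
 * isolation.  When two pages of the same relation must both be locked
 * exclusively, lock the lower-numbered block first;
 * RelationGetBufferForTuple applies the same rule internally.
 *
 *     if (BufferGetBlockNumber(buf1) < BufferGetBlockNumber(buf2))
 *     {
 *         LockBuffer(buf1, BUFFER_LOCK_EXCLUSIVE);
 *         LockBuffer(buf2, BUFFER_LOCK_EXCLUSIVE);
 *     }
 *     else
 *     {
 *         LockBuffer(buf2, BUFFER_LOCK_EXCLUSIVE);
 *         LockBuffer(buf1, BUFFER_LOCK_EXCLUSIVE);
 *     }
 */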
4064 : : /*
4065 : : * We're about to do the actual update -- check for conflict first, to
4066 : : * avoid possibly having to roll back work we've just done.
4067 : : *
4068 : : * This is safe without a recheck as long as there is no possibility of
4069 : : * another process scanning the pages between this check and the update
4070 : : * being visible to the scan (i.e., exclusive buffer content lock(s) are
4071 : : * continuously held from this point until the tuple update is visible).
4072 : : *
4073 : : * For the new tuple the only check needed is at the relation level, but
4074 : : * since both tuples are in the same relation and the check for oldtup
4075 : : * will include checking the relation level, there is no benefit to a
4076 : : * separate check for the new tuple.
4077 : : */
4078 : 48220 : CheckForSerializableConflictIn(relation, &oldtup.t_self,
4079 : 24110 : BufferGetBlockNumber(buffer));
4080 : :
4081 : : /*
4082 : : * At this point newbuf and buffer are both pinned and locked, and newbuf
4083 : : * has enough space for the new tuple. If they are the same buffer, only
4084 : : * one pin is held.
4085 : : */
4086 : :
4087 [ + + ]: 24110 : if (newbuf == buffer)
4088 : : {
4089 : : /*
4090 : : * Since the new tuple is going into the same page, we might be able
4091 : : * to do a HOT update. Check if any of the index columns have been
4092 : : * changed.
4093 : : */
4094 [ + + ]: 17534 : if (!bms_overlap(modified_attrs, hot_attrs))
4095 : : {
4096 : 14631 : use_hot_update = true;
4097 : :
4098 : : /*
4099 : : * If none of the columns used in hot-blocking indexes were
4100 : : * updated, we can apply HOT. We must still check whether any
4101 : : * summarizing indexes need to be updated: if columns they cover
4102 : : * were modified, skipping that update could fail to detect,
4103 : : * e.g., value-bound changes in BRIN minmax indexes.
4104 : : */
4105 [ + + ]: 14631 : if (bms_overlap(modified_attrs, sum_attrs))
4106 : 547 : summarized_update = true;
4107 : 14631 : }
4108 : 17534 : }
4109 : : else
4110 : : {
4111 : : /* Set a hint that the old page could use prune/defrag */
4112 : 6576 : PageSetFull(page);
4113 : : }
4114 : :
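/*
 * Editor's sketch: the decision above, restated as a hypothetical helper
 * (names are illustrative, not in heapam.c).  It reflects how
 * *update_indexes is ultimately reported: HOT updates need no index
 * maintenance unless a summarizing index is affected.
 *
 *     static TU_UpdateIndexes
 *     classify_index_work(bool same_page, Bitmapset *modified,
 *                         Bitmapset *hot_blocking, Bitmapset *summarizing)
 *     {
 *         if (same_page && !bms_overlap(modified, hot_blocking))
 *             return bms_overlap(modified, summarizing) ? TU_Summarizing
 *                                                       : TU_None;
 *         return TU_All;
 *     }
 */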
4115 : : /*
4116 : : * Compute replica identity tuple before entering the critical section so
4117 : : * we don't PANIC upon a memory allocation failure.
4118 : : * ExtractReplicaIdentity() will return NULL if nothing needs to be
4119 : : * logged. Pass old key required as true only if the replica identity key
4120 : : * columns are modified or it has external data.
4121 : : */
4122 : 48055 : old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
4123 [ + + ]: 24110 : bms_overlap(modified_attrs, id_attrs) ||
4124 : 23945 : id_has_external,
4125 : : &old_key_copied);
4126 : :
4127 : : /* NO EREPORT(ERROR) from here till changes are logged */
4128 : 24110 : START_CRIT_SECTION();
4129 : :
4130 : : /*
4131 : : * If this transaction commits, the old tuple will become DEAD sooner or
4132 : : * later. Set flag that this page is a candidate for pruning once our xid
4133 : : * falls below the OldestXmin horizon. If the transaction finally aborts,
4134 : : * the subsequent page pruning will be a no-op and the hint will be
4135 : : * cleared.
4136 : : *
4137 : : * XXX Should we set hint on newbuf as well? If the transaction aborts,
4138 : : * there would be a prunable tuple in the newbuf; but for now we choose
4139 : : * not to optimize for aborts. Note that heap_xlog_update must be kept in
4140 : : * sync if this decision changes.
4141 : : */
4142 [ + - + + : 24110 : PageSetPrunable(page, xid);
+ + ]
4143 : :
4144 [ + + ]: 24110 : if (use_hot_update)
4145 : : {
4146 : : /* Mark the old tuple as HOT-updated */
4147 : 14631 : HeapTupleSetHotUpdated(&oldtup);
4148 : : /* And mark the new tuple as heap-only */
4149 : 14631 : HeapTupleSetHeapOnly(heaptup);
4150 : : /* Mark the caller's copy too, in case different from heaptup */
4151 : 14631 : HeapTupleSetHeapOnly(newtup);
4152 : 14631 : }
4153 : : else
4154 : : {
4155 : : /* Make sure tuples are correctly marked as not-HOT */
4156 : 9479 : HeapTupleClearHotUpdated(&oldtup);
4157 : 9479 : HeapTupleClearHeapOnly(heaptup);
4158 : 9479 : HeapTupleClearHeapOnly(newtup);
4159 : : }
4160 : :
4161 : 24110 : RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4162 : :
4163 : :
4164 : : /* Clear obsolete visibility flags, possibly set by ourselves above... */
4165 : 24110 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4166 : 24110 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4167 : : /* ... and store info about transaction updating this tuple */
4168 [ + - ]: 24110 : Assert(TransactionIdIsValid(xmax_old_tuple));
4169 : 24110 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
4170 : 24110 : oldtup.t_data->t_infomask |= infomask_old_tuple;
4171 : 24110 : oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4172 : 24110 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
4173 : :
4174 : : /* record address of new tuple in t_ctid of old one */
4175 : 24110 : oldtup.t_data->t_ctid = heaptup->t_self;
4176 : :
4177 : : /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
4178 [ + + ]: 24110 : if (PageIsAllVisible(BufferGetPage(buffer)))
4179 : : {
4180 : 243 : all_visible_cleared = true;
4181 : 243 : PageClearAllVisible(BufferGetPage(buffer));
4182 : 486 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4183 : 243 : vmbuffer, VISIBILITYMAP_VALID_BITS);
4184 : 243 : }
4185 [ + + + + ]: 24110 : if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
4186 : : {
4187 : 44 : all_visible_cleared_new = true;
4188 : 44 : PageClearAllVisible(BufferGetPage(newbuf));
4189 : 88 : visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
4190 : 44 : vmbuffer_new, VISIBILITYMAP_VALID_BITS);
4191 : 44 : }
4192 : :
4193 [ + + ]: 24110 : if (newbuf != buffer)
4194 : 6576 : MarkBufferDirty(newbuf);
4195 : 24110 : MarkBufferDirty(buffer);
4196 : :
4197 : : /* XLOG stuff */
4198 [ + + + + : 24110 : if (RelationNeedsWAL(relation))
+ + + + ]
4199 : : {
4200 : 23699 : XLogRecPtr recptr;
4201 : :
4202 : : /*
4203 : : * For logical decoding we need combo CIDs to properly decode the
4204 : : * catalog.
4205 : : */
4206 [ + - - + : 23699 : if (RelationIsAccessibleInLogicalDecoding(relation))
# # # # #
# # # # #
# # ]
4207 : : {
4208 : 0 : log_heap_new_cid(relation, &oldtup);
4209 : 0 : log_heap_new_cid(relation, heaptup);
4210 : 0 : }
4211 : :
4212 : 47398 : recptr = log_heap_update(relation, buffer,
4213 : 23699 : newbuf, &oldtup, heaptup,
4214 : 23699 : old_key_tuple,
4215 : 23699 : all_visible_cleared,
4216 : 23699 : all_visible_cleared_new);
4217 [ + + ]: 23699 : if (newbuf != buffer)
4218 : : {
4219 : 6570 : PageSetLSN(BufferGetPage(newbuf), recptr);
4220 : 6570 : }
4221 : 23699 : PageSetLSN(BufferGetPage(buffer), recptr);
4222 : 23699 : }
4223 : :
4224 [ + - ]: 24110 : END_CRIT_SECTION();
4225 : :
4226 [ + + ]: 24110 : if (newbuf != buffer)
4227 : 6576 : LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
4228 : 24110 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4229 : :
4230 : : /*
4231 : : * Mark old tuple for invalidation from system caches at next command
4232 : : * boundary, and mark the new tuple for invalidation in case we abort. We
4233 : : * have to do this before releasing the buffer because oldtup is in the
4234 : : * buffer. (heaptup is all in local memory, but it's necessary to process
4235 : : * both tuple versions in one call to inval.c so we can avoid redundant
4236 : : * sinval messages.)
4237 : : */
4238 : 24110 : CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
4239 : :
4240 : : /* Now we can release the buffer(s) */
4241 [ + + ]: 24110 : if (newbuf != buffer)
4242 : 6576 : ReleaseBuffer(newbuf);
4243 : 24110 : ReleaseBuffer(buffer);
4244 [ + + ]: 24110 : if (BufferIsValid(vmbuffer_new))
4245 : 44 : ReleaseBuffer(vmbuffer_new);
4246 [ + + ]: 24110 : if (BufferIsValid(vmbuffer))
4247 : 243 : ReleaseBuffer(vmbuffer);
4248 : :
4249 : : /*
4250 : : * Release the lmgr tuple lock, if we had it.
4251 : : */
4252 [ + - ]: 24110 : if (have_tuple_lock)
4253 : 0 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4254 : :
4255 : 24110 : pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
4256 : :
4257 : : /*
4258 : : * If heaptup is a private copy, release it. Don't forget to copy t_self
4259 : : * back to the caller's image, too.
4260 : : */
4261 [ + + ]: 24110 : if (heaptup != newtup)
4262 : : {
4263 : 92 : newtup->t_self = heaptup->t_self;
4264 : 92 : heap_freetuple(heaptup);
4265 : 92 : }
4266 : :
4267 : : /*
 4268                 :              :      * Even for a HOT update, we may still need to update summarizing
 4269                 :              :      * indexes; failing to refresh those summaries would yield incorrect
 4270                 :              :      * results (for example, the minmax bounds of the block may change
 4271                 :              :      * with this update).
4272 : : */
4273 [ + + ]: 24110 : if (use_hot_update)
4274 : : {
4275 [ + + ]: 14631 : if (summarized_update)
4276 : 547 : *update_indexes = TU_Summarizing;
4277 : : else
4278 : 14084 : *update_indexes = TU_None;
4279 : 14631 : }
4280 : : else
4281 : 9479 : *update_indexes = TU_All;
4282 : :
4283 [ - + # # ]: 24110 : if (old_key_tuple != NULL && old_key_copied)
4284 : 0 : heap_freetuple(old_key_tuple);
4285 : :
4286 : 24110 : bms_free(hot_attrs);
4287 : 24110 : bms_free(sum_attrs);
4288 : 24110 : bms_free(key_attrs);
4289 : 24110 : bms_free(id_attrs);
4290 : 24110 : bms_free(modified_attrs);
4291 : 24110 : bms_free(interesting_attrs);
4292 : :
4293 : 24110 : return TM_Ok;
4294 : 24127 : }
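
/*
 * The TU_None / TU_Summarizing / TU_All outcome above is driven purely by
 * bitmapset overlaps between the modified columns and the columns used by
 * HOT-blocking vs. summarizing indexes.  A minimal standalone sketch of
 * that decision, using plain bitmasks in place of Bitmapset; the column
 * numbers and index layout below are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { TU_NONE, TU_SUMMARIZING, TU_ALL } UpdateIndexesSketch;

static UpdateIndexesSketch
decide_index_updates(bool fits_on_same_page,
					 uint64_t modified_cols,
					 uint64_t hot_blocking_cols,
					 uint64_t summarizing_cols)
{
	/* HOT requires same-page placement and no HOT-blocking column changed */
	if (fits_on_same_page && (modified_cols & hot_blocking_cols) == 0)
		return (modified_cols & summarizing_cols) ? TU_SUMMARIZING : TU_NONE;
	return TU_ALL;				/* non-HOT: every index needs a new entry */
}

int
main(void)
{
	uint64_t	hot = (1u << 1) | (1u << 3);	/* btree on columns 1 and 3 */
	uint64_t	sum = (1u << 2);	/* BRIN minmax on column 2 */

	printf("%d\n", decide_index_updates(true, 1u << 2, hot, sum));	/* 1 */
	printf("%d\n", decide_index_updates(true, 1u << 4, hot, sum));	/* 0 */
	printf("%d\n", decide_index_updates(true, 1u << 1, hot, sum));	/* 2 */
	return 0;
}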
4295 : :
4296 : : #ifdef USE_ASSERT_CHECKING
4297 : : /*
4298 : : * Confirm adequate lock held during heap_update(), per rules from
4299 : : * README.tuplock section "Locking to write inplace-updated tables".
4300 : : */
4301 : : static void
4302 : 24127 : check_lock_if_inplace_updateable_rel(Relation relation,
4303 : : const ItemPointerData *otid,
4304 : : HeapTuple newtup)
4305 : : {
4306 : : /* LOCKTAG_TUPLE acceptable for any catalog */
4307 [ + + ]: 24127 : switch (RelationGetRelid(relation))
4308 : : {
4309 : : case RelationRelationId:
4310 : : case DatabaseRelationId:
4311 : : {
4312 : 10252 : LOCKTAG tuptag;
4313 : :
4314 : 10252 : SET_LOCKTAG_TUPLE(tuptag,
4315 : : relation->rd_lockInfo.lockRelId.dbId,
4316 : : relation->rd_lockInfo.lockRelId.relId,
4317 : : ItemPointerGetBlockNumber(otid),
4318 : : ItemPointerGetOffsetNumber(otid));
4319 [ + + ]: 10252 : if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
4320 : 2395 : return;
4321 [ - + + ]: 10252 : }
4322 : 7857 : break;
4323 : : default:
4324 [ + - ]: 13875 : Assert(!IsInplaceUpdateRelation(relation));
4325 : 13875 : return;
4326 : : }
4327 : :
4328 [ - + - ]: 7857 : switch (RelationGetRelid(relation))
4329 : : {
4330 : : case RelationRelationId:
4331 : : {
4332 : : /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
4333 : 7857 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
4334 : 7857 : Oid relid = classForm->oid;
4335 : 7857 : Oid dbid;
4336 : 7857 : LOCKTAG tag;
4337 : :
4338 [ + + ]: 7857 : if (IsSharedRelation(relid))
4339 : 3 : dbid = InvalidOid;
4340 : : else
4341 : 7854 : dbid = MyDatabaseId;
4342 : :
4343 [ + + ]: 7857 : if (classForm->relkind == RELKIND_INDEX)
4344 : : {
4345 : 137 : Relation irel = index_open(relid, AccessShareLock);
4346 : :
4347 : 137 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4348 : 137 : index_close(irel, AccessShareLock);
4349 : 137 : }
4350 : : else
4351 : 7720 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4352 : :
4353 [ + + + - ]: 7857 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
4354 : 7102 : !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
4355 [ # # # # ]: 0 : elog(WARNING,
4356 : : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4357 : : NameStr(classForm->relname),
4358 : : relid,
4359 : : classForm->relkind,
4360 : : ItemPointerGetBlockNumber(otid),
4361 : : ItemPointerGetOffsetNumber(otid));
4362 : 7857 : }
4363 : 7857 : break;
4364 : : case DatabaseRelationId:
4365 : : {
4366 : : /* LOCKTAG_TUPLE required */
4367 : 0 : Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
4368 : :
4369 [ # # # # ]: 0 : elog(WARNING,
4370 : : "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
4371 : : NameStr(dbForm->datname),
4372 : : dbForm->oid,
4373 : : ItemPointerGetBlockNumber(otid),
4374 : : ItemPointerGetOffsetNumber(otid));
4375 : 0 : }
4376 : 0 : break;
4377 : : }
4378 : 24127 : }
4379 : :
4380 : : /*
4381 : : * Confirm adequate relation lock held, per rules from README.tuplock section
4382 : : * "Locking to write inplace-updated tables".
4383 : : */
4384 : : static void
4385 : 10378 : check_inplace_rel_lock(HeapTuple oldtup)
4386 : : {
4387 : 10378 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
4388 : 10378 : Oid relid = classForm->oid;
4389 : 10378 : Oid dbid;
4390 : 10378 : LOCKTAG tag;
4391 : :
4392 [ + + ]: 10378 : if (IsSharedRelation(relid))
4393 : 145 : dbid = InvalidOid;
4394 : : else
4395 : 10233 : dbid = MyDatabaseId;
4396 : :
4397 [ + + ]: 10378 : if (classForm->relkind == RELKIND_INDEX)
4398 : : {
4399 : 4494 : Relation irel = index_open(relid, AccessShareLock);
4400 : :
4401 : 4494 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4402 : 4494 : index_close(irel, AccessShareLock);
4403 : 4494 : }
4404 : : else
4405 : 5884 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4406 : :
4407 [ + - ]: 10378 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
4408 [ # # # # ]: 0 : elog(WARNING,
4409 : : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4410 : : NameStr(classForm->relname),
4411 : : relid,
4412 : : classForm->relkind,
4413 : : ItemPointerGetBlockNumber(&oldtup->t_self),
4414 : : ItemPointerGetOffsetNumber(&oldtup->t_self));
4415 : 10378 : }
4416 : : #endif
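
/*
 * The assertion helpers above encode the README.tuplock rule that a writer
 * of an inplace-updated catalog row must hold a lock on the relation the
 * row describes -- and, when that row describes an index, on the index's
 * parent table instead.  A small sketch of just that target-selection step,
 * with a hypothetical struct standing in for the syscache lookups:
 */
#include <stdbool.h>

typedef struct CatalogRowSketch
{
	unsigned	relid;			/* OID of the relation the row describes */
	bool		is_index;		/* relkind == RELKIND_INDEX? */
	unsigned	parent_relid;	/* for indexes: the indexed table's OID */
} CatalogRowSketch;

/* Which relation must the ShareUpdateExclusive (or stronger) lock cover? */
static unsigned
inplace_lock_target(const CatalogRowSketch *row)
{
	return row->is_index ? row->parent_relid : row->relid;
}

int
main(void)
{
	CatalogRowSketch idx = {.relid = 16384, .is_index = true, .parent_relid = 16380};

	return inplace_lock_target(&idx) == 16380 ? 0 : 1;
}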
4417 : :
4418 : : /*
4419 : : * Check if the specified attribute's values are the same. Subroutine for
4420 : : * HeapDetermineColumnsInfo.
4421 : : */
4422 : : static bool
4423 : 104887 : heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
4424 : : bool isnull1, bool isnull2)
4425 : : {
4426 : : /*
 4427                 :              :      * If one value is NULL and the other is not, then they are certainly
 4428                 :              :      * not equal.
4429 : : */
4430 [ + + ]: 104887 : if (isnull1 != isnull2)
4431 : 15 : return false;
4432 : :
4433 : : /*
4434 : : * If both are NULL, they can be considered equal.
4435 : : */
4436 [ + + ]: 104872 : if (isnull1)
4437 : 1650 : return true;
4438 : :
4439 : : /*
4440 : : * We do simple binary comparison of the two datums. This may be overly
4441 : : * strict because there can be multiple binary representations for the
4442 : : * same logical value. But we should be OK as long as there are no false
4443 : : * positives. Using a type-specific equality operator is messy because
4444 : : * there could be multiple notions of equality in different operator
4445 : : * classes; furthermore, we cannot safely invoke user-defined functions
4446 : : * while holding exclusive buffer lock.
4447 : : */
4448 [ - + ]: 103222 : if (attrnum <= 0)
4449 : : {
4450 : : /* The only allowed system columns are OIDs, so do this */
4451 : 0 : return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
4452 : : }
4453 : : else
4454 : : {
4455 : 103222 : CompactAttribute *att;
4456 : :
4457 [ + - ]: 103222 : Assert(attrnum <= tupdesc->natts);
4458 : 103222 : att = TupleDescCompactAttr(tupdesc, attrnum - 1);
4459 : 103222 : return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4460 : 103222 : }
4461 : 104887 : }
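
/*
 * A self-contained sketch of the binary comparison datumIsEqual() performs
 * for heap_attr_equals() above: pass-by-value attributes compare as bare
 * values, pass-by-reference attributes compare their full byte images.
 * The Datum typedef and the 4-byte varlena length word are simplifications
 * (real varlena headers can be short or TOASTed).
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef uintptr_t DatumSketch;

static bool
datum_binary_equal(DatumSketch d1, DatumSketch d2, bool byval, int len)
{
	if (byval)
		return d1 == d2;		/* value stored directly in the Datum */

	if (len == -1)
	{
		/* varlena: leading length word covers the whole value */
		uint32_t	len1,
					len2;

		memcpy(&len1, (const void *) d1, sizeof(len1));
		memcpy(&len2, (const void *) d2, sizeof(len2));
		return len1 == len2 &&
			memcmp((const void *) d1, (const void *) d2, len1) == 0;
	}

	/* fixed-length pass-by-reference type */
	return memcmp((const void *) d1, (const void *) d2, len) == 0;
}

int
main(void)
{
	int32_t		a = 42,
				b = 42;

	return !datum_binary_equal((DatumSketch) &a, (DatumSketch) &b, false, sizeof(a));
}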
4462 : :
4463 : : /*
4464 : : * Check which columns are being updated.
4465 : : *
4466 : : * Given an updated tuple, determine (and return into the output bitmapset),
4467 : : * from those listed as interesting, the set of columns that changed.
4468 : : *
 4469                 :              :  * has_external indicates whether any of the old tuple's unmodified
 4470                 :              :  * attributes (from those listed as interesting) is a member of
 4471                 :              :  * external_cols and is stored externally.
4472 : : */
4473 : : static Bitmapset *
4474 : 24127 : HeapDetermineColumnsInfo(Relation relation,
4475 : : Bitmapset *interesting_cols,
4476 : : Bitmapset *external_cols,
4477 : : HeapTuple oldtup, HeapTuple newtup,
4478 : : bool *has_external)
4479 : : {
4480 : 24127 : int attidx;
4481 : 24127 : Bitmapset *modified = NULL;
4482 : 24127 : TupleDesc tupdesc = RelationGetDescr(relation);
4483 : :
4484 : 24127 : attidx = -1;
4485 [ + + ]: 129014 : while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
4486 : : {
4487 : : /* attidx is zero-based, attrnum is the normal attribute number */
4488 : 104887 : AttrNumber attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
4489 : 104887 : Datum value1,
4490 : : value2;
4491 : 104887 : bool isnull1,
4492 : : isnull2;
4493 : :
4494 : : /*
4495 : : * If it's a whole-tuple reference, say "not equal". It's not really
4496 : : * worth supporting this case, since it could only succeed after a
4497 : : * no-op update, which is hardly a case worth optimizing for.
4498 : : */
4499 [ + - ]: 104887 : if (attrnum == 0)
4500 : : {
4501 : 0 : modified = bms_add_member(modified, attidx);
4502 : 0 : continue;
4503 : : }
4504 : :
4505 : : /*
4506 : : * Likewise, automatically say "not equal" for any system attribute
4507 : : * other than tableOID; we cannot expect these to be consistent in a
4508 : : * HOT chain, or even to be set correctly yet in the new tuple.
4509 : : */
4510 [ + - ]: 104887 : if (attrnum < 0)
4511 : : {
4512 [ # # ]: 0 : if (attrnum != TableOidAttributeNumber)
4513 : : {
4514 : 0 : modified = bms_add_member(modified, attidx);
4515 : 0 : continue;
4516 : : }
4517 : 0 : }
4518 : :
4519 : : /*
4520 : : * Extract the corresponding values. XXX this is pretty inefficient
4521 : : * if there are many indexed columns. Should we do a single
4522 : : * heap_deform_tuple call on each tuple, instead? But that doesn't
4523 : : * work for system columns ...
4524 : : */
4525 : 104887 : value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4526 : 104887 : value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4527 : :
4528 [ + + + + ]: 209774 : if (!heap_attr_equals(tupdesc, attrnum, value1,
4529 : 104887 : value2, isnull1, isnull2))
4530 : : {
4531 : 6157 : modified = bms_add_member(modified, attidx);
4532 : 6157 : continue;
4533 : : }
4534 : :
4535 : : /*
4536 : : * No need to check attributes that can't be stored externally. Note
4537 : : * that system attributes can't be stored externally.
4538 : : */
4539 [ + - + + : 98730 : if (attrnum < 0 || isnull1 ||
+ + ]
4540 : 97080 : TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
4541 : 92759 : continue;
4542 : :
4543 : : /*
4544 : : * Check if the old tuple's attribute is stored externally and is a
4545 : : * member of external_cols.
4546 : : */
4547 [ - + # # ]: 5971 : if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
4548 : 0 : bms_is_member(attidx, external_cols))
4549 : 0 : *has_external = true;
4550 [ - + + ]: 104887 : }
4551 : :
4552 : 48254 : return modified;
4553 : 24127 : }
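
/*
 * The attidx <-> attrnum mapping above lets a single zero-based Bitmapset
 * cover system attributes (negative attnums) as well as user attributes
 * (positive attnums).  A sketch of the round trip; -7 matches the current
 * value of FirstLowInvalidHeapAttributeNumber, but treat it as an
 * assumption here.
 */
#include <stdio.h>

#define FIRST_LOW_INVALID_ATTNUM (-7)

int
main(void)
{
	int			attnums[] = {1, -6};	/* first user column; tableoid */

	for (int i = 0; i < 2; i++)
	{
		int			attidx = attnums[i] - FIRST_LOW_INVALID_ATTNUM;

		printf("attnum %d -> attidx %d -> attnum %d\n",
			   attnums[i], attidx, attidx + FIRST_LOW_INVALID_ATTNUM);
	}
	return 0;
}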
4554 : :
4555 : : /*
4556 : : * simple_heap_update - replace a tuple
4557 : : *
4558 : : * This routine may be used to update a tuple when concurrent updates of
4559 : : * the target tuple are not expected (for example, because we have a lock
4560 : : * on the relation associated with the tuple). Any failure is reported
4561 : : * via ereport().
4562 : : */
4563 : : void
4564 : 17527 : simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup,
4565 : : TU_UpdateIndexes *update_indexes)
4566 : : {
4567 : 17527 : TM_Result result;
4568 : 17527 : TM_FailureData tmfd;
4569 : 17527 : LockTupleMode lockmode;
4570 : :
4571 : 35054 : result = heap_update(relation, otid, tup,
4572 : 17527 : GetCurrentCommandId(true), InvalidSnapshot,
4573 : : true /* wait for commit */ ,
4574 : 17527 : &tmfd, &lockmode, update_indexes);
4575 [ + - - - : 17527 : switch (result)
- ]
4576 : : {
4577 : : case TM_SelfModified:
4578 : : /* Tuple was already updated in current command? */
4579 [ # # # # ]: 0 : elog(ERROR, "tuple already updated by self");
4580 : 0 : break;
4581 : :
4582 : : case TM_Ok:
4583 : : /* done successfully */
4584 : : break;
4585 : :
4586 : : case TM_Updated:
4587 [ # # # # ]: 0 : elog(ERROR, "tuple concurrently updated");
4588 : 0 : break;
4589 : :
4590 : : case TM_Deleted:
4591 [ # # # # ]: 0 : elog(ERROR, "tuple concurrently deleted");
4592 : 0 : break;
4593 : :
4594 : : default:
4595 [ # # # # ]: 0 : elog(ERROR, "unrecognized heap_update status: %u", result);
4596 : 0 : break;
4597 : : }
4598 : 17527 : }
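
/*
 * In-core callers normally reach simple_heap_update() through
 * CatalogTupleUpdate(), which performs the update and maintains the
 * catalog's indexes in one call.  A hedged sketch of that pattern for a
 * pg_class flag; per the assertions earlier in this file, the caller must
 * also hold a sufficient lock on the relation the row describes.
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/pg_class.h"
#include "storage/lmgr.h"
#include "utils/syscache.h"

static void
set_relrowsecurity_sketch(Oid relid, bool value)
{
	Relation	classRel;
	HeapTuple	tup;

	/* lock the described relation first, then open its catalog */
	LockRelationOid(relid, ShareUpdateExclusiveLock);
	classRel = table_open(RelationRelationId, RowExclusiveLock);

	tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", relid);

	((Form_pg_class) GETSTRUCT(tup))->relrowsecurity = value;

	/* simple_heap_update() plus catalog index maintenance */
	CatalogTupleUpdate(classRel, &tup->t_self, tup);

	heap_freetuple(tup);
	table_close(classRel, RowExclusiveLock);
}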
4599 : :
4600 : :
4601 : : /*
4602 : : * Return the MultiXactStatus corresponding to the given tuple lock mode.
4603 : : */
4604 : : static MultiXactStatus
4605 : 3 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4606 : : {
4607 : 3 : int retval;
4608 : :
4609 [ + + ]: 3 : if (is_update)
4610 : 1 : retval = tupleLockExtraInfo[mode].updstatus;
4611 : : else
4612 : 2 : retval = tupleLockExtraInfo[mode].lockstatus;
4613 : :
4614 [ + - ]: 3 : if (retval == -1)
4615 [ # # # # ]: 0 : elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4616 : : is_update ? "true" : "false");
4617 : :
4618 : 6 : return (MultiXactStatus) retval;
4619 : 3 : }
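
/*
 * tupleLockExtraInfo is a per-mode lookup table in which -1 marks
 * mode/operation combinations that have no corresponding MultiXactStatus.
 * A standalone sketch of the same table-plus-sentinel pattern; the status
 * numbers are illustrative, not the real MultiXactStatus values.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static const struct
{
	int			lockstatus;		/* status when only locking, or -1 */
	int			updstatus;		/* status when updating, or -1 */
}			lock_extra[] = {
	{0, -1},					/* key share: never an update */
	{1, -1},					/* share: never an update */
	{2, 3},						/* no-key exclusive */
	{4, 5},						/* exclusive */
};

static int
status_for_lock(int mode, bool is_update)
{
	int			s;

	s = is_update ? lock_extra[mode].updstatus : lock_extra[mode].lockstatus;
	if (s == -1)
	{
		fprintf(stderr, "invalid lock tuple mode %d/%d\n", mode, is_update);
		abort();
	}
	return s;
}

int
main(void)
{
	printf("%d\n", status_for_lock(3, true));	/* prints 5 */
	return 0;
}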
4620 : :
4621 : : /*
4622 : : * heap_lock_tuple - lock a tuple in shared or exclusive mode
4623 : : *
4624 : : * Note that this acquires a buffer pin, which the caller must release.
4625 : : *
4626 : : * Input parameters:
4627 : : * relation: relation containing tuple (caller must hold suitable lock)
4628 : : * cid: current command ID (used for visibility test, and stored into
4629 : : * tuple's cmax if lock is successful)
4630 : : * mode: indicates if shared or exclusive tuple lock is desired
4631 : : * wait_policy: what to do if tuple lock is not available
4632 : : * follow_updates: if true, follow the update chain to also lock descendant
4633 : : * tuples.
4634 : : *
4635 : : * Output parameters:
4636 : : * *tuple: all fields filled in
4637 : : * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4638 : : * *tmfd: filled in failure cases (see below)
4639 : : *
4640 : : * Function results are the same as the ones for table_tuple_lock().
4641 : : *
4642 : : * In the failure cases other than TM_Invisible, the routine fills
4643 : : * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4644 : : * if necessary), and t_cmax (the last only for TM_SelfModified,
4645 : : * since we cannot obtain cmax from a combo CID generated by another
4646 : : * transaction).
4647 : : * See comments for struct TM_FailureData for additional info.
4648 : : *
4649 : : * See README.tuplock for a thorough explanation of this mechanism.
4650 : : */
4651 : : TM_Result
4652 : 400949 : heap_lock_tuple(Relation relation, HeapTuple tuple,
4653 : : CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
4654 : : bool follow_updates,
4655 : : Buffer *buffer, TM_FailureData *tmfd)
4656 : : {
4657 : 400949 : TM_Result result;
4658 : 400949 : ItemPointer tid = &(tuple->t_self);
4659 : 400949 : ItemId lp;
4660 : 400949 : Page page;
4661 : 400949 : Buffer vmbuffer = InvalidBuffer;
4662 : 400949 : BlockNumber block;
4663 : 400949 : TransactionId xid,
4664 : : xmax;
4665 : 400949 : uint16 old_infomask,
4666 : : new_infomask,
4667 : : new_infomask2;
4668 : 400949 : bool first_time = true;
4669 : 400949 : bool skip_tuple_lock = false;
4670 : 400949 : bool have_tuple_lock = false;
4671 : 400949 : bool cleared_all_frozen = false;
4672 : :
4673 : 400949 : *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4674 : 400949 : block = ItemPointerGetBlockNumber(tid);
4675 : :
4676 : : /*
4677 : : * Before locking the buffer, pin the visibility map page if it appears to
4678 : : * be necessary. Since we haven't got the lock yet, someone else might be
4679 : : * in the middle of changing this, so we'll need to recheck after we have
4680 : : * the lock.
4681 : : */
4682 [ + + ]: 400949 : if (PageIsAllVisible(BufferGetPage(*buffer)))
4683 : 400001 : visibilitymap_pin(relation, block, &vmbuffer);
4684 : :
4685 : 400949 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4686 : :
4687 : 400949 : page = BufferGetPage(*buffer);
4688 : 400949 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4689 [ + - ]: 400949 : Assert(ItemIdIsNormal(lp));
4690 : :
4691 : 400949 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4692 : 400949 : tuple->t_len = ItemIdGetLength(lp);
4693 : 400949 : tuple->t_tableOid = RelationGetRelid(relation);
4694 : :
4695 : : l3:
4696 : 400949 : result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4697 : :
4698 [ + + ]: 402040 : if (result == TM_Invisible)
4699 : : {
4700 : : /*
4701 : : * This is possible, but only when locking a tuple for ON CONFLICT
4702 : : * UPDATE. We return this value here rather than throwing an error in
4703 : : * order to give that case the opportunity to throw a more specific
4704 : : * error.
4705 : : */
4706 : 4 : result = TM_Invisible;
4707 : 4 : goto out_locked;
4708 : : }
4709 [ + + ]: 400945 : else if (result == TM_BeingModified ||
4710 [ + - - + ]: 1081 : result == TM_Updated ||
4711 : 1081 : result == TM_Deleted)
4712 : : {
4713 : 399864 : TransactionId xwait;
4714 : 399864 : uint16 infomask;
4715 : 399864 : uint16 infomask2;
4716 : 399864 : bool require_sleep;
4717 : 399864 : ItemPointerData t_ctid;
4718 : :
4719 : : /* must copy state data before unlocking buffer */
4720 : 399864 : xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4721 : 399864 : infomask = tuple->t_data->t_infomask;
4722 : 399864 : infomask2 = tuple->t_data->t_infomask2;
4723 : 399864 : ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4724 : :
4725 : 399864 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4726 : :
4727 : : /*
4728 : : * If any subtransaction of the current top transaction already holds
4729 : : * a lock as strong as or stronger than what we're requesting, we
4730 : : * effectively hold the desired lock already. We *must* succeed
4731 : : * without trying to take the tuple lock, else we will deadlock
4732 : : * against anyone wanting to acquire a stronger lock.
4733 : : *
4734 : : * Note we only do this the first time we loop on the HTSU result;
4735 : : * there is no point in testing in subsequent passes, because
4736 : : * evidently our own transaction cannot have acquired a new lock after
4737 : : * the first time we checked.
4738 : : */
4739 [ - + ]: 399864 : if (first_time)
4740 : : {
4741 : 399864 : first_time = false;
4742 : :
4743 [ + + ]: 399864 : if (infomask & HEAP_XMAX_IS_MULTI)
4744 : : {
4745 : 1 : int i;
4746 : 1 : int nmembers;
4747 : 1 : MultiXactMember *members;
4748 : :
4749 : : /*
4750 : : * We don't need to allow old multixacts here; if that had
4751 : : * been the case, HeapTupleSatisfiesUpdate would have returned
4752 : : * MayBeUpdated and we wouldn't be here.
4753 : : */
4754 : 1 : nmembers =
4755 : 2 : GetMultiXactIdMembers(xwait, &members, false,
4756 : 1 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4757 : :
4758 [ + + ]: 3 : for (i = 0; i < nmembers; i++)
4759 : : {
4760 : : /* only consider members of our own transaction */
4761 [ + + ]: 2 : if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4762 : 1 : continue;
4763 : :
4764 [ - + ]: 1 : if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4765 : : {
4766 : 0 : pfree(members);
4767 : 0 : result = TM_Ok;
4768 : 0 : goto out_unlocked;
4769 : : }
4770 : : else
4771 : : {
4772 : : /*
4773 : : * Disable acquisition of the heavyweight tuple lock.
4774 : : * Otherwise, when promoting a weaker lock, we might
4775 : : * deadlock with another locker that has acquired the
4776 : : * heavyweight tuple lock and is waiting for our
4777 : : * transaction to finish.
4778 : : *
4779 : : * Note that in this case we still need to wait for
4780 : : * the multixact if required, to avoid acquiring
4781 : : * conflicting locks.
4782 : : */
4783 : 1 : skip_tuple_lock = true;
4784 : : }
4785 : 1 : }
4786 : :
4787 [ - + ]: 1 : if (members)
4788 : 1 : pfree(members);
4789 [ - + ]: 1 : }
4790 [ - + ]: 399863 : else if (TransactionIdIsCurrentTransactionId(xwait))
4791 : : {
4792 [ - + - + : 399863 : switch (mode)
+ ]
4793 : : {
4794 : : case LockTupleKeyShare:
4795 [ - + # # : 399843 : Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
# # ]
4796 : : HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4797 : : HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4798 : 399843 : result = TM_Ok;
4799 : 399843 : goto out_unlocked;
4800 : : case LockTupleShare:
4801 [ # # # # ]: 0 : if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4802 : 0 : HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4803 : : {
4804 : 0 : result = TM_Ok;
4805 : 0 : goto out_unlocked;
4806 : : }
4807 : 0 : break;
4808 : : case LockTupleNoKeyExclusive:
4809 [ + - ]: 10 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4810 : : {
4811 : 10 : result = TM_Ok;
4812 : 10 : goto out_unlocked;
4813 : : }
4814 : 0 : break;
4815 : : case LockTupleExclusive:
4816 [ + + + + ]: 10 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4817 : 8 : infomask2 & HEAP_KEYS_UPDATED)
4818 : : {
4819 : 1 : result = TM_Ok;
4820 : 1 : goto out_unlocked;
4821 : : }
4822 : 9 : break;
4823 : : }
4824 : 9 : }
4825 : 10 : }
4826 : :
4827 : : /*
4828 : : * Initially assume that we will have to wait for the locking
4829 : : * transaction(s) to finish. We check various cases below in which
4830 : : * this can be turned off.
4831 : : */
4832 : 10 : require_sleep = true;
4833 [ + - ]: 10 : if (mode == LockTupleKeyShare)
4834 : : {
4835 : : /*
4836 : : * If we're requesting KeyShare, and there's no update present, we
4837 : : * don't need to wait. Even if there is an update, we can still
4838 : : * continue if the key hasn't been modified.
4839 : : *
4840 : : * However, if there are updates, we need to walk the update chain
4841 : : * to mark future versions of the row as locked, too. That way,
4842 : : * if somebody deletes that future version, we're protected
4843 : : * against the key going away. This locking of future versions
4844 : : * could block momentarily, if a concurrent transaction is
4845 : : * deleting a key; or it could return a value to the effect that
4846 : : * the transaction deleting the key has already committed. So we
4847 : : * do this before re-locking the buffer; otherwise this would be
4848 : : * prone to deadlocks.
4849 : : *
4850 : : * Note that the TID we're locking was grabbed before we unlocked
4851 : : * the buffer. For it to change while we're not looking, the
4852 : : * other properties we're testing for below after re-locking the
4853 : : * buffer would also change, in which case we would restart this
4854 : : * loop above.
4855 : : */
4856 [ # # ]: 0 : if (!(infomask2 & HEAP_KEYS_UPDATED))
4857 : : {
4858 : 0 : bool updated;
4859 : :
4860 : 0 : updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4861 : :
4862 : : /*
4863 : : * If there are updates, follow the update chain; bail out if
4864 : : * that cannot be done.
4865 : : */
4866 [ # # # # : 0 : if (follow_updates && updated &&
# # ]
4867 : 0 : !ItemPointerEquals(&tuple->t_self, &t_ctid))
4868 : : {
4869 : 0 : TM_Result res;
4870 : :
4871 : 0 : res = heap_lock_updated_tuple(relation,
4872 : 0 : infomask, xwait, &t_ctid,
4873 : 0 : GetCurrentTransactionId(),
4874 : 0 : mode);
4875 [ # # ]: 0 : if (res != TM_Ok)
4876 : : {
4877 : 0 : result = res;
4878 : : /* recovery code expects to have buffer lock held */
4879 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4880 : 0 : goto failed;
4881 : : }
4882 [ # # ]: 0 : }
4883 : :
4884 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4885 : :
4886 : : /*
4887 : : * Make sure it's still an appropriate lock, else start over.
4888 : : * Also, if it wasn't updated before we released the lock, but
4889 : : * is updated now, we start over too; the reason is that we
4890 : : * now need to follow the update chain to lock the new
4891 : : * versions.
4892 : : */
4893 [ # # # # ]: 0 : if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4894 [ # # ]: 0 : ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4895 : 0 : !updated))
4896 : 0 : goto l3;
4897 : :
4898 : : /* Things look okay, so we can skip sleeping */
4899 : 0 : require_sleep = false;
4900 : :
4901 : : /*
4902 : : * Note we allow Xmax to change here; other updaters/lockers
4903 : : * could have modified it before we grabbed the buffer lock.
4904 : : * However, this is not a problem, because with the recheck we
4905 : : * just did we ensure that they still don't conflict with the
4906 : : * lock we want.
4907 : : */
4908 [ # # ]: 0 : }
4909 : 0 : }
4910 [ - + ]: 10 : else if (mode == LockTupleShare)
4911 : : {
4912 : : /*
4913 : : * If we're requesting Share, we can similarly avoid sleeping if
4914 : : * there's no update and no exclusive lock present.
4915 : : */
4916 [ # # # # ]: 0 : if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4917 : 0 : !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4918 : : {
4919 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4920 : :
4921 : : /*
4922 : : * Make sure it's still an appropriate lock, else start over.
4923 : : * See above about allowing xmax to change.
4924 : : */
4925 [ # # # # ]: 0 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4926 : 0 : HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4927 : 0 : goto l3;
4928 : 0 : require_sleep = false;
4929 : 0 : }
4930 : 0 : }
4931 [ + - ]: 10 : else if (mode == LockTupleNoKeyExclusive)
4932 : : {
4933 : : /*
4934 : : * If we're requesting NoKeyExclusive, we might also be able to
 4935                 :              :              * avoid sleeping; just ensure that there is no conflicting lock
4936 : : * already acquired.
4937 : : */
4938 [ # # ]: 0 : if (infomask & HEAP_XMAX_IS_MULTI)
4939 : : {
4940 [ # # # # ]: 0 : if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4941 : 0 : mode, NULL))
4942 : : {
4943 : : /*
4944 : : * No conflict, but if the xmax changed under us in the
4945 : : * meantime, start over.
4946 : : */
4947 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4948 [ # # # # ]: 0 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4949 : 0 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4950 : : xwait))
4951 : 0 : goto l3;
4952 : :
4953 : : /* otherwise, we're good */
4954 : 0 : require_sleep = false;
4955 : 0 : }
4956 : 0 : }
4957 [ # # ]: 0 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4958 : : {
4959 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4960 : :
4961 : : /* if the xmax changed in the meantime, start over */
4962 [ # # # # ]: 0 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4963 : 0 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4964 : : xwait))
4965 : 0 : goto l3;
4966 : : /* otherwise, we're good */
4967 : 0 : require_sleep = false;
4968 : 0 : }
4969 : 0 : }
4970 : :
4971 : : /*
4972 : : * As a check independent from those above, we can also avoid sleeping
4973 : : * if the current transaction is the sole locker of the tuple. Note
4974 : : * that the strength of the lock already held is irrelevant; this is
4975 : : * not about recording the lock in Xmax (which will be done regardless
4976 : : * of this optimization, below). Also, note that the cases where we
4977 : : * hold a lock stronger than we are requesting are already handled
4978 : : * above by not doing anything.
4979 : : *
4980 : : * Note we only deal with the non-multixact case here; MultiXactIdWait
4981 : : * is well equipped to deal with this situation on its own.
4982 : : */
4983 [ + - + + : 10 : if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
- + ]
4984 : 9 : TransactionIdIsCurrentTransactionId(xwait))
4985 : : {
4986 : : /* ... but if the xmax changed in the meantime, start over */
4987 : 9 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4988 [ + - - + ]: 9 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4989 : 9 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4990 : : xwait))
4991 : 0 : goto l3;
4992 [ + - ]: 9 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4993 : 9 : require_sleep = false;
4994 : 9 : }
4995 : :
4996 : : /*
4997 : : * Time to sleep on the other transaction/multixact, if necessary.
4998 : : *
4999 : : * If the other transaction is an update/delete that's already
5000 : : * committed, then sleeping cannot possibly do any good: if we're
5001 : : * required to sleep, get out to raise an error instead.
5002 : : *
5003 : : * By here, we either have already acquired the buffer exclusive lock,
5004 : : * or we must wait for the locking transaction or multixact; so below
5005 : : * we ensure that we grab buffer lock after the sleep.
5006 : : */
5007 [ + + + - : 10 : if (require_sleep && (result == TM_Updated || result == TM_Deleted))
- + ]
5008 : : {
5009 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5010 : 0 : goto failed;
5011 : : }
5012 [ + + ]: 10 : else if (require_sleep)
5013 : : {
5014 : : /*
5015 : : * Acquire tuple lock to establish our priority for the tuple, or
5016 : : * die trying. LockTuple will release us when we are next-in-line
5017 : : * for the tuple. We must do this even if we are share-locking,
5018 : : * but not if we already have a weaker lock on the tuple.
5019 : : *
5020 : : * If we are forced to "start over" below, we keep the tuple lock;
5021 : : * this arranges that we stay at the head of the line while
5022 : : * rechecking tuple state.
5023 : : */
5024 [ - + # # ]: 1 : if (!skip_tuple_lock &&
5025 : 0 : !heap_acquire_tuplock(relation, tid, mode, wait_policy,
5026 : : &have_tuple_lock))
5027 : : {
5028 : : /*
5029 : : * This can only happen if wait_policy is Skip and the lock
5030 : : * couldn't be obtained.
5031 : : */
5032 : 0 : result = TM_WouldBlock;
5033 : : /* recovery code expects to have buffer lock held */
5034 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5035 : 0 : goto failed;
5036 : : }
5037 : :
5038 [ + - ]: 1 : if (infomask & HEAP_XMAX_IS_MULTI)
5039 : : {
5040 : 1 : MultiXactStatus status = get_mxact_status_for_lock(mode, false);
5041 : :
5042 : : /* We only ever lock tuples, never update them */
5043 [ - + ]: 1 : if (status >= MultiXactStatusNoKeyUpdate)
5044 [ # # # # ]: 0 : elog(ERROR, "invalid lock mode in heap_lock_tuple");
5045 : :
5046 : : /* wait for multixact to end, or die trying */
5047 [ - + - - ]: 1 : switch (wait_policy)
5048 : : {
5049 : : case LockWaitBlock:
5050 : 2 : MultiXactIdWait((MultiXactId) xwait, status, infomask,
5051 : 1 : relation, &tuple->t_self, XLTW_Lock, NULL);
5052 : 1 : break;
5053 : : case LockWaitSkip:
5054 [ # # # # ]: 0 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
5055 : 0 : status, infomask, relation,
5056 : : NULL, false))
5057 : : {
5058 : 0 : result = TM_WouldBlock;
5059 : : /* recovery code expects to have buffer lock held */
5060 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5061 : 0 : goto failed;
5062 : : }
5063 : 0 : break;
5064 : : case LockWaitError:
5065 [ # # # # ]: 0 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
5066 : 0 : status, infomask, relation,
5067 : 0 : NULL, log_lock_failures))
5068 [ # # # # ]: 0 : ereport(ERROR,
5069 : : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5070 : : errmsg("could not obtain lock on row in relation \"%s\"",
5071 : : RelationGetRelationName(relation))));
5072 : :
5073 : 0 : break;
5074 : : }
5075 : :
5076 : : /*
5077 : : * Of course, the multixact might not be done here: if we're
5078 : : * requesting a light lock mode, other transactions with light
5079 : : * locks could still be alive, as well as locks owned by our
5080 : : * own xact or other subxacts of this backend. We need to
5081 : : * preserve the surviving MultiXact members. Note that it
5082 : : * isn't absolutely necessary in the latter case, but doing so
5083 : : * is simpler.
5084 : : */
5085 [ - + ]: 1 : }
5086 : : else
5087 : : {
5088 : : /* wait for regular transaction to end, or die trying */
5089 [ # # # # ]: 0 : switch (wait_policy)
5090 : : {
5091 : : case LockWaitBlock:
5092 : 0 : XactLockTableWait(xwait, relation, &tuple->t_self,
5093 : : XLTW_Lock);
5094 : 0 : break;
5095 : : case LockWaitSkip:
5096 [ # # ]: 0 : if (!ConditionalXactLockTableWait(xwait, false))
5097 : : {
5098 : 0 : result = TM_WouldBlock;
5099 : : /* recovery code expects to have buffer lock held */
5100 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5101 : 0 : goto failed;
5102 : : }
5103 : 0 : break;
5104 : : case LockWaitError:
5105 [ # # ]: 0 : if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
5106 [ # # # # ]: 0 : ereport(ERROR,
5107 : : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5108 : : errmsg("could not obtain lock on row in relation \"%s\"",
5109 : : RelationGetRelationName(relation))));
5110 : 0 : break;
5111 : : }
5112 : : }
5113 : :
5114 : : /* if there are updates, follow the update chain */
5115 [ + - + - : 1 : if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
- + ]
5116 : 1 : !ItemPointerEquals(&tuple->t_self, &t_ctid))
5117 : : {
5118 : 1 : TM_Result res;
5119 : :
5120 : 2 : res = heap_lock_updated_tuple(relation,
5121 : 1 : infomask, xwait, &t_ctid,
5122 : 1 : GetCurrentTransactionId(),
5123 : 1 : mode);
5124 [ - + ]: 1 : if (res != TM_Ok)
5125 : : {
5126 : 0 : result = res;
5127 : : /* recovery code expects to have buffer lock held */
5128 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5129 : 0 : goto failed;
5130 : : }
5131 [ - + ]: 1 : }
5132 : :
5133 : 1 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5134 : :
5135 : : /*
5136 : : * xwait is done, but if xwait had just locked the tuple then some
5137 : : * other xact could update this tuple before we get to this point.
5138 : : * Check for xmax change, and start over if so.
5139 : : */
5140 [ + - - + ]: 1 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5141 : 1 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5142 : : xwait))
5143 : 0 : goto l3;
5144 : :
5145 [ + - ]: 1 : if (!(infomask & HEAP_XMAX_IS_MULTI))
5146 : : {
5147 : : /*
5148 : : * Otherwise check if it committed or aborted. Note we cannot
5149 : : * be here if the tuple was only locked by somebody who didn't
5150 : : * conflict with us; that would have been handled above. So
5151 : : * that transaction must necessarily be gone by now. But
5152 : : * don't check for this in the multixact case, because some
5153 : : * locker transactions might still be running.
5154 : : */
5155 : 0 : UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5156 : 0 : }
5157 : 1 : }
5158 : :
5159 : : /* By here, we're certain that we hold buffer exclusive lock again */
5160 : :
5161 : : /*
5162 : : * We may lock if previous xmax aborted, or if it committed but only
5163 : : * locked the tuple without updating it; or if we didn't have to wait
5164 : : * at all for whatever reason.
5165 : : */
5166 [ + + ]: 10 : if (!require_sleep ||
5167 [ + - ]: 1 : (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5168 [ + - + - ]: 1 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5169 : 1 : HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5170 : 10 : result = TM_Ok;
5171 [ # # ]: 0 : else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
5172 : 0 : result = TM_Updated;
5173 : : else
5174 : 0 : result = TM_Deleted;
5175 [ - - - + : 399864 : }
+ ]
5176 : :
5177 : : failed:
5178 [ + + ]: 1091 : if (result != TM_Ok)
5179 : : {
5180 [ - + # # : 2 : Assert(result == TM_SelfModified || result == TM_Updated ||
# # # # ]
5181 : : result == TM_Deleted || result == TM_WouldBlock);
5182 : :
5183 : : /*
5184 : : * When locking a tuple under LockWaitSkip semantics and we fail with
5185 : : * TM_WouldBlock above, it's possible for concurrent transactions to
5186 : : * release the lock and set HEAP_XMAX_INVALID in the meantime. So
5187 : : * this assert is slightly different from the equivalent one in
5188 : : * heap_delete and heap_update.
5189 : : */
5190 [ + - + - ]: 2 : Assert((result == TM_WouldBlock) ||
5191 : : !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5192 [ - + # # ]: 2 : Assert(result != TM_Updated ||
5193 : : !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
5194 : 2 : tmfd->ctid = tuple->t_data->t_ctid;
5195 : 2 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5196 [ + - ]: 2 : if (result == TM_SelfModified)
5197 : 2 : tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5198 : : else
5199 : 0 : tmfd->cmax = InvalidCommandId;
5200 : 2 : goto out_locked;
5201 : : }
5202 : :
5203 : : /*
5204 : : * If we didn't pin the visibility map page and the page has become all
5205 : : * visible while we were busy locking the buffer, or during some
5206 : : * subsequent window during which we had it unlocked, we'll have to unlock
5207 : : * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5208 : : * unfortunate, especially since we'll now have to recheck whether the
5209 : : * tuple has been locked or updated under us, but hopefully it won't
5210 : : * happen very often.
5211 : : */
5212 [ + + + - ]: 1089 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5213 : : {
5214 : 0 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5215 : 0 : visibilitymap_pin(relation, block, &vmbuffer);
5216 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5217 : 0 : goto l3;
5218 : : }
5219 : :
5220 : 1089 : xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5221 : 1089 : old_infomask = tuple->t_data->t_infomask;
5222 : :
5223 : : /*
5224 : : * If this is the first possibly-multixact-able operation in the current
5225 : : * transaction, set my per-backend OldestMemberMXactId setting. We can be
5226 : : * certain that the transaction will never become a member of any older
5227 : : * MultiXactIds than that. (We have to do this even if we end up just
5228 : : * using our own TransactionId below, since some other backend could
5229 : : * incorporate our XID into a MultiXact immediately afterwards.)
5230 : : */
5231 : 1089 : MultiXactIdSetOldestMember();
5232 : :
5233 : : /*
5234 : : * Compute the new xmax and infomask to store into the tuple. Note we do
5235 : : * not modify the tuple just yet, because that would leave it in the wrong
5236 : : * state if multixact.c elogs.
5237 : : */
5238 : 2178 : compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5239 : 1089 : GetCurrentTransactionId(), mode, false,
5240 : : &xid, &new_infomask, &new_infomask2);
5241 : :
5242 : 1089 : START_CRIT_SECTION();
5243 : :
5244 : : /*
5245 : : * Store transaction information of xact locking the tuple.
5246 : : *
5247 : : * Note: Cmax is meaningless in this context, so don't set it; this avoids
5248 : : * possibly generating a useless combo CID. Moreover, if we're locking a
5249 : : * previously updated tuple, it's important to preserve the Cmax.
5250 : : *
5251 : : * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5252 : : * we would break the HOT chain.
5253 : : */
5254 : 1089 : tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5255 : 1089 : tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5256 : 1089 : tuple->t_data->t_infomask |= new_infomask;
5257 : 1089 : tuple->t_data->t_infomask2 |= new_infomask2;
5258 [ - + ]: 1089 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5259 : 1089 : HeapTupleHeaderClearHotUpdated(tuple->t_data);
5260 : 1089 : HeapTupleHeaderSetXmax(tuple->t_data, xid);
5261 : :
5262 : : /*
5263 : : * Make sure there is no forward chain link in t_ctid. Note that in the
5264 : : * cases where the tuple has been updated, we must not overwrite t_ctid,
5265 : : * because it was set by the updater. Moreover, if the tuple has been
5266 : : * updated, we need to follow the update chain to lock the new versions of
5267 : : * the tuple as well.
5268 : : */
5269 [ - + ]: 1089 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5270 : 1089 : tuple->t_data->t_ctid = *tid;
5271 : :
5272 : : /* Clear only the all-frozen bit on visibility map if needed */
5273 [ + + + - ]: 1089 : if (PageIsAllVisible(page) &&
5274 : 207 : visibilitymap_clear(relation, block, vmbuffer,
5275 : : VISIBILITYMAP_ALL_FROZEN))
5276 : 0 : cleared_all_frozen = true;
5277 : :
5278 : :
5279 : 1089 : MarkBufferDirty(*buffer);
5280 : :
5281 : : /*
5282 : : * XLOG stuff. You might think that we don't need an XLOG record because
5283 : : * there is no state change worth restoring after a crash. You would be
5284 : : * wrong however: we have just written either a TransactionId or a
5285 : : * MultiXactId that may never have been seen on disk before, and we need
5286 : : * to make sure that there are XLOG entries covering those ID numbers.
5287 : : * Else the same IDs might be re-used after a crash, which would be
5288 : : * disastrous if this page made it to disk before the crash. Essentially
5289 : : * we have to enforce the WAL log-before-data rule even in this case.
5290 : : * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5291 : : * entries for everything anyway.)
5292 : : */
5293 [ + + + + : 1089 : if (RelationNeedsWAL(relation))
+ - - + ]
5294 : : {
5295 : 1012 : xl_heap_lock xlrec;
5296 : 1012 : XLogRecPtr recptr;
5297 : :
5298 : 1012 : XLogBeginInsert();
5299 : 1012 : XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
5300 : :
5301 : 1012 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5302 : 1012 : xlrec.xmax = xid;
5303 : 2024 : xlrec.infobits_set = compute_infobits(new_infomask,
5304 : 1012 : tuple->t_data->t_infomask2);
5305 : 1012 : xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
5306 : 1012 : XLogRegisterData(&xlrec, SizeOfHeapLock);
5307 : :
5308 : : /* we don't decode row locks atm, so no need to log the origin */
5309 : :
5310 : 1012 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
5311 : :
5312 : 1012 : PageSetLSN(page, recptr);
5313 : 1012 : }
5314 : :
5315 [ + - ]: 1089 : END_CRIT_SECTION();
5316 : :
5317 : 1089 : result = TM_Ok;
5318 : :
5319 : : out_locked:
5320 : 1095 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5321 : :
5322 : : out_unlocked:
5323 [ + + ]: 400949 : if (BufferIsValid(vmbuffer))
5324 : 400001 : ReleaseBuffer(vmbuffer);
5325 : :
5326 : : /*
5327 : : * Don't update the visibility map here. Locking a tuple doesn't change
5328 : : * visibility info.
5329 : : */
5330 : :
5331 : : /*
5332 : : * Now that we have successfully marked the tuple as locked, we can
5333 : : * release the lmgr tuple lock, if we had it.
5334 : : */
5335 [ + - ]: 400949 : if (have_tuple_lock)
5336 : 0 : UnlockTupleTuplock(relation, tid, mode);
5337 : :
5338 : 400949 : return result;
5339 : 400949 : }
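
/*
 * The "l3" retry loop above follows a general pattern: snapshot the
 * conflicting state while holding the buffer lock, release the lock to
 * wait, then re-lock and verify the snapshot still matches before
 * proceeding -- otherwise start over.  A self-contained sketch of that
 * pattern, with a pthread mutex and a version counter standing in for the
 * buffer content lock and the xmax/infomask recheck:
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

typedef struct TupleStateSketch
{
	pthread_mutex_t lock;		/* stands in for the buffer content lock */
	unsigned	version;		/* bumped on every state change */
	bool		busy;			/* stands in for a conflicting locker */
} TupleStateSketch;

static void
wait_for_conflict_to_clear(TupleStateSketch *ts)
{
	bool		busy = true;

	while (busy)
	{
		sched_yield();			/* real code sleeps on the conflicting xact */
		pthread_mutex_lock(&ts->lock);
		busy = ts->busy;
		pthread_mutex_unlock(&ts->lock);
	}
}

static void
lock_tuple_sketch(TupleStateSketch *ts)
{
	for (;;)
	{
		unsigned	snap;

		pthread_mutex_lock(&ts->lock);
		if (!ts->busy)
			break;				/* no conflict: proceed, lock still held */
		snap = ts->version;
		pthread_mutex_unlock(&ts->lock);

		wait_for_conflict_to_clear(ts);

		pthread_mutex_lock(&ts->lock);
		if (!ts->busy && ts->version == snap)
			break;				/* nothing changed since our snapshot */
		pthread_mutex_unlock(&ts->lock);	/* changed: take "l3" again */
	}

	ts->version++;				/* ... mark the tuple locked/updated ... */
	pthread_mutex_unlock(&ts->lock);
}

int
main(void)
{
	TupleStateSketch ts = {PTHREAD_MUTEX_INITIALIZER, 0, false};

	lock_tuple_sketch(&ts);		/* exercises the uncontended path */
	return ts.version == 1 ? 0 : 1;
}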
5340 : :
5341 : : /*
5342 : : * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5343 : : * its normal, Xmax-based tuple lock.
5344 : : *
5345 : : * have_tuple_lock is an input and output parameter: on input, it indicates
5346 : : * whether the lock has previously been acquired (and this function does
5347 : : * nothing in that case). If this function returns success, have_tuple_lock
5348 : : * has been flipped to true.
5349 : : *
5350 : : * Returns false if it was unable to obtain the lock; this can only happen if
5351 : : * wait_policy is Skip.
5352 : : */
5353 : : static bool
5354 : 0 : heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode,
5355 : : LockWaitPolicy wait_policy, bool *have_tuple_lock)
5356 : : {
5357 [ # # ]: 0 : if (*have_tuple_lock)
5358 : 0 : return true;
5359 : :
5360 [ # # # # ]: 0 : switch (wait_policy)
5361 : : {
5362 : : case LockWaitBlock:
5363 : 0 : LockTupleTuplock(relation, tid, mode);
5364 : 0 : break;
5365 : :
5366 : : case LockWaitSkip:
5367 [ # # ]: 0 : if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
5368 : 0 : return false;
5369 : 0 : break;
5370 : :
5371 : : case LockWaitError:
5372 [ # # ]: 0 : if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
5373 [ # # # # ]: 0 : ereport(ERROR,
5374 : : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5375 : : errmsg("could not obtain lock on row in relation \"%s\"",
5376 : : RelationGetRelationName(relation))));
5377 : 0 : break;
5378 : : }
5379 : 0 : *have_tuple_lock = true;
5380 : :
5381 : 0 : return true;
5382 : 0 : }
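
/*
 * The three LockWaitPolicy behaviors above (block, skip, error) form a
 * common pattern over any conditional lock primitive.  A sketch using
 * pthread_mutex_trylock; the policy names mirror the enum, but the locking
 * substrate is, of course, a stand-in:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { WAIT_BLOCK, WAIT_SKIP, WAIT_ERROR } WaitPolicySketch;

/* Returns true if the lock was acquired; false only under WAIT_SKIP. */
static bool
acquire_with_policy(pthread_mutex_t *m, WaitPolicySketch policy)
{
	switch (policy)
	{
		case WAIT_BLOCK:
			pthread_mutex_lock(m);	/* sleep until available */
			return true;

		case WAIT_SKIP:
			return pthread_mutex_trylock(m) == 0;

		case WAIT_ERROR:
			if (pthread_mutex_trylock(m) != 0)
			{
				fprintf(stderr, "could not obtain lock\n");
				exit(1);		/* ereport(ERROR) analogue */
			}
			return true;
	}
	return false;				/* unreachable */
}

int
main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	if (acquire_with_policy(&m, WAIT_BLOCK))
		pthread_mutex_unlock(&m);

	pthread_mutex_lock(&m);		/* now held: WAIT_SKIP must back off */
	printf("skip acquired: %d\n", acquire_with_policy(&m, WAIT_SKIP));
	return 0;
}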
5383 : :
5384 : : /*
5385 : : * Given an original set of Xmax and infomask, and a transaction (identified by
5386 : : * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5387 : : * corresponding infomasks to use on the tuple.
5388 : : *
5389 : : * Note that this might have side effects such as creating a new MultiXactId.
5390 : : *
5391 : : * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5392 : : * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5393 : : * but it was not running anymore. There is a race condition, which is that the
5394 : : * MultiXactId may have finished since then, but that uncommon case is handled
5395 : : * either here, or within MultiXactIdExpand.
5396 : : *
5397 : : * There is a similar race condition possible when the old xmax was a regular
5398 : : * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5399 : : * window, but it's still possible to end up creating an unnecessary
5400 : : * MultiXactId. Fortunately this is harmless.
5401 : : */
5402 : : static void
5403 : 335669 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
5404 : : uint16 old_infomask2, TransactionId add_to_xmax,
5405 : : LockTupleMode mode, bool is_update,
5406 : : TransactionId *result_xmax, uint16 *result_infomask,
5407 : : uint16 *result_infomask2)
5408 : : {
5409 : 335669 : TransactionId new_xmax;
5410 : 335669 : uint16 new_infomask,
5411 : : new_infomask2;
5412 : :
5413 [ + - ]: 335669 : Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5414 : :
5415 : : l5:
5416 : 336094 : new_infomask = 0;
5417 : 336094 : new_infomask2 = 0;
5418 [ + + ]: 336094 : if (old_infomask & HEAP_XMAX_INVALID)
5419 : : {
5420 : : /*
5421 : : * No previous locker; we just insert our own TransactionId.
5422 : : *
5423 : : * Note that it's critical that this case be the first one checked,
5424 : : * because there are several blocks below that come back to this one
5425 : : * to implement certain optimizations; old_infomask might contain
5426 : : * other dirty bits in those cases, but we don't really care.
5427 : : */
5428 [ + + ]: 335667 : if (is_update)
5429 : : {
5430 : 327929 : new_xmax = add_to_xmax;
5431 [ + + ]: 327929 : if (mode == LockTupleExclusive)
5432 : 304999 : new_infomask2 |= HEAP_KEYS_UPDATED;
5433 : 327929 : }
5434 : : else
5435 : : {
5436 : 7738 : new_infomask |= HEAP_XMAX_LOCK_ONLY;
5437 [ + + + + : 7738 : switch (mode)
- ]
5438 : : {
5439 : : case LockTupleKeyShare:
5440 : 669 : new_xmax = add_to_xmax;
5441 : 669 : new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5442 : 669 : break;
5443 : : case LockTupleShare:
5444 : 12 : new_xmax = add_to_xmax;
5445 : 12 : new_infomask |= HEAP_XMAX_SHR_LOCK;
5446 : 12 : break;
5447 : : case LockTupleNoKeyExclusive:
5448 : 6860 : new_xmax = add_to_xmax;
5449 : 6860 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5450 : 6860 : break;
5451 : : case LockTupleExclusive:
5452 : 197 : new_xmax = add_to_xmax;
5453 : 197 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5454 : 197 : new_infomask2 |= HEAP_KEYS_UPDATED;
5455 : 197 : break;
5456 : : default:
5457 : 0 : new_xmax = InvalidTransactionId; /* silence compiler */
5458 [ # # # # ]: 0 : elog(ERROR, "invalid lock mode");
5459 : 0 : }
5460 : : }
5461 : 335667 : }
5462 [ + + ]: 427 : else if (old_infomask & HEAP_XMAX_IS_MULTI)
5463 : : {
5464 : 1 : MultiXactStatus new_status;
5465 : :
5466 : : /*
5467 : : * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5468 : : * cross-check.
5469 : : */
5470 [ - + ]: 1 : Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5471 : :
5472 : : /*
5473 : : * A multixact together with LOCK_ONLY set but neither lock bit set
5474 : : * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5475 : : * anymore. This check is critical for databases upgraded by
5476 : : * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5477 : : * that such multis are never passed.
5478 : : */
5479 [ - + ]: 1 : if (HEAP_LOCKED_UPGRADED(old_infomask))
5480 : : {
5481 : 0 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5482 : 0 : old_infomask |= HEAP_XMAX_INVALID;
5483 : 0 : goto l5;
5484 : : }
5485 : :
5486 : : /*
5487 : : * If the XMAX is already a MultiXactId, then we need to expand it to
5488 : : * include add_to_xmax; but if all the members were lockers and are
5489 : : * all gone, we can do away with the IS_MULTI bit and just set
5490 : : * add_to_xmax as the only locker/updater. If all lockers are gone
5491 : : * and we have an updater that aborted, we can also do without a
5492 : : * multi.
5493 : : *
5494 : : * The cost of doing GetMultiXactIdMembers would be paid by
5495 : : * MultiXactIdExpand if we weren't to do this, so this check is not
5496 : : * incurring extra work anyhow.
5497 : : */
5498 [ + - ]: 1 : if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5499 : : {
5500 [ # # # # ]: 0 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5501 : 0 : !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5502 : 0 : old_infomask)))
5503 : : {
5504 : : /*
5505 : : * Reset these bits and restart; otherwise fall through to
5506 : : * create a new multi below.
5507 : : */
5508 : 0 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5509 : 0 : old_infomask |= HEAP_XMAX_INVALID;
5510 : 0 : goto l5;
5511 : : }
5512 : 0 : }
5513 : :
5514 : 1 : new_status = get_mxact_status_for_lock(mode, is_update);
5515 : :
5516 : 2 : new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5517 : 1 : new_status);
5518 : 1 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5519 [ - + ]: 1 : }
5520 [ + - ]: 426 : else if (old_infomask & HEAP_XMAX_COMMITTED)
5521 : : {
5522 : : /*
 5523                 :              :          * It's a committed update, so we need to preserve it as the updater
 5524                 :              :          * of the tuple.
5525 : : */
5526 : 0 : MultiXactStatus status;
5527 : 0 : MultiXactStatus new_status;
5528 : :
5529 [ # # ]: 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5530 : 0 : status = MultiXactStatusUpdate;
5531 : : else
5532 : 0 : status = MultiXactStatusNoKeyUpdate;
5533 : :
5534 : 0 : new_status = get_mxact_status_for_lock(mode, is_update);
5535 : :
5536 : : /*
5537 : : * since it's not running, it's obviously impossible for the old
5538 : : * updater to be identical to the current one, so we need not check
5539 : : * for that case as we do in the block above.
5540 : : */
5541 : 0 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5542 : 0 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5543 : 0 : }
5544 [ + - ]: 426 : else if (TransactionIdIsInProgress(xmax))
5545 : : {
5546 : : /*
5547 : : * If the XMAX is a valid, in-progress TransactionId, then we need to
5548 : : * create a new MultiXactId that includes both the old locker or
5549 : : * updater and our own TransactionId.
5550 : : */
5551 : 426 : MultiXactStatus new_status;
5552 : 426 : MultiXactStatus old_status;
5553 : 426 : LockTupleMode old_mode;
5554 : :
5555 [ + - ]: 426 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5556 : : {
5557 [ + + ]: 426 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5558 : 10 : old_status = MultiXactStatusForKeyShare;
5559 [ + + ]: 416 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5560 : 1 : old_status = MultiXactStatusForShare;
5561 [ + - ]: 415 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5562 : : {
5563 [ + + ]: 415 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5564 : 69 : old_status = MultiXactStatusForUpdate;
5565 : : else
5566 : 346 : old_status = MultiXactStatusForNoKeyUpdate;
5567 : 415 : }
5568 : : else
5569 : : {
5570 : : /*
5571 : : * LOCK_ONLY can be present alone only when a page has been
5572 : : * upgraded by pg_upgrade. But in that case,
5573 : : * TransactionIdIsInProgress() should have returned false. We
5574 : : * assume it's no longer locked in this case.
5575 : : */
5576 [ # # # # ]: 0 : elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5577 : 0 : old_infomask |= HEAP_XMAX_INVALID;
5578 : 0 : old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5579 : 0 : goto l5;
5580 : : }
5581 : 426 : }
5582 : : else
5583 : : {
5584 : : /* it's an update, but which kind? */
5585 [ # # ]: 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5586 : 0 : old_status = MultiXactStatusUpdate;
5587 : : else
5588 : 0 : old_status = MultiXactStatusNoKeyUpdate;
5589 : : }
5590 : :
5591 : 426 : old_mode = TUPLOCK_from_mxstatus(old_status);
5592 : :
5593 : : /*
5594 : : * If the lock to be acquired is for the same TransactionId as the
5595 : : * existing lock, there's an optimization possible: consider only the
5596 : : * strongest of both locks as the only one present, and restart.
5597 : : */
5598 [ + + ]: 426 : if (xmax == add_to_xmax)
5599 : : {
5600 : : /*
5601 : : * Note that it's not possible for the original tuple to be
5602 : : * updated: we wouldn't be here because the tuple would have been
5603 : : * invisible and we wouldn't try to update it. As a subtlety,
5604 : : * this code can also run when traversing an update chain to lock
5605 : : * future versions of a tuple. But we wouldn't be here either,
5606 : : * because the add_to_xmax would be different from the original
5607 : : * updater.
5608 : : */
5609 [ + - ]: 425 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5610 : :
5611 : : /* acquire the strongest of both */
5612 [ + + ]: 425 : if (mode < old_mode)
5613 : 36 : mode = old_mode;
5614 : : /* mustn't touch is_update */
5615 : :
5616 : 425 : old_infomask |= HEAP_XMAX_INVALID;
5617 : 425 : goto l5;
5618 : : }
5619 : :
5620 : : /* otherwise, just fall back to creating a new multixact */
5621 : 1 : new_status = get_mxact_status_for_lock(mode, is_update);
5622 : 2 : new_xmax = MultiXactIdCreate(xmax, old_status,
5623 : 1 : add_to_xmax, new_status);
5624 : 1 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5625 [ + + ]: 426 : }
5626 [ # # # # ]: 0 : else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5627 : 0 : TransactionIdDidCommit(xmax))
5628 : : {
5629 : : /*
5630 : : * It's a committed update, so we must preserve it as the updater
5631 : : * of the tuple.
5632 : : */
5633 : 0 : MultiXactStatus status;
5634 : 0 : MultiXactStatus new_status;
5635 : :
5636 [ # # ]: 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5637 : 0 : status = MultiXactStatusUpdate;
5638 : : else
5639 : 0 : status = MultiXactStatusNoKeyUpdate;
5640 : :
5641 : 0 : new_status = get_mxact_status_for_lock(mode, is_update);
5642 : :
5643 : : /*
5644 : : * since it's not running, it's obviously impossible for the old
5645 : : * updater to be identical to the current one, so we need not check
5646 : : * for that case as we do in the block above.
5647 : : */
5648 : 0 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5649 : 0 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5650 : 0 : }
5651 : : else
5652 : : {
5653 : : /*
5654 : : * Can get here iff the locking/updating transaction was running when
5655 : : * the infomask was extracted from the tuple, but finished before
5656 : : * TransactionIdIsInProgress got to run. Deal with it as if there were
5657 : : * no locker at all in the first place.
5658 : : */
5659 : 0 : old_infomask |= HEAP_XMAX_INVALID;
5660 : 0 : goto l5;
5661 : : }
5662 : :
5663 : 335669 : *result_infomask = new_infomask;
5664 : 335669 : *result_infomask2 = new_infomask2;
5665 : 335669 : *result_xmax = new_xmax;
5666 : 335669 : }
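/*
 * Editor's sketch (not part of heapam.c): the pattern by which callers in
 * this file apply the out-parameters of compute_new_xmax_infomask() to a
 * tuple header; compare heap_lock_updated_tuple_rec() further below.  The
 * helper name and the omission of WAL-logging are illustrative only.
 */
static void
apply_new_xmax_sketch(HeapTupleHeader htup, TransactionId add_to_xmax,
                      LockTupleMode mode)
{
    TransactionId new_xmax;
    uint16      new_infomask,
                new_infomask2;

    compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(htup),
                              htup->t_infomask, htup->t_infomask2,
                              add_to_xmax, mode, false /* is_update */ ,
                              &new_xmax, &new_infomask, &new_infomask2);

    /* real callers do this inside a critical section and WAL-log it */
    HeapTupleHeaderSetXmax(htup, new_xmax);
    htup->t_infomask &= ~HEAP_XMAX_BITS;    /* clear old lock/update bits */
    htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    htup->t_infomask |= new_infomask;       /* install the computed bits */
    htup->t_infomask2 |= new_infomask2;
}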
5667 : :
5668 : : /*
5669 : : * Subroutine for heap_lock_updated_tuple_rec.
5670 : : *
5671 : : * Given a hypothetical multixact status held by the transaction identified
5672 : : * with the given xid, does the current transaction need to wait, fail, or can
5673 : : * it continue if it wanted to acquire a lock of the given mode? "needwait"
5674 : : * is set to true if waiting is necessary; if it can continue, then TM_Ok is
5675 : : * returned. If the lock is already held by the current transaction, return
5676 : : * TM_SelfModified. In case of a conflict with another transaction, a
5677 : : * different HeapTupleSatisfiesUpdate return code is returned.
5678 : : *
5679 : : * The held status is said to be hypothetical because it might correspond to a
5680 : : * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5681 : : * way for simplicity of API.
5682 : : */
5683 : : static TM_Result
5684 : 0 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5685 : : LockTupleMode mode, HeapTuple tup,
5686 : : bool *needwait)
5687 : : {
5688 : 0 : MultiXactStatus wantedstatus;
5689 : :
5690 : 0 : *needwait = false;
5691 : 0 : wantedstatus = get_mxact_status_for_lock(mode, false);
5692 : :
5693 : : /*
5694 : : * Note: we *must* check TransactionIdIsInProgress before
5695 : : * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
5696 : : * for an explanation.
5697 : : */
5698 [ # # ]: 0 : if (TransactionIdIsCurrentTransactionId(xid))
5699 : : {
5700 : : /*
5701 : : * The tuple has already been locked by our own transaction. This is
5702 : : * very rare but can happen if multiple transactions are trying to
5703 : : * lock an ancient version of the same tuple.
5704 : : */
5705 : 0 : return TM_SelfModified;
5706 : : }
5707 [ # # ]: 0 : else if (TransactionIdIsInProgress(xid))
5708 : : {
5709 : : /*
5710 : : * If the locking transaction is running, what we do depends on
5711 : : * whether the lock modes conflict: if they do, then we must wait for
5712 : : * it to finish; otherwise we can fall through to lock this tuple
5713 : : * version without waiting.
5714 : : */
5715 [ # # # # ]: 0 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5716 : 0 : LOCKMODE_from_mxstatus(wantedstatus)))
5717 : : {
5718 : 0 : *needwait = true;
5719 : 0 : }
5720 : :
5721 : : /*
5722 : : * If we set needwait above, then this value doesn't matter;
5723 : : * otherwise, this value signals to caller that it's okay to proceed.
5724 : : */
5725 : 0 : return TM_Ok;
5726 : : }
5727 [ # # ]: 0 : else if (TransactionIdDidAbort(xid))
5728 : 0 : return TM_Ok;
5729 [ # # ]: 0 : else if (TransactionIdDidCommit(xid))
5730 : : {
5731 : : /*
5732 : : * The other transaction committed. If it was only a locker, then the
5733 : : * lock is completely gone now and we can return success; but if it
5734 : : * was an update, then what we do depends on whether the two lock
5735 : : * modes conflict. If they conflict, then we must report an error to
5736 : : * the caller. But if they don't, we can fall through to allow the current
5737 : : * transaction to lock the tuple.
5738 : : *
5739 : : * Note: the reason we worry about ISUPDATE here is that as soon as
5740 : : * a transaction ends, all its locks are gone and meaningless, and
5741 : : * thus we can ignore them; whereas its updates persist. In the
5742 : : * TransactionIdIsInProgress case, above, we don't need to check
5743 : : * because we know the lock is still "alive" and thus a conflict must
5744 : : * always be checked.
5745 : : */
5746 [ # # ]: 0 : if (!ISUPDATE_from_mxstatus(status))
5747 : 0 : return TM_Ok;
5748 : :
5749 [ # # # # ]: 0 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5750 : 0 : LOCKMODE_from_mxstatus(wantedstatus)))
5751 : : {
5752 : : /* bummer */
5753 [ # # ]: 0 : if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
5754 : 0 : return TM_Updated;
5755 : : else
5756 : 0 : return TM_Deleted;
5757 : : }
5758 : :
5759 : 0 : return TM_Ok;
5760 : : }
5761 : :
5762 : : /* Not in progress, not aborted, not committed -- must have crashed */
5763 : 0 : return TM_Ok;
5764 : 0 : }
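/*
 * Editor's sketch (not part of heapam.c): the heart of the test above is
 * DoLockModesConflict() applied to the heavyweight-lock equivalents of the
 * two multixact statuses.  For instance, an in-progress FOR SHARE locker
 * blocks a FOR UPDATE request:
 */
static bool
share_blocks_update_sketch(void)
{
    MultiXactStatus held = MultiXactStatusForShare;
    MultiXactStatus wanted = get_mxact_status_for_lock(LockTupleExclusive,
                                                       false);

    /* returns true: tuple-level share and exclusive locks conflict */
    return DoLockModesConflict(LOCKMODE_from_mxstatus(held),
                               LOCKMODE_from_mxstatus(wanted));
}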
5765 : :
5766 : :
5767 : : /*
5768 : : * Recursive part of heap_lock_updated_tuple
5769 : : *
5770 : : * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5771 : : * xid with the given mode; if this tuple is updated, recurse to lock the new
5772 : : * version as well.
5773 : : */
5774 : : static TM_Result
5775 : 1 : heap_lock_updated_tuple_rec(Relation rel, TransactionId priorXmax,
5776 : : const ItemPointerData *tid, TransactionId xid,
5777 : : LockTupleMode mode)
5778 : : {
5779 : 1 : TM_Result result;
5780 : 1 : ItemPointerData tupid;
5781 : 1 : HeapTupleData mytup;
5782 : 1 : Buffer buf;
5783 : 1 : uint16 new_infomask,
5784 : : new_infomask2,
5785 : : old_infomask,
5786 : : old_infomask2;
5787 : 1 : TransactionId xmax,
5788 : : new_xmax;
5789 : 1 : bool cleared_all_frozen = false;
5790 : 1 : bool pinned_desired_page;
5791 : 1 : Buffer vmbuffer = InvalidBuffer;
5792 : 1 : BlockNumber block;
5793 : :
5794 : 1 : ItemPointerCopy(tid, &tupid);
5795 : :
5796 : 1 : for (;;)
5797 : : {
5798 : 1 : new_infomask = 0;
5799 : 1 : new_xmax = InvalidTransactionId;
5800 : 1 : block = ItemPointerGetBlockNumber(&tupid);
5801 : 1 : ItemPointerCopy(&tupid, &(mytup.t_self));
5802 : :
5803 [ - + ]: 1 : if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
5804 : : {
5805 : : /*
5806 : : * if we fail to find the updated version of the tuple, it's
5807 : : * because it was vacuumed/pruned away after its creator
5808 : : * transaction aborted. So behave as if we got to the end of the
5809 : : * chain, and there's no further tuple to lock: return success to
5810 : : * caller.
5811 : : */
5812 : 0 : result = TM_Ok;
5813 : 0 : goto out_unlocked;
5814 : : }
5815 : :
5816 : : l4:
5817 [ - + ]: 1 : CHECK_FOR_INTERRUPTS();
5818 : :
5819 : : /*
5820 : : * Before locking the buffer, pin the visibility map page if it
5821 : : * appears to be necessary. Since we haven't got the lock yet,
5822 : : * someone else might be in the middle of changing this, so we'll need
5823 : : * to recheck after we have the lock.
5824 : : */
5825 [ - + ]: 1 : if (PageIsAllVisible(BufferGetPage(buf)))
5826 : : {
5827 : 0 : visibilitymap_pin(rel, block, &vmbuffer);
5828 : 0 : pinned_desired_page = true;
5829 : 0 : }
5830 : : else
5831 : 1 : pinned_desired_page = false;
5832 : :
5833 : 1 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5834 : :
5835 : : /*
5836 : : * If we didn't pin the visibility map page and the page has become
5837 : : * all visible while we were busy locking the buffer, we'll have to
5838 : : * unlock and re-lock, to avoid holding the buffer lock across I/O.
5839 : : * That's a bit unfortunate, but hopefully shouldn't happen often.
5840 : : *
5841 : : * Note: in some paths through this function, we will reach here
5842 : : * holding a pin on a vm page that may or may not be the one matching
5843 : : * this page. If this page isn't all-visible, we won't use the vm
5844 : : * page, but we hold onto such a pin till the end of the function.
5845 : : */
5846 [ + - + - ]: 1 : if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
5847 : : {
5848 : 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5849 : 0 : visibilitymap_pin(rel, block, &vmbuffer);
5850 : 0 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5851 : 0 : }
5852 : :
5853 : : /*
5854 : : * Check the tuple XMIN against prior XMAX, if any. If we reached the
5855 : : * end of the chain, we're done, so return success.
5856 : : */
5857 [ + - - + ]: 1 : if (TransactionIdIsValid(priorXmax) &&
5858 : 1 : !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5859 : : priorXmax))
5860 : : {
5861 : 0 : result = TM_Ok;
5862 : 0 : goto out_locked;
5863 : : }
5864 : :
5865 : : /*
5866 : : * Also check Xmin: if this tuple was created by an aborted
5867 : : * (sub)transaction, then we already locked the last live one in the
5868 : : * chain, thus we're done, so return success.
5869 : : */
5870 [ + - ]: 1 : if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
5871 : : {
5872 : 1 : result = TM_Ok;
5873 : 1 : goto out_locked;
5874 : : }
5875 : :
5876 : 0 : old_infomask = mytup.t_data->t_infomask;
5877 : 0 : old_infomask2 = mytup.t_data->t_infomask2;
5878 : 0 : xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5879 : :
5880 : : /*
5881 : : * If this tuple version has been updated or locked by some concurrent
5882 : : * transaction(s), what we do depends on whether our lock mode
5883 : : * conflicts with what those other transactions hold, and also on
5884 : : * their status.
5885 : : */
5886 [ # # ]: 0 : if (!(old_infomask & HEAP_XMAX_INVALID))
5887 : : {
5888 : 0 : TransactionId rawxmax;
5889 : 0 : bool needwait;
5890 : :
5891 : 0 : rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5892 [ # # ]: 0 : if (old_infomask & HEAP_XMAX_IS_MULTI)
5893 : : {
5894 : 0 : int nmembers;
5895 : 0 : int i;
5896 : 0 : MultiXactMember *members;
5897 : :
5898 : : /*
5899 : : * We don't need a test for pg_upgrade'd tuples: this is only
5900 : : * applied to tuples after the first in an update chain. Said
5901 : : * first tuple in the chain may well be locked-in-9.2-and-
5902 : : * pg_upgraded, but that one was already locked by our caller,
5903 : : * not us; and any subsequent ones cannot be because our
5904 : : * caller must necessarily have obtained a snapshot later than
5905 : : * the pg_upgrade itself.
5906 : : */
5907 [ # # ]: 0 : Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5908 : :
5909 : 0 : nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5910 : 0 : HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5911 [ # # ]: 0 : for (i = 0; i < nmembers; i++)
5912 : : {
5913 : 0 : result = test_lockmode_for_conflict(members[i].status,
5914 : 0 : members[i].xid,
5915 : 0 : mode,
5916 : : &mytup,
5917 : : &needwait);
5918 : :
5919 : : /*
5920 : : * If the tuple was already locked by ourselves in a
5921 : : * previous iteration of this loop (say heap_lock_tuple was
5922 : : * forced to restart the locking loop because of a change
5923 : : * in xmax), then we hold the lock already on this tuple
5924 : : * version and we don't need to do anything; and this is
5925 : : * not an error condition either. We just need to skip
5926 : : * this tuple and continue locking the next version in the
5927 : : * update chain.
5928 : : */
5929 [ # # ]: 0 : if (result == TM_SelfModified)
5930 : : {
5931 : 0 : pfree(members);
5932 : 0 : goto next;
5933 : : }
5934 : :
5935 [ # # ]: 0 : if (needwait)
5936 : : {
5937 : 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5938 : 0 : XactLockTableWait(members[i].xid, rel,
5939 : 0 : &mytup.t_self,
5940 : : XLTW_LockUpdated);
5941 : 0 : pfree(members);
5942 : 0 : goto l4;
5943 : : }
5944 [ # # ]: 0 : if (result != TM_Ok)
5945 : : {
5946 : 0 : pfree(members);
5947 : 0 : goto out_locked;
5948 : : }
5949 : 0 : }
5950 [ # # ]: 0 : if (members)
5951 : 0 : pfree(members);
5952 [ # # ]: 0 : }
5953 : : else
5954 : : {
5955 : 0 : MultiXactStatus status;
5956 : :
5957 : : /*
5958 : : * For a non-multi Xmax, we first need to compute the
5959 : : * corresponding MultiXactStatus by using the infomask bits.
5960 : : */
5961 [ # # ]: 0 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5962 : : {
5963 [ # # ]: 0 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5964 : 0 : status = MultiXactStatusForKeyShare;
5965 [ # # ]: 0 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5966 : 0 : status = MultiXactStatusForShare;
5967 [ # # ]: 0 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5968 : : {
5969 [ # # ]: 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5970 : 0 : status = MultiXactStatusForUpdate;
5971 : : else
5972 : 0 : status = MultiXactStatusForNoKeyUpdate;
5973 : 0 : }
5974 : : else
5975 : : {
5976 : : /*
5977 : : * LOCK_ONLY present alone (a pg_upgraded tuple marked
5978 : : * as share-locked in the old cluster) shouldn't be
5979 : : * seen in the middle of an update chain.
5980 : : */
5981 [ # # # # ]: 0 : elog(ERROR, "invalid lock status in tuple");
5982 : : }
5983 : 0 : }
5984 : : else
5985 : : {
5986 : : /* it's an update, but which kind? */
5987 [ # # ]: 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5988 : 0 : status = MultiXactStatusUpdate;
5989 : : else
5990 : 0 : status = MultiXactStatusNoKeyUpdate;
5991 : : }
5992 : :
5993 : 0 : result = test_lockmode_for_conflict(status, rawxmax, mode,
5994 : : &mytup, &needwait);
5995 : :
5996 : : /*
5997 : : * If the tuple was already locked by ourselves in a previous
5998 : : * iteration of this loop (say heap_lock_tuple was forced to
5999 : : * restart the locking loop because of a change in xmax), then
6000 : : * we hold the lock already on this tuple version and we don't
6001 : : * need to do anything; and this is not an error condition
6002 : : * either. We just need to skip this tuple and continue
6003 : : * locking the next version in the update chain.
6004 : : */
6005 [ # # ]: 0 : if (result == TM_SelfModified)
6006 : 0 : goto next;
6007 : :
6008 [ # # ]: 0 : if (needwait)
6009 : : {
6010 : 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
6011 : 0 : XactLockTableWait(rawxmax, rel, &mytup.t_self,
6012 : : XLTW_LockUpdated);
6013 : 0 : goto l4;
6014 : : }
6015 [ # # ]: 0 : if (result != TM_Ok)
6016 : : {
6017 : 0 : goto out_locked;
6018 : : }
6019 [ # # ]: 0 : }
6020 [ # # # # : 0 : }
# ]
6021 : :
6022 : : /* compute the new Xmax and infomask values for the tuple ... */
6023 : 0 : compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
6024 : 0 : xid, mode, false,
6025 : : &new_xmax, &new_infomask, &new_infomask2);
6026 : :
6027 [ # # # # ]: 0 : if (PageIsAllVisible(BufferGetPage(buf)) &&
6028 : 0 : visibilitymap_clear(rel, block, vmbuffer,
6029 : : VISIBILITYMAP_ALL_FROZEN))
6030 : 0 : cleared_all_frozen = true;
6031 : :
6032 : 0 : START_CRIT_SECTION();
6033 : :
6034 : : /* ... and set them */
6035 : 0 : HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
6036 : 0 : mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
6037 : 0 : mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6038 : 0 : mytup.t_data->t_infomask |= new_infomask;
6039 : 0 : mytup.t_data->t_infomask2 |= new_infomask2;
6040 : :
6041 : 0 : MarkBufferDirty(buf);
6042 : :
6043 : : /* XLOG stuff */
6044 [ # # # # : 0 : if (RelationNeedsWAL(rel))
# # # # ]
6045 : : {
6046 : 0 : xl_heap_lock_updated xlrec;
6047 : 0 : XLogRecPtr recptr;
6048 : 0 : Page page = BufferGetPage(buf);
6049 : :
6050 : 0 : XLogBeginInsert();
6051 : 0 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
6052 : :
6053 : 0 : xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
6054 : 0 : xlrec.xmax = new_xmax;
6055 : 0 : xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
6056 : 0 : xlrec.flags =
6057 : 0 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
6058 : :
6059 : 0 : XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
6060 : :
6061 : 0 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
6062 : :
6063 : 0 : PageSetLSN(page, recptr);
6064 : 0 : }
6065 : :
6066 [ # # ]: 0 : END_CRIT_SECTION();
6067 : :
6068 : : next:
6069 : : /* if we find the end of the update chain, we're done. */
6070 [ # # ]: 0 : if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
6071 [ # # ]: 0 : HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
6072 [ # # # # ]: 0 : ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
6073 : 0 : HeapTupleHeaderIsOnlyLocked(mytup.t_data))
6074 : : {
6075 : 0 : result = TM_Ok;
6076 : 0 : goto out_locked;
6077 : : }
6078 : :
6079 : : /* tail recursion */
6080 : 0 : priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
6081 : 0 : ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
6082 : 0 : UnlockReleaseBuffer(buf);
6083 : : }
6084 : :
6085 : : result = TM_Ok;
6086 : :
6087 : : out_locked:
6088 : 1 : UnlockReleaseBuffer(buf);
6089 : :
6090 : : out_unlocked:
6091 [ + - ]: 1 : if (vmbuffer != InvalidBuffer)
6092 : 0 : ReleaseBuffer(vmbuffer);
6093 : :
6094 : 1 : return result;
6095 : 1 : }
6096 : :
6097 : : /*
6098 : : * heap_lock_updated_tuple
6099 : : * Follow update chain when locking an updated tuple, acquiring locks (row
6100 : : * marks) on the updated versions.
6101 : : *
6102 : : * 'prior_infomask', 'prior_raw_xmax' and 'prior_ctid' are the corresponding
6103 : : * fields from the initial tuple. We will lock the tuples starting from the
6104 : : * one that 'prior_ctid' points to. Note: This function does not lock the
6105 : : * initial tuple itself.
6106 : : *
6107 : : * This function doesn't check visibility; it just unconditionally marks the
6108 : : * tuple(s) as locked. If any tuple in the updated chain is being deleted
6109 : : * concurrently (or updated with the key being modified), sleep until the
6110 : : * transaction doing it is finished.
6111 : : *
6112 : : * Note that we don't acquire heavyweight tuple locks on the tuples we walk
6113 : : * when we have to wait for other transactions to release them, as opposed to
6114 : : * what heap_lock_tuple does. The reason is that having more than one
6115 : : * transaction walking the chain is probably uncommon enough that risk of
6116 : : * starvation is not likely: one of the preconditions for being here is that
6117 : : * the snapshot in use predates the update that created this tuple (because we
6118 : : * started at an earlier version of the tuple), but at the same time such a
6119 : : * transaction cannot be using repeatable read or serializable isolation
6120 : : * levels, because that would lead to a serializability failure.
6121 : : */
6122 : : static TM_Result
6123 : 1 : heap_lock_updated_tuple(Relation rel,
6124 : : uint16 prior_infomask,
6125 : : TransactionId prior_raw_xmax,
6126 : : const ItemPointerData *prior_ctid,
6127 : : TransactionId xid, LockTupleMode mode)
6128 : : {
6129 : : INJECTION_POINT("heap_lock_updated_tuple", NULL);
6130 : :
6131 : : /*
6132 : : * If the tuple has moved into another partition (effectively a delete),
6133 : : * stop here.
6134 : : */
6135 [ - + ]: 1 : if (!ItemPointerIndicatesMovedPartitions(prior_ctid))
6136 : : {
6137 : 1 : TransactionId prior_xmax;
6138 : :
6139 : : /*
6140 : : * If this is the first possibly-multixact-able operation in the
6141 : : * current transaction, set my per-backend OldestMemberMXactId
6142 : : * setting. We can be certain that the transaction will never become a
6143 : : * member of any older MultiXactIds than that. (We have to do this
6144 : : * even if we end up just using our own TransactionId below, since
6145 : : * some other backend could incorporate our XID into a MultiXact
6146 : : * immediately afterwards.)
6147 : : */
6148 : 1 : MultiXactIdSetOldestMember();
6149 : :
6150 [ + - ]: 1 : prior_xmax = (prior_infomask & HEAP_XMAX_IS_MULTI) ?
6151 : 1 : MultiXactIdGetUpdateXid(prior_raw_xmax, prior_infomask) : prior_raw_xmax;
6152 : 1 : return heap_lock_updated_tuple_rec(rel, prior_xmax, prior_ctid, xid, mode);
6153 : 1 : }
6154 : :
6155 : : /* nothing to lock */
6156 : 0 : return TM_Ok;
6157 : 1 : }
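/*
 * Editor's sketch (hypothetical, not part of heapam.c): heap_lock_tuple(),
 * which lies outside this excerpt, reaches heap_lock_updated_tuple()
 * roughly as below once it finds that the tuple being locked has a newer
 * version and the caller asked to follow updates.
 */
static TM_Result
follow_updates_sketch(Relation rel, HeapTuple tuple, LockTupleMode mode)
{
    HeapTupleHeader td = tuple->t_data;

    return heap_lock_updated_tuple(rel,
                                   td->t_infomask,
                                   HeapTupleHeaderGetRawXmax(td),
                                   &td->t_ctid,
                                   GetCurrentTransactionId(),
                                   mode);
}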
6158 : :
6159 : : /*
6160 : : * heap_finish_speculative - mark speculative insertion as successful
6161 : : *
6162 : : * To successfully finish a speculative insertion we have to clear the
6163 : : * speculative token from the tuple. To do so, the t_ctid field, which
6164 : : * contains the speculative token value, is modified in place to point to the
6165 : : * tuple itself, which is characteristic of a newly inserted ordinary tuple.
6166 : : *
6167 : : * NB: It is not ok to commit without either finishing or aborting a
6168 : : * speculative insertion. We could treat speculative tuples of committed
6169 : : * transactions implicitly as completed, but then we would have to be prepared
6170 : : * to deal with speculative tokens on committed tuples. That wouldn't be
6171 : : * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
6172 : : * but clearing the token at completion isn't very expensive either.
6173 : : * An explicit confirmation WAL record also makes logical decoding simpler.
6174 : : */
6175 : : void
6176 : 86 : heap_finish_speculative(Relation relation, const ItemPointerData *tid)
6177 : : {
6178 : 86 : Buffer buffer;
6179 : 86 : Page page;
6180 : 86 : OffsetNumber offnum;
6181 : 86 : ItemId lp;
6182 : 86 : HeapTupleHeader htup;
6183 : :
6184 : 86 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
6185 : 86 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6186 : 86 : page = BufferGetPage(buffer);
6187 : :
6188 : 86 : offnum = ItemPointerGetOffsetNumber(tid);
6189 [ + - ]: 86 : if (offnum < 1 || offnum > PageGetMaxOffsetNumber(page))
6190 [ # # # # ]: 0 : elog(ERROR, "offnum out of range");
6191 : 86 : lp = PageGetItemId(page, offnum);
6192 [ + - ]: 86 : if (!ItemIdIsNormal(lp))
6193 [ # # # # ]: 0 : elog(ERROR, "invalid lp");
6194 : :
6195 : 86 : htup = (HeapTupleHeader) PageGetItem(page, lp);
6196 : :
6197 : : /* NO EREPORT(ERROR) from here till changes are logged */
6198 : 86 : START_CRIT_SECTION();
6199 : :
6200 [ + - ]: 86 : Assert(HeapTupleHeaderIsSpeculative(htup));
6201 : :
6202 : 86 : MarkBufferDirty(buffer);
6203 : :
6204 : : /*
6205 : : * Replace the speculative insertion token with a real t_ctid, pointing to
6206 : : * itself like it does on regular tuples.
6207 : : */
6208 : 86 : htup->t_ctid = *tid;
6209 : :
6210 : : /* XLOG stuff */
6211 [ + + + - : 86 : if (RelationNeedsWAL(relation))
+ - - + ]
6212 : : {
6213 : 83 : xl_heap_confirm xlrec;
6214 : 83 : XLogRecPtr recptr;
6215 : :
6216 : 83 : xlrec.offnum = ItemPointerGetOffsetNumber(tid);
6217 : :
6218 : 83 : XLogBeginInsert();
6219 : :
6220 : : /* We want the same filtering on this as on a plain insert */
6221 : 83 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6222 : :
6223 : 83 : XLogRegisterData(&xlrec, SizeOfHeapConfirm);
6224 : 83 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6225 : :
6226 : 83 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6227 : :
6228 : 83 : PageSetLSN(page, recptr);
6229 : 83 : }
6230 : :
6231 [ + - ]: 86 : END_CRIT_SECTION();
6232 : :
6233 : 86 : UnlockReleaseBuffer(buffer);
6234 : 86 : }
6235 : :
6236 : : /*
6237 : : * heap_abort_speculative - kill a speculatively inserted tuple
6238 : : *
6239 : : * Marks a tuple that was speculatively inserted in the same command as dead,
6240 : : * by setting its xmin as invalid. That makes it immediately appear as dead
6241 : : * to all transactions, including our own. In particular, it makes
6242 : : * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
6243 : : * inserting a duplicate key value won't unnecessarily wait for our whole
6244 : : * transaction to finish (it'll just wait for our speculative insertion to
6245 : : * finish).
6246 : : *
6247 : : * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
6248 : : * that arise due to a mutual dependency that is not user visible. By
6249 : : * definition, unprincipled deadlocks cannot be prevented by the user
6250 : : * reordering lock acquisition in client code, because the implementation level
6251 : : * lock acquisitions are not under the user's direct control. If speculative
6252 : : * inserters did not take this precaution, then under high concurrency they
6253 : : * could deadlock with each other, which would not be acceptable.
6254 : : *
6255 : : * This is somewhat redundant with heap_delete, but we prefer to have a
6256 : : * dedicated routine with stripped-down requirements. Note that this is also
6257 : : * used to delete the TOAST tuples created during speculative insertion.
6258 : : *
6259 : : * This routine does not affect logical decoding, since decoding only acts
6260 : : * on confirmation records.
6261 : : */
6262 : : void
6263 : 0 : heap_abort_speculative(Relation relation, const ItemPointerData *tid)
6264 : : {
6265 : 0 : TransactionId xid = GetCurrentTransactionId();
6266 : 0 : ItemId lp;
6267 : 0 : HeapTupleData tp;
6268 : 0 : Page page;
6269 : 0 : BlockNumber block;
6270 : 0 : Buffer buffer;
6271 : :
6272 [ # # ]: 0 : Assert(ItemPointerIsValid(tid));
6273 : :
6274 : 0 : block = ItemPointerGetBlockNumber(tid);
6275 : 0 : buffer = ReadBuffer(relation, block);
6276 : 0 : page = BufferGetPage(buffer);
6277 : :
6278 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6279 : :
6280 : : /*
6281 : : * The page can't be all-visible: we just inserted into it, and our
6282 : : * transaction is still running.
6283 : : */
6284 [ # # ]: 0 : Assert(!PageIsAllVisible(page));
6285 : :
6286 : 0 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6287 [ # # ]: 0 : Assert(ItemIdIsNormal(lp));
6288 : :
6289 : 0 : tp.t_tableOid = RelationGetRelid(relation);
6290 : 0 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6291 : 0 : tp.t_len = ItemIdGetLength(lp);
6292 : 0 : tp.t_self = *tid;
6293 : :
6294 : : /*
6295 : : * Sanity check that the tuple really is a speculatively inserted tuple,
6296 : : * inserted by us.
6297 : : */
6298 [ # # ]: 0 : if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6299 [ # # # # ]: 0 : elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6300 [ # # # # ]: 0 : if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6301 [ # # # # ]: 0 : elog(ERROR, "attempted to kill a non-speculative tuple");
6302 [ # # ]: 0 : Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6303 : :
6304 : : /*
6305 : : * No need to check for serializable conflicts here. There is never a
6306 : : * need for a combo CID, either. No need to extract replica identity, or
6307 : : * do anything special with infomask bits.
6308 : : */
6309 : :
6310 : 0 : START_CRIT_SECTION();
6311 : :
6312 : : /*
6313 : : * The tuple will become DEAD immediately. Flag that this page is a
6314 : : * candidate for pruning by setting xmin to TransactionXmin. While not
6315 : : * immediately prunable, it is the oldest xid we can cheaply determine
6316 : : * that's safe against wraparound / being older than the table's
6317 : : * relfrozenxid. To defend against the unlikely case of a new relation
6318 : : * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
6319 : : * if so (vacuum can't subsequently move relfrozenxid to beyond
6320 : : * TransactionXmin, so there's no race here).
6321 : : */
6322 [ # # ]: 0 : Assert(TransactionIdIsValid(TransactionXmin));
6323 : : {
6324 : 0 : TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
6325 : 0 : TransactionId prune_xid;
6326 : :
6327 [ # # ]: 0 : if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
6328 : 0 : prune_xid = relfrozenxid;
6329 : : else
6330 : 0 : prune_xid = TransactionXmin;
6331 [ # # # # : 0 : PageSetPrunable(page, prune_xid);
# # ]
6332 : 0 : }
6333 : :
6334 : : /* store transaction information of xact deleting the tuple */
6335 : 0 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6336 : 0 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6337 : :
6338 : : /*
6339 : : * Set the tuple header xmin to InvalidTransactionId. This makes the
6340 : : * tuple immediately invisible to everyone. (In particular, to any
6341 : : * transactions waiting on the speculative token, woken up later.)
6342 : : */
6343 : 0 : HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6344 : :
6345 : : /* Clear the speculative insertion token too */
6346 : 0 : tp.t_data->t_ctid = tp.t_self;
6347 : :
6348 : 0 : MarkBufferDirty(buffer);
6349 : :
6350 : : /*
6351 : : * XLOG stuff
6352 : : *
6353 : : * The WAL records generated here match heap_delete(). The same recovery
6354 : : * routines are used.
6355 : : */
6356 [ # # # # : 0 : if (RelationNeedsWAL(relation))
# # # # ]
6357 : : {
6358 : 0 : xl_heap_delete xlrec;
6359 : 0 : XLogRecPtr recptr;
6360 : :
6361 : 0 : xlrec.flags = XLH_DELETE_IS_SUPER;
6362 : 0 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6363 : 0 : tp.t_data->t_infomask2);
6364 : 0 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6365 : 0 : xlrec.xmax = xid;
6366 : :
6367 : 0 : XLogBeginInsert();
6368 : 0 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
6369 : 0 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6370 : :
6371 : : /* No replica identity & replication origin logged */
6372 : :
6373 : 0 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6374 : :
6375 : 0 : PageSetLSN(page, recptr);
6376 : 0 : }
6377 : :
6378 [ # # ]: 0 : END_CRIT_SECTION();
6379 : :
6380 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6381 : :
6382 [ # # ]: 0 : if (HeapTupleHasExternal(&tp))
6383 : : {
6384 [ # # ]: 0 : Assert(!IsToastRelation(relation));
6385 : 0 : heap_toast_delete(relation, &tp, true);
6386 : 0 : }
6387 : :
6388 : : /*
6389 : : * Never need to mark tuple for invalidation, since catalogs don't support
6390 : : * speculative insertion
6391 : : */
6392 : :
6393 : : /* Now we can release the buffer */
6394 : 0 : ReleaseBuffer(buffer);
6395 : :
6396 : : /* count deletion, as we counted the insertion too */
6397 : 0 : pgstat_count_heap_delete(relation);
6398 : 0 : }
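/*
 * Editor's sketch (simplified, not part of heapam.c): the speculative
 * insertion lifecycle completed by heap_finish_speculative() and
 * heap_abort_speculative().  The real driver is the executor's ON CONFLICT
 * path; the speculative-token setup and the conflict check are elided here.
 */
static void
speculative_insert_sketch(Relation rel, HeapTuple tup, CommandId cid,
                          bool conflict_found)
{
    /* tuple goes in carrying a speculative token in place of a real ctid */
    heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, NULL);

    if (conflict_found)
        heap_abort_speculative(rel, &tup->t_self);  /* kill our tuple */
    else
        heap_finish_speculative(rel, &tup->t_self); /* restore real ctid */
}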
6399 : :
6400 : : /*
6401 : : * heap_inplace_lock - protect inplace update from concurrent heap_update()
6402 : : *
6403 : : * Evaluate whether the tuple's state is compatible with a no-key update.
6404 : : * Current transaction rowmarks are fine, as is KEY SHARE from any
6405 : : * transaction. If compatible, return true with the buffer exclusive-locked,
6406 : : * and the caller must release that by calling
6407 : : * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
6408 : : * an error. Otherwise, call release_callback(arg), wait for blocking
6409 : : * transactions to end, and return false.
6410 : : *
6411 : : * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
6412 : : * DDL, this doesn't guarantee any particular predicate locking.
6413 : : *
6414 : : * heap_delete() is a rarer source of blocking transactions (xwait). We'll
6415 : : * wait for such a transaction just like for the normal heap_update() case.
6416 : : * Normal concurrent DROP commands won't cause that, because all inplace
6417 : : * updaters take some lock that conflicts with DROP. An explicit SQL "DELETE
6418 : : * FROM pg_class" can cause it. By waiting, if the concurrent transaction
6419 : : * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
6420 : : * can find the successor tuple.
6421 : : *
6422 : : * Readers of inplace-updated fields expect changes to those fields to be
6423 : : * durable. For example, vac_truncate_clog() reads datfrozenxid from
6424 : : * pg_database tuples via catalog snapshots. A future snapshot must not
6425 : : * return a lower datfrozenxid for the same database OID (lower in the
6426 : : * FullTransactionIdPrecedes() sense). We achieve that since no update of a
6427 : : * tuple can start while we hold a lock on its buffer. In cases like
6428 : : * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
6429 : : * to this transaction. ROLLBACK then is one case where it's okay to lose
6430 : : * inplace updates. (Restoring relhasindex=false on ROLLBACK is fine, since
6431 : : * any concurrent CREATE INDEX would have blocked, then inplace-updated the
6432 : : * committed tuple.)
6433 : : *
6434 : : * In principle, we could avoid waiting by overwriting every tuple in the
6435 : : * updated tuple chain. Reader expectations permit updating a tuple only if
6436 : : * it's aborted, is the tail of the chain, or we already updated the tuple
6437 : : * referenced in its t_ctid. Hence, we would need to overwrite the tuples in
6438 : : * order from tail to head. That would imply either (a) mutating all tuples
6439 : : * in one critical section or (b) accepting a chance of partial completion.
6440 : : * Partial completion of a relfrozenxid update would have the weird
6441 : : * consequence that the table's next VACUUM could see the table's relfrozenxid
6442 : : * move forward between vacuum_get_cutoffs() and finishing.
6443 : : */
6444 : : bool
6445 : 10566 : heap_inplace_lock(Relation relation,
6446 : : HeapTuple oldtup_ptr, Buffer buffer,
6447 : : void (*release_callback) (void *), void *arg)
6448 : : {
6449 : 10566 : HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
6450 : 10566 : TM_Result result;
6451 : 10566 : bool ret;
6452 : :
6453 : : #ifdef USE_ASSERT_CHECKING
6454 [ + + ]: 10566 : if (RelationGetRelid(relation) == RelationRelationId)
6455 : 10378 : check_inplace_rel_lock(oldtup_ptr);
6456 : : #endif
6457 : :
6458 [ + - ]: 10566 : Assert(BufferIsValid(buffer));
6459 : :
6460 : : /*
6461 : : * Register shared cache invals if necessary. Other sessions may finish
6462 : : * inplace updates of this tuple between this step and LockTuple(). Since
6463 : : * inplace updates don't change cache keys, that's harmless.
6464 : : *
6465 : : * While it's tempting to register invals only after confirming we can
6466 : : * return true, the following obstacle precludes reordering steps that
6467 : : * way. Registering invals might reach a CatalogCacheInitializeCache()
6468 : : * that locks "buffer". That would hang indefinitely if running after our
6469 : : * own LockBuffer(). Hence, we must register invals before LockBuffer().
6470 : : */
6471 : 10566 : CacheInvalidateHeapTupleInplace(relation, oldtup_ptr);
6472 : :
6473 : 10566 : LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6474 : 10566 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6475 : :
6476 : : /*----------
6477 : : * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
6478 : : *
6479 : : * - wait unconditionally
6480 : : * - already locked tuple above, since inplace needs that unconditionally
6481 : : * - don't recheck header after wait: simpler to defer to next iteration
6482 : : * - don't try to continue even if the updater aborts: likewise
6483 : : * - no crosscheck
6484 : : */
6485 : 21132 : result = HeapTupleSatisfiesUpdate(&oldtup, GetCurrentCommandId(false),
6486 : 10566 : buffer);
6487 : :
6488 [ + - ]: 10566 : if (result == TM_Invisible)
6489 : : {
6490 : : /* no known way this can happen */
6491 [ # # # # ]: 0 : ereport(ERROR,
6492 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6493 : : errmsg_internal("attempted to overwrite invisible tuple")));
6494 : 0 : }
6495 [ + - ]: 10566 : else if (result == TM_SelfModified)
6496 : : {
6497 : : /*
6498 : : * CREATE INDEX might reach this if an expression is silly enough to
6499 : : * call e.g. SELECT ... FROM pg_class FOR SHARE. C code of other SQL
6500 : : * statements might get here after a heap_update() of the same row, in
6501 : : * the absence of an intervening CommandCounterIncrement().
6502 : : */
6503 [ # # # # ]: 0 : ereport(ERROR,
6504 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6505 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
6506 : 0 : }
6507 [ - + ]: 10566 : else if (result == TM_BeingModified)
6508 : : {
6509 : 0 : TransactionId xwait;
6510 : 0 : uint16 infomask;
6511 : :
6512 : 0 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
6513 : 0 : infomask = oldtup.t_data->t_infomask;
6514 : :
6515 [ # # ]: 0 : if (infomask & HEAP_XMAX_IS_MULTI)
6516 : : {
6517 : 0 : LockTupleMode lockmode = LockTupleNoKeyExclusive;
6518 : 0 : MultiXactStatus mxact_status = MultiXactStatusNoKeyUpdate;
6519 : 0 : int remain;
6520 : :
6521 [ # # # # ]: 0 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
6522 : 0 : lockmode, NULL))
6523 : : {
6524 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6525 : 0 : release_callback(arg);
6526 : 0 : ret = false;
6527 : 0 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
6528 : 0 : relation, &oldtup.t_self, XLTW_Update,
6529 : : &remain);
6530 : 0 : }
6531 : : else
6532 : 0 : ret = true;
6533 : 0 : }
6534 [ # # ]: 0 : else if (TransactionIdIsCurrentTransactionId(xwait))
6535 : 0 : ret = true;
6536 [ # # ]: 0 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
6537 : 0 : ret = true;
6538 : : else
6539 : : {
6540 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6541 : 0 : release_callback(arg);
6542 : 0 : ret = false;
6543 : 0 : XactLockTableWait(xwait, relation, &oldtup.t_self,
6544 : : XLTW_Update);
6545 : : }
6546 : 0 : }
6547 : : else
6548 : : {
6549 : 10566 : ret = (result == TM_Ok);
6550 [ + - ]: 10566 : if (!ret)
6551 : : {
6552 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6553 : 0 : release_callback(arg);
6554 : 0 : }
6555 : : }
6556 : :
6557 : : /*
6558 : : * GetCatalogSnapshot() relies on invalidation messages to know when to
6559 : : * take a new snapshot. COMMIT of xwait is responsible for sending the
6560 : : * invalidation. We're not acquiring heavyweight locks sufficient to
6561 : : * block if not yet sent, so we must take a new snapshot to ensure a later
6562 : : * attempt has a fair chance. While we don't need this if xwait aborted,
6563 : : * don't bother optimizing that.
6564 : : */
6565 [ + - ]: 10566 : if (!ret)
6566 : : {
6567 : 0 : UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6568 : 0 : ForgetInplace_Inval();
6569 : 0 : InvalidateCatalogSnapshot();
6570 : 0 : }
6571 : 21132 : return ret;
6572 : 10566 : }
6573 : :
6574 : : /*
6575 : : * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
6576 : : *
6577 : : * The tuple cannot change size, and therefore its header fields and null
6578 : : * bitmap (if any) don't change either.
6579 : : *
6580 : : * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
6581 : : */
6582 : : void
6583 : 7953 : heap_inplace_update_and_unlock(Relation relation,
6584 : : HeapTuple oldtup, HeapTuple tuple,
6585 : : Buffer buffer)
6586 : : {
6587 : 7953 : HeapTupleHeader htup = oldtup->t_data;
6588 : 7953 : uint32 oldlen;
6589 : 7953 : uint32 newlen;
6590 : 7953 : char *dst;
6591 : 7953 : char *src;
6592 : 7953 : int nmsgs = 0;
6593 : 7953 : SharedInvalidationMessage *invalMessages = NULL;
6594 : 7953 : bool RelcacheInitFileInval = false;
6595 : :
6596 [ + - ]: 7953 : Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
6597 : 7953 : oldlen = oldtup->t_len - htup->t_hoff;
6598 : 7953 : newlen = tuple->t_len - tuple->t_data->t_hoff;
6599 [ + - ]: 7953 : if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6600 [ # # # # ]: 0 : elog(ERROR, "wrong tuple length");
6601 : :
6602 : 7953 : dst = (char *) htup + htup->t_hoff;
6603 : 7953 : src = (char *) tuple->t_data + tuple->t_data->t_hoff;
6604 : :
6605 : : /* Like RecordTransactionCommit(), log only if needed */
6606 [ + + ]: 7953 : if (XLogStandbyInfoActive())
6607 : 653 : nmsgs = inplaceGetInvalidationMessages(&invalMessages,
6608 : : &RelcacheInitFileInval);
6609 : :
6610 : : /*
6611 : : * Unlink relcache init files as needed. If unlinking, acquire
6612 : : * RelCacheInitLock until after associated invalidations. By doing this
6613 : : * in advance, if we checkpoint and then crash between inplace
6614 : : * XLogInsert() and inval, we don't rely on StartupXLOG() ->
6615 : : * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
6616 : : * neglect to PANIC on EIO.
6617 : : */
6618 : 7953 : PreInplace_Inval();
6619 : :
6620 : : /*----------
6621 : : * NO EREPORT(ERROR) from here till changes are complete
6622 : : *
6623 : : * Our buffer lock won't stop a reader having already pinned and checked
6624 : : * visibility for this tuple. Hence, we write WAL first, then mutate the
6625 : : * buffer. Like in MarkBufferDirtyHint() or RecordTransactionCommit(),
6626 : : * checkpoint delay makes that acceptable. With the usual order of
6627 : : * changes, a crash after memcpy() and before XLogInsert() could allow
6628 : : * datfrozenxid to overtake relfrozenxid:
6629 : : *
6630 : : * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
6631 : : * ["R" is a VACUUM tbl]
6632 : : * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
6633 : : * D: systable_getnext() returns pg_class tuple of tbl
6634 : : * R: memcpy() into pg_class tuple of tbl
6635 : : * D: raise pg_database.datfrozenxid, XLogInsert(), finish
6636 : : * [crash]
6637 : : * [recovery restores datfrozenxid w/o relfrozenxid]
6638 : : *
6639 : : * Mimic MarkBufferDirtyHint() subroutine XLogSaveBufferForHint().
6640 : : * Specifically, use DELAY_CHKPT_START, and copy the buffer to the stack.
6641 : : * The stack copy facilitates a FPI of the post-mutation block before we
6642 : : * accept other sessions seeing it. DELAY_CHKPT_START allows us to
6643 : : * XLogInsert() before MarkBufferDirty(). Since XLogSaveBufferForHint()
6644 : : * can operate under BUFFER_LOCK_SHARED, it can't avoid DELAY_CHKPT_START.
6645 : : * This function, however, likely could avoid it with the following order
6646 : : * of operations: MarkBufferDirty(), XLogInsert(), memcpy(). Opt to use
6647 : : * DELAY_CHKPT_START here, too, as a way to have fewer distinct code
6648 : : * patterns to analyze. Inplace update isn't so frequent that it should
6649 : : * pursue the small optimization of skipping DELAY_CHKPT_START.
6650 : : */
6651 [ + - ]: 7953 : Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
6652 : 7953 : START_CRIT_SECTION();
6653 : 7953 : MyProc->delayChkptFlags |= DELAY_CHKPT_START;
6654 : :
6655 : : /* XLOG stuff */
6656 [ + - + + : 7953 : if (RelationNeedsWAL(relation))
+ - + + ]
6657 : : {
6658 : 7949 : xl_heap_inplace xlrec;
6659 : 7949 : PGAlignedBlock copied_buffer;
6660 : 7949 : char *origdata = (char *) BufferGetBlock(buffer);
6661 : 7949 : Page page = BufferGetPage(buffer);
6662 : 7949 : uint16 lower = ((PageHeader) page)->pd_lower;
6663 : 7949 : uint16 upper = ((PageHeader) page)->pd_upper;
6664 : 7949 : uintptr_t dst_offset_in_block;
6665 : 7949 : RelFileLocator rlocator;
6666 : 7949 : ForkNumber forkno;
6667 : 7949 : BlockNumber blkno;
6668 : 7949 : XLogRecPtr recptr;
6669 : :
6670 : 7949 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6671 : 7949 : xlrec.dbId = MyDatabaseId;
6672 : 7949 : xlrec.tsId = MyDatabaseTableSpace;
6673 : 7949 : xlrec.relcacheInitFileInval = RelcacheInitFileInval;
6674 : 7949 : xlrec.nmsgs = nmsgs;
6675 : :
6676 : 7949 : XLogBeginInsert();
6677 : 7949 : XLogRegisterData(&xlrec, MinSizeOfHeapInplace);
6678 [ + + ]: 7949 : if (nmsgs != 0)
6679 : 720 : XLogRegisterData(invalMessages,
6680 : 360 : nmsgs * sizeof(SharedInvalidationMessage));
6681 : :
6682 : : /* register block matching what buffer will look like after changes */
6683 : 7949 : memcpy(copied_buffer.data, origdata, lower);
6684 : 7949 : memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
6685 : 7949 : dst_offset_in_block = dst - origdata;
6686 : 7949 : memcpy(copied_buffer.data + dst_offset_in_block, src, newlen);
6687 : 7949 : BufferGetTag(buffer, &rlocator, &forkno, &blkno);
6688 [ + - ]: 7949 : Assert(forkno == MAIN_FORKNUM);
6689 : 7949 : XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
6690 : : REGBUF_STANDARD);
6691 : 7949 : XLogRegisterBufData(0, src, newlen);
6692 : :
6693 : : /* inplace updates aren't decoded atm, don't log the origin */
6694 : :
6695 : 7949 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6696 : :
6697 : 7949 : PageSetLSN(page, recptr);
6698 : 7949 : }
6699 : :
6700 : 7953 : memcpy(dst, src, newlen);
6701 : :
6702 : 7953 : MarkBufferDirty(buffer);
6703 : :
6704 : 7953 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6705 : :
6706 : : /*
6707 : : * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
6708 : : * do this before UnlockTuple().
6709 : : */
6710 : 7953 : AtInplace_Inval();
6711 : :
6712 : 7953 : MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
6713 [ + - ]: 7953 : END_CRIT_SECTION();
6714 : 7953 : UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
6715 : :
6716 : 7953 : AcceptInvalidationMessages(); /* local processing of just-sent inval */
6717 : :
6718 : : /*
6719 : : * Queue a transactional inval, for logical decoding and for third-party
6720 : : * code that might have been relying on it since long before inplace
6721 : : * update adopted immediate invalidation. See README.tuplock section
6722 : : * "Reading inplace-updated columns" for logical decoding details.
6723 : : */
6724 [ + + ]: 7953 : if (!IsBootstrapProcessingMode())
6725 : 7660 : CacheInvalidateHeapTuple(relation, tuple, NULL);
6726 : 7953 : }
6727 : :
6728 : : /*
6729 : : * heap_inplace_unlock - reverse of heap_inplace_lock
6730 : : */
6731 : : void
6732 : 2613 : heap_inplace_unlock(Relation relation,
6733 : : HeapTuple oldtup, Buffer buffer)
6734 : : {
6735 : 2613 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6736 : 2613 : UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
6737 : 2613 : ForgetInplace_Inval();
6738 : 2613 : }
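/*
 * Editor's sketch (schematic, not part of heapam.c): how the three inplace
 * entry points above compose.  In the tree, systable_inplace_update_begin()
 * and systable_inplace_update_finish() wrap this protocol; re-fetching the
 * tuple and buffer after a failed lock attempt is elided here.
 */
static void
inplace_update_sketch(Relation rel, HeapTuple oldtup, HeapTuple newtup,
                      Buffer buf, void (*release) (void *), void *arg,
                      bool still_needed)
{
    for (;;)
    {
        if (!heap_inplace_lock(rel, oldtup, buf, release, arg))
            continue;           /* waits happened inside; retry from scratch */

        if (still_needed)       /* caller rechecks the tuple under the lock */
            heap_inplace_update_and_unlock(rel, oldtup, newtup, buf);
        else
            heap_inplace_unlock(rel, oldtup, buf);
        break;
    }
}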
6739 : :
6740 : : #define FRM_NOOP 0x0001
6741 : : #define FRM_INVALIDATE_XMAX 0x0002
6742 : : #define FRM_RETURN_IS_XID 0x0004
6743 : : #define FRM_RETURN_IS_MULTI 0x0008
6744 : : #define FRM_MARK_COMMITTED 0x0010
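/*
 * Editor's sketch (hypothetical, not part of heapam.c): how a caller such
 * as heap_prepare_freeze_tuple() might dispatch on the FRM_* flags set by
 * FreezeMultiXactId(); the flag semantics are spelled out in the function's
 * header comment below.  The forward declaration exists only for this
 * sketch.
 */
static TransactionId FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
                                       const struct VacuumCutoffs *cutoffs,
                                       uint16 *flags, HeapPageFreeze *pagefrz);

static void
freeze_multi_dispatch_sketch(MultiXactId multi, uint16 t_infomask,
                             const struct VacuumCutoffs *cutoffs,
                             HeapPageFreeze *pagefrz)
{
    uint16      flags;
    TransactionId newxmax;

    newxmax = FreezeMultiXactId(multi, t_infomask, cutoffs, &flags, pagefrz);

    if (flags & FRM_NOOP)
        return;                 /* keep the existing xmax untouched */
    if (flags & FRM_INVALIDATE_XMAX)
        Assert(!TransactionIdIsValid(newxmax)); /* xmax becomes invalid */
    else if (flags & FRM_RETURN_IS_XID)
        Assert(TransactionIdIsValid(newxmax));  /* single updater XID */
    else if (flags & FRM_RETURN_IS_MULTI)
        Assert(MultiXactIdIsValid(newxmax));    /* newly created multi */
}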
6745 : :
6746 : : /*
6747 : : * FreezeMultiXactId
6748 : : * Determine what to do during freezing when a tuple is marked by a
6749 : : * MultiXactId.
6750 : : *
6751 : : * "flags" is an output value; it's used to tell caller what to do on return.
6752 : : * "pagefrz" is an input/output value, used to manage page level freezing.
6753 : : *
6754 : : * Possible values that we can set in "flags":
6755 : : * FRM_NOOP
6756 : : * don't do anything -- keep existing Xmax
6757 : : * FRM_INVALIDATE_XMAX
6758 : : * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6759 : : * FRM_RETURN_IS_XID
6760 : : * The Xid return value is a single update Xid to set as xmax.
6761 : : * FRM_MARK_COMMITTED
6762 : : * Xmax can be marked as HEAP_XMAX_COMMITTED
6763 : : * FRM_RETURN_IS_MULTI
6764 : : * The return value is a new MultiXactId to set as new Xmax.
6765 : : * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
6766 : : *
6767 : : * Caller delegates control of page freezing to us. In practice we always
6768 : : * force freezing of caller's page unless FRM_NOOP processing is indicated.
6769 : : * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
6770 : : * can never be left behind. We freely choose when and how to process each
6771 : : * Multi, without ever violating the cutoff postconditions for freezing.
6772 : : *
6773 : : * It's useful to remove Multis on a proactive timeline (relative to freezing
6774 : : * XIDs) to keep MultiXact member SLRU buffer misses to a minimum. It can also
6775 : : * be cheaper for us in the short run, since eager processing lets us
6776 : : * avoid those SLRU buffer misses ourselves.
6777 : : *
6778 : : * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
6779 : : * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
6780 : : * This can usually be put off, which is often enough to avoid it altogether.
6781 : : * Allocating new multis during VACUUM should be avoided on general principle;
6782 : : * only VACUUM can advance relminmxid, so allocating new Multis here comes with
6783 : : * its own special risks.
6784 : : *
6785 : : * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
6786 : : * using heap_tuple_should_freeze when we haven't forced page-level freezing.
6787 : : *
6788 : : * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
6789 : : * have already forced page-level freezing, since that might incur the same
6790 : : * SLRU buffer misses that we specifically intended to avoid by freezing.
6791 : : */
6792 : : static TransactionId
6793 : 0 : FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
6794 : : const struct VacuumCutoffs *cutoffs, uint16 *flags,
6795 : : HeapPageFreeze *pagefrz)
6796 : : {
6797 : 0 : TransactionId newxmax;
6798 : 0 : MultiXactMember *members;
6799 : 0 : int nmembers;
6800 : 0 : bool need_replace;
6801 : 0 : int nnewmembers;
6802 : 0 : MultiXactMember *newmembers;
6803 : 0 : bool has_lockers;
6804 : 0 : TransactionId update_xid;
6805 : 0 : bool update_committed;
6806 : 0 : TransactionId FreezePageRelfrozenXid;
6807 : :
6808 : 0 : *flags = 0;
6809 : :
6810 : : /* We should only be called in Multis */
6811 [ # # ]: 0 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6812 : :
6813 [ # # # # ]: 0 : if (!MultiXactIdIsValid(multi) ||
6814 : 0 : HEAP_LOCKED_UPGRADED(t_infomask))
6815 : : {
6816 : 0 : *flags |= FRM_INVALIDATE_XMAX;
6817 : 0 : pagefrz->freeze_required = true;
6818 : 0 : return InvalidTransactionId;
6819 : : }
6820 [ # # ]: 0 : else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
6821 [ # # # # ]: 0 : ereport(ERROR,
6822 : : (errcode(ERRCODE_DATA_CORRUPTED),
6823 : : errmsg_internal("found multixact %u from before relminmxid %u",
6824 : : multi, cutoffs->relminmxid)));
6825 [ # # ]: 0 : else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
6826 : : {
6827 : 0 : TransactionId update_xact;
6828 : :
6829 : : /*
6830 : : * This old multi cannot possibly have members still running, but
6831 : : * verify just in case. If it was a locker only, it can be removed
6832 : : * without any further consideration; but if it contained an update,
6833 : : * we might need to preserve it.
6834 : : */
6835 [ # # # # ]: 0 : if (MultiXactIdIsRunning(multi,
6836 : 0 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6837 [ # # # # ]: 0 : ereport(ERROR,
6838 : : (errcode(ERRCODE_DATA_CORRUPTED),
6839 : : errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
6840 : : multi, cutoffs->OldestMxact)));
6841 : :
6842 [ # # ]: 0 : if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6843 : : {
6844 : 0 : *flags |= FRM_INVALIDATE_XMAX;
6845 : 0 : pagefrz->freeze_required = true;
6846 : 0 : return InvalidTransactionId;
6847 : : }
6848 : :
6849 : : /* replace multi with single XID for its updater? */
6850 : 0 : update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
6851 [ # # ]: 0 : if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
6852 [ # # # # ]: 0 : ereport(ERROR,
6853 : : (errcode(ERRCODE_DATA_CORRUPTED),
6854 : : errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
6855 : : multi, update_xact,
6856 : : cutoffs->relfrozenxid)));
6857 [ # # ]: 0 : else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
6858 : : {
6859 : : /*
6860 : : * Updater XID has to have aborted (otherwise the tuple would have
6861 : : * been pruned away instead, since updater XID is < OldestXmin).
6862 : : * Just remove xmax.
6863 : : */
6864 [ # # ]: 0 : if (TransactionIdDidCommit(update_xact))
6865 [ # # # # ]: 0 : ereport(ERROR,
6866 : : (errcode(ERRCODE_DATA_CORRUPTED),
6867 : : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6868 : : multi, update_xact,
6869 : : cutoffs->OldestXmin)));
6870 : 0 : *flags |= FRM_INVALIDATE_XMAX;
6871 : 0 : pagefrz->freeze_required = true;
6872 : 0 : return InvalidTransactionId;
6873 : : }
6874 : :
6875 : : /* Have to keep updater XID as new xmax */
6876 : 0 : *flags |= FRM_RETURN_IS_XID;
6877 : 0 : pagefrz->freeze_required = true;
6878 : 0 : return update_xact;
6879 : 0 : }
6880 : :
6881 : : /*
6882 : : * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
6883 : : * need to walk the whole members array to figure out what to do, if
6884 : : * anything.
6885 : : */
6886 : 0 : nmembers =
6887 : 0 : GetMultiXactIdMembers(multi, &members, false,
6888 : 0 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6889 [ # # ]: 0 : if (nmembers <= 0)
6890 : : {
6891 : : /* Nothing worth keeping */
6892 : 0 : *flags |= FRM_INVALIDATE_XMAX;
6893 : 0 : pagefrz->freeze_required = true;
6894 : 0 : return InvalidTransactionId;
6895 : : }
6896 : :
6897 : : /*
6898 : : * The FRM_NOOP case is the only case where we might need to ratchet back
6899 : : * FreezePageRelfrozenXid or FreezePageRelminMxid. It is also the only
6900 : : * case where our caller might ratchet back its NoFreezePageRelfrozenXid
6901 : : * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
6902 : : * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
6903 : : * trackers managed by VACUUM being ratcheted back by xmax to the degree
6904 : : * required to make it safe to leave xmax undisturbed, independent of
6905 : : * whether or not page freezing is triggered somewhere else.
6906 : : *
6907 : : * Our policy is to force freezing in every case other than FRM_NOOP,
6908 : : * which obviates the need to maintain either set of trackers, anywhere.
6909 : : * Every other case will reliably execute a freeze plan for xmax that
6910 : : * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
6911 : : * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
6912 : : * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
6913 : : * OldestXmin/OldestMxact, so later values never need to be tracked here.)
6914 : : */
6915 : 0 : need_replace = false;
6916 : 0 : FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
6917 [ # # ]: 0 : for (int i = 0; i < nmembers; i++)
6918 : : {
6919 : 0 : TransactionId xid = members[i].xid;
6920 : :
6921 [ # # ]: 0 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6922 : :
6923 [ # # ]: 0 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
6924 : : {
6925 : : /* Can't violate the FreezeLimit postcondition */
6926 : 0 : need_replace = true;
6927 : 0 : break;
6928 : : }
6929 [ # # ]: 0 : if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
6930 : 0 : FreezePageRelfrozenXid = xid;
6931 [ # # ]: 0 : }
6932 : :
6933 : : /* Can't violate the MultiXactCutoff postcondition, either */
6934 [ # # ]: 0 : if (!need_replace)
6935 : 0 : need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);
6936 : :
6937 [ # # ]: 0 : if (!need_replace)
6938 : : {
6939 : : /*
6940 : : * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
6941 : : * both together to make it safe to retain this particular multi after
6942 : : * freezing its page
6943 : : */
6944 : 0 : *flags |= FRM_NOOP;
6945 : 0 : pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
6946 [ # # ]: 0 : if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
6947 : 0 : pagefrz->FreezePageRelminMxid = multi;
6948 : 0 : pfree(members);
6949 : 0 : return multi;
6950 : : }
6951 : :
6952 : : /*
6953 : : * Do a more thorough second pass over the multi to figure out which
6954 : : * member XIDs actually need to be kept. Checking the precise status of
6955 : : * individual members might even show that we don't need to keep anything.
6956 : : * That is quite possible even though the Multi must be >= OldestMxact,
6957 : : * since our second pass only keeps member XIDs when it's truly necessary;
6958                 :                :  * even member XIDs >= OldestXmin often won't be kept by the second pass.
6959 : : */
6960 : 0 : nnewmembers = 0;
6961 : 0 : newmembers = palloc_array(MultiXactMember, nmembers);
6962 : 0 : has_lockers = false;
6963 : 0 : update_xid = InvalidTransactionId;
6964 : 0 : update_committed = false;
6965 : :
6966 : : /*
6967 : : * Determine whether to keep each member xid, or to ignore it instead
6968 : : */
6969 [ # # ]: 0 : for (int i = 0; i < nmembers; i++)
6970 : : {
6971 : 0 : TransactionId xid = members[i].xid;
6972 : 0 : MultiXactStatus mstatus = members[i].status;
6973 : :
6974 [ # # ]: 0 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6975 : :
6976 [ # # ]: 0 : if (!ISUPDATE_from_mxstatus(mstatus))
6977 : : {
6978 : : /*
6979 : : * Locker XID (not updater XID). We only keep lockers that are
6980 : : * still running.
6981 : : */
6982 [ # # # # ]: 0 : if (TransactionIdIsCurrentTransactionId(xid) ||
6983 : 0 : TransactionIdIsInProgress(xid))
6984 : : {
6985 [ # # ]: 0 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6986 [ # # # # ]: 0 : ereport(ERROR,
6987 : : (errcode(ERRCODE_DATA_CORRUPTED),
6988 : : errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
6989 : : multi, xid,
6990 : : cutoffs->OldestXmin)));
6991 : 0 : newmembers[nnewmembers++] = members[i];
6992 : 0 : has_lockers = true;
6993 : 0 : }
6994 : :
6995 : 0 : continue;
6996 : : }
6997 : :
6998 : : /*
6999 : : * Updater XID (not locker XID). Should we keep it?
7000 : : *
7001 : : * Since the tuple wasn't totally removed when vacuum pruned, the
7002 : : * update Xid cannot possibly be older than OldestXmin cutoff unless
7003 : : * the updater XID aborted. If the updater transaction is known
7004 : : * aborted or crashed then it's okay to ignore it, otherwise not.
7005 : : *
7006 : : * In any case the Multi should never contain two updaters, whatever
7007 : : * their individual commit status. Check for that first, in passing.
7008 : : */
7009 [ # # ]: 0 : if (TransactionIdIsValid(update_xid))
7010 [ # # # # ]: 0 : ereport(ERROR,
7011 : : (errcode(ERRCODE_DATA_CORRUPTED),
7012 : : errmsg_internal("multixact %u has two or more updating members",
7013 : : multi),
7014 : : errdetail_internal("First updater XID=%u second updater XID=%u.",
7015 : : update_xid, xid)));
7016 : :
7017 : : /*
7018 : : * As with all tuple visibility routines, it's critical to test
7019 : : * TransactionIdIsInProgress before TransactionIdDidCommit, because of
7020 : : * race conditions explained in detail in heapam_visibility.c.
7021 : : */
7022 [ # # # # ]: 0 : if (TransactionIdIsCurrentTransactionId(xid) ||
7023 : 0 : TransactionIdIsInProgress(xid))
7024 : 0 : update_xid = xid;
7025 [ # # ]: 0 : else if (TransactionIdDidCommit(xid))
7026 : : {
7027 : : /*
7028 : : * The transaction committed, so we can tell caller to set
7029 : : * HEAP_XMAX_COMMITTED. (We can only do this because we know the
7030 : : * transaction is not running.)
7031 : : */
7032 : 0 : update_committed = true;
7033 : 0 : update_xid = xid;
7034 : 0 : }
7035 : : else
7036 : : {
7037 : : /*
7038 : : * Not in progress, not committed -- must be aborted or crashed;
7039 : : * we can ignore it.
7040 : : */
7041 : 0 : continue;
7042 : : }
7043 : :
7044 : : /*
7045 : : * We determined that updater must be kept -- add it to pending new
7046 : : * members list
7047 : : */
7048 [ # # ]: 0 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
7049 [ # # # # ]: 0 : ereport(ERROR,
7050 : : (errcode(ERRCODE_DATA_CORRUPTED),
7051 : : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
7052 : : multi, xid, cutoffs->OldestXmin)));
7053 : 0 : newmembers[nnewmembers++] = members[i];
7054 [ # # # ]: 0 : }
7055 : :
7056 : 0 : pfree(members);
7057 : :
7058 : : /*
7059 : : * Determine what to do with caller's multi based on information gathered
7060 : : * during our second pass
7061 : : */
7062 [ # # ]: 0 : if (nnewmembers == 0)
7063 : : {
7064 : : /* Nothing worth keeping */
7065 : 0 : *flags |= FRM_INVALIDATE_XMAX;
7066 : 0 : newxmax = InvalidTransactionId;
7067 : 0 : }
7068 [ # # # # ]: 0 : else if (TransactionIdIsValid(update_xid) && !has_lockers)
7069 : : {
7070 : : /*
7071 : : * If there's a single member and it's an update, pass it back alone
7072 : : * without creating a new Multi. (XXX we could do this when there's a
7073 : : * single remaining locker, too, but that would complicate the API too
7074 : : * much; moreover, the case with the single updater is more
7075 : : * interesting, because those are longer-lived.)
7076 : : */
7077 [ # # ]: 0 : Assert(nnewmembers == 1);
7078 : 0 : *flags |= FRM_RETURN_IS_XID;
7079 [ # # ]: 0 : if (update_committed)
7080 : 0 : *flags |= FRM_MARK_COMMITTED;
7081 : 0 : newxmax = update_xid;
7082 : 0 : }
7083 : : else
7084 : : {
7085 : : /*
7086 : : * Create a new multixact with the surviving members of the previous
7087 : : * one, to set as new Xmax in the tuple
7088 : : */
7089 : 0 : newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
7090 : 0 : *flags |= FRM_RETURN_IS_MULTI;
7091 : : }
7092 : :
7093 : 0 : pfree(newmembers);
7094 : :
7095 : 0 : pagefrz->freeze_required = true;
7096 : 0 : return newxmax;
7097 : 0 : }
7098 : :
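                : : /*
                : :  * Editor's summary (not from the original source): the four possible
                : :  * FreezeMultiXactId outcomes, as signalled through *flags:
                : :  *
                : :  *	FRM_NOOP			keep xmax as-is; only the "freeze page" trackers
                : :  *						are ratcheted back so retaining the multi is safe
                : :  *	FRM_INVALIDATE_XMAX	nothing worth keeping; caller clears xmax
                : :  *	FRM_RETURN_IS_XID	the sole surviving updater XID becomes the new
                : :  *						xmax (FRM_MARK_COMMITTED may accompany it)
                : :  *	FRM_RETURN_IS_MULTI	a newly created multi carrying the surviving
                : :  *						members becomes the new xmax
                : :  *
                : :  * Every case except FRM_NOOP also sets pagefrz->freeze_required.
                : :  */
                : :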
7099 : : /*
7100 : : * heap_prepare_freeze_tuple
7101 : : *
7102 : : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7103 : : * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so,
7104 : : * setup enough state (in the *frz output argument) to enable caller to
7105 : : * process this tuple as part of freezing its page, and return true. Return
7106 : : * false if nothing can be changed about the tuple right now.
7107 : : *
7108 : : * Also sets *totally_frozen to true if the tuple will be totally frozen once
7109 : : * caller executes returned freeze plan (or if the tuple was already totally
7110 : : * frozen by an earlier VACUUM). This indicates that there are no remaining
7111 : : * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
7112 : : *
7113 : : * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
7114 : : * tuple that we returned true for, and then execute freezing. Caller must
7115 : : * initialize pagefrz fields for page as a whole before first call here for
7116 : : * each heap page.
7117 : : *
7118 : : * VACUUM caller decides on whether or not to freeze the page as a whole.
7119 : : * We'll often prepare freeze plans for a page that caller just discards.
7120 : : * However, VACUUM doesn't always get to make a choice; it must freeze when
7121 : : * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
7122 : : * MXIDs < MultiXactCutoff) can never be left behind. We help to make sure
7123 : : * that VACUUM always follows that rule.
7124 : : *
7125 : : * We sometimes force freezing of xmax MultiXactId values long before it is
7126 : : * strictly necessary to do so just to ensure the FreezeLimit postcondition.
7127 : : * It's worth processing MultiXactIds proactively when it is cheap to do so,
7128 : : * and it's convenient to make that happen by piggy-backing it on the "force
7129 : : * freezing" mechanism. Conversely, we sometimes delay freezing MultiXactIds
7130 : : * because it is expensive right now (though only when it's still possible to
7131 : : * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
7132 : : *
7133 : : * It is assumed that the caller has checked the tuple with
7134 : : * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
7135 : : * (else we should be removing the tuple, not freezing it).
7136 : : *
7137 : : * NB: This function has side effects: it might allocate a new MultiXactId.
7138 : : * It will be set as tuple's new xmax when our *frz output is processed within
7139 : : * heap_execute_freeze_tuple later on. If the tuple is in a shared buffer
7140 : : * then caller had better have an exclusive lock on it already.
7141 : : */
7142 : : bool
7143 : 1062076 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
7144 : : const struct VacuumCutoffs *cutoffs,
7145 : : HeapPageFreeze *pagefrz,
7146 : : HeapTupleFreeze *frz, bool *totally_frozen)
7147 : : {
7148 : 1062076 : bool xmin_already_frozen = false,
7149 : 1062076 : xmax_already_frozen = false;
7150 : 3186228 : bool freeze_xmin = false,
7151 : 1062076 : replace_xvac = false,
7152 : 1062076 : replace_xmax = false,
7153 : 1062076 : freeze_xmax = false;
7154 : 1062076 : TransactionId xid;
7155 : :
7156 : 1062076 : frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
7157 : 1062076 : frz->t_infomask2 = tuple->t_infomask2;
7158 : 1062076 : frz->t_infomask = tuple->t_infomask;
7159 : 1062076 : frz->frzflags = 0;
7160 : 1062076 : frz->checkflags = 0;
7161 : :
7162 : : /*
7163 : : * Process xmin, while keeping track of whether it's already frozen, or
7164 : : * will become frozen iff our freeze plan is executed by caller (could be
7165 : : * neither).
7166 : : */
7167 : 1062076 : xid = HeapTupleHeaderGetXmin(tuple);
7168 [ + + ]: 1062076 : if (!TransactionIdIsNormal(xid))
7169 : 28996 : xmin_already_frozen = true;
7170 : : else
7171 : : {
7172 [ + - ]: 1033080 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7173 [ # # # # ]: 0 : ereport(ERROR,
7174 : : (errcode(ERRCODE_DATA_CORRUPTED),
7175 : : errmsg_internal("found xmin %u from before relfrozenxid %u",
7176 : : xid, cutoffs->relfrozenxid)));
7177 : :
7178 : : /* Will set freeze_xmin flags in freeze plan below */
7179 : 1033080 : freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7180 : :
7181 : : /* Verify that xmin committed if and when freeze plan is executed */
7182 [ + + ]: 1033080 : if (freeze_xmin)
7183 : 831296 : frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
7184 : : }
7185 : :
7186 : : /*
7187 : : * Old-style VACUUM FULL is gone, but we have to process xvac for as long
7188 : : * as we support having MOVED_OFF/MOVED_IN tuples in the database
7189 : : */
7190 : 1062076 : xid = HeapTupleHeaderGetXvac(tuple);
7191 [ + - ]: 1062076 : if (TransactionIdIsNormal(xid))
7192 : : {
7193 [ # # ]: 0 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7194 [ # # ]: 0 : Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
7195 : :
7196 : : /*
7197 : : * For Xvac, we always freeze proactively. This allows totally_frozen
7198 : : * tracking to ignore xvac.
7199 : : */
7200 : 0 : replace_xvac = pagefrz->freeze_required = true;
7201 : :
7202 : : /* Will set replace_xvac flags in freeze plan below */
7203 : 0 : }
7204 : :
7205 : : /* Now process xmax */
7206 : 1062076 : xid = frz->xmax;
7207 [ - + ]: 1062076 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7208 : : {
7209 : : /* Raw xmax is a MultiXactId */
7210 : 0 : TransactionId newxmax;
7211 : 0 : uint16 flags;
7212 : :
7213 : : /*
7214 : : * We will either remove xmax completely (in the "freeze_xmax" path),
7215 : : * process xmax by replacing it (in the "replace_xmax" path), or
7216 : : * perform no-op xmax processing. The only constraint is that the
7217 : : * FreezeLimit/MultiXactCutoff postcondition must never be violated.
7218 : : */
7219 : 0 : newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
7220 : 0 : &flags, pagefrz);
7221 : :
7222 [ # # ]: 0 : if (flags & FRM_NOOP)
7223 : : {
7224 : : /*
7225 : : * xmax is a MultiXactId, and nothing about it changes for now.
7226 : : * This is the only case where 'freeze_required' won't have been
7227 : : * set for us by FreezeMultiXactId, as well as the only case where
7228 : : * neither freeze_xmax nor replace_xmax are set (given a multi).
7229 : : *
7230 : : * This is a no-op, but the call to FreezeMultiXactId might have
7231 : : * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
7232 : : * for us (the "freeze page" variants, specifically). That'll
7233 : : * make it safe for our caller to freeze the page later on, while
7234 : : * leaving this particular xmax undisturbed.
7235 : : *
7236 : : * FreezeMultiXactId is _not_ responsible for the "no freeze"
7237 : : * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
7238 : : * job. A call to heap_tuple_should_freeze for this same tuple
7239 : : * will take place below if 'freeze_required' isn't set already.
7240 : : * (This repeats work from FreezeMultiXactId, but allows "no
7241 : : * freeze" tracker maintenance to happen in only one place.)
7242 : : */
7243 [ # # ]: 0 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
7244 [ # # ]: 0 : Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
7245 : 0 : }
7246 [ # # ]: 0 : else if (flags & FRM_RETURN_IS_XID)
7247 : : {
7248 : : /*
7249 : : * xmax will become an updater Xid (original MultiXact's updater
7250 : : * member Xid will be carried forward as a simple Xid in Xmax).
7251 : : */
7252 [ # # ]: 0 : Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
7253 : :
7254 : : /*
7255 : : * NB -- some of these transformations are only valid because we
7256 : : * know the return Xid is a tuple updater (i.e. not merely a
7257 : : * locker.) Also note that the only reason we don't explicitly
7258 : : * worry about HEAP_KEYS_UPDATED is because it lives in
7259 : : * t_infomask2 rather than t_infomask.
7260 : : */
7261 : 0 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7262 : 0 : frz->xmax = newxmax;
7263 [ # # ]: 0 : if (flags & FRM_MARK_COMMITTED)
7264 : 0 : frz->t_infomask |= HEAP_XMAX_COMMITTED;
7265 : 0 : replace_xmax = true;
7266 : 0 : }
7267 [ # # ]: 0 : else if (flags & FRM_RETURN_IS_MULTI)
7268 : : {
7269 : 0 : uint16 newbits;
7270 : 0 : uint16 newbits2;
7271 : :
7272 : : /*
7273 : : * xmax is an old MultiXactId that we have to replace with a new
7274 : : * MultiXactId, to carry forward two or more original member XIDs.
7275 : : */
7276 [ # # ]: 0 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
7277 : :
7278 : : /*
7279 : : * We can't use GetMultiXactIdHintBits directly on the new multi
7280 : : * here; that routine initializes the masks to all zeroes, which
7281 : : * would lose other bits we need. Doing it this way ensures all
7282 : : * unrelated bits remain untouched.
7283 : : */
7284 : 0 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7285 : 0 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7286 : 0 : GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
7287 : 0 : frz->t_infomask |= newbits;
7288 : 0 : frz->t_infomask2 |= newbits2;
7289 : 0 : frz->xmax = newxmax;
7290 : 0 : replace_xmax = true;
7291 : 0 : }
7292 : : else
7293 : : {
7294 : : /*
7295 : : * Freeze plan for tuple "freezes xmax" in the strictest sense:
7296 : : * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
7297 : : */
7298 [ # # ]: 0 : Assert(flags & FRM_INVALIDATE_XMAX);
7299 [ # # ]: 0 : Assert(!TransactionIdIsValid(newxmax));
7300 : :
7301 : : /* Will set freeze_xmax flags in freeze plan below */
7302 : 0 : freeze_xmax = true;
7303 : : }
7304 : :
7305 : : /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
7306 [ # # # # ]: 0 : Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
7307 : 0 : }
7308 [ + + ]: 1062076 : else if (TransactionIdIsNormal(xid))
7309 : : {
7310 : : /* Raw xmax is normal XID */
7311 [ + - ]: 180337 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7312 [ # # # # ]: 0 : ereport(ERROR,
7313 : : (errcode(ERRCODE_DATA_CORRUPTED),
7314 : : errmsg_internal("found xmax %u from before relfrozenxid %u",
7315 : : xid, cutoffs->relfrozenxid)));
7316 : :
7317 : : /* Will set freeze_xmax flags in freeze plan below */
7318 : 180337 : freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7319 : :
7320 : : /*
7321 : : * Verify that xmax aborted if and when freeze plan is executed,
7322 : : * provided it's from an update. (A lock-only xmax can be removed
7323 : : * independent of this, since the lock is released at xact end.)
7324 : : */
7325 [ + + + + ]: 180337 : if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
7326 : 32 : frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
7327 : 180337 : }
7328 [ + - ]: 881739 : else if (!TransactionIdIsValid(xid))
7329 : : {
7330 : : /* Raw xmax is InvalidTransactionId XID */
7331 [ + - ]: 881739 : Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
7332 : 881739 : xmax_already_frozen = true;
7333 : 881739 : }
7334 : : else
7335 [ # # # # ]: 0 : ereport(ERROR,
7336 : : (errcode(ERRCODE_DATA_CORRUPTED),
7337 : : errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
7338 : : xid, tuple->t_infomask)));
7339 : :
7340 [ + + ]: 1062076 : if (freeze_xmin)
7341 : : {
7342 [ + - ]: 831296 : Assert(!xmin_already_frozen);
7343 : :
7344 : 831296 : frz->t_infomask |= HEAP_XMIN_FROZEN;
7345 : 831296 : }
7346 [ + - ]: 1062076 : if (replace_xvac)
7347 : : {
7348 : : /*
7349 : : * If a MOVED_OFF tuple is not dead, the xvac transaction must have
7350 : : * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
7351 : : * transaction succeeded.
7352 : : */
7353 [ # # ]: 0 : Assert(pagefrz->freeze_required);
7354 [ # # ]: 0 : if (tuple->t_infomask & HEAP_MOVED_OFF)
7355 : 0 : frz->frzflags |= XLH_INVALID_XVAC;
7356 : : else
7357 : 0 : frz->frzflags |= XLH_FREEZE_XVAC;
7358 : 0 : }
7359 [ + - ]: 1062076 : if (replace_xmax)
7360 : : {
7361 [ # # ]: 0 : Assert(!xmax_already_frozen && !freeze_xmax);
7362 [ # # ]: 0 : Assert(pagefrz->freeze_required);
7363 : :
7364 : : /* Already set replace_xmax flags in freeze plan earlier */
7365 : 0 : }
7366 [ + + ]: 1062076 : if (freeze_xmax)
7367 : : {
7368 [ + - ]: 42 : Assert(!xmax_already_frozen && !replace_xmax);
7369 : :
7370 : 42 : frz->xmax = InvalidTransactionId;
7371 : :
7372 : : /*
7373 : : * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
7374 : : * LOCKED. Normalize to INVALID just to be sure no one gets confused.
7375 : : * Also get rid of the HEAP_KEYS_UPDATED bit.
7376 : : */
7377 : 42 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7378 : 42 : frz->t_infomask |= HEAP_XMAX_INVALID;
7379 : 42 : frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
7380 : 42 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7381 : 42 : }
7382 : :
7383 : : /*
7384 : : * Determine if this tuple is already totally frozen, or will become
7385 : : * totally frozen (provided caller executes freeze plans for the page)
7386 : : */
7387 [ + + + + ]: 1922368 : *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
7388 [ + + ]: 860292 : (freeze_xmax || xmax_already_frozen));
7389 : :
7390 [ + + + + : 1062076 : if (!pagefrz->freeze_required && !(xmin_already_frozen &&
+ + ]
7391 : 27600 : xmax_already_frozen))
7392 : : {
7393 : : /*
7394 : : * So far no previous tuple from the page made freezing mandatory.
7395 : : * Does this tuple force caller to freeze the entire page?
7396 : : */
7397 : 841020 : pagefrz->freeze_required =
7398 : 1682040 : heap_tuple_should_freeze(tuple, cutoffs,
7399 : 841020 : &pagefrz->NoFreezePageRelfrozenXid,
7400 : 841020 : &pagefrz->NoFreezePageRelminMxid);
7401 : 841020 : }
7402 : :
7403 : : /* Tell caller if this tuple has a usable freeze plan set in *frz */
7404 [ + + + - : 1062076 : return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
- + ]
7405 : 1062076 : }
7406 : :
7407 : : /*
7408 : : * Perform xmin/xmax XID status sanity checks before actually executing freeze
7409 : : * plans.
7410 : : *
7411 : : * heap_prepare_freeze_tuple doesn't perform these checks directly because
7412 : : * pg_xact lookups are relatively expensive. They shouldn't be repeated by
7413 : : * successive VACUUMs that each decide against freezing the same page.
7414 : : */
7415 : : void
7416 : 1921 : heap_pre_freeze_checks(Buffer buffer,
7417 : : HeapTupleFreeze *tuples, int ntuples)
7418 : : {
7419 : 1921 : Page page = BufferGetPage(buffer);
7420 : :
7421 [ + + ]: 92723 : for (int i = 0; i < ntuples; i++)
7422 : : {
7423 : 90802 : HeapTupleFreeze *frz = tuples + i;
7424 : 90802 : ItemId itemid = PageGetItemId(page, frz->offset);
7425 : 90802 : HeapTupleHeader htup;
7426 : :
7427 : 90802 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
7428 : :
7429 : : /* Deliberately avoid relying on tuple hint bits here */
7430 [ - + ]: 90802 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
7431 : : {
7432 : 90802 : TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
7433 : :
7434 [ + - ]: 90802 : Assert(!HeapTupleHeaderXminFrozen(htup));
7435 [ + - ]: 90802 : if (unlikely(!TransactionIdDidCommit(xmin)))
7436 [ # # # # ]: 0 : ereport(ERROR,
7437 : : (errcode(ERRCODE_DATA_CORRUPTED),
7438 : : errmsg_internal("uncommitted xmin %u needs to be frozen",
7439 : : xmin)));
7440 : 90802 : }
7441 : :
7442 : : /*
7443 : : * TransactionIdDidAbort won't work reliably in the presence of XIDs
7444 : : * left behind by transactions that were in progress during a crash,
7445 : : * so we can only check that xmax didn't commit
7446 : : */
7447 [ + - ]: 90802 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
7448 : : {
7449 : 0 : TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
7450 : :
7451 [ # # ]: 0 : Assert(TransactionIdIsNormal(xmax));
7452 [ # # ]: 0 : if (unlikely(TransactionIdDidCommit(xmax)))
7453 [ # # # # ]: 0 : ereport(ERROR,
7454 : : (errcode(ERRCODE_DATA_CORRUPTED),
7455 : : errmsg_internal("cannot freeze committed xmax %u",
7456 : : xmax)));
7457 : 0 : }
7458 : 90802 : }
7459 : 1921 : }
7460 : :
7461 : : /*
7462 : : * Helper which executes freezing of one or more heap tuples on a page on
7463 : : * behalf of caller. Caller passes an array of tuple plans from
7464 : : * heap_prepare_freeze_tuple. Caller must set 'offset' in each plan for us.
7465 : : * Must be called in a critical section that also marks the buffer dirty and,
7466 : : * if needed, emits WAL.
7467 : : */
7468 : : void
7469 : 1921 : heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
7470 : : {
7471 : 1921 : Page page = BufferGetPage(buffer);
7472 : :
7473 [ + + ]: 92723 : for (int i = 0; i < ntuples; i++)
7474 : : {
7475 : 90802 : HeapTupleFreeze *frz = tuples + i;
7476 : 90802 : ItemId itemid = PageGetItemId(page, frz->offset);
7477 : 90802 : HeapTupleHeader htup;
7478 : :
7479 : 90802 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
7480 : 90802 : heap_execute_freeze_tuple(htup, frz);
7481 : 90802 : }
7482 : 1921 : }
7483 : :
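                : : /*
                : :  * Editor's sketch (hypothetical, not part of heapam.c): how a
                : :  * vacuumlazy.c-style caller ties together the three routines above.  The
                : :  * function name and locals are invented for illustration; the call order
                : :  * and the critical-section rule come from the comments above.
                : :  */
                : : static void
                : : freeze_page_sketch(Buffer buffer, const struct VacuumCutoffs *cutoffs,
                : : 				   HeapPageFreeze *pagefrz)
                : : {
                : : 	Page		page = BufferGetPage(buffer);
                : : 	OffsetNumber offnum,
                : : 				maxoff = PageGetMaxOffsetNumber(page);
                : : 	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
                : : 	int			nfrozen = 0;
                : :
                : : 	for (offnum = FirstOffsetNumber; offnum <= maxoff;
                : : 		 offnum = OffsetNumberNext(offnum))
                : : 	{
                : : 		ItemId		itemid = PageGetItemId(page, offnum);
                : : 		HeapTupleHeader htup;
                : : 		bool		totally_frozen;
                : :
                : : 		if (!ItemIdIsNormal(itemid))
                : : 			continue;
                : : 		htup = (HeapTupleHeader) PageGetItem(page, itemid);
                : : 		if (heap_prepare_freeze_tuple(htup, cutoffs, pagefrz,
                : : 									  &frozen[nfrozen], &totally_frozen))
                : : 			frozen[nfrozen++].offset = offnum;	/* caller sets 'offset' */
                : : 	}
                : :
                : : 	/* Caller chooses whether to freeze; it must when freeze_required */
                : : 	if (nfrozen > 0 && pagefrz->freeze_required)
                : : 	{
                : : 		/* pg_xact sanity checks run before the critical section */
                : : 		heap_pre_freeze_checks(buffer, frozen, nfrozen);
                : :
                : : 		START_CRIT_SECTION();
                : : 		heap_freeze_prepared_tuples(buffer, frozen, nfrozen);
                : : 		MarkBufferDirty(buffer);
                : : 		/* a real caller would also emit WAL for the page here */
                : : 		END_CRIT_SECTION();
                : : 	}
                : : }
                : :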
7484 : : /*
7485 : : * heap_freeze_tuple
7486 : : * Freeze tuple in place, without WAL logging.
7487 : : *
7488 : : * Useful for callers like CLUSTER that perform their own WAL logging.
7489 : : */
7490 : : bool
7491 : 114899 : heap_freeze_tuple(HeapTupleHeader tuple,
7492 : : TransactionId relfrozenxid, TransactionId relminmxid,
7493 : : TransactionId FreezeLimit, TransactionId MultiXactCutoff)
7494 : : {
7495 : 114899 : HeapTupleFreeze frz;
7496 : 114899 : bool do_freeze;
7497 : 114899 : bool totally_frozen;
7498 : 114899 : struct VacuumCutoffs cutoffs;
7499 : 114899 : HeapPageFreeze pagefrz;
7500 : :
7501 : 114899 : cutoffs.relfrozenxid = relfrozenxid;
7502 : 114899 : cutoffs.relminmxid = relminmxid;
7503 : 114899 : cutoffs.OldestXmin = FreezeLimit;
7504 : 114899 : cutoffs.OldestMxact = MultiXactCutoff;
7505 : 114899 : cutoffs.FreezeLimit = FreezeLimit;
7506 : 114899 : cutoffs.MultiXactCutoff = MultiXactCutoff;
7507 : :
7508 : 114899 : pagefrz.freeze_required = true;
7509 : 114899 : pagefrz.FreezePageRelfrozenXid = FreezeLimit;
7510 : 114899 : pagefrz.FreezePageRelminMxid = MultiXactCutoff;
7511 : 114899 : pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
7512 : 114899 : pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
7513 : :
7514 : 114899 : do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
7515 : : &pagefrz, &frz, &totally_frozen);
7516 : :
7517 : : /*
7518 : : * Note that because this is not a WAL-logged operation, we don't need to
7519 : : * fill in the offset in the freeze record.
7520 : : */
7521 : :
7522 [ + + ]: 114899 : if (do_freeze)
7523 : 90497 : heap_execute_freeze_tuple(tuple, &frz);
7524 : 229798 : return do_freeze;
7525 : 114899 : }
7526 : :
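                : : /*
                : :  * Editor's sketch (hypothetical): a CLUSTER-style rewrite that WAL-logs
                : :  * whole pages itself can freeze a copied tuple in place like this.  The
                : :  * function and parameter names are assumptions, not taken from
                : :  * rewriteheap.c.
                : :  */
                : : static void
                : : rewrite_freeze_sketch(HeapTuple copied_tuple,
                : : 					  TransactionId relfrozenxid, TransactionId relminmxid,
                : : 					  TransactionId FreezeXid, TransactionId MultiXactCutoff)
                : : {
                : : 	/* header is rewritten in place; caller's page-level WAL covers it */
                : : 	(void) heap_freeze_tuple(copied_tuple->t_data,
                : : 							 relfrozenxid, relminmxid,
                : : 							 FreezeXid, MultiXactCutoff);
                : : }
                : :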
7527 : : /*
7528 : : * For a given MultiXactId, return the hint bits that should be set in the
7529 : : * tuple's infomask.
7530 : : *
7531 : : * Normally this should be called for a multixact that was just created, and
7532                 :                :  * so is in our local cache, making the GetMembers call fast.
7533 : : */
7534 : : static void
7535 : 2 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
7536 : : uint16 *new_infomask2)
7537 : : {
7538 : 2 : int nmembers;
7539 : 2 : MultiXactMember *members;
7540 : 2 : int i;
7541 : 2 : uint16 bits = HEAP_XMAX_IS_MULTI;
7542 : 2 : uint16 bits2 = 0;
7543 : 2 : bool has_update = false;
7544 : 2 : LockTupleMode strongest = LockTupleKeyShare;
7545 : :
7546 : : /*
7547 : : * We only use this in multis we just created, so they cannot be values
7548 : : * pre-pg_upgrade.
7549 : : */
7550 : 2 : nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7551 : :
7552 [ + + ]: 6 : for (i = 0; i < nmembers; i++)
7553 : : {
7554 : 4 : LockTupleMode mode;
7555 : :
7556 : : /*
7557 : : * Remember the strongest lock mode held by any member of the
7558 : : * multixact.
7559 : : */
7560 : 4 : mode = TUPLOCK_from_mxstatus(members[i].status);
7561 [ + + ]: 4 : if (mode > strongest)
7562 : 2 : strongest = mode;
7563 : :
7564 : : /* See what other bits we need */
7565 [ + + + - : 4 : switch (members[i].status)
- ]
7566 : : {
7567 : : case MultiXactStatusForKeyShare:
7568 : : case MultiXactStatusForShare:
7569 : : case MultiXactStatusForNoKeyUpdate:
7570 : 2 : break;
7571 : :
7572 : : case MultiXactStatusForUpdate:
7573 : 1 : bits2 |= HEAP_KEYS_UPDATED;
7574 : 1 : break;
7575 : :
7576 : : case MultiXactStatusNoKeyUpdate:
7577 : 1 : has_update = true;
7578 : 1 : break;
7579 : :
7580 : : case MultiXactStatusUpdate:
7581 : 0 : bits2 |= HEAP_KEYS_UPDATED;
7582 : 0 : has_update = true;
7583 : 0 : break;
7584 : : }
7585 : 4 : }
7586 : :
7587 [ + + + - ]: 2 : if (strongest == LockTupleExclusive ||
7588 : 1 : strongest == LockTupleNoKeyExclusive)
7589 : 2 : bits |= HEAP_XMAX_EXCL_LOCK;
7590 [ # # ]: 0 : else if (strongest == LockTupleShare)
7591 : 0 : bits |= HEAP_XMAX_SHR_LOCK;
7592 [ # # ]: 0 : else if (strongest == LockTupleKeyShare)
7593 : 0 : bits |= HEAP_XMAX_KEYSHR_LOCK;
7594 : :
7595 [ + + ]: 2 : if (!has_update)
7596 : 1 : bits |= HEAP_XMAX_LOCK_ONLY;
7597 : :
7598 [ - + ]: 2 : if (nmembers > 0)
7599 : 2 : pfree(members);
7600 : :
7601 : 2 : *new_infomask = bits;
7602 : 2 : *new_infomask2 = bits2;
7603 : 2 : }
7604 : :
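                : : /*
                : :  * Editor's worked example (derived from the code above): for a multi with
                : :  * members {ForKeyShare, NoKeyUpdate}, the strongest mode is
                : :  * LockTupleNoKeyExclusive, so *new_infomask comes back as
                : :  * HEAP_XMAX_IS_MULTI | HEAP_XMAX_EXCL_LOCK; has_update suppresses
                : :  * HEAP_XMAX_LOCK_ONLY, and *new_infomask2 stays zero because neither
                : :  * member status sets HEAP_KEYS_UPDATED.
                : :  */
                : :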
7605 : : /*
7606 : : * MultiXactIdGetUpdateXid
7607 : : *
7608 : : * Given a multixact Xmax and corresponding infomask, which does not have the
7609 : : * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
7610 : : * transaction.
7611 : : *
7612 : : * Caller is expected to check the status of the updating transaction, if
7613 : : * necessary.
7614 : : */
7615 : : static TransactionId
7616 : 4 : MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
7617 : : {
7618 : 4 : TransactionId update_xact = InvalidTransactionId;
7619 : 4 : MultiXactMember *members;
7620 : 4 : int nmembers;
7621 : :
7622 [ + - ]: 4 : Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
7623 [ + - ]: 4 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
7624 : :
7625 : : /*
7626 : : * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
7627 : : * pre-pg_upgrade.
7628 : : */
7629 : 4 : nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
7630 : :
7631 [ - + ]: 4 : if (nmembers > 0)
7632 : : {
7633 : 4 : int i;
7634 : :
7635 [ + + ]: 12 : for (i = 0; i < nmembers; i++)
7636 : : {
7637 : : /* Ignore lockers */
7638 [ + + ]: 8 : if (!ISUPDATE_from_mxstatus(members[i].status))
7639 : 4 : continue;
7640 : :
7641 : : /* there can be at most one updater */
7642 [ - + ]: 4 : Assert(update_xact == InvalidTransactionId);
7643 : 4 : update_xact = members[i].xid;
7644 : : #ifndef USE_ASSERT_CHECKING
7645 : :
7646 : : /*
7647                 :                :                          * In a non-assert build, stop at the first updater; an assert-enabled
7648                 :                :                          * build walks the whole array to ensure there's no other updater.
7649 : : */
7650 : : break;
7651 : : #endif
7652 : 4 : }
7653 : :
7654 : 4 : pfree(members);
7655 : 4 : }
7656 : :
7657 : 8 : return update_xact;
7658 : 4 : }
7659 : :
7660 : : /*
7661 : : * HeapTupleGetUpdateXid
7662 : : * As above, but use a HeapTupleHeader
7663 : : *
7664 : : * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
7665 : : * checking the hint bits.
7666 : : */
7667 : : TransactionId
7668 : 3 : HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
7669 : : {
7670 : 6 : return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tup),
7671 : 3 : tup->t_infomask);
7672 : : }
7673 : :
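                : : /*
                : :  * Editor's sketch (hypothetical): how a caller might resolve xmax to a
                : :  * single XID, mirroring the hint-bit checks the comment above refers to.
                : :  */
                : : static TransactionId
                : : resolve_xmax_sketch(HeapTupleHeader tuple)
                : : {
                : : 	if ((tuple->t_infomask & HEAP_XMAX_IS_MULTI) &&
                : : 		!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
                : : 		return HeapTupleGetUpdateXid(tuple);
                : : 	return HeapTupleHeaderGetRawXmax(tuple);
                : : }
                : :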
7674 : : /*
7675 : : * Does the given multixact conflict with the current transaction grabbing a
7676 : : * tuple lock of the given strength?
7677 : : *
7678 : : * The passed infomask pairs up with the given multixact in the tuple header.
7679 : : *
7680 : : * If current_is_member is not NULL, it is set to 'true' if the current
7681 : : * transaction is a member of the given multixact.
7682 : : */
7683 : : static bool
7684 : 0 : DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
7685 : : LockTupleMode lockmode, bool *current_is_member)
7686 : : {
7687 : 0 : int nmembers;
7688 : 0 : MultiXactMember *members;
7689 : 0 : bool result = false;
7690 : 0 : LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7691 : :
7692 [ # # ]: 0 : if (HEAP_LOCKED_UPGRADED(infomask))
7693 : 0 : return false;
7694 : :
7695 : 0 : nmembers = GetMultiXactIdMembers(multi, &members, false,
7696 : 0 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7697 [ # # ]: 0 : if (nmembers >= 0)
7698 : : {
7699 : 0 : int i;
7700 : :
7701 [ # # ]: 0 : for (i = 0; i < nmembers; i++)
7702 : : {
7703 : 0 : TransactionId memxid;
7704 : 0 : LOCKMODE memlockmode;
7705 : :
7706 [ # # # # : 0 : if (result && (current_is_member == NULL || *current_is_member))
# # ]
7707 : 0 : break;
7708 : :
7709 : 0 : memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7710 : :
7711 : : /* ignore members from current xact (but track their presence) */
7712 : 0 : memxid = members[i].xid;
7713 [ # # ]: 0 : if (TransactionIdIsCurrentTransactionId(memxid))
7714 : : {
7715 [ # # ]: 0 : if (current_is_member != NULL)
7716 : 0 : *current_is_member = true;
7717 : 0 : continue;
7718 : : }
7719 [ # # ]: 0 : else if (result)
7720 : 0 : continue;
7721 : :
7722 : : /* ignore members that don't conflict with the lock we want */
7723 [ # # ]: 0 : if (!DoLockModesConflict(memlockmode, wanted))
7724 : 0 : continue;
7725 : :
7726 [ # # ]: 0 : if (ISUPDATE_from_mxstatus(members[i].status))
7727 : : {
7728 : : /* ignore aborted updaters */
7729 [ # # ]: 0 : if (TransactionIdDidAbort(memxid))
7730 : 0 : continue;
7731 : 0 : }
7732 : : else
7733 : : {
7734 : : /* ignore lockers-only that are no longer in progress */
7735 [ # # ]: 0 : if (!TransactionIdIsInProgress(memxid))
7736 : 0 : continue;
7737 : : }
7738 : :
7739 : : /*
7740                 :                :              * wanted lock, or updaters that are not aborted.  Those conflict
7741 : : * wanted lock, and updaters that are not aborted. Those conflict
7742 : : * with what we want. Set up to return true, but keep going to
7743 : : * look for the current transaction among the multixact members,
7744 : : * if needed.
7745 : : */
7746 : 0 : result = true;
7747 [ # # # # ]: 0 : }
7748 : 0 : pfree(members);
7749 : 0 : }
7750 : :
7751 : 0 : return result;
7752 : 0 : }
7753 : :
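                : : /*
                : :  * Editor's sketch (hypothetical): a heap_lock_tuple-style caller can skip
                : :  * sleeping on a multixact entirely when no live member conflicts with the
                : :  * lock strength it wants.
                : :  */
                : : static bool
                : : can_skip_multixact_wait_sketch(MultiXactId multi, uint16 infomask,
                : : 							   LockTupleMode mode)
                : : {
                : : 	bool		current_is_member = false;
                : :
                : : 	/* true => no live conflicting member, so no need to wait at all */
                : : 	return !DoesMultiXactIdConflict(multi, infomask, mode,
                : : 									&current_is_member);
                : : }
                : :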
7754 : : /*
7755 : : * Do_MultiXactIdWait
7756 : : * Actual implementation for the two functions below.
7757 : : *
7758 : : * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
7759 : : * needed to ensure we only sleep on conflicting members, and the infomask is
7760 : : * used to optimize multixact access in case it's a lock-only multi); 'nowait'
7761 : : * indicates whether to use conditional lock acquisition, to allow callers to
7762 : : * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
7763 : : * context information for error messages. 'remaining', if not NULL, receives
7764 : : * the number of members that are still running, including any (non-aborted)
7765 : : * subtransactions of our own transaction. 'logLockFailure' indicates whether
7766 : : * to log details when a lock acquisition fails with 'nowait' enabled.
7767 : : *
7768 : : * We do this by sleeping on each member using XactLockTableWait. Any
7769 : : * members that belong to the current backend are *not* waited for, however;
7770 : : * this would not merely be useless but would lead to Assert failure inside
7771 : : * XactLockTableWait. By the time this returns, it is certain that all
7772 : : * transactions *of other backends* that were members of the MultiXactId
7773 : : * that conflict with the requested status are dead (and no new ones can have
7774 : : * been added, since it is not legal to add members to an existing
7775 : : * MultiXactId).
7776 : : *
7777 : : * But by the time we finish sleeping, someone else may have changed the Xmax
7778 : : * of the containing tuple, so the caller needs to iterate on us somehow.
7779 : : *
7780 : : * Note that in case we return false, the number of remaining members is
7781 : : * not to be trusted.
7782 : : */
7783 : : static bool
7784 : 1 : Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7785 : : uint16 infomask, bool nowait,
7786 : : Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7787 : : int *remaining, bool logLockFailure)
7788 : : {
7789 : 1 : bool result = true;
7790 : 1 : MultiXactMember *members;
7791 : 1 : int nmembers;
7792 : 1 : int remain = 0;
7793 : :
7794 : : /* for pre-pg_upgrade tuples, no need to sleep at all */
7795 [ - + ]: 1 : nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7796 : 2 : GetMultiXactIdMembers(multi, &members, false,
7797 : 1 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7798 : :
7799 [ - + ]: 1 : if (nmembers >= 0)
7800 : : {
7801 : 1 : int i;
7802 : :
7803 [ + + ]: 3 : for (i = 0; i < nmembers; i++)
7804 : : {
7805 : 2 : TransactionId memxid = members[i].xid;
7806 : 2 : MultiXactStatus memstatus = members[i].status;
7807 : :
7808 [ + + ]: 2 : if (TransactionIdIsCurrentTransactionId(memxid))
7809 : : {
7810 : 1 : remain++;
7811 : 1 : continue;
7812 : : }
7813 : :
7814 [ + - + - ]: 2 : if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7815 : 1 : LOCKMODE_from_mxstatus(status)))
7816 : : {
7817 [ # # # # ]: 0 : if (remaining && TransactionIdIsInProgress(memxid))
7818 : 0 : remain++;
7819 : 0 : continue;
7820 : : }
7821 : :
7822 : : /*
7823 : : * This member conflicts with our multi, so we have to sleep (or
7824 : : * return failure, if asked to avoid waiting.)
7825 : : *
7826 : : * Note that we don't set up an error context callback ourselves,
7827 : : * but instead we pass the info down to XactLockTableWait. This
7828 : : * might seem a bit wasteful because the context is set up and
7829 : : * tore down for each member of the multixact, but in reality it
7830                 :                :              * torn down for each member of the multixact, but in reality it
7831 : : */
7832 [ - + ]: 1 : if (nowait)
7833 : : {
7834 : 0 : result = ConditionalXactLockTableWait(memxid, logLockFailure);
7835 [ # # ]: 0 : if (!result)
7836 : 0 : break;
7837 : 0 : }
7838 : : else
7839 : 1 : XactLockTableWait(memxid, rel, ctid, oper);
7840 [ - + - + ]: 2 : }
7841 : :
7842 : 1 : pfree(members);
7843 : 1 : }
7844 : :
7845 [ + - ]: 1 : if (remaining)
7846 : 0 : *remaining = remain;
7847 : :
7848 : 2 : return result;
7849 : 1 : }
7850 : :
7851 : : /*
7852 : : * MultiXactIdWait
7853 : : * Sleep on a MultiXactId.
7854 : : *
7855 : : * By the time we finish sleeping, someone else may have changed the Xmax
7856 : : * of the containing tuple, so the caller needs to iterate on us somehow.
7857 : : *
7858 : : * We return (in *remaining, if not NULL) the number of members that are still
7859 : : * running, including any (non-aborted) subtransactions of our own transaction.
7860 : : */
7861 : : static void
7862 : 1 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
7863 : : Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7864 : : int *remaining)
7865 : : {
7866 : 2 : (void) Do_MultiXactIdWait(multi, status, infomask, false,
7867 : 1 : rel, ctid, oper, remaining, false);
7868 : 1 : }
7869 : :
7870 : : /*
7871 : : * ConditionalMultiXactIdWait
7872 : : * As above, but only lock if we can get the lock without blocking.
7873 : : *
7874 : : * By the time we finish sleeping, someone else may have changed the Xmax
7875 : : * of the containing tuple, so the caller needs to iterate on us somehow.
7876 : : *
7877                 :                :  * Returns true if the multixact is now all gone; returns false if some
7878                 :                :  * transactions might still be running.
7879 : : *
7880 : : * We return (in *remaining, if not NULL) the number of members that are still
7881 : : * running, including any (non-aborted) subtransactions of our own transaction.
7882 : : */
7883 : : static bool
7884 : 0 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7885 : : uint16 infomask, Relation rel, int *remaining,
7886 : : bool logLockFailure)
7887 : : {
7888 : 0 : return Do_MultiXactIdWait(multi, status, infomask, true,
7889 : 0 : rel, NULL, XLTW_None, remaining, logLockFailure);
7890 : : }
7891 : :
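                : : /*
                : :  * Editor's sketch (hypothetical): a LockWaitError-policy caller of the
                : :  * conditional variant, modeled on the error heap_lock_tuple raises when a
                : :  * conflicting lock cannot be obtained without waiting.
                : :  */
                : : static void
                : : multixact_wait_nowait_sketch(MultiXactId multi, MultiXactStatus status,
                : : 							 uint16 infomask, Relation rel)
                : : {
                : : 	if (!ConditionalMultiXactIdWait(multi, status, infomask, rel,
                : : 									NULL, false))
                : : 		ereport(ERROR,
                : : 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                : : 				 errmsg("could not obtain lock on row in relation \"%s\"",
                : : 						RelationGetRelationName(rel))));
                : : }
                : :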
7892 : : /*
7893 : : * heap_tuple_needs_eventual_freeze
7894 : : *
7895 : : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7896 : : * will eventually require freezing (if tuple isn't removed by pruning first).
7897 : : */
7898 : : bool
7899 : 122351 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
7900 : : {
7901 : 122351 : TransactionId xid;
7902 : :
7903 : : /*
7904 : : * If xmin is a normal transaction ID, this tuple is definitely not
7905 : : * frozen.
7906 : : */
7907 : 122351 : xid = HeapTupleHeaderGetXmin(tuple);
7908 [ + + ]: 122351 : if (TransactionIdIsNormal(xid))
7909 : 4344 : return true;
7910 : :
7911 : : /*
7912 : : * If xmax is a valid xact or multixact, this tuple is also not frozen.
7913 : : */
7914 [ - + ]: 118007 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7915 : : {
7916 : 0 : MultiXactId multi;
7917 : :
7918 : 0 : multi = HeapTupleHeaderGetRawXmax(tuple);
7919 [ # # ]: 0 : if (MultiXactIdIsValid(multi))
7920 : 0 : return true;
7921 [ # # ]: 0 : }
7922 : : else
7923 : : {
7924 : 118007 : xid = HeapTupleHeaderGetRawXmax(tuple);
7925 [ + + ]: 118007 : if (TransactionIdIsNormal(xid))
7926 : 1 : return true;
7927 : : }
7928 : :
7929 [ + - ]: 118006 : if (tuple->t_infomask & HEAP_MOVED)
7930 : : {
7931 : 0 : xid = HeapTupleHeaderGetXvac(tuple);
7932 [ # # ]: 0 : if (TransactionIdIsNormal(xid))
7933 : 0 : return true;
7934 : 0 : }
7935 : :
7936 : 118006 : return false;
7937 : 122351 : }
7938 : :
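                : : /*
                : :  * Editor's sketch (hypothetical): deciding whether a page whose dead items
                : :  * are already gone can be treated as all-frozen, using the routine above.
                : :  */
                : : static bool
                : : page_all_frozen_sketch(Page page)
                : : {
                : : 	OffsetNumber offnum,
                : : 				maxoff = PageGetMaxOffsetNumber(page);
                : :
                : : 	for (offnum = FirstOffsetNumber; offnum <= maxoff;
                : : 		 offnum = OffsetNumberNext(offnum))
                : : 	{
                : : 		ItemId		itemid = PageGetItemId(page, offnum);
                : : 		HeapTupleHeader htup;
                : :
                : : 		if (!ItemIdIsNormal(itemid))
                : : 			continue;
                : : 		htup = (HeapTupleHeader) PageGetItem(page, itemid);
                : : 		if (heap_tuple_needs_eventual_freeze(htup))
                : : 			return false;
                : : 	}
                : : 	return true;
                : : }
                : :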
7939 : : /*
7940 : : * heap_tuple_should_freeze
7941 : : *
7942                 :                :  * Return value indicates whether the heap_prepare_freeze_tuple sibling function
7943                 :                :  * would (or should) force freezing of the heap page that contains caller's tuple.
7944 : : * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
7945 : : * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
7946 : : *
7947 : : * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
7948 : : * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
7949 : : * Our working assumption is that caller won't decide to freeze this tuple.
7950 : : * It's up to caller to only ratchet back its own top-level trackers after the
7951 : : * point that it fully commits to not freezing the tuple/page in question.
7952 : : */
7953 : : bool
7954 : 841020 : heap_tuple_should_freeze(HeapTupleHeader tuple,
7955 : : const struct VacuumCutoffs *cutoffs,
7956 : : TransactionId *NoFreezePageRelfrozenXid,
7957 : : MultiXactId *NoFreezePageRelminMxid)
7958 : : {
7959 : 841020 : TransactionId xid;
7960 : 841020 : MultiXactId multi;
7961 : 841020 : bool freeze = false;
7962 : :
7963 : : /* First deal with xmin */
7964 : 841020 : xid = HeapTupleHeaderGetXmin(tuple);
7965 [ + + ]: 841020 : if (TransactionIdIsNormal(xid))
7966 : : {
7967 [ + - ]: 840989 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7968 [ + + ]: 840989 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7969 : 2119 : *NoFreezePageRelfrozenXid = xid;
7970 [ + + ]: 840989 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7971 : 1545 : freeze = true;
7972 : 840989 : }
7973 : :
7974 : : /* Now deal with xmax */
7975 : 841020 : xid = InvalidTransactionId;
7976 : 841020 : multi = InvalidMultiXactId;
7977 [ - + ]: 841020 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7978 : 0 : multi = HeapTupleHeaderGetRawXmax(tuple);
7979 : : else
7980 : 841020 : xid = HeapTupleHeaderGetRawXmax(tuple);
7981 : :
7982 [ + + ]: 841020 : if (TransactionIdIsNormal(xid))
7983 : : {
7984 [ + - ]: 161356 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7985 : : /* xmax is a non-permanent XID */
7986 [ + + ]: 161356 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7987 : 1 : *NoFreezePageRelfrozenXid = xid;
7988 [ + + ]: 161356 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7989 : 1 : freeze = true;
7990 : 161356 : }
7991 [ - + ]: 679664 : else if (!MultiXactIdIsValid(multi))
7992 : : {
7993 : : /* xmax is a permanent XID or invalid MultiXactId/XID */
7994 : 679664 : }
7995 [ # # ]: 0 : else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
7996 : : {
7997 : : /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
7998 [ # # ]: 0 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7999 : 0 : *NoFreezePageRelminMxid = multi;
8000 : : /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
8001 : 0 : freeze = true;
8002 : 0 : }
8003 : : else
8004 : : {
8005 : : /* xmax is a MultiXactId that may have an updater XID */
8006 : 0 : MultiXactMember *members;
8007 : 0 : int nmembers;
8008 : :
8009 [ # # ]: 0 : Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
8010 [ # # ]: 0 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
8011 : 0 : *NoFreezePageRelminMxid = multi;
8012 [ # # ]: 0 : if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
8013 : 0 : freeze = true;
8014 : :
8015 : : /* need to check whether any member of the mxact is old */
8016 : 0 : nmembers = GetMultiXactIdMembers(multi, &members, false,
8017 : 0 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
8018 : :
8019 [ # # ]: 0 : for (int i = 0; i < nmembers; i++)
8020 : : {
8021 : 0 : xid = members[i].xid;
8022 [ # # ]: 0 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
8023 [ # # ]: 0 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
8024 : 0 : *NoFreezePageRelfrozenXid = xid;
8025 [ # # ]: 0 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
8026 : 0 : freeze = true;
8027 : 0 : }
8028 [ # # ]: 0 : if (nmembers > 0)
8029 : 0 : pfree(members);
8030 : 0 : }
8031 : :
8032 [ + - ]: 841020 : if (tuple->t_infomask & HEAP_MOVED)
8033 : : {
8034 : 0 : xid = HeapTupleHeaderGetXvac(tuple);
8035 [ # # ]: 0 : if (TransactionIdIsNormal(xid))
8036 : : {
8037 [ # # ]: 0 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
8038 [ # # ]: 0 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
8039 : 0 : *NoFreezePageRelfrozenXid = xid;
8040 : : /* heap_prepare_freeze_tuple forces xvac freezing */
8041 : 0 : freeze = true;
8042 : 0 : }
8043 : 0 : }
8044 : :
8045 : 1682040 : return freeze;
8046 : 841020 : }
8047 : :
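                : : /*
                : :  * Editor's example (hypothetical XID values): with FreezeLimit = 1000 and
                : :  * a tuple whose xmin is 1500, the function returns false (no forced
                : :  * freeze), but ratchets *NoFreezePageRelfrozenXid back to 1500 if it was
                : :  * newer.  A VACUUM that then skips freezing this page can advance
                : :  * relfrozenxid no further than 1500.
                : :  */
                : :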
8048 : : /*
8049 : : * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
8050 : : * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
8051 : : * that caller is in the process of physically removing, e.g. via HOT pruning
8052 : : * or index deletion.
8053 : : *
8054 : : * Caller must initialize its value to InvalidTransactionId, which is
8055 : : * generally interpreted as "definitely no need for a recovery conflict".
8056 : : * Final value must reflect all heap tuples that caller will physically remove
8057 : : * (or remove TID references to) via its ongoing pruning/deletion operation.
8058 : : * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
8059 : : * caller's WAL record) by REDO routine when it replays caller's operation.
8060 : : */
8061 : : void
8062 : 196068 : HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
8063 : : TransactionId *snapshotConflictHorizon)
8064 : : {
8065 : 196068 : TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
8066 : 196068 : TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
8067 : 196068 : TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
8068 : :
8069 [ + - ]: 196068 : if (tuple->t_infomask & HEAP_MOVED)
8070 : : {
8071 [ # # ]: 0 : if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
8072 : 0 : *snapshotConflictHorizon = xvac;
8073 : 0 : }
8074 : :
8075 : : /*
8076 : : * Ignore tuples inserted by an aborted transaction or if the tuple was
8077 : : * updated/deleted by the inserting transaction.
8078 : : *
8079 : : * Look for a committed hint bit, or if no xmin bit is set, check clog.
8080 : : */
8081 [ + + + - ]: 196070 : if (HeapTupleHeaderXminCommitted(tuple) ||
8082 [ + + ]: 25277 : (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
8083 : : {
8084 [ + + + + ]: 170793 : if (xmax != xmin &&
8085 : 158610 : TransactionIdFollows(xmax, *snapshotConflictHorizon))
8086 : 16316 : *snapshotConflictHorizon = xmax;
8087 : 170793 : }
8088 : 196068 : }
8089 : :
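                : : /*
                : :  * Editor's sketch (hypothetical): a pruning-style caller accumulating the
                : :  * horizon for its WAL record, per the contract described above.
                : :  */
                : : static TransactionId
                : : compute_conflict_horizon_sketch(HeapTupleHeader *removed, int nremoved)
                : : {
                : : 	TransactionId snapshotConflictHorizon = InvalidTransactionId;
                : :
                : : 	for (int i = 0; i < nremoved; i++)
                : : 		HeapTupleHeaderAdvanceConflictHorizon(removed[i],
                : : 											  &snapshotConflictHorizon);
                : :
                : : 	/* final value is carried in the caller's prune/deletion WAL record */
                : : 	return snapshotConflictHorizon;
                : : }
                : :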
8090 : : #ifdef USE_PREFETCH
8091 : : /*
8092 : : * Helper function for heap_index_delete_tuples. Issues prefetch requests for
8093 : : * prefetch_count buffers. The prefetch_state keeps track of all the buffers
8094 : : * we can prefetch, and which have already been prefetched; each call to this
8095 : : * function picks up where the previous call left off.
8096 : : *
8097 : : * Note: we expect the deltids array to be sorted in an order that groups TIDs
8098 : : * by heap block, with all TIDs for each block appearing together in exactly
8099 : : * one group.
8100 : : */
8101 : : static void
8102 : 3680 : index_delete_prefetch_buffer(Relation rel,
8103 : : IndexDeletePrefetchState *prefetch_state,
8104 : : int prefetch_count)
8105 : : {
8106 : 3680 : BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
8107 : 3680 : int count = 0;
8108 : 3680 : int i;
8109 : 3680 : int ndeltids = prefetch_state->ndeltids;
8110 : 3680 : TM_IndexDelete *deltids = prefetch_state->deltids;
8111 : :
8112 [ + + ]: 216650 : for (i = prefetch_state->next_item;
8113 [ + + ]: 108325 : i < ndeltids && count < prefetch_count;
8114 : 104645 : i++)
8115 : : {
8116 : 104645 : ItemPointer htid = &deltids[i].tid;
8117 : :
8118 [ + + + + ]: 104645 : if (cur_hblkno == InvalidBlockNumber ||
8119 : 103608 : ItemPointerGetBlockNumber(htid) != cur_hblkno)
8120 : : {
8121 : 3091 : cur_hblkno = ItemPointerGetBlockNumber(htid);
8122 : 3091 : PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
8123 : 3091 : count++;
8124 : 3091 : }
8125 : 104645 : }
8126 : :
8127 : : /*
8128 : : * Save the prefetch position so that next time we can continue from that
8129 : : * position.
8130 : : */
8131 : 3680 : prefetch_state->next_item = i;
8132 : 3680 : prefetch_state->cur_hblkno = cur_hblkno;
8133 : 3680 : }
8134 : : #endif
8135 : :
8136 : : /*
8137 : : * Helper function for heap_index_delete_tuples. Checks for index corruption
8138 : : * involving an invalid TID in index AM caller's index page.
8139 : : *
8140 : : * This is an ideal place for these checks. The index AM must hold a buffer
8141 : : * lock on the index page containing the TIDs we examine here, so we don't
8142 : : * have to worry about concurrent VACUUMs at all. We can be sure that the
8143 : : * index is corrupt when htid points directly to an LP_UNUSED item or
8144 : : * heap-only tuple, which is not the case during standard index scans.
8145 : : */
8146 : : static inline void
8147 : 88977 : index_delete_check_htid(TM_IndexDeleteOp *delstate,
8148 : : Page page, OffsetNumber maxoff,
8149 : : const ItemPointerData *htid, TM_IndexStatus *istatus)
8150 : : {
8151 : 88977 : OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
8152 : 88977 : ItemId iid;
8153 : :
8154 [ - + + - ]: 88977 : Assert(OffsetNumberIsValid(istatus->idxoffnum));
8155 : :
8156 [ + - ]: 88977 : if (unlikely(indexpagehoffnum > maxoff))
8157 [ # # # # ]: 0 : ereport(ERROR,
8158 : : (errcode(ERRCODE_INDEX_CORRUPTED),
8159 : : errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
8160 : : ItemPointerGetBlockNumber(htid),
8161 : : indexpagehoffnum,
8162 : : istatus->idxoffnum, delstate->iblknum,
8163 : : RelationGetRelationName(delstate->irel))));
8164 : :
8165 : 88977 : iid = PageGetItemId(page, indexpagehoffnum);
8166 [ + - ]: 88977 : if (unlikely(!ItemIdIsUsed(iid)))
8167 [ # # # # ]: 0 : ereport(ERROR,
8168 : : (errcode(ERRCODE_INDEX_CORRUPTED),
8169 : : errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
8170 : : ItemPointerGetBlockNumber(htid),
8171 : : indexpagehoffnum,
8172 : : istatus->idxoffnum, delstate->iblknum,
8173 : : RelationGetRelationName(delstate->irel))));
8174 : :
8175 [ + + ]: 88977 : if (ItemIdHasStorage(iid))
8176 : : {
8177 : 54528 : HeapTupleHeader htup;
8178 : :
8179 [ + - ]: 54528 : Assert(ItemIdIsNormal(iid));
8180 : 54528 : htup = (HeapTupleHeader) PageGetItem(page, iid);
8181 : :
8182 [ + - ]: 54528 : if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
8183 [ # # # # ]: 0 : ereport(ERROR,
8184 : : (errcode(ERRCODE_INDEX_CORRUPTED),
8185 : : errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
8186 : : ItemPointerGetBlockNumber(htid),
8187 : : indexpagehoffnum,
8188 : : istatus->idxoffnum, delstate->iblknum,
8189 : : RelationGetRelationName(delstate->irel))));
8190 : 54528 : }
8191 : 88977 : }
8192 : :
8193 : : /*
8194 : : * heapam implementation of tableam's index_delete_tuples interface.
8195 : : *
8196 : : * This helper function is called by index AMs during index tuple deletion.
8197 : : * See tableam header comments for an explanation of the interface implemented
8198 : : * here and a general theory of operation. Note that each call here is either
8199 : : * a simple index deletion call, or a bottom-up index deletion call.
8200 : : *
8201 : : * It's possible for this to generate a fair amount of I/O, since we may be
8202 : : * deleting hundreds of tuples from a single index block. To amortize that
8203 : : * cost to some degree, this uses prefetching and combines repeat accesses to
8204 : : * the same heap block.
8205 : : */
8206 : : TransactionId
8207 : 1037 : heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
8208 : : {
8209 : : /* Initial assumption is that earlier pruning took care of conflict */
8210 : 1037 : TransactionId snapshotConflictHorizon = InvalidTransactionId;
8211 : 1037 : BlockNumber blkno = InvalidBlockNumber;
8212 : 1037 : Buffer buf = InvalidBuffer;
8213 : 1037 : Page page = NULL;
8214 : 1037 : OffsetNumber maxoff = InvalidOffsetNumber;
8215 : 1037 : TransactionId priorXmax;
8216 : : #ifdef USE_PREFETCH
8217 : 1037 : IndexDeletePrefetchState prefetch_state;
8218 : 1037 : int prefetch_distance;
8219 : : #endif
8220 : 1037 : SnapshotData SnapshotNonVacuumable;
8221 : 1037 : int finalndeltids = 0,
8222 : 1037 : nblocksaccessed = 0;
8223 : :
8224 : : /* State that's only used in bottom-up index deletion case */
8225 : 1037 : int nblocksfavorable = 0;
8226 : 2074 : int curtargetfreespace = delstate->bottomupfreespace,
8227 : 1037 : lastfreespace = 0,
8228 : 1037 : actualfreespace = 0;
8229 : 1037 : bool bottomup_final_block = false;
8230 : :
8231 : 1037 : InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
8232 : :
8233 : : /* Sort caller's deltids array by TID for further processing */
8234 : 1037 : index_delete_sort(delstate);
8235 : :
8236 : : /*
8237                 :                :      * Bottom-up case: re-sort deltids array in an order attuned to where the
8238 : : * greatest number of promising TIDs are to be found, and determine how
8239 : : * many blocks from the start of sorted array should be considered
8240 : : * favorable. This will also shrink the deltids array in order to
8241 : : * eliminate completely unfavorable blocks up front.
8242 : : */
8243 [ + + ]: 1037 : if (delstate->bottomup)
8244 : 183 : nblocksfavorable = bottomup_sort_and_shrink(delstate);
8245 : :
8246 : : #ifdef USE_PREFETCH
8247 : : /* Initialize prefetch state. */
8248 : 1037 : prefetch_state.cur_hblkno = InvalidBlockNumber;
8249 : 1037 : prefetch_state.next_item = 0;
8250 : 1037 : prefetch_state.ndeltids = delstate->ndeltids;
8251 : 1037 : prefetch_state.deltids = delstate->deltids;
8252 : :
8253 : : /*
8254 : : * Determine the prefetch distance that we will attempt to maintain.
8255 : : *
8256 : : * Since the caller holds a buffer lock somewhere in rel, we'd better make
8257 : : * sure that isn't a catalog relation before we call code that does
8258 : : * syscache lookups, to avoid risk of deadlock.
8259 : : */
8260 [ + + ]: 1037 : if (IsCatalogRelation(rel))
8261 : 871 : prefetch_distance = maintenance_io_concurrency;
8262 : : else
8263 : 166 : prefetch_distance =
8264 : 166 : get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
8265 : :
8266 : : /* Cap initial prefetch distance for bottom-up deletion caller */
8267 [ + + ]: 1037 : if (delstate->bottomup)
8268 : : {
8269 [ + - ]: 183 : Assert(nblocksfavorable >= 1);
8270 [ + - ]: 183 : Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
8271 [ - + ]: 183 : prefetch_distance = Min(prefetch_distance, nblocksfavorable);
8272 : 183 : }
8273 : :
8274 : : /* Start prefetching. */
8275 : 1037 : index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
8276 : : #endif
8277 : :
8278 : : /* Iterate over deltids, determine which to delete, check their horizon */
8279 [ + - ]: 1037 : Assert(delstate->ndeltids > 0);
8280 [ + + ]: 90197 : for (int i = 0; i < delstate->ndeltids; i++)
8281 : : {
8282 : 89160 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8283 : 89160 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8284 : 89160 : ItemPointer htid = &ideltid->tid;
8285 : 89160 : OffsetNumber offnum;
8286 : :
8287 : : /*
8288 : : * Read buffer, and perform required extra steps each time a new block
8289 : : * is encountered. Avoid refetching if it's the same block as the one
8290 : : * from the last htid.
8291 : : */
8292 [ + + + + ]: 89160 : if (blkno == InvalidBlockNumber ||
8293 : 88123 : ItemPointerGetBlockNumber(htid) != blkno)
8294 : : {
8295 : : /*
8296 : : * Consider giving up early for bottom-up index deletion caller
8297 : : * first. (Only prefetch next-next block afterwards, when it
8298 : : * becomes clear that we're at least going to access the next
8299 : : * block in line.)
8300 : : *
8301 : : * Sometimes the first block frees so much space for bottom-up
8302 : : * caller that the deletion process can end without accessing any
8303 : : * more blocks. It is usually necessary to access 2 or 3 blocks
8304 : : * per bottom-up deletion operation, though.
8305 : : */
8306 [ + + ]: 2826 : if (delstate->bottomup)
8307 : : {
8308 : : /*
8309 : : * We often allow caller to delete a few additional items
8310 : : * whose entries we reached after the point that space target
8311 : : * from caller was satisfied. The cost of accessing the page
8312 : : * was already paid at that point, so it made sense to finish
8313 : : * it off. When that happened, we finalize everything here
8314 : : * (by finishing off the whole bottom-up deletion operation
8315 : : * without needlessly paying the cost of accessing any more
8316 : : * blocks).
8317 : : */
8318 [ + + ]: 387 : if (bottomup_final_block)
8319 : 21 : break;
8320 : :
8321 : : /*
8322 : : * Give up when we didn't enable our caller to free any
8323 : : * additional space as a result of processing the page that we
8324 : : * just finished up with. This rule is the main way in which
8325 : : * we keep the cost of bottom-up deletion under control.
8326 : : */
8327 [ + + + + ]: 366 : if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
8328 : 162 : break;
8329 : 204 : lastfreespace = actualfreespace; /* for next time */
8330 : :
8331 : : /*
8332 : : * Deletion operation (which is bottom-up) will definitely
8333 : : * access the next block in line. Prepare for that now.
8334 : : *
8335 : : * Decay target free space so that we don't hang on for too
8336 : : * long with a marginal case. (Space target is only truly
8337 : : * helpful when it allows us to recognize that we don't need
8338 : : * to access more than 1 or 2 blocks to satisfy caller due to
8339 : : * agreeable workload characteristics.)
8340 : : *
8341 : : * We are a bit more patient when we encounter contiguous
8342 : : * blocks, though: these are treated as favorable blocks. The
8343 : : * decay process is only applied when the next block in line
8344 : : * is not a favorable/contiguous block. This is not an
8345 : : * exception to the general rule; we still insist on finding
8346 : : * at least one deletable item per block accessed. See
8347 : : * bottomup_nblocksfavorable() for full details of the theory
8348 : : * behind favorable blocks and heap block locality in general.
8349 : : *
8350 : : * Note: The first block in line is always treated as a
8351 : : * favorable block, so the earliest possible point that the
8352 : : * decay can be applied is just before we access the second
8353 : : * block in line. The Assert() verifies this for us.
8354 : : */
8355 [ + + - + ]: 204 : Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
8356 [ + + ]: 204 : if (nblocksfavorable > 0)
8357 : 188 : nblocksfavorable--;
8358 : : else
8359 : 16 : curtargetfreespace /= 2;
8360 : 204 : }
8361 : :
8362 : : /* release old buffer */
8363 [ + + ]: 2643 : if (BufferIsValid(buf))
8364 : 1606 : UnlockReleaseBuffer(buf);
8365 : :
8366 : 2643 : blkno = ItemPointerGetBlockNumber(htid);
8367 : 2643 : buf = ReadBuffer(rel, blkno);
8368 : 2643 : nblocksaccessed++;
8369 [ + + + - ]: 2643 : Assert(!delstate->bottomup ||
8370 : : nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
8371 : :
8372 : : #ifdef USE_PREFETCH
8373 : :
8374 : : /*
8375 : : * To maintain the prefetch distance, prefetch one more page for
8376 : : * each page we read.
8377 : : */
8378 : 2643 : index_delete_prefetch_buffer(rel, &prefetch_state, 1);
8379 : : #endif
8380 : :
8381 : 2643 : LockBuffer(buf, BUFFER_LOCK_SHARE);
8382 : :
8383 : 2643 : page = BufferGetPage(buf);
8384 : 2643 : maxoff = PageGetMaxOffsetNumber(page);
8385 : 2643 : }
8386 : :
8387 : : /*
8388 : : * In passing, detect index corruption involving an index page with a
8389 : : * TID that points to a location in the heap that couldn't possibly be
8390 : : * correct. We only do this with actual TIDs from caller's index page
8391 : : * (not items reached by traversing through a HOT chain).
8392 : : */
8393 : 88977 : index_delete_check_htid(delstate, page, maxoff, htid, istatus);
8394 : :
8395 [ + + ]: 88977 : if (istatus->knowndeletable)
8396 [ + - ]: 20000 : Assert(!delstate->bottomup && !istatus->promising);
8397 : : else
8398 : : {
8399 : 68977 : ItemPointerData tmp = *htid;
8400 : 68977 : HeapTupleData heapTuple;
8401 : :
8402 : : /* Are any tuples from this HOT chain non-vacuumable? */
8403 [ + + ]: 68977 : if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
8404 : : &heapTuple, NULL, true))
8405 : 32392 : continue; /* can't delete entry */
8406 : :
8407 : : /* Caller will delete, since whole HOT chain is vacuumable */
8408 : 36585 : istatus->knowndeletable = true;
8409 : :
8410 : : /* Maintain index free space info for bottom-up deletion case */
8411 [ + + ]: 36585 : if (delstate->bottomup)
8412 : : {
8413 [ + - ]: 646 : Assert(istatus->freespace > 0);
8414 : 646 : actualfreespace += istatus->freespace;
8415 [ + + ]: 646 : if (actualfreespace >= curtargetfreespace)
8416 : 282 : bottomup_final_block = true;
8417 : 646 : }
8418 [ + + ]: 68977 : }
8419 : :
8420 : : /*
8421 : : * Maintain snapshotConflictHorizon value for deletion operation as a
8422 : : * whole by advancing current value using heap tuple headers. This is
8423 : : * loosely based on the logic for pruning a HOT chain.
8424 : : */
8425 : 56585 : offnum = ItemPointerGetOffsetNumber(htid);
8426 : 56585 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
8427 : 57001 : for (;;)
8428 : : {
8429 : 57459 : ItemId lp;
8430 : 57459 : HeapTupleHeader htup;
8431 : :
8432 : : /* Sanity check (pure paranoia) */
8433 [ + - ]: 57459 : if (offnum < FirstOffsetNumber)
8434 : 0 : break;
8435 : :
8436 : : /*
8437 : : * An offset past the end of the page's line pointer array is possible
8438 : : * when the array was truncated
8439 : : */
8440 [ - + ]: 57459 : if (offnum > maxoff)
8441 : 0 : break;
8442 : :
8443 : 57459 : lp = PageGetItemId(page, offnum);
8444 [ + + ]: 57459 : if (ItemIdIsRedirected(lp))
8445 : : {
8446 : 458 : offnum = ItemIdGetRedirect(lp);
8447 : 458 : continue;
8448 : : }
8449 : :
8450 : : /*
8451 : : * We'll often encounter LP_DEAD line pointers (especially with an
8452 : : * entry marked knowndeletable by our caller up front). No heap
8453 : : * tuple headers get examined for an htid that leads us to an
8454 : : * LP_DEAD item. This is okay because the earlier pruning
8455 : : * operation that made the line pointer LP_DEAD in the first place
8456 : : * must have considered the original tuple header as part of
8457 : : * generating its own snapshotConflictHorizon value.
8458 : : *
8459 : : * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
8460 : : * the same strategy that index vacuuming uses in all cases. Index
8461 : : * VACUUM WAL records don't even have a snapshotConflictHorizon
8462 : : * field of their own for this reason.
8463 : : */
8464 [ + + ]: 57001 : if (!ItemIdIsNormal(lp))
8465 : 32688 : break;
8466 : :
8467 : 24313 : htup = (HeapTupleHeader) PageGetItem(page, lp);
8468 : :
8469 : : /*
8470 : : * Check the tuple XMIN against prior XMAX, if any
8471 : : */
8472 [ + + + - ]: 24313 : if (TransactionIdIsValid(priorXmax) &&
8473 : 416 : !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
8474 : 0 : break;
8475 : :
8476 : 24313 : HeapTupleHeaderAdvanceConflictHorizon(htup,
8477 : : &snapshotConflictHorizon);
8478 : :
8479 : : /*
8480 : : * If the tuple is not HOT-updated, then we are at the end of this
8481 : : * HOT-chain. No need to visit later tuples from the same update
8482 : : * chain (they get their own index entries) -- just move on to
8483 : : * next htid from index AM caller.
8484 : : */
8485 [ + + ]: 24313 : if (!HeapTupleHeaderIsHotUpdated(htup))
8486 : 23897 : break;
8487 : :
8488 : : /* Advance to next HOT chain member */
8489 [ - + ]: 416 : Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
8490 : 416 : offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
8491 : 416 : priorXmax = HeapTupleHeaderGetUpdateXid(htup);
8492 [ - + + + ]: 57459 : }
8493 : :
8494 : : /* Enable further/final shrinking of deltids for caller */
8495 : 56585 : finalndeltids = i + 1;
8496 [ + + + ]: 89160 : }
8497 : :
8498 : 1037 : UnlockReleaseBuffer(buf);
8499 : :
8500 : : /*
8501 : : * Shrink deltids array to exclude non-deletable entries at the end. This
8502 : : * is not just a minor optimization. Final deltids array size might be
8503 : : * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
8504 : : * ndeltids being zero in all cases with zero total deletable entries.
8505 : : */
8506 [ + + + - ]: 1037 : Assert(finalndeltids > 0 || delstate->bottomup);
8507 : 1037 : delstate->ndeltids = finalndeltids;
8508 : :
8509 : 2074 : return snapshotConflictHorizon;
8510 : 1037 : }
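
Editor's note: the bottom-up give-up heuristics above (stop once the possibly-decayed space target is met, stop when the last block freed nothing new, decay the target before each non-favorable block) can be modeled in isolation. A minimal standalone sketch follows; visit_blocks and its parameters are hypothetical illustrations, not part of heapam.c, and it assumes we know up front how much index free space each heap block visit would yield.

    #include <stdbool.h>
    #include <stdio.h>

    static int
    visit_blocks(const int *space_per_block, const bool *favorable,
                 int nblocks, int targetfreespace)
    {
        int     actualfreespace = 0;
        int     lastfreespace = 0;
        int     nblocksaccessed = 0;

        for (int b = 0; b < nblocks; b++)
        {
            if (actualfreespace >= targetfreespace)
                break;          /* space target satisfied */
            if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
                break;          /* last block freed nothing new */
            lastfreespace = actualfreespace;

            if (nblocksaccessed >= 1 && !favorable[b])
                targetfreespace /= 2;   /* decay for non-favorable block */

            actualfreespace += space_per_block[b];
            nblocksaccessed++;
        }

        return nblocksaccessed;
    }

    int
    main(void)
    {
        int     space[] = {512, 256, 0, 1024};
        bool    fav[] = {true, true, false, false};

        /* block 3 frees nothing, so block 4 is never visited: prints 3 */
        printf("%d\n", visit_blocks(space, fav, 4, 4096));
        return 0;
    }
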
8511 : :
8512 : : /*
8513 : : * Specialized inlineable comparison function for index_delete_sort()
8514 : : */
8515 : : static inline int
8516 : 1889554 : index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
8517 : : {
8518 : 1889554 : ItemPointer tid1 = &deltid1->tid;
8519 : 1889554 : ItemPointer tid2 = &deltid2->tid;
8520 : :
8521 : : {
8522 : 1889554 : BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
8523 : 1889554 : BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
8524 : :
8525 [ + + ]: 1889554 : if (blk1 != blk2)
8526 : 878613 : return (blk1 < blk2) ? -1 : 1;
8527 [ + + ]: 1889554 : }
8528 : : {
8529 : 1010941 : OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
8530 : 1010941 : OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
8531 : :
8532 [ + - ]: 1010941 : if (pos1 != pos2)
8533 : 1010941 : return (pos1 < pos2) ? -1 : 1;
8534 [ + - ]: 1010941 : }
8535 : :
8536 : 0 : Assert(false);
8537 : :
8538 : 0 : return 0;
8539 : 1889554 : }
8540 : :
8541 : : /*
8542 : : * Sort deltids array from delstate by TID. This prepares it for further
8543 : : * processing by heap_index_delete_tuples().
8544 : : *
8545 : : * This operation becomes a noticeable consumer of CPU cycles with some
8546 : : * workloads, so we go to the trouble of specialization/micro-optimization.
8547 : : * We use shellsort for this because it's easy to specialize, compiles to
8548 : : * relatively few instructions, and is adaptive to presorted inputs/subsets
8549 : : * (which are typical here).
8550 : : */
8551 : : static void
8552 : 1037 : index_delete_sort(TM_IndexDeleteOp *delstate)
8553 : : {
8554 : 1037 : TM_IndexDelete *deltids = delstate->deltids;
8555 : 1037 : int ndeltids = delstate->ndeltids;
8556 : :
8557 : : /*
8558 : : * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
8559 : : *
8560 : : * This implementation is fast with array sizes up to ~4500. This covers
8561 : : * all supported BLCKSZ values.
8562 : : */
8563 : 1037 : const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
8564 : :
8565 : : /* Think carefully before changing anything here -- keep swaps cheap */
8566 : : StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
8567 : : "element size exceeds 8 bytes");
8568 : :
8569 [ + + ]: 10370 : for (int g = 0; g < lengthof(gaps); g++)
8570 : : {
8571 [ + + ]: 1198232 : for (int hi = gaps[g], i = hi; i < ndeltids; i++)
8572 : : {
8573 : 1188899 : TM_IndexDelete d = deltids[i];
8574 : 1188899 : int j = i;
8575 : :
8576 [ + + + + ]: 1940764 : while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
8577 : : {
8578 : 751865 : deltids[j] = deltids[j - hi];
8579 : 751865 : j -= hi;
8580 : : }
8581 : 1188899 : deltids[j] = d;
8582 : 1188899 : }
8583 : 9333 : }
8584 : 1037 : }
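
Editor's note: the same shellsort pattern, extracted as a self-contained sketch over plain ints so the gap-sequence behavior is easy to experiment with. shellsort_int is a hypothetical standalone analogue, not PostgreSQL code; presorted runs make the inner while loop exit immediately, which is the adaptivity the comment above relies on.

    #include <stdio.h>

    static void
    shellsort_int(int *a, int n)
    {
        /* Sedgewick-Incerpi gap sequence, as in index_delete_sort() */
        static const int gaps[] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};

        for (int g = 0; g < (int) (sizeof(gaps) / sizeof(gaps[0])); g++)
        {
            for (int hi = gaps[g], i = hi; i < n; i++)
            {
                int     d = a[i];
                int     j = i;

                /* shift larger elements right by one gap-stride */
                while (j >= hi && a[j - hi] > d)
                {
                    a[j] = a[j - hi];
                    j -= hi;
                }
                a[j] = d;
            }
        }
    }

    int
    main(void)
    {
        int     a[] = {5, 1, 4, 1, 3, 9, 2, 6};

        shellsort_int(a, 8);
        for (int i = 0; i < 8; i++)
            printf("%d ", a[i]);    /* 1 1 2 3 4 5 6 9 */
        printf("\n");
        return 0;
    }
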
8585 : :
8586 : : /*
8587 : : * Returns how many blocks should be considered favorable/contiguous for a
8588 : : * bottom-up index deletion pass. This is a number of heap blocks that starts
8589 : : * from and includes the first block in line.
8590 : : *
8591 : : * There is always at least one favorable block during bottom-up index
8592 : : * deletion. In the worst case (i.e. with totally random heap blocks) the
8593 : : * first block in line (the only favorable block) can be thought of as a
8594 : : * degenerate array of contiguous blocks that consists of a single block.
8595 : : * heap_index_delete_tuples() will expect this.
8596 : : *
8597 : : * Caller passes blockgroups, a description of the final order that deltids
8598 : : * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
8599 : : * processing. Note that deltids need not actually be sorted just yet (caller
8600 : : * only passes deltids to us so that we can interpret blockgroups).
8601 : : *
8602 : : * You might guess that the existence of contiguous blocks cannot matter much,
8603 : : * since in general the main factor that determines which blocks we visit is
8604 : : * the number of promising TIDs, which is a fixed hint from the index AM.
8605 : : * We're not really targeting the general case, though -- the actual goal is
8606 : : * to adapt our behavior to a wide variety of naturally occurring conditions.
8607 : : * The effects of most of the heuristics we apply are only noticeable in the
8608 : : * aggregate, over time and across many _related_ bottom-up index deletion
8609 : : * passes.
8610 : : *
8611 : : * Deeming certain blocks favorable allows heapam to recognize and adapt to
8612 : : * workloads where heap blocks visited during bottom-up index deletion can be
8613 : : * accessed contiguously, in the sense that each newly visited block is the
8614 : : * neighbor of the block that bottom-up deletion just finished processing (or
8615 : : * close enough to it). It will likely be cheaper to access more favorable
8616 : : * blocks sooner rather than later (e.g. in this pass, not across a series of
8617 : : * related bottom-up passes). Either way it is probably only a matter of time
8618 : : * (or a matter of further correlated version churn) before all blocks that
8619 : : * appear together as a single large batch of favorable blocks get accessed by
8620 : : * _some_ bottom-up pass. Large batches of favorable blocks tend to either
8621 : : * appear almost constantly or not even once (it all depends on per-index
8622 : : * workload characteristics).
8623 : : *
8624 : : * Note that the blockgroups sort order applies a power-of-two bucketing
8625 : : * scheme that creates opportunities for contiguous groups of blocks to get
8626 : : * batched together, at least with workloads that are naturally amenable to
8627 : : * being driven by heap block locality. This doesn't just enhance the spatial
8628 : : * locality of bottom-up heap block processing in the obvious way. It also
8629 : : * enables temporal locality of access, since sorting by heap block number
8630 : : * naturally tends to make the bottom-up processing order deterministic.
8631 : : *
8632 : : * Consider the following example to get a sense of how temporal locality
8633 : : * might matter: There is a heap relation with several indexes, each of which
8634 : : * is low to medium cardinality. It is subject to constant non-HOT updates.
8635 : : * The updates are skewed (in one part of the primary key, perhaps). None of
8636 : : * the indexes are logically modified by the UPDATE statements (if they were
8637 : : * then bottom-up index deletion would not be triggered in the first place).
8638 : : * Naturally, each new round of index tuples (for each heap tuple that gets a
8639 : : * heap_update() call) will have the same heap TID in each and every index.
8640 : : * Since these indexes are low cardinality and never get logically modified,
8641 : : * heapam processing during bottom-up deletion passes will access heap blocks
8642 : : * in approximately sequential order. Temporal locality of access occurs due
8643 : : * to bottom-up deletion passes behaving very similarly across each of the
8644 : : * indexes at any given moment. This keeps the number of buffer misses needed
8645 : : * to visit heap blocks to a minimum.
8646 : : */
8647 : : static int
8648 : 183 : bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
8649 : : TM_IndexDelete *deltids)
8650 : : {
8651 : 183 : int64 lastblock = -1;
8652 : 183 : int nblocksfavorable = 0;
8653 : :
8654 [ + - ]: 183 : Assert(nblockgroups >= 1);
8655 [ + - ]: 183 : Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
8656 : :
8657 : : /*
8658 : : * We tolerate heap blocks that will be accessed only slightly out of
8659 : : * physical order. Small blips occur when a pair of almost-contiguous
8660 : : * blocks happen to fall into different buckets (perhaps due only to a
8661 : : * small difference in npromisingtids that the bucketing scheme didn't
8662 : : * quite manage to ignore). We effectively ignore these blips by applying
8663 : : * a small tolerance. The precise tolerance we use is a little arbitrary,
8664 : : * but it works well enough in practice.
8665 : : */
8666 [ + + ]: 814 : for (int b = 0; b < nblockgroups; b++)
8667 : : {
8668 : 631 : IndexDeleteCounts *group = blockgroups + b;
8669 : 631 : TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
8670 : 631 : BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
8671 : :
8672 [ + + + + ]: 1007 : if (lastblock != -1 &&
8673 [ + + ]: 448 : ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
8674 : 376 : (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
8675 : 135 : break;
8676 : :
8677 : 496 : nblocksfavorable++;
8678 : 496 : lastblock = block;
8679 [ + + ]: 631 : }
8680 : :
8681 : : /* Always indicate that there is at least 1 favorable block */
8682 [ + - ]: 183 : Assert(nblocksfavorable >= 1);
8683 : :
8684 : 366 : return nblocksfavorable;
8685 : 183 : }
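
Editor's note: a worked standalone sketch of the tolerance rule above. favorable_prefix is hypothetical, and TOLERANCE merely stands in for BOTTOMUP_TOLERANCE_NBLOCKS; the value used here is illustrative, not the constant from heapam.c.

    #include <stdint.h>
    #include <stdio.h>

    #define TOLERANCE   9       /* illustrative stand-in value */

    static int
    favorable_prefix(const uint32_t *blocks, int nblocks)
    {
        int64_t lastblock = -1;
        int     nfavorable = 0;

        for (int b = 0; b < nblocks; b++)
        {
            /* stop at the first block that jumps outside the tolerance */
            if (lastblock != -1 &&
                ((int64_t) blocks[b] < lastblock - TOLERANCE ||
                 (int64_t) blocks[b] > lastblock + TOLERANCE))
                break;
            nfavorable++;
            lastblock = blocks[b];
        }

        return nfavorable;
    }

    int
    main(void)
    {
        /* 42 -> 44 -> 43 are small blips within tolerance; 900 is not */
        uint32_t blocks[] = {42, 44, 43, 900, 901};

        printf("%d\n", favorable_prefix(blocks, 5));    /* prints 3 */
        return 0;
    }
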
8686 : :
8687 : : /*
8688 : : * qsort comparison function for bottomup_sort_and_shrink()
8689 : : */
8690 : : static int
8691 : 46170 : bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
8692 : : {
8693 : 46170 : const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
8694 : 46170 : const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
8695 : :
8696 : : /*
8697 : : * Most significant field is npromisingtids (which we invert the order of
8698 : : * so as to sort in desc order).
8699 : : *
8700 : : * Caller should have already normalized npromisingtids fields into
8701 : : * power-of-two values (buckets).
8702 : : */
8703 [ + + ]: 46170 : if (group1->npromisingtids > group2->npromisingtids)
8704 : 1811 : return -1;
8705 [ + + ]: 44359 : if (group1->npromisingtids < group2->npromisingtids)
8706 : 1488 : return 1;
8707 : :
8708 : : /*
8709 : : * Tiebreak: desc ntids sort order.
8710 : : *
8711 : : * We cannot expect power-of-two values for ntids fields. We should
8712 : : * behave as if they were already rounded up for us instead.
8713 : : */
8714 [ + + ]: 42871 : if (group1->ntids != group2->ntids)
8715 : : {
8716 : 29128 : uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
8717 : 29128 : uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
8718 : :
8719 [ + + ]: 29128 : if (ntids1 > ntids2)
8720 : 3483 : return -1;
8721 [ + + ]: 25645 : if (ntids1 < ntids2)
8722 : 4238 : return 1;
8723 [ + + ]: 29128 : }
8724 : :
8725 : : /*
8726 : : * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
8727 : : * block in deltids array) order.
8728 : : *
8729 : : * This is equivalent to sorting in ascending heap block number order
8730 : : * (among otherwise equal subsets of the array). This approach allows us
8731 : : * to avoid accessing the out-of-line TID. (We rely on the assumption
8732 : : * that the deltids array was sorted in ascending heap TID order when
8733 : : * these offsets to the first TID from each heap block group were formed.)
8734 : : */
8735 [ + + ]: 35150 : if (group1->ifirsttid > group2->ifirsttid)
8736 : 17216 : return 1;
8737 [ + - ]: 17934 : if (group1->ifirsttid < group2->ifirsttid)
8738 : 17934 : return -1;
8739 : :
8740 : 0 : pg_unreachable();
8741 : :
8742 : : return 0;
8743 : 46170 : }
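
Editor's note: a small standalone illustration of the power-of-two bucketing that drives the comparator above. next_pow2 is a hypothetical stand-in for pg_nextpower2_32; the point is that values such as 17 and 30 round to the same bucket (32), so the comparator falls through to its tiebreakers for them.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    next_pow2(uint32_t v)
    {
        /* round up to the next power of two (v must be > 0) */
        v--;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return v + 1;
    }

    int
    main(void)
    {
        printf("%u %u %u\n", next_pow2(17), next_pow2(30), next_pow2(33));
        /* prints: 32 32 64 -- 17 and 30 share a bucket, 33 does not */
        return 0;
    }
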
8744 : :
8745 : : /*
8746 : : * heap_index_delete_tuples() helper function for bottom-up deletion callers.
8747 : : *
8748 : : * Sorts deltids array in the order needed for useful processing by bottom-up
8749 : : * deletion. The array should already be sorted in TID order when we're
8750 : : * called. The sort process groups heap TIDs from deltids into heap block
8751 : : * groupings. Earlier/more-promising groups/blocks are usually those that are
8752 : : * known to have the most "promising" TIDs.
8753 : : *
8754 : : * Sets new size of deltids array (ndeltids) in state. deltids will only have
8755 : : * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
8756 : : * return. This often means that deltids will be shrunk to a small fraction
8757 : : * of its original size (we eliminate many heap blocks from consideration for
8758 : : * caller up front).
8759 : : *
8760 : : * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
8761 : : * for a definition and full details.
8762 : : */
8763 : : static int
8764 : 183 : bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
8765 : : {
8766 : 183 : IndexDeleteCounts *blockgroups;
8767 : 183 : TM_IndexDelete *reordereddeltids;
8768 : 183 : BlockNumber curblock = InvalidBlockNumber;
8769 : 183 : int nblockgroups = 0;
8770 : 183 : int ncopied = 0;
8771 : 183 : int nblocksfavorable = 0;
8772 : :
8773 [ + - ]: 183 : Assert(delstate->bottomup);
8774 [ + - ]: 183 : Assert(delstate->ndeltids > 0);
8775 : :
8776 : : /* Calculate per-heap-block count of TIDs */
8777 : 183 : blockgroups = palloc_array(IndexDeleteCounts, delstate->ndeltids);
8778 [ + + ]: 127276 : for (int i = 0; i < delstate->ndeltids; i++)
8779 : : {
8780 : 127093 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8781 : 127093 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8782 : 127093 : ItemPointer htid = &ideltid->tid;
8783 : 127093 : bool promising = istatus->promising;
8784 : :
8785 [ + + ]: 127093 : if (curblock != ItemPointerGetBlockNumber(htid))
8786 : : {
8787 : : /* New block group */
8788 : 7224 : nblockgroups++;
8789 : :
8790 [ + + - + ]: 7224 : Assert(curblock < ItemPointerGetBlockNumber(htid) ||
8791 : : !BlockNumberIsValid(curblock));
8792 : :
8793 : 7224 : curblock = ItemPointerGetBlockNumber(htid);
8794 : 7224 : blockgroups[nblockgroups - 1].ifirsttid = i;
8795 : 7224 : blockgroups[nblockgroups - 1].ntids = 1;
8796 : 7224 : blockgroups[nblockgroups - 1].npromisingtids = 0;
8797 : 7224 : }
8798 : : else
8799 : : {
8800 : 119869 : blockgroups[nblockgroups - 1].ntids++;
8801 : : }
8802 : :
8803 [ + + ]: 127093 : if (promising)
8804 : 12605 : blockgroups[nblockgroups - 1].npromisingtids++;
8805 : 127093 : }
8806 : :
8807 : : /*
8808 : : * We're about ready to sort block groups to determine the optimal order
8809 : : * for visiting heap blocks. But before we do, round the number of
8810 : : * promising tuples for each block group up to the next power-of-two,
8811 : : * unless it is very low (less than 4), in which case we round up to 4.
8812 : : * npromisingtids is far too noisy to trust when choosing between a pair
8813 : : * of block groups that both have very low values.
8814 : : *
8815 : : * This scheme divides heap blocks/block groups into buckets. Each bucket
8816 : : * contains blocks that have _approximately_ the same number of promising
8817 : : * TIDs as each other. The goal is to ignore relatively small differences
8818 : : * in the total number of promising entries, so that the whole process can
8819 : : * give a little weight to heapam factors (like heap block locality)
8820 : : * instead. This isn't a trade-off, really -- we have nothing to lose. It
8821 : : * would be foolish to interpret small differences in npromisingtids
8822 : : * values as anything more than noise.
8823 : : *
8824 : : * We tiebreak on ntids when sorting block group subsets that have the
8825 : : * same npromisingtids, but this has the same issues as npromisingtids,
8826 : : * and so ntids is subject to the same power-of-two bucketing scheme. The
8827 : : * only reason that we don't fix ntids in the same way here too is that
8828 : : * we'll need accurate ntids values after the sort. We handle ntids
8829 : : * bucketization dynamically instead (in the sort comparator).
8830 : : *
8831 : : * See bottomup_nblocksfavorable() for a full explanation of when and how
8832 : : * heap locality/favorable blocks can significantly influence when and how
8833 : : * heap blocks are accessed.
8834 : : */
8835 [ + + ]: 7407 : for (int b = 0; b < nblockgroups; b++)
8836 : : {
8837 : 7224 : IndexDeleteCounts *group = blockgroups + b;
8838 : :
8839 : : /* Better off falling back on nhtids with low npromisingtids */
8840 [ + + ]: 7224 : if (group->npromisingtids <= 4)
8841 : 6477 : group->npromisingtids = 4;
8842 : : else
8843 : 747 : group->npromisingtids =
8844 : 747 : pg_nextpower2_32((uint32) group->npromisingtids);
8845 : 7224 : }
8846 : :
8847 : : /* Sort groups and rearrange caller's deltids array */
8848 : 183 : qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
8849 : : bottomup_sort_and_shrink_cmp);
8850 : 183 : reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
8851 : :
8852 [ + + ]: 183 : nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
8853 : : /* Determine number of favorable blocks at the start of final deltids */
8854 : 366 : nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
8855 : 183 : delstate->deltids);
8856 : :
8857 [ + + ]: 1260 : for (int b = 0; b < nblockgroups; b++)
8858 : : {
8859 : 1077 : IndexDeleteCounts *group = blockgroups + b;
8860 : 1077 : TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
8861 : :
8862 : 1077 : memcpy(reordereddeltids + ncopied, firstdtid,
8863 : : sizeof(TM_IndexDelete) * group->ntids);
8864 : 1077 : ncopied += group->ntids;
8865 : 1077 : }
8866 : :
8867 : : /* Copy final grouped and sorted TIDs back into start of caller's array */
8868 : 183 : memcpy(delstate->deltids, reordereddeltids,
8869 : : sizeof(TM_IndexDelete) * ncopied);
8870 : 183 : delstate->ndeltids = ncopied;
8871 : :
8872 : 183 : pfree(reordereddeltids);
8873 : 183 : pfree(blockgroups);
8874 : :
8875 : 366 : return nblocksfavorable;
8876 : 183 : }
8877 : :
8878 : : /*
8879 : : * Perform XLogInsert for a heap-visible operation. 'block' is the block
8880 : : * being marked all-visible, and vm_buffer is the buffer containing the
8881 : : * corresponding visibility map block. Both should have already been modified
8882 : : * and dirtied.
8883 : : *
8884 : : * snapshotConflictHorizon comes from the largest xmin on the page being
8885 : : * marked all-visible. REDO routine uses it to generate recovery conflicts.
8886 : : *
8887 : : * If checksums or wal_log_hints are enabled, we may also generate a full-page
8888 : : * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
8889 : : * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
8890 : : * update the heap page's LSN.
8891 : : */
8892 : : XLogRecPtr
8893 : 6127 : log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
8894 : : TransactionId snapshotConflictHorizon, uint8 vmflags)
8895 : : {
8896 : 6127 : xl_heap_visible xlrec;
8897 : 6127 : XLogRecPtr recptr;
8898 : 6127 : uint8 flags;
8899 : :
8900 [ + - ]: 6127 : Assert(BufferIsValid(heap_buffer));
8901 [ + - ]: 6127 : Assert(BufferIsValid(vm_buffer));
8902 : :
8903 : 6127 : xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
8904 : 6127 : xlrec.flags = vmflags;
8905 [ + - - + # # # # # # # # # # # # ]: 6127 : if (RelationIsAccessibleInLogicalDecoding(rel))
8906 : 0 : xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
8907 : 6127 : XLogBeginInsert();
8908 : 6127 : XLogRegisterData(&xlrec, SizeOfHeapVisible);
8909 : :
8910 : 6127 : XLogRegisterBuffer(0, vm_buffer, 0);
8911 : :
8912 : 6127 : flags = REGBUF_STANDARD;
8913 [ - + # # ]: 6127 : if (!XLogHintBitIsNeeded())
8914 : 0 : flags |= REGBUF_NO_IMAGE;
8915 : 6127 : XLogRegisterBuffer(1, heap_buffer, flags);
8916 : :
8917 : 6127 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
8918 : :
8919 : 12254 : return recptr;
8920 : 6127 : }
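
Editor's note: a hedged sketch of the caller-side pattern implied by the header comment above, not a copy of any particular caller; the variables are assumed to come from a hypothetical caller context. The VM page was registered unconditionally, so its LSN is always advanced, while the heap page's LSN may only be advanced when a full-page image could have been emitted.

    /* hypothetical caller context: rel, heap_buffer, vm_buffer, etc. */
    recptr = log_heap_visible(rel, heap_buffer, vm_buffer,
                              snapshotConflictHorizon, vmflags);
    PageSetLSN(BufferGetPage(vm_buffer), recptr);
    if (XLogHintBitIsNeeded())
        PageSetLSN(BufferGetPage(heap_buffer), recptr);
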
8921 : :
8922 : : /*
8923 : : * Perform XLogInsert for a heap-update operation. Caller must already
8924 : : * have modified the buffer(s) and marked them dirty.
8925 : : */
8926 : : static XLogRecPtr
8927 : 23699 : log_heap_update(Relation reln, Buffer oldbuf,
8928 : : Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
8929 : : HeapTuple old_key_tuple,
8930 : : bool all_visible_cleared, bool new_all_visible_cleared)
8931 : : {
8932 : 23699 : xl_heap_update xlrec;
8933 : 23699 : xl_heap_header xlhdr;
8934 : 23699 : xl_heap_header xlhdr_idx;
8935 : 23699 : uint8 info;
8936 : 23699 : uint16 prefix_suffix[2];
8937 : 23699 : uint16 prefixlen = 0,
8938 : 23699 : suffixlen = 0;
8939 : 23699 : XLogRecPtr recptr;
8940 : 23699 : Page page = BufferGetPage(newbuf);
8941 [ + - - + # # # # # # ]: 23699 : bool need_tuple_data = RelationIsLogicallyLogged(reln);
8942 : 23699 : bool init;
8943 : 23699 : int bufflags;
8944 : :
8945 : : /* Caller should not call me on a non-WAL-logged relation */
8946 [ + - + + ]: 23699 : Assert(RelationNeedsWAL(reln));
8947 : :
8948 : 23699 : XLogBeginInsert();
8949 : :
8950 [ + + ]: 23699 : if (HeapTupleIsHeapOnly(newtup))
8951 : 14246 : info = XLOG_HEAP_HOT_UPDATE;
8952 : : else
8953 : 9453 : info = XLOG_HEAP_UPDATE;
8954 : :
8955 : : /*
8956 : : * If the old and new tuple are on the same page, we only need to log the
8957 : : * parts of the new tuple that were changed. That saves on the amount of
8958 : : * WAL we need to write. Currently, we just count any unchanged bytes in
8959 : : * the beginning and end of the tuple. That's quick to check, and
8960 : : * perfectly covers the common case that only one field is updated.
8961 : : *
8962 : : * We could do this even if the old and new tuple are on different pages,
8963 : : * but only if we don't make a full-page image of the old page, which is
8964 : : * difficult to know in advance. Also, if the old tuple is corrupt for
8965 : : * some reason, it would allow the corruption to propagate to the new
8966 : : * page, so it seems best to avoid it. Under the general assumption that most
8967 : : * updates tend to create the new tuple version on the same page, there
8968 : : * isn't much to be gained by doing this across pages anyway.
8969 : : *
8970 : : * Skip this if we're taking a full-page image of the new page, as we
8971 : : * don't include the new tuple in the WAL record in that case. Also
8972 : : * disable if effective_wal_level='logical', as logical decoding needs to
8973 : : * be able to read the new tuple in whole from the WAL record alone.
8974 : : */
8975 [ + + + - : 23699 : if (oldbuf == newbuf && !need_tuple_data &&
+ + ]
8976 : 17129 : !XLogCheckBufferNeedsBackup(newbuf))
8977 : : {
8978 : 17110 : char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
8979 : 17110 : char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
8980 : 17110 : int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
8981 : 17110 : int newlen = newtup->t_len - newtup->t_data->t_hoff;
8982 : :
8983 : : /* Check for common prefix between old and new tuple */
8984 [ + + + + ]: 1656994 : for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
8985 : : {
8986 [ + + ]: 1655460 : if (newp[prefixlen] != oldp[prefixlen])
8987 : 15576 : break;
8988 : 1639884 : }
8989 : :
8990 : : /*
8991 : : * Storing the length of the prefix takes 2 bytes, so we need to save
8992 : : * at least 3 bytes or there's no point.
8993 : : */
8994 [ + + ]: 17110 : if (prefixlen < 3)
8995 : 499 : prefixlen = 0;
8996 : :
8997 : : /* Same for suffix */
8998 [ + + + + ]: 1048119 : for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
8999 : : {
9000 [ + + ]: 1046527 : if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
9001 : 15518 : break;
9002 : 1031009 : }
9003 [ + + ]: 17110 : if (suffixlen < 3)
9004 : 3574 : suffixlen = 0;
9005 : 17110 : }
9006 : :
9007 : : /* Prepare main WAL data chain */
9008 : 23699 : xlrec.flags = 0;
9009 [ + + ]: 23699 : if (all_visible_cleared)
9010 : 243 : xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
9011 [ + + ]: 23699 : if (new_all_visible_cleared)
9012 : 44 : xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
9013 [ + + ]: 23699 : if (prefixlen > 0)
9014 : 16611 : xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
9015 [ + + ]: 23699 : if (suffixlen > 0)
9016 : 13536 : xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
9017 [ + - ]: 23699 : if (need_tuple_data)
9018 : : {
9019 : 0 : xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
9020 [ # # ]: 0 : if (old_key_tuple)
9021 : : {
9022 [ # # ]: 0 : if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
9023 : 0 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
9024 : : else
9025 : 0 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
9026 : 0 : }
9027 : 0 : }
9028 : :
9029 : : /* If the new tuple is the first and only tuple on the page... */
9030 [ + + + + ]: 23699 : if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
9031 : 472 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
9032 : : {
9033 : 465 : info |= XLOG_HEAP_INIT_PAGE;
9034 : 465 : init = true;
9035 : 465 : }
9036 : : else
9037 : 23234 : init = false;
9038 : :
9039 : : /* Prepare WAL data for the old page */
9040 : 23699 : xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
9041 : 23699 : xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
9042 : 47398 : xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
9043 : 23699 : oldtup->t_data->t_infomask2);
9044 : :
9045 : : /* Prepare WAL data for the new page */
9046 : 23699 : xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
9047 : 23699 : xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
9048 : :
9049 : 23699 : bufflags = REGBUF_STANDARD;
9050 [ + + ]: 23699 : if (init)
9051 : 465 : bufflags |= REGBUF_WILL_INIT;
9052 [ + - ]: 23699 : if (need_tuple_data)
9053 : 0 : bufflags |= REGBUF_KEEP_DATA;
9054 : :
9055 : 23699 : XLogRegisterBuffer(0, newbuf, bufflags);
9056 [ + + ]: 23699 : if (oldbuf != newbuf)
9057 : 6570 : XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
9058 : :
9059 : 23699 : XLogRegisterData(&xlrec, SizeOfHeapUpdate);
9060 : :
9061 : : /*
9062 : : * Prepare WAL data for the new tuple.
9063 : : */
9064 [ + + + + ]: 23699 : if (prefixlen > 0 || suffixlen > 0)
9065 : : {
9066 [ + + + + ]: 17015 : if (prefixlen > 0 && suffixlen > 0)
9067 : : {
9068 : 13132 : prefix_suffix[0] = prefixlen;
9069 : 13132 : prefix_suffix[1] = suffixlen;
9070 : 13132 : XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
9071 : 13132 : }
9072 [ + + ]: 3883 : else if (prefixlen > 0)
9073 : : {
9074 : 3479 : XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
9075 : 3479 : }
9076 : : else
9077 : : {
9078 : 404 : XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
9079 : : }
9080 : 17015 : }
9081 : :
9082 : 23699 : xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
9083 : 23699 : xlhdr.t_infomask = newtup->t_data->t_infomask;
9084 : 23699 : xlhdr.t_hoff = newtup->t_data->t_hoff;
9085 [ + - ]: 23699 : Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
9086 : :
9087 : : /*
9088 : : * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
9089 : : *
9090 : : * The 'data' doesn't include the common prefix or suffix.
9091 : : */
9092 : 23699 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
9093 [ + + ]: 23699 : if (prefixlen == 0)
9094 : : {
9095 : 7088 : XLogRegisterBufData(0,
9096 : 7088 : (char *) newtup->t_data + SizeofHeapTupleHeader,
9097 : 7088 : newtup->t_len - SizeofHeapTupleHeader - suffixlen);
9098 : 7088 : }
9099 : : else
9100 : : {
9101 : : /*
9102 : : * Have to write the null bitmap and data after the common prefix as
9103 : : * two separate rdata entries.
9104 : : */
9105 : : /* bitmap [+ padding] [+ oid] */
9106 [ - + ]: 16611 : if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
9107 : : {
9108 : 16611 : XLogRegisterBufData(0,
9109 : 16611 : (char *) newtup->t_data + SizeofHeapTupleHeader,
9110 : 16611 : newtup->t_data->t_hoff - SizeofHeapTupleHeader);
9111 : 16611 : }
9112 : :
9113 : : /* data after common prefix */
9114 : 16611 : XLogRegisterBufData(0,
9115 : 16611 : (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
9116 : 16611 : newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
9117 : : }
9118 : :
9119 : : /* We need to log a tuple identity */
9120 [ - + # # ]: 23699 : if (need_tuple_data && old_key_tuple)
9121 : : {
9122 : : /* don't really need this, but it's more comfy to decode */
9123 : 0 : xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
9124 : 0 : xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
9125 : 0 : xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
9126 : :
9127 : 0 : XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
9128 : :
9129 : : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
9130 : 0 : XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
9131 : 0 : old_key_tuple->t_len - SizeofHeapTupleHeader);
9132 : 0 : }
9133 : :
9134 : : /* filtering by origin on a row level is much more efficient */
9135 : 23699 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
9136 : :
9137 : 23699 : recptr = XLogInsert(RM_HEAP_ID, info);
9138 : :
9139 : 47398 : return recptr;
9140 : 23699 : }
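
Editor's note: the prefix/suffix delta encoding used above for same-page updates, restated as a self-contained sketch over plain byte buffers. common_affixes is hypothetical; the "less than 3 is not worth it" rule mirrors the 2-byte cost of storing a length field in the WAL record.

    #include <stdio.h>
    #include <string.h>

    static void
    common_affixes(const char *oldp, int oldlen, const char *newp, int newlen,
                   int *prefixlen, int *suffixlen)
    {
        int     minlen = (oldlen < newlen) ? oldlen : newlen;
        int     p = 0,
                s = 0;

        /* common prefix of the two buffers */
        while (p < minlen && oldp[p] == newp[p])
            p++;
        if (p < 3)
            p = 0;              /* storing the length costs 2 bytes */

        /* common suffix, not overlapping the prefix */
        while (s < minlen - p && oldp[oldlen - s - 1] == newp[newlen - s - 1])
            s++;
        if (s < 3)
            s = 0;

        *prefixlen = p;
        *suffixlen = s;
    }

    int
    main(void)
    {
        const char *oldt = "id=7 name=alice balance=100";
        const char *newt = "id=7 name=alice balance=950";
        int     prefixlen,
                suffixlen;

        common_affixes(oldt, (int) strlen(oldt), newt, (int) strlen(newt),
                       &prefixlen, &suffixlen);
        printf("prefix=%d suffix=%d\n", prefixlen, suffixlen);
        /* prints: prefix=24 suffix=0 (trailing "0" alone is under 3 bytes) */
        return 0;
    }
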
9141 : :
9142 : : /*
9143 : : * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
9144 : : *
9145 : : * This is only used when effective_wal_level is logical, and only for
9146 : : * catalog tuples.
9147 : : */
9148 : : static XLogRecPtr
9149 : 0 : log_heap_new_cid(Relation relation, HeapTuple tup)
9150 : : {
9151 : 0 : xl_heap_new_cid xlrec;
9152 : :
9153 : 0 : XLogRecPtr recptr;
9154 : 0 : HeapTupleHeader hdr = tup->t_data;
9155 : :
9156 [ # # ]: 0 : Assert(ItemPointerIsValid(&tup->t_self));
9157 [ # # ]: 0 : Assert(tup->t_tableOid != InvalidOid);
9158 : :
9159 : 0 : xlrec.top_xid = GetTopTransactionId();
9160 : 0 : xlrec.target_locator = relation->rd_locator;
9161 : 0 : xlrec.target_tid = tup->t_self;
9162 : :
9163 : : /*
9164 : : * If the tuple got inserted & deleted in the same TX we definitely have a
9165 : : * combo CID, set cmin and cmax.
9166 : : */
9167 [ # # ]: 0 : if (hdr->t_infomask & HEAP_COMBOCID)
9168 : : {
9169 [ # # ]: 0 : Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
9170 [ # # ]: 0 : Assert(!HeapTupleHeaderXminInvalid(hdr));
9171 : 0 : xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
9172 : 0 : xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
9173 : 0 : xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
9174 : 0 : }
9175 : : /* No combo CID, so only cmin or cmax can be set by this TX */
9176 : : else
9177 : : {
9178 : : /*
9179 : : * Tuple inserted.
9180 : : *
9181 : : * We need to check for LOCK ONLY because multixacts might be
9182 : : * transferred to the new tuple in case of FOR KEY SHARE updates, in
9183 : : * which case there will be an xmax, although the tuple just got
9184 : : * inserted.
9185 : : */
9186 [ # # # # ]: 0 : if (hdr->t_infomask & HEAP_XMAX_INVALID ||
9187 : 0 : HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
9188 : : {
9189 : 0 : xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
9190 : 0 : xlrec.cmax = InvalidCommandId;
9191 : 0 : }
9192 : : /* Tuple from a different tx updated or deleted. */
9193 : : else
9194 : : {
9195 : 0 : xlrec.cmin = InvalidCommandId;
9196 : 0 : xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
9197 : : }
9198 : 0 : xlrec.combocid = InvalidCommandId;
9199 : : }
9200 : :
9201 : : /*
9202 : : * Note that we don't need to register the buffer here, because this
9203 : : * operation does not modify the page. The insert/update/delete that
9204 : : * called us certainly did, but that's WAL-logged separately.
9205 : : */
9206 : 0 : XLogBeginInsert();
9207 : 0 : XLogRegisterData(&xlrec, SizeOfHeapNewCid);
9208 : :
9209 : : /* will be looked at irrespective of origin */
9210 : :
9211 : 0 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
9212 : :
9213 : 0 : return recptr;
9214 : 0 : }
9215 : :
9216 : : /*
9217 : : * Build a heap tuple representing the configured REPLICA IDENTITY for
9218 : : * the old tuple in an UPDATE or DELETE.
9219 : : *
9220 : : * Returns NULL if there's no need to log an identity or if there's no suitable
9221 : : * key defined.
9222 : : *
9223 : : * Pass key_required true if any replica identity columns changed value, or if
9224 : : * any of them have any external data. Delete must always pass true.
9225 : : *
9226 : : * *copy is set to true if the returned tuple is a modified copy rather than
9227 : : * the same tuple that was passed in.
9228 : : */
9229 : : static HeapTuple
9230 : 327930 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
9231 : : bool *copy)
9232 : : {
9233 : 327930 : TupleDesc desc = RelationGetDescr(relation);
9234 : 327930 : char replident = relation->rd_rel->relreplident;
9235 : 327930 : Bitmapset *idattrs;
9236 : 327930 : HeapTuple key_tuple;
9237 : 327930 : bool nulls[MaxHeapAttributeNumber];
9238 : 327930 : Datum values[MaxHeapAttributeNumber];
9239 : :
9240 : 327930 : *copy = false;
9241 : :
9242 [ + - - + # # # # # # # # ]: 327930 : if (!RelationIsLogicallyLogged(relation))
9243 : 327930 : return NULL;
9244 : :
9245 [ # # ]: 0 : if (replident == REPLICA_IDENTITY_NOTHING)
9246 : 0 : return NULL;
9247 : :
9248 [ # # ]: 0 : if (replident == REPLICA_IDENTITY_FULL)
9249 : : {
9250 : : /*
9251 : : * When logging the entire old tuple, it very well could contain
9252 : : * toasted columns. If so, force them to be inlined.
9253 : : */
9254 [ # # ]: 0 : if (HeapTupleHasExternal(tp))
9255 : : {
9256 : 0 : *copy = true;
9257 : 0 : tp = toast_flatten_tuple(tp, desc);
9258 : 0 : }
9259 : 0 : return tp;
9260 : : }
9261 : :
9262 : : /* if the key isn't required and we're only logging the key, we're done */
9263 [ # # ]: 0 : if (!key_required)
9264 : 0 : return NULL;
9265 : :
9266 : : /* find out the replica identity columns */
9267 : 0 : idattrs = RelationGetIndexAttrBitmap(relation,
9268 : : INDEX_ATTR_BITMAP_IDENTITY_KEY);
9269 : :
9270 : : /*
9271 : : * If there are no defined replica identity columns, treat as !key_required.
9272 : : * (This case should not be reachable from heap_update, since that should
9273 : : * calculate key_required accurately. But heap_delete just passes
9274 : : * constant true for key_required, so we can hit this case in deletes.)
9275 : : */
9276 [ # # ]: 0 : if (bms_is_empty(idattrs))
9277 : 0 : return NULL;
9278 : :
9279 : : /*
9280 : : * Construct a new tuple containing only the replica identity columns,
9281 : : * with nulls elsewhere. While we're at it, assert that the replica
9282 : : * identity columns aren't null.
9283 : : */
9284 : 0 : heap_deform_tuple(tp, desc, values, nulls);
9285 : :
9286 [ # # ]: 0 : for (int i = 0; i < desc->natts; i++)
9287 : : {
9288 [ # # # # ]: 0 : if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
9289 : 0 : idattrs))
9290 [ # # ]: 0 : Assert(!nulls[i]);
9291 : : else
9292 : 0 : nulls[i] = true;
9293 : 0 : }
9294 : :
9295 : 0 : key_tuple = heap_form_tuple(desc, values, nulls);
9296 : 0 : *copy = true;
9297 : :
9298 : 0 : bms_free(idattrs);
9299 : :
9300 : : /*
9301 : : * If the tuple, which by here only contains indexed columns, still has
9302 : : * toasted columns, force them to be inlined. This is somewhat unlikely
9303 : : * since there are limits on the size of indexed columns, so we don't
9304 : : * duplicate toast_flatten_tuple()'s functionality in the above loop over
9305 : : * the indexed columns, even if it would be more efficient.
9306 : : */
9307 [ # # ]: 0 : if (HeapTupleHasExternal(key_tuple))
9308 : : {
9309 : 0 : HeapTuple oldtup = key_tuple;
9310 : :
9311 : 0 : key_tuple = toast_flatten_tuple(oldtup, desc);
9312 : 0 : heap_freetuple(oldtup);
9313 : 0 : }
9314 : :
9315 : 0 : return key_tuple;
9316 : 327930 : }
9317 : :
9318 : : /*
9319 : : * HeapCheckForSerializableConflictOut
9320 : : * We are reading a tuple. If it's not visible, there may be a
9321 : : * rw-conflict out with the inserter. Otherwise, if it is visible to us
9322 : : * but has been deleted, there may be a rw-conflict out with the deleter.
9323 : : *
9324 : : * We will determine the top level xid of the writing transaction with which
9325 : : * we may be in conflict, and ask CheckForSerializableConflictOut() to check
9326 : : * for overlap with our own transaction.
9327 : : *
9328 : : * This function should be called just about anywhere in heapam.c where a
9329 : : * tuple has been read. The caller must hold at least a shared lock on the
9330 : : * buffer, because this function might set hint bits on the tuple. There is
9331 : : * currently no known reason to call this function from an index AM.
9332 : : */
9333 : : void
9334 : 6782812 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
9335 : : HeapTuple tuple, Buffer buffer,
9336 : : Snapshot snapshot)
9337 : : {
9338 : 6782812 : TransactionId xid;
9339 : 6782812 : HTSV_Result htsvResult;
9340 : :
9341 [ + + ]: 6782812 : if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
9342 : 6782726 : return;
9343 : :
9344 : : /*
9345 : : * Check to see whether the tuple has been written to by a concurrent
9346 : : * transaction, either to create it not visible to us, or to delete it
9347 : : * while it is visible to us. The "visible" bool indicates whether the
9348 : : * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
9349 : : * is going on with it.
9350 : : *
9351 : : * In the event of a concurrently inserted tuple that also happens to have
9352 : : * been concurrently updated (by a separate transaction), the xmin of the
9353 : : * tuple will be used -- not the updater's xid.
9354 : : */
9355 : 86 : htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
9356 [ + + + + - ]: 86 : switch (htsvResult)
9357 : : {
9358 : : case HEAPTUPLE_LIVE:
9359 [ + - ]: 63 : if (visible)
9360 : 63 : return;
9361 : 0 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9362 : 0 : break;
9363 : : case HEAPTUPLE_RECENTLY_DEAD:
9364 : : case HEAPTUPLE_DELETE_IN_PROGRESS:
9365 [ + + ]: 2 : if (visible)
9366 : 1 : xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
9367 : : else
9368 : 1 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9369 : :
9370 [ - + ]: 2 : if (TransactionIdPrecedes(xid, TransactionXmin))
9371 : : {
9372 : : /* This is like the HEAPTUPLE_DEAD case */
9373 [ # # ]: 0 : Assert(!visible);
9374 : 0 : return;
9375 : : }
9376 : 2 : break;
9377 : : case HEAPTUPLE_INSERT_IN_PROGRESS:
9378 : 7 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9379 : 7 : break;
9380 : : case HEAPTUPLE_DEAD:
9381 [ + - ]: 14 : Assert(!visible);
9382 : 14 : return;
9383 : : default:
9384 : :
9385 : : /*
9386 : : * The only way to get to this default clause is if a new value is
9387 : : * added to the enum type without adding it to this switch
9388 : : * statement. That's a bug, so elog.
9389 : : */
9390 [ # # # # ]: 0 : elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
9391 : :
9392 : : /*
9393 : : * In spite of having all enum values covered and calling elog on
9394 : : * this default, some compilers think this is a code path which
9395 : : * allows xid to be used below without initialization. Silence
9396 : : * that warning.
9397 : : */
9398 : 0 : xid = InvalidTransactionId;
9399 : 0 : }
9400 : :
9401 [ + - ]: 9 : Assert(TransactionIdIsValid(xid));
9402 [ + - ]: 9 : Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
9403 : :
9404 : : /*
9405 : : * Find top level xid. Bail out if xid is too early to be a conflict, or
9406 : : * if it's our own xid.
9407 : : */
9408 [ + + ]: 9 : if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
9409 : 8 : return;
9410 : 1 : xid = SubTransGetTopmostTransaction(xid);
9411 [ - + ]: 1 : if (TransactionIdPrecedes(xid, TransactionXmin))
9412 : 0 : return;
9413 : :
9414 : 1 : CheckForSerializableConflictOut(relation, xid, snapshot);
9415 [ - + ]: 6782812 : }
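
Editor's note: a hedged sketch of the expected call pattern, loosely modeled on heap_fetch(); the variable names are assumptions from a hypothetical caller context. The buffer must stay share-locked across both calls, since either one may set hint bits on the tuple.

    /* hypothetical caller context: buffer, tuple, snapshot, relation */
    LockBuffer(buffer, BUFFER_LOCK_SHARE);
    valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
    HeapCheckForSerializableConflictOut(valid, relation, tuple,
                                        buffer, snapshot);
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
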