Branch data Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * gist.c
4 : : * interface routines for the postgres GiST index access method.
5 : : *
6 : : *
7 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 : : * Portions Copyright (c) 1994, Regents of the University of California
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/access/gist/gist.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : #include "postgres.h"
16 : :
17 : : #include "access/gist_private.h"
18 : : #include "access/gistscan.h"
19 : : #include "access/xloginsert.h"
20 : : #include "catalog/pg_collation.h"
21 : : #include "commands/vacuum.h"
22 : : #include "miscadmin.h"
23 : : #include "nodes/execnodes.h"
24 : : #include "storage/predicate.h"
25 : : #include "utils/fmgrprotos.h"
26 : : #include "utils/index_selfuncs.h"
27 : : #include "utils/memutils.h"
28 : : #include "utils/rel.h"
29 : :
30 : : /* non-export function prototypes */
31 : : static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate);
32 : : static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
33 : : GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
34 : : static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
35 : : GISTSTATE *giststate,
36 : : IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
37 : : Buffer leftchild, Buffer rightchild,
38 : : bool unlockbuf, bool unlockleftchild);
39 : : static void gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
40 : : GISTSTATE *giststate, List *splitinfo, bool unlockbuf);
41 : : static void gistprunepage(Relation rel, Page page, Buffer buffer,
42 : : Relation heapRel);
43 : :
44 : :
45 : : #define ROTATEDIST(d) do { \
46 : : SplitPageLayout *tmp = palloc0_object(SplitPageLayout); \
47 : : tmp->block.blkno = InvalidBlockNumber; \
48 : : tmp->buffer = InvalidBuffer; \
49 : : tmp->next = (d); \
50 : : (d)=tmp; \
51 : : } while(0)
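ROTATEDIST(d) prepends a freshly zeroed SplitPageLayout node to the list 'd', so the newest half produced during a split sits at the head of the chain. A stand-alone sketch of that same step is shown below; the helper name push_split_page is hypothetical and is not part of gist.c.

    #include "postgres.h"
    #include "access/gist_private.h"

    /* Hypothetical helper equivalent to one ROTATEDIST(d) expansion. */
    static SplitPageLayout *
    push_split_page(SplitPageLayout *d)
    {
        SplitPageLayout *tmp = palloc0_object(SplitPageLayout);

        tmp->block.blkno = InvalidBlockNumber;  /* no block assigned yet */
        tmp->buffer = InvalidBuffer;            /* no buffer pinned yet */
        tmp->next = d;                          /* old head follows the new node */
        return tmp;                             /* new node becomes the head */
    }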
52 : :
53 : :
54 : : /*
55 : : * GiST handler function: return IndexAmRoutine with access method parameters
56 : : * and callbacks.
57 : : */
58 : : Datum
59 : 1231 : gisthandler(PG_FUNCTION_ARGS)
60 : : {
61 : : static const IndexAmRoutine amroutine = {
62 : : .type = T_IndexAmRoutine,
63 : : .amstrategies = 0,
64 : : .amsupport = GISTNProcs,
65 : : .amoptsprocnum = GIST_OPTIONS_PROC,
66 : : .amcanorder = false,
67 : : .amcanorderbyop = true,
68 : : .amcanhash = false,
69 : : .amconsistentequality = false,
70 : : .amconsistentordering = false,
71 : : .amcanbackward = false,
72 : : .amcanunique = false,
73 : : .amcanmulticol = true,
74 : : .amoptionalkey = true,
75 : : .amsearcharray = false,
76 : : .amsearchnulls = true,
77 : : .amstorage = true,
78 : : .amclusterable = true,
79 : : .ampredlocks = true,
80 : : .amcanparallel = false,
81 : : .amcanbuildparallel = false,
82 : : .amcaninclude = true,
83 : : .amusemaintenanceworkmem = false,
84 : : .amsummarizing = false,
85 : : .amparallelvacuumoptions =
86 : : VACUUM_OPTION_PARALLEL_BULKDEL | VACUUM_OPTION_PARALLEL_COND_CLEANUP,
87 : : .amkeytype = InvalidOid,
88 : :
89 : : .ambuild = gistbuild,
90 : : .ambuildempty = gistbuildempty,
91 : : .aminsert = gistinsert,
92 : : .aminsertcleanup = NULL,
93 : : .ambulkdelete = gistbulkdelete,
94 : : .amvacuumcleanup = gistvacuumcleanup,
95 : : .amcanreturn = gistcanreturn,
96 : : .amcostestimate = gistcostestimate,
97 : : .amgettreeheight = NULL,
98 : : .amoptions = gistoptions,
99 : : .amproperty = gistproperty,
100 : : .ambuildphasename = NULL,
101 : : .amvalidate = gistvalidate,
102 : : .amadjustmembers = gistadjustmembers,
103 : : .ambeginscan = gistbeginscan,
104 : : .amrescan = gistrescan,
105 : : .amgettuple = gistgettuple,
106 : : .amgetbitmap = gistgetbitmap,
107 : : .amendscan = gistendscan,
108 : : .ammarkpos = NULL,
109 : : .amrestrpos = NULL,
110 : : .amestimateparallelscan = NULL,
111 : : .aminitparallelscan = NULL,
112 : : .amparallelrescan = NULL,
113 : : .amtranslatestrategy = NULL,
114 : : .amtranslatecmptype = gisttranslatecmptype,
115 : : };
116 : :
117 : 1231 : PG_RETURN_POINTER(&amroutine);
118 : : }
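For context, the routine returned here is obtained by the server core through the function manager, roughly as GetIndexAmRoutine() in amapi.c does. The sketch below is an editorial illustration under that assumption, not part of gist.c; the wrapper name fetch_am_routine is hypothetical.

    #include "postgres.h"
    #include "access/amapi.h"
    #include "fmgr.h"

    /*
     * Hypothetical illustration: invoke an amhandler function (such as
     * gisthandler) by OID and return the IndexAmRoutine it builds.
     */
    static IndexAmRoutine *
    fetch_am_routine(Oid amhandler)
    {
        /* Handler functions take no arguments and return a pointer Datum. */
        Datum       d = OidFunctionCall0(amhandler);

        return (IndexAmRoutine *) DatumGetPointer(d);
    }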
119 : :
120 : : /*
121 : : * Create and return a temporary memory context for use by GiST. We
122 : : * _always_ invoke user-provided methods in a temporary memory
123 : : * context, so that memory leaks in those functions cannot cause
124 : : * problems. Also, we use some additional temporary contexts in the
125 : : * GiST code itself, to avoid the need to do some awkward manual
126 : : * memory management.
127 : : */
128 : : MemoryContext
129 : 1207 : createTempGistContext(void)
130 : : {
131 : 1207 : return AllocSetContextCreate(CurrentMemoryContext,
132 : : "GiST temporary context",
133 : : ALLOCSET_DEFAULT_SIZES);
134 : : }
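The comment above states the usage discipline; a minimal sketch of the intended call pattern (mirroring what gistinsert() does below for each tuple) follows. The function name do_gist_work is hypothetical and only marks where user-provided support functions would be invoked.

    #include "postgres.h"
    #include "utils/memutils.h"

    /* Hypothetical caller: run leak-prone work inside the temporary context. */
    static void
    do_gist_work(MemoryContext tempCxt)
    {
        MemoryContext oldCxt = MemoryContextSwitchTo(tempCxt);

        /* ... call user-provided GiST support functions here ... */

        MemoryContextSwitchTo(oldCxt);
        MemoryContextReset(tempCxt);    /* discard anything they leaked */
    }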
135 : :
136 : : /*
137 : : * gistbuildempty() -- build an empty gist index in the initialization fork
138 : : */
139 : : void
140 : 1 : gistbuildempty(Relation index)
141 : : {
142 : 1 : Buffer buffer;
143 : :
144 : : /* Initialize the root page */
145 : 1 : buffer = ExtendBufferedRel(BMR_REL(index), INIT_FORKNUM, NULL,
146 : : EB_SKIP_EXTENSION_LOCK | EB_LOCK_FIRST);
147 : :
148 : : /* Initialize and xlog buffer */
149 : 1 : START_CRIT_SECTION();
150 : 1 : GISTInitBuffer(buffer, F_LEAF);
151 : 1 : MarkBufferDirty(buffer);
152 : 1 : log_newpage_buffer(buffer, true);
153 [ + - ]: 1 : END_CRIT_SECTION();
154 : :
155 : : /* Unlock and release the buffer */
156 : 1 : UnlockReleaseBuffer(buffer);
157 : 1 : }
158 : :
159 : : /*
160 : : * gistinsert -- wrapper for GiST tuple insertion.
161 : : *
162 : : * This is the public interface routine for tuple insertion in GiSTs.
163 : : * It doesn't do any work; just locks the relation and passes the buck.
164 : : */
165 : : bool
166 : 36419 : gistinsert(Relation r, Datum *values, bool *isnull,
167 : : ItemPointer ht_ctid, Relation heapRel,
168 : : IndexUniqueCheck checkUnique,
169 : : bool indexUnchanged,
170 : : IndexInfo *indexInfo)
171 : : {
172 : 36419 : GISTSTATE *giststate = (GISTSTATE *) indexInfo->ii_AmCache;
173 : 36419 : IndexTuple itup;
174 : 36419 : MemoryContext oldCxt;
175 : :
176 : : /* Initialize GISTSTATE cache if first call in this statement */
177 [ + + ]: 36419 : if (giststate == NULL)
178 : : {
179 : 246 : oldCxt = MemoryContextSwitchTo(indexInfo->ii_Context);
180 : 246 : giststate = initGISTstate(r);
181 : 246 : giststate->tempCxt = createTempGistContext();
182 : 246 : indexInfo->ii_AmCache = giststate;
183 : 246 : MemoryContextSwitchTo(oldCxt);
184 : 246 : }
185 : :
186 : 36419 : oldCxt = MemoryContextSwitchTo(giststate->tempCxt);
187 : :
188 : 36419 : itup = gistFormTuple(giststate, r, values, isnull, true);
189 : 36419 : itup->t_tid = *ht_ctid;
190 : :
191 : 36419 : gistdoinsert(r, itup, 0, giststate, heapRel, false);
192 : :
193 : : /* cleanup */
194 : 36419 : MemoryContextSwitchTo(oldCxt);
195 : 36419 : MemoryContextReset(giststate->tempCxt);
196 : :
197 : 36419 : return false;
198 : 36419 : }
199 : :
200 : :
201 : : /*
202 : : * Place tuples from 'itup' to 'buffer'. If 'oldoffnum' is valid, the tuple
203 : : * at that offset is atomically removed along with inserting the new tuples.
204 : : * This is used to replace a tuple with a new one.
205 : : *
206 : : * If 'leftchildbuf' is valid, we're inserting the downlink for the page
207 : : * to the right of 'leftchildbuf', or updating the downlink for 'leftchildbuf'.
208 : : * F_FOLLOW_RIGHT flag on 'leftchildbuf' is cleared and NSN is set.
209 : : *
210 : : * If 'markfollowright' is true and the page is split, the left child is
211 : : * marked with F_FOLLOW_RIGHT flag. That is the normal case. During buffered
212 : : * index build, however, there is no concurrent access and the page splitting
213 : : * is done in a slightly simpler fashion, and false is passed.
214 : : *
215 : : * If there is not enough room on the page, it is split. All the split
216 : : * pages are kept pinned and locked and returned in *splitinfo; the caller
217 : : * is responsible for inserting the downlinks for them. However, if
218 : : * 'buffer' is the root page and it needs to be split, gistplacetopage()
219 : : * performs the split as one atomic operation, and *splitinfo is set to NIL.
220 : : * In that case, we continue to hold the root page locked, and the child
221 : : * pages are released; note that new tuple(s) are *not* on the root page
222 : : * but in one of the new child pages.
223 : : *
224 : : * If 'newblkno' is not NULL, returns the block number of the page the first
225 : : * new/updated tuple was inserted to. Usually it's the given page, but could
226 : : * be its right sibling if the page was split.
227 : : *
228 : : * Returns 'true' if the page was split, 'false' otherwise.
229 : : */
230 : : bool
231 : 202698 : gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
232 : : Buffer buffer,
233 : : IndexTuple *itup, int ntup, OffsetNumber oldoffnum,
234 : : BlockNumber *newblkno,
235 : : Buffer leftchildbuf,
236 : : List **splitinfo,
237 : : bool markfollowright,
238 : : Relation heapRel,
239 : : bool is_build)
240 : : {
241 : 202698 : BlockNumber blkno = BufferGetBlockNumber(buffer);
242 : 202698 : Page page = BufferGetPage(buffer);
243 : 202698 : bool is_leaf = (GistPageIsLeaf(page)) ? true : false;
244 : 202698 : XLogRecPtr recptr;
245 : 202698 : bool is_split;
246 : :
247 : : /*
248 : : * Refuse to modify a page that's incompletely split. This should not
249 : : * happen because we finish any incomplete splits while we walk down the
250 : : * tree. However, it's remotely possible that another concurrent inserter
251 : : * splits a parent page, and errors out before completing the split. We
252 : : * will just throw an error in that case, and leave any split we had in
253 : : * progress unfinished too. The next insert that comes along will clean up
254 : : * the mess.
255 : : */
256 [ + - ]: 202698 : if (GistFollowRight(page))
257 [ # # # # ]: 0 : elog(ERROR, "concurrent GiST page split was incomplete");
258 : :
259 : : /* should never try to insert to a deleted page */
260 [ + - ]: 202698 : Assert(!GistPageIsDeleted(page));
261 : :
262 : 202698 : *splitinfo = NIL;
263 : :
264 : : /*
265 : : * if isupdate, remove old key: This node's key has been modified, either
266 : : * because a child split occurred or because we needed to adjust our key
267 : : * for an insert in a child node. Therefore, remove the old version of
268 : : * this node's key.
269 : : *
270 : : * for WAL replay, in the non-split case we handle this by setting up a
271 : : * one-element todelete array; in the split case, it's handled implicitly
272 : : * because the tuple vector passed to gistSplit won't include this tuple.
273 : : */
274 : 202698 : is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
275 : :
276 : : /*
277 : : * If the leaf page is full, first try to delete any dead tuples, and then
278 : : * check again.
279 : : */
280 [ + + + + : 202698 : if (is_split && GistPageIsLeaf(page) && GistPageHasGarbage(page))
+ - ]
281 : : {
282 : 0 : gistprunepage(rel, page, buffer, heapRel);
283 : 0 : is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
284 : 0 : }
285 : :
286 [ + + ]: 202698 : if (is_split)
287 : : {
288 : : /* no space for insertion */
289 : 1674 : IndexTuple *itvec;
290 : 1674 : int tlen;
291 : 1674 : SplitPageLayout *dist = NULL,
292 : : *ptr;
293 : 1674 : BlockNumber oldrlink = InvalidBlockNumber;
294 : 1674 : GistNSN oldnsn = 0;
295 : 1674 : SplitPageLayout rootpg;
296 : 1674 : bool is_rootsplit;
297 : 1674 : int npage;
298 : :
299 : 1674 : is_rootsplit = (blkno == GIST_ROOT_BLKNO);
300 : :
301 : : /*
302 : : * Form the vector of index tuples to split. If we're replacing an old
303 : : * tuple, remove the old version from the vector.
304 : : */
305 : 1674 : itvec = gistextractpage(page, &tlen);
306 [ + + + + ]: 1674 : if (OffsetNumberIsValid(oldoffnum))
307 : : {
308 : : /* on inner page we should remove old tuple */
309 : 13 : int pos = oldoffnum - FirstOffsetNumber;
310 : :
311 : 13 : tlen--;
312 [ + + ]: 13 : if (pos != tlen)
313 : 3 : memmove(itvec + pos, itvec + pos + 1, sizeof(IndexTuple) * (tlen - pos));
314 : 13 : }
315 : 1674 : itvec = gistjoinvector(itvec, &tlen, itup, ntup);
316 : 1674 : dist = gistSplit(rel, page, itvec, tlen, giststate);
317 : :
318 : : /*
319 : : * Check that split didn't produce too many pages.
320 : : */
321 : 1674 : npage = 0;
322 [ + + ]: 5022 : for (ptr = dist; ptr; ptr = ptr->next)
323 : 3348 : npage++;
324 : : /* in a root split, we'll add one more page to the list below */
325 [ + + ]: 1674 : if (is_rootsplit)
326 : 25 : npage++;
327 [ + - ]: 1674 : if (npage > GIST_MAX_SPLIT_PAGES)
328 [ # # # # ]: 0 : elog(ERROR, "GiST page split into too many halves (%d, maximum %d)",
329 : : npage, GIST_MAX_SPLIT_PAGES);
330 : :
331 : : /*
332 : : * Set up pages to work with. Allocate new buffers for all but the
333 : : * leftmost page. The original page becomes the new leftmost page, and
334 : : * is just replaced with the new contents.
335 : : *
336 : : * For a root split, allocate new buffers for all child pages; the
337 : : * original page is overwritten with a new root page containing
338 : : * downlinks to the new child pages.
339 : : */
340 : 1674 : ptr = dist;
341 [ + + ]: 1674 : if (!is_rootsplit)
342 : : {
343 : : /* save old rightlink and NSN */
344 : 1649 : oldrlink = GistPageGetOpaque(page)->rightlink;
345 : 1649 : oldnsn = GistPageGetNSN(page);
346 : :
347 : 1649 : dist->buffer = buffer;
348 : 1649 : dist->block.blkno = BufferGetBlockNumber(buffer);
349 : 1649 : dist->page = PageGetTempPageCopySpecial(BufferGetPage(buffer));
350 : :
351 : : /* clean all flags except F_LEAF */
352 : 1649 : GistPageGetOpaque(dist->page)->flags = (is_leaf) ? F_LEAF : 0;
353 : :
354 : 1649 : ptr = ptr->next;
355 : 1649 : }
356 [ + + ]: 3373 : for (; ptr; ptr = ptr->next)
357 : : {
358 : : /* Allocate new page */
359 : 1699 : ptr->buffer = gistNewBuffer(rel, heapRel);
360 : 1699 : GISTInitBuffer(ptr->buffer, (is_leaf) ? F_LEAF : 0);
361 : 1699 : ptr->page = BufferGetPage(ptr->buffer);
362 : 1699 : ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
363 : 3398 : PredicateLockPageSplit(rel,
364 : 1699 : BufferGetBlockNumber(buffer),
365 : 1699 : BufferGetBlockNumber(ptr->buffer));
366 : 1699 : }
367 : :
368 : : /*
369 : : * Now that we know which blocks the new pages go to, set up downlink
370 : : * tuples to point to them.
371 : : */
372 [ + + ]: 5022 : for (ptr = dist; ptr; ptr = ptr->next)
373 : : {
374 : 3348 : ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
375 : 3348 : GistTupleSetValid(ptr->itup);
376 : 3348 : }
377 : :
378 : : /*
379 : : * If this is a root split, we construct the new root page with the
380 : : * downlinks here directly, instead of requiring the caller to insert
381 : : * them. Add the new root page to the list along with the child pages.
382 : : */
383 [ + + ]: 1674 : if (is_rootsplit)
384 : : {
385 : 25 : IndexTuple *downlinks;
386 : 25 : int ndownlinks = 0;
387 : 25 : int i;
388 : :
389 : 25 : rootpg.buffer = buffer;
390 : 25 : rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer));
391 : 25 : GistPageGetOpaque(rootpg.page)->flags = 0;
392 : :
393 : : /* Prepare a vector of all the downlinks */
394 [ + + ]: 75 : for (ptr = dist; ptr; ptr = ptr->next)
395 : 50 : ndownlinks++;
396 : 25 : downlinks = palloc_array(IndexTuple, ndownlinks);
397 [ + + ]: 75 : for (i = 0, ptr = dist; ptr; ptr = ptr->next)
398 : 50 : downlinks[i++] = ptr->itup;
399 : :
400 : 25 : rootpg.block.blkno = GIST_ROOT_BLKNO;
401 : 25 : rootpg.block.num = ndownlinks;
402 : 50 : rootpg.list = gistfillitupvec(downlinks, ndownlinks,
403 : 25 : &(rootpg.lenlist));
404 : 25 : rootpg.itup = NULL;
405 : :
406 : 25 : rootpg.next = dist;
407 : 25 : dist = &rootpg;
408 : 25 : }
409 : : else
410 : : {
411 : : /* Prepare split-info to be returned to caller */
412 [ + + ]: 4947 : for (ptr = dist; ptr; ptr = ptr->next)
413 : : {
414 : 3298 : GISTPageSplitInfo *si = palloc_object(GISTPageSplitInfo);
415 : :
416 : 3298 : si->buf = ptr->buffer;
417 : 3298 : si->downlink = ptr->itup;
418 : 3298 : *splitinfo = lappend(*splitinfo, si);
419 : 3298 : }
420 : : }
421 : :
422 : : /*
423 : : * Fill all pages. All the pages are new, i.e. freshly allocated empty
424 : : * pages, or a temporary copy of the old page.
425 : : */
426 [ + + ]: 5047 : for (ptr = dist; ptr; ptr = ptr->next)
427 : : {
428 : 3373 : char *data = (char *) (ptr->list);
429 : :
430 [ + + ]: 224515 : for (int i = 0; i < ptr->block.num; i++)
431 : : {
432 : 221142 : IndexTuple thistup = (IndexTuple) data;
433 : :
434 [ + - ]: 221142 : if (PageAddItem(ptr->page, data, IndexTupleSize(thistup), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber)
435 [ # # # # ]: 0 : elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(rel));
436 : :
437 : : /*
438 : : * If this is the first inserted/updated tuple, let the caller
439 : : * know which page it landed on.
440 : : */
441 [ + + + + ]: 221142 : if (newblkno && ItemPointerEquals(&thistup->t_tid, &(*itup)->t_tid))
442 : 129 : *newblkno = ptr->block.blkno;
443 : :
444 : 221142 : data += IndexTupleSize(thistup);
445 : 221142 : }
446 : :
447 : : /* Set up rightlinks */
448 [ + + + + ]: 3373 : if (ptr->next && ptr->block.blkno != GIST_ROOT_BLKNO)
449 : 1674 : GistPageGetOpaque(ptr->page)->rightlink =
450 : 1674 : ptr->next->block.blkno;
451 : : else
452 : 1699 : GistPageGetOpaque(ptr->page)->rightlink = oldrlink;
453 : :
454 : : /*
455 : : * Mark all but the right-most page with the follow-right
456 : : * flag. It will be cleared as soon as the downlink is inserted
457 : : * into the parent, but this ensures that if we error out before
458 : : * that, the index is still consistent. (in buffering build mode,
459 : : * any error will abort the index build anyway, so this is not
460 : : * needed.)
461 : : */
462 [ + + + + : 3373 : if (ptr->next && !is_rootsplit && markfollowright)
+ + ]
463 : 1521 : GistMarkFollowRight(ptr->page);
464 : : else
465 : 1852 : GistClearFollowRight(ptr->page);
466 : :
467 : : /*
468 : : * Copy the NSN of the original page to all pages. The
469 : : * F_FOLLOW_RIGHT flags ensure that scans will follow the
470 : : * rightlinks until the downlinks are inserted.
471 : : */
472 : 3373 : GistPageSetNSN(ptr->page, oldnsn);
473 : 3373 : }
474 : :
475 : : /*
476 : : * gistXLogSplit() needs to WAL log a lot of pages, prepare WAL
477 : : * insertion for that. NB: The number of pages and data segments
478 : : * specified here must match the calculations in gistXLogSplit()!
479 : : */
480 [ + + + + : 1674 : if (!is_build && RelationNeedsWAL(rel))
+ - + - -
+ ]
481 : 449 : XLogEnsureRecordSpace(npage, 1 + npage * 2);
482 : :
483 : 1674 : START_CRIT_SECTION();
484 : :
485 : : /*
486 : : * Must mark buffers dirty before XLogInsert, even though we'll still
487 : : * be changing their opaque fields below.
488 : : */
489 [ + + ]: 5047 : for (ptr = dist; ptr; ptr = ptr->next)
490 : 3373 : MarkBufferDirty(ptr->buffer);
491 [ + + ]: 1674 : if (BufferIsValid(leftchildbuf))
492 : 10 : MarkBufferDirty(leftchildbuf);
493 : :
494 : : /*
495 : : * The first page in the chain was a temporary working copy meant to
496 : : * replace the old page. Copy it over the old page.
497 : : */
498 : 1674 : PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
499 : 1674 : dist->page = BufferGetPage(dist->buffer);
500 : :
501 : : /*
502 : : * Write the WAL record.
503 : : *
504 : : * If we're building a new index, however, we don't WAL-log changes
505 : : * yet. The LSN-NSN interlock between parent and child requires that
506 : : * LSNs never move backwards, so set the LSNs to a value that's
507 : : * smaller than any real or fake unlogged LSN that might be generated
508 : : * later. (There can't be any concurrent scans during index build, so
509 : : * we don't need to be able to detect concurrent splits yet.)
510 : : */
511 [ + + ]: 1674 : if (is_build)
512 : 1224 : recptr = GistBuildLSN;
513 : : else
514 : : {
515 [ + + + - : 450 : if (RelationNeedsWAL(rel))
+ - - + ]
516 : 898 : recptr = gistXLogSplit(is_leaf,
517 : 449 : dist, oldrlink, oldnsn, leftchildbuf,
518 : 449 : markfollowright);
519 : : else
520 : 1 : recptr = gistGetFakeLSN(rel);
521 : : }
522 : :
523 [ + + ]: 5047 : for (ptr = dist; ptr; ptr = ptr->next)
524 : 3373 : PageSetLSN(ptr->page, recptr);
525 : :
526 : : /*
527 : : * Return the new child buffers to the caller.
528 : : *
529 : : * If this was a root split, we've already inserted the downlink
530 : : * pointers, in the form of a new root page. Therefore we can release
531 : : * all the new buffers, and keep just the root page locked.
532 : : */
533 [ + + ]: 1674 : if (is_rootsplit)
534 : : {
535 [ + + ]: 75 : for (ptr = dist->next; ptr; ptr = ptr->next)
536 : 50 : UnlockReleaseBuffer(ptr->buffer);
537 : 25 : }
538 : 1674 : }
539 : : else
540 : : {
541 : : /*
542 : : * Enough space. We always get here if ntup==0.
543 : : */
544 : 201024 : START_CRIT_SECTION();
545 : :
546 : : /*
547 : : * Delete old tuple if any, then insert new tuple(s) if any. If
548 : : * possible, use the fast path of PageIndexTupleOverwrite.
549 : : */
550 [ + + + + ]: 201024 : if (OffsetNumberIsValid(oldoffnum))
551 : : {
552 [ + + ]: 92606 : if (ntup == 1)
553 : : {
554 : : /* One-for-one replacement, so use PageIndexTupleOverwrite */
555 [ + - ]: 90970 : if (!PageIndexTupleOverwrite(page, oldoffnum, *itup, IndexTupleSize(*itup)))
556 [ # # # # ]: 0 : elog(ERROR, "failed to add item to index page in \"%s\"",
557 : : RelationGetRelationName(rel));
558 : 90970 : }
559 : : else
560 : : {
561 : : /* Delete old, then append new tuple(s) to page */
562 : 1636 : PageIndexTupleDelete(page, oldoffnum);
563 : 1636 : gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
564 : : }
565 : 92606 : }
566 : : else
567 : : {
568 : : /* Just append new tuples at the end of the page */
569 : 108418 : gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
570 : : }
571 : :
572 : 201024 : MarkBufferDirty(buffer);
573 : :
574 [ + + ]: 201024 : if (BufferIsValid(leftchildbuf))
575 : 1511 : MarkBufferDirty(leftchildbuf);
576 : :
577 [ + + ]: 201024 : if (is_build)
578 : 143706 : recptr = GistBuildLSN;
579 : : else
580 : : {
581 [ + + + - : 57318 : if (RelationNeedsWAL(rel))
+ - - + ]
582 : : {
583 : 57305 : OffsetNumber ndeloffs = 0,
584 : : deloffs[1];
585 : :
586 [ + + + + ]: 57305 : if (OffsetNumberIsValid(oldoffnum))
587 : : {
588 : 21345 : deloffs[0] = oldoffnum;
589 : 21345 : ndeloffs = 1;
590 : 21345 : }
591 : :
592 : 114610 : recptr = gistXLogUpdate(buffer,
593 : 57305 : deloffs, ndeloffs, itup, ntup,
594 : 57305 : leftchildbuf);
595 : 57305 : }
596 : : else
597 : 13 : recptr = gistGetFakeLSN(rel);
598 : : }
599 : 201024 : PageSetLSN(page, recptr);
600 : :
601 [ + + ]: 201024 : if (newblkno)
602 : 17395 : *newblkno = blkno;
603 : : }
604 : :
605 : : /*
606 : : * If we inserted the downlink for a child page, set NSN and clear
607 : : * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
608 : : * follow the rightlink if and only if they looked at the parent page
609 : : * before we inserted the downlink.
610 : : *
611 : : * Note that we do this *after* writing the WAL record. That means that
612 : : * the possible full page image in the WAL record does not include these
613 : : * changes, and they must be replayed even if the page is restored from
614 : : * the full page image. There's a chicken-and-egg problem: if we updated
615 : : * the child pages first, we wouldn't know the recptr of the WAL record
616 : : * we're about to write.
617 : : */
618 [ + + ]: 202698 : if (BufferIsValid(leftchildbuf))
619 : : {
620 : 1521 : Page leftpg = BufferGetPage(leftchildbuf);
621 : :
622 : 1521 : GistPageSetNSN(leftpg, recptr);
623 : 1521 : GistClearFollowRight(leftpg);
624 : :
625 : 1521 : PageSetLSN(leftpg, recptr);
626 : 1521 : }
627 : :
628 [ + - ]: 202698 : END_CRIT_SECTION();
629 : :
630 : 405396 : return is_split;
631 : 202698 : }
632 : :
633 : : /*
634 : : * Workhorse routine for doing insertion into a GiST index. Note that
635 : : * this routine assumes it is invoked in a short-lived memory context,
636 : : * so it does not bother releasing palloc'd allocations.
637 : : */
638 : : void
639 : 337490 : gistdoinsert(Relation r, IndexTuple itup, Size freespace,
640 : : GISTSTATE *giststate, Relation heapRel, bool is_build)
641 : : {
642 : 337490 : ItemId iid;
643 : 337490 : IndexTuple idxtuple;
644 : 337490 : GISTInsertStack firststack;
645 : 337490 : GISTInsertStack *stack;
646 : 337490 : GISTInsertState state;
647 : 337490 : bool xlocked = false;
648 : :
649 : 337490 : memset(&state, 0, sizeof(GISTInsertState));
650 : 337490 : state.freespace = freespace;
651 : 337490 : state.r = r;
652 : 337490 : state.heapRel = heapRel;
653 : 337490 : state.is_build = is_build;
654 : :
655 : : /* Start from the root */
656 : 337490 : firststack.blkno = GIST_ROOT_BLKNO;
657 : 337490 : firststack.lsn = 0;
658 : 337490 : firststack.retry_from_parent = false;
659 : 337490 : firststack.parent = NULL;
660 : 337490 : firststack.downlinkoffnum = InvalidOffsetNumber;
661 : 337490 : state.stack = stack = &firststack;
662 : :
663 : : /*
664 : : * Walk down along the path of smallest penalty, updating the parent
665 : : * pointers with the key we're inserting as we go. If we crash in the
666 : : * middle, the tree is consistent, although the possible parent updates
667 : : * were a waste.
668 : : */
669 : 454148 : for (;;)
670 : : {
671 : : /*
672 : : * If we split an internal page while descending the tree, we have to
673 : : * retry at the parent. (Normally, the LSN-NSN interlock below would
674 : : * also catch this and cause us to retry. But LSNs are not updated
675 : : * during index build.)
676 : : */
677 [ - + ]: 454148 : while (stack->retry_from_parent)
678 : : {
679 [ # # ]: 0 : if (xlocked)
680 : 0 : LockBuffer(stack->buffer, GIST_UNLOCK);
681 : 0 : xlocked = false;
682 : 0 : ReleaseBuffer(stack->buffer);
683 : 0 : state.stack = stack = stack->parent;
684 : : }
685 : :
686 [ + + ]: 454148 : if (!XLogRecPtrIsValid(stack->lsn))
687 : 220832 : stack->buffer = ReadBuffer(state.r, stack->blkno);
688 : :
689 : : /*
690 : : * Be optimistic and grab shared lock first. Swap it for an exclusive
691 : : * lock later if we need to update the page.
692 : : */
693 [ + + ]: 454148 : if (!xlocked)
694 : : {
695 : 220832 : LockBuffer(stack->buffer, GIST_SHARE);
696 : 220832 : gistcheckpage(state.r, stack->buffer);
697 : 220832 : }
698 : :
699 : 454148 : stack->page = BufferGetPage(stack->buffer);
700 [ + + ]: 454148 : stack->lsn = xlocked ?
701 : 454148 : PageGetLSN(stack->page) : BufferGetLSNAtomic(stack->buffer);
702 [ + + + - : 454148 : Assert(!RelationNeedsWAL(state.r) || XLogRecPtrIsValid(stack->lsn));
+ + # # ]
703 : :
704 : : /*
705 : : * If this page was split but the downlink was never inserted to the
706 : : * parent because the inserting backend crashed before doing that, fix
707 : : * that now.
708 : : */
709 [ - + ]: 454148 : if (GistFollowRight(stack->page))
710 : : {
711 [ # # ]: 0 : if (!xlocked)
712 : : {
713 : 0 : LockBuffer(stack->buffer, GIST_UNLOCK);
714 : 0 : LockBuffer(stack->buffer, GIST_EXCLUSIVE);
715 : 0 : xlocked = true;
716 : : /* someone might've completed the split when we unlocked */
717 [ # # ]: 0 : if (!GistFollowRight(stack->page))
718 : 0 : continue;
719 : 0 : }
720 : 0 : gistfixsplit(&state, giststate);
721 : :
722 : 0 : UnlockReleaseBuffer(stack->buffer);
723 : 0 : xlocked = false;
724 : 0 : state.stack = stack = stack->parent;
725 : 0 : continue;
726 : : }
727 : :
728 [ + + ]: 454148 : if ((stack->blkno != GIST_ROOT_BLKNO &&
729 [ + + ]: 454148 : stack->parent->lsn < GistPageGetNSN(stack->page)) ||
730 : 337490 : GistPageIsDeleted(stack->page))
731 : : {
732 : : /*
733 : : * Concurrent split or page deletion detected. There's no
734 : : * guarantee that the downlink for this page is consistent with
735 : : * the tuple we're inserting anymore, so go back to parent and
736 : : * rechoose the best child.
737 : : */
738 : 233316 : UnlockReleaseBuffer(stack->buffer);
739 : 233316 : xlocked = false;
740 : 233316 : state.stack = stack = stack->parent;
741 : 233316 : continue;
742 : : }
743 : :
744 [ + + ]: 220832 : if (!GistPageIsLeaf(stack->page))
745 : : {
746 : : /*
747 : : * This is an internal page so continue to walk down the tree.
748 : : * Find the child node that has the minimum insertion penalty.
749 : : */
750 : 116658 : BlockNumber childblkno;
751 : 116658 : IndexTuple newtup;
752 : 116658 : GISTInsertStack *item;
753 : 116658 : OffsetNumber downlinkoffnum;
754 : :
755 : 116658 : downlinkoffnum = gistchoose(state.r, stack->page, itup, giststate);
756 : 116658 : iid = PageGetItemId(stack->page, downlinkoffnum);
757 : 116658 : idxtuple = (IndexTuple) PageGetItem(stack->page, iid);
758 : 116658 : childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
759 : :
760 : : /*
761 : : * Check that it's not a leftover invalid tuple from pre-9.1
762 : : */
763 [ + - ]: 116658 : if (GistTupleIsInvalid(idxtuple))
764 [ # # # # ]: 0 : ereport(ERROR,
765 : : (errmsg("index \"%s\" contains an inner tuple marked as invalid",
766 : : RelationGetRelationName(r)),
767 : : errdetail("This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1."),
768 : : errhint("Please REINDEX it.")));
769 : :
770 : : /*
771 : : * Check that the key representing the target child node is
772 : : * consistent with the key we're inserting. Update it if it's not.
773 : : */
774 : 116658 : newtup = gistgetadjusted(state.r, idxtuple, itup, giststate);
775 [ + + ]: 116658 : if (newtup)
776 : : {
777 : : /*
778 : : * Swap shared lock for an exclusive one. Beware, the page may
779 : : * change while we unlock/lock the page...
780 : : */
781 [ - + ]: 79479 : if (!xlocked)
782 : : {
783 : 79479 : LockBuffer(stack->buffer, GIST_UNLOCK);
784 : 79479 : LockBuffer(stack->buffer, GIST_EXCLUSIVE);
785 : 79479 : xlocked = true;
786 : 79479 : stack->page = BufferGetPage(stack->buffer);
787 : :
788 [ - + ]: 79479 : if (PageGetLSN(stack->page) != stack->lsn)
789 : : {
790 : : /* the page was changed while we unlocked it, retry */
791 : 0 : continue;
792 : : }
793 : 79479 : }
794 : :
795 : : /*
796 : : * Update the tuple.
797 : : *
798 : : * We still hold the lock after gistinserttuple(), but it
799 : : * might have to split the page to make the updated tuple fit.
800 : : * In that case the updated tuple might migrate to the other
801 : : * half of the split, so we have to go back to the parent and
802 : : * descend back to the half that's a better fit for the new
803 : : * tuple.
804 : : */
805 [ - + - + ]: 158958 : if (gistinserttuple(&state, stack, giststate, newtup,
806 : 79479 : downlinkoffnum))
807 : : {
808 : : /*
809 : : * If this was a root split, the root page continues to be
810 : : * the parent and the updated tuple went to one of the
811 : : * child pages, so we just need to retry from the root
812 : : * page.
813 : : */
814 [ # # ]: 0 : if (stack->blkno != GIST_ROOT_BLKNO)
815 : : {
816 : 0 : UnlockReleaseBuffer(stack->buffer);
817 : 0 : xlocked = false;
818 : 0 : state.stack = stack = stack->parent;
819 : 0 : }
820 : 0 : continue;
821 : : }
822 : 79479 : }
823 : 116658 : LockBuffer(stack->buffer, GIST_UNLOCK);
824 : 116658 : xlocked = false;
825 : :
826 : : /* descend to the chosen child */
827 : 116658 : item = palloc0_object(GISTInsertStack);
828 : 116658 : item->blkno = childblkno;
829 : 116658 : item->parent = stack;
830 : 116658 : item->downlinkoffnum = downlinkoffnum;
831 : 116658 : state.stack = stack = item;
832 [ - - + ]: 116658 : }
833 : : else
834 : : {
835 : : /*
836 : : * Leaf page. Insert the new key. We've already updated all the
837 : : * parents on the way down, but we might have to split the page if
838 : : * it doesn't fit. gistinserttuple() will take care of that.
839 : : */
840 : :
841 : : /*
842 : : * Swap shared lock for an exclusive one. Be careful, the page may
843 : : * change while we unlock/lock the page...
844 : : */
845 [ - + ]: 104174 : if (!xlocked)
846 : : {
847 : 104174 : LockBuffer(stack->buffer, GIST_UNLOCK);
848 : 104174 : LockBuffer(stack->buffer, GIST_EXCLUSIVE);
849 : 104174 : xlocked = true;
850 : 104174 : stack->page = BufferGetPage(stack->buffer);
851 : 104174 : stack->lsn = PageGetLSN(stack->page);
852 : :
853 [ + + ]: 104174 : if (stack->blkno == GIST_ROOT_BLKNO)
854 : : {
855 : : /*
856 : : * The only page that can change from a leaf into an inner page
857 : : * is the root page, so for the root we must recheck this.
858 : : */
859 [ + - ]: 3058 : if (!GistPageIsLeaf(stack->page))
860 : : {
861 : : /*
862 : : * Very rare situation: while we had the page unlocked, the
863 : : * index grew beyond its single (root) page.
864 : : */
865 : 0 : LockBuffer(stack->buffer, GIST_UNLOCK);
866 : 0 : xlocked = false;
867 : 0 : continue;
868 : : }
869 : :
870 : : /*
871 : : * we don't need to check root split, because checking
872 : : * leaf/inner is enough to recognize split for root
873 : : */
874 : 3058 : }
875 [ + - ]: 101116 : else if ((GistFollowRight(stack->page) ||
876 [ + - + - ]: 101116 : stack->parent->lsn < GistPageGetNSN(stack->page)) ||
877 : 101116 : GistPageIsDeleted(stack->page))
878 : : {
879 : : /*
880 : : * The page was split or deleted while we momentarily
881 : : * unlocked the page. Go back to parent.
882 : : */
883 : 0 : UnlockReleaseBuffer(stack->buffer);
884 : 0 : xlocked = false;
885 : 0 : state.stack = stack = stack->parent;
886 : 0 : continue;
887 : : }
888 : 104174 : }
889 : :
890 : : /* now state.stack->(page, buffer and blkno) points to leaf page */
891 : :
892 : 104174 : gistinserttuple(&state, stack, giststate, itup,
893 : : InvalidOffsetNumber);
894 : 104174 : LockBuffer(stack->buffer, GIST_UNLOCK);
895 : :
896 : : /* Release any pins we might still hold before exiting */
897 [ + + ]: 325006 : for (; stack; stack = stack->parent)
898 : 220832 : ReleaseBuffer(stack->buffer);
899 : 104174 : break;
900 : : }
901 : : }
902 : 104174 : }
903 : :
904 : : /*
905 : : * Traverse the tree to find path from root page to specified "child" block.
906 : : *
907 : : * returns a new insertion stack, starting from the parent of "child", up
908 : : * to the root. *downlinkoffnum is set to the offset of the downlink in the
909 : : * direct parent of child.
910 : : *
911 : : * To prevent deadlocks, this should lock only one page at a time.
912 : : */
913 : : static GISTInsertStack *
914 : 0 : gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
915 : : {
916 : 0 : Page page;
917 : 0 : Buffer buffer;
918 : 0 : OffsetNumber i,
919 : : maxoff;
920 : 0 : ItemId iid;
921 : 0 : IndexTuple idxtuple;
922 : 0 : List *fifo;
923 : 0 : GISTInsertStack *top,
924 : : *ptr;
925 : 0 : BlockNumber blkno;
926 : :
927 : 0 : top = palloc0_object(GISTInsertStack);
928 : 0 : top->blkno = GIST_ROOT_BLKNO;
929 : 0 : top->downlinkoffnum = InvalidOffsetNumber;
930 : :
931 : 0 : fifo = list_make1(top);
932 [ # # ]: 0 : while (fifo != NIL)
933 : : {
934 : : /* Get next page to visit */
935 : 0 : top = linitial(fifo);
936 : 0 : fifo = list_delete_first(fifo);
937 : :
938 : 0 : buffer = ReadBuffer(r, top->blkno);
939 : 0 : LockBuffer(buffer, GIST_SHARE);
940 : 0 : gistcheckpage(r, buffer);
941 : 0 : page = BufferGetPage(buffer);
942 : :
943 [ # # ]: 0 : if (GistPageIsLeaf(page))
944 : : {
945 : : /*
946 : : * Because we scan the index top-down, all the rest of the pages
947 : : * in the queue must be leaf pages as well.
948 : : */
949 : 0 : UnlockReleaseBuffer(buffer);
950 : 0 : break;
951 : : }
952 : :
953 : : /* currently, internal pages are never deleted */
954 [ # # ]: 0 : Assert(!GistPageIsDeleted(page));
955 : :
956 : 0 : top->lsn = BufferGetLSNAtomic(buffer);
957 : :
958 : : /*
959 : : * If F_FOLLOW_RIGHT is set, the page to the right doesn't have a
960 : : * downlink. This should not normally happen..
961 : : */
962 [ # # ]: 0 : if (GistFollowRight(page))
963 [ # # # # ]: 0 : elog(ERROR, "concurrent GiST page split was incomplete");
964 : :
965 [ # # # # : 0 : if (top->parent && top->parent->lsn < GistPageGetNSN(page) &&
# # ]
966 : 0 : GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ )
967 : : {
968 : : /*
969 : : * Page was split while we looked elsewhere. We didn't see the
970 : : * downlink to the right page when we scanned the parent, so add
971 : : * it to the queue now.
972 : : *
973 : : * Put the right page ahead of the queue, so that we visit it
974 : : * next. That's important, because if this is the lowest internal
975 : : * level, just above leaves, we might already have queued up some
976 : : * leaf pages, and we assume that there can't be any non-leaf
977 : : * pages behind leaf pages.
978 : : */
979 : 0 : ptr = palloc0_object(GISTInsertStack);
980 : 0 : ptr->blkno = GistPageGetOpaque(page)->rightlink;
981 : 0 : ptr->downlinkoffnum = InvalidOffsetNumber;
982 : 0 : ptr->parent = top->parent;
983 : :
984 : 0 : fifo = lcons(ptr, fifo);
985 : 0 : }
986 : :
987 : 0 : maxoff = PageGetMaxOffsetNumber(page);
988 : :
989 [ # # ]: 0 : for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
990 : : {
991 : 0 : iid = PageGetItemId(page, i);
992 : 0 : idxtuple = (IndexTuple) PageGetItem(page, iid);
993 : 0 : blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
994 [ # # ]: 0 : if (blkno == child)
995 : : {
996 : : /* Found it! */
997 : 0 : UnlockReleaseBuffer(buffer);
998 : 0 : *downlinkoffnum = i;
999 : 0 : return top;
1000 : : }
1001 : : else
1002 : : {
1003 : : /* Append this child to the list of pages to visit later */
1004 : 0 : ptr = palloc0_object(GISTInsertStack);
1005 : 0 : ptr->blkno = blkno;
1006 : 0 : ptr->downlinkoffnum = i;
1007 : 0 : ptr->parent = top;
1008 : :
1009 : 0 : fifo = lappend(fifo, ptr);
1010 : : }
1011 : 0 : }
1012 : :
1013 : 0 : UnlockReleaseBuffer(buffer);
1014 : : }
1015 : :
1016 [ # # # # ]: 0 : elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
1017 : : RelationGetRelationName(r), child);
1018 : 0 : return NULL; /* keep compiler quiet */
1019 : 0 : }
1020 : :
1021 : : /*
1022 : : * Updates the stack so that child->parent is the correct parent of the
1023 : : * child. child->parent must be exclusively locked on entry, and will
1024 : : * remain so at exit, but it might not be the same page anymore.
1025 : : */
1026 : : static void
1027 : 1521 : gistFindCorrectParent(Relation r, GISTInsertStack *child, bool is_build)
1028 : : {
1029 : 1521 : GISTInsertStack *parent = child->parent;
1030 : 1521 : ItemId iid;
1031 : 1521 : IndexTuple idxtuple;
1032 : 1521 : OffsetNumber maxoff;
1033 : 1521 : GISTInsertStack *ptr;
1034 : :
1035 : 1521 : gistcheckpage(r, parent->buffer);
1036 : 1521 : parent->page = BufferGetPage(parent->buffer);
1037 : 1521 : maxoff = PageGetMaxOffsetNumber(parent->page);
1038 : :
1039 : : /* Check if the downlink is still where it was before */
1040 [ + - - + ]: 1521 : if (child->downlinkoffnum != InvalidOffsetNumber && child->downlinkoffnum <= maxoff)
1041 : : {
1042 : 1521 : iid = PageGetItemId(parent->page, child->downlinkoffnum);
1043 : 1521 : idxtuple = (IndexTuple) PageGetItem(parent->page, iid);
1044 [ + - ]: 1521 : if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == child->blkno)
1045 : 1521 : return; /* still there */
1046 : 0 : }
1047 : :
1048 : : /*
1049 : : * The page has changed since we looked. During normal operation, every
1050 : : * update of a page changes its LSN, so the LSN we memorized should have
1051 : : * changed too.
1052 : : *
1053 : : * During index build, however, we don't WAL-log the changes until we have
1054 : : * built the index, so the LSN doesn't change. There is no concurrent
1055 : : * activity during index build, but we might have changed the parent
1056 : : * ourselves.
1057 : : *
1058 : : * We will also get here if child->downlinkoffnum is invalid. That happens
1059 : : * if 'parent' had been updated by an earlier call to this function on its
1060 : : * grandchild, which had to move right.
1061 : : */
1062 [ # # # # : 0 : Assert(parent->lsn != PageGetLSN(parent->page) || is_build ||
# # ]
1063 : : child->downlinkoffnum == InvalidOffsetNumber);
1064 : :
1065 : : /*
1066 : : * Scan the page to re-find the downlink. If the page was split, it might
1067 : : * have moved to a different page, so follow the right links until we find
1068 : : * it.
1069 : : */
1070 : 0 : while (true)
1071 : : {
1072 : 0 : OffsetNumber i;
1073 : :
1074 : 0 : maxoff = PageGetMaxOffsetNumber(parent->page);
1075 [ # # ]: 0 : for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
1076 : : {
1077 : 0 : iid = PageGetItemId(parent->page, i);
1078 : 0 : idxtuple = (IndexTuple) PageGetItem(parent->page, iid);
1079 [ # # ]: 0 : if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == child->blkno)
1080 : : {
1081 : : /* yes!!, found */
1082 : 0 : child->downlinkoffnum = i;
1083 : 0 : return;
1084 : : }
1085 : 0 : }
1086 : :
1087 : 0 : parent->blkno = GistPageGetOpaque(parent->page)->rightlink;
1088 : 0 : parent->downlinkoffnum = InvalidOffsetNumber;
1089 : 0 : UnlockReleaseBuffer(parent->buffer);
1090 [ # # ]: 0 : if (parent->blkno == InvalidBlockNumber)
1091 : : {
1092 : : /*
1093 : : * End of the chain and we still didn't find the parent. This is a very
1094 : : * rare situation that occurs when the root was split.
1095 : : */
1096 : 0 : break;
1097 : : }
1098 : 0 : parent->buffer = ReadBuffer(r, parent->blkno);
1099 : 0 : LockBuffer(parent->buffer, GIST_EXCLUSIVE);
1100 : 0 : gistcheckpage(r, parent->buffer);
1101 : 0 : parent->page = BufferGetPage(parent->buffer);
1102 [ # # # ]: 0 : }
1103 : :
1104 : : /*
1105 : : * Awful! We need to search the tree to find the parent, but before that
1106 : : * we should release all the old parent buffers.
1107 : : */
1108 : :
1109 : 0 : ptr = child->parent->parent; /* child->parent already released above */
1110 [ # # ]: 0 : while (ptr)
1111 : : {
1112 : 0 : ReleaseBuffer(ptr->buffer);
1113 : 0 : ptr = ptr->parent;
1114 : : }
1115 : :
1116 : : /* ok, find new path */
1117 : 0 : ptr = parent = gistFindPath(r, child->blkno, &child->downlinkoffnum);
1118 : :
1119 : : /* read all buffers as expected by caller */
1120 : : /* note we don't lock them or gistcheckpage them here! */
1121 [ # # ]: 0 : while (ptr)
1122 : : {
1123 : 0 : ptr->buffer = ReadBuffer(r, ptr->blkno);
1124 : 0 : ptr->page = BufferGetPage(ptr->buffer);
1125 : 0 : ptr = ptr->parent;
1126 : : }
1127 : :
1128 : : /* install new chain of parents to stack */
1129 : 0 : child->parent = parent;
1130 : :
1131 : : /* make recursive call to normal processing */
1132 : 0 : LockBuffer(child->parent->buffer, GIST_EXCLUSIVE);
1133 : 0 : gistFindCorrectParent(r, child, is_build);
1134 [ - + ]: 1521 : }
1135 : :
1136 : : /*
1137 : : * Form a downlink pointer for the page in 'buf'.
1138 : : */
1139 : : static IndexTuple
1140 : 0 : gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
1141 : : GISTInsertStack *stack, bool is_build)
1142 : : {
1143 : 0 : Page page = BufferGetPage(buf);
1144 : 0 : OffsetNumber maxoff;
1145 : 0 : OffsetNumber offset;
1146 : 0 : IndexTuple downlink = NULL;
1147 : :
1148 : 0 : maxoff = PageGetMaxOffsetNumber(page);
1149 [ # # ]: 0 : for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
1150 : : {
1151 : 0 : IndexTuple ituple = (IndexTuple)
1152 : 0 : PageGetItem(page, PageGetItemId(page, offset));
1153 : :
1154 [ # # ]: 0 : if (downlink == NULL)
1155 : 0 : downlink = CopyIndexTuple(ituple);
1156 : : else
1157 : : {
1158 : 0 : IndexTuple newdownlink;
1159 : :
1160 : 0 : newdownlink = gistgetadjusted(rel, downlink, ituple,
1161 : 0 : giststate);
1162 [ # # ]: 0 : if (newdownlink)
1163 : 0 : downlink = newdownlink;
1164 : 0 : }
1165 : 0 : }
1166 : :
1167 : : /*
1168 : : * If the page is completely empty, we can't form a meaningful downlink
1169 : : * for it. But we have to insert a downlink for the page. Any key will do,
1170 : : * as long as it's consistent with the downlink of the parent page, so that
1171 : : * we can legally insert it into the parent. A minimal one that matches as
1172 : : * few scans as possible would be best, to keep scans from doing useless
1173 : : * work, but we don't know how to construct that. So we just use the
1174 : : * downlink of the original page that was split - that's as far from
1175 : : * optimal as it can get, but it will do.
1176 : : */
1177 [ # # ]: 0 : if (!downlink)
1178 : : {
1179 : 0 : ItemId iid;
1180 : :
1181 : 0 : LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
1182 : 0 : gistFindCorrectParent(rel, stack, is_build);
1183 : 0 : iid = PageGetItemId(stack->parent->page, stack->downlinkoffnum);
1184 : 0 : downlink = (IndexTuple) PageGetItem(stack->parent->page, iid);
1185 : 0 : downlink = CopyIndexTuple(downlink);
1186 : 0 : LockBuffer(stack->parent->buffer, GIST_UNLOCK);
1187 : 0 : }
1188 : :
1189 : 0 : ItemPointerSetBlockNumber(&(downlink->t_tid), BufferGetBlockNumber(buf));
1190 : 0 : GistTupleSetValid(downlink);
1191 : :
1192 : 0 : return downlink;
1193 : 0 : }
1194 : :
1195 : :
1196 : : /*
1197 : : * Complete the incomplete split of state->stack->page.
1198 : : */
1199 : : static void
1200 : 0 : gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
1201 : : {
1202 : 0 : GISTInsertStack *stack = state->stack;
1203 : 0 : Buffer buf;
1204 : 0 : Page page;
1205 : 0 : List *splitinfo = NIL;
1206 : :
1207 [ # # # # ]: 0 : ereport(LOG,
1208 : : (errmsg("fixing incomplete split in index \"%s\", block %u",
1209 : : RelationGetRelationName(state->r), stack->blkno)));
1210 : :
1211 [ # # ]: 0 : Assert(GistFollowRight(stack->page));
1212 [ # # # # ]: 0 : Assert(OffsetNumberIsValid(stack->downlinkoffnum));
1213 : :
1214 : 0 : buf = stack->buffer;
1215 : :
1216 : : /*
1217 : : * Read the chain of split pages, following the rightlinks. Construct a
1218 : : * downlink tuple for each page.
1219 : : */
1220 : 0 : for (;;)
1221 : : {
1222 : 0 : GISTPageSplitInfo *si = palloc_object(GISTPageSplitInfo);
1223 : 0 : IndexTuple downlink;
1224 : :
1225 : 0 : page = BufferGetPage(buf);
1226 : :
1227 : : /* Form the new downlink tuples to insert to parent */
1228 : 0 : downlink = gistformdownlink(state->r, buf, giststate, stack, state->is_build);
1229 : :
1230 : 0 : si->buf = buf;
1231 : 0 : si->downlink = downlink;
1232 : :
1233 : 0 : splitinfo = lappend(splitinfo, si);
1234 : :
1235 [ # # ]: 0 : if (GistFollowRight(page))
1236 : : {
1237 : : /* lock next page */
1238 : 0 : buf = ReadBuffer(state->r, GistPageGetOpaque(page)->rightlink);
1239 : 0 : LockBuffer(buf, GIST_EXCLUSIVE);
1240 : 0 : }
1241 : : else
1242 : 0 : break;
1243 [ # # # ]: 0 : }
1244 : :
1245 : : /* Insert the downlinks */
1246 : 0 : gistfinishsplit(state, stack, giststate, splitinfo, false);
1247 : 0 : }
1248 : :
1249 : : /*
1250 : : * Insert or replace a tuple in stack->buffer. If 'oldoffnum' is valid, the
1251 : : * tuple at 'oldoffnum' is replaced, otherwise the tuple is inserted as new.
1252 : : * 'stack' represents the path from the root to the page being updated.
1253 : : *
1254 : : * The caller must hold an exclusive lock on stack->buffer. The lock is still
1255 : : * held on return, but the page might not contain the inserted tuple if the
1256 : : * page was split. The function returns true if the page was split, false
1257 : : * otherwise.
1258 : : */
1259 : : static bool
1260 : 183653 : gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
1261 : : GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
1262 : : {
1263 : 183653 : return gistinserttuples(state, stack, giststate, &tuple, 1, oldoffnum,
1264 : : InvalidBuffer, InvalidBuffer, false, false);
1265 : : }
1266 : :
1267 : : /* ----------------
1268 : : * An extended workhorse version of gistinserttuple(). This version allows
1269 : : * inserting multiple tuples, or replacing a single tuple with multiple tuples.
1270 : : * This is used to recursively update the downlinks in the parent when a page
1271 : : * is split.
1272 : : *
1273 : : * If leftchild and rightchild are valid, we're inserting/replacing the
1274 : : * downlink for rightchild, and leftchild is its left sibling. We clear the
1275 : : * F_FOLLOW_RIGHT flag and update NSN on leftchild, atomically with the
1276 : : * insertion of the downlink.
1277 : : *
1278 : : * To avoid holding locks for longer than necessary, when recursing up the
1279 : : * tree to update the parents, the locking is a bit peculiar here. On entry,
1280 : : * the caller must hold an exclusive lock on stack->buffer, as well as
1281 : : * leftchild and rightchild if given. On return:
1282 : : *
1283 : : * - Lock on stack->buffer is released, if 'unlockbuf' is true. The page is
1284 : : * always kept pinned, however.
1285 : : * - Lock on 'leftchild' is released, if 'unlockleftchild' is true. The page
1286 : : * is kept pinned.
1287 : : * - Lock and pin on 'rightchild' are always released.
1288 : : *
1289 : : * Returns 'true' if the page had to be split. Note that if the page was
1290 : : * split, the inserted/updated tuples might've been inserted to a right
1291 : : * sibling of stack->buffer instead of stack->buffer itself.
1292 : : */
1293 : : static bool
1294 : 185174 : gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
1295 : : GISTSTATE *giststate,
1296 : : IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
1297 : : Buffer leftchild, Buffer rightchild,
1298 : : bool unlockbuf, bool unlockleftchild)
1299 : : {
1300 : 185174 : List *splitinfo;
1301 : 185174 : bool is_split;
1302 : :
1303 : : /*
1304 : : * Check for any rw conflicts (in serializable isolation level) just
1305 : : * before we intend to modify the page
1306 : : */
1307 : 185174 : CheckForSerializableConflictIn(state->r, NULL, BufferGetBlockNumber(stack->buffer));
1308 : :
1309 : : /* Insert the tuple(s) to the page, splitting the page if necessary */
1310 : 370348 : is_split = gistplacetopage(state->r, state->freespace, giststate,
1311 : 185174 : stack->buffer,
1312 : 185174 : tuples, ntup,
1313 : 185174 : oldoffnum, NULL,
1314 : 185174 : leftchild,
1315 : : &splitinfo,
1316 : : true,
1317 : 185174 : state->heapRel,
1318 : 185174 : state->is_build);
1319 : :
1320 : : /*
1321 : : * Before recursing up in case the page was split, release locks on the
1322 : : * child pages. We don't need to keep them locked when updating the
1323 : : * parent.
1324 : : */
1325 [ + + ]: 185174 : if (BufferIsValid(rightchild))
1326 : 1521 : UnlockReleaseBuffer(rightchild);
1327 [ + + + + ]: 185174 : if (BufferIsValid(leftchild) && unlockleftchild)
1328 : 4 : LockBuffer(leftchild, GIST_UNLOCK);
1329 : :
1330 : : /*
1331 : : * If we had to split, insert/update the downlinks in the parent. If the
1332 : : * caller requested us to release the lock on stack->buffer, tell
1333 : : * gistfinishsplit() to do that as soon as it's safe to do so. If we
1334 : : * didn't have to split, release it ourselves.
1335 : : */
1336 [ + + ]: 185174 : if (splitinfo)
1337 : 1521 : gistfinishsplit(state, stack, giststate, splitinfo, unlockbuf);
1338 [ + + ]: 183653 : else if (unlockbuf)
1339 : 1517 : LockBuffer(stack->buffer, GIST_UNLOCK);
1340 : :
1341 : 370348 : return is_split;
1342 : 185174 : }
1343 : :
1344 : : /*
1345 : : * Finish an incomplete split by inserting/updating the downlinks in parent
1346 : : * page. 'splitinfo' contains all the child pages involved in the split,
1347 : : * from left-to-right.
1348 : : *
1349 : : * On entry, the caller must hold a lock on stack->buffer and all the child
1350 : : * pages in 'splitinfo'. If 'unlockbuf' is true, the lock on stack->buffer is
1351 : : * released on return. The child pages are always unlocked and unpinned.
1352 : : */
1353 : : static void
1354 : 1521 : gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
1355 : : GISTSTATE *giststate, List *splitinfo, bool unlockbuf)
1356 : : {
1357 : 1521 : GISTPageSplitInfo *right;
1358 : 1521 : GISTPageSplitInfo *left;
1359 : 1521 : IndexTuple tuples[2];
1360 : :
1361 : : /* A split always contains at least two halves */
1362 [ + - ]: 1521 : Assert(list_length(splitinfo) >= 2);
1363 : :
1364 : : /*
1365 : : * We need to insert downlinks for each new page, and update the downlink
1366 : : * for the original (leftmost) page in the split. Begin at the rightmost
1367 : : * page, inserting one downlink at a time until there's only two pages
1368 : : * left. Finally insert the downlink for the last new page and update the
1369 : : * downlink for the original page as one operation.
1370 : : */
1371 : 1521 : LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
1372 : :
1373 : : /*
1374 : : * Insert downlinks for the siblings from right to left, until there are
1375 : : * only two siblings left.
1376 : : */
1377 [ - + ]: 1521 : for (int pos = list_length(splitinfo) - 1; pos > 1; pos--)
1378 : : {
1379 : 0 : right = (GISTPageSplitInfo *) list_nth(splitinfo, pos);
1380 : 0 : left = (GISTPageSplitInfo *) list_nth(splitinfo, pos - 1);
1381 : :
1382 : 0 : gistFindCorrectParent(state->r, stack, state->is_build);
1383 [ # # # # ]: 0 : if (gistinserttuples(state, stack->parent, giststate,
1384 : 0 : &right->downlink, 1,
1385 : : InvalidOffsetNumber,
1386 : 0 : left->buf, right->buf, false, false))
1387 : : {
1388 : : /*
1389 : : * If the parent page was split, the existing downlink might have
1390 : : * moved.
1391 : : */
1392 : 0 : stack->downlinkoffnum = InvalidOffsetNumber;
1393 : 0 : }
1394 : : /* gistinserttuples() released the lock on right->buf. */
1395 : 0 : }
1396 : :
1397 : 1521 : right = (GISTPageSplitInfo *) lsecond(splitinfo);
1398 : 1521 : left = (GISTPageSplitInfo *) linitial(splitinfo);
1399 : :
1400 : : /*
1401 : : * Finally insert downlink for the remaining right page and update the
1402 : : * downlink for the original page to not contain the tuples that were
1403 : : * moved to the new pages.
1404 : : */
1405 : 1521 : tuples[0] = left->downlink;
1406 : 1521 : tuples[1] = right->downlink;
1407 : 1521 : gistFindCorrectParent(state->r, stack, state->is_build);
1408 : 3042 : (void) gistinserttuples(state, stack->parent, giststate,
1409 : 1521 : tuples, 2,
1410 : 1521 : stack->downlinkoffnum,
1411 : 1521 : left->buf, right->buf,
1412 : : true, /* Unlock parent */
1413 : 1521 : unlockbuf /* Unlock stack->buffer if caller
1414 : : * wants that */
1415 : : );
1416 : :
1417 : : /*
1418 : : * The downlink might have moved when we updated it, even if the page
1419 : : * wasn't split, because gistinserttuples() implements updating the old
1420 : : * tuple by removing and re-inserting it!
1421 : : */
1422 : 1521 : stack->downlinkoffnum = InvalidOffsetNumber;
1423 : :
1424 [ + - ]: 1521 : Assert(left->buf == stack->buffer);
1425 : :
1426 : : /*
1427 : : * If we split the page because we had to adjust the downlink on an
1428 : : * internal page, while descending the tree for inserting a new tuple,
1429 : : * then this might no longer be the correct page for the new tuple. The
1430 : : * downlink to this page might not cover the new tuple anymore, it might
1431 : : * need to go to the newly-created right sibling instead. Tell the caller
1432 : : * to walk back up the stack, to re-check at the parent which page to
1433 : : * insert to.
1434 : : *
1435 : : * Normally, the LSN-NSN interlock during the tree descend would also
1436 : : * detect that a concurrent split happened (by ourselves), and cause us to
1437 : : * retry at the parent. But that mechanism doesn't work during index
1438 : : * build, because we don't do WAL-logging, and don't update LSNs, during
1439 : : * index build.
1440 : : */
1441 : 1521 : stack->retry_from_parent = true;
1442 : 1521 : }
1443 : :
1444 : : /*
1445 : : * gistSplit -- split a page in the tree and fill the struct
1446 : : * used for XLOG and for writing the real buffers. The function is
1447 : : * recursive, i.e. it keeps splitting until the keys fit on every page.
1448 : : */
1449 : : SplitPageLayout *
1450 : 1881 : gistSplit(Relation r,
1451 : : Page page,
1452 : : IndexTuple *itup, /* contains compressed entry */
1453 : : int len,
1454 : : GISTSTATE *giststate)
1455 : : {
1456 : 1881 : IndexTuple *lvectup,
1457 : : *rvectup;
1458 : 1881 : GistSplitVector v;
1459 : 1881 : int i;
1460 : 1881 : SplitPageLayout *res = NULL;
1461 : :
1462 : : /* this should never recurse very deeply, but better safe than sorry */
1463 : 1881 : check_stack_depth();
1464 : :
1465 : : /* there's no point in splitting an empty page */
1466 [ + - ]: 1881 : Assert(len > 0);
1467 : :
1468 : : /*
1469 : : * If a single tuple doesn't fit on a page, no amount of splitting will
1470 : : * help.
1471 : : */
1472 [ + - ]: 1881 : if (len == 1)
1473 [ # # # # ]: 0 : ereport(ERROR,
1474 : : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
1475 : : errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
1476 : : IndexTupleSize(itup[0]), GiSTPageSize,
1477 : : RelationGetRelationName(r))));
1478 : :
1479 : 1881 : memset(v.spl_lisnull, true,
1480 : : sizeof(bool) * giststate->nonLeafTupdesc->natts);
1481 : 1881 : memset(v.spl_risnull, true,
1482 : : sizeof(bool) * giststate->nonLeafTupdesc->natts);
1483 : 1881 : gistSplitByKey(r, page, itup, len, giststate, &v, 0);
1484 : :
1485 : : /* form left and right vector */
1486 : 1881 : lvectup = palloc_array(IndexTuple, len + 1);
1487 : 1881 : rvectup = palloc_array(IndexTuple, len + 1);
1488 : :
1489 [ + + ]: 144941 : for (i = 0; i < v.splitVector.spl_nleft; i++)
1490 : 143060 : lvectup[i] = itup[v.splitVector.spl_left[i] - 1];
1491 : :
1492 [ + + ]: 171216 : for (i = 0; i < v.splitVector.spl_nright; i++)
1493 : 169335 : rvectup[i] = itup[v.splitVector.spl_right[i] - 1];
1494 : :
1495 : : /* finalize splitting (may need another split) */
1496 [ + + ]: 1881 : if (!gistfitpage(rvectup, v.splitVector.spl_nright))
1497 : : {
1498 : 107 : res = gistSplit(r, page, rvectup, v.splitVector.spl_nright, giststate);
1499 : 107 : }
1500 : : else
1501 : : {
1502 : 1774 : ROTATEDIST(res);
1503 : 1774 : res->block.num = v.splitVector.spl_nright;
1504 : 1774 : res->list = gistfillitupvec(rvectup, v.splitVector.spl_nright, &(res->lenlist));
1505 : 1774 : res->itup = gistFormTuple(giststate, r, v.spl_rattr, v.spl_risnull, false);
1506 : : }
1507 : :
1508 [ + + ]: 1881 : if (!gistfitpage(lvectup, v.splitVector.spl_nleft))
1509 : : {
1510 : 50 : SplitPageLayout *resptr,
1511 : : *subres;
1512 : :
1513 : 50 : resptr = subres = gistSplit(r, page, lvectup, v.splitVector.spl_nleft, giststate);
1514 : :
 1515                 :            :         /* install at the list's tail */
1516 [ + + ]: 130 : while (resptr->next)
1517 : 80 : resptr = resptr->next;
1518 : :
1519 : 50 : resptr->next = res;
1520 : 50 : res = subres;
1521 : 50 : }
1522 : : else
1523 : : {
1524 : 1831 : ROTATEDIST(res);
1525 : 1831 : res->block.num = v.splitVector.spl_nleft;
1526 : 1831 : res->list = gistfillitupvec(lvectup, v.splitVector.spl_nleft, &(res->lenlist));
1527 : 1831 : res->itup = gistFormTuple(giststate, r, v.spl_lattr, v.spl_lisnull, false);
1528 : : }
1529 : :
1530 : 3762 : return res;
1531 : 1881 : }
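
gistSplit() hands back a linked list of SplitPageLayout nodes, one per output
page, assembled with ROTATEDIST above.  The helper below is a hypothetical
usage sketch (the function name and DEBUG output are made up); the real
consumer is gistplacetopage(), which allocates a buffer for each node and
fills the page from node->list.

    static void
    report_split_layout(SplitPageLayout *dist)
    {
        SplitPageLayout *ptr;
        int         npages = 0;

        /* Illustrative only: walk the list returned by gistSplit(). */
        for (ptr = dist; ptr != NULL; ptr = ptr->next)
        {
            npages++;
            elog(DEBUG1, "page %d of the split holds %d tuples (%d bytes)",
                 npages, ptr->block.num, ptr->lenlist);
        }
    }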
1532 : :
1533 : : /*
1534 : : * Create a GISTSTATE and fill it with information about the index
1535 : : */
1536 : : GISTSTATE *
1537 : 1207 : initGISTstate(Relation index)
1538 : : {
1539 : 1207 : GISTSTATE *giststate;
1540 : 1207 : MemoryContext scanCxt;
1541 : 1207 : MemoryContext oldCxt;
1542 : 1207 : int i;
1543 : :
1544 : : /* safety check to protect fixed-size arrays in GISTSTATE */
1545 [ + - ]: 1207 : if (index->rd_att->natts > INDEX_MAX_KEYS)
1546 [ # # # # ]: 0 : elog(ERROR, "numberOfAttributes %d > %d",
1547 : : index->rd_att->natts, INDEX_MAX_KEYS);
1548 : :
1549 : : /* Create the memory context that will hold the GISTSTATE */
1550 : 1207 : scanCxt = AllocSetContextCreate(CurrentMemoryContext,
1551 : : "GiST scan context",
1552 : : ALLOCSET_DEFAULT_SIZES);
1553 : 1207 : oldCxt = MemoryContextSwitchTo(scanCxt);
1554 : :
1555 : : /* Create and fill in the GISTSTATE */
1556 : 1207 : giststate = palloc_object(GISTSTATE);
1557 : :
1558 : 1207 : giststate->scanCxt = scanCxt;
1559 : 1207 : giststate->tempCxt = scanCxt; /* caller must change this if needed */
1560 : 1207 : giststate->leafTupdesc = index->rd_att;
1561 : :
1562 : : /*
1563 : : * The truncated tupdesc for non-leaf index tuples, which doesn't contain
1564 : : * the INCLUDE attributes.
1565 : : *
1566 : : * It is used to form tuples during tuple adjustment and page split.
 1567                 :            :      * B-tree creates a shortened tuple descriptor for every truncated tuple,
 1568                 :            :      * because it does so less often: it does not have to form truncated
 1569                 :            :      * tuples during page splits.  Also, B-tree does not adjust tuples on
 1570                 :            :      * internal pages the way GiST does.
1571 : : */
1572 : 2414 : giststate->nonLeafTupdesc = CreateTupleDescTruncatedCopy(index->rd_att,
1573 : 1207 : IndexRelationGetNumberOfKeyAttributes(index));
1574 : :
1575 [ + + ]: 3318 : for (i = 0; i < IndexRelationGetNumberOfKeyAttributes(index); i++)
1576 : : {
1577 : 4222 : fmgr_info_copy(&(giststate->consistentFn[i]),
1578 : 2111 : index_getprocinfo(index, i + 1, GIST_CONSISTENT_PROC),
1579 : 2111 : scanCxt);
1580 : 4222 : fmgr_info_copy(&(giststate->unionFn[i]),
1581 : 2111 : index_getprocinfo(index, i + 1, GIST_UNION_PROC),
1582 : 2111 : scanCxt);
1583 : :
1584 : : /* opclasses are not required to provide a Compress method */
1585 [ + + ]: 2111 : if (OidIsValid(index_getprocid(index, i + 1, GIST_COMPRESS_PROC)))
1586 : 1298 : fmgr_info_copy(&(giststate->compressFn[i]),
1587 : 649 : index_getprocinfo(index, i + 1, GIST_COMPRESS_PROC),
1588 : 649 : scanCxt);
1589 : : else
1590 : 1462 : giststate->compressFn[i].fn_oid = InvalidOid;
1591 : :
1592 : : /* opclasses are not required to provide a Decompress method */
1593 [ + + ]: 2111 : if (OidIsValid(index_getprocid(index, i + 1, GIST_DECOMPRESS_PROC)))
1594 : 212 : fmgr_info_copy(&(giststate->decompressFn[i]),
1595 : 106 : index_getprocinfo(index, i + 1, GIST_DECOMPRESS_PROC),
1596 : 106 : scanCxt);
1597 : : else
1598 : 2005 : giststate->decompressFn[i].fn_oid = InvalidOid;
1599 : :
1600 : 4222 : fmgr_info_copy(&(giststate->penaltyFn[i]),
1601 : 2111 : index_getprocinfo(index, i + 1, GIST_PENALTY_PROC),
1602 : 2111 : scanCxt);
1603 : 4222 : fmgr_info_copy(&(giststate->picksplitFn[i]),
1604 : 2111 : index_getprocinfo(index, i + 1, GIST_PICKSPLIT_PROC),
1605 : 2111 : scanCxt);
1606 : 4222 : fmgr_info_copy(&(giststate->equalFn[i]),
1607 : 2111 : index_getprocinfo(index, i + 1, GIST_EQUAL_PROC),
1608 : 2111 : scanCxt);
1609 : :
1610 : : /* opclasses are not required to provide a Distance method */
1611 [ + + ]: 2111 : if (OidIsValid(index_getprocid(index, i + 1, GIST_DISTANCE_PROC)))
1612 : 290 : fmgr_info_copy(&(giststate->distanceFn[i]),
1613 : 145 : index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
1614 : 145 : scanCxt);
1615 : : else
1616 : 1966 : giststate->distanceFn[i].fn_oid = InvalidOid;
1617 : :
1618 : : /* opclasses are not required to provide a Fetch method */
1619 [ + + ]: 2111 : if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
1620 : 116 : fmgr_info_copy(&(giststate->fetchFn[i]),
1621 : 58 : index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
1622 : 58 : scanCxt);
1623 : : else
1624 : 2053 : giststate->fetchFn[i].fn_oid = InvalidOid;
1625 : :
1626 : : /*
1627 : : * If the index column has a specified collation, we should honor that
1628 : : * while doing comparisons. However, we may have a collatable storage
1629 : : * type for a noncollatable indexed data type. If there's no index
1630 : : * collation then specify default collation in case the support
1631 : : * functions need collation. This is harmless if the support
1632 : : * functions don't care about collation, so we just do it
1633 : : * unconditionally. (We could alternatively call get_typcollation,
1634 : : * but that seems like expensive overkill --- there aren't going to be
1635 : : * any cases where a GiST storage type has a nondefault collation.)
1636 : : */
1637 [ - + ]: 2111 : if (OidIsValid(index->rd_indcollation[i]))
1638 : 0 : giststate->supportCollation[i] = index->rd_indcollation[i];
1639 : : else
1640 : 2111 : giststate->supportCollation[i] = DEFAULT_COLLATION_OID;
1641 : 2111 : }
1642 : :
1643 : : /* No opclass information for INCLUDE attributes */
1644 [ + + ]: 1290 : for (; i < index->rd_att->natts; i++)
1645 : : {
1646 : 83 : giststate->consistentFn[i].fn_oid = InvalidOid;
1647 : 83 : giststate->unionFn[i].fn_oid = InvalidOid;
1648 : 83 : giststate->compressFn[i].fn_oid = InvalidOid;
1649 : 83 : giststate->decompressFn[i].fn_oid = InvalidOid;
1650 : 83 : giststate->penaltyFn[i].fn_oid = InvalidOid;
1651 : 83 : giststate->picksplitFn[i].fn_oid = InvalidOid;
1652 : 83 : giststate->equalFn[i].fn_oid = InvalidOid;
1653 : 83 : giststate->distanceFn[i].fn_oid = InvalidOid;
1654 : 83 : giststate->fetchFn[i].fn_oid = InvalidOid;
1655 : 83 : giststate->supportCollation[i] = InvalidOid;
1656 : 83 : }
1657 : :
1658 : 1207 : MemoryContextSwitchTo(oldCxt);
1659 : :
1660 : 2414 : return giststate;
1661 : 1207 : }
1662 : :
1663 : : void
1664 : 918 : freeGISTstate(GISTSTATE *giststate)
1665 : : {
1666 : : /* It's sufficient to delete the scanCxt */
1667 : 918 : MemoryContextDelete(giststate->scanCxt);
1668 : 918 : }
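
Taken together, initGISTstate() and freeGISTstate() bracket the lifetime of a
GISTSTATE.  The sketch below models the typical lifecycle (patterned on how
gistinsert() and gistbuild() use these routines; the wrapper function itself
is hypothetical).

    static void
    example_giststate_lifecycle(Relation index)
    {
        GISTSTATE  *giststate = initGISTstate(index);
        MemoryContext oldCxt;

        /* Point tempCxt at a short-lived context, reset after each tuple. */
        giststate->tempCxt = createTempGistContext();

        oldCxt = MemoryContextSwitchTo(giststate->tempCxt);
        /* ... form and insert one index tuple here ... */
        MemoryContextSwitchTo(oldCxt);
        MemoryContextReset(giststate->tempCxt);

        /* tempCxt is not a child of scanCxt, so delete it separately. */
        MemoryContextDelete(giststate->tempCxt);
        freeGISTstate(giststate);   /* deletes scanCxt and the GISTSTATE */
    }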
1669 : :
1670 : : /*
1671 : : * gistprunepage() -- try to remove LP_DEAD items from the given page.
 1672                 :            :  * The function assumes that the buffer is exclusively locked.
1673 : : */
1674 : : static void
1675 : 0 : gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel)
1676 : : {
1677 : 0 : OffsetNumber deletable[MaxIndexTuplesPerPage];
1678 : 0 : int ndeletable = 0;
1679 : 0 : OffsetNumber offnum,
1680 : : maxoff;
1681 : :
1682 [ # # ]: 0 : Assert(GistPageIsLeaf(page));
1683 : :
1684 : : /*
1685 : : * Scan over all items to see which ones need to be deleted according to
1686 : : * LP_DEAD flags.
1687 : : */
1688 : 0 : maxoff = PageGetMaxOffsetNumber(page);
1689 [ # # ]: 0 : for (offnum = FirstOffsetNumber;
1690 : 0 : offnum <= maxoff;
1691 : 0 : offnum = OffsetNumberNext(offnum))
1692 : : {
1693 : 0 : ItemId itemId = PageGetItemId(page, offnum);
1694 : :
1695 [ # # ]: 0 : if (ItemIdIsDead(itemId))
1696 : 0 : deletable[ndeletable++] = offnum;
1697 : 0 : }
1698 : :
1699 [ # # ]: 0 : if (ndeletable > 0)
1700 : : {
1701 : 0 : TransactionId snapshotConflictHorizon = InvalidTransactionId;
1702 : :
1703 [ # # # # : 0 : if (XLogStandbyInfoActive() && RelationNeedsWAL(rel))
# # # # #
# ]
1704 : 0 : snapshotConflictHorizon =
1705 : 0 : index_compute_xid_horizon_for_tuples(rel, heapRel, buffer,
1706 : 0 : deletable, ndeletable);
1707 : :
1708 : 0 : START_CRIT_SECTION();
1709 : :
1710 : 0 : PageIndexMultiDelete(page, deletable, ndeletable);
1711 : :
1712 : : /*
1713 : : * Mark the page as not containing any LP_DEAD items. This is not
 1714                 :            :          * necessarily true (there might be some that have recently been marked,
1715 : : * but weren't included in our target-item list), but it will almost
1716 : : * always be true and it doesn't seem worth an additional page scan to
1717 : : * check it. Remember that F_HAS_GARBAGE is only a hint anyway.
1718 : : */
1719 : 0 : GistClearPageHasGarbage(page);
1720 : :
1721 : 0 : MarkBufferDirty(buffer);
1722 : :
1723 : : /* XLOG stuff */
1724 [ # # # # : 0 : if (RelationNeedsWAL(rel))
# # # # ]
1725 : : {
1726 : 0 : XLogRecPtr recptr;
1727 : :
1728 : 0 : recptr = gistXLogDelete(buffer,
1729 : 0 : deletable, ndeletable,
1730 : 0 : snapshotConflictHorizon,
1731 : 0 : heapRel);
1732 : :
1733 : 0 : PageSetLSN(page, recptr);
1734 : 0 : }
1735 : : else
1736 : 0 : PageSetLSN(page, gistGetFakeLSN(rel));
1737 : :
1738 [ # # ]: 0 : END_CRIT_SECTION();
1739 : 0 : }
1740 : :
1741 : : /*
1742 : : * Note: if we didn't find any LP_DEAD items, then the page's
1743 : : * F_HAS_GARBAGE hint bit is falsely set. We do not bother expending a
1744 : : * separate write to clear it, however. We will clear it when we split
1745 : : * the page.
1746 : : */
1747 : 0 : }
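
gistprunepage() is opportunistic: it is worth calling when a full leaf page
carries the F_HAS_GARBAGE hint, since pruning may free enough space to avoid
a split.  The fragment below sketches such a call site (patterned on
gistplacetopage(), earlier in this file; the surrounding variables are
assumed from that context).

    /* Leaf page is full: try to free LP_DEAD items before splitting. */
    if (is_split && GistPageIsLeaf(page) && GistPageHasGarbage(page))
    {
        gistprunepage(rel, page, buffer, heapRel);
        /* Re-check whether the incoming tuples fit now. */
        is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
    }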