/*-------------------------------------------------------------------------
 *
 * syncscan.c
 *	  heap scan synchronization support
 *
 * When multiple backends run a sequential scan on the same table, we try
 * to keep them synchronized to reduce the overall I/O needed.  The goal is
 * to read each page into shared buffer cache only once, and let all backends
 * that take part in the shared scan process the page before it falls out of
 * the cache.
 *
 * Since the "leader" in a pack of backends doing a seqscan will have to wait
 * for I/O, while the "followers" don't, there is a strong self-synchronizing
 * effect once we can get the backends examining approximately the same part
 * of the table at the same time.  Hence all that is really needed is to get
 * a new backend beginning a seqscan to begin it close to where other backends
 * are reading.  We can scan the table circularly, from block X up to the
 * end and then from block 0 to X-1, to ensure we visit all rows while still
 * participating in the common scan.
 *
 * To accomplish that, we keep track of the scan position of each table, and
 * start new scans close to where the previous scan(s) are.  We don't try to
 * do any extra synchronization to keep the scans together afterwards; some
 * scans might progress much more slowly than others, for example if the
 * results need to be transferred to the client over a slow network, and we
 * don't want such queries to slow down others.
 *
 * There can realistically only be a few large sequential scans on different
 * tables in progress at any time.  Therefore we just keep the scan positions
 * in a small LRU list which we scan every time we need to look up or update a
 * scan position.  The whole mechanism is only applied for tables exceeding
 * a threshold size (but that is not the concern of this module).
 *
 * INTERFACE ROUTINES
 *	ss_get_location		- return current scan location of a relation
 *	ss_report_location	- update current scan location
 *
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/syncscan.c
 *
 *-------------------------------------------------------------------------
 */
49 #include "access/heapam.h"
50 #include "miscadmin.h"
51 #include "utils/rel.h"
/* GUC variables: when true, emit LOG messages in TRACE_SYNCSCAN builds */
bool		trace_syncscan = false;
/*
 * Size of the LRU list.
 *
 * Note: the code assumes that SYNC_SCAN_NELEM > 1.
 *
 * XXX: What's a good value?  It should be large enough to hold the
 * maximum number of large tables scanned simultaneously.  But a larger value
 * means more traversing of the LRU list when starting a new scan.
 */
#define SYNC_SCAN_NELEM 20
/*
 * Interval between reports of the location of the current scan, in pages.
 *
 * Note: This should be smaller than the ring size (see buffer/freelist.c)
 * we use for bulk reads.  Otherwise a scan joining other scans might start
 * from a page that's no longer in the buffer cache.  This is a bit fuzzy;
 * there's no guarantee that the new scan will read the page before it leaves
 * the buffer cache anyway, and on the other hand the page is most likely
 * still in the OS cache.
 */
#define SYNC_SCAN_REPORT_INTERVAL (128 * 1024 / BLCKSZ)
85 * The scan locations structure is essentially a doubly-linked LRU with head
86 * and tail pointer, but designed to hold a fixed maximum number of elements in
87 * fixed-size shared memory.
89 typedef struct ss_scan_location_t
91 RelFileNode relfilenode; /* identity of a relation */
92 BlockNumber location; /* last-reported location in the relation */
95 typedef struct ss_lru_item_t
97 struct ss_lru_item_t *prev;
98 struct ss_lru_item_t *next;
99 ss_scan_location_t location;
102 typedef struct ss_scan_locations_t
106 ss_lru_item_t items[FLEXIBLE_ARRAY_MEMBER]; /* SYNC_SCAN_NELEM items */
107 } ss_scan_locations_t;
/* Bytes of shared memory needed to hold N LRU items (header + array) */
#define SizeOfScanLocations(N) \
	(offsetof(ss_scan_locations_t, items) + (N) * sizeof(ss_lru_item_t))
112 /* Pointer to struct in shared memory */
113 static ss_scan_locations_t *scan_locations;
115 /* prototypes for internal functions */
116 static BlockNumber ss_search(RelFileNode relfilenode,
117 BlockNumber location, bool set);
121 * SyncScanShmemSize --- report amount of shared memory space needed
124 SyncScanShmemSize(void)
126 return SizeOfScanLocations(SYNC_SCAN_NELEM);
130 * SyncScanShmemInit --- initialize this module's shared memory
133 SyncScanShmemInit(void)
138 scan_locations = (ss_scan_locations_t *)
139 ShmemInitStruct("Sync Scan Locations List",
140 SizeOfScanLocations(SYNC_SCAN_NELEM),
143 if (!IsUnderPostmaster)
145 /* Initialize shared memory area */
148 scan_locations->head = &scan_locations->items[0];
149 scan_locations->tail = &scan_locations->items[SYNC_SCAN_NELEM - 1];
151 for (i = 0; i < SYNC_SCAN_NELEM; i++)
153 ss_lru_item_t *item = &scan_locations->items[i];
156 * Initialize all slots with invalid values. As scans are started,
157 * these invalid entries will fall off the LRU list and get
158 * replaced with real entries.
160 item->location.relfilenode.spcNode = InvalidOid;
161 item->location.relfilenode.dbNode = InvalidOid;
162 item->location.relfilenode.relNode = InvalidOid;
163 item->location.location = InvalidBlockNumber;
165 item->prev = (i > 0) ?
166 (&scan_locations->items[i - 1]) : NULL;
167 item->next = (i < SYNC_SCAN_NELEM - 1) ?
168 (&scan_locations->items[i + 1]) : NULL;
176 * ss_search --- search the scan_locations structure for an entry with the
179 * If "set" is true, the location is updated to the given location. If no
180 * entry for the given relfilenode is found, it will be created at the head
181 * of the list with the given location, even if "set" is false.
183 * In any case, the location after possible update is returned.
185 * Caller is responsible for having acquired suitable lock on the shared
189 ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
193 item = scan_locations->head;
198 match = RelFileNodeEquals(item->location.relfilenode, relfilenode);
200 if (match || item->next == NULL)
203 * If we reached the end of list and no match was found, take over
208 item->location.relfilenode = relfilenode;
209 item->location.location = location;
212 item->location.location = location;
214 /* Move the entry to the front of the LRU list */
215 if (item != scan_locations->head)
218 if (item == scan_locations->tail)
219 scan_locations->tail = item->prev;
220 item->prev->next = item->next;
222 item->next->prev = item->prev;
226 item->next = scan_locations->head;
227 scan_locations->head->prev = item;
228 scan_locations->head = item;
231 return item->location.location;
241 * ss_get_location --- get the optimal starting location for scan
243 * Returns the last-reported location of a sequential scan on the
244 * relation, or 0 if no valid location is found.
246 * We expect the caller has just done RelationGetNumberOfBlocks(), and
247 * so that number is passed in rather than computing it again. The result
248 * is guaranteed less than relnblocks (assuming that's > 0).
251 ss_get_location(Relation rel, BlockNumber relnblocks)
253 BlockNumber startloc;
255 LWLockAcquire(SyncScanLock, LW_EXCLUSIVE);
256 startloc = ss_search(rel->rd_node, 0, false);
257 LWLockRelease(SyncScanLock);
260 * If the location is not a valid block number for this scan, start at 0.
262 * This can happen if for instance a VACUUM truncated the table since the
263 * location was saved.
265 if (startloc >= relnblocks)
268 #ifdef TRACE_SYNCSCAN
271 "SYNC_SCAN: start \"%s\" (size %u) at %u",
272 RelationGetRelationName(rel), relnblocks, startloc);
279 * ss_report_location --- update the current scan location
281 * Writes an entry into the shared Sync Scan state of the form
282 * (relfilenode, blocknumber), overwriting any existing entry for the
286 ss_report_location(Relation rel, BlockNumber location)
288 #ifdef TRACE_SYNCSCAN
291 if ((location % 1024) == 0)
293 "SYNC_SCAN: scanning \"%s\" at %u",
294 RelationGetRelationName(rel), location);
299 * To reduce lock contention, only report scan progress every N pages. For
300 * the same reason, don't block if the lock isn't immediately available.
301 * Missing a few updates isn't critical, it just means that a new scan
302 * that wants to join the pack will start a little bit behind the head of
303 * the scan. Hopefully the pages are still in OS cache and the scan
304 * catches up quickly.
306 if ((location % SYNC_SCAN_REPORT_INTERVAL) == 0)
308 if (LWLockConditionalAcquire(SyncScanLock, LW_EXCLUSIVE))
310 (void) ss_search(rel->rd_node, location, true);
311 LWLockRelease(SyncScanLock);
313 #ifdef TRACE_SYNCSCAN
314 else if (trace_syncscan)
316 "SYNC_SCAN: missed update for \"%s\" at %u",
317 RelationGetRelationName(rel), location);