@@ -221,6 +221,7 @@ typedef struct LVRelState
221
221
BlockNumber current_block ; /* last block returned */
222
222
BlockNumber next_unskippable_block ; /* next unskippable block */
223
223
bool next_unskippable_allvis ; /* its visibility status */
224
+ bool next_skipsallvis ; /* next all-visible range can be skipped */
224
225
Buffer next_unskippable_vmbuffer ; /* buffer containing its VM bit */
225
226
} LVRelState ;
226
227
@@ -237,7 +238,7 @@ typedef struct LVSavedErrInfo
237
238
static void lazy_scan_heap (LVRelState * vacrel );
238
239
static bool heap_vac_scan_next_block (LVRelState * vacrel , BlockNumber * blkno ,
239
240
bool * all_visible_according_to_vm );
240
- static void find_next_unskippable_block (LVRelState * vacrel , bool * skipsallvis );
241
+ static void find_next_unskippable_block (LVRelState * vacrel );
241
242
static bool lazy_scan_new_or_empty (LVRelState * vacrel , Buffer buf ,
242
243
BlockNumber blkno , Page page ,
243
244
bool sharelock , Buffer vmbuffer );
@@ -935,9 +936,9 @@ lazy_scan_heap(LVRelState *vacrel)
935
936
if (next_prefetch_block + prefetch_budget > blkno + vacrel -> io_concurrency )
936
937
prefetch_budget = blkno + vacrel -> io_concurrency - next_prefetch_block ;
937
938
938
- /* And only up to the next unskippable block */
939
- if (next_prefetch_block + prefetch_budget > vacrel -> next_unskippable_block )
940
- prefetch_budget = vacrel -> next_unskippable_block - next_prefetch_block ;
939
+ /* If the next SKIP_PAGES_THRESHOLD blocks are all skippable, do not prefetch: vacuum will skip over these blocks */
940
+ if (next_prefetch_block + SKIP_PAGES_THRESHOLD <= vacrel -> next_unskippable_block )
941
+ prefetch_budget = 0 ;
941
942
942
943
for (; prefetch_budget -- > 0 ; next_prefetch_block ++ )
943
944
PrefetchBuffer (vacrel -> rel , MAIN_FORKNUM , next_prefetch_block );
@@ -1150,31 +1151,30 @@ heap_vac_scan_next_block(LVRelState *vacrel, BlockNumber *blkno,
1150
1151
* beginning of the scan). Find the next unskippable block using the
1151
1152
* visibility map.
1152
1153
*/
1153
- bool skipsallvis ;
1154
-
1155
- find_next_unskippable_block ( vacrel , & skipsallvis );
1154
+ find_next_unskippable_block ( vacrel ) ;
1155
+ }
1156
+ Assert ( next_block <= vacrel -> next_unskippable_block );
1156
1157
1157
- /*
1158
- * We now know the next block that we must process. It can be the
1159
- * next block after the one we just processed, or something further
1160
- * ahead. If it's further ahead, we can jump to it, but we choose to
1161
- * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1162
- * pages. Since we're reading sequentially, the OS should be doing
1163
- * readahead for us, so there's no gain in skipping a page now and
1164
- * then. Skipping such a range might even discourage sequential
1165
- * detection.
1166
- *
1167
- * This test also enables more frequent relfrozenxid advancement
1168
- * during non-aggressive VACUUMs. If the range has any all-visible
1169
- * pages then skipping makes updating relfrozenxid unsafe, which is a
1170
- * real downside.
1171
- */
1172
- if (vacrel -> next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD )
1173
- {
1174
- next_block = vacrel -> next_unskippable_block ;
1175
- if (skipsallvis )
1176
- vacrel -> skippedallvis = true;
1177
- }
1158
+ /*
1159
+ * We now know the next block that we must process. It can be the
1160
+ * next block after the one we just processed, or something further
1161
+ * ahead. If it's further ahead, we can jump to it, but we choose to
1162
+ * do so only if we can skip at least SKIP_PAGES_THRESHOLD consecutive
1163
+ * pages. Since we're reading sequentially, the OS should be doing
1164
+ * readahead for us, so there's no gain in skipping a page now and
1165
+ * then. Skipping such a range might even discourage sequential
1166
+ * detection.
1167
+ *
1168
+ * This test also enables more frequent relfrozenxid advancement
1169
+ * during non-aggressive VACUUMs. If the range has any all-visible
1170
+ * pages then skipping makes updating relfrozenxid unsafe, which is a
1171
+ * real downside.
1172
+ */
1173
+ if (vacrel -> next_unskippable_block - next_block >= SKIP_PAGES_THRESHOLD )
1174
+ {
1175
+ next_block = vacrel -> next_unskippable_block ;
1176
+ if (vacrel -> next_skipsallvis )
1177
+ vacrel -> skippedallvis = true;
1178
1178
}
1179
1179
1180
1180
/* Now we must be in one of the two remaining states: */
@@ -1185,9 +1185,7 @@ heap_vac_scan_next_block(LVRelState *vacrel, BlockNumber *blkno,
1185
1185
* but chose not to. We know that they are all-visible in the VM,
1186
1186
* otherwise they would've been unskippable.
1187
1187
*/
1188
- * blkno = vacrel -> current_block = next_block ;
1189
1188
* all_visible_according_to_vm = true;
1190
- return true;
1191
1189
}
1192
1190
else
1193
1191
{
@@ -1197,10 +1195,11 @@ heap_vac_scan_next_block(LVRelState *vacrel, BlockNumber *blkno,
1197
1195
*/
1198
1196
Assert (next_block == vacrel -> next_unskippable_block );
1199
1197
1200
- * blkno = vacrel -> current_block = next_block ;
1201
1198
* all_visible_according_to_vm = vacrel -> next_unskippable_allvis ;
1202
- return true ;
1199
+ find_next_unskippable_block ( vacrel ) ;
1203
1200
}
1201
+ * blkno = vacrel -> current_block = next_block ;
1202
+ return true;
1204
1203
}
1205
1204
1206
1205
/*
@@ -1213,18 +1212,18 @@ heap_vac_scan_next_block(LVRelState *vacrel, BlockNumber *blkno,
1213
1212
* was concurrently cleared, though. All that matters is that caller scan all
1214
1213
* pages whose tuples might contain XIDs < OldestXmin, or MXIDs < OldestMxact.
1215
1214
* (Actually, non-aggressive VACUUMs can choose to skip all-visible pages with
1216
- * older XIDs/MXIDs. The *skippedallvis flag will be set here when the choice
1215
+ * older XIDs/MXIDs. The vacrel->next_skipsallvis flag will be set here when the choice
1217
1216
* to skip such a range is actually made, making everything safe.)
1218
1217
*/
1219
1218
static void
1220
- find_next_unskippable_block (LVRelState * vacrel , bool * skipsallvis )
1219
+ find_next_unskippable_block (LVRelState * vacrel )
1221
1220
{
1222
1221
BlockNumber rel_pages = vacrel -> rel_pages ;
1223
1222
BlockNumber next_unskippable_block = vacrel -> next_unskippable_block + 1 ;
1224
1223
Buffer next_unskippable_vmbuffer = vacrel -> next_unskippable_vmbuffer ;
1225
1224
bool next_unskippable_allvis ;
1226
1225
1227
- * skipsallvis = false;
1226
+ vacrel -> next_skipsallvis = false;
1228
1227
1229
1228
for (;;)
1230
1229
{
@@ -1275,7 +1274,7 @@ find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis)
1275
1274
* All-visible block is safe to skip in non-aggressive case. But
1276
1275
* remember that the final range contains such a block for later.
1277
1276
*/
1278
- * skipsallvis = true;
1277
+ vacrel -> next_skipsallvis = true;
1279
1278
}
1280
1279
1281
1280
next_unskippable_block ++ ;
0 commit comments