CUBRID Engine  latest
vacuum.c
Go to the documentation of this file.
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /*
20  * vacuum.c - Vacuuming system implementation.
21  *
22  */
23 #include "system.h"
24 #include "vacuum.h"
25 
26 #include "base_flag.hpp"
27 #include "boot_sr.h"
28 #include "btree.h"
29 #include "dbtype.h"
30 #include "heap_file.h"
32 #include "log_append.hpp"
33 #include "log_compress.h"
34 #include "log_lsa.hpp"
35 #include "log_impl.h"
36 #include "mvcc.h"
37 #include "mvcc_table.hpp"
38 #include "object_representation.h"
40 #include "overflow_file.h"
41 #include "page_buffer.h"
42 #include "perf_monitor.h"
43 #include "resource_shared_pool.hpp"
44 #include "thread_entry_task.hpp"
45 #if defined (SERVER_MODE)
46 #include "thread_daemon.hpp"
47 #endif /* SERVER_MODE */
48 #include "thread_looper.hpp"
49 #include "thread_manager.hpp"
50 #if defined (SERVER_MODE)
51 #include "thread_worker_pool.hpp"
52 #endif // SERVER_MODE
53 #include "util_func.h"
54 
55 #include <atomic>
56 #include <condition_variable>
57 #include <mutex>
58 #include <stack>
59 
60 #include <cstring>
61 
62 /* The maximum number of slots in a page if all of them are empty.
63  * IO_MAX_PAGE_SIZE is used for page size and any headers are ignored (it
64  * wouldn't bring a significant difference).
65  */
66 #define MAX_SLOTS_IN_PAGE (IO_MAX_PAGE_SIZE / sizeof (SPAGE_SLOT))
67 
68 /* The default number of cached entries in a vacuum statistics cache */
69 #define VACUUM_STATS_CACHE_SIZE 100
70 
71 /* Get first log page identifier in a log block */
72 #define VACUUM_FIRST_LOG_PAGEID_IN_BLOCK(blockid) \
73  ((blockid) * vacuum_Data.log_block_npages)
74 /* Get last log page identifier in a log block */
75 #define VACUUM_LAST_LOG_PAGEID_IN_BLOCK(blockid) \
76  (VACUUM_FIRST_LOG_PAGEID_IN_BLOCK (blockid + 1) - 1)
77 
78 /*
79  * Vacuum data section.
80  * Vacuum data contains useful information for the vacuum process. There are
81  * several fields, among which a table of entries which describe the progress
82  * of processing log data for vacuum.
83  *
84  * Vacuum data is organized as a queue of VACUUM_DATA_PAGE pages. Each page has a header and an array of
85  * VACUUM_DATA_ENTRY.
86  *
87  * The vacuum_Data global variable keeps useful meta-data which does not required disk storage.
88  */
89 
90 /* Vacuum log block data.
91  *
92  * Stores information on a block of log data relevant for vacuum.c
93  */
96 {
97  // *INDENT-OFF*
98  VACUUM_LOG_BLOCKID blockid; // blockid and flags
99  LOG_LSA start_lsa; // lsa of last mvcc op log record in block
100  MVCCID oldest_visible_mvccid; // oldest visible MVCCID while block was logged
101  MVCCID newest_mvccid; // newest MVCCID in log block
102 
103  vacuum_data_entry () = default;
104  vacuum_data_entry (const log_lsa & lsa, MVCCID oldest, MVCCID newest);
105  vacuum_data_entry (const log_header & hdr);
106 
108 
109  bool is_available () const;
110  bool is_vacuumed () const;
111  bool is_job_in_progress () const;
112  bool was_interrupted () const;
113 
114  void set_vacuumed ();
115  void set_job_in_progress ();
116  void set_interrupted ();
117 
118  // *INDENT-ON*
119 };
120 
121 /* One flag is required for entries currently being vacuumed. In order to
122  * avoid using an extra-field and because blockid will not use all its 64 bits
123  * first bit will be used for this flag.
124  */
125 /* Bits used for flag */
126 #define VACUUM_DATA_ENTRY_FLAG_MASK 0xE000000000000000
127 /* Bits used for blockid */
128 #define VACUUM_DATA_ENTRY_BLOCKID_MASK 0x1FFFFFFFFFFFFFFF
129 
130 /* Flags */
131 /* The represented block is being vacuumed */
132 #define VACUUM_BLOCK_STATUS_MASK 0xC000000000000000
133 #define VACUUM_BLOCK_STATUS_VACUUMED 0x8000000000000000
134 #define VACUUM_BLOCK_STATUS_IN_PROGRESS_VACUUM 0x4000000000000000
135 #define VACUUM_BLOCK_STATUS_AVAILABLE 0x0000000000000000
136 
137 #define VACUUM_BLOCK_FLAG_INTERRUPTED 0x2000000000000000
138 
139 /* Access fields in a vacuum data table entry */
140 /* Get blockid (use mask to cancel flag bits) */
141 #define VACUUM_BLOCKID_WITHOUT_FLAGS(blockid) \
142  ((blockid) & VACUUM_DATA_ENTRY_BLOCKID_MASK)
143 
144 /* Get flags from blockid. */
145 #define VACUUM_BLOCKID_GET_FLAGS(blockid) \
146  ((blockid) & VACUUM_DATA_ENTRY_FLAG_MASK)
147 
148 /* Vacuum block status: requested means that vacuum data has assigned it as
149  * a job, but no worker started it yet; running means that a work is currently
150  * vacuuming based on this entry's block.
151  */
152 /* Get vacuum block status */
153 #define VACUUM_BLOCK_STATUS(blockid) \
154  ((blockid) & VACUUM_BLOCK_STATUS_MASK)
155 
156 /* Check vacuum block status */
157 #define VACUUM_BLOCK_STATUS_IS_VACUUMED(blockid) \
158  (VACUUM_BLOCK_STATUS (blockid) == VACUUM_BLOCK_STATUS_VACUUMED)
159 #define VACUUM_BLOCK_STATUS_IS_IN_PROGRESS(blockid) \
160  (VACUUM_BLOCK_STATUS (blockid) == VACUUM_BLOCK_STATUS_IN_PROGRESS_VACUUM)
161 #define VACUUM_BLOCK_STATUS_IS_AVAILABLE(blockid) \
162  (VACUUM_BLOCK_STATUS (blockid) == VACUUM_BLOCK_STATUS_AVAILABLE)
163 
164 /* Set vacuum block status */
165 #define VACUUM_BLOCK_STATUS_SET_VACUUMED(blockid) \
166  ((blockid) = ((blockid) & ~VACUUM_BLOCK_STATUS_MASK) | VACUUM_BLOCK_STATUS_VACUUMED)
167 #define VACUUM_BLOCK_STATUS_SET_IN_PROGRESS(blockid) \
168  ((blockid) = ((blockid) & ~VACUUM_BLOCK_STATUS_MASK) | VACUUM_BLOCK_STATUS_IN_PROGRESS_VACUUM)
169 #define VACUUM_BLOCK_STATUS_SET_AVAILABLE(blockid) \
170  ((blockid) = ((blockid) & ~VACUUM_BLOCK_STATUS_MASK) | VACUUM_BLOCK_STATUS_AVAILABLE)
171 
172 #define VACUUM_BLOCK_IS_INTERRUPTED(blockid) \
173  (((blockid) & VACUUM_BLOCK_FLAG_INTERRUPTED) != 0)
174 #define VACUUM_BLOCK_SET_INTERRUPTED(blockid) \
175  ((blockid) |= VACUUM_BLOCK_FLAG_INTERRUPTED)
176 #define VACUUM_BLOCK_CLEAR_INTERRUPTED(blockid) \
177  ((blockid) &= ~VACUUM_BLOCK_FLAG_INTERRUPTED)
178 
179 /* Vacuum data page.
180  *
181  * One page of vacuum data file.
182  */
183 // *INDENT-OFF*
186 {
189  INT16 index_free;
190 
191  /* First vacuum data entry in page. It is followed by other entries based on the page capacity. */
193 
194  static const INT16 INDEX_NOT_FOUND = -1;
195 
196  bool is_empty () const;
197  bool is_index_valid (INT16 index) const;
198  INT16 get_index_of_blockid (VACUUM_LOG_BLOCKID blockid) const;
199 
200  VACUUM_LOG_BLOCKID get_first_blockid () const;
201 };
202 // *INDENT-ON*
203 #define VACUUM_DATA_PAGE_HEADER_SIZE (offsetof (VACUUM_DATA_PAGE, data))
204 
205 /*
206  * Overwritten versions of pgbuf_fix, pgbuf_unfix and pgbuf_set_dirty, adapted for the needs of vacuum data.
207  *
208  * NOTE: These macro's should make sure that first/last vacuum data pages are not unfixed or re-fixed.
209  */
210 
211 /* Fix a vacuum data page. If the VPID matches first or last vacuum data page, then the respective page is returned.
212  * Otherwise, the page is fixed from page buffer.
213  */
214 #define vacuum_fix_data_page(thread_p, vpidp) \
215  /* Check if page is vacuum_Data.first_page */ \
216  (vacuum_Data.first_page != NULL && VPID_EQ (pgbuf_get_vpid_ptr ((PAGE_PTR) vacuum_Data.first_page), vpidp) ? \
217  /* True: vacuum_Data.first_page */ \
218  vacuum_Data.first_page : \
219  /* False: check if page is vacuum_Data.last_page. */ \
220  vacuum_Data.last_page != NULL && VPID_EQ (pgbuf_get_vpid_ptr ((PAGE_PTR) vacuum_Data.last_page), vpidp) ? \
221  /* True: vacuum_Data.last_page */ \
222  vacuum_Data.last_page : \
223  /* False: fix the page. */ \
224  (VACUUM_DATA_PAGE *) pgbuf_fix (thread_p, vpidp, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH))
225 
226 /* Unfix vacuum data page. If the page is first or last in vacuum data, it is not unfixed. */
227 #define vacuum_unfix_data_page(thread_p, data_page) \
228  do \
229  { \
230  if ((data_page) != vacuum_Data.first_page && (data_page) != vacuum_Data.last_page) \
231  { \
232  /* Do not unfix first or last page. */ \
233  pgbuf_unfix (thread_p, (PAGE_PTR) (data_page)); \
234  } \
235  (data_page) = NULL; \
236  } while (0)
237 
238 /* Set page dirty [and free it]. First and last vacuum data page are not freed. */
239 #define vacuum_set_dirty_data_page(thread_p, data_page, free) \
240  do \
241  { \
242  if ((data_page) != vacuum_Data.first_page && (data_page) != vacuum_Data.last_page) \
243  { \
244  pgbuf_set_dirty (thread_p, (PAGE_PTR) (data_page), free); \
245  } \
246  else \
247  { \
248  /* Do not unfix first or last page. */ \
249  pgbuf_set_dirty (thread_p, (PAGE_PTR) (data_page), DONT_FREE); \
250  } \
251  if ((free) == FREE) \
252  { \
253  (data_page) = NULL; \
254  } \
255  } while (0)
256 
257 static inline void
259 {
260  assert (data_page != NULL);
261  pgbuf_set_dirty (thread_p, (PAGE_PTR) (data_page), DONT_FREE);
262 }
263 
264 /* Unfix first and last vacuum data page. */
265 #define vacuum_unfix_first_and_last_data_page(thread_p) \
266  do \
267  { \
268  if (vacuum_Data.last_page != NULL && vacuum_Data.last_page != vacuum_Data.first_page) \
269  { \
270  pgbuf_unfix (thread_p, (PAGE_PTR) vacuum_Data.last_page); \
271  } \
272  vacuum_Data.last_page = NULL; \
273  if (vacuum_Data.first_page != NULL) \
274  { \
275  pgbuf_unfix (thread_p, (PAGE_PTR) vacuum_Data.first_page); \
276  } \
277  vacuum_Data.first_page = NULL; \
278  } while (0)
279 
280 // *INDENT-OFF*
281 
282 //
283 // vacuum_job_cursor is a class that helps tracking job generation progress. its main indicative of progress is the
284 // blockid; however, after removing/adding log blocks to vacuum data this blockid can be relocated to a different
285 // page. it is cursor's job to maintain the correct position of blocks.
286 //
288 {
289  public:
291  ~vacuum_job_cursor ();
292 
293  bool is_valid () const; // return true if cursor valid (get_current_entry can be called)
294  bool is_loaded () const; // return true if cursor page/index are loaded
295 
296  void increment_blockid (); // increment cursor blockid
297  void set_on_vacuum_data_start (); // set cursor blockid to first block in vacuum data
298  void readjust_to_vacuum_data_changes (); // readjust cursor blockid after changes to vacuum data
299 
300  // getters
302  const VPID &get_page_vpid () const;
303  vacuum_data_page *get_page () const;
304  INT16 get_index () const;
305 
306  const vacuum_data_entry &get_current_entry () const; // get current entry; cursor must be valid
307  void start_job_on_current_entry () const;
308 
309  void force_data_update ();
310  void unload (); // unload page/index
311  void load (); // load page/index
312 
313  private:
314  void change_blockid (VACUUM_LOG_BLOCKID blockid); // reset m_blockid to argument
315  void reload (); // reload; if a page is loaded and if it contains current
316  // blockid, current configuration is kept
317  void search (); // search page/index of cursor blockid
318 
319  VACUUM_LOG_BLOCKID m_blockid; // current cursor blockid
320  VACUUM_DATA_PAGE *m_page; // loaded page of blockid or null
321  INT16 m_index; // loaded index of blockid or INDEX_NOT_FOUND
322 };
323 
324 // helper macros for printing vacuum_job_cursor
325 #define vacuum_job_cursor_print_format "vacuum_job_cursor(%lld, %d|%d|%d)"
326 #define vacuum_job_cursor_print_args(cursor) \
327  (long long int) (cursor).get_blockid (), VPID_AS_ARGS (&(cursor).get_page_vpid ()), (int) (cursor).get_index ()
328 
330 {
331  public:
333 
334  void request_shutdown ();
335  bool is_shutdown_requested ();
336  bool check_shutdown_request ();
337 
338  private:
339  enum state
340  {
342 #if defined (SERVER_MODE)
343  SHUTDOWN_REQUESTED,
344 #endif // SERVER_MODE
345  SHUTDOWN_REGISTERED
346  };
348 #if defined (SERVER_MODE)
349  std::mutex m_state_mutex;
350  std::condition_variable m_condvar;
351 #endif // SERVER_MODE
352 };
353 
354 /* Vacuum data.
355  *
356  * Stores data required for vacuum. It is also stored on disk in the first
357  * database volume.
358  */
359 typedef struct vacuum_data VACUUM_DATA;
361 {
362  public:
363  VFID vacuum_data_file; /* Vacuum data file VFID. */
364  LOG_PAGEID keep_from_log_pageid; /* Smallest LOG_PAGEID that vacuum may still need for its jobs. */
365 
366  MVCCID oldest_unvacuumed_mvccid; /* Global oldest MVCCID not vacuumed (yet). */
367 
368  VACUUM_DATA_PAGE *first_page; /* Cached first vacuum data page. Usually used to generate new jobs. */
369  VACUUM_DATA_PAGE *last_page; /* Cached last vacuum data page. Usually used to receive new data. */
370 
371  int page_data_max_count; /* Maximum data entries fitting one vacuum data page. */
372 
373  int log_block_npages; /* The number of pages in a log block. */
374 
375  bool is_loaded; /* True if vacuum data is loaded. */
377  bool is_archive_removal_safe; /* Set to true after keep_from_log_pageid is updated. */
378 
379  LOG_LSA recovery_lsa; /* This is the LSA where recovery starts. It will be used to go backward in the log
380  * if data on log blocks must be recovered.
381  */
383 
384  #if defined (SA_MODE)
385  bool is_vacuum_complete;
386  #endif /* SA_MODE */
387 
389  : vacuum_data_file (VFID_INITIALIZER)
390  , keep_from_log_pageid (NULL_PAGEID)
391  , oldest_unvacuumed_mvccid (MVCCID_NULL)
392  , first_page (NULL)
393  , last_page (NULL)
394  , page_data_max_count (0)
395  , log_block_npages (0)
396  , is_loaded (false)
397  , shutdown_sequence ()
398  , is_archive_removal_safe (false)
399  , recovery_lsa (LSA_INITIALIZER)
400  , is_restoredb_session (false)
401  #if defined (SA_MODE)
402  , is_vacuum_complete (false)
403  #endif // SA_MODE
404  , m_last_blockid (VACUUM_NULL_LOG_BLOCKID)
405  {
406  }
407 
408  bool is_empty () const; // returns true if vacuum data has no blocks
409  bool has_one_page () const; // returns true if vacuum data has one page only
410 
411  VACUUM_LOG_BLOCKID get_last_blockid () const; // get last blockid of vacuum data
412  VACUUM_LOG_BLOCKID get_first_blockid () const; // get first blockid of vacuum data; if vacuum data is empty
413 
414  // same as last blockid
415  void set_last_blockid (VACUUM_LOG_BLOCKID blockid); // set new value for last blockid of vacuum data
416 
417  void update ();
418  void set_oldest_unvacuumed_on_boot ();
419 
420  private:
421  const VACUUM_DATA_ENTRY &get_first_entry () const;
422  void upgrade_oldest_unvacuumed (MVCCID mvccid);
423 
424  VACUUM_LOG_BLOCKID m_last_blockid; /* Block id for last vacuum data entry... This entry is actually the id of last
425  * added block which may not even be in vacuum data (being already vacuumed).
426  */
427 };
429 // *INDENT-ON*
430 
431 /* vacuum data load */
434 {
437 };
439 
440 /* Vacuum worker structure used by vacuum master thread. */
441 /* This VACUUM_WORKER structure was designed for the needs of the vacuum workers. However, since the design of
442  * vacuum data was changed, and since vacuum master may have to allocate or deallocate disk pages, it needed to make
443  * use of system operations and transaction descriptor in similar ways with the workers.
444  * To extend that functionality in an easy way and to benefit from the postpone cache optimization, master was also
445  * assigned this VACUUM_WORKER.
446  */
448 
449 /*
450  * Vacuum worker/job related structures.
451  */
452 /* A lock-free buffer used for communication between logger transactions and
453  * auto-vacuum master. It is advisable to avoid synchronizing running
454  * transactions with vacuum threads and for this reason the block data is not
455  * added directly to vacuum data.
456  */
457 /* *INDENT-OFF* */
459 /* *INDENT-ON* */
460 #define VACUUM_BLOCK_DATA_BUFFER_CAPACITY 1024
461 
462 /* A lock free queue of vacuum jobs. Master will add jobs based on vacuum data
463  * and workers will execute the jobs one by one.
464  */
465 /* *INDENT-OFF* */
467 /* *INDENT-ON* */
468 
469 /* number or log pages on each block of buffer log prefetch */
470 #define VACUUM_PREFETCH_LOG_BLOCK_BUFFER_PAGES ((size_t) (1 + vacuum_Data.log_block_npages))
471 
472 #if defined(SERVER_MODE)
473 #define VACUUM_MAX_TASKS_IN_WORKER_POOL ((size_t) (3 * prm_get_integer_value (PRM_ID_VACUUM_WORKER_COUNT)))
474 #endif /* SERVER_MODE */
475 
476 #define VACUUM_FINISHED_JOB_QUEUE_CAPACITY 2048
477 
478 #define VACUUM_LOG_BLOCK_BUFFER_INVALID (-1)
479 
480 /* Convert vacuum worker TRANID to an index in vacuum worker's array */
481 #define VACUUM_WORKER_INDEX_TO_TRANID(index) \
482  (-index + LOG_LAST_VACUUM_WORKER_TRANID)
483 
484 /* Convert index in vacuum worker's array to TRANID */
485 #define VACUUM_WORKER_TRANID_TO_INDEX(trid) \
486  (-trid + LOG_LAST_VACUUM_WORKER_TRANID)
487 
488 /* Static array of vacuum workers */
490 
491 /* VACUUM_HEAP_HELPER -
492  * Structure used by vacuum heap functions.
493  */
496 {
497  PAGE_PTR home_page; /* Home page for objects being vacuumed. */
498  VPID home_vpid; /* VPID of home page. */
499  PAGE_PTR forward_page; /* Used to keep forward page of REC_RELOCATION or first overflow page of REC_BIGONE. */
500  OID forward_oid; /* Link to forward page. */
501  PGSLOTID crt_slotid; /* Slot ID of current record being vacuumed. */
502  INT16 record_type; /* Current record type. */
503  RECDES record; /* Current record data. */
504 
505  /* buffer of current record (used by HOME and NEW_HOME) */
507 
508  MVCC_REC_HEADER mvcc_header; /* MVCC header. */
509 
510  HFID hfid; /* Heap file identifier. */
511  VFID overflow_vfid; /* Overflow file identifier. */
512  bool reusable; /* True if heap file has reusable slots. */
513 
514  MVCC_SATISFIES_VACUUM_RESULT can_vacuum; /* Result of vacuum check. */
515 
516  /* Collect data on vacuum. */
517  PGSLOTID slots[MAX_SLOTS_IN_PAGE]; /* Slot ID's. */
518  MVCC_SATISFIES_VACUUM_RESULT results[MAX_SLOTS_IN_PAGE]; /* Vacuum check results. */
519 
520  OID forward_link; /* REC_BIGONE, REC_RELOCATION forward links. (buffer for forward_recdes) */
521  RECDES forward_recdes; /* Record descriptor to read forward links. */
522 
523  int n_bulk_vacuumed; /* Number of vacuumed objects to be logged in bulk mode. */
524  int n_vacuumed; /* Number of vacuumed objects. */
525  int initial_home_free_space; /* Free space in home page before vacuum */
526 
527  /* Performance tracking. */
528  PERF_UTIME_TRACKER time_track;
529 };
530 
531 #define VACUUM_PERF_HEAP_START(thread_p, helper) \
532  PERF_UTIME_TRACKER_START (thread_p, &(helper)->time_track);
533 #define VACUUM_PERF_HEAP_TRACK_PREPARE(thread_p, helper) \
534  PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, \
535  PSTAT_HEAP_VACUUM_PREPARE)
536 #define VACUUM_PERF_HEAP_TRACK_EXECUTE(thread_p, helper) \
537  PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, \
538  PSTAT_HEAP_VACUUM_EXECUTE)
539 #define VACUUM_PERF_HEAP_TRACK_LOGGING(thread_p, helper) \
540  PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &(helper)->time_track, \
541  PSTAT_HEAP_VACUUM_LOG)
542 
543 /* Flags used to mark rcv->offset with hints about recovery process. */
544 /* Flags for reusable heap files. */
545 #define VACUUM_LOG_VACUUM_HEAP_REUSABLE 0x8000
546 /* Flag if page is entirely vacuumed. */
547 #define VACUUM_LOG_VACUUM_HEAP_ALL_VACUUMED 0x4000
548 /* Mask. */
549 #define VACUUM_LOG_VACUUM_HEAP_MASK 0xC000
550 
551 /* The buffer size of collected heap objects during a vacuum job. */
552 #define VACUUM_DEFAULT_HEAP_OBJECT_BUFFER_SIZE 4000
553 
554 /*
555  * Dropped files section.
556  */
557 
558 static bool vacuum_Dropped_files_loaded = false;
559 
560 /* Identifier for the file where dropped file list is kept */
562 
563 /* Identifier for first page in dropped files */
565 
566 /* Total count of dropped files */
567 static INT32 vacuum_Dropped_files_count = 0;
568 
569 /* Dropped file entry */
572 {
575 };
576 
577 /* A page of dropped files entries */
580 {
581  VPID next_page; /* VPID of next dropped files page. */
582  INT16 n_dropped_files; /* Number of entries on page */
583 
584  /* Leave the dropped files at the end of the structure */
585  VACUUM_DROPPED_FILE dropped_files[1]; /* Dropped files. */
586 };
587 
588 /* Size of dropped file page header */
589 #define VACUUM_DROPPED_FILES_PAGE_HEADER_SIZE \
590  (offsetof (VACUUM_DROPPED_FILES_PAGE, dropped_files))
591 
592 /* Capacity of dropped file page */
593 #define VACUUM_DROPPED_FILES_PAGE_CAPACITY \
594  ((INT16) ((DB_PAGESIZE - VACUUM_DROPPED_FILES_PAGE_HEADER_SIZE) \
595  / sizeof (VACUUM_DROPPED_FILE)))
596 /* Capacity of dropped file page when page size is max */
597 #define VACUUM_DROPPED_FILES_MAX_PAGE_CAPACITY \
598  ((INT16) ((IO_MAX_PAGE_SIZE - VACUUM_DROPPED_FILES_PAGE_HEADER_SIZE) \
599  / sizeof (VACUUM_DROPPED_FILE)))
600 
601 #define VACUUM_DROPPED_FILE_FLAG_DUPLICATE 0x8000
602 
603 /* Overwritten versions of pgbuf_fix, pgbuf_unfix and pgbuf_set_dirty,
604  * adapted for the needs of vacuum and its dropped files pages.
605  */
606 #define vacuum_fix_dropped_entries_page(thread_p, vpidp, latch) \
607  ((VACUUM_DROPPED_FILES_PAGE *) pgbuf_fix (thread_p, vpidp, OLD_PAGE, \
608  latch, \
609  PGBUF_UNCONDITIONAL_LATCH))
610 #define vacuum_unfix_dropped_entries_page(thread_p, dropped_page) \
611  do \
612  { \
613  pgbuf_unfix (thread_p, (PAGE_PTR) (dropped_page)); \
614  (dropped_page) = NULL; \
615  } while (0)
616 #define vacuum_set_dirty_dropped_entries_page(thread_p, dropped_page, free) \
617  do \
618  { \
619  pgbuf_set_dirty (thread_p, (PAGE_PTR) (dropped_page), free); \
620  if ((free) == FREE) \
621  { \
622  (dropped_page) = NULL; \
623  } \
624  } while (0)
625 
626 #if !defined (NDEBUG)
627 /* Track pages allocated for dropped files. Used for debugging only, for
628  * easy observation of the lists of dropped files at any time.
629  */
632 {
635 };
637 #define VACUUM_TRACK_DROPPED_FILES_SIZE \
638  (DB_PAGESIZE + sizeof (VACUUM_TRACK_DROPPED_FILES *))
639 #endif /* !NDEBUG */
640 
644 
647 {
650 };
651 
652 bool vacuum_Is_booted = false;
653 
654 /* Logging */
655 #define VACUUM_LOG_DATA_ENTRY_MSG(name) \
656  "name = {blockid = %lld, flags = %lld, start_lsa = %lld|%d, oldest_visible_mvccid=%llu, newest_mvccid=%llu }"
657 #define VACUUM_LOG_DATA_ENTRY_AS_ARGS(data) \
658  (long long) VACUUM_BLOCKID_WITHOUT_FLAGS ((data)->blockid), (long long) VACUUM_BLOCKID_GET_FLAGS ((data)->blockid), \
659  LSA_AS_ARGS (&(data)->start_lsa), (unsigned long long) (data)->oldest_visible_mvccid, \
660  (unsigned long long) (data)->newest_mvccid
661 
662 /* Vacuum static functions. */
663 static void vacuum_update_keep_from_log_pageid (THREAD_ENTRY * thread_p);
664 static int vacuum_compare_blockids (const void *ptr1, const void *ptr2);
665 static void vacuum_data_mark_finished (THREAD_ENTRY * thread_p);
666 static void vacuum_data_empty_page (THREAD_ENTRY * thread_p, VACUUM_DATA_PAGE * prev_data_page,
667  VACUUM_DATA_PAGE ** data_page);
668 static void vacuum_data_initialize_new_page (THREAD_ENTRY * thread_p, VACUUM_DATA_PAGE * data_page);
669 static void vacuum_init_data_page_with_last_blockid (THREAD_ENTRY * thread_p, VACUUM_DATA_PAGE * data_page,
671 static int vacuum_recover_lost_block_data (THREAD_ENTRY * thread_p);
672 
673 static int vacuum_process_log_block (THREAD_ENTRY * thread_p, VACUUM_DATA_ENTRY * block_data,
674  bool sa_mode_partial_block);
675 static int vacuum_process_log_record (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker, LOG_LSA * log_lsa_p,
676  LOG_PAGE * log_page_p, LOG_DATA * log_record_data, MVCCID * mvccid,
677  char **undo_data_ptr, int *undo_data_size, LOG_VACUUM_INFO * vacuum_info,
678  bool * is_file_dropped, bool stop_after_vacuum_info);
679 static void vacuum_read_log_aligned (THREAD_ENTRY * thread_entry, LOG_LSA * log_lsa, LOG_PAGE * log_page);
680 static void vacuum_read_log_add_aligned (THREAD_ENTRY * thread_entry, size_t size, LOG_LSA * log_lsa,
681  LOG_PAGE * log_page);
682 static void vacuum_read_advance_when_doesnt_fit (THREAD_ENTRY * thread_entry, size_t size, LOG_LSA * log_lsa,
683  LOG_PAGE * log_page);
684 static void vacuum_copy_data_from_log (THREAD_ENTRY * thread_p, char *area, int length, LOG_LSA * log_lsa,
685  LOG_PAGE * log_page);
686 static void vacuum_finished_block_vacuum (THREAD_ENTRY * thread_p, VACUUM_DATA_ENTRY * block_data,
687  bool is_vacuum_complete);
688 static bool vacuum_is_work_in_progress (THREAD_ENTRY * thread_p);
689 static int vacuum_worker_allocate_resources (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker);
690 static void vacuum_finalize_worker (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker_info);
691 
692 static int vacuum_compare_heap_object (const void *a, const void *b);
693 static int vacuum_collect_heap_objects (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker, OID * oid, VFID * vfid);
694 static void vacuum_cleanup_collected_by_vfid (VACUUM_WORKER * worker, VFID * vfid);
695 static int vacuum_heap (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker, MVCCID threshold_mvccid, bool was_interrupted);
696 static int vacuum_heap_prepare_record (THREAD_ENTRY * thread_p, VACUUM_HEAP_HELPER * helper);
698 static int vacuum_heap_record (THREAD_ENTRY * thread_p, VACUUM_HEAP_HELPER * helper);
699 static int vacuum_heap_get_hfid_and_file_type (THREAD_ENTRY * thread_p, VACUUM_HEAP_HELPER * helper, const VFID * vfid);
700 static void vacuum_heap_page_log_and_reset (THREAD_ENTRY * thread_p, VACUUM_HEAP_HELPER * helper,
701  bool update_best_space_stat, bool unlatch_page);
702 static void vacuum_log_vacuum_heap_page (THREAD_ENTRY * thread_p, PAGE_PTR page_p, int n_slots, PGSLOTID * slots,
703  MVCC_SATISFIES_VACUUM_RESULT * results, bool reusable, bool all_vacuumed);
704 static void vacuum_log_remove_ovf_insid (THREAD_ENTRY * thread_p, PAGE_PTR ovfpage);
705 static void vacuum_log_redoundo_vacuum_record (THREAD_ENTRY * thread_p, PAGE_PTR page_p, PGSLOTID slotid,
706  RECDES * undo_recdes, bool reusable);
707 static int vacuum_log_prefetch_vacuum_block (THREAD_ENTRY * thread_p, VACUUM_DATA_ENTRY * entry);
708 static int vacuum_fetch_log_page (THREAD_ENTRY * thread_p, LOG_PAGEID log_pageid, LOG_PAGE * log_page);
709 
710 static int vacuum_compare_dropped_files (const void *a, const void *b);
711 #if defined (SERVER_MODE)
712 static int vacuum_compare_dropped_files_version (INT32 version_a, INT32 version_b);
713 #endif // SERVER_MODE
714 static int vacuum_add_dropped_file (THREAD_ENTRY * thread_p, VFID * vfid, MVCCID mvccid);
715 static int vacuum_cleanup_dropped_files (THREAD_ENTRY * thread_p);
716 static int vacuum_find_dropped_file (THREAD_ENTRY * thread_p, bool * is_file_dropped, VFID * vfid, MVCCID mvccid);
717 static void vacuum_log_cleanup_dropped_files (THREAD_ENTRY * thread_p, PAGE_PTR page_p, INT16 * indexes,
718  INT16 n_indexes);
720  VPID * next_page);
721 static int vacuum_get_first_page_dropped_files (THREAD_ENTRY * thread_p, VPID * first_page_vpid);
722 static void vacuum_notify_all_workers_dropped_file (const VFID & vfid_dropped, MVCCID mvccid);
723 
724 static bool is_not_vacuumed_and_lost (THREAD_ENTRY * thread_p, MVCC_REC_HEADER * rec_header);
725 static void print_not_vacuumed_to_log (OID * oid, OID * class_oid, MVCC_REC_HEADER * rec_header, int btree_node_type);
726 
727 static bool vacuum_is_empty (void);
728 static void vacuum_convert_thread_to_master (THREAD_ENTRY * thread_p, thread_type & save_type);
729 static void vacuum_convert_thread_to_worker (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker, thread_type & save_type);
730 static void vacuum_restore_thread (THREAD_ENTRY * thread_p, thread_type save_type);
731 
732 static void vacuum_data_load_first_and_last_page (THREAD_ENTRY * thread_p);
734 
735 static void vacuum_data_empty_update_last_blockid (THREAD_ENTRY * thread_p);
736 
737 #if defined (SA_MODE)
738 static void vacuum_sa_run_job (THREAD_ENTRY * thread_p, const VACUUM_DATA_ENTRY & data_entry, bool is_partial,
739  PERF_UTIME_TRACKER & perf_tracker);
740 #endif // SA_MODE
741 
742 #if !defined (NDEBUG)
743 /* Debug function to verify vacuum data. */
744 static void vacuum_verify_vacuum_data_debug (THREAD_ENTRY * thread_p);
746 #define VACUUM_VERIFY_VACUUM_DATA(thread_p) vacuum_verify_vacuum_data_debug (thread_p)
747 #else /* NDEBUG */
748 #define VACUUM_VERIFY_VACUUM_DATA(thread_p)
749 #endif /* NDEBUG */
750 static void vacuum_check_shutdown_interruption (const THREAD_ENTRY * thread_p, int error_code);
751 
752 /* *INDENT-OFF* */
753 void
755 {
756  assert (worker != NULL);
757 
758  context.type = type;
759  context.vacuum_worker = worker;
760  context.check_interrupt = false;
761 
762  assert (context.get_system_tdes () == NULL);
763  context.claim_system_worker ();
764 }
765 
766 // class vacuum_master_context_manager
767 //
768 // description:
769 // extend entry_manager to override context construction and retirement
770 //
772 {
773  private:
774  void on_daemon_create (cubthread::entry &context) final
775  {
776  // set vacuum master in execute state
777  assert (vacuum_Master.state == VACUUM_WORKER_STATE_EXECUTE);
778  vacuum_Master.state = VACUUM_WORKER_STATE_EXECUTE;
779 
780  vacuum_init_thread_context (context, TT_VACUUM_MASTER, &vacuum_Master);
781  }
782 
783  void on_daemon_retire (cubthread::entry &context) final
784  {
785  vacuum_finalize (&context); // todo: is this the rightful place?
786 
787  context.retire_system_worker ();
788 
789  if (context.vacuum_worker != NULL)
790  {
791  assert (context.vacuum_worker == &vacuum_Master);
792  context.vacuum_worker = NULL;
793  }
794  else
795  {
796  assert (false);
797  }
798  }
799 };
800 
802 {
803  public:
804  vacuum_master_task () = default;
805 
806  void execute (cubthread::entry &thread_ref) final;
807 
808  private:
809  bool check_shutdown () const;
810  bool is_task_queue_full () const;
811  bool should_interrupt_iteration () const; // conditions to interrupt an iteration and go to sleep
812  bool is_cursor_entry_ready_to_vacuum () const; // check if conditions to vacuum cursor entry are met
813  bool is_cursor_entry_available () const; // check if cursor entry is available and can generate a new job
814  void start_job_on_cursor_entry () const; // start job on cursor entry
815  bool should_force_data_update () const; // conditions to force a vacuum data update
816 
817  vacuum_job_cursor m_cursor; // cursor that iterates through vacuum data entries
818  MVCCID m_oldest_visible_mvccid; // saved oldest visible mvccid (recomputed on each iteration)
819 };
820 
821 // class vacuum_worker_context_manager
822 //
823 // description:
824 // extern entry manager to override construction/retirement of vacuum worker context
825 //
827 {
828  public:
830  {
832  }
833 
835  {
836  delete m_pool;
837  }
838 
840  {
841  return m_pool->claim ();
842  }
843  void retire_worker (VACUUM_WORKER & worker)
844  {
845  return m_pool->retire (worker);
846  }
847 
848  private:
849 
850  void on_create (cubthread::entry & context) final
851  {
852  context.tran_index = 0;
853 
854  vacuum_init_thread_context (context, TT_VACUUM_WORKER, m_pool->claim ());
855 
856  if (vacuum_worker_allocate_resources (&context, context.vacuum_worker) != NO_ERROR)
857  {
858  assert (false);
859  }
860 
861  // get private LRU index
862  context.private_lru_index = context.vacuum_worker->private_lru_index;
863  }
864 
865  void on_retire (cubthread::entry & context) final
866  {
867  context.retire_system_worker ();
868 
869  if (context.vacuum_worker != NULL)
870  {
871  context.vacuum_worker->state = VACUUM_WORKER_STATE::VACUUM_WORKER_STATE_INACTIVE;
872  m_pool->retire (*context.vacuum_worker);
873  context.vacuum_worker = NULL;
874  }
875  else
876  {
877  assert (false);
878  }
879 
880  // reset private LRU index
881  context.private_lru_index = -1;
882  }
883 
884  void on_recycle (cubthread::entry & context) final
885  {
886  // reset tran_index (it is recycled as NULL_TRAN_INDEX)
887  context.tran_index = LOG_SYSTEM_TRAN_INDEX;
888  }
889 
890  // members
892 };
893 
894 // class vacuum_worker_task
895 //
896 // description:
897 // vacuum worker task
898 //
900 {
901  public:
903  : m_data (entry_ref)
904  {
905  }
906 
907  void execute (cubthread::entry & thread_ref) final
908  {
909  // safe-guard - check interrupt is always false
910  assert (!thread_ref.check_interrupt);
911  vacuum_process_log_block (&thread_ref, &m_data, false);
912  }
913 
914  private:
916 
918 };
919 
920 // vacuum master globals
921 static cubthread::daemon *vacuum_Master_daemon = NULL; // daemon thread
923 
924 // vacuum worker globals
927 
928 /* *INDENT-ON* */
929 
930 #if defined (SA_MODE)
931 static void
932 vacuum_sa_run_job (THREAD_ENTRY * thread_p, const VACUUM_DATA_ENTRY & data_entry, bool is_partial,
933  PERF_UTIME_TRACKER & perf_tracker)
934 {
935  PERF_UTIME_TRACKER_TIME (thread_p, &perf_tracker, PSTAT_VAC_MASTER);
936 
937  VACUUM_WORKER *worker_p = vacuum_Worker_context_manager->claim_worker ();
938  thread_type save_type = thread_type::TT_NONE;
939  vacuum_convert_thread_to_worker (thread_p, worker_p, save_type);
940  assert (save_type == thread_type::TT_VACUUM_MASTER);
941 
942  VACUUM_DATA_ENTRY copy_data_entry = data_entry;
943  vacuum_process_log_block (thread_p, &copy_data_entry, is_partial);
944 
945  vacuum_convert_thread_to_master (thread_p, save_type);
946  assert (save_type == thread_type::TT_VACUUM_WORKER);
947  vacuum_Worker_context_manager->retire_worker (*worker_p);
948 
949  PERF_UTIME_TRACKER_START (thread_p, &perf_tracker);
950 }
951 #endif // SA_MODE
952 
953 /*
954  * xvacuum () - Vacuumes database
955  *
956  * return : Error code.
957  * thread_p(in) :
958  *
959  * NOTE: CS mode temporary disabled.
960  */
961 int
962 xvacuum (THREAD_ENTRY * thread_p)
963 {
964 #if defined(SERVER_MODE)
967 #else /* !SERVER_MODE */ /* SA_MODE */
968  thread_type save_type = thread_type::TT_NONE;
969 
970  if (prm_get_bool_value (PRM_ID_DISABLE_VACUUM) || vacuum_Data.is_vacuum_complete)
971  {
972  return NO_ERROR;
973  }
974 
975  /* Assign worker and allocate required resources. */
976  vacuum_convert_thread_to_master (thread_p, save_type);
977 
978  /* Process vacuum data and run vacuum. */
980  PERF_UTIME_TRACKER perf_tracker;
981 
982  bool dummy_continue_check_interrupt;
983 
984  int error_code = NO_ERROR;
985 
987  er_log_debug (ARG_FILE_LINE, "Stand-alone vacuum start.\n");
988 
989  PERF_UTIME_TRACKER_START (thread_p, &perf_tracker);
990 
992 
993  cursor.set_on_vacuum_data_start ();
994  cursor.load ();
997 
998  // must start with empty vacuum_Block_data_buffer
999  if (!vacuum_Block_data_buffer->is_empty ())
1000  {
1001  // start by updating vacuum data
1002  cursor.force_data_update ();
1003  }
1004  assert (vacuum_Block_data_buffer->is_empty ());
1005 
1006  // consume all vacuum data blocks
1007  while (cursor.is_valid ())
1008  {
1009  if (logtb_is_interrupted (thread_p, true, &dummy_continue_check_interrupt))
1010  {
1011  cursor.unload ();
1012  vacuum_Data.update ();
1013  return NO_ERROR;
1014  }
1015 
1016  if (cursor.get_current_entry ().is_available ())
1017  {
1018  cursor.start_job_on_current_entry ();
1019  // job will be executed immediately
1020  vacuum_sa_run_job (thread_p, cursor.get_current_entry (), false, perf_tracker);
1021  }
1022  else
1023  {
1024  // skip
1025  assert (cursor.get_current_entry ().is_vacuumed ());
1027  "Job for blockid = %lld %s. Skip.",
1028  (long long int) cursor.get_current_entry ().get_blockid (),
1029  cursor.get_current_entry ().is_vacuumed ()? "was executed" : "is in progress");
1030  }
1031  cursor.increment_blockid ();
1032 
1033  if (!vacuum_Block_data_buffer->is_empty () // there is a new block
1034  || vacuum_Finished_job_queue->is_full () // finished queue is full and must be consumed
1035  || !cursor.is_valid () // cursor is at the end; we might still get another block by vacuum data update
1036  )
1037  {
1038  // force an update; cursor must not be loaded
1039  cursor.force_data_update ();
1040  }
1041  }
1042 
1043  assert (!cursor.is_loaded ());
1044  assert (vacuum_Data.is_empty ());
1045  assert (vacuum_Block_data_buffer->is_empty ());
1046 
1047 #if !defined (NDEBUG)
1049 #endif /* !NDEBUG */
1050 
1051  /* Complete vacuum for SA_MODE. This means also vacuuming based on last block being logged. */
1053  {
1054  // *INDENT-OFF*
1055  vacuum_data_entry partial_entry { log_Gl.hdr };
1056  // *INDENT-ON*
1058 
1059  // can't be interrupted
1060  bool save_check_interrupt = logtb_set_check_interrupt (thread_p, false);
1061  vacuum_sa_run_job (thread_p, partial_entry, true, perf_tracker);
1062  (void) logtb_set_check_interrupt (thread_p, save_check_interrupt);
1063  }
1064 
1065  /* All vacuum complete. */
1067 
1068  log_append_redo_data2 (thread_p, RVVAC_COMPLETE, NULL, (PAGE_PTR) vacuum_Data.first_page, 0,
1070  vacuum_set_dirty_data_page (thread_p, vacuum_Data.first_page, DONT_FREE);
1071  logpb_force_flush_pages (thread_p);
1072 
1073  /* Cleanup dropped files. */
1074  vacuum_cleanup_dropped_files (thread_p);
1075 
1076  /* Reset log header information saved for vacuum. */
1078 
1080  er_log_debug (ARG_FILE_LINE, "Stand-alone vacuum end.\n");
1081 
1082  /* Vacuum structures no longer needed. */
1083  vacuum_finalize (thread_p);
1084 
1085  vacuum_Data.is_vacuum_complete = true;
1086 
1087  PERF_UTIME_TRACKER_TIME (thread_p, &perf_tracker, PSTAT_VAC_MASTER);
1088 
1089  vacuum_restore_thread (thread_p, save_type);
1090 
1091  return NO_ERROR;
1092 #endif /* SA_MODE */
1093 }
1094 
1095 /*
1096  * xvacuum_dump - Dump the contents of vacuum
1097  *
1098  * return: nothing
1099  *
1100  * outfp(in): FILE stream where to dump the vacuum. If NULL is given,
1101  * it is dumped to stdout.
1102  */
1103 void
1104 xvacuum_dump (THREAD_ENTRY * thread_p, FILE * outfp)
1105 {
1106  LOG_PAGEID min_log_pageid = NULL_PAGEID;
1107  int archive_number;
1108 
1109  assert (outfp != NULL);
1110 
1111  if (!vacuum_Is_booted)
1112  {
1113  fprintf (outfp, "vacuum did not boot properly.\n");
1114  return;
1115  }
1116 
1117  min_log_pageid = vacuum_min_log_pageid_to_keep (thread_p);
1118  if (min_log_pageid == NULL_PAGEID)
1119  {
1120  /* this is an assertion case but ignore. */
1121  fprintf (outfp, "vacuum did not boot properly.\n");
1122  return;
1123  }
1124 
1125  fprintf (outfp, "\n");
1126  fprintf (outfp, "*** Vacuum Dump ***\n");
1127  fprintf (outfp, "First log page ID referenced = %lld ", min_log_pageid);
1128 
1129  if (logpb_is_page_in_archive (min_log_pageid))
1130  {
1131  LOG_CS_ENTER_READ_MODE (thread_p);
1132  archive_number = logpb_get_archive_number (thread_p, min_log_pageid);
1133  if (archive_number < 0)
1134  {
1135  /* this is an assertion case but ignore. */
1136  fprintf (outfp, "\n");
1137  }
1138  else
1139  {
1140  fprintf (outfp, "(in %s%s%03d)\n", log_Prefix, FILEIO_SUFFIX_LOGARCHIVE, archive_number);
1141  }
1142  LOG_CS_EXIT (thread_p);
1143  }
1144  else
1145  {
1146  fprintf (outfp, "(in %s)\n", fileio_get_base_file_name (log_Name_active));
1147  }
1148 }
1149 
1150 /*
1151  * vacuum_initialize () - Initialize necessary structures for vacuum.
1152  *
1153  * return : Void.
1154  * thread_p (in) : Thread entry.
1155  * vacuum_log_block_npages (in) : Number of log pages in a block.
1156  * vacuum_data_vfid (in) : Vacuum data VFID.
1157  * dropped_files_vfid (in) : Dropped files VFID.
1158  */
1159 int
1160 vacuum_initialize (THREAD_ENTRY * thread_p, int vacuum_log_block_npages, VFID * vacuum_data_vfid,
1161  VFID * dropped_files_vfid, bool is_restore)
1162 {
1163  int error_code = NO_ERROR;
1164  int i;
1165 
1167  {
1168  return NO_ERROR;
1169  }
1170 
1171  /* Initialize vacuum data */
1172  vacuum_Data.is_restoredb_session = is_restore;
1173  /* Save vacuum data VFID. */
1174  VFID_COPY (&vacuum_Data.vacuum_data_file, vacuum_data_vfid);
1175  /* Save vacuum log block size in pages. */
1176  vacuum_Data.log_block_npages = vacuum_log_block_npages;
1177  /* Compute the capacity of one vacuum data page. */
1179 
1180 #if defined (SA_MODE)
1181  vacuum_Data.is_vacuum_complete = false;
1182 #endif
1183 
1184  /* Initialize vacuum dropped files */
1186  VFID_COPY (&vacuum_Dropped_files_vfid, dropped_files_vfid);
1187 
1188  /* Save first page vpid. */
1189  if (vacuum_get_first_page_dropped_files (thread_p, &vacuum_Dropped_files_vpid) != NO_ERROR)
1190  {
1191  assert (false);
1192  goto error;
1193  }
1194  assert (!VPID_ISNULL (&vacuum_Dropped_files_vpid));
1195 
1199  VFID_SET_NULL (&vacuum_Last_dropped_vfid);
1200 #if !defined (NDEBUG)
1201  vacuum_Track_dropped_files = NULL;
1202 #endif
1203 
1204  /* Initialize the log block data buffer */
1205  /* *INDENT-OFF* */
1207  /* *INDENT-ON* */
1208  if (vacuum_Block_data_buffer == NULL)
1209  {
1210  goto error;
1211  }
1212 
1213  /* Initialize finished job queue. */
1214  /* *INDENT-OFF* */
1216  /* *INDENT-ON* */
1217  if (vacuum_Finished_job_queue == NULL)
1218  {
1219  goto error;
1220  }
1221 
1222  /* Initialize master worker. */
1223  vacuum_Master.drop_files_version = 0;
1224  vacuum_Master.state = VACUUM_WORKER_STATE_EXECUTE; /* Master is always in execution state. */
1225  vacuum_Master.log_zip_p = NULL;
1226  vacuum_Master.undo_data_buffer = NULL;
1227  vacuum_Master.undo_data_buffer_capacity = 0;
1228  vacuum_Master.private_lru_index = -1;
1229  vacuum_Master.heap_objects = NULL;
1230  vacuum_Master.heap_objects_capacity = 0;
1231  vacuum_Master.prefetch_log_buffer = NULL;
1232  vacuum_Master.prefetch_first_pageid = NULL_PAGEID;
1233  vacuum_Master.prefetch_last_pageid = NULL_PAGEID;
1234  vacuum_Master.allocated_resources = false;
1235 
1236  /* Initialize workers */
1237  for (i = 0; i < VACUUM_MAX_WORKER_COUNT; i++)
1238  {
1239  vacuum_Workers[i].drop_files_version = 0;
1240  vacuum_Workers[i].state = VACUUM_WORKER_STATE_INACTIVE;
1241  vacuum_Workers[i].log_zip_p = NULL;
1242  vacuum_Workers[i].undo_data_buffer = NULL;
1243  vacuum_Workers[i].undo_data_buffer_capacity = 0;
1244  vacuum_Workers[i].private_lru_index = pgbuf_assign_private_lru (thread_p, true, i);
1245  vacuum_Workers[i].heap_objects = NULL;
1246  vacuum_Workers[i].heap_objects_capacity = 0;
1247  vacuum_Workers[i].prefetch_log_buffer = NULL;
1248  vacuum_Workers[i].prefetch_first_pageid = NULL_PAGEID;
1249  vacuum_Workers[i].prefetch_last_pageid = NULL_PAGEID;
1250  vacuum_Workers[i].allocated_resources = false;
1251  }
1252 
1253  return NO_ERROR;
1254 
1255 error:
1256  vacuum_finalize (thread_p);
1257  return (error_code == NO_ERROR) ? ER_FAILED : error_code;
1258 }
1259 
1260 int
1262 {
1263  int error_code = NO_ERROR;
1264 
1265  assert (!vacuum_Is_booted); // only boot once
1266 
1268  {
1269  /* for debug only */
1270  return NO_ERROR;
1271  }
1272 
1273  if (thread_p == NULL)
1274  {
1275  thread_p = thread_get_thread_entry_info ();
1276  }
1277 
1278  /* first things first... load vacuum data and do some recovery if required */
1279  error_code = vacuum_data_load_and_recover (thread_p);
1280  if (error_code != NO_ERROR)
1281  {
1282  ASSERT_ERROR ();
1283  return error_code;
1284  }
1285 
1286  /* load dropped files from disk */
1287  error_code = vacuum_load_dropped_files_from_disk (thread_p);
1288  if (error_code != NO_ERROR)
1289  {
1290  ASSERT_ERROR ();
1291  return error_code;
1292  }
1293 
1294  // create context managers
1295  vacuum_Master_context_manager = new vacuum_master_context_manager ();
1296  vacuum_Worker_context_manager = new vacuum_worker_context_manager ();
1297 
1298 #if defined (SERVER_MODE)
1299 
1300  // get thread manager
1301  cubthread::manager * thread_manager = cubthread::get_manager ();
1302 
1303  // get logging flag for vacuum worker pool
1304  /* *INDENT-OFF* */
1305  bool log_vacuum_worker_pool =
1308 
1309  // create thread pool
1310  vacuum_Worker_threads =
1312  VACUUM_MAX_TASKS_IN_WORKER_POOL, "vacuum workers",
1313  vacuum_Worker_context_manager, 1, log_vacuum_worker_pool);
1314  assert (vacuum_Worker_threads != NULL);
1315 
1316  int vacuum_master_wakeup_interval_msec = prm_get_integer_value (PRM_ID_VACUUM_MASTER_WAKEUP_INTERVAL);
1317  cubthread::looper looper = cubthread::looper (std::chrono::milliseconds (vacuum_master_wakeup_interval_msec));
1318 
1319  // create vacuum master thread
1320  vacuum_Master_daemon =
1321  thread_manager->create_daemon (looper, new vacuum_master_task (), "vacuum_master", vacuum_Master_context_manager);
1322 
1323  /* *INDENT-ON* */
1324 #endif /* SERVER_MODE */
1325 
1326  vacuum_Is_booted = true;
1327 
1328  return NO_ERROR;
1329 }
1330 
1331 void
1333 {
1334  if (!vacuum_Is_booted)
1335  {
1336  // not booted
1337  return;
1338  }
1339 
1340  // notify master to stop generating new jobs
1342 
1343  // stop work pool
1344  if (vacuum_Worker_threads != NULL)
1345  {
1346 #if defined (SERVER_MODE)
1347  vacuum_Worker_threads->er_log_stats ();
1348  vacuum_Worker_threads->stop_execution ();
1349 #endif // SERVER_MODE
1350 
1351  cubthread::get_manager ()->destroy_worker_pool (vacuum_Worker_threads);
1352  }
1353 
1355  vacuum_Worker_context_manager = NULL;
1356 }
1357 
1358 void
1360 {
1361  if (!vacuum_Is_booted)
1362  {
1363  // not booted
1364  return;
1365  }
1366 
1367  // stop master daemon
1368  if (vacuum_Master_daemon != NULL)
1369  {
1370  cubthread::get_manager ()->destroy_daemon (vacuum_Master_daemon);
1371  }
1373  vacuum_Master_context_manager = NULL;
1374 
1375  vacuum_Is_booted = false;
1376 }
1377 
1378 /*
1379 * vacuum_finalize () - Finalize structures used for vacuum.
1380 *
1381 * return : Void.
1382 * thread_p (in) : Thread entry.
1383  */
1384 void
1386 {
1387  int i;
1388 
1390  {
1391  return;
1392  }
1393 
1394  assert (!vacuum_is_work_in_progress (thread_p));
1395 
1396  /* Make sure all finished job queues are consumed. */
1397  if (vacuum_Finished_job_queue != NULL)
1398  {
1399  vacuum_data_mark_finished (thread_p);
1400  if (!vacuum_Finished_job_queue->is_empty ())
1401  {
1403  assert (0);
1404  }
1406  vacuum_Finished_job_queue = NULL;
1407  }
1408 
1409  if (vacuum_Block_data_buffer != NULL)
1410  {
1411  while (!vacuum_Block_data_buffer->is_empty ())
1412  {
1413  // consume log block buffer; we need to do this in a loop because vacuum_consume_buffer_log_blocks adds new
1414  // log entries and may generate new log blocks
1415 
1416  if (!vacuum_Data.is_loaded)
1417  {
1418  // safe-guard check: cannot consume if data is not loaded. should never happen
1419  assert (false);
1420  break;
1421  }
1422  if (vacuum_consume_buffer_log_blocks (thread_p) != NO_ERROR)
1423  {
1425  assert (0);
1426  }
1427  }
1428  delete vacuum_Block_data_buffer;
1429  vacuum_Block_data_buffer = NULL;
1430  }
1431 
1432 #if !defined(SERVER_MODE) /* SA_MODE */
1434 #endif
1435 
1436  /* Finalize vacuum data. */
1438  /* We should have unfixed all pages. Double-check. */
1439  pgbuf_unfix_all (thread_p);
1440 
1441  /* Free all resources allocated for vacuum workers */
1442  for (i = 0; i < VACUUM_MAX_WORKER_COUNT; i++)
1443  {
1444  vacuum_finalize_worker (thread_p, &vacuum_Workers[i]);
1445  }
1446  vacuum_finalize_worker (thread_p, &vacuum_Master);
1447 
1448  /* Unlock data */
1450 }
1451 
1452 /*
1453  * vacuum_heap () - Vacuum heap objects.
1454  *
1455  * return : Error code.
1456  * thread_p (in) : Thread entry.
1457  * heap_objects (in) : Array of heap objects (VFID & OID).
1458  * n_heap_objects (in) : Number of heap objects.
1459  * threshold_mvccid (in) : Threshold MVCCID used for vacuum check.
1460  * was_interrutped (in) : True if same job was executed and interrupted.
1461  */
1462 static int
1463 vacuum_heap (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker, MVCCID threshold_mvccid, bool was_interrupted)
1464 {
1465  VACUUM_HEAP_OBJECT *page_ptr;
1466  VACUUM_HEAP_OBJECT *obj_ptr;
1467  int error_code = NO_ERROR;
1470  bool reusable = false;
1471  int object_count = 0;
1472 
1473  if (worker->n_heap_objects == 0)
1474  {
1475  return NO_ERROR;
1476  }
1477 
1478  /* Set state to execute mode. */
1480 
1481  /* Sort all objects. Sort function will order all objects first by VFID then by OID. All objects belonging to one
1482  * file will be consecutive. Also, all objects belonging to one page will be consecutive. Vacuum will be called for
1483  * each different heap page. */
1484  qsort (worker->heap_objects, worker->n_heap_objects, sizeof (VACUUM_HEAP_OBJECT), vacuum_compare_heap_object);
1485 
1486  /* Start parsing array. Vacuum objects page by page. */
1487  for (page_ptr = worker->heap_objects; page_ptr < worker->heap_objects + worker->n_heap_objects;)
1488  {
1489  if (!VFID_EQ (&vfid, &page_ptr->vfid))
1490  {
1491  VFID_COPY (&vfid, &page_ptr->vfid);
1492  /* Reset HFID */
1493  HFID_SET_NULL (&hfid);
1494  }
1495 
1496  /* Find all objects for this page. */
1497  object_count = 1;
1498  for (obj_ptr = page_ptr + 1;
1499  obj_ptr < worker->heap_objects + worker->n_heap_objects && obj_ptr->oid.pageid == page_ptr->oid.pageid
1500  && obj_ptr->oid.volid == page_ptr->oid.volid; obj_ptr++)
1501  {
1502  object_count++;
1503  }
1504  /* Vacuum page. */
1505  error_code =
1506  vacuum_heap_page (thread_p, page_ptr, object_count, threshold_mvccid, &hfid, &reusable, was_interrupted);
1507  if (error_code != NO_ERROR)
1508  {
1509  vacuum_check_shutdown_interruption (thread_p, error_code);
1510 
1511  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Vacuum heap page %d|%d, error_code=%d.",
1512  page_ptr->oid.volid, page_ptr->oid.pageid);
1513 
1514 #if defined (NDEBUG)
1515  if (!thread_p->shutdown)
1516  {
1517  // unexpected case
1518  // debug crashes; but can release do about it? just try to clean as much as possible
1519  er_clear ();
1520  error_code = NO_ERROR;
1521  continue;
1522  }
1523 #endif // not DEBUG
1524 
1525  return error_code;
1526  }
1527  /* Advance to next page. */
1528  page_ptr = obj_ptr;
1529  }
1530  return NO_ERROR;
1531 }
1532 
1533 /*
1534  * vacuum_heap_page () - Vacuum objects in one heap page.
1535  *
1536  * return : Error code.
1537  * thread_p (in) : Thread entry.
1538  * heap_objects (in) : Array of objects to vacuum.
1539  * n_heap_objects (in) : Number of objects.
1540  * threshold_mvccid (in) : Threshold MVCCID used to vacuum.
1541  * hfid (in/out) : Heap file identifier
1542  * reusable (in/out) : True if object slots are reusable.
1543  * was_interrutped (in) : True if same job was executed and interrupted.
1544  */
1545 int
1546 vacuum_heap_page (THREAD_ENTRY * thread_p, VACUUM_HEAP_OBJECT * heap_objects, int n_heap_objects,
1547  MVCCID threshold_mvccid, HFID * hfid, bool * reusable, bool was_interrupted)
1548 {
1549  VACUUM_HEAP_HELPER helper; /* Vacuum heap helper. */
1550  HEAP_PAGE_VACUUM_STATUS page_vacuum_status; /* Current page vacuum status. */
1551  int error_code = NO_ERROR; /* Error code. */
1552  int obj_index = 0; /* Index used to iterate the object array. */
1553 
1554  /* Assert expected arguments. */
1555  assert (heap_objects != NULL);
1556  assert (n_heap_objects > 0);
1557  assert (MVCCID_IS_NORMAL (threshold_mvccid));
1558 
1559  VACUUM_PERF_HEAP_START (thread_p, &helper);
1560 
1561  /* Get page from first object. */
1562  VPID_GET_FROM_OID (&helper.home_vpid, &heap_objects->oid);
1563 
1564 #if !defined (NDEBUG)
1565  /* Check all objects belong to same page. */
1566  {
1567  int i = 0;
1568 
1569  assert (HEAP_ISVALID_OID (thread_p, &heap_objects->oid) != DISK_INVALID);
1570  for (i = 1; i < n_heap_objects; i++)
1571  {
1572  assert (heap_objects[i].oid.volid == heap_objects[0].oid.volid
1573  && heap_objects[i].oid.pageid == heap_objects[0].oid.pageid);
1574  assert (heap_objects[i].oid.slotid > 0);
1575  assert (heap_objects[i].vfid.fileid == heap_objects[0].vfid.fileid
1576  && heap_objects[i].vfid.volid == heap_objects[0].vfid.volid);
1577  }
1578  }
1579 #endif /* !NDEBUG */
1580 
1581  /* Initialize helper. */
1582  helper.home_page = NULL;
1583  helper.forward_page = NULL;
1584  helper.n_vacuumed = 0;
1585  helper.n_bulk_vacuumed = 0;
1586  helper.initial_home_free_space = -1;
1587  VFID_SET_NULL (&helper.overflow_vfid);
1588 
1589  /* Fix heap page. */
1590  if (was_interrupted)
1591  {
1592  PAGE_TYPE ptype;
1593  error_code =
1595  &helper.home_page);
1596  if (error_code != NO_ERROR)
1597  {
1598  vacuum_check_shutdown_interruption (thread_p, error_code);
1599  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|%d.",
1600  helper.home_vpid.volid, helper.home_vpid.pageid);
1601  return error_code;
1602  }
1603  if (helper.home_page == NULL)
1604  {
1605  /* deallocated */
1606  /* Safe guard: this was possible if there was only one object to be vacuumed. */
1607  assert (n_heap_objects == 1);
1608 
1609  vacuum_er_log_warning (VACUUM_ER_LOG_HEAP, "Heap page %d|%d was deallocated during previous run",
1610  VPID_AS_ARGS (&helper.home_vpid));
1611  return NO_ERROR;
1612  }
1613  ptype = pgbuf_get_page_ptype (thread_p, helper.home_page);
1614  if (ptype != PAGE_HEAP)
1615  {
1616  /* page was deallocated and reused as file table. */
1617  assert (ptype == PAGE_FTAB);
1618  /* Safe guard: this was possible if there was only one object to be vacuumed. */
1619  assert (n_heap_objects == 1);
1620 
1622  "Heap page %d|%d was deallocated during previous run and reused as file table page",
1623  VPID_AS_ARGS (&helper.home_vpid));
1624 
1625  pgbuf_unfix_and_init (thread_p, helper.home_page);
1626  return NO_ERROR;
1627  }
1628  }
1629  else
1630  {
1631  helper.home_page =
1633  if (helper.home_page == NULL)
1634  {
1635  ASSERT_ERROR_AND_SET (error_code);
1636  vacuum_check_shutdown_interruption (thread_p, error_code);
1637  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|%d.",
1638  helper.home_vpid.volid, helper.home_vpid.pageid);
1639  return error_code;
1640  }
1641  }
1642 
1643  (void) pgbuf_check_page_ptype (thread_p, helper.home_page, PAGE_HEAP);
1644 
1646 
1647  if (HFID_IS_NULL (hfid))
1648  {
1649  /* file has changed and we must get HFID and file type */
1650  error_code = vacuum_heap_get_hfid_and_file_type (thread_p, &helper, &heap_objects[0].vfid);
1651  if (error_code != NO_ERROR)
1652  {
1653  ASSERT_ERROR ();
1654  vacuum_check_shutdown_interruption (thread_p, error_code);
1655  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "%s", "Failed to get hfid.");
1656  return error_code;
1657  }
1658  /* we need to also output to avoid checking again for other objects */
1659  *reusable = helper.reusable;
1660  *hfid = helper.hfid;
1661  }
1662  else
1663  {
1664  helper.reusable = *reusable;
1665  helper.hfid = *hfid;
1666  }
1667 
1668  helper.crt_slotid = -1;
1669  for (obj_index = 0; obj_index < n_heap_objects; obj_index++)
1670  {
1671  if (helper.crt_slotid == heap_objects[obj_index].oid.slotid)
1672  {
1673  /* Same object. Do not check it twice. */
1674  continue;
1675  }
1676  /* Set current slotid. */
1677  helper.crt_slotid = heap_objects[obj_index].oid.slotid;
1678 
1679  /* Prepare record for vacuum (get all required pages, info and MVCC header). */
1680  error_code = vacuum_heap_prepare_record (thread_p, &helper);
1681  if (error_code != NO_ERROR)
1682  {
1684  "Could not prepare vacuum for object %d|%d|%d.",
1685  heap_objects[obj_index].oid.volid, heap_objects[obj_index].oid.pageid,
1686  heap_objects[obj_index].oid.slotid);
1687 
1688  vacuum_check_shutdown_interruption (thread_p, error_code);
1689  if (helper.forward_page != NULL)
1690  {
1691  pgbuf_unfix_and_init (thread_p, helper.forward_page);
1692  }
1693 
1694  /* release build will give up */
1695  goto end;
1696  }
1697  /* Safe guard. */
1698  assert (helper.home_page != NULL);
1699 
1700  switch (helper.record_type)
1701  {
1702  case REC_RELOCATION:
1703  case REC_HOME:
1704  case REC_BIGONE:
1705 
1706  /* Check if record can be vacuumed. */
1707  helper.can_vacuum = mvcc_satisfies_vacuum (thread_p, &helper.mvcc_header, threshold_mvccid);
1708  if (helper.can_vacuum == VACUUM_RECORD_REMOVE)
1709  {
1710  /* Record has been deleted and it can be removed. */
1711  error_code = vacuum_heap_record (thread_p, &helper);
1712  }
1714  {
1715  /* Record insert MVCCID and prev version lsa can be removed. */
1716  error_code = vacuum_heap_record_insid_and_prev_version (thread_p, &helper);
1717  }
1718  else
1719  {
1720  /* Object could not be vacuumed. */
1721  }
1722  if (helper.forward_page != NULL)
1723  {
1724  pgbuf_unfix_and_init (thread_p, helper.forward_page);
1725  }
1726  if (error_code != NO_ERROR)
1727  {
1729  "Failed to vacuum object at %d|%d|%d.", helper.home_vpid.volid,
1730  helper.home_vpid.pageid, helper.crt_slotid);
1731 
1732  /* Debug should hit assert. Release should continue. */
1733  assert_release (false);
1734 
1735  if (helper.home_page == NULL)
1736  {
1737  goto end;
1738  }
1739  else
1740  {
1741  continue;
1742  }
1743  }
1744  break;
1745 
1746  default:
1747  /* Object cannot be vacuumed. Most likely it was already vacuumed by another worker or it was rollbacked and
1748  * reused. */
1749  assert (helper.forward_page == NULL);
1750  break;
1751  }
1752 
1753  assert (!VACUUM_IS_THREAD_VACUUM_MASTER (thread_p));
1754  if (!VACUUM_IS_THREAD_VACUUM_WORKER (thread_p))
1755  {
1756  continue;
1757  }
1758 
1759  /* Check page vacuum status. */
1760  page_vacuum_status = heap_page_get_vacuum_status (thread_p, helper.home_page);
1761  /* Safe guard. */
1762  assert (page_vacuum_status != HEAP_PAGE_VACUUM_NONE || (was_interrupted && helper.n_vacuumed == 0));
1763 
1764  /* Page can be removed if no other worker will access this page. If this worker is the only one expected, then it
1765  * can remove the page. It is also possible that this job was previously executed and interrupted due to
1766  * shutdown or crash. This case is a little more complicated. There are two scenarios: 1. Current page status is
1767  * vacuum none. This means all vacuum was already executed. 2. Current page status is vacuum once. This means a
1768  * vacuum is expected, but we cannot tell if current vacuum worker was interrupted and re-executes an old vacuum
1769  * task or if it is executing the task expected by page status. Take next scenario: 1. Insert new object at
1770  * OID1. page status is vacuum once. 2. Block with above operations is finished and vacuum job is started. 3.
1771  * Vacuum insert MVCCID at OID1. status is now vacuum none. 4. Delete object at OID1. page status is set to
1772  * vacuum once. 5. Crash. 6. Job on block at step #2 is restarted. 7. Vacuum is executed on object OID1.
1773  * Object can be removed. 8. Vacuum is executed for delete operation at #4. It would be incorrect to change
1774  * page status from vacuum once to none, since it will be followed by another vacuum task. Since vacuum none
1775  * status means page might be deallocated, it is better to be paranoid about it. */
1776  if ((page_vacuum_status == HEAP_PAGE_VACUUM_ONCE && !was_interrupted)
1777  || (page_vacuum_status == HEAP_PAGE_VACUUM_NONE && was_interrupted))
1778  {
1779  assert (n_heap_objects == 1);
1780  assert (helper.n_vacuumed <= 1);
1781  if (page_vacuum_status == HEAP_PAGE_VACUUM_ONCE)
1782  {
1783  heap_page_set_vacuum_status_none (thread_p, helper.home_page);
1784 
1786  "Changed vacuum status of heap page %d|%d, lsa=%lld|%d from once to none.",
1787  PGBUF_PAGE_STATE_ARGS (helper.home_page));
1788 
1789  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, &helper);
1790 
1791  vacuum_log_vacuum_heap_page (thread_p, helper.home_page, helper.n_bulk_vacuumed, helper.slots,
1792  helper.results, helper.reusable, true);
1793 
1794  VACUUM_PERF_HEAP_TRACK_LOGGING (thread_p, &helper);
1795  }
1796 
1797  /* Reset n_vacuumed since they have been logged already. */
1798  helper.n_vacuumed = 0;
1799  helper.n_bulk_vacuumed = 0;
1800 
1801  /* Set page dirty. */
1802  pgbuf_set_dirty (thread_p, helper.home_page, DONT_FREE);
1803 
1804  if (spage_number_of_records (helper.home_page) <= 1 && helper.reusable)
1805  {
1806  /* Try to remove page from heap. */
1807 
1808  /* HFID is required. */
1809  assert (!HFID_IS_NULL (&helper.hfid));
1810  VACUUM_PERF_HEAP_TRACK_PREPARE (thread_p, &helper);
1811 
1812  if (pgbuf_has_prevent_dealloc (helper.home_page) == false
1813  && heap_remove_page_on_vacuum (thread_p, &helper.home_page, &helper.hfid))
1814  {
1815  /* Successfully removed page. */
1816  assert (helper.home_page == NULL);
1817 
1819  "Successfully removed page %d|%d from heap file (%d, %d|%d).",
1820  VPID_AS_ARGS (&helper.home_vpid), HFID_AS_ARGS (&helper.hfid));
1821 
1822  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, &helper);
1823  goto end;
1824  }
1825  else if (helper.home_page != NULL)
1826  {
1827  /* Unfix page. */
1828  pgbuf_unfix_and_init (thread_p, helper.home_page);
1829  }
1830  /* Fall through and go to end. */
1831  }
1832  else
1833  {
1834  /* Finished vacuuming page. Unfix the page and go to end. */
1835  pgbuf_unfix_and_init (thread_p, helper.home_page);
1836  }
1837  goto end;
1838  }
1839 
1840  if (pgbuf_has_any_non_vacuum_waiters (helper.home_page) && obj_index < n_heap_objects - 1)
1841  {
1842  /* release latch to favor other threads */
1843  vacuum_heap_page_log_and_reset (thread_p, &helper, false, true);
1844  assert (helper.home_page == NULL);
1845  assert (helper.forward_page == NULL);
1846 
1847  helper.home_page =
1849  if (helper.home_page == NULL)
1850  {
1851  ASSERT_ERROR_AND_SET (error_code);
1852  vacuum_check_shutdown_interruption (thread_p, error_code);
1853  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|%d.",
1854  helper.home_vpid.volid, helper.home_vpid.pageid);
1855  goto end;
1856  }
1857  (void) pgbuf_check_page_ptype (thread_p, helper.home_page, PAGE_HEAP);
1858  }
1859  /* Continue to next object. */
1860  }
1861  /* Finished processing all objects. */
1862 
1863 end:
1864  assert (helper.forward_page == NULL);
1865  if (helper.home_page != NULL)
1866  {
1867  vacuum_heap_page_log_and_reset (thread_p, &helper, true, true);
1868  }
1869 
1870  return error_code;
1871 }
1872 
1873 /*
1874  * vacuum_heap_prepare_record () - Prepare all required information to vacuum heap record. Possible requirements:
1875  * - Record type (always).
1876  * - Peeked record data: REC_HOME,
1877  * REC_RELOCATION
1878  * - Forward page: REC_BIGONE, REC_RELOCATION
1879  * - Forward OID: REC_BIGONE, REC_RELOCATION
1880  * - HFID: REC_BIGONE, REC_RELOCATION
1881  * - Overflow VFID: REC_BIGONE
1882  * - MVCC header: REC_HOME, REC_BIGONE,
1883  * REC_RELOCATION
1884  *
1885  * return : Error code.
1886  * thread_p (in) : Thread entry.
1887  * helper (in) : Vacuum heap helper.
1888  */
1889 static int
1891 {
1892  SPAGE_SLOT *slotp; /* Slot at helper->crt_slotid or NULL. */
1893  VPID forward_vpid; /* Forward page VPID. */
1894  int error_code = NO_ERROR; /* Error code. */
1895  PGBUF_LATCH_CONDITION fwd_condition; /* Condition to latch forward page for REC_RELOCATION. */
1896 
1897  /* Assert expected arguments. */
1898  assert (helper != NULL);
1899  assert (helper->home_page != NULL);
1900  assert (helper->forward_page == NULL);
1901  assert (helper->crt_slotid > 0);
1902 
1903 retry_prepare:
1904 
1905  /* Get slot. */
1906  slotp = spage_get_slot (helper->home_page, helper->crt_slotid);
1907  if (slotp == NULL)
1908  {
1909  /* Slot must have been deleted. */
1910  helper->record_type = REC_MARKDELETED;
1911  return NO_ERROR;
1912  }
1913  helper->record_type = slotp->record_type;
1914 
1915  /* Get required pages and MVCC header in the three interesting cases: 1. REC_RELOCATION. 2. REC_BIGONE. 3. REC_HOME. */
1916  switch (helper->record_type)
1917  {
1918  case REC_RELOCATION:
1919  /* Required info: forward page, forward OID, REC_NEWHOME record, MVCC header and HFID. */
1920  assert (!HFID_IS_NULL (&helper->hfid));
1921 
1922  /* Get forward OID. */
1923  helper->forward_recdes.data = (char *) &helper->forward_link;
1924  helper->forward_recdes.area_size = sizeof (helper->forward_link);
1925  if (spage_get_record (thread_p, helper->home_page, helper->crt_slotid, &helper->forward_recdes, COPY) !=
1926  S_SUCCESS)
1927  {
1928  assert_release (false);
1929  return ER_FAILED;
1930  }
1931  COPY_OID (&helper->forward_oid, &helper->forward_link);
1932 
1933  /* Get forward page. */
1934  VPID_GET_FROM_OID (&forward_vpid, &helper->forward_link);
1935  if (helper->forward_page != NULL)
1936  {
1937  VPID crt_fwd_vpid = VPID_INITIALIZER;
1938 
1939  pgbuf_get_vpid (helper->forward_page, &crt_fwd_vpid);
1940  assert (!VPID_ISNULL (&crt_fwd_vpid));
1941  if (!VPID_EQ (&crt_fwd_vpid, &forward_vpid))
1942  {
1943  /* Unfix current forward page. */
1944  pgbuf_unfix_and_init (thread_p, helper->forward_page);
1945  }
1946  }
1947  if (helper->forward_page == NULL)
1948  {
1949  /* The condition used to fix forward page depends on its VPID and home page VPID. Unconditional latch can be
1950  * used if the order is home before forward. If the order is forward before home, try conditional latch, and
1951  * if it fails, fix pages in reversed order. */
1952  fwd_condition =
1954  &helper->hfid);
1955  helper->forward_page = pgbuf_fix (thread_p, &forward_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, fwd_condition);
1956  }
1957  if (helper->forward_page == NULL)
1958  {
1959  /* Fix failed. */
1960  if (fwd_condition == PGBUF_UNCONDITIONAL_LATCH)
1961  {
1962  /* Fix should have worked. */
1963  ASSERT_ERROR_AND_SET (error_code);
1964  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|%d", VPID_AS_ARGS (&forward_vpid));
1965  return error_code;
1966  }
1967  /* Conditional latch. Unfix home, and fix in reversed order. */
1968 
1969  VACUUM_PERF_HEAP_TRACK_PREPARE (thread_p, helper);
1970 
1971  /* Make sure all current changes on home are logged. */
1972  vacuum_heap_page_log_and_reset (thread_p, helper, false, true);
1973  assert (helper->home_page == NULL);
1974 
1975  /* Fix pages in reversed order. */
1976  /* Fix forward page. */
1977  helper->forward_page =
1978  pgbuf_fix (thread_p, &forward_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
1979  if (helper->forward_page == NULL)
1980  {
1981  ASSERT_ERROR_AND_SET (error_code);
1982  vacuum_check_shutdown_interruption (thread_p, error_code);
1983  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|%d", VPID_AS_ARGS (&forward_vpid));
1984  return error_code;
1985  }
1986  /* Fix home page. */
1987  helper->home_page =
1989  if (helper->home_page == NULL)
1990  {
1991  ASSERT_ERROR_AND_SET (error_code);
1992  vacuum_check_shutdown_interruption (thread_p, error_code);
1993  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|d.", VPID_AS_ARGS (&forward_vpid));
1994  return error_code;
1995  }
1996  /* Both pages fixed. */
1997 
1998  /* While home has been unfixed, it is possible that current record was changed. It could be returned to home,
1999  * link could be changed, or it could be vacuumed. Repeat getting record. */
2000  goto retry_prepare;
2001  }
2002  assert (VPID_EQ (pgbuf_get_vpid_ptr (helper->forward_page), &forward_vpid));
2003  /* COPY (needed for UNDO logging) REC_NEWHOME record. */
2004  helper->record.data = PTR_ALIGN (helper->rec_buf, MAX_ALIGNMENT);
2005  helper->record.area_size = sizeof (helper->rec_buf);
2006  if (spage_get_record (thread_p, helper->forward_page, helper->forward_oid.slotid, &helper->record, COPY) !=
2007  S_SUCCESS)
2008  {
2009  assert_release (false);
2010  return ER_FAILED;
2011  }
2012 
2013  /* Get MVCC header to check whether the record can be vacuumed. */
2014  error_code = or_mvcc_get_header (&helper->record, &helper->mvcc_header);
2015  if (error_code != NO_ERROR)
2016  {
2017  assert_release (false);
2018  return error_code;
2019  }
2020  return NO_ERROR;
2021 
2022  case REC_BIGONE:
2023  /* Required info: forward oid, forward page, MVCC header, HFID and overflow VFID. */
2024 
2025  if (helper->forward_page != NULL)
2026  {
2027  /* Retry from REC_RELOCATION. This forward_page cannot be good for REC_BIGONE. */
2028  pgbuf_unfix_and_init (thread_p, helper->forward_page);
2029  }
2030 
2031  assert (!HFID_IS_NULL (&helper->hfid));
2032 
2033  /* Overflow VFID is required to remove overflow pages. */
2034  if (VFID_ISNULL (&helper->overflow_vfid))
2035  {
2036  if (heap_ovf_find_vfid (thread_p, &helper->hfid, &helper->overflow_vfid, false, PGBUF_CONDITIONAL_LATCH)
2037  == NULL)
2038  {
2039  /* Failed conditional latch. Unfix heap page and try again using unconditional latch. */
2040  VACUUM_PERF_HEAP_TRACK_PREPARE (thread_p, helper);
2041 
2042  vacuum_heap_page_log_and_reset (thread_p, helper, false, true);
2043 
2044  if (heap_ovf_find_vfid (thread_p, &helper->hfid, &helper->overflow_vfid, false, PGBUF_UNCONDITIONAL_LATCH)
2045  == NULL || VFID_ISNULL (&helper->overflow_vfid))
2046  {
2047  assert_release (false);
2048  return ER_FAILED;
2049  }
2050  helper->home_page =
2052  if (helper->home_page == NULL)
2053  {
2054  ASSERT_ERROR_AND_SET (error_code);
2055  vacuum_check_shutdown_interruption (thread_p, error_code);
2056  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|%d.",
2057  VPID_AS_ARGS (&helper->home_vpid));
2058  return error_code;
2059  }
2060  /* While home has been unfixed, it is possible that current record was changed. It could be vacuumed.
2061  * Repeat getting record. */
2062  goto retry_prepare;
2063  }
2064  }
2065  assert (!VFID_ISNULL (&helper->overflow_vfid));
2066  assert (helper->home_page != NULL);
2067 
2068  /* Get forward OID. */
2069  helper->forward_recdes.data = (char *) &helper->forward_link;
2070  helper->forward_recdes.area_size = sizeof (helper->forward_link);
2071  if (spage_get_record (thread_p, helper->home_page, helper->crt_slotid, &helper->forward_recdes, COPY) !=
2072  S_SUCCESS)
2073  {
2074  assert_release (false);
2075  return ER_FAILED;
2076  }
2077 
2078  COPY_OID (&helper->forward_oid, &helper->forward_link);
2079 
2080  /* Fix first overflow page (forward_page). */
2081  VPID_GET_FROM_OID (&forward_vpid, &helper->forward_link);
2082  helper->forward_page =
2083  pgbuf_fix (thread_p, &forward_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
2084  if (helper->forward_page == NULL)
2085  {
2086  ASSERT_ERROR_AND_SET (error_code);
2087  vacuum_check_shutdown_interruption (thread_p, error_code);
2088  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "Failed to fix page %d|%d", VPID_AS_ARGS (&forward_vpid));
2089  return error_code;
2090  }
2091 
2092  /* Read MVCC header from first overflow page. */
2093  error_code = heap_get_mvcc_rec_header_from_overflow (helper->forward_page, &helper->mvcc_header, NULL);
2094  if (error_code != NO_ERROR)
2095  {
2096  ASSERT_ERROR ();
2098  "Failed to get MVCC header from overflow page %d|%d.", VPID_AS_ARGS (&forward_vpid));
2099  return error_code;
2100  }
2101  break;
2102 
2103  case REC_HOME:
2104  /* Required info: record data and MVCC header. */
2105 
2106  if (helper->forward_page != NULL)
2107  {
2108  /* Retry from REC_RELOCATION. This forward_page cannot be good for REC_HOME. */
2109  pgbuf_unfix_and_init (thread_p, helper->forward_page);
2110  }
2111 
2112  helper->record.data = PTR_ALIGN (helper->rec_buf, MAX_ALIGNMENT);
2113  helper->record.area_size = sizeof (helper->rec_buf);
2114 
2115  /* Peek record. */
2116  if (spage_get_record (thread_p, helper->home_page, helper->crt_slotid, &helper->record, COPY) != S_SUCCESS)
2117  {
2118  assert_release (false);
2119  return ER_FAILED;
2120  }
2121 
2122  /* Get MVCC header to check whether the record can be vacuumed. */
2123  error_code = or_mvcc_get_header (&helper->record, &helper->mvcc_header);
2124  if (error_code != NO_ERROR)
2125  {
2126  assert_release (false);
2127  return ER_FAILED;
2128  }
2129  break;
2130 
2131  default:
2132  /* No information is required other than record type. */
2133 
2134  if (helper->forward_page != NULL)
2135  {
2136  /* Retry from REC_RELOCATION. This forward_page cannot be good for vacuumed/deleted slot. */
2137  pgbuf_unfix_and_init (thread_p, helper->forward_page);
2138  }
2139  break;
2140  }
2141 
2142  /* Assert forward page is fixed if and only if record type is either REC_RELOCATION or REC_BIGONE. */
2143  assert ((helper->record_type == REC_RELOCATION
2144  || helper->record_type == REC_BIGONE) == (helper->forward_page != NULL));
2145 
2146  VACUUM_PERF_HEAP_TRACK_PREPARE (thread_p, helper);
2147 
2148  /* Success. */
2149  return NO_ERROR;
2150 }
2151 
2152 /*
2153  * vacuum_heap_record_insid_and_prev_version () - Remove insert MVCCID and prev version lsa from record.
2154  *
2155  * return : Error code.
2156  * thread_p (in) : Thread entry.
2157  * helper (in) : Vacuum heap helper.
2158  */
2159 static int
2161 {
2162  RECDES *update_record;
2163  int error_code = NO_ERROR;
2164  char *start_p, *existing_data_p, *new_data_p;
2165  int repid_and_flag_bits = 0, mvcc_flags = 0;
2166 
2167  /* Assert expected arguments. */
2168  assert (helper != NULL);
2171 
2172  switch (helper->record_type)
2173  {
2174  case REC_RELOCATION:
2175  /* Remove insert MVCCID from REC_NEWHOME in forward_page. */
2176 
2177  /* Forward page and OID are required. */
2178  assert (helper->forward_page != NULL);
2179  assert (!OID_ISNULL (&helper->forward_oid));
2180  assert (helper->record.type == REC_NEWHOME);
2181 
2182  /* Remove insert MVCCID and prev version lsa. */
2183  update_record = &helper->record;
2184  start_p = update_record->data;
2185  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (start_p);
2186  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
2187 
2188  /* Skip bytes up to insid_offset. */
2189  existing_data_p = start_p + mvcc_header_size_lookup[mvcc_flags];
2190  new_data_p = start_p + OR_MVCC_INSERT_ID_OFFSET;
2191  if (mvcc_flags & OR_MVCC_FLAG_VALID_DELID)
2192  {
2193  /* Has MVCC DELID. */
2194  if (mvcc_flags & OR_MVCC_FLAG_VALID_INSID)
2195  {
2196  /* Copy MVCC DELID over INSID (INSID is removed). */
2197  memcpy (new_data_p, new_data_p + OR_MVCCID_SIZE, OR_MVCCID_SIZE);
2198  }
2199  /* Skip DELID. */
2200  new_data_p += OR_MVCCID_SIZE;
2201  }
2202 
2203  /* Clear flag for valid insert MVCCID and prev version lsa. */
2205  OR_PUT_INT (start_p, repid_and_flag_bits);
2206 
2207  /* Expect new_data_p != existing_data_p in most of the cases. */
2208  assert (existing_data_p >= new_data_p);
2209  memmove (new_data_p, existing_data_p, update_record->length - CAST_BUFLEN (existing_data_p - start_p));
2210  update_record->length -= CAST_BUFLEN (existing_data_p - new_data_p);
2211  assert (update_record->length > 0);
2212 
2213  /* Update record in page. */
2214  if (spage_update (thread_p, helper->forward_page, helper->forward_oid.slotid, update_record) != SP_SUCCESS)
2215  {
2216  assert_release (false);
2217  return ER_FAILED;
2218  }
2219 
2220  /* Since forward page was vacuumed, log it immediately. Then unfix forward page. */
2221  vacuum_log_vacuum_heap_page (thread_p, helper->forward_page, 1, &helper->forward_oid.slotid, &helper->can_vacuum,
2222  helper->reusable, false);
2223  pgbuf_set_dirty (thread_p, helper->forward_page, FREE);
2224  helper->forward_page = NULL;
2225 
2227  break;
2228 
2229  case REC_BIGONE:
2230  /* First overflow page is required. */
2231  assert (helper->forward_page != NULL);
2232 
2233  /* Replace current insert MVCCID with MVCCID_ALL_VISIBLE. Header must remain the same size. */
2236  error_code = heap_set_mvcc_rec_header_on_overflow (helper->forward_page, &helper->mvcc_header);
2237  if (error_code != NO_ERROR)
2238  {
2239  ASSERT_ERROR ();
2241  "set mvcc header (flag=%d, repid=%d, chn=%d, insid=%llu, "
2242  "delid=%llu, forward object %d|%d|%d with record of type=%d and size=%d",
2243  (int) MVCC_GET_FLAG (&helper->mvcc_header), (int) MVCC_GET_REPID (&helper->mvcc_header),
2244  MVCC_GET_CHN (&helper->mvcc_header), MVCC_GET_INSID (&helper->mvcc_header),
2245  MVCC_GET_DELID (&helper->mvcc_header), helper->home_vpid.volid, helper->home_vpid.pageid,
2246  helper->crt_slotid, REC_BIGONE, helper->record.length);
2247  return error_code;
2248  }
2249  /* Log changes and unfix first overflow page. */
2250  vacuum_log_remove_ovf_insid (thread_p, helper->forward_page);
2251  pgbuf_set_dirty (thread_p, helper->forward_page, FREE);
2252  helper->forward_page = NULL;
2253 
2255  break;
2256 
2257  case REC_HOME:
2258  /* Remove insert MVCCID and prev version lsa. */
2259 
2260  assert (helper->record.type == REC_HOME);
2261  update_record = &helper->record;
2262  start_p = update_record->data;
2263  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (start_p);
2264  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
2265 
2266  /* Skip bytes up to insid_offset */
2267  existing_data_p = start_p + mvcc_header_size_lookup[mvcc_flags];
2268  new_data_p = start_p + OR_MVCC_INSERT_ID_OFFSET;
2269  if (mvcc_flags & OR_MVCC_FLAG_VALID_DELID)
2270  {
2271  /* Has MVCC DELID. */
2272  if (mvcc_flags & OR_MVCC_FLAG_VALID_INSID)
2273  {
2274  /* Copy MVCC DELID over INSID (INSID is removed). */
2275  memcpy (new_data_p, new_data_p + OR_MVCCID_SIZE, OR_MVCCID_SIZE);
2276  }
2277  /* Skip DELID. */
2278  new_data_p += OR_MVCCID_SIZE;
2279  }
2280 
2281  /* Clear flag for valid insert MVCCID and prev version lsa. */
2283  OR_PUT_INT (start_p, repid_and_flag_bits);
2284 
2285  /* Expect new_data_p != existing_data_p in most of the cases. */
2286  assert (existing_data_p >= new_data_p);
2287  memmove (new_data_p, existing_data_p, update_record->length - CAST_BUFLEN (existing_data_p - start_p));
2288  update_record->length -= CAST_BUFLEN (existing_data_p - new_data_p);
2289  assert (update_record->length > 0);
2290 
2291  if (spage_update (thread_p, helper->home_page, helper->crt_slotid, update_record) != SP_SUCCESS)
2292  {
2293  assert_release (false);
2294  return ER_FAILED;
2295  }
2296  /* Collect vacuum data to be logged later. */
2297  helper->slots[helper->n_bulk_vacuumed] = helper->crt_slotid;
2299  helper->n_bulk_vacuumed++;
2300 
2302  break;
2303 
2304  default:
2305  /* Should not be here. */
2306  assert_release (false);
2307  return ER_FAILED;
2308  }
2309 
2310  helper->n_vacuumed++;
2311 
2313 
2314  /* Success. */
2315  return NO_ERROR;
2316 }
2317 
2318 /*
2319  * vacuum_heap_record () - Vacuum heap record.
2320  *
2321  * return : Error code.
2322  * thread_p (in) : Thread entry.
2323  * helper (in) : Vacuum heap helper.
2324  */
2325 static int
2327 {
2328  /* Assert expected arguments. */
2329  assert (helper != NULL);
2330  assert (helper->can_vacuum == VACUUM_RECORD_REMOVE);
2331  assert (helper->home_page != NULL);
2333 
2334  if (helper->record_type == REC_RELOCATION || helper->record_type == REC_BIGONE)
2335  {
2336  /* HOME record of rel/big records are performed as a single operation: flush all existing vacuumed slots before
2337  * starting a system op for current record */
2338  vacuum_heap_page_log_and_reset (thread_p, helper, false, false);
2339  log_sysop_start (thread_p);
2340  }
2341  else
2342  {
2343  assert (helper->record_type == REC_HOME);
2344  /* Collect home page changes. */
2345  helper->slots[helper->n_bulk_vacuumed] = helper->crt_slotid;
2346  helper->results[helper->n_bulk_vacuumed] = VACUUM_RECORD_REMOVE;
2347  }
2348 
2349  /* Vacuum REC_HOME/REC_RELOCATION/REC_BIGONE */
2350  spage_vacuum_slot (thread_p, helper->home_page, helper->crt_slotid, helper->reusable);
2351 
2352  if (helper->reusable)
2353  {
2355  }
2356 
2357  if (helper->record_type != REC_HOME)
2358  {
2359  /* We try to keep the same amount of pgbuf_set_dirty and logged changes; Changes on REC_HOME records are logged
2360  * in bulk and page is set dirty along with that log record */
2361  pgbuf_set_dirty (thread_p, helper->home_page, DONT_FREE);
2362  }
2363 
2364  switch (helper->record_type)
2365  {
2366  case REC_RELOCATION:
2367  /* Remove REC_NEWHOME. */
2368  assert (helper->forward_page != NULL);
2369  assert (!OID_ISNULL (&helper->forward_oid));
2370  assert (!HFID_IS_NULL (&helper->hfid));
2371  assert (!OID_ISNULL (&helper->forward_oid));
2372 
2373  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, helper);
2374 
2375  vacuum_log_redoundo_vacuum_record (thread_p, helper->home_page, helper->crt_slotid, &helper->forward_recdes,
2376  helper->reusable);
2377 
2378  VACUUM_PERF_HEAP_TRACK_LOGGING (thread_p, helper);
2379 
2380  spage_vacuum_slot (thread_p, helper->forward_page, helper->forward_oid.slotid, true);
2381 
2382  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, helper);
2383 
2384  /* Log changes in forward page immediately. */
2385  vacuum_log_redoundo_vacuum_record (thread_p, helper->forward_page, helper->forward_oid.slotid, &helper->record,
2386  true);
2387 
2388  pgbuf_set_dirty (thread_p, helper->forward_page, FREE);
2389  helper->forward_page = NULL;
2390 
2391  log_sysop_commit (thread_p);
2392 
2393  VACUUM_PERF_HEAP_TRACK_LOGGING (thread_p, helper);
2394 
2396  break;
2397 
2398  case REC_BIGONE:
2399  assert (helper->forward_page != NULL);
2400  /* Overflow first page is required. */
2401  assert (!VFID_ISNULL (&helper->overflow_vfid));
2402 
2403  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, helper);
2404 
2405  vacuum_log_redoundo_vacuum_record (thread_p, helper->home_page, helper->crt_slotid, &helper->forward_recdes,
2406  helper->reusable);
2407 
2408  VACUUM_PERF_HEAP_TRACK_LOGGING (thread_p, helper);
2409 
2410  /* Unfix first overflow page. */
2411  pgbuf_unfix_and_init (thread_p, helper->forward_page);
2412 
2413  if (heap_ovf_delete (thread_p, &helper->hfid, &helper->forward_oid, &helper->overflow_vfid) == NULL)
2414  {
2415  /* Failed to delete. */
2416  assert_release (false);
2417  log_sysop_abort (thread_p);
2418  return ER_FAILED;
2419  }
2420 
2421  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, helper);
2422 
2423  log_sysop_commit (thread_p);
2424 
2425  VACUUM_PERF_HEAP_TRACK_LOGGING (thread_p, helper);
2426 
2428  break;
2429 
2430  case REC_HOME:
2431  helper->n_bulk_vacuumed++;
2432 
2434  break;
2435 
2436  default:
2437  /* Unexpected. */
2438  assert_release (false);
2439  return ER_FAILED;
2440  }
2441 
2442  helper->n_vacuumed++;
2443 
2444  assert (helper->forward_page == NULL);
2445 
2446  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, helper);
2447 
2448  return NO_ERROR;
2449 }
2450 
2451 /*
2452  * vacuum_heap_get_hfid () - Get heap file identifier.
2453  *
2454  * return : Error code.
2455  * thread_p (in) : Thread entry.
2456  * helper (in) : Vacuum heap helper.
2457  * vfid (in) : file identifier
2458  */
2459 static int
2461 {
2462  int error_code = NO_ERROR; /* Error code. */
2463  OID class_oid = OID_INITIALIZER; /* Class OID. */
2464  FILE_TYPE ftype;
2465 
2466  assert (helper != NULL);
2467  assert (helper->home_page != NULL);
2468  assert (vfid != NULL && !VFID_ISNULL (vfid));
2469 
2470  /* Get class OID from heap page. */
2471  error_code = heap_get_class_oid_from_page (thread_p, helper->home_page, &class_oid);
2472  if (error_code != NO_ERROR)
2473  {
2475  "Failed to obtain class_oid from heap page %d|%d.",
2477 
2478  assert_release (false);
2479  return error_code;
2480  }
2481  assert (!OID_ISNULL (&class_oid));
2482 
2483  /* Get HFID for class OID. */
2484  error_code = heap_get_class_info (thread_p, &class_oid, &helper->hfid, &ftype, NULL);
2485  if (error_code == ER_HEAP_UNKNOWN_OBJECT)
2486  {
2487  FILE_DESCRIPTORS file_descriptor;
2488 
2489  /* clear expected error */
2490  er_clear ();
2491  error_code = NO_ERROR;
2492 
2493  error_code = file_descriptor_get (thread_p, vfid, &file_descriptor);
2494  if (error_code != NO_ERROR)
2495  {
2496  assert_release (false);
2497  }
2498  else
2499  {
2500  helper->hfid = file_descriptor.heap.hfid;
2501  error_code = file_get_type (thread_p, vfid, &ftype);
2502  if (error_code != NO_ERROR)
2503  {
2504  assert_release (false);
2505  }
2506  else
2507  {
2509  "vacuuming heap found deleted class oid, however hfid and file type "
2510  "have been successfully loaded from file header. ");
2511  }
2512  }
2513  }
2514  if (error_code != NO_ERROR)
2515  {
2517  "Failed to obtain heap file identifier for class %d|%d|%d)", OID_AS_ARGS (&class_oid));
2518 
2519  assert_release (false);
2520  return error_code;
2521  }
2522  if (HFID_IS_NULL (&helper->hfid) || (ftype != FILE_HEAP && ftype != FILE_HEAP_REUSE_SLOTS))
2523  {
2525  "Invalid hfid (%d, %d|%d) or ftype = %s ", HFID_AS_ARGS (&helper->hfid),
2526  file_type_to_string (ftype));
2527  assert_release (false);
2528  return ER_FAILED;
2529  }
2530 
2531  /* reusable */
2532  helper->reusable = ftype == FILE_HEAP_REUSE_SLOTS;
2533 
2534  /* Success. */
2535  return NO_ERROR;
2536 }
2537 
2538 /*
2539  * vacuum_heap_page_log_and_reset () - Logs the vacuumed slots from page and reset page pointer and number of
2540  * vacuumed slots.
2541  *
2542  * return : Void.
2543  * thread_p (in) : Thread entry.
2544  * helper (in) : Vacuum heap helper.
2545  * update_best_space_stat (in) :
2546  * unlatch_page (in) :
2547  */
2548 static void
2549 vacuum_heap_page_log_and_reset (THREAD_ENTRY * thread_p, VACUUM_HEAP_HELPER * helper, bool update_best_space_stat,
2550  bool unlatch_page)
2551 {
2552  assert (helper != NULL);
2553  assert (helper->home_page != NULL);
2554 
2555  if (helper->n_bulk_vacuumed == 0)
2556  {
2557  /* No logging is required. */
2558  if (unlatch_page == true)
2559  {
2560  pgbuf_unfix_and_init (thread_p, helper->home_page);
2561  }
2562  return;
2563  }
2564 
2565  if (spage_need_compact (thread_p, helper->home_page) == true)
2566  {
2567  /* Compact page data */
2568  spage_compact (thread_p, helper->home_page);
2569  }
2570 
2571  /* Update statistics only for home pages; We assume that fwd pages (from relocated records) are home pages for other
2572  * OIDs and their statistics are updated in that context */
2573  if (update_best_space_stat == true && helper->initial_home_free_space != -1)
2574  {
2575  assert (!HFID_IS_NULL (&helper->hfid));
2576  heap_stats_update (thread_p, helper->home_page, &helper->hfid, helper->initial_home_free_space);
2577  }
2578 
2579  VACUUM_PERF_HEAP_TRACK_EXECUTE (thread_p, helper);
2580 
2581  /* Log vacuumed slots */
2582  vacuum_log_vacuum_heap_page (thread_p, helper->home_page, helper->n_bulk_vacuumed, helper->slots, helper->results,
2583  helper->reusable, false);
2584 
2585  /* Mark page as dirty and unfix */
2586  pgbuf_set_dirty (thread_p, helper->home_page, DONT_FREE);
2587  if (unlatch_page == true)
2588  {
2589  pgbuf_unfix_and_init (thread_p, helper->home_page);
2590  }
2591 
2592  /* Reset the number of vacuumed slots */
2593  helper->n_bulk_vacuumed = 0;
2594 
2595  VACUUM_PERF_HEAP_TRACK_LOGGING (thread_p, helper);
2596 }
2597 
2598 
2599 /*
2600  * vacuum_log_vacuum_heap_page () - Log removing OID's from heap page.
2601  *
2602  * return : Error code.
2603  * thread_p (in) : Thread entry.
2604  * page_p (in) : Page pointer.
2605  * n_slots (in) : OID count in slots.
2606  * slots (in/out) : Array of slots removed from heap page.
2607  * results (in) : Satisfies vacuum result.
2608  * reusable (in) :
2609  *
2610  * NOTE: Some values in slots array are modified and set to negative values.
2611  */
2612 static void
2613 vacuum_log_vacuum_heap_page (THREAD_ENTRY * thread_p, PAGE_PTR page_p, int n_slots, PGSLOTID * slots,
2614  MVCC_SATISFIES_VACUUM_RESULT * results, bool reusable, bool all_vacuumed)
2615 {
2616  LOG_DATA_ADDR addr;
2617  int packed_size = 0, i = 0;
2618  char *ptr = NULL, *buffer_p = NULL;
2619  char buffer[MAX_SLOTS_IN_PAGE * (sizeof (PGSLOTID) + 2 * OR_OID_SIZE) + (MAX_ALIGNMENT * 2)];
2620 
2621  assert (n_slots >= 0 && n_slots <= ((SPAGE_HEADER *) page_p)->num_slots);
2622  assert (n_slots > 0 || all_vacuumed);
2623 
2624  /* Initialize log data. */
2625  addr.offset = n_slots; /* Save number of slots in offset. */
2626  addr.pgptr = page_p;
2627  addr.vfid = NULL;
2628 
2629  /* Compute recovery data size */
2630 
2631  /* slots & results */
2632  packed_size += n_slots * sizeof (PGSLOTID);
2633 
2634  if (reusable)
2635  {
2637  }
2638 
2639  if (all_vacuumed)
2640  {
2642  }
2643 
2644  assert (packed_size <= (int) sizeof (buffer));
2645 
2646  buffer_p = PTR_ALIGN (buffer, MAX_ALIGNMENT);
2647  ptr = buffer_p;
2648 
2649  if (n_slots > 0)
2650  {
2651  /* Pack slot ID's and results */
2652  for (i = 0; i < n_slots; i++)
2653  {
2654  assert (results[i] == VACUUM_RECORD_DELETE_INSID_PREV_VER || results[i] == VACUUM_RECORD_REMOVE);
2655 
2656  assert (slots[i] > 0);
2657 
2658  if (results[i] == VACUUM_RECORD_REMOVE)
2659  {
2660  /* Use negative slot ID to mark that object has been completely removed. */
2661  slots[i] = -slots[i];
2662  }
2663  }
2664  memcpy (ptr, slots, n_slots * sizeof (PGSLOTID));
2665  ptr += n_slots * sizeof (PGSLOTID);
2666  }
2667 
2668  assert ((ptr - buffer_p) == packed_size);
2669 
2670  /* Append new redo log rebuild_record */
2671  log_append_redo_data (thread_p, RVVAC_HEAP_PAGE_VACUUM, &addr, packed_size, buffer_p);
2672 }
2673 
2674 /*
2675  * vacuum_rv_redo_vacuum_heap_page () - Redo vacuum remove oids from heap page.
2676  *
2677  * return : Error code.
2678  * thread_p (in) : Thread entry.
2679  * rcv (in) : Recovery structure.
2680  */
2681 int
2683 {
2684  int i = 0;
2685  INT16 n_slots;
2686  PGSLOTID *slotids = NULL;
2687  PAGE_PTR page_p = NULL;
2688  RECDES rebuild_record, peek_record;
2689  int old_header_size, new_header_size;
2690  MVCC_REC_HEADER rec_header;
2691  char *ptr = NULL;
2692  char data_buf[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
2693  bool reusable;
2694  bool all_vacuumed;
2695 
2696  page_p = rcv->pgptr;
2697 
2698  ptr = (char *) rcv->data;
2699 
2700  /* Get n_slots and flags */
2701  n_slots = (rcv->offset & (~VACUUM_LOG_VACUUM_HEAP_MASK));
2702  reusable = (rcv->offset & VACUUM_LOG_VACUUM_HEAP_REUSABLE) != 0;
2703  all_vacuumed = (rcv->offset & VACUUM_LOG_VACUUM_HEAP_ALL_VACUUMED) != 0;
2704 
2705  assert (n_slots < ((SPAGE_HEADER *) page_p)->num_slots);
2706 
2707  if (all_vacuumed)
2708  {
2710  "Change vacuum status for heap page %d|%d, lsa=%lld|%d, from once to none.",
2711  PGBUF_PAGE_STATE_ARGS (rcv->pgptr));
2712  }
2713 
2714  if (n_slots == 0)
2715  {
2716  /* No slots have been vacuumed, but header must be changed from one vacuum required to no vacuum required. */
2717  assert (all_vacuumed);
2718 
2719  if (all_vacuumed)
2720  {
2721  heap_page_set_vacuum_status_none (thread_p, rcv->pgptr);
2722  }
2723 
2724  pgbuf_set_dirty (thread_p, page_p, DONT_FREE);
2725 
2726  return NO_ERROR;
2727  }
2728 
2729  /* Get slot ID's and result types */
2730  slotids = (PGSLOTID *) ptr;
2731  ptr += n_slots * sizeof (PGSLOTID);
2732 
2733  /* Safeguard for correct unpacking of recovery data */
2734  assert (ptr == rcv->data + rcv->length);
2735 
2736  /* Initialize rebuild_record for deleting INSERT MVCCID's */
2737  rebuild_record.area_size = IO_MAX_PAGE_SIZE;
2738  rebuild_record.data = PTR_ALIGN (data_buf, MAX_ALIGNMENT);
2739 
2740  /* Vacuum slots */
2741  for (i = 0; i < n_slots; i++)
2742  {
2743  if (slotids[i] < 0)
2744  {
2745  /* Record was removed completely */
2746  slotids[i] = -slotids[i];
2747  spage_vacuum_slot (thread_p, page_p, slotids[i], reusable);
2748  }
2749  else
2750  {
2751  /* Only insert MVCCID has been removed */
2752  if (spage_get_record (thread_p, rcv->pgptr, slotids[i], &peek_record, PEEK) != S_SUCCESS)
2753  {
2754  vacuum_er_log_error (VACUUM_ER_LOG_HEAP | VACUUM_ER_LOG_RECOVERY, "Failed to get record at %d|%d|%d",
2755  PGBUF_PAGE_VPID_AS_ARGS (rcv->pgptr), slotids[i]);
2756  assert_release (false);
2757  return ER_FAILED;
2758  }
2759 
2760  if (peek_record.type != REC_HOME && peek_record.type != REC_NEWHOME)
2761  {
2762  /* Unexpected */
2763  assert_release (false);
2764  return ER_FAILED;
2765  }
2766 
2767  /* Remove insert MVCCID */
2768  or_mvcc_get_header (&peek_record, &rec_header);
2769  old_header_size = mvcc_header_size_lookup[MVCC_GET_FLAG (&rec_header)];
2770  /* Clear insert MVCCID. */
2772  /* Clear previous version. */
2774  new_header_size = mvcc_header_size_lookup[MVCC_GET_FLAG (&rec_header)];
2775 
2776  /* Rebuild record */
2777  rebuild_record.type = peek_record.type;
2778  rebuild_record.length = peek_record.length;
2779  memcpy (rebuild_record.data, peek_record.data, peek_record.length);
2780 
2781  /* Set new header */
2782  or_mvcc_set_header (&rebuild_record, &rec_header);
2783  /* Copy record data */
2784  memcpy (rebuild_record.data + new_header_size, peek_record.data + old_header_size,
2785  peek_record.length - old_header_size);
2786 
2787  if (spage_update (thread_p, rcv->pgptr, slotids[i], &rebuild_record) != SP_SUCCESS)
2788  {
2789  assert_release (false);
2790  return ER_FAILED;
2791  }
2792  }
2793  }
2794 
2795  if (spage_need_compact (thread_p, rcv->pgptr) == true)
2796  {
2797  (void) spage_compact (thread_p, rcv->pgptr);
2798  }
2799 
2800  if (all_vacuumed)
2801  {
2802  heap_page_set_vacuum_status_none (thread_p, rcv->pgptr);
2803  }
2804 
2805  pgbuf_set_dirty (thread_p, page_p, DONT_FREE);
2806 
2807  return NO_ERROR;
2808 }
2809 
2810 /*
2811  * vacuum_log_remove_ovf_insid () - Log removing insert MVCCID from big record.
2812  *
2813  * return : Void.
2814  * thread_p (in) : Thread entry.
2815  * ovfpage (in) : Big record first overflow page.
2816  */
2817 static void
2819 {
2820  log_append_redo_data2 (thread_p, RVVAC_REMOVE_OVF_INSID, NULL, ovfpage, 0, 0, NULL);
2821 }
2822 
2823 /*
2824  * vacuum_rv_redo_remove_ovf_insid () - Redo removing insert MVCCID from big record.
2825  *
2826  * return : Error code.
2827  * thread_p (in) : Thread entry.
2828  * rcv (in) : Recovery data.
2829  */
2830 int
2832 {
2833  MVCC_REC_HEADER rec_header;
2834  int error = NO_ERROR;
2835 
2836  error = heap_get_mvcc_rec_header_from_overflow (rcv->pgptr, &rec_header, NULL);
2837  if (error != NO_ERROR)
2838  {
2839  return error;
2840  }
2841 
2842  MVCC_SET_INSID (&rec_header, MVCCID_ALL_VISIBLE);
2843  LSA_SET_NULL (&rec_header.prev_version_lsa);
2844 
2845  error = heap_set_mvcc_rec_header_on_overflow (rcv->pgptr, &rec_header);
2846  if (error != NO_ERROR)
2847  {
2848  return error;
2849  }
2850 
2851  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
2852 
2853  return NO_ERROR;
2854 }
2855 
2856 /*
2857  * vacuum_produce_log_block_data () - After logging a block of log data, useful information for vacuum is passed by log
2858  * manager and should be saved in lock-free buffer.
2859  *
2860  * return : Void.
2861  * thread_p (in) : Thread entry.
2862  * start_lsa (in) : Log block starting LSA.
2863  * oldest_mvccid (in) : Log block oldest MVCCID.
2864  * newest_mvccid (in) : Log block newest MVCCID.
2865  */
2866 void
2868 {
2870  {
2871  return;
2872  }
2874  // *INDENT-OFF*
2875  VACUUM_DATA_ENTRY block_data { log_Gl.hdr };
2876  // *INDENT-ON*
2877 
2878  // reset info for next block
2881 
2882  if (vacuum_Block_data_buffer == NULL)
2883  {
2884  assert (false);
2885  return;
2886  }
2887 
2889  "vacuum_produce_log_block_data: blockid=(%lld) start_lsa=(%lld, %d) old_mvccid=(%llu) "
2890  "new_mvccid=(%llu)", (long long) block_data.blockid, LSA_AS_ARGS (&block_data.start_lsa),
2891  (unsigned long long int) block_data.oldest_visible_mvccid,
2892  (unsigned long long int) block_data.newest_mvccid);
2893 
2894  /* Push new block into block data buffer */
2895  if (!vacuum_Block_data_buffer->produce (block_data))
2896  {
2897  /* Push failed, the buffer must be full */
2898  /* TODO: Set a new message error for full block data buffer */
2899  /* TODO: Probably this case should be avoided... Make sure that we do not lose vacuum data so there has to be
2900  * enough space to keep it. */
2901  vacuum_er_log_error (VACUUM_ER_LOG_ERROR, "%s", "Cannot produce new log block data! The buffer is already full.");
2902  assert (false);
2903  return;
2904  }
2905 
2907 }
2908 
2909 static void
2911 {
2912  if (vacuum_Data.is_loaded)
2913  {
2914  return;
2915  }
2916  assert (vacuum_Data.first_page == NULL && vacuum_Data.last_page == NULL);
2917  vacuum_Data.first_page = vacuum_fix_data_page (thread_p, &vacuum_Data_load.vpid_first);
2918  if (vacuum_Data.first_page == NULL)
2919  {
2920  assert_release (false);
2921  return;
2922  }
2923  if (VPID_EQ (&vacuum_Data_load.vpid_first, &vacuum_Data_load.vpid_last))
2924  {
2925  vacuum_Data.last_page = vacuum_Data.first_page;
2926  }
2927  else
2928  {
2929  vacuum_Data.last_page = vacuum_fix_data_page (thread_p, &vacuum_Data_load.vpid_last);
2930  if (vacuum_Data.last_page == NULL)
2931  {
2933  assert_release (false);
2934  return;
2935  }
2936  }
2937  vacuum_Data.is_loaded = true;
2938 }
2939 
2940 static void
2942 {
2943  if (!vacuum_Data.is_loaded)
2944  {
2945  return;
2946  }
2947 
2948  // save VPID's in case we need to reload
2949  pgbuf_get_vpid ((PAGE_PTR) vacuum_Data.first_page, &vacuum_Data_load.vpid_first);
2950  pgbuf_get_vpid ((PAGE_PTR) vacuum_Data.last_page, &vacuum_Data_load.vpid_last);
2951 
2953  vacuum_Data.is_loaded = false;
2954 }
2955 
2956 #if defined (SERVER_MODE)
2957 // *INDENT-OFF*
2958 void
2960 {
2961  PERF_UTIME_TRACKER perf_tracker;
2962 
2964  {
2965  return;
2966  }
2967 
2968  if (check_shutdown ())
2969  {
2970  // stop on shutdown
2971  return;
2972  }
2973 
2974  if (!BO_IS_SERVER_RESTARTED ())
2975  {
2976  // check if boot is aborted
2977  return;
2978  }
2979 
2980  PERF_UTIME_TRACKER_START (&thread_ref, &perf_tracker);
2981 
2982  m_oldest_visible_mvccid = log_Gl.mvcc_table.update_global_oldest_visible ();
2983  vacuum_er_log (VACUUM_ER_LOG_MASTER, "update oldest_visible = %lld", (long long int) m_oldest_visible_mvccid);
2984 
2985  if (!vacuum_Data.is_loaded)
2986  {
2987  /* Load vacuum data. */
2988  /* This was initially in boot_restart_server. However, the "commit" of boot_restart_server will complain
2989  * about vacuum data first and last page not being unfixed (and it will also unfix them).
2990  * So, we have to load the data here (vacuum master never commits).
2991  */
2993 
2994  m_cursor.set_on_vacuum_data_start ();
2995  }
2996 
2997  pgbuf_flush_if_requested (&thread_ref, (PAGE_PTR) vacuum_Data.first_page);
2998  pgbuf_flush_if_requested (&thread_ref, (PAGE_PTR) vacuum_Data.last_page);
2999 
3000  m_cursor.force_data_update ();
3002  vacuum_job_cursor_print_args (m_cursor));
3003  for (; m_cursor.is_valid () && !should_interrupt_iteration (); m_cursor.increment_blockid ())
3004  {
3005  if (!is_cursor_entry_ready_to_vacuum ())
3006  {
3007  // next entries cannot be ready if current entry is not ready; stop this iteration
3008  break;
3009  }
3010 
3011  if (!is_cursor_entry_available ())
3012  {
3013  // try next block
3014  continue;
3015  }
3016  start_job_on_cursor_entry ();
3017 
3018  if (should_force_data_update ())
3019  {
3020  m_cursor.force_data_update ();
3021  }
3022  }
3023  m_cursor.unload ();
3024 #if !defined (NDEBUG)
3026 #endif /* !NDEBUG */
3027  PERF_UTIME_TRACKER_TIME (&thread_ref, &perf_tracker, PSTAT_VAC_MASTER);
3028 }
3029 
3030 bool
3032 {
3033  if (vacuum_Data.shutdown_sequence.check_shutdown_request ())
3034  {
3035  // stop on shutdown
3036  vacuum_er_log (VACUUM_ER_LOG_MASTER, "%s", "Interrupt iteration: shutdown");
3037  return true;
3038  }
3039  return false;
3040 }
3041 
3042 bool
3044 {
3045  if (cubthread::get_manager ()->is_pool_full (vacuum_Worker_threads))
3046  {
3047  // stop if worker pool is full
3048  vacuum_er_log (VACUUM_ER_LOG_MASTER, "%s", "Interrupt iteration: full worker pool");
3049  return true;
3050  }
3051  return false;
3052 }
3053 
3054 bool
3056 {
3057  return check_shutdown () || is_task_queue_full ();
3058 }
3059 
3060 bool
3062 {
3063  assert (m_cursor.is_valid ());
3064 
3065  if (m_cursor.get_current_entry ().newest_mvccid >= m_oldest_visible_mvccid)
3066  {
3067  // if entry newest MVCCID is still visible, it cannot be vacuumed
3069  "Cannot generate job for " VACUUM_LOG_DATA_ENTRY_MSG ("entry") ". "
3070  "global oldest visible mvccid = %llu.",
3071  VACUUM_LOG_DATA_ENTRY_AS_ARGS (&m_cursor.get_current_entry ()),
3072  (unsigned long long int) m_oldest_visible_mvccid);
3073  return false;
3074  }
3075 
3076  if (m_cursor.get_current_entry ().start_lsa.pageid + 1 >= log_Gl.append.prev_lsa.pageid)
3077  {
3078  // too close to end of log; let more log be appended before trying to vacuum the block
3080  "Cannot generate job for " VACUUM_LOG_DATA_ENTRY_MSG ("entry") ". "
3081  "log_Gl.append.prev_lsa.pageid = %d.",
3082  VACUUM_LOG_DATA_ENTRY_AS_ARGS (&m_cursor.get_current_entry ()),
3083  (long long int) log_Gl.append.prev_lsa.pageid);
3084  return false;
3085  }
3086 
3087  return true;
3088 }
3089 
3090 bool
3092 {
3093  const vacuum_data_entry &entry = m_cursor.get_current_entry ();
3094  if (entry.is_available ())
3095  {
3096  return true;
3097  }
3098  else
3099  {
3100  // already vacuumed or entry job is in progress
3101  assert (entry.is_vacuumed () || entry.is_job_in_progress ());
3103  "Job for blockid = %lld %s. Skip.", (long long int) entry.get_blockid (),
3104  entry.is_vacuumed () ? "was executed" : "is in progress");
3105  return false;
3106  }
3107 }
3108 
3109 void
3111 {
3112  m_cursor.start_job_on_current_entry ();
3113  cubthread::get_manager ()->push_task (vacuum_Worker_threads,
3114  new vacuum_worker_task (m_cursor.get_current_entry ()));
3115 }
3116 
3117 bool
3119 {
3120  if (vacuum_Finished_job_queue->is_half_full ())
3121  {
3122  // don't wait until it's full
3123  return true;
3124  }
3125  if (vacuum_Block_data_buffer->is_half_full ())
3126  {
3127  // don't wait until it's full
3128  return true;
3129  }
3130 
3131  return false;
3132 }
3133 // *INDENT-ON*
3134 #endif // SERVER_MODE
3135 
3136 /*
3137  * vacuum_rv_redo_vacuum_complete () - Redo recovery of vacuum complete.
3138  *
3139  * return : NO_ERROR.
3140  * thread_p (in) : Thread entry.
3141  * rcv (in) : Recovery data.
3142  */
3143 int
3145 {
3146  MVCCID oldest_newest_mvccid = MVCCID_NULL;
3147 
3148  assert (rcv->data != NULL && rcv->length == sizeof (MVCCID));
3149 
3150  oldest_newest_mvccid = *((MVCCID *) rcv->data);
3151 
3152  /* All vacuum complete. */
3153  vacuum_Data.oldest_unvacuumed_mvccid = oldest_newest_mvccid;
3154 
3155  /* Reset log header information saved for vacuum. */
3157 
3158  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
3159 
3160  return NO_ERROR;
3161 }
3162 
3163 /*
3164  * vacuum_process_log_block () - Vacuum heap and b-tree entries using log information found in a block of pages.
3165  *
3166  * return : Error code.
3167  * thread_p (in) : Thread entry.
3168  * data (in) : Block data.
3169  * block_log_buffer (in) : Block log page buffer identifier
3170  * sa_mode_partial_block (in) : True when SA_MODE vacuum based on partial block information from log header.
3171  * Logging is skipped if true.
3172  */
3173 static int
3174 vacuum_process_log_block (THREAD_ENTRY * thread_p, VACUUM_DATA_ENTRY * data, bool sa_mode_partial_block)
3175 {
3176  VACUUM_WORKER *worker = vacuum_get_vacuum_worker (thread_p);
3177  LOG_LSA log_lsa;
3178  LOG_LSA rcv_lsa;
3179  LOG_PAGEID first_block_pageid = VACUUM_FIRST_LOG_PAGEID_IN_BLOCK (data->get_blockid ());
3180  int error_code = NO_ERROR;
3181  LOG_DATA log_record_data;
3182  char *undo_data = NULL;
3183  int undo_data_size;
3184  char *es_uri = NULL;
3185  char log_pgbuf[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
3186  LOG_PAGE *log_page_p = NULL;
3188  BTID sys_btid;
3189  OID class_oid, oid;
3190  BTREE_OBJECT_INFO old_version;
3191  BTREE_OBJECT_INFO new_version;
3192  MVCCID threshold_mvccid = log_Gl.mvcc_table.get_global_oldest_visible ();
3194  MVCCID mvccid;
3195  LOG_VACUUM_INFO log_vacuum;
3196  OID heap_object_oid;
3197  bool vacuum_complete = false;
3198  bool was_interrupted = false;
3199  bool is_file_dropped = false;
3200 
3201  PERF_UTIME_TRACKER perf_tracker;
3202  PERF_UTIME_TRACKER job_time_tracker;
3203 #if defined (SA_MODE)
3204  bool dummy_continue_check = false;
3205 #endif /* SA_MODE */
3206 
3208  {
3209  return NO_ERROR;
3210  }
3211  assert (thread_p != NULL);
3212  assert (thread_p->get_system_tdes () != NULL);
3213 
3214  assert (worker != NULL);
3215  assert (!LOG_FIND_CURRENT_TDES (thread_p)->is_under_sysop ());
3216 
3217  PERF_UTIME_TRACKER_START (thread_p, &perf_tracker);
3218  PERF_UTIME_TRACKER_START (thread_p, &job_time_tracker);
3219 
3220  /* Initialize log_vacuum */
3221  LSA_SET_NULL (&log_vacuum.prev_mvcc_op_log_lsa);
3222  VFID_SET_NULL (&log_vacuum.vfid);
3223 
3224  /* Set sys_btid pointer for internal b-tree block */
3225  btid_int.sys_btid = &sys_btid;
3226 
3227  /* Check starting lsa is not null and that it really belong to this block */
3228  assert (!LSA_ISNULL (&data->start_lsa) && (data->get_blockid () == vacuum_get_log_blockid (data->start_lsa.pageid)));
3229 
3230  /* Fetch the page where start_lsa is located */
3231  log_page_p = (LOG_PAGE *) PTR_ALIGN (log_pgbuf, MAX_ALIGNMENT);
3232  log_page_p->hdr.logical_pageid = NULL_PAGEID;
3233  log_page_p->hdr.offset = NULL_OFFSET;
3234 
3236  "vacuum_process_log_block (): " VACUUM_LOG_DATA_ENTRY_MSG ("block"),
3238 
3239  if (!sa_mode_partial_block)
3240  {
3241  error_code = vacuum_log_prefetch_vacuum_block (thread_p, data);
3242  if (error_code != NO_ERROR)
3243  {
3244  return error_code;
3245  }
3246  }
3247  else
3248  {
3249  // block is not entirely logged and we cannot prefetch it.
3250  }
3251 
3252  /* Initialize stored heap objects. */
3253  worker->n_heap_objects = 0;
3254 
3255  /* set was_interrupted flag to tell vacuum_heap_page that some safe-guard have to behave differently. interruptions
3256  * are usually marked in blockid, however sa_mode_partial_block can also be interrupted and will no flag is set in
3257  * blockid. */
3258  was_interrupted = data->was_interrupted () || sa_mode_partial_block;
3259 
3260  /* Follow the linked records starting with start_lsa */
3261  for (LSA_COPY (&log_lsa, &data->start_lsa); !LSA_ISNULL (&log_lsa) && log_lsa.pageid >= first_block_pageid;
3262  LSA_COPY (&log_lsa, &log_vacuum.prev_mvcc_op_log_lsa))
3263  {
3264 #if defined(SERVER_MODE)
3265  if (thread_p->shutdown)
3266  {
3267  /* Server shutdown was requested, stop vacuuming. */
3268  goto end;
3269  }
3270 #else /* !SERVER_MODE */ /* SA_MODE */
3271  if (logtb_get_check_interrupt (thread_p) && logtb_is_interrupted (thread_p, true, &dummy_continue_check))
3272  {
3274  error_code = ER_INTERRUPTED;
3275  goto end;
3276  }
3277 #endif /* SERVER_MODE */
3278 
3279  vacuum_er_log (VACUUM_ER_LOG_WORKER, "process log entry at log_lsa %lld|%d", LSA_AS_ARGS (&log_lsa));
3280 
3282  PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &perf_tracker, PSTAT_VAC_WORKER_EXECUTE);
3283 
3284  LSA_COPY (&rcv_lsa, &log_lsa);
3285 
3286  if (log_page_p->hdr.logical_pageid != log_lsa.pageid)
3287  {
3288  error_code = vacuum_fetch_log_page (thread_p, log_lsa.pageid, log_page_p);
3289  if (error_code != NO_ERROR)
3290  {
3291  assert_release (false);
3292  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_process_log_block");
3293  goto end;
3294  }
3295  }
3296 
3297  /* Process log entry and obtain relevant information for vacuum. */
3298  error_code =
3299  vacuum_process_log_record (thread_p, worker, &log_lsa, log_page_p, &log_record_data, &mvccid, &undo_data,
3300  &undo_data_size, &log_vacuum, &is_file_dropped, false);
3301  if (error_code != NO_ERROR)
3302  {
3303  vacuum_check_shutdown_interruption (thread_p, error_code);
3304  goto end;
3305  }
3306 
3308  PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &perf_tracker, PSTAT_VAC_WORKER_PROCESS_LOG);
3309 
3310  if (is_file_dropped)
3311  {
3312  /* No need to vacuum */
3314  "Skip vacuuming based on %lld|%d in file %d|%d. Log record info: rcvindex=%d.",
3315  (long long int) rcv_lsa.pageid, (int) rcv_lsa.offset, log_vacuum.vfid.volid,
3316  log_vacuum.vfid.fileid, log_record_data.rcvindex);
3317  continue;
3318  }
3319 
3320 #if !defined (NDEBUG)
3321  if (MVCC_ID_FOLLOW_OR_EQUAL (mvccid, threshold_mvccid) || MVCC_ID_PRECEDES (mvccid, data->oldest_visible_mvccid)
3322  || MVCC_ID_PRECEDES (data->newest_mvccid, mvccid))
3323  {
3324  /* threshold_mvccid or mvccid or block data may be invalid */
3325  assert (0);
3326  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_process_log_block");
3327  goto end;
3328  }
3329 #endif /* !NDEBUG */
3330 
3331  if (LOG_IS_MVCC_HEAP_OPERATION (log_record_data.rcvindex))
3332  {
3333  /* Collect heap object to be vacuumed at the end of the job. */
3334  heap_object_oid.pageid = log_record_data.pageid;
3335  heap_object_oid.volid = log_record_data.volid;
3336  heap_object_oid.slotid = heap_rv_remove_flags_from_offset (log_record_data.offset);
3337 
3338  error_code = vacuum_collect_heap_objects (thread_p, worker, &heap_object_oid, &log_vacuum.vfid);
3339  if (error_code != NO_ERROR)
3340  {
3341  assert_release (false);
3342  vacuum_er_log_error (VACUUM_ER_LOG_WORKER | VACUUM_ER_LOG_HEAP, "%s", "vacuum_collect_heap_objects.");
3343  /* Release should not stop. */
3344  er_clear ();
3345  error_code = NO_ERROR;
3346  continue;
3347  }
3349  "collected oid %d|%d|%d, in file %d|%d, based on %lld|%d", OID_AS_ARGS (&heap_object_oid),
3350  VFID_AS_ARGS (&log_vacuum.vfid), LSA_AS_ARGS (&rcv_lsa));
3351  }
3352  else if (LOG_IS_MVCC_BTREE_OPERATION (log_record_data.rcvindex))
3353  {
3354  /* Find b-tree entry and vacuum it */
3355  OR_BUF key_buf;
3356 
3357  assert (undo_data != NULL);
3358 
3359  if (log_record_data.rcvindex == RVBT_MVCC_INSERT_OBJECT_UNQ)
3360  {
3361  btree_rv_read_keybuf_two_objects (thread_p, undo_data, undo_data_size, &btid_int, &old_version,
3362  &new_version, &key_buf);
3363  COPY_OID (&oid, &old_version.oid);
3364  COPY_OID (&class_oid, &old_version.class_oid);
3365  }
3366  else
3367  {
3368  btree_rv_read_keybuf_nocopy (thread_p, undo_data, undo_data_size, &btid_int, &class_oid, &oid, &mvcc_info,
3369  &key_buf);
3370  }
3371  assert (!OID_ISNULL (&oid));
3372 
3373  /* Vacuum based on rcvindex. */
3374  if (log_record_data.rcvindex == RVBT_MVCC_NOTIFY_VACUUM)
3375  {
3376  /* The notification comes from loading index. The object may be both inserted or deleted (load index
3377  * considers all objects for visibility reasons). Vacuum must also decide to remove insert MVCCID or the
3378  * entire object. */
3379  if (MVCCID_IS_VALID (mvcc_info.delete_mvccid))
3380  {
3382  "vacuum from b-tree: btidp(%d, %d|%d) oid(%d|%d|%d) "
3383  "class_oid(%d|%d|%d), purpose=rem_object, mvccid=%llu, based on %lld|%d",
3384  BTID_AS_ARGS (btid_int.sys_btid), OID_AS_ARGS (&oid), OID_AS_ARGS (&class_oid),
3385  (unsigned long long int) mvcc_info.delete_mvccid, LSA_AS_ARGS (&rcv_lsa));
3386  error_code =
3387  btree_vacuum_object (thread_p, btid_int.sys_btid, &key_buf, &oid, &class_oid,
3388  mvcc_info.delete_mvccid);
3389  }
3390  else if (MVCCID_IS_VALID (mvcc_info.insert_mvccid) && mvcc_info.insert_mvccid != MVCCID_ALL_VISIBLE)
3391  {
3393  "vacuum from b-tree: btidp(%d, %d|%d) oid(%d|%d|%d) class_oid(%d|%d|%d), "
3394  "purpose=rem_insid, mvccid=%llu, based on %lld|%d",
3395  BTID_AS_ARGS (btid_int.sys_btid), OID_AS_ARGS (&oid), OID_AS_ARGS (&class_oid),
3396  (unsigned long long int) mvcc_info.insert_mvccid, LSA_AS_ARGS (&rcv_lsa));
3397  error_code =
3398  btree_vacuum_insert_mvccid (thread_p, btid_int.sys_btid, &key_buf, &oid, &class_oid,
3399  mvcc_info.insert_mvccid);
3400  }
3401  else
3402  {
3403  /* impossible case */
3405  "invalid vacuum case for RVBT_MVCC_NOTIFY_VACUUM btid(%d, %d|%d) "
3406  "oid(%d|%d|%d) class_oid(%d|%d|%d), based on %lld|%d",
3407  BTID_AS_ARGS (btid_int.sys_btid), OID_AS_ARGS (&oid), OID_AS_ARGS (&class_oid),
3408  LSA_AS_ARGS (&rcv_lsa));
3409  assert_release (false);
3410  continue;
3411  }
3412  }
3413  else if (log_record_data.rcvindex == RVBT_MVCC_DELETE_OBJECT)
3414  {
3415  /* Object was deleted and must be completely removed. */
3417  "vacuum from b-tree: btidp(%d, %d|%d) oid(%d|%d|%d) "
3418  "class_oid(%d|%d|%d), purpose=rem_object, mvccid=%llu, based on %lld|%d",
3419  BTID_AS_ARGS (btid_int.sys_btid), OID_AS_ARGS (&oid), OID_AS_ARGS (&class_oid),
3420  (unsigned long long int) mvccid, LSA_AS_ARGS (&rcv_lsa));
3421  error_code = btree_vacuum_object (thread_p, btid_int.sys_btid, &key_buf, &oid, &class_oid, mvccid);
3422  }
3423  else if (log_record_data.rcvindex == RVBT_MVCC_INSERT_OBJECT
3424  || log_record_data.rcvindex == RVBT_MVCC_INSERT_OBJECT_UNQ)
3425  {
3426  /* Object was inserted and only its insert MVCCID must be removed. */
3428  "vacuum from b-tree: btidp(%d, (%d %d)) oid(%d, %d, %d) "
3429  "class_oid(%d, %d, %d), purpose=rem_insid, mvccid=%llu, based on %lld|%d",
3430  BTID_AS_ARGS (btid_int.sys_btid), OID_AS_ARGS (&oid), OID_AS_ARGS (&class_oid),
3431  (unsigned long long int) mvccid, LSA_AS_ARGS (&rcv_lsa));
3432  error_code = btree_vacuum_insert_mvccid (thread_p, btid_int.sys_btid, &key_buf, &oid, &class_oid, mvccid);
3433  }
3434  else
3435  {
3436  /* Unexpected. */
3437  assert_release (false);
3438  }
3439  /* Did we have any errors? */
3440  if (error_code != NO_ERROR)
3441  {
3442  if (thread_p->shutdown)
3443  {
3444  // interrupted on shutdown
3445  goto end;
3446  }
3447  // unexpected case
3448  assert_release (false);
3450  "Error deleting object or insert MVCCID: error_code=%d", error_code);
3451  er_clear ();
3452  error_code = NO_ERROR;
3453  /* Release should not stop. Continue. */
3454  }
3455  }
3456  else if (log_record_data.rcvindex == RVES_NOTIFY_VACUUM)
3457  {
3458  /* A lob file must be deleted */
3459  (void) or_unpack_string (undo_data, &es_uri);
3460  vacuum_er_log (VACUUM_ER_LOG_WORKER, "Delete lob %s based on %lld|%d", es_uri, LSA_AS_ARGS (&rcv_lsa));
3461  if (es_delete_file (es_uri) != NO_ERROR)
3462  {
3463  er_clear ();
3464  }
3465  else
3466  {
3467  ASSERT_NO_ERROR ();
3468  }
3469  db_private_free_and_init (thread_p, es_uri);
3470  }
3471  else
3472  {
3473  /* Safeguard code */
3474  assert_release (false);
3475  }
3476 
3477  /* do not leak system ops */
3479  assert (!LOG_FIND_CURRENT_TDES (thread_p)->is_under_sysop ());
3480  }
3481 
3483  assert (!LOG_FIND_CURRENT_TDES (thread_p)->is_under_sysop ());
3484 
3485  error_code = vacuum_heap (thread_p, worker, threshold_mvccid, was_interrupted);
3486  if (error_code != NO_ERROR)
3487  {
3488  vacuum_check_shutdown_interruption (thread_p, error_code);
3489  goto end;
3490  }
3492  assert (!LOG_FIND_CURRENT_TDES (thread_p)->is_under_sysop ());
3493 
3495 
3496  vacuum_complete = true;
3497 
3498 end:
3499 
3500  assert (!LOG_FIND_CURRENT_TDES (thread_p)->is_under_sysop ());
3501 
3503  if (!sa_mode_partial_block)
3504  {
3505  /* TODO: Check that if start_lsa can be set to a different value when vacuum is not complete, to avoid processing
3506  * the same log data again. */
3507  vacuum_finished_block_vacuum (thread_p, data, vacuum_complete);
3508  }
3509 
3510 #if defined (SERVER_MODE)
3511  /* Unfix all pages now. Normally all pages should already be unfixed. */
3512  pgbuf_unfix_all (thread_p);
3513 #else /* !SERVER_MODE */ /* SA_MODE */
3514  /* Do not unfix all in stand-alone. Not yet. We need to keep vacuum data pages fixed. */
3515 #endif /* SA_MODE */
3516 
3517  PERF_UTIME_TRACKER_TIME_AND_RESTART (thread_p, &perf_tracker, PSTAT_VAC_WORKER_EXECUTE);
3518  PERF_UTIME_TRACKER_TIME (thread_p, &job_time_tracker, PSTAT_VAC_JOB);
3519 
3520  return error_code;
3521 }
3522 
3523 /*
3524  * vacuum_worker_allocate_resources () - Assign a vacuum worker to current thread.
3525  *
3526  * return : Error code.
3527  * thread_p (in) : Thread entry.
3528  *
3529  * NOTE: This is protected by vacuum data lock.
3530  */
3531 static int
3533 {
3534  size_t size_worker_prefetch_log_buffer;
3535 
3537 
3538  if (worker->allocated_resources)
3539  {
3540  return NO_ERROR;
3541  }
3542 
3543  /* Allocate log_zip */
3544  worker->log_zip_p = log_zip_alloc (IO_PAGESIZE);
3545  if (worker->log_zip_p == NULL)
3546  {
3547  vacuum_er_log_error (VACUUM_ER_LOG_WORKER, "%s", "Could not allocate log zip.");
3548  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_worker_allocate_resources");
3549  return ER_FAILED;
3550  }
3551 
3552  /* Allocate heap objects buffer */
3554  worker->heap_objects = (VACUUM_HEAP_OBJECT *) malloc (worker->heap_objects_capacity * sizeof (VACUUM_HEAP_OBJECT));
3555  if (worker->heap_objects == NULL)
3556  {
3557  vacuum_er_log_error (VACUUM_ER_LOG_WORKER, "%s", "Could not allocate files and objects buffer.");
3558  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_worker_allocate_resources");
3559  goto error;
3560  }
3561 
3562  /* Allocate undo data buffer */
3563  worker->undo_data_buffer = (char *) malloc (IO_PAGESIZE);
3564  if (worker->undo_data_buffer == NULL)
3565  {
3566  vacuum_er_log_error (VACUUM_ER_LOG_WORKER, "%s", "Could not allocate undo data buffer.");
3567  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_worker_allocate_resources");
3568  goto error;
3569  }
3571 
3572  size_worker_prefetch_log_buffer = VACUUM_PREFETCH_LOG_BLOCK_BUFFER_PAGES * LOG_PAGESIZE;
3573  worker->prefetch_log_buffer = (char *) malloc (size_worker_prefetch_log_buffer);
3574  if (worker->prefetch_log_buffer == NULL)
3575  {
3576  vacuum_er_log_error (VACUUM_ER_LOG_WORKER, "%s", "Could not allocate prefetch buffer.");
3577  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_worker_allocate_resources");
3578  goto error;
3579  }
3580 
3581  /* Safe guard - it is assumed that transaction descriptor is already initialized. */
3582  assert (logtb_get_system_tdes (thread_p) != NULL);
3583 
3584  worker->allocated_resources = true;
3585 
3586  return NO_ERROR;
3587 
3588 error:
3589  /* Free worker resources. */
3590  vacuum_finalize_worker (thread_p, worker);
3591  return ER_FAILED;
3592 }
3593 
3594 /*
3595  * vacuum_finalize_worker () - Free resources allocated for vacuum worker.
3596  *
3597  * return : Void.
3598  * worker_info (in) : Vacuum worker.
3599  */
3600 static void
3602 {
3603  if (worker_info->log_zip_p != NULL)
3604  {
3605  log_zip_free (worker_info->log_zip_p);
3606  worker_info->log_zip_p = NULL;
3607  }
3608  if (worker_info->heap_objects != NULL)
3609  {
3610  free_and_init (worker_info->heap_objects);
3611  }
3612  if (worker_info->undo_data_buffer != NULL)
3613  {
3614  free_and_init (worker_info->undo_data_buffer);
3615  }
3616  if (worker_info->prefetch_log_buffer != NULL)
3617  {
3618  free_and_init (worker_info->prefetch_log_buffer);
3619  }
3620 }
3621 
3622 /*
3623  * vacuum_finished_block_vacuum () - Called when vacuuming a block is stopped.
3624  *
3625  * return : Void.
3626  * thread_p (in) : Thread entry.
3627  * data (in) : Vacuum block data.
3628  * is_vacuum_complete (in) : True if the entire block was processed.
3629  *
3630  * NOTE: The block data received here is not a block in vacuum data table.
3631  * It is just a copy (because the table can be changed and the data
3632  * can be moved). First obtain the block data in the table and copy the
3633  * block data received as argument.
3634  */
3635 static void
3636 vacuum_finished_block_vacuum (THREAD_ENTRY * thread_p, VACUUM_DATA_ENTRY * data, bool is_vacuum_complete)
3637 {
3639 
3640  if (is_vacuum_complete)
3641  {
3642  /* Set status as vacuumed. Vacuum master will remove it from table */
3643  data->set_vacuumed ();
3644 
3646  "Processing log block %lld is complete. Notify master.", (long long int) data->get_blockid ());
3647  }
3648  else
3649  {
3650  /* We expect that worker job is abandoned during shutdown. But all other cases are error cases. */
3651  int error_level =
3652 #if defined (SERVER_MODE)
3653  thread_p->shutdown ? VACUUM_ER_LOG_WARNING : VACUUM_ER_LOG_ERROR;
3654 
3655 #if !defined (NDEBUG)
3656  /* Interrupting jobs without shutdown is unacceptable. */
3657  assert (thread_p->shutdown);
3659 #endif /* !NDEBUG */
3660 
3661 #else /* !SERVER_MODE */
3663  assert (er_errid () == ER_INTERRUPTED);
3664 #endif /* !SERVER_MODE */
3665 
3666  /* Vacuum will have to be re-run */
3667  data->set_interrupted ();
3668 
3669  /* Copy new block data */
3670  /* The only relevant information is in fact the updated start_lsa if it has changed. */
3671  if (error_level == VACUUM_ER_LOG_ERROR)
3672  {
3674  "Processing log block %lld is interrupted!", (long long int) data->get_blockid ());
3675  }
3676  else
3677  {
3679  "Processing log block %lld is interrupted!", (long long int) data->get_blockid ());
3680  }
3681  }
3682 
3683  /* Notify master the job is finished. */
3684  blockid = data->blockid;
3685  if (!vacuum_Finished_job_queue->produce (blockid))
3686  {
3687  assert_release (false);
3688  vacuum_er_log_error (VACUUM_ER_LOG_WORKER | VACUUM_ER_LOG_JOBS, "%s", "Finished job queue is full!!!");
3689  }
3690 
3691 #if defined (SERVER_MODE)
3692  /* Hurry master wakeup if finished job queue is getting filled. */
3693  if (vacuum_Finished_job_queue->is_half_full ())
3694  {
3695  /* Wakeup master to process finished jobs. */
3696  vacuum_Master_daemon->wakeup ();
3697  }
3698 #endif /* SERVER_MODE */
3699 }
3700 
3701 /*
3702  * vacuum_read_log_aligned () - clone of LOG_READ_ALIGN based on vacuum_fetch_log_page
3703  *
3704  * thread_entry (in) : thread entry
3705  * log_lsa (in/out) : log lsa
3706  * log_page (in/out) : log page
3707  */
3708 static void
3710 {
3711  // align offset
3712  log_lsa->offset = DB_ALIGN (log_lsa->offset, DOUBLE_ALIGNMENT);
3713  while (log_lsa->offset >= (int) LOGAREA_SIZE)
3714  {
3715  log_lsa->pageid++;
3716  if (vacuum_fetch_log_page (thread_entry, log_lsa->pageid, log_page) != NO_ERROR)
3717  {
3718  // we cannot recover from this
3719  logpb_fatal_error (thread_entry, true, ARG_FILE_LINE, "vacuum_read_log_aligned");
3720  }
3721 
3722  log_lsa->offset = DB_ALIGN (log_lsa->offset - (int) LOGAREA_SIZE, DOUBLE_ALIGNMENT);
3723  }
3724 }
3725 
3726 /*
3727  * vacuum_read_log_add_aligned () - clone of LOG_READ_ADD_ALIGN based on vacuum_fetch_log_page
3728  *
3729  * thread_entry (in) : thread entry
3730  * size (in) : size to add
3731  * log_lsa (in/out) : log lsa
3732  * log_page (in/out) : log page
3733  */
3734 static void
3736 {
3737  log_lsa->offset += (int) size;
3738  vacuum_read_log_aligned (thread_entry, log_lsa, log_page);
3739 }
3740 
3741 /*
3742  * vacuum_read_advance_when_doesnt_fit () - clone of LOG_READ_ADVANCE_WHEN_DOESNT_FIT based on vacuum_fetch_log_page
3743  *
3744  * thread_entry (in) : thread entry
3745  * size (in) : size to add
3746  * log_lsa (in/out) : log lsa
3747  * log_page (in/out) : log page
3748  */
3749 static void
3751 {
3752  if (log_lsa->offset + (int) size >= (int) LOGAREA_SIZE)
3753  {
3754  log_lsa->offset = (int) LOGAREA_SIZE; // force fetching next page
3755  vacuum_read_log_aligned (thread_entry, log_lsa, log_page);
3756  log_lsa->offset = 0;
3757  }
3758 }
3759 
3760 /*
3761  * vacuum_copy_data_from_log () - clone of logpb_copy_from_log based on vacuum_fetch_log_page
3762  *
3763  * thread_entry (in) : thread entry
3764  * area (out) : where to copy log data
3765  * length (in) : how much log data to copy
3766  * size (in) : size to add
3767  * log_lsa (in/out) : log lsa
3768  * log_page (in/out) : log page
3769  */
3770 static void
3772 {
3773  if (log_lsa->offset + length < (int) LOGAREA_SIZE)
3774  {
3775  // the log data is contiguous
3776  std::memcpy (area, log_page->area + log_lsa->offset, length);
3777  log_lsa->offset += length;
3778  }
3779  else
3780  {
3781  int copy_length = 0;
3782  int area_offset = 0;
3783 
3784  while (length > 0)
3785  {
3786  vacuum_read_advance_when_doesnt_fit (thread_p, 0, log_lsa, log_page);
3787  if (log_lsa->offset + length < (int) LOGAREA_SIZE)
3788  {
3789  copy_length = length;
3790  }
3791  else
3792  {
3793  copy_length = LOGAREA_SIZE - (int) log_lsa->offset;
3794  }
3795  std::memcpy (area + area_offset, log_page->area + log_lsa->offset, copy_length);
3796  length -= copy_length;
3797  area_offset += copy_length;
3798  log_lsa->offset += copy_length;
3799  }
3800  }
3801 }
3802 
3803 /*
3804  * vacuum_process_log_record () - Process one log record for vacuum.
3805  *
3806  * return : Error code.
3807  * worker (in) : Vacuum worker.
3808  * thread_p (in) : Thread entry.
3809  * log_lsa_p (in/out) : Input is the start of undo data. Output is the end of undo data.
3810  * log_page_p (in/out) : The log page for log_lsa_p.
3811  * mvccid (out) : Log entry MVCCID.
3812  * undo_data_ptr (out) : Undo data pointer.
3813  * undo_data_size (out) : Undo data size.
3814  * is_file_dropped (out) : True if the file corresponding to log entry was dropped.
3815  * stop_after_vacuum_info (in) : True if only vacuum info must be obtained from log record.
3816  */
3817 static int
3818 vacuum_process_log_record (THREAD_ENTRY * thread_p, VACUUM_WORKER * worker, LOG_LSA * log_lsa_p, LOG_PAGE * log_page_p,
3819  LOG_DATA * log_record_data, MVCCID * mvccid, char **undo_data_ptr, int *undo_data_size,
3820  LOG_VACUUM_INFO * vacuum_info, bool * is_file_dropped, bool stop_after_vacuum_info)
3821 {
3823  LOG_REC_MVCC_UNDOREDO *mvcc_undoredo = NULL;
3824  LOG_REC_MVCC_UNDO *mvcc_undo = NULL;
3825  LOG_REC_SYSOP_END *sysop_end = NULL;
3826  int ulength;
3827  char *new_undo_data_buffer = NULL;
3828  bool is_zipped = false;
3829  volatile LOG_RECTYPE log_rec_type = LOG_SMALLER_LOGREC_TYPE;
3830 
3831  int error_code = NO_ERROR;
3832 
3833  assert (log_lsa_p != NULL && log_page_p != NULL);
3834  assert (log_record_data != NULL);
3835  assert (mvccid != NULL);
3836  assert (stop_after_vacuum_info || is_file_dropped != NULL);
3837  assert (stop_after_vacuum_info || worker != NULL);
3838  assert (stop_after_vacuum_info || undo_data_ptr != NULL);
3839  assert (stop_after_vacuum_info || undo_data_size != NULL);
3840 
3841  if (!stop_after_vacuum_info)
3842  {
3843  *undo_data_ptr = NULL;
3844  *undo_data_size = 0;
3845  }
3846 
3847  LSA_SET_NULL (&vacuum_info->prev_mvcc_op_log_lsa);
3848  VFID_SET_NULL (&vacuum_info->vfid);
3849 
3850  /* Get log record header */
3851  log_rec_header = LOG_GET_LOG_RECORD_HEADER (log_page_p, log_lsa_p);
3852  log_rec_type = log_rec_header->type;
3853  vacuum_read_log_add_aligned (thread_p, sizeof (*log_rec_header), log_lsa_p, log_page_p);
3854 
3855  if (log_rec_type == LOG_MVCC_UNDO_DATA)
3856  {
3857  /* Get log record mvcc_undo information */
3858  vacuum_read_advance_when_doesnt_fit (thread_p, sizeof (*mvcc_undo), log_lsa_p, log_page_p);
3859  mvcc_undo = (LOG_REC_MVCC_UNDO *) (log_page_p->area + log_lsa_p->offset);
3860 
3861  /* Get MVCCID */
3862  *mvccid = mvcc_undo->mvccid;
3863 
3864  /* Get record log data */
3865  *log_record_data = mvcc_undo->undo.data;
3866 
3867  /* Get undo data length */
3868  ulength = mvcc_undo->undo.length;
3869 
3870  /* Copy LSA for next MVCC operation */
3871  LSA_COPY (&vacuum_info->prev_mvcc_op_log_lsa, &mvcc_undo->vacuum_info.prev_mvcc_op_log_lsa);
3872  VFID_COPY (&vacuum_info->vfid, &mvcc_undo->vacuum_info.vfid);
3873 
3874  vacuum_read_log_add_aligned (thread_p, sizeof (*mvcc_undo), log_lsa_p, log_page_p);
3875  }
3876  else if (log_rec_type == LOG_MVCC_UNDOREDO_DATA || log_rec_type == LOG_MVCC_DIFF_UNDOREDO_DATA)
3877  {
3878  /* Get log record undoredo information */
3879  vacuum_read_advance_when_doesnt_fit (thread_p, sizeof (*mvcc_undoredo), log_lsa_p, log_page_p);
3880  mvcc_undoredo = (LOG_REC_MVCC_UNDOREDO *) (log_page_p->area + log_lsa_p->offset);
3881 
3882  /* Get MVCCID */
3883  *mvccid = mvcc_undoredo->mvccid;
3884 
3885  /* Get record log data */
3886  *log_record_data = mvcc_undoredo->undoredo.data;
3887 
3888  /* Get undo data length */
3889  ulength = mvcc_undoredo->undoredo.ulength;
3890 
3891  /* Copy LSA for next MVCC operation */
3892  LSA_COPY (&vacuum_info->prev_mvcc_op_log_lsa, &mvcc_undoredo->vacuum_info.prev_mvcc_op_log_lsa);
3893  VFID_COPY (&vacuum_info->vfid, &mvcc_undoredo->vacuum_info.vfid);
3894 
3895  vacuum_read_log_add_aligned (thread_p, sizeof (*mvcc_undoredo), log_lsa_p, log_page_p);
3896  }
3897  else if (log_rec_type == LOG_SYSOP_END)
3898  {
3899  /* Get system op mvcc undo information */
3900  vacuum_read_advance_when_doesnt_fit (thread_p, sizeof (*sysop_end), log_lsa_p, log_page_p);
3901  sysop_end = (LOG_REC_SYSOP_END *) (log_page_p->area + log_lsa_p->offset);
3902  if (sysop_end->type != LOG_SYSOP_END_LOGICAL_MVCC_UNDO)
3903  {
3904  assert (false);
3905  vacuum_er_log_error (VACUUM_ER_LOG_LOGGING, "%s", "invalid record type!");
3906  return ER_FAILED;
3907  }
3908 
3909  mvcc_undo = &sysop_end->mvcc_undo;
3910 
3911  /* Get MVCCID */
3912  *mvccid = mvcc_undo->mvccid;
3913 
3914  /* Get record log data */
3915  *log_record_data = mvcc_undo->undo.data;
3916 
3917  /* Get undo data length */
3918  ulength = mvcc_undo->undo.length;
3919 
3920  /* Copy LSA for next MVCC operation */
3921  LSA_COPY (&vacuum_info->prev_mvcc_op_log_lsa, &mvcc_undo->vacuum_info.prev_mvcc_op_log_lsa);
3922  VFID_COPY (&vacuum_info->vfid, &mvcc_undo->vacuum_info.vfid);
3923 
3924  vacuum_read_log_add_aligned (thread_p, sizeof (*sysop_end), log_lsa_p, log_page_p);
3925  }
3926  else
3927  {
3928  /* Unexpected case */
3929  assert (false);
3931  return ER_FAILED;
3932  }
3933 
3934  if (stop_after_vacuum_info)
3935  {
3936  /* Vacuum info was already obtained. */
3937  return NO_ERROR;
3938  }
3939 
3940  if (!VFID_ISNULL (&vacuum_info->vfid))
3941  {
3942  /* Check if file was dropped. */
3944  {
3945  /* New files have been dropped. Droppers must wait until all running workers have been notified. Save new
3946  * version to let dropper know this worker noticed the changes. */
3947 
3948  /* But first, cleanup collected heap objects. */
3949  VFID vfid;
3950  VFID_COPY (&vfid, &vacuum_Last_dropped_vfid);
3951  vacuum_cleanup_collected_by_vfid (worker, &vfid);
3952 
3955  "update min version to %d", worker->drop_files_version);
3956  }
3957 
3958  /* Check if file is dropped */
3959  error_code = vacuum_is_file_dropped (thread_p, is_file_dropped, &vacuum_info->vfid, *mvccid);
3960  if (error_code != NO_ERROR)
3961  {
3962  vacuum_check_shutdown_interruption (thread_p, error_code);
3963  return error_code;
3964  }
3965  if (*is_file_dropped == true)
3966  {
3967  return NO_ERROR;
3968  }
3969  }
3970 
3971  /* We are here because the file that will be vacuumed is not dropped. */
3972  if (!LOG_IS_MVCC_BTREE_OPERATION (log_record_data->rcvindex) && log_record_data->rcvindex != RVES_NOTIFY_VACUUM)
3973  {
3974  /* No need to unpack undo data */
3975  return NO_ERROR;
3976  }
3977 
3978  /* We are here because undo data must be unpacked. */
3979  if (ZIP_CHECK (ulength))
3980  {
3981  /* Get real size */
3982  *undo_data_size = (int) GET_ZIP_LEN (ulength);
3983  is_zipped = true;
3984  }
3985  else
3986  {
3987  *undo_data_size = ulength;
3988  }
3989 
3990  if (log_lsa_p->offset + *undo_data_size < (int) LOGAREA_SIZE)
3991  {
3992  /* Set undo data pointer directly to log data */
3993  *undo_data_ptr = (char *) log_page_p->area + log_lsa_p->offset;
3994  }
3995  else
3996  {
3997  /* Undo data is found on several pages and needs to be copied to a contiguous area. */
3998  if (worker->undo_data_buffer_capacity < *undo_data_size)
3999  {
4000  /* Not enough space to save all undo data. Expand worker's undo data buffer. */
4001  new_undo_data_buffer = (char *) realloc (worker->undo_data_buffer, *undo_data_size);
4002  if (new_undo_data_buffer == NULL)
4003  {
4004  vacuum_er_log_error (VACUUM_ER_LOG_WORKER, "Could not expand undo data buffer to %d.", *undo_data_size);
4005  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_process_log_record");
4006  return ER_FAILED;
4007  }
4008  /* Expand was successful, update worker. */
4009  worker->undo_data_buffer = new_undo_data_buffer;
4010  worker->undo_data_buffer_capacity = *undo_data_size;
4011  }
4012  /* Set undo data pointer to worker's undo data buffer. */
4013  *undo_data_ptr = worker->undo_data_buffer;
4014 
4015  /* Copy data to buffer. */
4016  vacuum_copy_data_from_log (thread_p, *undo_data_ptr, *undo_data_size, log_lsa_p, log_page_p);
4017  }
4018 
4019  if (is_zipped)
4020  {
4021  /* Unzip data */
4022  if (log_unzip (worker->log_zip_p, *undo_data_size, *undo_data_ptr))
4023  {
4024  /* Update undo data pointer and size after unzipping. */
4025  *undo_data_size = (int) worker->log_zip_p->data_length;
4026  *undo_data_ptr = (char *) worker->log_zip_p->log_data;
4027  }
4028  else
4029  {
4030  vacuum_er_log_error (VACUUM_ER_LOG_WORKER, "%s", "Could not unzip undo data.");
4031  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_process_log_record");
4032  return ER_FAILED;
4033  }
4034  }
4035 
4036  return NO_ERROR;
4037 }
4038 
4039 #if defined (SERVER_MODE)
4040 /*
4041  * vacuum_get_worker_min_dropped_files_version () - Get current minimum dropped files version seen by active
4042  * vacuum workers.
4043  *
4044  * return : Minimum dropped files version.
4045  */
4046 static INT32
4047 vacuum_get_worker_min_dropped_files_version (void)
4048 {
4049  int i;
4050  INT32 min_version = -1;
4051 
4052  for (i = 0; i < VACUUM_MAX_WORKER_COUNT; i++)
4053  {
4054  /* Update minimum version if worker is active and its seen version is smaller than current minimum version (or if
4055  * minimum version is not initialized). */
4056  if (vacuum_Workers[i].state != VACUUM_WORKER_STATE_INACTIVE
4057  && (min_version == -1
4058  || vacuum_compare_dropped_files_version (min_version, vacuum_Workers[i].drop_files_version) > 0))
4059  {
4060  /* Update overall minimum version. */
4061  min_version = vacuum_Workers[i].drop_files_version;
4062  }
4063  }
4064  return min_version;
4065 }
4066 #endif // SERVER_MODE
4067 
4068 /*
4069  * vacuum_compare_blockids () - Comparator function for blockid's stored in vacuum data. The comparator knows to
4070  * filter any flags that mark block status.
4071  *
4072  * return : 0 if entries are equal, negative if first entry is smaller and
4073  * positive if first entry is bigger.
4074  * ptr1 (in) : Pointer to first blockid.
4075  * ptr2 (in) : Pointer to second blockid.
4076  */
4077 static int
4078 vacuum_compare_blockids (const void *ptr1, const void *ptr2)
4079 {
4080  /* Compare blockid's by removing any other flags. */
4081  return (int) (VACUUM_BLOCKID_WITHOUT_FLAGS (*((VACUUM_LOG_BLOCKID *) ptr1))
4083 }
4084 
4085 /*
4086  * vacuum_data_load_and_recover () - Loads vacuum data from disk and recovers it.
4087  *
4088  * return : Error code.
4089  * thread_p (in) : Thread entry.
4090  *
4091  * NOTE: Loading vacuum data should be done when the database is started,
4092  * before starting other vacuum routines.
4093  */
4094 int
4096 {
4097  int error_code = NO_ERROR;
4098  VACUUM_DATA_ENTRY *entry = NULL;
4099  VACUUM_DATA_PAGE *data_page = NULL;
4100  VPID next_vpid;
4101  int i = 0;
4102  bool is_page_dirty;
4103  FILE_DESCRIPTORS fdes;
4104 
4105  assert_release (!VFID_ISNULL (&vacuum_Data.vacuum_data_file));
4106 
4107  error_code = file_descriptor_get (thread_p, &vacuum_Data.vacuum_data_file, &fdes);
4108  if (error_code != NO_ERROR)
4109  {
4110  ASSERT_ERROR_AND_SET (error_code);
4111  goto end;
4112  }
4113 
4115 
4116  data_page = vacuum_fix_data_page (thread_p, &fdes.vacuum_data.vpid_first);
4117  if (data_page == NULL)
4118  {
4119  ASSERT_ERROR_AND_SET (error_code);
4120  goto end;
4121  }
4122  vacuum_Data.first_page = data_page;
4123  vacuum_Data.oldest_unvacuumed_mvccid = MVCCID_NULL;
4124 
4125  while (true)
4126  {
4127  is_page_dirty = false;
4128  if (data_page->index_unvacuumed >= 0)
4129  {
4130  assert (data_page->index_unvacuumed < vacuum_Data.page_data_max_count);
4131  assert (data_page->index_unvacuumed <= data_page->index_free);
4132  for (i = data_page->index_unvacuumed; i < data_page->index_free; i++)
4133  {
4134  entry = &data_page->data[i];
4135  if (entry->is_job_in_progress ())
4136  {
4137  /* Reset in progress flag, mark the job as interrupted and update last_blockid. */
4138  entry->set_interrupted ();
4139  is_page_dirty = true;
4140  }
4141  }
4142  }
4143  if (is_page_dirty)
4144  {
4145  vacuum_set_dirty_data_page (thread_p, data_page, DONT_FREE);
4146  }
4147  VPID_COPY (&next_vpid, &data_page->next_page);
4148  if (VPID_ISNULL (&next_vpid))
4149  {
4150  break;
4151  }
4152  vacuum_unfix_data_page (thread_p, data_page);
4153  data_page = vacuum_fix_data_page (thread_p, &next_vpid);
4154  if (data_page == NULL)
4155  {
4156  ASSERT_ERROR_AND_SET (error_code);
4157  goto end;
4158  }
4159  }
4160  assert (data_page != NULL);
4161  /* Save last_page. */
4162  vacuum_Data.last_page = data_page;
4163  data_page = NULL;
4164  /* Get last_blockid. */
4165  if (vacuum_is_empty ())
4166  {
4168 
4169  if (log_blockid < 0)
4170  {
4171  // we can be here if log has not yet passed first block. one case may be soon after copydb.
4172  assert (log_blockid == VACUUM_NULL_LOG_BLOCKID);
4174  "vacuum_data_load_and_recover: do not update last_blockid; prev_lsa = %lld|%d",
4176  }
4177  else if (LSA_ISNULL (&vacuum_Data.recovery_lsa) && LSA_ISNULL (&log_Gl.hdr.mvcc_op_log_lsa))
4178  {
4179  /* No recovery needed. This is used for 10.1 version to keep the functionality of the database.
4180  * In this case, we are updating the last_blockid of the vacuum to the last block that was logged.
4181  */
4182  vacuum_Data.set_last_blockid (log_blockid);
4183 
4185  "vacuum_data_load_and_recover: set last_blockid = %lld to logpb_last_complete_blockid ()",
4186  (long long int) vacuum_Data.get_last_blockid ());
4187  }
4188  else
4189  {
4190  /* Get the maximum between what is currently stored in vacuum and the value stored
4191  * in the log_Gl header. After a long session in SA_MODE, the vacuum_Data.last_page->data->blockid will
4192  * be outdated. Instead, SA_MODE updates log_Gl.hdr.vacuum_last_blockid before removing old archives.
4193  */
4194  vacuum_Data.set_last_blockid (MAX (log_Gl.hdr.vacuum_last_blockid, vacuum_Data.last_page->data->blockid));
4195 
4197  "vacuum_data_load_and_recover: set last_blockid = %lld to MAX("
4198  "log_Gl.hdr.vacuum_last_blockid=%lld, vacuum_Data.last_page->data->blockid=%lld)",
4199  (long long int) vacuum_Data.get_last_blockid (),
4200  (long long int) log_Gl.hdr.vacuum_last_blockid,
4201  (long long int) vacuum_Data.last_page->data->blockid);
4202  }
4203  }
4204  else
4205  {
4206  /* Get last_blockid from last vacuum data entry. */
4207  INT16 last_block_index = (vacuum_Data.last_page->index_free <= 0) ? 0 : vacuum_Data.last_page->index_free - 1;
4208  vacuum_Data.set_last_blockid (vacuum_Data.last_page->data[last_block_index].blockid);
4209 
4211  "vacuum_data_load_and_recover: set last_blockid = %lld to last data blockid = %lld",
4212  (long long int) vacuum_Data.get_last_blockid (),
4213  (long long int) vacuum_Data.last_page->data[last_block_index].blockid);
4214  }
4215 
4216  vacuum_Data.is_loaded = true;
4217 
4218  /* get global oldest active MVCCID. */
4220 
4221  error_code = vacuum_recover_lost_block_data (thread_p);
4222  if (error_code != NO_ERROR)
4223  {
4224  ASSERT_ERROR ();
4225  goto end;
4226  }
4227  LSA_SET_NULL (&vacuum_Data.recovery_lsa);
4228 
4229  vacuum_Data.set_oldest_unvacuumed_on_boot ();
4231 
4232 #if !defined (NDEBUG)
4234 #endif /* !NDEBUG */
4235 
4236  /* note: this is called when server is started, after recovery. however, pages cannot remain fixed by current thread,
4237  * they must be fixed by vacuum master. therefore, we'll save first and last vpids to vacuum_Data_load and unfix
4238  * them here. */
4239  pgbuf_get_vpid ((PAGE_PTR) vacuum_Data.first_page, &vacuum_Data_load.vpid_first);
4240  pgbuf_get_vpid ((PAGE_PTR) vacuum_Data.last_page, &vacuum_Data_load.vpid_last);
4241 
4242 end:
4243 
4244  if (data_page != NULL)
4245  {
4246  vacuum_unfix_data_page (thread_p, data_page);
4247  }
4249 
4250  return error_code;
4251 }
4252 
4253 /*
4254  * vacuum_load_dropped_files_from_disk () - Loads dropped files from disk.
4255  *
4256  * return : Error code.
4257  * thread_p (in) : Thread entry.
4258  * vfid (in) : File identifier.
4259  */
4260 int
4262 {
4264  VPID vpid;
4265  INT16 page_count;
4266 #if !defined (NDEBUG)
4267  VACUUM_TRACK_DROPPED_FILES *track_head = NULL, *track_tail = NULL;
4268  VACUUM_TRACK_DROPPED_FILES *track_new = NULL, *save_next = NULL;
4269 #endif
4270 
4271  assert_release (!VFID_ISNULL (&vacuum_Dropped_files_vfid));
4272 
4274  {
4275  /* Already loaded */
4276  assert_release (!VPID_ISNULL (&vacuum_Dropped_files_vpid));
4277 #if !defined (NDEBUG)
4278  assert_release (vacuum_Track_dropped_files != NULL);
4279 #endif
4280  return NO_ERROR;
4281  }
4282 
4283  assert (!VPID_ISNULL (&vacuum_Dropped_files_vpid));
4284 
4285  /* Save total count. */
4286  if (vacuum_Dropped_files_count != 0)
4287  {
4288  assert (false);
4290  }
4291 
4292  VPID_COPY (&vpid, &vacuum_Dropped_files_vpid);
4293 
4294  while (!VPID_ISNULL (&vpid))
4295  {
4296  page = vacuum_fix_dropped_entries_page (thread_p, &vpid, PGBUF_LATCH_READ);
4297  if (page == NULL)
4298  {
4299  assert (false);
4300  return ER_FAILED;
4301  }
4302 
4303  /* Get next page VPID and current page count */
4304  VPID_COPY (&vpid, &page->next_page);
4305 
4306  page_count = page->n_dropped_files;
4307  vacuum_Dropped_files_count += (INT32) page_count;
4308 
4309 #if !defined (NDEBUG)
4311  if (track_new == NULL)
4312  {
4314  for (track_new = track_head; track_new != NULL; track_new = save_next)
4315  {
4316  save_next = track_new->next_tracked_page;
4317  free_and_init (track_new);
4318  }
4319  vacuum_unfix_dropped_entries_page (thread_p, page);
4320  return ER_OUT_OF_VIRTUAL_MEMORY;
4321  }
4322 
4323  memcpy (&track_new->dropped_data_page, page, DB_PAGESIZE);
4324  track_new->next_tracked_page = NULL;
4325 
4326  if (track_head == NULL)
4327  {
4328  track_head = track_tail = track_new;
4329  }
4330  else
4331  {
4332  assert (track_tail != NULL);
4333  track_tail->next_tracked_page = track_new;
4334  track_tail = track_new;
4335  }
4336 #endif
4337  vacuum_unfix_dropped_entries_page (thread_p, page);
4338  }
4339 
4340 #if !defined(NDEBUG)
4341  vacuum_Track_dropped_files = track_head;
4342 #endif
4343 
4345  return NO_ERROR;
4346 }
4347 
4348 /*
4349  * vacuum_create_file_for_vacuum_data () - Create a disk file to keep vacuum data.
4350  *
4351  * return : Error code.
4352  * thread_p (in) : Thread entry.
4353  * vacuum_data_npages (in) : Number of vacuum data disk pages.
4354  * vacuum_data_vfid (out) : Created file VFID.
4355  */
4356 int
4357 vacuum_create_file_for_vacuum_data (THREAD_ENTRY * thread_p, VFID * vacuum_data_vfid)
4358 {
4359  VPID first_page_vpid;
4360  VACUUM_DATA_PAGE *data_page = NULL;
4361  PAGE_TYPE ptype = PAGE_VACUUM_DATA;
4362  FILE_DESCRIPTORS fdes;
4363 
4364  int error_code = NO_ERROR;
4365 
4366  /* Create disk file to keep vacuum data */
4367  error_code = file_create_with_npages (thread_p, FILE_VACUUM_DATA, 1, NULL, vacuum_data_vfid);
4368  if (error_code != NO_ERROR)
4369  {
4370  ASSERT_ERROR ();
4371  return error_code;
4372  }
4373  error_code = file_alloc (thread_p, vacuum_data_vfid, file_init_page_type, &ptype, &first_page_vpid,
4374  (PAGE_PTR *) (&data_page));
4375  if (error_code != NO_ERROR)
4376  {
4377  ASSERT_ERROR ();
4378  return error_code;
4379  }
4380  if (data_page == NULL)
4381  {
4382  assert_release (false);
4383  return ER_FAILED;
4384  }
4385 
4386  /* save in file descriptors to load when database is restarted */
4387  fdes.vacuum_data.vpid_first = first_page_vpid;
4388  error_code = file_descriptor_update (thread_p, vacuum_data_vfid, &fdes);
4389  if (error_code != NO_ERROR)
4390  {
4391  ASSERT_ERROR ();
4392  return error_code;
4393  }
4394 
4395  vacuum_init_data_page_with_last_blockid (thread_p, data_page, 0);
4396  vacuum_unfix_data_page (thread_p, data_page);
4397 
4398  return NO_ERROR;
4399 }
4400 
4401 /*
4402  * vacuum_data_initialize_new_page () - Create new vacuum data page.
4403  *
4404  * return : Void.
4405  * thread_p (in) : Thread entry.
4406  * data_page (in) : New vacuum data page pointer.
4407  * first_blockid (in) : Starting blockid.
4408  */
4409 static void
4411 {
4412  memset (data_page, 0, DB_PAGESIZE);
4413 
4414  VPID_SET_NULL (&data_page->next_page);
4415  data_page->index_unvacuumed = 0;
4416  data_page->index_free = 0;
4417 
4418  pgbuf_set_page_ptype (thread_p, (PAGE_PTR) data_page, PAGE_VACUUM_DATA);
4419 
4420  vacuum_er_log (VACUUM_ER_LOG_VACUUM_DATA, "Initialized " PGBUF_PAGE_STATE_MSG ("vacuum data page"),
4421  PGBUF_PAGE_STATE_ARGS ((PAGE_PTR) data_page));
4422 }
4423 
4424 /*
4425  * vacuum_rv_redo_initialize_data_page () - Redo initialize vacuum data page.
4426  *
4427  * return : NO_ERROR.
4428  * thread_p (in) : Thread entry.
4429  * rcv (in) : Recovery data.
4430  */
4431 int
4433 {
4434  VACUUM_DATA_PAGE *data_page = (VACUUM_DATA_PAGE *) rcv->pgptr;
4436 
4437  assert (data_page != NULL);
4438  assert (rcv->length == sizeof (last_blockid));
4439  last_blockid = *((VACUUM_LOG_BLOCKID *) rcv->data);
4440 
4441  vacuum_data_initialize_new_page (thread_p, data_page);
4442  data_page->data->blockid = last_blockid;
4443 
4444  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
4445  return NO_ERROR;
4446 }
4447 
4448 /*
4449  * vacuum_create_file_for_dropped_files () - Create a disk file to track dropped files for vacuum.
4450  *
4451  * return : Error code.
4452  * thread_p (in) : Thread entry.
4453  * vacuum_data_vfid (out) : Created file VFID.
4454  */
4455 int
4456 vacuum_create_file_for_dropped_files (THREAD_ENTRY * thread_p, VFID * dropped_files_vfid)
4457 {
4458  VPID first_page_vpid;
4459  VACUUM_DROPPED_FILES_PAGE *dropped_files_page = NULL;
4460  PAGE_TYPE ptype = PAGE_DROPPED_FILES;
4461  int error_code = NO_ERROR;
4462 
4463  /* Create disk file to keep dropped files */
4464  error_code = file_create_with_npages (thread_p, FILE_DROPPED_FILES, 1, NULL, dropped_files_vfid);
4465  if (error_code != NO_ERROR)
4466  {
4467  ASSERT_ERROR ();
4468  return error_code;
4469  }
4470  error_code = file_alloc_sticky_first_page (thread_p, dropped_files_vfid, file_init_page_type, &ptype,
4471  &first_page_vpid, (PAGE_PTR *) (&dropped_files_page));
4472  if (error_code != NO_ERROR)
4473  {
4474  ASSERT_ERROR ();
4475  return error_code;
4476  }
4477  if (dropped_files_page == NULL)
4478  {
4479  assert (false);
4480  return ER_FAILED;
4481  }
4482 
4483  /* Initialize dropped files */
4484  /* Pack VPID of next page as NULL OID and count as 0 */
4485  VPID_SET_NULL (&dropped_files_page->next_page);
4486  dropped_files_page->n_dropped_files = 0;
4487 
4488  pgbuf_set_page_ptype (thread_p, (PAGE_PTR) dropped_files_page, PAGE_DROPPED_FILES);
4489 
4490  /* Set dirty page and free */
4491  vacuum_set_dirty_dropped_entries_page (thread_p, dropped_files_page, FREE);
4492 
4493  return NO_ERROR;
4494 }
4495 
4496 /*
4497  * vacuum_is_work_in_progress () - Returns true if there are any vacuum jobs running.
4498  *
4499  * return : True if there is any job in progress, false otherwise.
4500  * thread_p (in) : Thread entry.
4501  *
4502  * NOTE: If this is not called by the auto vacuum master thread, it is
4503  * recommended to obtain lock on vacuum data first.
4504  */
4505 static bool
4507 {
4508 #if defined (SERVER_MODE)
4509  int i;
4510 
4511  for (i = 0; i < VACUUM_MAX_WORKER_COUNT; i++)
4512  {
4513  if (vacuum_Workers[i].state != VACUUM_WORKER_STATE_INACTIVE)
4514  {
4515  return true;
4516  }
4517  }
4518 
4519  /* No running jobs, return false */
4520  return false;
4521 #else // not SERVER_MODE = SA_MODE
4522  return false;
4523 #endif // not SERVER_MODE = SA_MODE
4524 }
4525 
4526 /*
4527  * vacuum_data_mark_finished () - Mark blocks already vacuumed (or interrupted).
4528  *
4529  * return : Void.
4530  * thread_p (in) : Thread entry.
4531  */
4532 static void
4534 {
4535 #define TEMP_BUFFER_SIZE VACUUM_FINISHED_JOB_QUEUE_CAPACITY
4536  VACUUM_LOG_BLOCKID finished_blocks[TEMP_BUFFER_SIZE];
4538  VACUUM_LOG_BLOCKID page_unvacuumed_blockid;
4539  VACUUM_LOG_BLOCKID page_free_blockid;
4540  VACUUM_DATA_PAGE *data_page = NULL;
4541  VACUUM_DATA_PAGE *prev_data_page = NULL;
4542  VACUUM_DATA_ENTRY *data = NULL;
4543  VACUUM_DATA_ENTRY *page_unvacuumed_data = NULL;
4544  INT16 n_finished_blocks = 0;
4545  INT16 index = 0;
4546  INT16 page_start_index = 0;
4547  VPID next_vpid = VPID_INITIALIZER;
4548 
4549  /* Consume finished block ID's from queue. */
4550  /* Stop if too many blocks have been collected (> TEMP_BUFFER_SIZE). */
4551  while (n_finished_blocks < TEMP_BUFFER_SIZE
4552  && vacuum_Finished_job_queue->consume (finished_blocks[n_finished_blocks]))
4553  {
4554  /* Increment consumed finished blocks. */
4555  vacuum_er_log (VACUUM_ER_LOG_VACUUM_DATA, "Consumed from finished job queue %lld (flags %lld).",
4556  (long long int) VACUUM_BLOCKID_WITHOUT_FLAGS (finished_blocks[n_finished_blocks]),
4557  VACUUM_BLOCKID_GET_FLAGS (finished_blocks[n_finished_blocks]));
4558  ++n_finished_blocks;
4559  }
4560  if (n_finished_blocks == 0)
4561  {
4562  /* No blocks. */
4563  return;
4564  }
4565  /* Sort consumed blocks. */
4566  qsort (finished_blocks, n_finished_blocks, sizeof (VACUUM_LOG_BLOCKID), vacuum_compare_blockids);
4567 
4568  /* Mark finished blocks in vacuum data. */
4569 
4570  /* Loop to mark all finished blocks in all affected pages. */
4571  index = 0;
4572  data_page = vacuum_Data.first_page;
4573  page_start_index = 0;
4574  assert (data_page->index_unvacuumed >= 0);
4575  page_unvacuumed_data = data_page->data + data_page->index_unvacuumed;
4576  page_unvacuumed_blockid = page_unvacuumed_data->get_blockid ();
4577  page_free_blockid = page_unvacuumed_blockid + (data_page->index_free - data_page->index_unvacuumed);
4578  assert (page_free_blockid == data_page->data[data_page->index_free - 1].get_blockid () + 1);
4579  while (true)
4580  {
4581  /* Loop until all blocks from current pages are marked. */
4582  while ((index < n_finished_blocks)
4583  && ((blockid = VACUUM_BLOCKID_WITHOUT_FLAGS (finished_blocks[index])) < page_free_blockid))
4584  {
4585  /* Update status for block. */
4586  data = page_unvacuumed_data + (blockid - page_unvacuumed_blockid);
4587  assert (data->get_blockid () == blockid);
4588  assert (data->is_job_in_progress ());
4589  if (VACUUM_BLOCK_STATUS_IS_VACUUMED (finished_blocks[index]))
4590  {
4591  /* Block has been vacuumed. */
4592  data->set_vacuumed ();
4593 
4595  "Mark block %lld as vacuumed.", (long long int) data->get_blockid ());
4596  }
4597  else
4598  {
4599  /* Block was not completely vacuumed. Job was interrupted. */
4600  data->set_interrupted ();
4601 
4603  "Mark block %lld as interrupted.", (long long int) data->get_blockid ());
4604  }
4605  index++;
4606  }
4607  /* Finished marking blocks. */
4608 
4609  if (index == page_start_index)
4610  {
4611  /* No changes in page. Nothing to do. */
4612  /* Fall through. */
4613  }
4614  else
4615  {
4616  /* Some blocks in page were changed. */
4617 
4618  /* Update index_unvacuumed. */
4619  while (data_page->index_unvacuumed < data_page->index_free && page_unvacuumed_data->is_vacuumed ())
4620  {
4621  page_unvacuumed_data++;
4622  data_page->index_unvacuumed++;
4623  }
4624 
4625  if (data_page->index_unvacuumed == data_page->index_free)
4626  {
4627  /* Nothing left in page to be vacuumed. */
4628 
4629  vacuum_data_empty_page (thread_p, prev_data_page, &data_page);
4630  /* Should have advanced on next page. */
4631  if (data_page == NULL)
4632  {
4633  /* No next page */
4634  if (prev_data_page != NULL)
4635  {
4636  vacuum_unfix_data_page (thread_p, prev_data_page);
4637  }
4638  if (n_finished_blocks > index)
4639  {
4640  assert (false);
4642  "Finished blocks not found in vacuum data!!!!");
4643  return;
4644  }
4645  else
4646  {
4647  /* Break loop. */
4648  break;
4649  }
4650  }
4651  else
4652  {
4653  /* Continue with new page. */
4654  page_start_index = index;
4655  assert (data_page->index_unvacuumed >= 0);
4656  page_unvacuumed_data = data_page->data + data_page->index_unvacuumed;
4657  page_unvacuumed_blockid = page_unvacuumed_data->get_blockid ();
4658  page_free_blockid = page_unvacuumed_blockid + (data_page->index_free - data_page->index_unvacuumed);
4659  continue;
4660  }
4661  }
4662  else
4663  {
4664  /* Page still has some data. */
4665 
4666  if (VPID_ISNULL (&data_page->next_page))
4667  {
4668  /* We remove first blocks that have been vacuumed. */
4669  if (data_page->index_unvacuumed > 0)
4670  {
4671  /* Relocate everything at the start of the page. */
4672  memmove (data_page->data, data_page->data + data_page->index_unvacuumed,
4673  (data_page->index_free - data_page->index_unvacuumed) * sizeof (VACUUM_DATA_ENTRY));
4674  data_page->index_free -= data_page->index_unvacuumed;
4675  data_page->index_unvacuumed = 0;
4676  }
4677  }
4678 
4679  /* Log changes. */
4680  log_append_redo_data2 (thread_p, RVVAC_DATA_FINISHED_BLOCKS, NULL, (PAGE_PTR) data_page, 0,
4681  (index - page_start_index) * sizeof (VACUUM_LOG_BLOCKID),
4682  &finished_blocks[page_start_index]);
4683  vacuum_set_dirty_data_page (thread_p, data_page, DONT_FREE);
4684  }
4685  }
4686 
4687  if (prev_data_page != NULL)
4688  {
4689  vacuum_unfix_data_page (thread_p, prev_data_page);
4690  }
4691  if (index == n_finished_blocks)
4692  {
4693  /* All finished blocks have been consumed. */
4694  vacuum_unfix_data_page (thread_p, data_page);
4695  break;
4696  }
4697  if (VPID_ISNULL (&data_page->next_page))
4698  {
4699  assert (false);
4700  vacuum_er_log_error (VACUUM_ER_LOG_VACUUM_DATA, "%s", "Finished blocks not found in vacuum data!!!!");
4701  vacuum_unfix_data_page (thread_p, data_page);
4702  return;
4703  }
4704 
4705  prev_data_page = data_page;
4706  VPID_COPY (&next_vpid, &data_page->next_page);
4707  data_page = vacuum_fix_data_page (thread_p, &next_vpid);
4708  if (data_page == NULL)
4709  {
4710  assert_release (false);
4711  vacuum_unfix_data_page (thread_p, prev_data_page);
4712  return;
4713  }
4714  page_start_index = index;
4715  assert (data_page->index_unvacuumed >= 0);
4716  page_unvacuumed_data = data_page->data + data_page->index_unvacuumed;
4717  page_unvacuumed_blockid = page_unvacuumed_data->get_blockid ();
4718  page_free_blockid = page_unvacuumed_blockid + (data_page->index_free - data_page->index_unvacuumed);
4719  }
4720  assert (prev_data_page == NULL);
4721 
4722  /* We need to update vacuum_Data.keep_from_log_pageid in case archives must be purged. */
4724 
4725  VACUUM_VERIFY_VACUUM_DATA (thread_p);
4726 #if !defined (NDEBUG)
4728 #endif /* !NDEBUG */
4729 
4730 #undef TEMP_BUFFER_SIZE
4731 }
4732 
4733 /*
4734  * vacuum_data_empty_page () - Handle empty vacuum data page.
4735  *
4736  * return : Void.
4737  * thread_p (in) : Thread entry.
4738  * prev_data_page (in) : Previous vacuum data page.
4739  * data_page (in/out) : Empty page as input, prev page as output..
4740  */
4741 static void
4742 vacuum_data_empty_page (THREAD_ENTRY * thread_p, VACUUM_DATA_PAGE * prev_data_page, VACUUM_DATA_PAGE ** data_page)
4743 {
4744  FILE_DESCRIPTORS fdes_update;
4745 
4746  /* We can have three expected cases here:
4747  * 1. This is the last page. We won't deallocate, just reset the page (even if it is also first page).
4748  * 2. This is the first page and there are other pages too (case #1 covers first page = last page case).
4749  * We will deallocate the page and update the first page.
4750  * 3. Page is not first and is not last. It must be deallocated.
4751  */
4752  assert (data_page != NULL && *data_page != NULL);
4753  assert ((*data_page)->index_unvacuumed == (*data_page)->index_free);
4754 
4755  if (*data_page == vacuum_Data.last_page)
4756  {
4757  /* Case 1. */
4758  /* Reset page. */
4759  vacuum_init_data_page_with_last_blockid (thread_p, *data_page, vacuum_Data.get_last_blockid ());
4760 
4762  "Last page, vpid = %d|%d, is empty and was reset. %s",
4763  pgbuf_get_vpid_ptr ((PAGE_PTR) (*data_page))->volid,
4764  pgbuf_get_vpid_ptr ((PAGE_PTR) (*data_page))->pageid,
4765  vacuum_Data.first_page == vacuum_Data.last_page ?
4766  "This is also first page." : "This is different from first page.");
4767 
4768  /* No next page */
4769  *data_page = NULL;
4770  }
4771  else if (*data_page == vacuum_Data.first_page)
4772  {
4773  /* Case 2. */
4774  VACUUM_DATA_PAGE *save_first_page = vacuum_Data.first_page;
4775  VPID save_first_vpid;
4776 
4777  *data_page = vacuum_fix_data_page (thread_p, &((*data_page)->next_page));
4778  if (*data_page == NULL)
4779  {
4780  /* Unexpected. */
4781  assert_release (false);
4782  vacuum_er_log_error (VACUUM_ER_LOG_VACUUM_DATA, "%s", "Invalid vacuum data next_page!!!");
4783  *data_page = vacuum_Data.first_page;
4784  return;
4785  }
4786 
4787  /* save vpid of first page */
4788  pgbuf_get_vpid ((PAGE_PTR) save_first_page, &save_first_vpid);
4789 
4790  log_sysop_start (thread_p);
4791 
4792  /* update file descriptor for persistence */
4793  fdes_update.vacuum_data.vpid_first = save_first_page->next_page;
4794  if (file_descriptor_update (thread_p, &vacuum_Data.vacuum_data_file, &fdes_update) != NO_ERROR)
4795  {
4796  assert_release (false);
4798  "Failed to update file descriptor!!!", save_first_vpid.volid, save_first_vpid.pageid);
4799  log_sysop_abort (thread_p);
4800 
4801  return;
4802  }
4803 
4804  /* change first_page */
4805  vacuum_Data.first_page = *data_page;
4806  vacuum_Data_load.vpid_first = save_first_page->next_page;
4807 
4808  /* Make sure the new first page is marked as dirty */
4809  vacuum_set_dirty_data_page (thread_p, vacuum_Data.first_page, DONT_FREE);
4810  /* Unfix old first page. */
4811  vacuum_unfix_data_page (thread_p, save_first_page);
4812  if (file_dealloc (thread_p, &vacuum_Data.vacuum_data_file, &save_first_vpid, FILE_VACUUM_DATA) != NO_ERROR)
4813  {
4814  assert_release (false);
4816  "Failed to deallocate first page from vacuum data - %d|%d!!!",
4817  save_first_vpid.volid, save_first_vpid.pageid);
4818  log_sysop_abort (thread_p);
4819 
4820  /* Revert first page change
4821  * - this is just to handle somehow the case in release. Should never happen anyway.
4822  */
4823  save_first_page = vacuum_Data.first_page;
4824  vacuum_Data.first_page = vacuum_fix_data_page (thread_p, &save_first_vpid);
4825  vacuum_Data_load.vpid_first = save_first_vpid;
4826  vacuum_unfix_data_page (thread_p, save_first_page);
4827  *data_page = vacuum_Data.first_page;
4828  return;
4829  }
4830 
4831  log_sysop_commit (thread_p);
4832 
4833  vacuum_er_log (VACUUM_ER_LOG_VACUUM_DATA, "Changed first VPID from %d|%d to %d|%d.",
4834  VPID_AS_ARGS (&save_first_vpid), VPID_AS_ARGS (&fdes_update.vacuum_data.vpid_first));
4835  }
4836  else
4837  {
4838  /* Case 3 */
4839  VPID save_page_vpid = VPID_INITIALIZER;
4840  VPID save_next_vpid = VPID_INITIALIZER;
4841 
4842  assert (*data_page != vacuum_Data.first_page && *data_page != vacuum_Data.last_page);
4843 
4844  /* We must have prev_data_page. */
4845  if (prev_data_page == NULL)
4846  {
4847  assert_release (false);
4848  vacuum_er_log_error (VACUUM_ER_LOG_VACUUM_DATA, "%s", "No previous data page is unexpected!!!");
4849  vacuum_unfix_data_page (thread_p, *data_page);
4850  return;
4851  }
4852 
4853  log_sysop_start (thread_p);
4854 
4855  /* Save link to next page. */
4856  VPID_COPY (&save_next_vpid, &(*data_page)->next_page);
4857  /* Save data page VPID. */
4858  pgbuf_get_vpid ((PAGE_PTR) (*data_page), &save_page_vpid);
4859  /* Unfix data page. */
4860  vacuum_unfix_data_page (thread_p, *data_page);
4861  /* Deallocate data page. */
4862  if (file_dealloc (thread_p, &vacuum_Data.vacuum_data_file, &save_page_vpid, FILE_VACUUM_DATA) != NO_ERROR)
4863  {
4864  assert_release (false);
4866  "Failed to deallocate page from vacuum data - %d|%d!!!",
4867  save_page_vpid.volid, save_page_vpid.pageid);
4868  log_sysop_abort (thread_p);
4869  return;
4870  }
4871 
4872  /* Update link in previous page. */
4873  log_append_undoredo_data2 (thread_p, RVVAC_DATA_SET_LINK, NULL, (PAGE_PTR) prev_data_page, 0, sizeof (VPID),
4874  sizeof (VPID), &prev_data_page->next_page, &save_next_vpid);
4875  VPID_COPY (&prev_data_page->next_page, &save_next_vpid);
4876  vacuum_set_dirty_data_page (thread_p, prev_data_page, DONT_FREE);
4877 
4878  log_sysop_commit (thread_p);
4879 
4880  assert (*data_page == NULL);
4881  /* Move *data_page to next page. */
4882  assert (!VPID_ISNULL (&prev_data_page->next_page));
4883  *data_page = vacuum_fix_data_page (thread_p, &prev_data_page->next_page);
4884  assert (*data_page != NULL);
4885  }
4886 }
4887 
4888 /*
4889  * vacuum_rv_redo_data_finished () - Redo setting vacuum jobs as finished (or interrupted).
4890  *
4891  * return : NO_ERROR.
4892  * thread_p (in) : Thread entry.
4893  * rcv (in) : Recovery data.
4894  */
4895 int
4897 {
4898  const char *rcv_data_ptr = rcv->data;
4900  VACUUM_LOG_BLOCKID blockid_with_flags;
4901  VACUUM_LOG_BLOCKID page_unvacuumed_blockid;
4902  VACUUM_DATA_PAGE *data_page = (VACUUM_DATA_PAGE *) rcv->pgptr;
4903  int data_index;
4904 
4905  assert (data_page != NULL);
4906 
4907  page_unvacuumed_blockid = data_page->data[data_page->index_unvacuumed].get_blockid ();
4908 
4909  if (rcv_data_ptr != NULL)
4910  {
4911  while (rcv_data_ptr < (char *) rcv->data + rcv->length)
4912  {
4913  assert (rcv_data_ptr + sizeof (VACUUM_LOG_BLOCKID) <= rcv->data + rcv->length);
4914  blockid_with_flags = *((VACUUM_LOG_BLOCKID *) rcv_data_ptr);
4915  blockid = VACUUM_BLOCKID_WITHOUT_FLAGS (blockid_with_flags);
4916 
4917  assert (blockid >= page_unvacuumed_blockid);
4918  data_index = (int) (blockid - page_unvacuumed_blockid) + data_page->index_unvacuumed;
4919  assert (data_index < data_page->index_free);
4920 
4921  if (VACUUM_BLOCK_STATUS_IS_VACUUMED (blockid_with_flags))
4922  {
4923  data_page->data[data_index].set_vacuumed ();
4924  }
4925  else
4926  {
4927  data_page->data[data_index].set_interrupted ();
4928  }
4929 
4930  rcv_data_ptr += sizeof (VACUUM_LOG_BLOCKID);
4931  }
4932  assert (rcv_data_ptr == rcv->data + rcv->length);
4933  }
4934 
4935  while (data_page->index_unvacuumed < data_page->index_free
4936  && data_page->data[data_page->index_unvacuumed].is_vacuumed ())
4937  {
4938  data_page->index_unvacuumed++;
4939  }
4940  if (VPID_ISNULL (&data_page->next_page) && data_page->index_unvacuumed > 0)
4941  {
4942  /* Remove all vacuumed blocks. */
4943  if (data_page->index_free > data_page->index_unvacuumed)
4944  {
4945  memmove (data_page->data, data_page->data + data_page->index_unvacuumed,
4946  (data_page->index_free - data_page->index_unvacuumed) * sizeof (VACUUM_DATA_ENTRY));
4947  }
4948  data_page->index_free -= data_page->index_unvacuumed;
4949  data_page->index_unvacuumed = 0;
4950  }
4951 
4952  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
4953  return NO_ERROR;
4954 }
4955 
4956 /*
4957  * vacuum_rv_redo_data_finished_dump () - Dump redo for setting vacuum jobs finished or interrupted.
4958  *
4959  * return : Void.
4960  * fp (in) : Output target.
4961  * length (in) : Recovery data length.
4962  * data (in) : Recovery data.
4963  */
4964 void
4965 vacuum_rv_redo_data_finished_dump (FILE * fp, int length, void *data)
4966 {
4967  const char *rcv_data_ptr = (const char *) data;
4969  VACUUM_LOG_BLOCKID blockid_with_flags;
4970 
4971  if (rcv_data_ptr != NULL)
4972  {
4973  fprintf (fp, " Set block status for vacuum data to : \n");
4974  while (rcv_data_ptr < (char *) data + length)
4975  {
4976  assert (rcv_data_ptr + sizeof (VACUUM_LOG_BLOCKID) <= (char *) data + length);
4977 
4978  blockid_with_flags = *((VACUUM_LOG_BLOCKID *) rcv_data_ptr);
4979  blockid = VACUUM_BLOCKID_WITHOUT_FLAGS (blockid_with_flags);
4980 
4981  if (VACUUM_BLOCK_STATUS_IS_VACUUMED (blockid_with_flags))
4982  {
4983  fprintf (fp, " Blockid %lld: vacuumed. \n", (long long int) blockid);
4984  }
4985  else
4986  {
4987  fprintf (fp, " Blockid %lld: available and interrupted. \n", (long long int) blockid);
4988  }
4989  rcv_data_ptr += sizeof (VACUUM_LOG_BLOCKID);
4990  }
4991  }
4992 }
4993 
4994 /*
4995  * vacuum_consume_buffer_log_blocks () - Append new blocks from log block data from buffer (if any).
4996  *
4997  * return : error code.
4998  * thread_p (in) : Thread entry.
4999  *
5000  * NOTE: In order to avoid synchronizing access on vacuum data for log
5001  * manager, information on new blocks is appended into a lock-free
5002  * buffer. This information can be later obtained and appended to
5003  * vacuum data.
5004  */
5005 int
5007 {
5008 #define MAX_PAGE_MAX_DATA_ENTRIES (IO_MAX_PAGE_SIZE / sizeof (VACUUM_DATA_ENTRY))
5009  VACUUM_DATA_ENTRY consumed_data;
5010  VACUUM_DATA_PAGE *data_page = NULL;
5011  VACUUM_DATA_ENTRY *page_free_data = NULL;
5012  VACUUM_DATA_ENTRY *save_page_free_data = NULL;
5013  VACUUM_LOG_BLOCKID next_blockid;
5014  PAGE_TYPE ptype = PAGE_VACUUM_DATA;
5015  bool is_sysop = false;
5016  bool was_vacuum_data_empty = false;
5017 
5018  int error_code = NO_ERROR;
5019 
5021  {
5022  return NO_ERROR;
5023  }
5024  if (vacuum_Block_data_buffer == NULL)
5025  {
5026  /* Not initialized */
5027  assert (false);
5028  return NO_ERROR;
5029  }
5030 
5031  if (vacuum_Block_data_buffer->is_empty ())
5032  {
5033  /* empty */
5034  if (vacuum_is_empty ())
5035  {
5036  // don't let vacuum data go too far back; try to update last blockid
5037  // need to make sure that log_Gl.hdr.does_block_need_vacuum is not true; safest choice is to also hold
5038  // log_Gl.prior_info.prior_lsa_mutex while doing it
5039 
5041  {
5042  // cannot update
5043  return NO_ERROR;
5044  }
5045 
5046  // *INDENT-OFF*
5047  std::unique_lock<std::mutex> ulock { log_Gl.prior_info.prior_lsa_mutex };
5048  // *INDENT-ON*
5049  // need to double check log_Gl.hdr.does_block_need_vacuum while holding mutex
5051  {
5052  // cannot update
5053  return NO_ERROR;
5054  }
5055  // check buffer again, it is possible that a new block was added
5056  if (vacuum_Block_data_buffer->is_empty ())
5057  {
5058  // update last blockid
5060  ulock.unlock (); // unlock after reading prior_lsa
5061 
5062  const VACUUM_LOG_BLOCKID LOG_BLOCK_TRAILING_DIFF = 2;
5063  VACUUM_LOG_BLOCKID log_blockid = vacuum_get_log_blockid (log_lsa.pageid);
5064 
5065  if (log_blockid > vacuum_Data.get_last_blockid () + LOG_BLOCK_TRAILING_DIFF)
5066  {
5067  vacuum_Data.set_last_blockid (log_blockid - LOG_BLOCK_TRAILING_DIFF);
5070  vacuum_er_log (VACUUM_ER_LOG_VACUUM_DATA, "update last_blockid to %lld",
5071  (long long int) vacuum_Data.get_last_blockid ());
5072  }
5073  return NO_ERROR;
5074  }
5075  else
5076  {
5077  // fall through to consume buffer
5078  }
5079  }
5080  else
5081  {
5082  // last blockid remains last in vacuum data
5083  return NO_ERROR;
5084  }
5085  }
5086 
5087  if (vacuum_Data.last_page == NULL)
5088  {
5089  assert_release (false);
5090  return ER_FAILED;
5091  }
5092 
5093  data_page = vacuum_Data.last_page;
5094  page_free_data = data_page->data + data_page->index_free;
5095  save_page_free_data = page_free_data;
5096 
5097  was_vacuum_data_empty = vacuum_is_empty ();
5098 
5099  while (vacuum_Block_data_buffer->consume (consumed_data))
5100  {
5101  assert (vacuum_Data.get_last_blockid () < consumed_data.blockid);
5102 
5103  /* Add all blocks after vacuum_Data.last_blockid to consumed_data.blockid. */
5104  for (next_blockid = vacuum_Data.get_last_blockid () + 1; next_blockid <= consumed_data.blockid; next_blockid++)
5105  {
5106  if (data_page->index_free == vacuum_Data.page_data_max_count)
5107  {
5108  /* This page is full. */
5109  /* Append a new page to vacuum data. */
5110  VPID next_vpid = VPID_INITIALIZER;
5111  VACUUM_DATA_PAGE *save_last_page = NULL;
5112 
5113  /* Log changes in this page. */
5114  if (page_free_data > save_page_free_data)
5115  {
5117  (PGLENGTH) (save_page_free_data - data_page->data),
5118  CAST_BUFLEN (((char *) page_free_data)
5119  - (char *) save_page_free_data), save_page_free_data);
5120 
5121  vacuum_set_dirty_data_page (thread_p, data_page, DONT_FREE);
5122  }
5123  else
5124  {
5125  /* No changes in current page. */
5126  }
5127 
5128  if (is_sysop)
5129  {
5130  // more than one page in one iteration, now that's a performance
5131  log_sysop_commit (thread_p);
5132  }
5133 
5134  log_sysop_start (thread_p);
5135  is_sysop = true;
5136 
5137  error_code = file_alloc (thread_p, &vacuum_Data.vacuum_data_file, file_init_page_type, &ptype, &next_vpid,
5138  (PAGE_PTR *) (&data_page));
5139  if (error_code != NO_ERROR)
5140  {
5141  /* Could not allocate new page. */
5143  "Could not allocate new page for vacuum data!!!!");
5144  assert_release (false);
5145  log_sysop_abort (thread_p);
5146  return error_code;
5147  }
5148  if (data_page == NULL)
5149  {
5150  assert_release (false);
5151  log_sysop_abort (thread_p);
5152  return ER_FAILED;
5153  }
5154  vacuum_init_data_page_with_last_blockid (thread_p, data_page, vacuum_Data.get_last_blockid ());
5155 
5156  /* Change link in last page. */
5157  VPID_COPY (&vacuum_Data.last_page->next_page, &next_vpid);
5159  0, sizeof (VPID), NULL, &next_vpid);
5160  save_last_page = vacuum_Data.last_page;
5161  vacuum_Data.last_page = data_page;
5162  vacuum_set_dirty_data_page (thread_p, save_last_page, FREE);
5163 
5164  // we cannot commit here. we should append some data blocks first.
5165 
5166  page_free_data = data_page->data + data_page->index_free;
5167  save_page_free_data = page_free_data;
5168  }
5169  assert (data_page->index_free < vacuum_Data.page_data_max_count);
5170 
5171  if (data_page->index_unvacuumed == data_page->index_free && next_blockid < consumed_data.blockid)
5172  {
5173  /* Page is empty. We don't want to add a new block that does not require vacuum. */
5174  assert (data_page->index_unvacuumed == 0);
5175  next_blockid = consumed_data.blockid - 1; // for will increment it to consumed_data.blockid
5176  continue;
5177  }
5178 
5179  page_free_data->blockid = next_blockid;
5180  if (next_blockid == consumed_data.blockid)
5181  {
5182  /* Copy block information from consumed data. */
5183  assert (page_free_data->is_available ()); // starts as available
5184  LSA_COPY (&page_free_data->start_lsa, &consumed_data.start_lsa);
5185  page_free_data->newest_mvccid = consumed_data.newest_mvccid;
5186  page_free_data->oldest_visible_mvccid = consumed_data.oldest_visible_mvccid;
5188 #if !defined (NDEBUG)
5189  /* Check that oldest_mvccid is not decreasing. */
5190  if (data_page->index_free > 0)
5191  {
5192  assert ((page_free_data - 1)->oldest_visible_mvccid <= page_free_data->oldest_visible_mvccid);
5193  assert ((page_free_data - 1)->get_blockid () + 1 == page_free_data->get_blockid ());
5194  }
5195 #endif /* !NDEBUG */
5196 
5198  "Add block %lld, start_lsa=%lld|%d, oldest_visible_mvccid=%llu, newest_mvccid=%llu. "
5199  "Hdr last blockid = %lld\n",
5200  (long long int) page_free_data->get_blockid (),
5201  (long long int) page_free_data->start_lsa.pageid, (int) page_free_data->start_lsa.offset,
5202  (unsigned long long int) page_free_data->oldest_visible_mvccid,
5203  (unsigned long long int) page_free_data->newest_mvccid,
5204  (long long int) log_Gl.hdr.vacuum_last_blockid);
5205  }
5206  else
5207  {
5208  /* Mark the blocks with no MVCC operations as already vacuumed. */
5209  page_free_data->set_vacuumed ();
5210  LSA_SET_NULL (&page_free_data->start_lsa);
5211  page_free_data->oldest_visible_mvccid = MVCCID_NULL;
5212  page_free_data->newest_mvccid = MVCCID_NULL;
5213 
5215  "Block %lld has no MVCC ops and is skipped (marked as vacuumed).", next_blockid);
5216  }
5217  vacuum_Data.set_last_blockid (next_blockid);
5218 
5219  /* Move to next free data */
5220  page_free_data++;
5221  data_page->index_free++;
5222  }
5223  }
5224 
5225  if (was_vacuum_data_empty)
5226  {
5228  }
5229 
5230  assert (data_page == vacuum_Data.last_page);
5231  if (save_page_free_data < page_free_data)
5232  {
5233  /* Log changes in current page. */
5235  (PGLENGTH) (save_page_free_data - data_page->data),
5236  CAST_BUFLEN (((char *) page_free_data) - (char *) save_page_free_data),
5237  save_page_free_data);
5238  if (is_sysop)
5239  {
5240  log_sysop_commit (thread_p);
5241  }
5242  vacuum_set_dirty_data_page (thread_p, data_page, DONT_FREE);
5243  }
5244  else
5245  {
5246  // no change
5247  if (is_sysop)
5248  {
5249  // invalid situation; don't leak sysop
5250  assert (false);
5251  log_sysop_commit (thread_p);
5252  }
5253  }
5254 
5255  VACUUM_VERIFY_VACUUM_DATA (thread_p);
5256 #if !defined (NDEBUG)
5258 #endif /* !NDEBUG */
5259 
5260  return NO_ERROR;
5261 }
5262 
5263 /*
5264  * vacuum_rv_undoredo_data_set_link () - Undoredo set link in vacuum data page.
5265  *
5266  * return : NO_ERROR.
5267  * thread_p (in) : Thread entry.
5268  * rcv (in) : Recovery data.
5269  */
5270 int
5272 {
5273  VACUUM_DATA_PAGE *data_page = (VACUUM_DATA_PAGE *) rcv->pgptr;
5274  VPID *next_vpid = (VPID *) rcv->data;
5275 
5276  assert (data_page != NULL);
5277 
5278  if (next_vpid == NULL)
5279  {
5280  /* NULL link */
5281  VPID_SET_NULL (&data_page->next_page);
5282  }
5283  else
5284  {
5285  assert (rcv->length == sizeof (*next_vpid));
5286  VPID_COPY (&data_page->next_page, next_vpid);
5287  }
5288  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
5289  return NO_ERROR;
5290 }
5291 
5292 /*
5293  * vacuum_rv_redo_data_set_link_dump () - Dump redo set link in vacuum data page
5294  *
5295  * return : Void.
5296  * fp (in) : Output target.
5297  * length (in) : Recovery data length.
5298  * data (in) : Recovery data.
5299  */
5300 void
5301 vacuum_rv_undoredo_data_set_link_dump (FILE * fp, int length, void *data)
5302 {
5303  if (data == NULL)
5304  {
5305  fprintf (fp, " Reset link in vacuum data page to -1|-1. \n");
5306  }
5307  else
5308  {
5309  fprintf (fp, " Set link in vacuum data page to %d|%d. \n", ((VPID *) data)->volid, ((VPID *) data)->pageid);
5310  }
5311 }
5312 
5313 /*
5314  * vacuum_rv_redo_append_data () - Redo append blocks to vacuum data.
5315  *
5316  * return : NO_ERROR.
5317  * thread_p (in) : Thread entry.
5318  * rcv (in) : Recovery data.
5319  */
5320 int
5322 {
5323  VACUUM_DATA_PAGE *data_page = (VACUUM_DATA_PAGE *) rcv->pgptr;
5324  int n_blocks = rcv->length / sizeof (VACUUM_DATA_ENTRY);
5325 
5326  assert (data_page != NULL);
5327  assert (rcv->length > 0);
5328  assert ((n_blocks * (int) sizeof (VACUUM_DATA_ENTRY)) == rcv->length);
5329  assert (rcv->offset == data_page->index_free);
5330 
5331  memcpy (data_page->data + rcv->offset, rcv->data, n_blocks * sizeof (VACUUM_DATA_ENTRY));
5332  data_page->index_free += n_blocks;
5333  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
5334  return NO_ERROR;
5335 }
5336 
5337 /*
5338  * vacuum_rv_redo_append_data_dump () - Dump redo append blocks to vacuum data.
5339  *
5340  * return : Void.
5341  * fp (in) : Output target.
5342  * length (in) : Recovery data length.
5343  * data (in) : Recovery data.
5344  */
5345 void
5346 vacuum_rv_redo_append_data_dump (FILE * fp, int length, void *data)
5347 {
5349 
5350  fprintf (fp, " Append log blocks to vacuum data : \n");
5351  vacuum_data_entry = (VACUUM_DATA_ENTRY *) data;
5352  while ((char *) vacuum_data_entry < (char *) data + length)
5353  {
5354  assert ((char *) (vacuum_data_entry + 1) <= (char *) data + length);
5355 
5356  fprintf (fp, " { Blockid = %lld, Start_Lsa = %lld|%d, Oldest_MVCCID = %llu, Newest_MVCCID = %llu } \n",
5357  (long long int) vacuum_data_entry->blockid, (long long int) vacuum_data_entry->start_lsa.pageid,
5358  (int) vacuum_data_entry->start_lsa.offset,
5359  (unsigned long long int) vacuum_data_entry->oldest_visible_mvccid,
5360  (unsigned long long int) vacuum_data_entry->newest_mvccid);
5361 
5362  vacuum_data_entry++;
5363  }
5364 }
5365 
5366 /*
5367  * vacuum_recover_lost_block_data () - If and when server crashed, the block data buffer may have not been empty.
5368  * These blocks must be recovered by processing MVCC op log records and must be
5369  * added back to vacuum data.
5370  *
5371  * return : Error code.
5372  * thread_p (in) : Thread entry.
5373  */
5374 static int
5376 {
5377  int error_code = NO_ERROR;
5378  char log_page_buf[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
5379  LOG_LSA log_lsa;
5381  LOG_PAGE *log_page_p = NULL;
5382  LOG_PAGEID stop_at_pageid;
5383  VACUUM_DATA_ENTRY data;
5384  LOG_DATA dummy_log_data;
5385  LOG_VACUUM_INFO vacuum_info;
5386  MVCCID mvccid;
5387  VACUUM_LOG_BLOCKID crt_blockid;
5388  LOG_LSA mvcc_op_log_lsa = LSA_INITIALIZER;
5389 
5391  "vacuum_recover_lost_block_data, lsa = %lld|%d, global_oldest_visible_mvccid = %llu",
5392  LSA_AS_ARGS (&vacuum_Data.recovery_lsa),
5393  (unsigned long long int) log_Gl.mvcc_table.get_global_oldest_visible ());
5394  if (LSA_ISNULL (&vacuum_Data.recovery_lsa))
5395  {
5396  /* No recovery was done. */
5397  return NO_ERROR;
5398  }
5399  /* Recovery was done. */
5400 
5401  /* Initialize log_page_p. */
5402  log_page_p = (LOG_PAGE *) PTR_ALIGN (log_page_buf, MAX_ALIGNMENT);
5403  log_page_p->hdr.logical_pageid = NULL_PAGEID;
5404  log_page_p->hdr.offset = NULL_OFFSET;
5405 
5407  {
5408  /* We need to search for an MVCC op log record to start recovering lost blocks. */
5410  "vacuum_recover_lost_block_data, log_Gl.hdr.mvcc_op_log_lsa is null ");
5411 
5412  LSA_COPY (&log_lsa, &vacuum_Data.recovery_lsa);
5413  /* todo: Find a better stopping point for this!! */
5414  /* Stop search if search reaches blocks already in vacuum data. */
5415  stop_at_pageid = VACUUM_LAST_LOG_PAGEID_IN_BLOCK (vacuum_Data.get_last_blockid ());
5416  while (log_lsa.pageid > stop_at_pageid)
5417  {
5418  if (log_page_p->hdr.logical_pageid != log_lsa.pageid)
5419  {
5420  /* Get log page. */
5421  error_code = logpb_fetch_page (thread_p, &log_lsa, LOG_CS_SAFE_READER, log_page_p);
5422  if (error_code != NO_ERROR)
5423  {
5424  ASSERT_ERROR ();
5425  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_recover_lost_block_data");
5426  return ER_FAILED;
5427  }
5428  }
5429  log_rec_header = *LOG_GET_LOG_RECORD_HEADER (log_page_p, &log_lsa);
5430  if (log_rec_header.type == LOG_MVCC_UNDO_DATA || log_rec_header.type == LOG_MVCC_UNDOREDO_DATA
5431  || log_rec_header.type == LOG_MVCC_DIFF_UNDOREDO_DATA)
5432  {
5433  LSA_COPY (&mvcc_op_log_lsa, &log_lsa);
5435  "vacuum_recover_lost_block_data, found mvcc op at lsa = %lld|%d ",
5436  LSA_AS_ARGS (&mvcc_op_log_lsa));
5437  break;
5438  }
5439  else if (log_rec_header.type == LOG_SYSOP_END)
5440  {
5441  /* we need to check if it is a logical MVCC undo */
5442  LOG_REC_SYSOP_END *sysop_end = NULL;
5443  LOG_LSA copy_lsa = log_lsa;
5444 
5445  LOG_READ_ADD_ALIGN (thread_p, sizeof (LOG_RECORD_HEADER), &copy_lsa, log_page_p);
5446  LOG_READ_ADVANCE_WHEN_DOESNT_FIT (thread_p, sizeof (LOG_REC_SYSOP_END), &copy_lsa, log_page_p);
5447  sysop_end = (LOG_REC_SYSOP_END *) (log_page_p->area + copy_lsa.offset);
5448  if (sysop_end->type == LOG_SYSOP_END_LOGICAL_MVCC_UNDO)
5449  {
5450  LSA_COPY (&mvcc_op_log_lsa, &log_lsa);
5452  "vacuum_recover_lost_block_data, found mvcc op at lsa = %lld|%d ",
5453  LSA_AS_ARGS (&mvcc_op_log_lsa));
5454  break;
5455  }
5456  }
5457  else if (log_rec_header.type == LOG_REDO_DATA)
5458  {
5459  /* is vacuum complete? */
5460  LOG_REC_REDO *redo = NULL;
5461  LOG_LSA copy_lsa = log_lsa;
5462 
5463  LOG_READ_ADD_ALIGN (thread_p, sizeof (LOG_RECORD_HEADER), &copy_lsa, log_page_p);
5464  LOG_READ_ADVANCE_WHEN_DOESNT_FIT (thread_p, sizeof (LOG_REC_REDO), &copy_lsa, log_page_p);
5465  redo = (LOG_REC_REDO *) (log_page_p->area + copy_lsa.offset);
5466  if (redo->data.rcvindex == RVVAC_COMPLETE)
5467  {
5468  /* stop looking */
5470  "vacuum_recover_lost_block_data, complete vacuum ");
5471  break;
5472  }
5473  }
5474 
5475  LSA_COPY (&log_lsa, &log_rec_header.back_lsa);
5476  }
5477  if (LSA_ISNULL (&mvcc_op_log_lsa))
5478  {
5479  /* Vacuum data was reached, so there is nothing to recover. */
5481  "vacuum_recover_lost_block_data, nothing to recovery ");
5482  return NO_ERROR;
5483  }
5484  }
5486  {
5487  /* Already in vacuum data. */
5489  "vacuum_recover_lost_block_data, mvcc_op_log_lsa %lld|%d is already in vacuum data "
5490  "(last blockid = %lld) ", LSA_AS_ARGS (&log_Gl.hdr.mvcc_op_log_lsa),
5491  (long long int) vacuum_Data.get_last_blockid ());
5493  return NO_ERROR;
5494  }
5495  else
5496  {
5497  LSA_COPY (&mvcc_op_log_lsa, &log_Gl.hdr.mvcc_op_log_lsa);
5498  }
5499  assert (!LSA_ISNULL (&mvcc_op_log_lsa));
5500 
5501  // reset header; info will be restored if last block is not consumed.
5503 
5505  "vacuum_recover_lost_block_data, start recovering from %lld|%d ", LSA_AS_ARGS (&mvcc_op_log_lsa));
5506 
5507  /* Start recovering blocks. */
5508  crt_blockid = vacuum_get_log_blockid (mvcc_op_log_lsa.pageid);
5509  LSA_COPY (&log_lsa, &mvcc_op_log_lsa);
5510 
5511  // stack used to produce in reverse order data for vacuum_Block_data_buffer circular queue
5512  /* *INDENT-OFF* */
5513  std::stack<VACUUM_DATA_ENTRY> vacuum_block_data_buffer_stack;
5514  /* *INDENT-ON* */
5515 
5516  /* we don't reset data.oldest_visible_mvccid between blocks. we need to maintain ordered oldest_visible_mvccid's, and
5517  * if a block + 1 MVCCID is smaller than all MVCCID's in block, then it must have been active (and probably suspended)
5518  * while block was logged. therefore, we must keep it. */
5520  while (crt_blockid > vacuum_Data.get_last_blockid ())
5521  {
5522  /* Stop recovering this block when previous block is reached. */
5523  stop_at_pageid = VACUUM_FIRST_LOG_PAGEID_IN_BLOCK (crt_blockid) - 1;
5524  /* Initialize this block data. */
5525  data.blockid = crt_blockid;
5526  LSA_COPY (&data.start_lsa, &log_lsa);
5527  /* inherit data.oldest_visible_mvccid */
5528  data.newest_mvccid = MVCCID_NULL;
5529  /* Loop through MVCC op log records in this block. */
5530  while (log_lsa.pageid > stop_at_pageid)
5531  {
5532  if (log_page_p->hdr.logical_pageid != log_lsa.pageid)
5533  {
5534  error_code = logpb_fetch_page (thread_p, &log_lsa, LOG_CS_SAFE_READER, log_page_p);
5535  if (error_code != NO_ERROR)
5536  {
5537  ASSERT_ERROR ();
5538  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_recover_lost_block_data");
5539  return ER_FAILED;
5540  }
5541  }
5542  /* Process this log record. */
5543  error_code =
5544  vacuum_process_log_record (thread_p, NULL, &log_lsa, log_page_p, &dummy_log_data, &mvccid, NULL, NULL,
5545  &vacuum_info, NULL, true);
5546  if (error_code != NO_ERROR)
5547  {
5548  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_recover_lost_block_data");
5549  return error_code;
5550  }
5551  /* Update oldest/newest MVCCID. */
5553  {
5554  data.oldest_visible_mvccid = mvccid;
5555  }
5556  if (data.newest_mvccid == MVCCID_NULL || MVCC_ID_PRECEDES (data.newest_mvccid, mvccid))
5557  {
5558  data.newest_mvccid = mvccid;
5559  }
5560  LSA_COPY (&log_lsa, &vacuum_info.prev_mvcc_op_log_lsa);
5561  }
5562 
5564  {
5568  log_Gl.hdr.mvcc_op_log_lsa = mvcc_op_log_lsa;
5569 
5571  "Restore log global cached info: \n\t mvcc_op_log_lsa = %lld|%d \n"
5572  "\t oldest_visible_mvccid = %llu \n\t newest_block_mvccid = %llu ",
5574  (unsigned long long int) log_Gl.hdr.oldest_visible_mvccid,
5575  (unsigned long long int) log_Gl.hdr.newest_block_mvccid);
5576  }
5577  else
5578  {
5579  vacuum_block_data_buffer_stack.push (data);
5580  }
5581 
5582  crt_blockid = vacuum_get_log_blockid (log_lsa.pageid);
5583  }
5584 
5585  /* Produce recovered blocks. */
5586  while (!vacuum_block_data_buffer_stack.empty ())
5587  {
5588  vacuum_Block_data_buffer->produce (vacuum_block_data_buffer_stack.top ());
5589  vacuum_block_data_buffer_stack.pop ();
5590  }
5591 
5592  /* Consume recovered blocks. */
5593  thread_type tt;
5594  vacuum_convert_thread_to_master (thread_p, tt);
5595  error_code = vacuum_consume_buffer_log_blocks (thread_p);
5596  if (error_code != NO_ERROR)
5597  {
5598  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_recover_lost_block_data");
5599  }
5600  vacuum_restore_thread (thread_p, tt);
5601 
5602  return error_code;
5603 }
5604 
5605 /*
5606  * vacuum_get_log_blockid () - Compute blockid for given log pageid.
5607  *
5608  * return : Log blockid.
5609  * pageid (in) : Log pageid.
5610  */
5613 {
5614  return ((pageid == NULL_PAGEID) ? VACUUM_NULL_LOG_BLOCKID : (pageid / vacuum_Data.log_block_npages));
5615 }
5616 
5617 /*
5618  * vacuum_min_log_pageid_to_keep () - Get the minimum log pageid required to execute vacuum.
5619  * See vacuum_update_keep_from_log_pageid.
5620  *
5621  * return : LOG Page identifier for first log page that should be processed by vacuum.
5622  * thread_p (in) : Thread entry.
5623  */
5624 LOG_PAGEID
5626 {
5627  /* Return first pageid from first block in vacuum data table */
5629  {
5630  /* this is for debug, suppress log archive purging. */
5631  return 0;
5632  }
5633 #if defined (SA_MODE)
5634  if (vacuum_Data.is_vacuum_complete)
5635  {
5636  /* no log archives are needed for vacuum any longer. */
5637  return NULL_PAGEID;
5638  }
5639 #endif /* defined (SA_MODE) */
5640  return vacuum_Data.keep_from_log_pageid;
5641 }
5642 
5643 /*
5644  * vacuum_is_safe_to_remove_archives () - Is safe to remove archives? Not until keep_from_log_pageid has been updated
5645  * at least once.
5646  *
5647  * return : is safe?
5648  */
5649 bool
5651 {
5652  return vacuum_Data.is_archive_removal_safe;
5653 }
5654 
5655 /*
5656  * vacuum_rv_redo_start_job () - Redo start vacuum job.
5657  *
5658  * return : Error code.
5659  * thread_p (in) : Thread entry.
5660  * rcv (in) : Recovery data.
5661  */
5662 int
5664 {
5665  VACUUM_DATA_PAGE *data_page = (VACUUM_DATA_PAGE *) rcv->pgptr;
5666 
5667  assert (data_page != NULL);
5668  assert (rcv->offset >= 0 && rcv->offset < vacuum_Data.page_data_max_count);
5669 
5670  /* Start job is marked by in progress flag in blockid. */
5671  data_page->data[rcv->offset].set_job_in_progress ();
5672 
5673  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
5674 
5675  return NO_ERROR;
5676 }
5677 
5678 /*
5679  * vacuum_update_keep_from_log_pageid () - Update vacuum_Data.keep_from_log_pageid.
5680  *
5681  * return : Void.
5682  * thread_p (in) : Thread entry.
5683  */
5684 static void
5686 {
5687  /* vacuum_Data.keep_from_log_pageid should keep first page in first block not yet vacuumed, so that archive purger
5688  * does not remove log required for vacuum.
5689  * If vacuum data is empty, then all blocks until (and including) vacuum_Data.last_blockid have been
5690  * vacuumed, and first page belonging to next block must be preserved (this is most likely in the active area of the
5691  * log, but not always).
5692  * If vacuum data is not empty, then we need to preserve the log starting with the first page of first unvacuumed
5693  * block.
5694  */
5695  if (vacuum_is_empty ())
5696  {
5697  // keep starting with next after last_blockid ()
5698  vacuum_Data.keep_from_log_pageid = VACUUM_FIRST_LOG_PAGEID_IN_BLOCK (vacuum_Data.get_last_blockid () + 1);
5699  }
5700  else
5701  {
5703  }
5704 
5706  "Update keep_from_log_pageid to %lld ", (long long int) vacuum_Data.keep_from_log_pageid);
5707 
5708  if (!vacuum_Data.is_archive_removal_safe)
5709  {
5710  /* remove archives that have been blocked up to this point. */
5711  vacuum_Data.is_archive_removal_safe = true;
5712  }
5713 }
5714 
5715 /*
5716  * vacuum_compare_dropped_files () - Compare two file identifiers.
5717  *
5718  * return : Positive if the first argument is bigger, negative if it is smaller and 0 if arguments are equal.
5719  * a (in) : Pointer to a file identifier.
5720  * b (in) : Pointer to a a file identifier.
5721  */
5722 static int
5723 vacuum_compare_dropped_files (const void *a, const void *b)
5724 {
5725  VFID *file_a = (VFID *) a;
5726  VFID *file_b = (VFID *) b;
5727  INT32 diff_fileid;
5728 
5729  assert (a != NULL && b != NULL);
5730 
5731  diff_fileid = file_a->fileid - file_b->fileid;
5732  if (diff_fileid != 0)
5733  {
5734  return (int) diff_fileid;
5735  }
5736 
5737  return (int) (file_a->volid - file_b->volid);
5738 }
5739 
5740 /*
5741  * vacuum_add_dropped_file () - Add new dropped file.
5742  *
5743  * return : Error code.
5744  * thread_p (in) : Thread entry.
5745  * vfid (in) : Class OID or B-tree identifier.
5746  * mvccid (in) : MVCCID.
5747  */
5748 static int
5750 {
5751  MVCCID save_mvccid = MVCCID_NULL;
5752  VPID vpid = VPID_INITIALIZER, prev_vpid = VPID_INITIALIZER;
5753  int page_count = 0, mem_size = 0;
5754  VACUUM_DROPPED_FILES_PAGE *page = NULL, *new_page = NULL;
5755  INT16 position = -1;
5757  LOG_TDES *tdes = LOG_FIND_CURRENT_TDES (thread_p);
5758  bool found = false;
5759  PAGE_TYPE ptype = PAGE_DROPPED_FILES;
5760 
5761 #if !defined (NDEBUG)
5762  VACUUM_TRACK_DROPPED_FILES *track_page = NULL;
5763  VACUUM_TRACK_DROPPED_FILES *new_track_page = NULL;
5764 #endif
5765 
5766  int error_code = NO_ERROR;
5767 
5768  assert (tdes != NULL);
5769 
5771  {
5772  /* Normally, dropped files are loaded after recovery, in order to provide a consistent state of its pages.
5773  * Actually, the consistent state should be reached after all run postpone and compensate undo records are
5774  * applied. However, this may be called from log_recovery_finish_all_postpone or from log_recovery_undo. Because
5775  * there is no certain code that is executed after applying redo and before calling these function, the dropped
5776  * files are loaded when needed. */
5777 
5778  /* This must be recover, otherwise the files should have been loaded. */
5779  assert (!LOG_ISRESTARTED ());
5780 
5782  {
5784  "Failed to load dropped files during recovery!");
5785 
5786  assert_release (false);
5787  return ER_FAILED;
5788  }
5789  }
5790 
5791  assert_release (!VFID_ISNULL (&vacuum_Dropped_files_vfid));
5792  assert_release (!VPID_ISNULL (&vacuum_Dropped_files_vpid));
5793 
5794 #if !defined (NDEBUG)
5795  assert (vacuum_Track_dropped_files != NULL);
5796 
5797  track_page = vacuum_Track_dropped_files;
5798 #endif /* !NDEBUG */
5799 
5800  addr.vfid = NULL;
5801  addr.offset = -1;
5802 
5803  VPID_COPY (&vpid, &vacuum_Dropped_files_vpid);
5804  while (!VPID_ISNULL (&vpid))
5805  {
5806  /* Unfix previous page */
5807  if (page != NULL)
5808  {
5809  vacuum_unfix_dropped_entries_page (thread_p, page);
5810  }
5811 
5812  /* Fix current page */
5813  page = vacuum_fix_dropped_entries_page (thread_p, &vpid, PGBUF_LATCH_WRITE);
5814  if (page == NULL)
5815  {
5816  assert (false);
5817  return ER_FAILED;
5818  }
5819 
5820  /* Save current vpid to prev_vpid */
5821  VPID_COPY (&prev_vpid, &vpid);
5822 
5823  /* Get next vpid and page count */
5824  VPID_COPY (&vpid, &page->next_page);
5825  page_count = page->n_dropped_files;
5826 
5827  /* binary search */
5828  position =
5829  util_bsearch (vfid, page->dropped_files, page_count, sizeof (VACUUM_DROPPED_FILE), vacuum_compare_dropped_files,
5830  &found);
5831 
5832  if (found)
5833  {
5834  /* Same entry was already dropped, replace previous MVCCID */
5835  VACUUM_DROPPED_FILE undo_data;
5836 
5837  /* Replace MVCCID */
5838  undo_data = page->dropped_files[position];
5839  save_mvccid = page->dropped_files[position].mvccid;
5840  page->dropped_files[position].mvccid = mvccid;
5841 
5842  assert_release (MVCC_ID_FOLLOW_OR_EQUAL (mvccid, save_mvccid));
5843 
5844  /* log changes */
5845  addr.pgptr = (PAGE_PTR) page;
5846  addr.offset = position;
5848  sizeof (VACUUM_DROPPED_FILE), &undo_data, &page->dropped_files[position]);
5849 
5850 #if !defined (NDEBUG)
5851  if (track_page != NULL)
5852  {
5853  memcpy (&track_page->dropped_data_page, page, DB_PAGESIZE);
5854  }
5855 #endif
5857  "add dropped file: found duplicate vfid %d|%d at position=%d, "
5858  "replace mvccid=%llu with mvccid=%llu. Page is %d|%d with lsa %lld|%d."
5859  "Page count=%d, global count=%d", VFID_AS_ARGS (&page->dropped_files[position].vfid), position,
5860  (unsigned long long int) save_mvccid,
5861  (unsigned long long int) page->dropped_files[position].mvccid,
5862  PGBUF_PAGE_STATE_ARGS ((PAGE_PTR) page), page->n_dropped_files, vacuum_Dropped_files_count);
5863 
5864  vacuum_set_dirty_dropped_entries_page (thread_p, page, FREE);
5865 
5866  return NO_ERROR;
5867  }
5868 
5869  /* not a duplicate. can we add? */
5870  if (VACUUM_DROPPED_FILES_PAGE_CAPACITY <= page_count)
5871  {
5873 
5874  /* No room left for new entries, try next page */
5875 
5876 #if !defined (NDEBUG)
5877  if (track_page != NULL && !VPID_ISNULL (&vpid))
5878  {
5879  /* Don't advance from last track page. A new page will be added and we need to set a link between
5880  * last track page and new track page. */
5881  track_page = track_page->next_tracked_page;
5882  }
5883 #endif
5884  continue;
5885  }
5886 
5887  /* add to position to keep the order */
5888  if (page_count > position)
5889  {
5890  mem_size = (page_count - position) * sizeof (VACUUM_DROPPED_FILE);
5891  memmove (&page->dropped_files[position + 1], &page->dropped_files[position], mem_size);
5892  }
5893 
5894  /* Increment page count */
5895  page->n_dropped_files++;
5896 
5897  /* Increment total count */
5898  ATOMIC_INC_32 (&vacuum_Dropped_files_count, 1);
5899 
5900  VFID_COPY (&page->dropped_files[position].vfid, vfid);
5901  page->dropped_files[position].mvccid = mvccid;
5902 
5903  addr.pgptr = (PAGE_PTR) page;
5904  addr.offset = position;
5906  &page->dropped_files[position]);
5907 
5908 #if !defined (NDEBUG)
5909  if (track_page != NULL)
5910  {
5911  memcpy (&track_page->dropped_data_page, page, DB_PAGESIZE);
5912  }
5913 #endif
5914 
5916  "added new dropped file %d|%d and mvccid=%llu at position=%d. "
5917  "Page is %d|%d with lsa %lld|%d. Page count=%d, global count=%d",
5918  VFID_AS_ARGS (&page->dropped_files[position].vfid),
5919  (unsigned long long int) page->dropped_files[position].mvccid, position,
5920  PGBUF_PAGE_STATE_ARGS ((PAGE_PTR) page), page->n_dropped_files, vacuum_Dropped_files_count);
5921 
5922  vacuum_set_dirty_dropped_entries_page (thread_p, page, FREE);
5923 
5924  return NO_ERROR;
5925  }
5926 
5927  /* The entry couldn't fit in any of the current pages. */
5928  /* Allocate a new page */
5929 
5930  /* Last page must be fixed */
5931  assert (page != NULL);
5932 
5933  /* Extend file */
5934  error_code = file_alloc (thread_p, &vacuum_Dropped_files_vfid, file_init_page_type, &ptype, &vpid,
5935  (PAGE_PTR *) (&new_page));
5936  if (error_code != NO_ERROR)
5937  {
5938  assert (false);
5939  vacuum_unfix_dropped_entries_page (thread_p, page);
5940  return ER_FAILED;
5941  }
5942  if (new_page == NULL)
5943  {
5944  assert_release (false);
5945  vacuum_unfix_dropped_entries_page (thread_p, page);
5946  return ER_FAILED;
5947  }
5948 
5949  /* Set page header: next page as NULL and count as 1 */
5950  VPID_SET_NULL (&new_page->next_page);
5951  new_page->n_dropped_files = 1;
5952 
5953  /* Set vfid */
5954  VFID_COPY (&new_page->dropped_files[0].vfid, vfid);
5955 
5956  /* Set MVCCID */
5957  new_page->dropped_files[0].mvccid = mvccid;
5958 
5959  ATOMIC_INC_32 (&vacuum_Dropped_files_count, 1);
5960 
5961 #if !defined(NDEBUG)
5962  if (track_page != NULL)
5963  {
5964  if (track_page->next_tracked_page == NULL)
5965  {
5966  new_track_page = (VACUUM_TRACK_DROPPED_FILES *) malloc (VACUUM_TRACK_DROPPED_FILES_SIZE);
5967  if (new_track_page == NULL)
5968  {
5970  vacuum_unfix_dropped_entries_page (thread_p, page);
5971  vacuum_unfix_dropped_entries_page (thread_p, new_page);
5972  return ER_FAILED;
5973  }
5974  }
5975  else
5976  {
5977  new_track_page = track_page->next_tracked_page;
5978  }
5979 
5980  memcpy (&new_track_page->dropped_data_page, new_page, DB_PAGESIZE);
5981  new_track_page->next_tracked_page = NULL;
5982  track_page->next_tracked_page = new_track_page;
5983  }
5984 #endif
5985 
5986  pgbuf_set_page_ptype (thread_p, (PAGE_PTR) new_page, PAGE_DROPPED_FILES);
5988  sizeof (VACUUM_DROPPED_FILES_PAGE), new_page);
5989 
5991  "added new dropped file %d|%d and mvccid=%llu to at position=%d. "
5992  "Page is %d|%d with lsa %lld|%d. Page count=%d, global count=%d",
5993  VFID_AS_ARGS (&new_page->dropped_files[0].vfid),
5994  (unsigned long long int) new_page->dropped_files[0].mvccid, 0,
5995  PGBUF_PAGE_STATE_ARGS ((PAGE_PTR) new_page), new_page->n_dropped_files, vacuum_Dropped_files_count);
5996 
5997  /* Unfix new page */
5998  vacuum_set_dirty_dropped_entries_page (thread_p, new_page, FREE);
5999 
6000  /* Save a link to the new page in last page */
6001  vacuum_dropped_files_set_next_page (thread_p, page, &vpid);
6002 #if !defined(NDEBUG)
6003  if (track_page != NULL)
6004  {
6005  VPID_COPY (&track_page->dropped_data_page.next_page, &vpid);
6006  }
6007 #endif
6008 
6009  /* unfix last page */
6010  vacuum_unfix_dropped_entries_page (thread_p, page);
6011  return NO_ERROR;
6012 }
6013 
6014 /*
6015  * vacuum_log_add_dropped_file () - Append postpone/undo log for notifying vacuum of a file being dropped. Postpone
6016  * is added when a class or index is dropped and undo when a class or index is created.
6017  *
6018  * return : Void.
6019  * thread_p (in) : Thread entry.
6020  * vfid (in) : Dropped file identifier.
6021  * class_oid(in) : class OID
6022  */
6023 void
6024 vacuum_log_add_dropped_file (THREAD_ENTRY * thread_p, const VFID * vfid, const OID * class_oid, bool pospone_or_undo)
6025 {
6026  LOG_DATA_ADDR addr;
6028 
6029  vacuum_er_log (VACUUM_ER_LOG_DROPPED_FILES, "Append %s log from dropped file %d|%d.",
6030  pospone_or_undo ? "postpone" : "undo", vfid->volid, vfid->fileid);
6031 
6032  /* Initialize recovery data */
6033  VFID_COPY (&rcv_data.vfid, vfid);
6034  if (class_oid != NULL)
6035  {
6036  COPY_OID (&rcv_data.class_oid, class_oid);
6037  }
6038  else
6039  {
6040  OID_SET_NULL (&rcv_data.class_oid);
6041  }
6042 
6043  addr.offset = -1;
6044  addr.pgptr = NULL;
6045  addr.vfid = NULL;
6046 
6047  if (pospone_or_undo == VACUUM_LOG_ADD_DROPPED_FILE_POSTPONE)
6048  {
6049  log_append_postpone (thread_p, RVVAC_NOTIFY_DROPPED_FILE, &addr, sizeof (rcv_data), &rcv_data);
6050  }
6051  else
6052  {
6053  log_append_undo_data (thread_p, RVVAC_NOTIFY_DROPPED_FILE, &addr, sizeof (rcv_data), &rcv_data);
6054  }
6055 }
6056 
6057 /*
6058  * vacuum_rv_redo_add_dropped_file () - Redo recovery used for adding dropped files.
6059  *
6060  * return : Error code.
6061  * thread_p (in) : Thread entry.
6062  * rcv (in) : Recovery data.
6063  */
6064 int
6066 {
6068  INT16 position = rcv->offset;
6069  int mem_size;
6070  VACUUM_DROPPED_FILE *dropped_file;
6071 
6072  assert (rcv->length == sizeof (VACUUM_DROPPED_FILE));
6073  dropped_file = ((VACUUM_DROPPED_FILE *) rcv->data);
6074 
6075  assert_release (!VFID_ISNULL (&dropped_file->vfid));
6076  assert_release (MVCCID_IS_VALID (dropped_file->mvccid));
6077 
6078  page = (VACUUM_DROPPED_FILES_PAGE *) rcv->pgptr;
6079 
6080  if (position > page->n_dropped_files)
6081  {
6082  /* Error! */
6084  "Dropped files recovery error: Invalid position %d (only %d entries in page) while "
6085  "inserting new entry vfid=%d|%d mvccid=%llu. Page is %d|%d at lsa %lld|%d. ",
6086  position, page->n_dropped_files, VFID_AS_ARGS (&dropped_file->vfid),
6087  (unsigned long long) dropped_file->mvccid, PGBUF_PAGE_STATE_ARGS (rcv->pgptr));
6088 
6089  assert_release (false);
6090  return ER_FAILED;
6091  }
6092 
6093  if (position < page->n_dropped_files)
6094  {
6095  /* Make room for new record */
6096  mem_size = (page->n_dropped_files - position) * sizeof (VACUUM_DROPPED_FILE);
6097  memmove (&page->dropped_files[position + 1], &page->dropped_files[position], mem_size);
6098  }
6099 
6100  /* Copy new dropped file */
6101  VFID_COPY (&page->dropped_files[position].vfid, &dropped_file->vfid);
6102  page->dropped_files[position].mvccid = dropped_file->mvccid;
6103 
6104  /* Increment number of files */
6105  page->n_dropped_files++;
6106 
6108  "Dropped files redo recovery, insert new entry "
6109  "vfid=%d|%d, mvccid=%llu at position %d. Page is %d|%d at lsa %lld|%d.",
6110  VFID_AS_ARGS (&dropped_file->vfid), (unsigned long long) dropped_file->mvccid, position,
6111  PGBUF_PAGE_STATE_ARGS (rcv->pgptr));
6112 
6113  /* Make sure the mvcc_next_id is also updated, since this is the marker used by dropped files. */
6114  if (!MVCC_ID_PRECEDES (dropped_file->mvccid, log_Gl.hdr.mvcc_next_id))
6115  {
6116  log_Gl.hdr.mvcc_next_id = dropped_file->mvccid;
6118  }
6119 
6120  /* Page was modified, so set it dirty */
6121  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
6122  return NO_ERROR;
6123 }
6124 
6125 /*
6126  * vacuum_rv_undo_add_dropped_file () - Undo recovery used for adding dropped files.
6127  *
6128  * return : Error code.
6129  * thread_p (in) : Thread entry.
6130  * rcv (in) : Recovery data.
6131  */
6132 int
6134 {
6136  INT16 position = rcv->offset;
6137  int mem_size;
6138 
6139  page = (VACUUM_DROPPED_FILES_PAGE *) rcv->pgptr;
6140 
6141  if (position >= page->n_dropped_files)
6142  {
6143  assert_release (false);
6144  return ER_FAILED;
6145  }
6146 
6147  mem_size = (page->n_dropped_files - 1 - position) * sizeof (VACUUM_DROPPED_FILE);
6148  if (mem_size > 0)
6149  {
6150  memmove (&page->dropped_files[position], &page->dropped_files[position + 1], mem_size);
6151  }
6152  page->n_dropped_files--;
6153 
6154  /* Page was modified, so set it dirty */
6155  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
6156  return NO_ERROR;
6157 }
6158 
6159 /*
6160  * vacuum_rv_replace_dropped_file () - replace dropped file for recovery
6161  *
6162  * return : error code
6163  * thread_p (in) : thread entry
6164  * rcv (in) : recovery data
6165  */
6166 int
6168 {
6170  INT16 position = rcv->offset;
6171  VACUUM_DROPPED_FILE *dropped_file;
6172 
6173  assert (rcv->length == sizeof (VACUUM_DROPPED_FILE));
6174  dropped_file = (VACUUM_DROPPED_FILE *) rcv->data;
6175 
6176  page = (VACUUM_DROPPED_FILES_PAGE *) rcv->pgptr;
6177 
6178  /* Should be the same VFID */
6179  if (position >= page->n_dropped_files)
6180  {
6181  /* Error! */
6183  "Dropped files recovery error: Invalid position %d (only %d entries in page) while "
6184  "replacing old entry with vfid=%d|%d mvccid=%llu. Page is %d|%d at lsa %lld|%d. ",
6185  position, page->n_dropped_files, VFID_AS_ARGS (&dropped_file->vfid),
6186  (unsigned long long) dropped_file->mvccid, PGBUF_PAGE_STATE_ARGS (rcv->pgptr));
6187 
6188  assert_release (false);
6189  return ER_FAILED;
6190  }
6191 
6192  if (!VFID_EQ (&dropped_file->vfid, &page->dropped_files[position].vfid))
6193  {
6194  /* Error! */
6196  "Dropped files recovery error: expected to "
6197  "find vfid %d|%d at position %d and found %d|%d with MVCCID=%d. "
6198  "Page is %d|%d at lsa %lld|%d. ", VFID_AS_ARGS (&dropped_file->vfid), position,
6199  VFID_AS_ARGS (&page->dropped_files[position].vfid),
6200  (unsigned long long) page->dropped_files[position].mvccid,
6201  PGBUF_PAGE_STATE_ARGS (rcv->pgptr));
6202 
6203  assert_release (false);
6204  return ER_FAILED;
6205  }
6206 
6208  "Dropped files redo recovery, replace MVCCID for"
6209  " file %d|%d with %llu (position=%d). Page is %d|%d at lsa %lld|%d.",
6210  VFID_AS_ARGS (&dropped_file->vfid), (unsigned long long) dropped_file->mvccid, position,
6211  PGBUF_PAGE_STATE_ARGS (rcv->pgptr));
6212  page->dropped_files[position].mvccid = dropped_file->mvccid;
6213 
6214  /* Make sure the mvcc_next_id is also updated, since this is the marker used by dropped files. */
6215  if (!MVCC_ID_PRECEDES (dropped_file->mvccid, log_Gl.hdr.mvcc_next_id))
6216  {
6217  log_Gl.hdr.mvcc_next_id = dropped_file->mvccid;
6219  }
6220 
6221  /* Page was modified, so set it dirty */
6222  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
6223  return NO_ERROR;
6224 }
6225 
6226 /*
6227  * vacuum_notify_all_workers_dropped_file () - notify all vacuum workers that given file was dropped
6228  *
6229  * vfid_dropped (in) : VFID of dropped file
6230  * mvccid (in) : MVCCID marker for dropped file
6231  */
6232 static void
6234 {
6235 #if defined (SERVER_MODE)
6236  if (!LOG_ISRESTARTED ())
6237  {
6238  // workers are not running during recovery
6239  return;
6240  }
6241 
6242  INT32 my_version, workers_min_version;
6243 
6244  /* Before notifying vacuum workers there is one last thing we have to do. Running workers must also be notified of
6245  * the VFID being dropped to cleanup their collected heap object arrays. Since must done one file at a time, so a
6246  * mutex is used for protection, in case there are several transactions doing file drops. */
6248  assert (VFID_ISNULL (&vacuum_Last_dropped_vfid));
6249  VFID_COPY (&vacuum_Last_dropped_vfid, &vfid_dropped);
6250 
6251  /* Increment dropped files version and save a version for current change. It is not important to keep the version
6252  * synchronized with the changes. It is only used to make sure that all workers have seen current change. */
6253  my_version = ++vacuum_Dropped_files_version;
6254 
6256  "Added dropped file - vfid=%d|%d, mvccid=%llu - "
6257  "Wait for all workers to see my_version=%d", VFID_AS_ARGS (&vfid_dropped), mvccid, my_version);
6258 
6259  /* Wait until all workers have been notified of this change */
6260  for (workers_min_version = vacuum_get_worker_min_dropped_files_version ();
6261  workers_min_version != -1 && workers_min_version < my_version;
6262  workers_min_version = vacuum_get_worker_min_dropped_files_version ())
6263  {
6265  "not all workers saw my changes, workers min version=%d. Sleep and retry.", workers_min_version);
6266 
6267  thread_sleep (1);
6268  }
6269 
6270  vacuum_er_log (VACUUM_ER_LOG_DROPPED_FILES, "All workers have been notified, min_version=%d", workers_min_version);
6271 
6272  VFID_SET_NULL (&vacuum_Last_dropped_vfid);
6274 #endif // SERVER_MODE
6275 }
6276 
6277 /*
6278  * vacuum_rv_notify_dropped_file () - Add drop file used in recovery phase. Can be used in two ways: at run postpone phase
6279  * for dropped heap files and indexes (if postpone_ref_lsa in not null); or at undo
6280  * phase for created heap files and indexes.
6281  *
6282  * return : Error code.
6283  * thread_p (in) : Thread entry.
6284  * rcv (in) : Recovery data.
6285  * pospone_ref_lsa (in) : Reference LSA for running postpone. NULL if this is
6286  * an undo for created heap files and indexes.
6287  */
6288 int
6290 {
6291  int error = NO_ERROR;
6292  OID *class_oid;
6293  MVCCID mvccid;
6295 
6296  /* Copy VFID from current log recovery data but set MVCCID at this point. We will use the log_Gl.hdr.mvcc_next_id as
6297  * borderline to distinguish this file from newer files. 1. All changes on this file must be done by transaction that
6298  * have already committed which means their MVCCID will be less than current log_Gl.hdr.mvcc_next_id. 2. All changes
6299  * on a new file that reused VFID must be done by transaction that start after this call, which means their MVCCID's
6300  * will be at least equal to current log_Gl.hdr.mvcc_next_id. */
6301 
6302  mvccid = ATOMIC_LOAD_64 (&log_Gl.hdr.mvcc_next_id);
6303 
6304  /* Add dropped file to current list */
6305  rcv_data = (VACUUM_DROPPED_FILES_RCV_DATA *) rcv->data;
6306  error = vacuum_add_dropped_file (thread_p, &rcv_data->vfid, mvccid);
6307  if (error != NO_ERROR)
6308  {
6309  return error;
6310  }
6311 
6312  // make sure vacuum workers will not access dropped file
6313  vacuum_notify_all_workers_dropped_file (rcv_data->vfid, mvccid);
6314 
6315  /* vacuum is notified of the file drop, it is safe to remove from cache */
6316  class_oid = &rcv_data->class_oid;
6317  if (!OID_ISNULL (class_oid))
6318  {
6319  (void) heap_delete_hfid_from_cache (thread_p, class_oid);
6320  }
6321 
6322  /* Success */
6323  return NO_ERROR;
6324 }
6325 
6326 /*
6327  * vacuum_cleanup_dropped_files () - Clean unnecessary dropped files.
6328  *
6329  * return : Error code.
6330  * thread_p (in) : Thread entry.
6331  *
6332  * NOTE: All entries with an MVCCID older than vacuum_Data->oldest_unvacuumed_mvccid are removed.
6333  * All records belonging to these entries must be either vacuumed or skipped after drop.
6334  */
6335 static int
6337 {
6340  int page_count = 0, mem_size = 0;
6341  VPID last_page_vpid = VPID_INITIALIZER, last_non_empty_page_vpid = VPID_INITIALIZER;
6342  INT16 removed_entries[VACUUM_DROPPED_FILES_MAX_PAGE_CAPACITY];
6343  INT16 n_removed_entries = 0, i;
6344 #if !defined (NDEBUG)
6345  VACUUM_TRACK_DROPPED_FILES *track_page = (VACUUM_TRACK_DROPPED_FILES *) vacuum_Track_dropped_files;
6346 #endif
6347 
6348  vacuum_er_log (VACUUM_ER_LOG_DROPPED_FILES, "%s", "Start cleanup dropped files.");
6349 
6350  if (!LOG_ISRESTARTED ())
6351  {
6352  /* Skip cleanup during recovery */
6353  vacuum_er_log (VACUUM_ER_LOG_RECOVERY | VACUUM_ER_LOG_DROPPED_FILES, "%s", "Skip cleanup during recovery.");
6354  return NO_ERROR;
6355  }
6356 
6357  assert_release (!VFID_ISNULL (&vacuum_Dropped_files_vfid));
6358  assert_release (!VPID_ISNULL (&vacuum_Dropped_files_vpid));
6359 
6360  if (vacuum_Dropped_files_count == 0)
6361  {
6362  /* Nothing to clean */
6363  vacuum_er_log (VACUUM_ER_LOG_DROPPED_FILES, "%s", "Cleanup skipped, no current entries.");
6364  return NO_ERROR;
6365  }
6366 
6367  /* Clean each page of dropped files */
6368  VPID_COPY (&vpid, &vacuum_Dropped_files_vpid);
6369  VPID_COPY (&last_non_empty_page_vpid, &vacuum_Dropped_files_vpid);
6370 
6371  while (!VPID_ISNULL (&vpid))
6372  {
6373  /* Reset n_removed_entries */
6374  n_removed_entries = 0;
6375 
6376  /* Track the last page found */
6377  VPID_COPY (&last_page_vpid, &vpid);
6378 
6379  /* Fix current page */
6380  page = vacuum_fix_dropped_entries_page (thread_p, &vpid, PGBUF_LATCH_WRITE);
6381  if (page == NULL)
6382  {
6383  assert (false);
6384  return ER_FAILED;
6385  }
6386 
6387  /* Get next page VPID */
6388  VPID_COPY (&vpid, &page->next_page);
6389 
6390  page_count = page->n_dropped_files;
6391  if (page_count == 0)
6392  {
6393  /* Page is empty */
6394  vacuum_unfix_dropped_entries_page (thread_p, page);
6395  continue;
6396  }
6397 
6398  /* Page is not empty, track the last non-empty page found */
6399  VPID_COPY (&last_non_empty_page_vpid, &vpid);
6400 
6401  /* Check entries for cleaning. Start from the end of the array */
6402  for (i = page_count - 1; i >= 0; i--)
6403  {
6405  {
6406  /* Remove entry */
6407  removed_entries[n_removed_entries++] = i;
6408  if (i < page_count - 1)
6409  {
6410  mem_size = (page_count - i - 1) * sizeof (VACUUM_DROPPED_FILE);
6411  memmove (&page->dropped_files[i], &page->dropped_files[i + 1], mem_size);
6412  }
6413  }
6414  }
6415 
6416  if (n_removed_entries > 0)
6417  {
6418  /* Update dropped files global counter */
6419  ATOMIC_INC_32 (&vacuum_Dropped_files_count, -n_removed_entries);
6420 
6421  /* Update dropped files page counter */
6422  page->n_dropped_files -= n_removed_entries;
6423 
6424  /* Log changes */
6425  vacuum_log_cleanup_dropped_files (thread_p, (PAGE_PTR) page, removed_entries, n_removed_entries);
6426 
6428  "cleanup dropped files. Page is %d|%d with lsa %lld|%d. "
6429  "Page count=%d, global count=%d", PGBUF_PAGE_STATE_ARGS ((PAGE_PTR) page),
6430  page->n_dropped_files, vacuum_Dropped_files_count);
6431 
6432  /* todo: new pages are allocated but old pages are never deallocated. it looks like they are leaked. */
6433 
6434 #if !defined (NDEBUG)
6435  /* Copy changes to tracker */
6436  memcpy (&track_page->dropped_data_page, page, DB_PAGESIZE);
6437 #endif
6438  vacuum_set_dirty_dropped_entries_page (thread_p, page, FREE);
6439  }
6440  else
6441  {
6442  /* No changes */
6443  vacuum_unfix_dropped_entries_page (thread_p, page);
6444  }
6445 
6446 #if !defined (NDEBUG)
6447  track_page = track_page->next_tracked_page;
6448 #endif
6449  }
6450 
6451  if (!VPID_ISNULL (&last_non_empty_page_vpid) && !VPID_EQ (&last_non_empty_page_vpid, &last_page_vpid))
6452  {
6453  /* Update next page link in the last non-empty page to NULL, to avoid fixing empty pages in the future. */
6455  "Cleanup dropped files must remove pages to the of page %d|%d... Cut off link.",
6456  last_non_empty_page_vpid.volid, last_non_empty_page_vpid.pageid);
6457 
6458  page = vacuum_fix_dropped_entries_page (thread_p, &last_non_empty_page_vpid, PGBUF_LATCH_WRITE);
6459  if (page == NULL)
6460  {
6461  assert (false);
6462  return ER_FAILED;
6463  }
6464 
6465  vacuum_dropped_files_set_next_page (thread_p, page, &page->next_page);
6466  vacuum_unfix_dropped_entries_page (thread_p, page);
6467 
6468  /* todo: tracker? */
6469  }
6470 
6471  vacuum_er_log (VACUUM_ER_LOG_DROPPED_FILES, "%s", "Finished cleanup dropped files.");
6472  return NO_ERROR;
6473 }
6474 
6475 /*
6476  * vacuum_is_file_dropped () - Check whether file is considered dropped.
6477  *
6478  * return : error code.
6479  * thread_p (in) : Thread entry.
6480  * is_file_dropped(out) : True if file is considered dropped. False, otherwise.
6481  * vfid (in) : File identifier.
6482  * mvccid (in) : MVCCID.
6483  */
6484 int
6485 vacuum_is_file_dropped (THREAD_ENTRY * thread_p, bool * is_file_dropped, VFID * vfid, MVCCID mvccid)
6486 {
6488  {
6489  *is_file_dropped = false;
6490  return NO_ERROR;
6491  }
6492 
6493  return vacuum_find_dropped_file (thread_p, is_file_dropped, vfid, mvccid);
6494 }
6495 
6496 /*
6497  * vacuum_find_dropped_file () - Find the dropped file and check whether the given MVCCID is older than or equal to the
6498  * MVCCID of dropped file. Used by vacuum to detect records that belong to dropped files.
6499  *
6500  * return : error code.
6501  * thread_p (in) : Thread entry.
6502  * is_file_dropped(out) : True if file is considered dropped. False, otherwise.
6503  * vfid (in) : File identifier.
6504  * mvccid (in) : MVCCID of checked record.
6505  */
6506 static int
6507 vacuum_find_dropped_file (THREAD_ENTRY * thread_p, bool * is_file_dropped, VFID * vfid, MVCCID mvccid)
6508 {
6510  VACUUM_DROPPED_FILE *dropped_file = NULL;
6511  VPID vpid;
6512  INT16 page_count;
6513  int error;
6514 
6515  if (vacuum_Dropped_files_count == 0)
6516  {
6517  /* No dropped files */
6518  *is_file_dropped = false;
6519  return NO_ERROR;
6520  }
6521 
6522  assert_release (!VPID_ISNULL (&vacuum_Dropped_files_vpid));
6523 
6524  /* Search for dropped file in all pages. */
6525  VPID_COPY (&vpid, &vacuum_Dropped_files_vpid);
6526 
6527  while (!VPID_ISNULL (&vpid))
6528  {
6529  /* Fix current page */
6530  page = vacuum_fix_dropped_entries_page (thread_p, &vpid, PGBUF_LATCH_READ);
6531  if (page == NULL)
6532  {
6533  *is_file_dropped = false; /* actually unknown but unimportant */
6534 
6535  assert (!VACUUM_IS_THREAD_VACUUM_MASTER (thread_p));
6536  ASSERT_ERROR_AND_SET (error);
6537  assert (error == ER_INTERRUPTED);
6538 
6539  if (VACUUM_IS_THREAD_VACUUM_WORKER (thread_p))
6540  {
6541  assert (thread_p->shutdown);
6542  }
6543  return error;
6544  }
6545 
6546  /* dropped files page are never boosted. mark them that vacuum will fix to at least postpone victimization */
6547  pgbuf_notify_vacuum_follows (thread_p, (PAGE_PTR) page);
6548 
6549  /* Copy next page VPID */
6550  VPID_COPY (&vpid, &page->next_page);
6551  page_count = page->n_dropped_files;
6552 
6553  /* Use compare VFID to find a matching entry */
6554  dropped_file =
6555  (VACUUM_DROPPED_FILE *) bsearch (vfid, page->dropped_files, page_count, sizeof (VACUUM_DROPPED_FILE),
6557  if (dropped_file != NULL)
6558  {
6559  /* Found matching entry. Compare the given MVCCID with the MVCCID of dropped file. */
6560  if (MVCC_ID_PRECEDES (mvccid, dropped_file->mvccid))
6561  {
6562  /* The record must belong to the dropped file */
6564  "found dropped file: vfid=%d|%d mvccid=%llu in page %d|%d. "
6565  "Entry at position %d, vfid=%d|%d mvccid=%llu. The vacuumed file is dropped.",
6566  VFID_AS_ARGS (vfid), (unsigned long long int) mvccid,
6567  PGBUF_PAGE_VPID_AS_ARGS ((PAGE_PTR) page), dropped_file - page->dropped_files,
6568  VFID_AS_ARGS (&dropped_file->vfid), (unsigned long long int) dropped_file->mvccid);
6569 
6570  vacuum_unfix_dropped_entries_page (thread_p, page);
6571 
6572  *is_file_dropped = true;
6573  return NO_ERROR;
6574  }
6575  else
6576  {
6577  /* The record belongs to an entry with the same identifier, but is newer. */
6579  "found dropped file: vfid=%d|%d mvccid=%llu in page %d|%d. "
6580  "Entry at position %d, vfid=%d|%d mvccid=%llu. The vacuumed file is newer.",
6581  VFID_AS_ARGS (vfid), (unsigned long long int) mvccid,
6582  PGBUF_PAGE_VPID_AS_ARGS ((PAGE_PTR) page), dropped_file - page->dropped_files,
6583  VFID_AS_ARGS (&dropped_file->vfid), (unsigned long long int) dropped_file->mvccid);
6584 
6585  vacuum_unfix_dropped_entries_page (thread_p, page);
6586 
6587  *is_file_dropped = false;
6588  return NO_ERROR;
6589  }
6590  }
6591 
6592  /* Do not log this unless you think it is useful. It spams the log file. */
6594  "didn't find dropped file: vfid=%d|%d mvccid=%llu in page (%d, %d).", VFID_AS_ARGS (vfid),
6595  (unsigned long long int) mvccid, PGBUF_PAGE_VPID_AS_ARGS ((PAGE_PTR) page));
6596 
6597  vacuum_unfix_dropped_entries_page (thread_p, page);
6598  }
6599 
6600  /* Entry not found */
6601  *is_file_dropped = false;
6602  return NO_ERROR;
6603 }
6604 
6605 /*
6606  * vacuum_log_cleanup_dropped_files () - Log dropped files cleanup.
6607  *
6608  * return : Void.
6609  * thread_p (in) : Thread entry.
6610  * page_p (in) : Page pointer.
6611  * indexes (in) : Indexes of cleaned up dropped files.
6612  * n_indexes (in) : Total count of dropped files.
6613  *
6614  * NOTE: Consider not logging cleanup. Cleanup can be done at database restart.
6615  */
6616 static void
6617 vacuum_log_cleanup_dropped_files (THREAD_ENTRY * thread_p, PAGE_PTR page_p, INT16 * indexes, INT16 n_indexes)
6618 {
6619 #define VACUUM_CLEANUP_DROPPED_FILES_MAX_REDO_CRUMBS 3
6621  LOG_DATA_ADDR addr;
6622  int n_redo_crumbs = 0;
6623 
6624  /* Add n_indexes */
6625  redo_crumbs[n_redo_crumbs].data = &n_indexes;
6626  redo_crumbs[n_redo_crumbs++].length = sizeof (n_indexes);
6627 
6628  /* Add indexes */
6629  redo_crumbs[n_redo_crumbs].data = indexes;
6630  redo_crumbs[n_redo_crumbs++].length = n_indexes * sizeof (*indexes);
6631 
6633 
6634  /* Initialize log data address */
6635  addr.pgptr = page_p;
6637  addr.offset = 0;
6638 
6639  log_append_redo_crumbs (thread_p, RVVAC_DROPPED_FILE_CLEANUP, &addr, n_redo_crumbs, redo_crumbs);
6640 }
6641 
6642 /*
6643  * vacuum_rv_redo_cleanup_dropped_files () - Recover dropped files cleanup.
6644  *
6645  * return : Error code.
6646  * thread_p (in) : Thread entry,
6647  * rcv (in) : Recovery data.
6648  *
6649  * NOTE: Consider not logging cleanup. Cleanup can be done at database restart.
6650  */
6651 int
6653 {
6654  int offset = 0, mem_size;
6656  INT16 *indexes;
6657  INT16 n_indexes, i;
6658 
6659  /* Get recovery information */
6660 
6661  /* Get n_indexes */
6662  n_indexes = *((INT16 *) rcv->data);
6663  offset += sizeof (n_indexes);
6664 
6665  /* Get indexes */
6666  indexes = (INT16 *) (rcv->data + offset);
6667  offset += sizeof (*indexes) * n_indexes;
6668 
6669  /* Check that all recovery data has been processed */
6670  assert (offset == rcv->length);
6671 
6672  /* Cleanup starting from last entry */
6673  for (i = 0; i < n_indexes; i++)
6674  {
6675  /* Remove entry at indexes[i] */
6677  "Recovery of dropped classes: remove file %d|%d, mvccid=%llu at position %d.",
6678  (int) page->dropped_files[indexes[i]].vfid.volid,
6679  (int) page->dropped_files[indexes[i]].vfid.fileid, page->dropped_files[indexes[i]].mvccid,
6680  (int) indexes[i]);
6681  mem_size = (page->n_dropped_files - indexes[i]) * sizeof (VACUUM_DROPPED_FILE);
6682 
6683  assert (mem_size >= 0);
6684  if (mem_size > 0)
6685  {
6686  memmove (&page->dropped_files[indexes[i]], &page->dropped_files[indexes[i] + 1], mem_size);
6687  }
6688 
6689  /* Update dropped files page counter */
6690  page->n_dropped_files--;
6691  }
6692 
6693  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
6694 
6695  return NO_ERROR;
6696 }
6697 
6698 /*
6699  * vacuum_dropped_files_set_next_page () - Set dropped files next page link and log it.
6700  *
6701  * return : Void.
6702  * thread_p (in) : Thread entry.
6703  * page_p (in) : Dropped files page.
6704  * next_page (in) : Next page VPID.
6705  */
6706 static void
6708 {
6709  LOG_DATA_ADDR addr;
6710 
6711  /* Initialize log data address */
6712  addr.pgptr = (PAGE_PTR) page_p;
6713  addr.vfid = NULL;
6714  addr.offset = 0;
6715 
6716  /* log and change */
6717  log_append_undoredo_data (thread_p, RVVAC_DROPPED_FILE_NEXT_PAGE, &addr, sizeof (VPID), sizeof (VPID),
6718  &page_p->next_page, next_page);
6719  page_p->next_page = *next_page;
6720 
6722 }
6723 
6724 /*
6725  * vacuum_rv_set_next_page_dropped_files () - Recover setting link to next page for dropped files.
6726  *
6727  * return : Error code.
6728  * thread_p (in) : Thread entry.
6729  * rcv (in) : Recovery data.
6730  */
6731 int
6733 {
6735 
6736  /* Set next page VPID */
6737  VPID_COPY (&page->next_page, (VPID *) rcv->data);
6738 
6739  /* Check recovery data is as expected */
6740  assert (rcv->length = sizeof (VPID));
6741 
6742  vacuum_er_log (VACUUM_ER_LOG_RECOVERY, "Set link for dropped files from page %d|%d to page %d|%d.",
6744  page->next_page.pageid);
6745 
6746  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
6747 
6748  return NO_ERROR;
6749 }
6750 
6751 /*
6752  * vacuum_compare_heap_object () - Compare two heap objects to be vacuumed. HFID compare has priority against OID
6753  * compare.
6754  *
6755  * return : Compare result.
6756  * a (in) : First object.
6757  * b (in) : Second object.
6758  */
6759 static int
6760 vacuum_compare_heap_object (const void *a, const void *b)
6761 {
6762  VACUUM_HEAP_OBJECT *file_obj_a = (VACUUM_HEAP_OBJECT *) a;
6763  VACUUM_HEAP_OBJECT *file_obj_b = (VACUUM_HEAP_OBJECT *) b;
6764  int diff;
6765 
6766  /* First compare VFID, then OID. */
6767 
6768  /* Compare VFID file ID's. */
6769  diff = (int) (file_obj_a->vfid.fileid - file_obj_b->vfid.fileid);
6770  if (diff != 0)
6771  {
6772  return diff;
6773  }
6774 
6775  /* Compare VFID volume ID's. */
6776  diff = (int) (file_obj_a->vfid.volid - file_obj_b->vfid.volid);
6777  if (diff != 0)
6778  {
6779  return diff;
6780  }
6781 
6782  /* Compare OID page ID's. */
6783  diff = (int) (file_obj_a->oid.pageid - file_obj_b->oid.pageid);
6784  if (diff != 0)
6785  {
6786  return diff;
6787  }
6788 
6789  /* Compare OID volume ID's. */
6790  diff = (int) (file_obj_a->oid.volid - file_obj_b->oid.volid);
6791  if (diff != 0)
6792  {
6793  return diff;
6794  }
6795 
6796  /* Compare OID slot ID's. */
6797  return (int) (file_obj_a->oid.slotid - file_obj_b->oid.slotid);
6798 }
6799 
6800 /*
6801  * vacuum_collect_heap_objects () - Collect the heap object to be later vacuumed.
6802  *
6803  * return : Error code.
6804  * thread_p (in) : thread entry
6805  * worker (in/out) : Vacuum worker structure.
6806  * oid (in) : Heap object OID.
6807  * vfid (in) : Heap file ID.
6808  */
6809 static int
6811 {
6812  /* Collect both file ID and object OID to vacuum at the end of the job. Heap file ID is required to know whether
6813  * objects are reusable or not, OID is to point vacuum where data needs to be removed. */
6814 
6815  /* Make sure we have enough storage. */
6816  if (worker->n_heap_objects >= worker->heap_objects_capacity)
6817  {
6818  /* Expand buffer. */
6819  VACUUM_HEAP_OBJECT *new_buffer = NULL;
6820  int new_capacity = worker->heap_objects_capacity * 2;
6821 
6822  new_buffer = (VACUUM_HEAP_OBJECT *) realloc (worker->heap_objects, new_capacity * sizeof (VACUUM_HEAP_OBJECT));
6823  if (new_buffer == NULL)
6824  {
6826  new_capacity * sizeof (VACUUM_HEAP_OBJECT));
6828  "Could not expact the files and objects capacity to %d.", new_capacity);
6829  return ER_OUT_OF_VIRTUAL_MEMORY;
6830  }
6831  worker->heap_objects = new_buffer;
6832  worker->heap_objects_capacity = new_capacity;
6833  }
6834 
6835  /* Add new heap object (HFID & OID). */
6836  VFID_COPY (&worker->heap_objects[worker->n_heap_objects].vfid, vfid);
6837  COPY_OID (&worker->heap_objects[worker->n_heap_objects].oid, oid);
6838  /* Increment object count. */
6839  worker->n_heap_objects++;
6840 
6841  /* Success. */
6842  return NO_ERROR;
6843 }
6844 
6845 /*
6846  * vacuum_cleanup_collected_by_vfid () - Cleanup entries collected from dropped file.
6847  *
6848  * return : Void.
6849  * worker (in) : Vacuum worker.
6850  * vfid (in) : VFID of dropped file.
6851  */
6852 static void
6854 {
6855  int start, end;
6856 
6857  /* Sort collected. */
6858  qsort (worker->heap_objects, worker->n_heap_objects, sizeof (VACUUM_HEAP_OBJECT), vacuum_compare_heap_object);
6859 
6860  /* Find first entry for file */
6861  for (start = 0; start < worker->n_heap_objects && !VFID_EQ (&worker->heap_objects[start].vfid, vfid); start++);
6862  if (start == worker->n_heap_objects)
6863  {
6864  /* VFID doesn't exist. */
6865  return;
6866  }
6867  /* Find first entry for other file. */
6868  for (end = start + 1; end < worker->n_heap_objects && VFID_EQ (&worker->heap_objects[end].vfid, vfid); end++);
6869  /* Remove all between start and end. */
6870  if (end == worker->n_heap_objects)
6871  {
6872  /* Just update the number of objects. */
6873  worker->n_heap_objects = start;
6874  }
6875  else
6876  {
6877  /* Move objects after end */
6878  memmove (&worker->heap_objects[start], &worker->heap_objects[end],
6879  (worker->n_heap_objects - end) * sizeof (VACUUM_HEAP_OBJECT));
6880  /* Update number of objects. */
6881  worker->n_heap_objects -= (end - start);
6882  }
6883 }
6884 
6885 #if defined (SERVER_MODE)
6886 /*
6887  * vacuum_compare_dropped_files_version () - Compare two versions ID's of dropped files. Take into consideration that
6888  * versions can overflow max value of INT32.
6889  *
6890  * return : Positive value if first version is considered bigger,
6891  * negative if it is considered smaller and 0 if they are
6892  * equal.
6893  * version_a (in) : First version.
6894  * version_b (in) : Second version.
6895  */
6896 static int
6897 vacuum_compare_dropped_files_version (INT32 version_a, INT32 version_b)
6898 {
6899  INT32 max_int32_div_2 = 0x3FFFFFFF;
6900 
6901  /* If both are positive or if both are negative return a-b */
6902  if ((version_a >= 0 && version_b >= 0) || (version_a < 0 && version_b < 0))
6903  {
6904  return (int) (version_a - version_b);
6905  }
6906 
6907  /* If one is positive and the other negative we have to consider the case when version overflowed INT32 and the case
6908  * when one just passed 0. In the first case, the positive value is considered smaller, while in the second case the
6909  * negative value is considered smaller. The INT32 domain of values is split into 4 ranges: [-MAX_INT32,
6910  * -MAX_INT32/2], [-MAX_INT32/2, 0], [0, MAX_INT32/2] and [MAX_INT32/2, MAX_INT32]. We will consider the case when
6911  * one value is in [-MAX_INT32, -MAX_INT32/2] and the other in [MAX_INT32/2, MAX_INT32] and the second case when the
6912  * values are in [-MAX_INT32/2, 0] and [0, MAX_INT32]. If the values are not in these ranges, the algorithm is
6913  * flawed. */
6914  if (version_a >= 0)
6915  {
6916  /* 0x3FFFFFFF is MAX_INT32/2 */
6917  if (version_a >= max_int32_div_2)
6918  {
6919  assert (version_b <= -max_int32_div_2);
6920  /* In this case, version_a is considered smaller */
6921  return -1;
6922  }
6923  else
6924  {
6925  assert (version_b >= -max_int32_div_2);
6926  /* In this case, version_b is considered smaller */
6927  return 1;
6928  }
6929  }
6930  else
6931  {
6932  if (version_b >= max_int32_div_2)
6933  {
6934  assert (version_a <= -max_int32_div_2);
6935  /* In this case, version_a is considered bigger */
6936  return 1;
6937  }
6938  else
6939  {
6940  assert (version_a >= -max_int32_div_2);
6941  /* In this case, version_b is considered bigger */
6942  return -1;
6943  }
6944  }
6945 
6946  /* We shouldn't be here */
6947  assert (false);
6948 }
6949 #endif // SERVER_MODE
6950 
6951 #if !defined (NDEBUG)
6952 /*
6953  * vacuum_verify_vacuum_data_debug () - Vacuum data sanity check.
6954  *
6955  * return : Void.
6956  */
6957 static void
6959 {
6960  int i;
6961  VACUUM_DATA_PAGE *data_page = NULL;
6962  VACUUM_DATA_ENTRY *entry = NULL;
6963  VACUUM_DATA_ENTRY *last_unvacuumed = NULL;
6964  VPID next_vpid;
6965  int in_progress_distance = 0;
6966  bool found_in_progress = false;
6967 
6968  data_page = vacuum_Data.first_page;
6969 
6970  /* First page is same as last page if and only if first page link to next page is NULL. */
6971  assert ((vacuum_Data.first_page == vacuum_Data.last_page) == (VPID_ISNULL (&vacuum_Data.first_page->next_page)));
6972 
6973  /* Loop sanity check for each vacuum data page. */
6974  while (true)
6975  {
6976  /* Check index_unvacuumed and index_unavaliable have valid values. */
6977  assert (data_page->index_unvacuumed >= 0 && data_page->index_unvacuumed < vacuum_Data.page_data_max_count);
6978  assert (data_page->index_free >= 0 && data_page->index_free <= vacuum_Data.page_data_max_count);
6979  assert (data_page->index_unvacuumed <= data_page->index_free);
6980 
6981  /* Check page has valid data. */
6982  for (i = data_page->index_unvacuumed; i < data_page->index_free; i++)
6983  {
6984  /* Check page entries. */
6985  entry = &data_page->data[i];
6986 
6987  if (entry->is_vacuumed ())
6988  {
6989  assert (i != data_page->index_unvacuumed);
6990  if (found_in_progress && !LSA_ISNULL (&data_page->data[i].start_lsa))
6991  {
6992  in_progress_distance++;
6993  }
6994  continue;
6995  }
6996 
6997  assert (entry->is_available () || entry->is_job_in_progress ());
6999  assert (vacuum_Data.oldest_unvacuumed_mvccid <= entry->oldest_visible_mvccid);
7000  assert (entry->get_blockid () <= vacuum_Data.get_last_blockid ());
7001  assert (vacuum_get_log_blockid (entry->start_lsa.pageid) == entry->get_blockid ());
7002  assert (last_unvacuumed == NULL
7003  || !MVCC_ID_PRECEDES (entry->oldest_visible_mvccid, last_unvacuumed->oldest_visible_mvccid));
7004 
7005  if (i > data_page->index_unvacuumed)
7006  {
7007  assert (entry->get_blockid () == ((entry - 1)->get_blockid () + 1));
7008  }
7009 
7010  last_unvacuumed = entry;
7011 
7012  if (entry->is_job_in_progress ())
7013  {
7014  found_in_progress = true;
7015  in_progress_distance++;
7016  }
7017  }
7018  if (VPID_ISNULL (&data_page->next_page))
7019  {
7020  /* This was last page. Stop. */
7021  data_page = NULL;
7022  break;
7023  }
7024  /* Fix next page. */
7025  VPID_COPY (&next_vpid, &data_page->next_page);
7026  vacuum_unfix_data_page (thread_p, data_page);
7027  data_page = vacuum_fix_data_page (thread_p, &next_vpid);
7028  assert (data_page != NULL);
7029  last_unvacuumed = NULL;
7030  }
7031 
7032  if (in_progress_distance > 500)
7033  {
7034  /* In progress distance is computed starting with first in progress entry found and by counting all following
7035  * in progress or vacuumed jobs. The goal of this count is to find potential job leaks: jobs marked as in progress
7036  * but that never start or that are never marked as finished. We will assume that if this distance goes beyond some
7037  * value, then something bad must have happened.
7038  *
7039  * Theoretically, if a worker is blocked for long enough this value can be any size. However, we set a value unlikely
7040  * to be reached in normal circumstances.
7041  */
7042 
7043  /* It was an assertion but we have not seen a case that vacuum is blocked. */
7045  "vacuum is behind or blocked. distance is %d.", in_progress_distance);
7046  }
7047 }
7048 #endif /* !NDEBUG */
7049 
7050 /*
7051  * vacuum_log_prefetch_vacuum_block () - Pre-fetches from log page buffer or from disk, (almost) all log pages
7052  * required by a vacuum block
7053  * thread_p (in):
7054  * entry (in): vacuum data entry
7055  *
7056  * Note : this function does not handle cases when last log entry in 'start_lsa'
7057  * page of vacuum data entry spans for more than extra one log page.
7058  * Only one extra page is loaded after the 'start_lsa' page.
7059  * Please note that 'start_lsa' page is the last log page (logically),
7060  * the vacuum will require log pages before this one.
7061  */
7062 static int
7064 {
7065  VACUUM_WORKER *worker = vacuum_get_vacuum_worker (thread_p);
7066  int error = NO_ERROR;
7067  LOG_LSA req_lsa;
7068  LOG_PAGEID log_pageid;
7069  LOG_PAGE *log_page;
7070 
7071  req_lsa.offset = LOG_PAGESIZE;
7072 
7073  assert (entry != NULL);
7074 
7077 
7078  for (log_pageid = worker->prefetch_first_pageid, log_page = (LOG_PAGE *) worker->prefetch_log_buffer;
7079  log_pageid <= worker->prefetch_last_pageid;
7080  log_pageid++, log_page = (LOG_PAGE *) (((char *) log_page) + LOG_PAGESIZE))
7081  {
7082  req_lsa.pageid = log_pageid;
7083  error = logpb_fetch_page (thread_p, &req_lsa, LOG_CS_SAFE_READER, log_page);
7084  if (error != NO_ERROR)
7085  {
7086  assert (false); // failure is not acceptable
7087  vacuum_er_log_error (VACUUM_ER_LOG_ERROR, "cannot prefetch log page %d", log_pageid);
7088 
7089  error = ER_FAILED;
7090  goto end;
7091  }
7092  }
7093 
7094  vacuum_er_log (VACUUM_ER_LOG_MASTER, "VACUUM : prefetched %d log pages from %lld to %lld",
7096  (long long int) worker->prefetch_last_pageid);
7097 
7098 end:
7099  return error;
7100 }
7101 
7102 
7103 /*
7104  * vacuum_fetch_log_page () - Loads a log page to be processed by vacuum from vacuum block buffer or log page buffer or
7105  * disk log archive.
7106  *
7107  * thread_p (in):
7108  * log_pageid (in): log page logical id
7109  * log_page_p (in/out): pre-allocated buffer to store one log page
7110  *
7111  */
7112 static int
7113 vacuum_fetch_log_page (THREAD_ENTRY * thread_p, LOG_PAGEID log_pageid, LOG_PAGE * log_page_p)
7114 {
7115  int error = NO_ERROR;
7116 
7117  if (vacuum_is_thread_vacuum (thread_p))
7118  {
7119  // try to fetch from prefetched pages
7120  VACUUM_WORKER *worker = vacuum_get_vacuum_worker (thread_p);
7121 
7122  assert (worker != NULL);
7123  assert (log_page_p != NULL);
7124 
7126 
7127  if (worker->prefetch_first_pageid <= log_pageid && log_pageid <= worker->prefetch_last_pageid)
7128  {
7129  /* log page is cached */
7130  size_t page_index = log_pageid - worker->prefetch_first_pageid;
7131  memcpy (log_page_p, worker->prefetch_log_buffer + page_index * LOG_PAGESIZE, LOG_PAGESIZE);
7132 
7133  assert (log_page_p->hdr.logical_pageid == log_pageid); // should be the correct page
7134 
7136  return NO_ERROR;
7137  }
7138  else
7139  {
7141  "log page %lld is not in prefetched range %lld - %lld",
7142  log_pageid, worker->prefetch_first_pageid, worker->prefetch_last_pageid);
7143  }
7144  // fall through
7145  }
7146  else
7147  {
7148  // there are two possible paths here
7149  // 1. vacuum_process_log_block (when caller must be vacuum worker)
7150  // 2. vacuum_recover_lost_block_data (when caller is boot thread)
7151  // this must be second case
7152  }
7153  // need to fetch from log
7154 
7155  LOG_LSA req_lsa;
7156  req_lsa.pageid = log_pageid;
7157  req_lsa.offset = LOG_PAGESIZE;
7158  error = logpb_fetch_page (thread_p, &req_lsa, LOG_CS_SAFE_READER, log_page_p);
7159  if (error != NO_ERROR)
7160  {
7161  assert (false); // failure is not acceptable
7162  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "vacuum_fetch_log_page");
7163  error = ER_FAILED;
7164  }
7165 
7166  return error;
7167 }
7168 
7169 /*
7170  * print_not_vacuumed_to_log () - prints to log info related to a not vacuumed OID (either from HEAP or BTREE)
7171  *
7172  * rerturn: void.
7173  * oid (in): The not vacuumed instance OID
7174  * class_oid (in): The class to which belongs the oid
7175  * rec_header (in): The record header of the not vacuumed record
7176  * btree_node_type (in): If the oid is not vacuumed from BTREE then this is
7177  * the type node. If <0 then the OID comes from heap.
7178  *
7179  */
7180 static void
7181 print_not_vacuumed_to_log (OID * oid, OID * class_oid, MVCC_REC_HEADER * rec_header, int btree_node_type)
7182 {
7183 #define TEMP_BUFFER_SIZE 1024
7184  char mess[TEMP_BUFFER_SIZE], *p = mess;
7185  bool is_btree = (btree_node_type >= 0 ? true : false);
7186 
7187  if (is_btree)
7188  {
7189  p += sprintf (p, "Found not vacuumed BTREE record");
7190  }
7191  else
7192  {
7193  p += sprintf (p, "Found not vacuumed HEAP record");
7194  }
7195  p +=
7196  sprintf (p, " with oid=%d|%d|%d, class_oid=%d|%d|%d", (int) oid->volid, oid->pageid, (int) oid->slotid,
7197  (int) class_oid->volid, class_oid->pageid, (int) class_oid->slotid);
7198  if (MVCC_IS_FLAG_SET (rec_header, OR_MVCC_FLAG_VALID_INSID))
7199  {
7200  p += sprintf (p, ", insert_id=%llu", (unsigned long long int) MVCC_GET_INSID (rec_header));
7201  }
7202  else
7203  {
7204  p += sprintf (p, ", insert_id=missing");
7205  }
7206  if (MVCC_IS_HEADER_DELID_VALID (rec_header))
7207  {
7208  p += sprintf (p, ", delete_id=%llu", (unsigned long long int) MVCC_GET_DELID (rec_header));
7209  }
7210  else
7211  {
7212  p += sprintf (p, ", delete_id=missing");
7213  }
7214  p += sprintf (p, ", oldest_mvcc_id=%llu", (unsigned long long int) vacuum_Data.oldest_unvacuumed_mvccid);
7215  if (is_btree)
7216  {
7217  const char *type_str = NULL;
7218 
7219  switch (btree_node_type)
7220  {
7221  case BTREE_LEAF_NODE:
7222  type_str = "LEAF";
7223  break;
7224  case BTREE_NON_LEAF_NODE:
7225  type_str = "NON_LEAF";
7226  break;
7227  case BTREE_OVERFLOW_NODE:
7228  type_str = "OVERFLOW";
7229  break;
7230  default:
7231  type_str = "UNKNOWN";
7232  break;
7233  }
7234  p += sprintf (p, ", node_type=%s", type_str);
7235  }
7236  p += sprintf (p, "\n");
7237 
7238  er_log_debug (ARG_FILE_LINE, mess);
7239 }
7240 
7241 /*
7242  * vacuum_check_not_vacuumed_recdes () - checks if an OID should've been vacuumed (using a record descriptor)
7243  *
7244  * return: DISK_INVALID if the OID was not vacuumed, DISK_VALID if it was
7245  * and DISK_ERROR in case of an error.
7246  * thread_p (in):
7247  * oid (in): The not vacuumed instance OID
7248  * class_oid (in): The class to which the oid belongs
7249  * recdes (in): The not vacuumed record
7250  * btree_node_type (in): If the oid is not vacuumed from BTREE then this is
7251  * the type node. If <0 then the OID comes from heap.
7252  *
7253  */
7256  int btree_node_type)
7257 {
7258  MVCC_REC_HEADER rec_header;
7259 
7260  if (or_mvcc_get_header (recdes, &rec_header) != NO_ERROR)
7261  {
7262  return DISK_ERROR;
7263  }
7264 
7265  return vacuum_check_not_vacuumed_rec_header (thread_p, oid, class_oid, &rec_header, btree_node_type);
7266 }
7267 
7268 /*
7269  * is_not_vacuumed_and_lost () - checks if a record should've been vacuumed (using a record header)
7270  *
7271  * return: true if the record was not vacuumed and is completely lost.
7272  * thread_p (in):
7273  * rec_header (in): The header of the record to be checked
7274  *
7275  */
7276 static bool
7278 {
7280 
7281  res = mvcc_satisfies_vacuum (thread_p, rec_header, vacuum_Data.oldest_unvacuumed_mvccid);
7282  switch (res)
7283  {
7284  case VACUUM_RECORD_REMOVE:
7285  /* Record should have been vacuumed by now. */
7286  return true;
7287 
7289  /* Record insert & previous version should have been vacuumed by now. */
7290  return true;
7291 
7293  return false;
7294 
7295  default:
7296  return false;
7297  }
7298 }
7299 
7300 /*
7301  * vacuum_check_not_vacuumed_rec_header () - checks if an OID should've been vacuumed (using a record header)
7302  *
7303  * return: DISK_INVALID if the OID was not vacuumed, DISK_VALID if it was
7304  * and DISK_ERROR in case of an error.
7305  * thread_p (in):
7306  * oid (in): The not vacuumed instance OID
7307  * class_oid (in): The class to which belongs the oid
7308  * rec_header (in): The not vacuumed record header
7309  * btree_node_type (in): If the oid is not vacuumed from BTREE then this is
7310  * the type node. If <0 then the OID comes from heap.
7311  *
7312  */
7314 vacuum_check_not_vacuumed_rec_header (THREAD_ENTRY * thread_p, OID * oid, OID * class_oid, MVCC_REC_HEADER * rec_header,
7315  int btree_node_type)
7316 {
7317  if (is_not_vacuumed_and_lost (thread_p, rec_header))
7318  {
7319  OID cls_oid;
7320  if (class_oid == NULL || OID_ISNULL (class_oid))
7321  {
7322  if (heap_get_class_oid (thread_p, oid, &cls_oid) != S_SUCCESS)
7323  {
7324  ASSERT_ERROR ();
7325  return DISK_ERROR;
7326  }
7327  class_oid = &cls_oid;
7328  }
7329  print_not_vacuumed_to_log (oid, class_oid, rec_header, btree_node_type);
7330 
7331  assert (false);
7332  return DISK_INVALID;
7333  }
7334 
7335  return DISK_VALID;
7336 }
7337 
7338 /*
7339  * vacuum_get_first_page_dropped_files () - Get the first allocated vpid of vacuum_Dropped_files_vfid.
7340  *
7341  * return : VPID *
7342  * thread_p (in):
7343  * first_page_vpid (out):
7344  *
7345  */
7346 static int
7348 {
7349  assert (!VFID_ISNULL (&vacuum_Dropped_files_vfid));
7350  return file_get_sticky_first_page (thread_p, &vacuum_Dropped_files_vfid, first_page_vpid);
7351 }
7352 
7353 /*
7354  * vacuum_is_mvccid_vacuumed () - Return true if MVCCID should be vacuumed.
7355  * It must be older than vacuum_Data->oldest_unvacuumed_mvccid.
7356  *
7357  * return : True/false.
7358  * id (in) : MVCCID to check.
7359  */
7360 bool
7362 {
7363  if (id < vacuum_Data.oldest_unvacuumed_mvccid)
7364  {
7365  return true;
7366  }
7367 
7368  return false;
7369 }
7370 
7371 /*
7372  * vacuum_log_redoundo_vacuum_record () - Log vacuum of a REL or BIG heap record
7373  *
7374  * return : Error code.
7375  * thread_p (in) : Thread entry.
7376  * page_p (in) : Page pointer.
7377  * slotid (in) : slot id
7378  * undo_recdes (in) : record descriptor before vacuuming
7379  * reusable (in) :
7380  *
7381  * NOTE: Some values in slots array are modified and set to negative values.
7382  */
7383 static void
7384 vacuum_log_redoundo_vacuum_record (THREAD_ENTRY * thread_p, PAGE_PTR page_p, PGSLOTID slotid, RECDES * undo_recdes,
7385  bool reusable)
7386 {
7387  LOG_DATA_ADDR addr;
7388  LOG_CRUMB undo_crumbs[2];
7389  int num_undo_crumbs;
7390 
7391  assert (slotid >= 0 && slotid < ((SPAGE_HEADER *) page_p)->num_slots);
7392 
7393  /* Initialize log data. */
7394  addr.offset = slotid;
7395  addr.pgptr = page_p;
7396  addr.vfid = NULL;
7397 
7398  if (reusable)
7399  {
7401  }
7402 
7403  undo_crumbs[0].length = sizeof (undo_recdes->type);
7404  undo_crumbs[0].data = (char *) &undo_recdes->type;
7405  undo_crumbs[1].length = undo_recdes->length;
7406  undo_crumbs[1].data = undo_recdes->data;
7407  num_undo_crumbs = 2;
7408 
7409  /* Log undoredo with NULL redo crumbs - the redo function (vacuum_rv_redo_vacuum_heap_record) require only
7410  * the object's address to re-vacuum */
7411  log_append_undoredo_crumbs (thread_p, RVVAC_HEAP_RECORD_VACUUM, &addr, num_undo_crumbs, 0, undo_crumbs, NULL);
7412 }
7413 
7414 /*
7415  * vacuum_rv_undo_vacuum_heap_record () - undo function for RVVAC_HEAP_RECORD_VACUUM
7416  *
7417  * return : Error code.
7418  * thread_p (in) : Thread entry.
7419  * rcv (in) : Recovery structure.
7420  */
7421 int
7423 {
7424  rcv->offset = (rcv->offset & (~VACUUM_LOG_VACUUM_HEAP_MASK));
7425 
7426  return heap_rv_redo_insert (thread_p, rcv);
7427 }
7428 
7429 /*
7430  * vacuum_rv_redo_vacuum_heap_record () - redo function for RVVAC_HEAP_RECORD_VACUUM
7431  *
7432  * return : Error code.
7433  * thread_p (in) : Thread entry.
7434  * rcv (in) : Recovery structure.
7435  */
7436 int
7438 {
7439  INT16 slotid;
7440  bool reusable;
7441 
7442  slotid = (rcv->offset & (~VACUUM_LOG_VACUUM_HEAP_MASK));
7443  reusable = (rcv->offset & VACUUM_LOG_VACUUM_HEAP_REUSABLE) != 0;
7444 
7445  spage_vacuum_slot (thread_p, rcv->pgptr, slotid, reusable);
7446 
7447  if (spage_need_compact (thread_p, rcv->pgptr) == true)
7448  {
7449  (void) spage_compact (thread_p, rcv->pgptr);
7450  }
7451 
7452  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
7453 
7454  return NO_ERROR;
7455 }
7456 
7457 /*
7458  * vacuum_notify_server_crashed () - Notify vacuum that server has crashed and that recovery is running. After
7459  * recovery, when vacuum data is being loaded, vacuum will also recover the
7460  * block data buffer that had not been saved to vacuum data before crash.
7461  * The recovery LSA argument is used in case no MVCC operation log record is found
7462  * during recovery.
7463  *
7464  * return : Void.
7465  * recovery_lsa (in) : Recovery starting LSA.
7466  */
7467 void
7469 {
7470  LSA_COPY (&vacuum_Data.recovery_lsa, recovery_lsa);
7471 }
7472 
7473 /*
7474  * vacuum_notify_server_shutdown () - Notify vacuum that server shutdown was requested. It should stop executing new
7475  * jobs.
7476  *
7477  * return : Void.
7478  */
7479 void
7481 {
7482  vacuum_Data.shutdown_sequence.request_shutdown ();
7483 }
7484 
7485 #if !defined (NDEBUG)
7486 /*
7487  * vacuum_data_check_page_fix () - Check fix counts on vacuum data pages are not off.
7488  *
7489  * return : Void.
7490  * thread_p (in) : Thread entry.
7491  */
7492 static void
7494 {
7495  assert (pgbuf_get_fix_count ((PAGE_PTR) vacuum_Data.first_page) == 1);
7496  assert (vacuum_Data.last_page == vacuum_Data.first_page
7497  || pgbuf_get_fix_count ((PAGE_PTR) vacuum_Data.last_page) == 1);
7498  if (vacuum_Data.first_page == vacuum_Data.last_page)
7499  {
7500  assert (pgbuf_get_hold_count (thread_p) == 1);
7501  }
7502  else
7503  {
7504  assert (pgbuf_get_fix_count ((PAGE_PTR) vacuum_Data.last_page) == 1);
7505  assert (pgbuf_get_hold_count (thread_p) == 2);
7506  }
7507 }
7508 #endif /* !NDEBUG */
7509 
7510 /*
7511  * vacuum_rv_check_at_undo () - check and modify undo record header to satisfy vacuum status
7512  *
7513  * return : Error code.
7514  * thread_p (in) : Thread entry.
7515  * pgptr (in) : Page where record resides.
7516  * slotid (in) : Record slot.
7517  * rec_type (in) : Expected record type.
7518  *
7519  * Note: This function will update the record to be valid in terms of vacuuming. Insert ID and prev version
7520  * must be removed from the record at undo, if the record was subject to vacuuming but skipped
7521  * during an update/delete operation. This happens when the record is changed before vacuum reaches it,
7522  * and when it is reached its new header is different and not qualified for vacuum anymore.
7523  */
7524 int
7525 vacuum_rv_check_at_undo (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, INT16 slotid, INT16 rec_type)
7526 {
7527  MVCC_REC_HEADER rec_header;
7528  MVCC_SATISFIES_VACUUM_RESULT can_vacuum;
7530  char data_buffer[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
7531 
7532  /* get record header according to record type */
7533  if (rec_type == REC_BIGONE)
7534  {
7535  if (heap_get_mvcc_rec_header_from_overflow (pgptr, &rec_header, &recdes) != NO_ERROR)
7536  {
7537  assert_release (false);
7538  return ER_FAILED;
7539  }
7540  recdes.type = REC_BIGONE;
7541  }
7542  else
7543  {
7544  recdes.data = PTR_ALIGN (data_buffer, MAX_ALIGNMENT);
7545  recdes.area_size = DB_PAGESIZE;
7546  if (spage_get_record (thread_p, pgptr, slotid, &recdes, COPY) != S_SUCCESS)
7547  {
7548  assert_release (false);
7549  return ER_FAILED;
7550  }
7551 
7552  if (or_mvcc_get_header (&recdes, &rec_header) != NO_ERROR)
7553  {
7554  assert_release (false);
7555  return ER_FAILED;
7556  }
7557  }
7558 
7559  assert (recdes.type == rec_type);
7560 
7561  if (log_is_in_crash_recovery ())
7562  {
7563  /* always clear flags when recovering from crash - all the objects are visible anyway */
7564  if (MVCC_IS_FLAG_SET (&rec_header, OR_MVCC_FLAG_VALID_INSID))
7565  {
7566  /* Note: PREV_VERSION flag should be set only if VALID_INSID flag is set */
7568  }
7569  else
7570  {
7572  can_vacuum = VACUUM_RECORD_CANNOT_VACUUM;
7573  }
7574  }
7575  else
7576  {
7577  can_vacuum = mvcc_satisfies_vacuum (thread_p, &rec_header, log_Gl.mvcc_table.get_global_oldest_visible ());
7578  }
7579 
7580  /* it is impossible to restore a record that should be removed by vacuum */
7581  assert (can_vacuum != VACUUM_RECORD_REMOVE);
7582 
7583  if (can_vacuum == VACUUM_RECORD_DELETE_INSID_PREV_VER)
7584  {
7585  /* the undo/redo record was qualified to have its insid and prev version vacuumed;
7586  * do this here because it is possible that vacuum have missed it during update/delete operation */
7587  if (rec_type == REC_BIGONE)
7588  {
7590  MVCC_SET_INSID (&rec_header, MVCCID_ALL_VISIBLE);
7591  LSA_SET_NULL (&rec_header.prev_version_lsa);
7592 
7593  if (heap_set_mvcc_rec_header_on_overflow (pgptr, &rec_header) != NO_ERROR)
7594  {
7595  assert_release (false);
7596  return ER_FAILED;
7597  }
7598  }
7599  else
7600  {
7602 
7603  if (or_mvcc_set_header (&recdes, &rec_header) != NO_ERROR)
7604  {
7605  assert_release (false);
7606  return ER_FAILED;
7607  }
7608 
7609  /* update the record */
7610  if (spage_update (thread_p, pgptr, slotid, &recdes) != SP_SUCCESS)
7611  {
7612  assert_release (false);
7613  return ER_FAILED;
7614  }
7615  }
7616 
7617  pgbuf_set_dirty (thread_p, pgptr, DONT_FREE);
7618  }
7619 
7620  return NO_ERROR;
7621 }
7622 
7623 /*
7624  * vacuum_is_empty() - Checks if the vacuum is empty.
7625  *
7626  * return :- true or false
7627  */
7628 bool
7630 {
7631  if (vacuum_Data.first_page->index_unvacuumed == vacuum_Data.first_page->index_free)
7632  {
7633  assert (vacuum_Data.first_page == vacuum_Data.last_page);
7634  assert (vacuum_Data.last_page->index_unvacuumed == 0);
7635  return true;
7636  }
7637 
7638  return false;
7639 }
7640 
7641 /*
7642  * vacuum_sa_reflect_last_blockid () - Update vacuum last blockid on SA_MODE
7643  *
7644  * thread_p(in) :- Thread context.
7645  */
7646 void
7648 {
7649  if (VPID_ISNULL (&vacuum_Data_load.vpid_first))
7650  {
7651  // database is freshly created or boot was aborted without doing anything
7652  return;
7653  }
7654  if (vacuum_Data.is_restoredb_session)
7655  {
7656  // restoredb doesn't vacuum; we cannot do this here
7657  return;
7658  }
7659 
7661 
7663 
7665  "vacuum_sa_reflect_last_blockid: last_blockid=%lld, append_prev_pageid=%d\n",
7666  (long long int) last_blockid, (int) log_Gl.append.prev_lsa.pageid);
7667  if (last_blockid == VACUUM_NULL_LOG_BLOCKID)
7668  {
7670  return;
7671  }
7672 
7673  vacuum_Data.set_last_blockid (last_blockid);
7674  log_Gl.hdr.vacuum_last_blockid = last_blockid;
7676 
7678 }
7679 
7680 static void
7682 {
7683  assert (vacuum_is_empty ());
7684 
7685  VACUUM_DATA_PAGE *data_page = vacuum_Data.first_page;
7686  assert (data_page != NULL);
7687 
7688  /* We should have only 1 page in vacuum_Data. */
7689  assert (vacuum_Data.first_page == vacuum_Data.last_page);
7690 
7691  vacuum_init_data_page_with_last_blockid (thread_p, vacuum_Data.first_page, vacuum_Data.get_last_blockid ());
7692 
7694  "vacuum_data_empty_update_last_blockid: update last_blockid=%lld in page %d|%d at lsa %lld|%d",
7695  (long long int) vacuum_Data.get_last_blockid (), PGBUF_PAGE_STATE_ARGS ((PAGE_PTR) (data_page)));
7696 }
7697 
7698 /*
7699  * vacuum_convert_thread_to_master () - convert thread to vacuum master
7700  *
7701  * thread_p (in) : thread entry
7702  * save_type (out) : thread entry old type
7703  */
7704 static void
7706 {
7707  if (thread_p == NULL)
7708  {
7709  thread_p = thread_get_thread_entry_info ();
7710  }
7711  save_type = thread_p->type;
7712  thread_p->type = TT_VACUUM_MASTER;
7713  thread_p->vacuum_worker = &vacuum_Master;
7714  if (thread_p->get_system_tdes () == NULL)
7715  {
7716  thread_p->claim_system_worker ();
7717  }
7718 }
7719 
7720 /*
7721  * vacuum_convert_thread_to_worker - convert this thread to a vacuum worker
7722  *
7723  * thread_p (in) : thread entry
7724  * worker (in) : vacuum worker context
7725  * save_type (out) : save previous thread type
7726  */
7727 static void
7729 {
7730  if (thread_p == NULL)
7731  {
7732  thread_p = thread_get_thread_entry_info ();
7733  }
7734  save_type = thread_p->type;
7735  thread_p->type = TT_VACUUM_WORKER;
7736  thread_p->vacuum_worker = worker;
7737  if (vacuum_worker_allocate_resources (thread_p, thread_p->vacuum_worker) != NO_ERROR)
7738  {
7739  assert_release (false);
7740  }
7741  if (thread_p->get_system_tdes () == NULL)
7742  {
7743  thread_p->claim_system_worker ();
7744  }
7745 }
7746 
7747 /*
7748  * vacuum_restore_thread - restore thread previously converted to a vacuum worker
7749  *
7750  * thread_p (in) : thread entry
7751  * save_type (in) : saved type of thread entry
7752  */
7753 static void
7755 {
7756  if (thread_p == NULL)
7757  {
7758  thread_p = thread_get_thread_entry_info ();
7759  }
7760  thread_p->type = save_type;
7761  thread_p->vacuum_worker = NULL;
7762  thread_p->retire_system_worker ();
7763  thread_p->tran_index = LOG_SYSTEM_TRAN_INDEX; // restore tran_index
7764 }
7765 
7766 /*
7767  * vacuum_rv_es_nop () - Skip recovery operation for external storage.
7768  *
7769  * return : NO_ERROR.
7770  * thread_p (in) : Thread entry.
7771  * rcv (in) : Recovery data.
7772  */
7773 int
7775 {
7776  /* Do nothing */
7777  return NO_ERROR;
7778 }
7779 
7780 #if defined (SERVER_MODE)
7781 /*
7782  * vacuum_notify_es_deleted () - External storage file cannot be deleted
7783  * when transaction is ended and MVCC is
7784  * used. Vacuum must be notified instead and
7785  * file is deleted when it is no longer
7786  * visible.
7787  *
7788  * return : Void.
7789  * thread_p (in) : Thread entry.
7790  * uri (in) : File location URI.
7791  */
7792 void
7793 vacuum_notify_es_deleted (THREAD_ENTRY * thread_p, const char *uri)
7794 {
7795 #define ES_NOTIFY_VACUUM_FOR_DELETE_BUFFER_SIZE \
7796  (INT_ALIGNMENT + /* Aligning buffer start */ \
7797  OR_INT_SIZE + /* String length */ \
7798  ES_MAX_URI_LEN + /* URI string */ \
7799  INT_ALIGNMENT) /* Alignment of packed string */
7800 
7801  LOG_DATA_ADDR addr;
7802  int length;
7803  char data_buf[ES_NOTIFY_VACUUM_FOR_DELETE_BUFFER_SIZE];
7804  char *data = NULL;
7805 
7806  addr.offset = -1;
7807  addr.pgptr = NULL;
7808  addr.vfid = NULL;
7809 
7810  /* Compute the total length required to pack string */
7811  length = or_packed_string_length (uri, NULL);
7812 
7813  /* Check there is enough space in data buffer to pack the string */
7814  assert (length <= (int) (ES_NOTIFY_VACUUM_FOR_DELETE_BUFFER_SIZE - INT_ALIGNMENT));
7815 
7816  /* Align buffer to prepare for packing string */
7817  data = PTR_ALIGN (data_buf, INT_ALIGNMENT);
7818 
7819  /* Pack string */
7820  (void) or_pack_string (data, uri);
7821 
7822  /* This is not actually ever undone, but vacuum will process undo data of log entry. */
7823  log_append_undo_data (thread_p, RVES_NOTIFY_VACUUM, &addr, length, data);
7824 }
7825 #endif /* SERVER_MODE */
7826 
7827 //
7828 // vacuum_check_shutdown_interruption () - check error occurs due to shutdown interrupting
7829 //
7830 // thread_p (in) : thread entry
7831 // error_code (in) : error code
7832 //
7833 static void
7834 vacuum_check_shutdown_interruption (const THREAD_ENTRY * thread_p, int error_code)
7835 {
7836  ASSERT_ERROR ();
7837  // interrupted is accepted if:
7838  // 1. this is an active worker thread
7839  // 2. or server is shutting down
7840  assert (!vacuum_is_thread_vacuum_worker (thread_p) || (thread_p->shutdown && error_code == ER_INTERRUPTED));
7841 }
7842 
7843 //
7844 // vacuum_reset_data_after_copydb () - reset vacuum data after copydb. since complete vacuum is run on copied database
7845 // there should be no actual data; however, last_blockid remains set in first
7846 // data entry
7847 //
7848 int
7850 {
7851  assert (vacuum_Data.first_page == NULL && vacuum_Data.last_page == NULL);
7852  assert (!VFID_ISNULL (&vacuum_Data.vacuum_data_file));
7853 
7854  int error_code = NO_ERROR;
7855  FILE_DESCRIPTORS fdes;
7856 
7857  error_code = file_descriptor_get (thread_p, &vacuum_Data.vacuum_data_file, &fdes);
7858  if (error_code != NO_ERROR)
7859  {
7860  ASSERT_ERROR ();
7861  return error_code;
7862  }
7864 
7865  vacuum_Data.first_page = vacuum_fix_data_page (thread_p, &fdes.vacuum_data.vpid_first);
7866  if (vacuum_Data.first_page == NULL)
7867  {
7868  ASSERT_ERROR_AND_SET (error_code);
7869  return error_code;
7870  }
7871 
7872  // there should be no data
7873  assert (VPID_ISNULL (&vacuum_Data.first_page->next_page));
7874  assert (vacuum_Data.first_page->index_free == 0);
7875 
7877 
7878  vacuum_er_log (VACUUM_ER_LOG_VACUUM_DATA, "Reset vacuum data page %d|%d, lsa %lld|%d, after copydb",
7879  PGBUF_PAGE_STATE_ARGS ((PAGE_PTR) vacuum_Data.first_page));
7880 
7882 
7883  return NO_ERROR;
7884 }
7885 
7886 static void
7889 {
7890  vacuum_data_initialize_new_page (thread_p, data_page);
7891  data_page->data->blockid = blockid;
7892  log_append_redo_data2 (thread_p, RVVAC_DATA_INIT_NEW_PAGE, NULL, (PAGE_PTR) data_page, 0, sizeof (blockid), &blockid);
7893  vacuum_set_dirty_data_page (thread_p, data_page, DONT_FREE);
7894 }
7895 
7896 // *INDENT-OFF*
7897 //
7898 // C++
7899 //
7900 
7901 //
7902 // vacuum_data
7903 //
7904 bool
7906 {
7907  return has_one_page () && first_page->is_empty ();
7908 }
7909 
7910 bool
7912 {
7913  return first_page == last_page;
7914 }
7915 
7918 {
7919  return m_last_blockid;
7920 }
7921 
7924 {
7925  if (is_empty ())
7926  {
7927  return m_last_blockid;
7928  }
7929  return first_page->get_first_blockid ();
7930 }
7931 
7932 const VACUUM_DATA_ENTRY &
7934 {
7935  assert (!is_empty ());
7936  return first_page->data[0];
7937 }
7938 
7939 void
7941 {
7942  // first, make sure we string flags
7943  blockid = VACUUM_BLOCKID_WITHOUT_FLAGS (blockid);
7944 
7945 #if !defined (NDEBUG)
7946  // sanity check - last_blockid should be less than last LSA's block
7948  VACUUM_LOG_BLOCKID log_blockid = vacuum_get_log_blockid (log_lsa.pageid);
7949  assert (blockid < log_blockid);
7950 #endif // NDEBUG
7951 
7952  m_last_blockid = blockid;
7953 }
7954 
7955 void
7957 {
7958  cubthread::entry *thread_p = &cubthread::get_entry ();
7959  bool updated_oldest_unvacuumed = false;
7960 
7961  // three major operations need to be done here:
7962  //
7963  // 1. mark finished jobs as vacuumed/interrupted
7964  // 2. consume new blocks from log
7965  // 3. maintain oldest unvacuumed mvccid (for sanity checks)
7966 
7967  // For 3rd part, when vacuum data is not empty, the operation is trivial - just set to first block data oldest mvccid.
7968  // (the algorithm ensures that entries oldest mvccid is always ascending)
7969  // When vacuum data is empty, just don't update oldest_unvacuumed
7970 
7971  // first remove vacuumed blocks
7972  vacuum_data_mark_finished (thread_p);
7973 
7974  // then consume new generated blocks
7976 
7977  if (!vacuum_Data.is_empty ())
7978  {
7979  // buffer was not empty, we can trivially update to first entry oldest mvccid
7980  upgrade_oldest_unvacuumed (get_first_entry ().oldest_visible_mvccid);
7981  }
7982 }
7983 
7984 void
7986 {
7987  // no thread safety needs to be considered here
7989  {
7990  // log_Gl.hdr.oldest_visible_mvccid may not remain uninitialized
7992  }
7993  if (vacuum_Data.is_empty ())
7994  {
7995  oldest_unvacuumed_mvccid = log_Gl.hdr.oldest_visible_mvccid;
7996  }
7997  else
7998  {
7999  // set on first block oldest mvccid
8000  oldest_unvacuumed_mvccid = first_page->data[0].oldest_visible_mvccid;
8001  assert (oldest_unvacuumed_mvccid <= log_Gl.hdr.oldest_visible_mvccid);
8002  }
8003 }
8004 
8005 void
8007 {
8008  assert (oldest_unvacuumed_mvccid <= mvccid);
8009  oldest_unvacuumed_mvccid = mvccid;
8010 }
8011 
8012 //
8013 // vacuum_data_entry
8014 //
8017  , start_lsa (lsa)
8018  , oldest_visible_mvccid (oldest)
8019  , newest_mvccid (newest)
8020 {
8021  assert (!lsa.is_null ());
8022  assert (MVCCID_IS_VALID (oldest));
8023  assert (MVCCID_IS_VALID (newest));
8024  assert (oldest <= newest);
8026 }
8027 
8029  : vacuum_data_entry (hdr.mvcc_op_log_lsa, hdr.oldest_visible_mvccid, hdr.newest_block_mvccid)
8030 {
8031 }
8032 
8035 {
8037 }
8038 
8039 bool
8041 {
8043 }
8044 
8045 bool
8047 {
8049 }
8050 
8051 bool
8053 {
8055 }
8056 
8057 bool
8059 {
8061 }
8062 
8063 void
8065 {
8068 }
8069 
8070 void
8072 {
8074 }
8075 
8076 void
8078 {
8081 }
8082 
8083 //
8084 // vacuum_data_page
8085 //
8086 bool
8088 {
8089  return index_unvacuumed == index_free;
8090 }
8091 
8092 bool
8094 {
8095  return index >= index_unvacuumed || index < index_free;
8096 }
8097 
8098 INT16
8100 {
8101  if (is_empty ())
8102  {
8103  return INDEX_NOT_FOUND;
8104  }
8105 
8106  VACUUM_LOG_BLOCKID first_blockid = data[index_unvacuumed].get_blockid ();
8107  if (first_blockid > blockid)
8108  {
8109  return INDEX_NOT_FOUND;
8110  }
8111  VACUUM_LOG_BLOCKID last_blockid = data[index_free - 1].get_blockid ();
8112  if (last_blockid < blockid)
8113  {
8114  return INDEX_NOT_FOUND;
8115  }
8116  INT16 index_of_blockid = (INT16) (blockid - first_blockid) + index_unvacuumed;
8117  assert (data[index_of_blockid].get_blockid () == blockid);
8118  return index_of_blockid;
8119 }
8120 
8123 {
8124  assert (!is_empty ());
8125  return data[index_unvacuumed].get_blockid ();
8126 }
8127 
8128 //
8129 // vacuum_job_cursor
8130 //
8132  : m_blockid (VACUUM_NULL_LOG_BLOCKID)
8133  , m_page (NULL)
8134  , m_index (vacuum_data_page::INDEX_NOT_FOUND)
8135 {
8136 }
8137 
8139 {
8140  // check it was unloaded
8141  assert (m_page == NULL);
8142 }
8143 
8144 bool
8146 {
8147  return is_loaded (); // if loaded, must be valid
8148 }
8149 
8150 bool
8152 {
8154  return m_page != NULL;
8155 }
8156 
8159 {
8160  return m_blockid;
8161 }
8162 
8163 const VPID &
8165 {
8167 }
8168 
8171 {
8172  assert (m_page != NULL);
8173  return m_page;
8174 }
8175 
8176 INT16
8178 {
8179  return m_index;
8180 }
8181 
8182 const vacuum_data_entry &
8184 {
8185  assert (is_valid ());
8186 
8187  return m_page->data[m_index];
8188 }
8189 
8190 void
8192 {
8193  assert (is_valid ());
8194  cubthread::entry * thread_p = &cubthread::get_entry ();
8195  vacuum_data_entry &entry = m_page->data[m_index];
8196  entry.set_job_in_progress ();
8197  if (!entry.was_interrupted ())
8198  {
8199  /* Log that a new job is starting. After recovery, the system will then know this job was partially executed.
8200  * Logging the start of a job already interrupted is not necessary. We do it here rather than when vacuum job
8201  * is really started to avoid locking vacuum data again (logging vacuum data cannot be done without locking).
8202  */
8204  log_append_redo_data (thread_p, RVVAC_START_JOB, &addr, 0, NULL);
8205  }
8207 }
8208 
8209 void
8211 {
8212  unload (); // can't be loaded while updating
8213  vacuum_Data.update ();
8215  load ();
8216 }
8217 
8218 void
8220 {
8221  // can only increment
8222  assert (m_blockid <= blockid);
8223 
8224  m_blockid = blockid;
8225 
8226  // make sure m_page/m_index point to right blockid
8227  if (m_blockid > vacuum_Data.get_last_blockid ())
8228  {
8229  // cursor consumed all data
8230  assert (m_blockid == vacuum_Data.get_last_blockid () + 1);
8231  unload ();
8232  }
8233  else
8234  {
8235  assert (m_blockid >= vacuum_Data.get_first_blockid ());
8236  reload ();
8237  }
8238 }
8239 
8240 void
8242 {
8243  change_blockid (m_blockid + 1);
8246 }
8247 
8248 void
8250 {
8251  m_blockid = vacuum_Data.get_first_blockid ();
8252 }
8253 
8254 void
8256 {
8257  if (vacuum_Data.is_empty ())
8258  {
8259  // it doesn't matter
8260  return;
8261  }
8262 
8263  VACUUM_LOG_BLOCKID first_blockid = vacuum_Data.get_first_blockid ();
8264  if (m_blockid < first_blockid)
8265  {
8266  // cursor was left behind
8267  vacuum_er_log (VACUUM_ER_LOG_JOBS, "readjust cursor blockid from %lld to %lld",
8268  (long long int) m_blockid, (long long int) first_blockid);
8269  m_blockid = first_blockid;
8270  }
8271 }
8272 
8273 void
8275 {
8277  if (m_page != NULL)
8278  {
8280  }
8282 }
8283 
8284 void
8286 {
8287  assert (!is_loaded ()); // would not be optimal if already loaded
8288 
8289  search ();
8291 }
8292 
8293 void
8295 {
8296  if (m_page != NULL)
8297  {
8298  // check currently pointed page
8301  {
8302  // not in page
8303  unload ();
8304  }
8305  else
8306  {
8307  // found in page, reload finished
8308  return;
8309  }
8310  }
8311  // must search for blockid
8312  search ();
8313 }
8314 
8315 void
8317 {
8318  assert (m_page == NULL);
8319 
8320  vacuum_data_page *data_page = vacuum_Data.first_page;
8321  assert (data_page != NULL);
8322 
8323  while (true)
8324  {
8325  m_index = data_page->get_index_of_blockid (m_blockid);
8327  {
8328  m_page = data_page;
8329  return;
8330  }
8331 
8332  // advance to next page
8333  VPID next_vpid = data_page->next_page;
8335  if (VPID_ISNULL (&next_vpid))
8336  {
8337  // no next page
8338  return;
8339  }
8340  data_page = vacuum_fix_data_page (&cubthread::get_entry (), &next_vpid);
8341  }
8342 }
8343 
8344 //
8345 // vacuum_shutdown_sequence
8346 //
8348  : m_state (NO_SHUTDOWN)
8349 #if defined (SERVER_MODE)
8350  , m_state_mutex ()
8351  , m_condvar ()
8352 #endif // SERVER_MODE
8353 {
8354 }
8355 
8356 void
8358 {
8359 #if defined (SERVER_MODE)
8361  {
8362  return;
8363  }
8364  std::unique_lock<std::mutex> ulock { m_state_mutex };
8365  assert (m_state == NO_SHUTDOWN);
8366  m_state = SHUTDOWN_REQUESTED;
8367  // must wait until shutdown is registered
8368  m_condvar.wait (ulock, [this] ()
8369  {
8370  return m_state == SHUTDOWN_REGISTERED || vacuum_Master_daemon == NULL;
8371  });
8372  if (m_state == SHUTDOWN_REQUESTED && vacuum_Master_daemon == NULL)
8373  {
8374  // no one to register, but myself
8376  }
8378 #else // SA_MODE
8380 #endif // SA_MODE
8381 }
8382 
8383 bool
8385 {
8386  return m_state != NO_SHUTDOWN;
8387 }
8388 
8389 bool
8391 {
8392  if (m_state == NO_SHUTDOWN)
8393  {
8394  return false;
8395  }
8396  else if (m_state == SHUTDOWN_REGISTERED)
8397  {
8398  return true;
8399  }
8400  else
8401  {
8402 #if defined (SA_MODE)
8403  assert (false);
8404  return true;
8405 #else // SERVER_MODE
8406  // register
8407  std::unique_lock<std::mutex> ulock { m_state_mutex };
8408  assert (m_state == SHUTDOWN_REQUESTED);
8410  ulock.unlock ();
8411  m_condvar.notify_one ();
8412  return true;
8413 #endif
8414  }
8415 }
8416 // *INDENT-ON*
VACUUM_TRACK_DROPPED_FILES * vacuum_Track_dropped_files
Definition: vacuum.c:636
#define LOG_READ_ADD_ALIGN(thread_p, add, lsa, log_pgptr)
Definition: log_impl.h:141
PGLENGTH offset
Definition: recovery.h:201
bool is_null() const
Definition: log_lsa.hpp:92
char * PAGE_PTR
LOG_REC_UNDO undo
Definition: log_record.hpp:209
#define OID_INITIALIZER
Definition: oid.h:36
Definition: vacuum.c:95
int vacuum_rv_redo_data_finished(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:4896
static void vacuum_copy_data_from_log(THREAD_ENTRY *thread_p, char *area, int length, LOG_LSA *log_lsa, LOG_PAGE *log_page)
Definition: vacuum.c:3771
#define PGBUF_PAGE_VPID_AS_ARGS(pg)
Definition: page_buffer.h:53
INT16 index_free
Definition: vacuum.c:189
LOG_PAGEID prefetch_last_pageid
Definition: vacuum.h:126
LOG_SYSOP_END_TYPE type
Definition: log_record.hpp:297
#define vacuum_er_log_error(er_log_level, msg,...)
Definition: vacuum.h:69
cubthread::entry * thread_get_thread_entry_info(void)
#define VACUUM_MAX_WORKER_COUNT
INT16 record_type
Definition: vacuum.c:502
#define NO_ERROR
Definition: error_code.h:46
LOG_PAGEID keep_from_log_pageid
Definition: vacuum.c:364
#define VACUUM_LAST_LOG_PAGEID_IN_BLOCK(blockid)
Definition: vacuum.c:75
int area_size
void log_append_redo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:1979
bool is_task_queue_full() const
#define VFID_EQ(vfid_ptr1, vfid_ptr2)
Definition: file_manager.h:75
VACUUM_WORKER vacuum_Workers[VACUUM_MAX_WORKER_COUNT]
Definition: vacuum.c:489
#define MVCC_IS_HEADER_DELID_VALID(rec_header_p)
Definition: mvcc.h:87
VACUUM_DATA_ENTRY data[1]
Definition: vacuum.c:192
VACUUM_LOG_BLOCKID vacuum_get_log_blockid(LOG_PAGEID pageid)
Definition: vacuum.c:5612
int heap_get_class_oid_from_page(THREAD_ENTRY *thread_p, PAGE_PTR page_p, OID *class_oid)
Definition: heap_file.c:18824
#define MVCC_GET_INSID(header)
Definition: mvcc.h:51
#define VACUUM_PERF_HEAP_START(thread_p, helper)
Definition: vacuum.c:531
const VPID & get_page_vpid() const
Definition: vacuum.c:8164
#define VACUUM_ER_LOG_RECOVERY
Definition: vacuum.h:55
MVCCID update_global_oldest_visible()
Definition: mvcc_table.cpp:618
#define BTID_AS_ARGS(btid)
void log_append_undoredo_data2(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, const VFID *vfid, PAGE_PTR pgptr, PGLENGTH offset, int undo_length, int redo_length, const void *undo_data, const void *redo_data)
Definition: log_manager.c:1861
LOG_LSA start_lsa
Definition: vacuum.c:99
#define IO_PAGESIZE
MVCCID m_oldest_visible_mvccid
Definition: vacuum.c:818
#define BO_IS_SERVER_RESTARTED()
Definition: boot_sr.h:84
int vacuum_heap_page(THREAD_ENTRY *thread_p, VACUUM_HEAP_OBJECT *heap_objects, int n_heap_objects, MVCCID threshold_mvccid, HFID *hfid, bool *reusable, bool was_interrupted)
Definition: vacuum.c:1546
MVCCID oldest_visible_mvccid
Definition: vacuum.c:100
void log_append_undoredo_crumbs(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int num_undo_crumbs, int num_redo_crumbs, const LOG_CRUMB *undo_crumbs, const LOG_CRUMB *redo_crumbs)
Definition: log_manager.c:2030
char * or_unpack_string(char *ptr, char **string)
BTID * sys_btid
Definition: btree.h:121
#define ASSERT_ERROR()
void claim_system_worker()
VACUUM_HEAP_OBJECT * heap_objects
Definition: vacuum.h:114
void change_blockid(VACUUM_LOG_BLOCKID blockid)
Definition: vacuum.c:8219
#define VACUUM_BLOCK_IS_INTERRUPTED(blockid)
Definition: vacuum.c:172
LOG_DATA data
Definition: log_record.hpp:182
bool pgbuf_has_any_non_vacuum_waiters(PAGE_PTR pgptr)
int vacuum_reset_data_after_copydb(THREAD_ENTRY *thread_p)
Definition: vacuum.c:7849
int file_descriptor_update(THREAD_ENTRY *thread_p, const VFID *vfid, void *des_new)
bool is_valid() const
Definition: vacuum.c:8145
void logpb_fatal_error(THREAD_ENTRY *thread_p, bool logexit, const char *file_name, const int lineno, const char *fmt,...)
#define HFID_INITIALIZER
int file_dealloc(THREAD_ENTRY *thread_p, const VFID *vfid, const VPID *vpid, FILE_TYPE file_type_hint)
int vacuum_rv_set_next_page_dropped_files(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:6732
static int vacuum_get_first_page_dropped_files(THREAD_ENTRY *thread_p, VPID *first_page_vpid)
Definition: vacuum.c:7347
#define VACUUM_ER_LOG_NONE
Definition: vacuum.h:45
#define pthread_mutex_init(a, b)
Definition: area_alloc.c:48
#define LOG_DATA_ADDR_INITIALIZER
Definition: log_append.hpp:63
vacuum_data_entry()=default
void LSA_COPY(log_lsa *plsa1, const log_lsa *plsa2)
Definition: log_lsa.hpp:139
VACUUM_WORKER * claim_worker()
Definition: vacuum.c:839
#define MAX_SLOTS_IN_PAGE
Definition: vacuum.c:66
static void vacuum_log_cleanup_dropped_files(THREAD_ENTRY *thread_p, PAGE_PTR page_p, INT16 *indexes, INT16 n_indexes)
Definition: vacuum.c:6617
VACUUM_DATA_PAGE * first_page
Definition: vacuum.c:368
void vacuum_stop_master(THREAD_ENTRY *thread_p)
Definition: vacuum.c:1359
#define VPID_COPY(dest_ptr, src_ptr)
Definition: dbtype_def.h:909
const int LOG_WORKER_POOL_VACUUM
bool is_restoredb_session
Definition: vacuum.c:382
void vacuum_rv_redo_append_data_dump(FILE *fp, int length, void *data)
Definition: vacuum.c:5346
LOG_VACUUM_INFO vacuum_info
Definition: log_record.hpp:211
#define LOGAREA_SIZE
Definition: log_impl.h:116
#define VACUUM_BLOCK_STATUS_IS_IN_PROGRESS(blockid)
Definition: vacuum.c:159
#define GET_ZIP_LEN(length)
Definition: log_compress.h:36
int logpb_fetch_page(THREAD_ENTRY *thread_p, const LOG_LSA *req_lsa, LOG_CS_ACCESS_MODE access_mode, LOG_PAGE *log_pgptr)
static API_MUTEX mutex
Definition: api_util.c:72
void start_job_on_current_entry() const
Definition: vacuum.c:8191
#define ER_FAILED
Definition: error_code.h:47
const int LOG_SYSTEM_TRAN_INDEX
MVCCID oldest_unvacuumed_mvccid
Definition: vacuum.c:366
#define VACUUM_LOG_DATA_ENTRY_AS_ARGS(data)
Definition: vacuum.c:657
#define VACUUM_LOG_VACUUM_HEAP_ALL_VACUUMED
Definition: vacuum.c:547
void logpb_vacuum_reset_log_header_cache(THREAD_ENTRY *thread_p, LOG_HEADER *loghdr)
int private_lru_index
Definition: vacuum.h:122
LOG_GLOBAL log_Gl
LOG_HEADER hdr
Definition: log_impl.h:653
int file_get_type(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_TYPE *ftype_out)
static void vacuum_log_redoundo_vacuum_record(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slotid, RECDES *undo_recdes, bool reusable)
Definition: vacuum.c:7384
#define pthread_mutex_unlock(a)
Definition: area_alloc.c:51
#define VACUUM_LOG_DATA_ENTRY_MSG(name)
Definition: vacuum.c:655
FILE_TYPE
Definition: file_manager.h:38
LOG_HDRPAGE hdr
Definition: log_storage.hpp:84
MVCCID oldest_visible_mvccid
VPID vpid_first
Definition: vacuum.c:435
int vacuum_rv_notify_dropped_file(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:6289
#define LOG_GET_LOG_RECORD_HEADER(log_page_p, lsa)
Definition: log_record.hpp:406
void pgbuf_unfix_all(THREAD_ENTRY *thread_p)
Definition: page_buffer.c:2656
VPID next_page
Definition: vacuum.c:187
#define MVCC_IS_HEADER_INSID_NOT_ALL_VISIBLE(rec_header_p)
Definition: mvcc.h:91
#define MVCC_ID_FOLLOW_OR_EQUAL(id1, id2)
Definition: mvcc.h:138
lockfree::circular_queue< vacuum_data_entry > * vacuum_Block_data_buffer
Definition: vacuum.c:458
VACUUM_DROPPED_FILE dropped_files[1]
Definition: vacuum.c:585
LOG_PAGEID prefetch_first_pageid
Definition: vacuum.h:125
void logpb_force_flush_pages(THREAD_ENTRY *thread_p)
char * or_pack_string(char *ptr, const char *string)
lockfree::circular_queue< VACUUM_LOG_BLOCKID > * vacuum_Finished_job_queue
Definition: vacuum.c:466
vacuum_shutdown_sequence shutdown_sequence
Definition: vacuum.c:376
#define ASSERT_ERROR_AND_SET(error_code)
#define VACUUM_BLOCK_STATUS_SET_AVAILABLE(blockid)
Definition: vacuum.c:169
void thread_sleep(double millisec)
SPAGE_SLOT * spage_get_slot(PAGE_PTR page_p, PGSLOTID slot_id)
#define VACUUM_DATA_PAGE_HEADER_SIZE
Definition: vacuum.c:203
#define LSA_INITIALIZER
Definition: log_lsa.hpp:76
#define OR_MVCC_FLAG_VALID_INSID
#define assert_release(e)
Definition: error_manager.h:96
static VPID vacuum_Dropped_files_vpid
Definition: vacuum.c:564
void pgbuf_set_dirty(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, bool free_page)
Definition: page_buffer.c:4280
VACUUM_LOG_BLOCKID get_first_blockid() const
Definition: vacuum.c:7923
MVCCID delete_mvccid
Definition: btree.h:518
int vacuum_rv_es_nop(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:7774
static bool vacuum_is_empty(void)
Definition: vacuum.c:7629
resource_shared_pool< VACUUM_WORKER > * m_pool
Definition: vacuum.c:891
void pgbuf_notify_vacuum_follows(THREAD_ENTRY *thread_p, PAGE_PTR page)
#define VFID_AS_ARGS(vfidp)
Definition: dbtype_def.h:892
static void vacuum_init_data_page_with_last_blockid(THREAD_ENTRY *thread_p, VACUUM_DATA_PAGE *data_page, VACUUM_LOG_BLOCKID blockid)
Definition: vacuum.c:7887
int or_mvcc_get_header(RECDES *record, MVCC_REC_HEADER *mvcc_header)
void heap_page_set_vacuum_status_none(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23755
#define MVCCID_NULL
char log_Name_active[PATH_MAX]
Definition: log_global.c:90
void log_sysop_start(THREAD_ENTRY *thread_p)
Definition: log_manager.c:3578
const VACUUM_DATA_ENTRY & get_first_entry() const
Definition: vacuum.c:7933
void vacuum_notify_server_shutdown(void)
Definition: vacuum.c:7480
INT16 heap_rv_remove_flags_from_offset(INT16 offset)
Definition: heap_file.c:23901
#define OR_MVCC_FLAG_VALID_DELID
#define OR_MVCCID_SIZE
VOLID volid
Definition: log_record.hpp:158
static int vacuum_compare_blockids(const void *ptr1, const void *ptr2)
Definition: vacuum.c:4078
#define LOG_IS_MVCC_HEAP_OPERATION(rcvindex)
Definition: mvcc.h:239
int undo_data_buffer_capacity
Definition: vacuum.h:119
#define VACUUM_FIRST_LOG_PAGEID_IN_BLOCK(blockid)
Definition: vacuum.c:72
#define VACUUM_CLEANUP_DROPPED_FILES_MAX_REDO_CRUMBS
RECDES forward_recdes
Definition: vacuum.c:521
#define OID_SET_NULL(oidp)
Definition: oid.h:85
void on_daemon_create(cubthread::entry &context) final
Definition: vacuum.c:774
void on_recycle(cubthread::entry &context) final
Definition: vacuum.c:884
VACUUM_LOG_BLOCKID get_first_blockid() const
Definition: vacuum.c:8122
#define VACUUM_IS_THREAD_VACUUM_MASTER
Definition: vacuum.h:217
int vacuum_rv_redo_vacuum_heap_record(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:7437
#define TEMP_BUFFER_SIZE
#define OR_MVCC_FLAG_SHIFT_BITS
int vacuum_is_file_dropped(THREAD_ENTRY *thread_p, bool *is_file_dropped, VFID *vfid, MVCCID mvccid)
Definition: vacuum.c:6485
char * data
VFID vacuum_data_file
Definition: vacuum.c:363
static void vacuum_dropped_files_set_next_page(THREAD_ENTRY *thread_p, VACUUM_DROPPED_FILES_PAGE *page_p, VPID *next_page)
Definition: vacuum.c:6707
static void vacuum_data_empty_page(THREAD_ENTRY *thread_p, VACUUM_DATA_PAGE *prev_data_page, VACUUM_DATA_PAGE **data_page)
Definition: vacuum.c:4742
#define MVCC_GET_REPID(header)
Definition: mvcc.h:63
int32_t pageid
Definition: dbtype_def.h:879
#define LSA_AS_ARGS(lsa_ptr)
Definition: log_lsa.hpp:78
struct log_zip * log_zip_p
Definition: vacuum.h:112
static INT32 vacuum_Dropped_files_count
Definition: vacuum.c:567
#define diff
Definition: mprec.h:352
#define MVCC_CLEAR_FLAG_BITS(rec_header_p, flag)
Definition: mvcc.h:101
#define VACUUM_LOG_VACUUM_HEAP_MASK
Definition: vacuum.c:549
char * undo_data_buffer
Definition: vacuum.h:118
#define VACUUM_NULL_LOG_BLOCKID
int er_errid(void)
entry_workpool * create_worker_pool(std::size_t pool_size, std::size_t task_max_count, const char *name, entry_manager *context_manager, std::size_t core_count, bool debug_logging, bool pool_threads=false, wait_seconds wait_for_task_time=std::chrono::seconds(5))
#define VACUUM_ER_LOG_WORKER
Definition: vacuum.h:53
void set_interrupted()
Definition: vacuum.c:8077
int file_get_sticky_first_page(THREAD_ENTRY *thread_p, const VFID *vfid, VPID *vpid_out)
LOG_ZIP_SIZE_T data_length
Definition: log_compress.h:55
#define SP_SUCCESS
Definition: slotted_page.h:50
#define VPID_INITIALIZER
Definition: dbtype_def.h:894
int pgbuf_get_fix_count(PAGE_PTR pgptr)
void on_retire(cubthread::entry &context) final
Definition: vacuum.c:865
#define ER_STAND_ALONE_VACUUM_END
Definition: error_code.h:1536
PAGE_TYPE
#define PTR_ALIGN(addr, boundary)
Definition: memory_alloc.h:77
VACUUM_LOG_BLOCKID m_last_blockid
Definition: vacuum.c:424
bool logtb_get_check_interrupt(THREAD_ENTRY *thread_p)
static void vacuum_read_log_aligned(THREAD_ENTRY *thread_entry, LOG_LSA *log_lsa, LOG_PAGE *log_page)
Definition: vacuum.c:3709
#define VACUUM_VERIFY_VACUUM_DATA(thread_p)
Definition: vacuum.c:746
static void vacuum_check_shutdown_interruption(const THREAD_ENTRY *thread_p, int error_code)
Definition: vacuum.c:7834
static void vacuum_data_initialize_new_page(THREAD_ENTRY *thread_p, VACUUM_DATA_PAGE *data_page)
Definition: vacuum.c:4410
#define OID_AS_ARGS(oidp)
Definition: oid.h:39
int logpb_get_archive_number(THREAD_ENTRY *thread_p, LOG_PAGEID pageid)
std::mutex prior_lsa_mutex
Definition: log_append.hpp:125
LOG_VACUUM_INFO vacuum_info
Definition: log_record.hpp:202
#define VACUUM_BLOCKID_WITHOUT_FLAGS(blockid)
Definition: vacuum.c:141
bool is_archive_removal_safe
Definition: vacuum.c:377
static cubthread::daemon * vacuum_Master_daemon
Definition: vacuum.c:921
static int vacuum_compare_dropped_files(const void *a, const void *b)
Definition: vacuum.c:5723
#define er_log_debug(...)
#define VPID_AS_ARGS(vpidp)
Definition: dbtype_def.h:896
LOG_ZIP * log_zip_alloc(LOG_ZIP_SIZE_T size)
Definition: log_compress.c:230
bool is_shutdown_requested()
Definition: vacuum.c:8384
static void vacuum_log_remove_ovf_insid(THREAD_ENTRY *thread_p, PAGE_PTR ovfpage)
Definition: vacuum.c:2818
VACUUM_DATA_LOAD vacuum_Data_load
Definition: vacuum.c:438
MVCCID newest_mvccid
Definition: vacuum.c:101
VACUUM_LOG_BLOCKID blockid
Definition: vacuum.c:98
static void vacuum_data_unload_first_and_last_page(THREAD_ENTRY *thread_p)
Definition: vacuum.c:2941
static void vacuum_update_keep_from_log_pageid(THREAD_ENTRY *thread_p)
Definition: vacuum.c:5685
#define MAX_ALIGNMENT
Definition: memory_alloc.h:70
bool is_cursor_entry_available() const
#define COPY_OID(dest_oid_ptr, src_oid_ptr)
Definition: oid.h:63
bool check_shutdown() const
#define ER_VACUUM_CS_NOT_AVAILABLE
Definition: error_code.h:1520
static void vacuum_notify_all_workers_dropped_file(const VFID &vfid_dropped, MVCCID mvccid)
Definition: vacuum.c:6233
SCAN_CODE spage_get_record(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, RECDES *record_descriptor_p, int is_peeking)
int heap_objects_capacity
Definition: vacuum.h:115
int mvcc_header_size_lookup[8]
int vacuum_create_file_for_dropped_files(THREAD_ENTRY *thread_p, VFID *dropped_files_vfid)
Definition: vacuum.c:4456
#define vacuum_er_log_warning(er_log_level, msg,...)
Definition: vacuum.h:73
int vacuum_data_load_and_recover(THREAD_ENTRY *thread_p)
Definition: vacuum.c:4095
LOG_LSA back_lsa
Definition: log_record.hpp:145
int file_create_with_npages(THREAD_ENTRY *thread_p, FILE_TYPE file_type, int npages, FILE_DESCRIPTORS *des, VFID *vfid)
#define VFID_ISNULL(vfid_ptr)
Definition: file_manager.h:72
void THREAD_ENTRY
static int vacuum_process_log_block(THREAD_ENTRY *thread_p, VACUUM_DATA_ENTRY *block_data, bool sa_mode_partial_block)
Definition: vacuum.c:3174
static const INT16 INDEX_NOT_FOUND
Definition: vacuum.c:194
mvcctable mvcc_table
Definition: log_impl.h:684
#define NULL_PAGEID
char * log_data
Definition: log_compress.h:57
#define MVCCID_ALL_VISIBLE
#define pgbuf_unfix_and_init(thread_p, pgptr)
Definition: page_buffer.h:63
STATIC_INLINE VACUUM_WORKER * vacuum_get_vacuum_worker(THREAD_ENTRY *thread_p) __attribute__((ALWAYS_INLINE))
Definition: vacuum.h:148
#define MVCC_SET_INSID(header, mvcc_id)
Definition: mvcc.h:54
PGLENGTH offset
Definition: log_record.hpp:157
VACUUM_LOG_BLOCKID get_blockid() const
Definition: vacuum.c:8158
thread_type type
INT16 index_unvacuumed
Definition: vacuum.c:188
void vacuum_log_add_dropped_file(THREAD_ENTRY *thread_p, const VFID *vfid, const OID *class_oid, bool pospone_or_undo)
Definition: vacuum.c:6024
#define VACUUM_FINISHED_JOB_QUEUE_CAPACITY
Definition: vacuum.c:476
static int vacuum_heap_record(THREAD_ENTRY *thread_p, VACUUM_HEAP_HELPER *helper)
Definition: vacuum.c:2326
int spage_update(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, const RECDES *record_descriptor_p)
static int vacuum_fetch_log_page(THREAD_ENTRY *thread_p, LOG_PAGEID log_pageid, LOG_PAGE *log_page)
Definition: vacuum.c:7113
bool should_interrupt_iteration() const
#define FREE(PTR)
Definition: cas_common.h:56
void log_append_redo_data2(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, const VFID *vfid, PAGE_PTR pgptr, PGLENGTH offset, int length, const void *data)
Definition: log_manager.c:1995
LOG_RECTYPE type
Definition: log_record.hpp:148
int file_descriptor_get(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_DESCRIPTORS *desc_out)
manager * get_manager(void)
VPID vpid_last
Definition: vacuum.c:436
PAGE_TYPE pgbuf_get_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr)
Definition: page_buffer.c:4675
int file_alloc_sticky_first_page(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_INIT_PAGE_FUNC f_init, void *f_init_args, VPID *vpid_out, PAGE_PTR *page_out)
static void print_not_vacuumed_to_log(OID *oid, OID *class_oid, MVCC_REC_HEADER *rec_header, int btree_node_type)
Definition: vacuum.c:7181
void spage_vacuum_slot(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slotid, bool reusable)
#define RECDES_INITIALIZER
int vacuum_rv_redo_vacuum_heap_page(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:2682
#define MVCCID_IS_NORMAL(id)
#define VACUUM_LOG_VACUUM_HEAP_REUSABLE
Definition: vacuum.c:545
void er_set(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
enum mvcc_satisfies_vacuum_result MVCC_SATISFIES_VACUUM_RESULT
Definition: mvcc.h:233
void set_job_in_progress()
Definition: vacuum.c:8071
vacuum_data()
Definition: vacuum.c:388
INT16 get_index() const
Definition: vacuum.c:8177
void LOG_CS_ENTER_READ_MODE(THREAD_ENTRY *thread_p)
#define OR_MVCC_FLAG_VALID_PREV_VERSION
int vacuum_rv_replace_dropped_file(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:6167
#define VACUUM_DROPPED_FILES_PAGE_CAPACITY
Definition: vacuum.c:593
PAGE_PTR pgptr
Definition: recovery.h:199
bool pgbuf_has_prevent_dealloc(PAGE_PTR pgptr)
const char * fileio_get_base_file_name(const char *full_name_p)
Definition: file_io.c:5533
#define assert(x)
int heap_rv_redo_insert(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15569
HEAP_PAGE_VACUUM_STATUS heap_page_get_vacuum_status(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23830
LOG_APPEND_INFO append
Definition: log_impl.h:651
static bool vacuum_Dropped_files_loaded
Definition: vacuum.c:558
std::int64_t VACUUM_LOG_BLOCKID
Definition: log_storage.hpp:91
int heap_get_mvcc_rec_header_from_overflow(PAGE_PTR ovf_page, MVCC_REC_HEADER *mvcc_header, RECDES *peek_recdes)
Definition: heap_file.c:18714
int n_heap_objects
Definition: vacuum.h:116
static void vacuum_read_log_add_aligned(THREAD_ENTRY *thread_entry, size_t size, LOG_LSA *log_lsa, LOG_PAGE *log_page)
Definition: vacuum.c:3735
int or_packed_string_length(const char *string, int *strlen)
#define OR_MVCC_FLAG_MASK
int32_t fileid
Definition: dbtype_def.h:886
bool pgbuf_check_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, PAGE_TYPE ptype)
void on_daemon_retire(cubthread::entry &context) final
Definition: vacuum.c:783
#define OR_MVCC_INSERT_ID_OFFSET
static void vacuum_convert_thread_to_worker(THREAD_ENTRY *thread_p, VACUUM_WORKER *worker, thread_type &save_type)
Definition: vacuum.c:7728
DISK_ISVALID vacuum_check_not_vacuumed_recdes(THREAD_ENTRY *thread_p, OID *oid, OID *class_oid, RECDES *recdes, int btree_node_type)
Definition: vacuum.c:7255
void set_vacuumed()
Definition: vacuum.c:8064
VACUUM_TRACK_DROPPED_FILES * next_tracked_page
Definition: vacuum.c:633
int prm_get_integer_value(PARAM_ID prm_id)
#define ER_GENERIC_ERROR
Definition: error_code.h:49
void on_create(cubthread::entry &context) final
Definition: vacuum.c:850
int page_data_max_count
Definition: vacuum.c:371
void pgbuf_flush_if_requested(THREAD_ENTRY *thread_p, PAGE_PTR page)
Definition: page_buffer.c:2996
#define VACUUM_PERF_HEAP_TRACK_PREPARE(thread_p, helper)
Definition: vacuum.c:533
FILE_VACUUM_DATA_DES vacuum_data
Definition: file_manager.h:137
#define MVCC_GET_CHN(header)
Definition: mvcc.h:69
bool is_cursor_entry_ready_to_vacuum() const
#define VACUUM_IS_THREAD_VACUUM_WORKER
Definition: vacuum.h:216
bool is_vacuumed() const
Definition: vacuum.c:8046
#define ER_OUT_OF_VIRTUAL_MEMORY
Definition: error_code.h:50
void LOG_CS_EXIT(THREAD_ENTRY *thread_p)
VACUUM_DATA_PAGE * last_page
Definition: vacuum.c:369
void log_append_undo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:1917
void execute(cubthread::entry &thread_ref) final
Definition: vacuum.c:907
char area[1]
Definition: log_storage.hpp:85
#define VACUUM_ER_LOG_LOGGING
Definition: vacuum.h:48
PGBUF_LATCH_CONDITION
Definition: page_buffer.h:185
#define ER_STAND_ALONE_VACUUM_START
Definition: error_code.h:1535
void retire_worker(VACUUM_WORKER &worker)
Definition: vacuum.c:843
PAGE_PTR home_page
Definition: vacuum.c:497
thread_type
#define VACUUM_ER_LOG_HEAP
Definition: vacuum.h:50
unsigned int record_type
Definition: slotted_page.h:89
STATIC_INLINE bool vacuum_is_thread_vacuum(const THREAD_ENTRY *thread_p) __attribute__((ALWAYS_INLINE))
Definition: vacuum.h:155
bool is_loaded
Definition: vacuum.c:375
int or_mvcc_set_header(RECDES *record, MVCC_REC_HEADER *mvcc_rec_header)
PERF_UTIME_TRACKER time_track
Definition: vacuum.c:528
LOG_PAGEID logical_pageid
Definition: log_storage.hpp:65
static void vacuum_cleanup_collected_by_vfid(VACUUM_WORKER *worker, VFID *vfid)
Definition: vacuum.c:6853
bool heap_remove_page_on_vacuum(THREAD_ENTRY *thread_p, PAGE_PTR *page_ptr, HFID *hfid)
Definition: heap_file.c:4643
int spage_compact(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
static int vacuum_heap_prepare_record(THREAD_ENTRY *thread_p, VACUUM_HEAP_HELPER *helper)
Definition: vacuum.c:1890
char log_Prefix[PATH_MAX]
Definition: log_global.c:87
#define VACUUM_ER_LOG_MASTER
Definition: vacuum.h:54
enum log_rectype LOG_RECTYPE
Definition: log_record.hpp:138
static vacuum_worker_context_manager * vacuum_Worker_context_manager
Definition: vacuum.c:926
static VACUUM_DATA vacuum_Data
Definition: vacuum.c:428
#define VPID_EQ(vpid_ptr1, vpid_ptr2)
Definition: dbtype_def.h:915
#define VFID_INITIALIZER
Definition: dbtype_def.h:890
LOG_REC_MVCC_UNDO mvcc_undo
Definition: log_record.hpp:303
int vacuum_rv_undo_vacuum_heap_record(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:7422
#define HFID_SET_NULL(hfid)
void vacuum_init_thread_context(cubthread::entry &context, thread_type type, VACUUM_WORKER *worker)
Definition: vacuum.c:754
#define vacuum_fix_dropped_entries_page(thread_p, vpidp, latch)
Definition: vacuum.c:606
short volid
Definition: dbtype_def.h:880
LOG_LSA prev_mvcc_op_log_lsa
Definition: log_record.hpp:190
void execute(cubthread::entry &thread_ref) final
static void vacuum_data_empty_update_last_blockid(THREAD_ENTRY *thread_p)
Definition: vacuum.c:7681
int vacuum_rv_redo_initialize_data_page(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:4432
#define ASSERT_NO_ERROR()
bool is_loaded() const
Definition: vacuum.c:8151
bool is_empty() const
Definition: vacuum.c:7905
VACUUM_LOG_BLOCKID logpb_last_complete_blockid(void)
#define VACUUM_PREFETCH_LOG_BLOCK_BUFFER_PAGES
Definition: vacuum.c:470
std::int64_t pageid
Definition: log_lsa.hpp:36
int log_block_npages
Definition: vacuum.c:373
int length
Definition: recovery.h:202
void vacuum_stop_workers(THREAD_ENTRY *thread_p)
Definition: vacuum.c:1332
MVCC_REC_HEADER mvcc_header
Definition: vacuum.c:508
INT64 LOG_PAGEID
void er_log_stats(void) const
LOG_DATA data
Definition: log_record.hpp:174
void vacuum_notify_server_crashed(LOG_LSA *recovery_lsa)
Definition: vacuum.c:7468
#define NULL
Definition: freelistheap.h:34
const VPID vpid_Null_vpid
Definition: page_buffer.c:74
int file_alloc(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_INIT_PAGE_FUNC f_init, void *f_init_args, VPID *vpid_out, PAGE_PTR *page_out)
int es_delete_file(const char *uri)
Definition: es.c:301
UINT64 MVCCID
static VFID vacuum_Dropped_files_vfid
Definition: vacuum.c:561
PGNSLOTS spage_number_of_records(PAGE_PTR page_p)
Definition: slotted_page.c:860
#define MVCC_IS_FLAG_SET(rec_header_p, flags)
Definition: mvcc.h:84
static void vacuum_read_advance_when_doesnt_fit(THREAD_ENTRY *thread_entry, size_t size, LOG_LSA *log_lsa, LOG_PAGE *log_page)
Definition: vacuum.c:3750
bool is_empty() const
Definition: vacuum.c:8087
if(extra_options)
Definition: dynamic_load.c:958
#define VACUUM_ER_LOG_WARNING
Definition: vacuum.h:47
int pgbuf_assign_private_lru(THREAD_ENTRY *thread_p, bool is_vacuum, const int id)
bool log_is_in_crash_recovery(void)
Definition: log_manager.c:476
const VFID * vfid
Definition: log_append.hpp:56
#define vacuum_er_log(er_log_level, msg,...)
Definition: vacuum.h:65
MVCCID insert_mvccid
Definition: btree.h:517
bool vacuum_is_safe_to_remove_archives(void)
Definition: vacuum.c:5650
#define vacuum_job_cursor_print_format
Definition: vacuum.c:325
bool LSA_ISNULL(const log_lsa *lsa_ptr)
Definition: log_lsa.hpp:153
vacuum_worker_task(const VACUUM_DATA_ENTRY &entry_ref)
Definition: vacuum.c:902
#define SERVER_MODE
VACUUM_LOG_BLOCKID get_last_blockid() const
Definition: vacuum.c:7917
#define VACUUM_DEFAULT_HEAP_OBJECT_BUFFER_SIZE
Definition: vacuum.c:552
void vacuum_rv_redo_data_finished_dump(FILE *fp, int length, void *data)
Definition: vacuum.c:4965
PAGE_PTR pgptr
Definition: log_append.hpp:57
#define db_private_free_and_init(thrd, ptr)
Definition: memory_alloc.h:141
int xvacuum(THREAD_ENTRY *thread_p)
Definition: vacuum.c:962
static void vacuum_restore_thread(THREAD_ENTRY *thread_p, thread_type save_type)
Definition: vacuum.c:7754
#define VACUUM_BLOCK_STATUS_IS_AVAILABLE(blockid)
Definition: vacuum.c:161
void log_append_undoredo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int undo_length, int redo_length, const void *undo_data, const void *redo_data)
Definition: log_manager.c:1837
int pgbuf_get_hold_count(THREAD_ENTRY *thread_p)
#define pgbuf_fix(thread_p, vpid, fetch_mode, requestmode, condition)
Definition: page_buffer.h:255
VACUUM_WORKER_STATE state
Definition: vacuum.h:108
#define MVCC_ID_PRECEDES(id1, id2)
Definition: mvcc.h:137
PGLENGTH offset
Definition: log_storage.hpp:66
#define LOG_IS_MVCC_BTREE_OPERATION(rcvindex)
Definition: mvcc.h:248
void xvacuum_dump(THREAD_ENTRY *thread_p, FILE *outfp)
Definition: vacuum.c:1104
MVCCID newest_block_mvccid
void force_data_update()
Definition: vacuum.c:8210
int pgbuf_get_condition_for_ordered_fix(const VPID *vpid_new_page, const VPID *vpid_fixed_page, const HFID *hfid)
static int vacuum_find_dropped_file(THREAD_ENTRY *thread_p, bool *is_file_dropped, VFID *vfid, MVCCID mvccid)
Definition: vacuum.c:6507
const OID * heap_ovf_delete(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *ovf_oid, VFID *ovf_vfid_p)
Definition: heap_file.c:6521
#define vacuum_job_cursor_print_args(cursor)
Definition: vacuum.c:326
#define NULL_OFFSET
INT32 drop_files_version
Definition: vacuum.h:109
VFID vacuum_Last_dropped_vfid
Definition: vacuum.c:643
INT32 vacuum_Dropped_files_version
Definition: vacuum.c:641
bool logtb_set_check_interrupt(THREAD_ENTRY *thread_p, bool flag)
#define vacuum_set_dirty_data_page(thread_p, data_page, free)
Definition: vacuum.c:239
LOG_PRIOR_LSA_INFO prior_info
Definition: log_impl.h:652
VFID * heap_ovf_find_vfid(THREAD_ENTRY *thread_p, const HFID *hfid, VFID *ovf_vfid, bool docreate, PGBUF_LATCH_CONDITION latch_cond)
Definition: heap_file.c:6353
#define HFID_AS_ARGS(hfid)
void pgbuf_get_vpid(PAGE_PTR pgptr, VPID *vpid)
Definition: page_buffer.c:4579
char * prefetch_log_buffer
Definition: vacuum.h:124
offset_type offset
Definition: log_append.hpp:58
VFID overflow_vfid
Definition: vacuum.c:511
LOG_REC_UNDOREDO undoredo
Definition: log_record.hpp:200
#define VFID_COPY(vfid_ptr1, vfid_ptr2)
Definition: file_manager.h:69
const char * file_type_to_string(FILE_TYPE fstruct_type)
void log_sysop_abort(THREAD_ENTRY *thread_p)
Definition: log_manager.c:4017
static int vacuum_heap_record_insid_and_prev_version(THREAD_ENTRY *thread_p, VACUUM_HEAP_HELPER *helper)
Definition: vacuum.c:2160
FILE_HEAP_DES heap
Definition: file_manager.h:132
void update()
Definition: vacuum.c:7956
LOG_LSA recovery_lsa
Definition: vacuum.c:379
void destroy_worker_pool(entry_workpool *&worker_pool_arg)
vacuum_job_cursor m_cursor
Definition: vacuum.c:817
int vacuum_load_dropped_files_from_disk(THREAD_ENTRY *thread_p)
Definition: vacuum.c:4261
static void vacuum_data_load_first_and_last_page(THREAD_ENTRY *thread_p)
Definition: vacuum.c:2910
static int vacuum_add_dropped_file(THREAD_ENTRY *thread_p, VFID *vfid, MVCCID mvccid)
Definition: vacuum.c:5749
MVCC_SATISFIES_VACUUM_RESULT can_vacuum
Definition: vacuum.c:514
#define CAST_BUFLEN
Definition: porting.h:471
static int vacuum_collect_heap_objects(THREAD_ENTRY *thread_p, VACUUM_WORKER *worker, OID *oid, VFID *vfid)
Definition: vacuum.c:6810
int vacuum_rv_redo_start_job(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:5663
bool logtb_is_interrupted(THREAD_ENTRY *thread_p, bool clear, bool *continue_checking)
#define VACUUM_BLOCK_STATUS_SET_IN_PROGRESS(blockid)
Definition: vacuum.c:167
static void error(const char *msg)
Definition: gencat.c:331
int file_init_page_type(THREAD_ENTRY *thread_p, PAGE_PTR page, void *args)
void log_append_postpone(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:2698
LOG_LSA prev_lsa
Definition: log_append.hpp:77
bool should_force_data_update() const
#define VACUUM_ER_LOG_VACUUM_DATA
Definition: vacuum.h:52
#define FILEIO_SUFFIX_LOGARCHIVE
Definition: file_io.h:83
#define VPID_ISNULL(vpid_ptr)
Definition: dbtype_def.h:925
void btree_rv_read_keybuf_two_objects(THREAD_ENTRY *thread_p, char *datap, int data_size, BTID_INT *btid_int, BTREE_OBJECT_INFO *first_version, BTREE_OBJECT_INFO *second_version, OR_BUF *key_buf)
Definition: btree.c:17631
const char * data
Definition: recovery.h:203
STATIC_INLINE void perfmon_inc_stat(THREAD_ENTRY *thread_p, PERF_STAT_ID psid) __attribute__((ALWAYS_INLINE))
void readjust_to_vacuum_data_changes()
Definition: vacuum.c:8255
#define ER_INTERRUPTED
Definition: error_code.h:51
MVCCID get_global_oldest_visible() const
Definition: mvcc_table.cpp:612
void start_job_on_cursor_entry() const
int heap_set_mvcc_rec_header_on_overflow(PAGE_PTR ovf_page, MVCC_REC_HEADER *mvcc_header)
Definition: heap_file.c:18740
void btree_rv_read_keybuf_nocopy(THREAD_ENTRY *thread_p, char *datap, int data_size, BTID_INT *btid, OID *cls_oid, OID *oid, BTREE_MVCC_INFO *mvcc_info, OR_BUF *key_buf)
Definition: btree.c:17567
static void vacuum_verify_vacuum_data_debug(THREAD_ENTRY *thread_p)
Definition: vacuum.c:6958
LOG_TDES * LOG_FIND_CURRENT_TDES(THREAD_ENTRY *thread_p=NULL)
Definition: log_impl.h:1115
SCAN_CODE heap_get_class_oid(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid)
Definition: heap_file.c:9285
void vacuum_produce_log_block_data(THREAD_ENTRY *thread_p)
Definition: vacuum.c:2867
#define pgbuf_fix_if_not_deallocated(thread_p, vpid, latch_mode, latch_condition, page)
Definition: page_buffer.h:441
#define MVCC_GET_FLAG(header)
Definition: mvcc.h:75
#define HFID_IS_NULL(hfid)
#define VACUUM_BLOCK_SET_INTERRUPTED(blockid)
Definition: vacuum.c:174
static void vacuum_finished_block_vacuum(THREAD_ENTRY *thread_p, VACUUM_DATA_ENTRY *block_data, bool is_vacuum_complete)
Definition: vacuum.c:3636
PGSLOTID slots[MAX_SLOTS_IN_PAGE]
Definition: vacuum.c:517
#define ARG_FILE_LINE
Definition: error_manager.h:44
void destroy_daemon(daemon *&daemon_arg)
int btree_vacuum_object(THREAD_ENTRY *thread_p, BTID *btid, OR_BUF *buffered_key, OID *oid, OID *class_oid, MVCCID delete_mvccid)
Definition: btree.c:29429
int vacuum_initialize(THREAD_ENTRY *thread_p, int vacuum_log_block_npages, VFID *vacuum_data_vfid, VFID *dropped_files_vfid, bool is_restore)
Definition: vacuum.c:1160
VACUUM_DATA_PAGE * m_page
Definition: vacuum.c:320
static const bool COPY
void set_oldest_unvacuumed_on_boot()
Definition: vacuum.c:7985
bool log_unzip(LOG_ZIP *log_unzip, LOG_ZIP_SIZE_T length, void *data)
Definition: log_compress.c:123
int vacuum_boot(THREAD_ENTRY *thread_p)
Definition: vacuum.c:1261
LOG_PAGEID vacuum_min_log_pageid_to_keep(THREAD_ENTRY *thread_p)
Definition: vacuum.c:5625
#define vacuum_fix_data_page(thread_p, vpidp)
Definition: vacuum.c:214
INT16 PGLENGTH
int btree_vacuum_insert_mvccid(THREAD_ENTRY *thread_p, BTID *btid, OR_BUF *buffered_key, OID *oid, OID *class_oid, MVCCID insert_mvccid)
Definition: btree.c:29397
#define VACUUM_LOG_ADD_DROPPED_FILE_POSTPONE
Definition: vacuum.h:78
LOG_LSA prev_version_lsa
Definition: mvcc.h:45
int vacuum_rv_redo_remove_ovf_insid(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:2831
INT16 PGSLOTID
#define vacuum_unfix_first_and_last_data_page(thread_p)
Definition: vacuum.c:265
bool has_one_page() const
Definition: vacuum.c:7911
#define DOUBLE_ALIGNMENT
Definition: memory_alloc.h:64
static int vacuum_compare_heap_object(const void *a, const void *b)
Definition: vacuum.c:6760
void log_zip_free(LOG_ZIP *log_zip)
Definition: log_compress.c:265
static bool vacuum_is_work_in_progress(THREAD_ENTRY *thread_p)
Definition: vacuum.c:4506
bool vacuum_is_mvccid_vacuumed(MVCCID id)
Definition: vacuum.c:7361
log_system_tdes * get_system_tdes(void)
#define free_and_init(ptr)
Definition: memory_alloc.h:147
PAGEID pageid
Definition: log_record.hpp:156
#define LOG_ISRESTARTED()
Definition: log_impl.h:232
#define DB_ALIGN(offset, align)
Definition: memory_alloc.h:84
static bool is_flag_set(const T &where_to_check, const T &what_to_check)
Definition: base_flag.hpp:149
void LSA_SET_NULL(log_lsa *lsa_ptr)
Definition: log_lsa.hpp:146
static void vacuum_set_dirty_data_page_dont_free(cubthread::entry *thread_p, vacuum_data_page *data_page)
Definition: vacuum.c:258
PGSLOTID crt_slotid
Definition: vacuum.c:501
#define DB_PAGESIZE
#define ER_HEAP_UNKNOWN_OBJECT
Definition: error_code.h:102
#define MVCC_GET_DELID(header)
Definition: mvcc.h:57
HEAP_PAGE_VACUUM_STATUS
Definition: heap_file.h:358
void pgbuf_set_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, PAGE_TYPE ptype)
Definition: page_buffer.c:4847
bool logpb_is_page_in_archive(LOG_PAGEID pageid)
bool prm_get_bool_value(PARAM_ID prm_id)
#define INT_ALIGNMENT
Definition: memory_alloc.h:61
int spage_get_free_space_without_saving(THREAD_ENTRY *thread_p, PAGE_PTR page_p, bool *need_update)
Definition: slotted_page.c:925
#define ZIP_CHECK(length)
Definition: log_compress.h:39
#define OR_PUT_INT(ptr, val)
DISK_ISVALID vacuum_check_not_vacuumed_rec_header(THREAD_ENTRY *thread_p, OID *oid, OID *class_oid, MVCC_REC_HEADER *rec_header, int btree_node_type)
Definition: vacuum.c:7314
void er_clear(void)
#define VACUUM_BLOCK_DATA_BUFFER_CAPACITY
Definition: vacuum.c:460
void log_sysop_commit(THREAD_ENTRY *thread_p)
Definition: log_manager.c:3895
LOG_TDES * logtb_get_system_tdes(THREAD_ENTRY *thread_p=NULL)
int vacuum_rv_redo_cleanup_dropped_files(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:6652
static void vacuum_data_mark_finished(THREAD_ENTRY *thread_p)
Definition: vacuum.c:4533
MVCC_SATISFIES_VACUUM_RESULT results[MAX_SLOTS_IN_PAGE]
Definition: vacuum.c:518
#define OR_GET_MVCC_REPID_AND_FLAG(ptr)
bool is_available() const
Definition: vacuum.c:8040
void log_append_redo_crumbs(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int num_crumbs, const LOG_CRUMB *crumbs)
Definition: log_manager.c:2307
int i
Definition: dynamic_load.c:954
void vacuum_sa_reflect_last_blockid(THREAD_ENTRY *thread_p)
Definition: vacuum.c:7647
static void vacuum_verify_vacuum_data_page_fix_count(THREAD_ENTRY *thread_p)
Definition: vacuum.c:7493
static int vacuum_recover_lost_block_data(THREAD_ENTRY *thread_p)
Definition: vacuum.c:5375
void vacuum_finalize(THREAD_ENTRY *thread_p)
Definition: vacuum.c:1385
#define vacuum_unfix_data_page(thread_p, data_page)
Definition: vacuum.c:227
VACUUM_DATA_ENTRY m_data
Definition: vacuum.c:917
pthread_mutex_t vacuum_Dropped_files_mutex
Definition: vacuum.c:642
static int vacuum_heap_get_hfid_and_file_type(THREAD_ENTRY *thread_p, VACUUM_HEAP_HELPER *helper, const VFID *vfid)
Definition: vacuum.c:2460
static cubthread::entry_workpool * vacuum_Worker_threads
Definition: vacuum.c:925
static vacuum_master_context_manager * vacuum_Master_context_manager
Definition: vacuum.c:922
void vacuum_rv_undoredo_data_set_link_dump(FILE *fp, int length, void *data)
Definition: vacuum.c:5301
VACUUM_LOG_BLOCKID vacuum_last_blockid
STATIC_INLINE bool vacuum_is_thread_vacuum_worker(const THREAD_ENTRY *thread_p) __attribute__((ALWAYS_INLINE))
Definition: vacuum.h:162
INT16 type
STATIC_INLINE void perfmon_add_stat(THREAD_ENTRY *thread_p, PERF_STAT_ID psid, UINT64 amount) __attribute__((ALWAYS_INLINE))
int vacuum_rv_check_at_undo(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, INT16 slotid, INT16 rec_type)
Definition: vacuum.c:7525
int heap_delete_hfid_from_cache(THREAD_ENTRY *thread_p, OID *class_oid)
Definition: heap_file.c:23262
LOG_RCVINDEX rcvindex
Definition: log_record.hpp:155
PAGE_PTR forward_page
Definition: vacuum.c:499
#define VACUUM_TRACK_DROPPED_FILES_SIZE
Definition: vacuum.c:637
#define pthread_mutex_lock(a)
Definition: area_alloc.c:50
#define IO_MAX_PAGE_SIZE
int vacuum_rv_redo_vacuum_complete(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:3144
#define LOG_PAGESIZE
bool vacuum_Is_booted
Definition: vacuum.c:652
short volid
Definition: dbtype_def.h:887
static int vacuum_worker_allocate_resources(THREAD_ENTRY *thread_p, VACUUM_WORKER *worker)
Definition: vacuum.c:3532
void upgrade_oldest_unvacuumed(MVCCID mvccid)
Definition: vacuum.c:8006
entry & get_entry(void)
void set_on_vacuum_data_start()
Definition: vacuum.c:8249
#define VPID_GET_FROM_OID(vpid_ptr, oid_ptr)
Definition: page_buffer.h:46
#define VACUUM_BLOCKID_GET_FLAGS(blockid)
Definition: vacuum.c:145
static void vacuum_heap_page_log_and_reset(THREAD_ENTRY *thread_p, VACUUM_HEAP_HELPER *helper, bool update_best_space_stat, bool unlatch_page)
Definition: vacuum.c:2549
int vacuum_create_file_for_vacuum_data(THREAD_ENTRY *thread_p, VFID *vacuum_data_vfid)
Definition: vacuum.c:4357
#define VACUUM_PERF_HEAP_TRACK_EXECUTE(thread_p, helper)
Definition: vacuum.c:536
#define VACUUM_DROPPED_FILES_MAX_PAGE_CAPACITY
Definition: vacuum.c:597
#define PGBUF_PAGE_STATE_MSG(name)
Definition: page_buffer.h:56
#define OID_ISNULL(oidp)
Definition: oid.h:81
VACUUM_LOG_BLOCKID m_blockid
Definition: vacuum.c:319
#define VACUUM_PERF_HEAP_TRACK_LOGGING(thread_p, helper)
Definition: vacuum.c:539
int vacuum_consume_buffer_log_blocks(THREAD_ENTRY *thread_p)
Definition: vacuum.c:5006
static int vacuum_log_prefetch_vacuum_block(THREAD_ENTRY *thread_p, VACUUM_DATA_ENTRY *entry)
Definition: vacuum.c:7063
#define DONT_FREE
Definition: page_buffer.h:41
#define MVCCID_FORWARD(id)
void push_task(entry_workpool *worker_pool_arg, entry_task *exec_p)
MVCCID mvcc_next_id
bool check_shutdown_request()
Definition: vacuum.c:8390
static int vacuum_process_log_record(THREAD_ENTRY *thread_p, VACUUM_WORKER *worker, LOG_LSA *log_lsa_p, LOG_PAGE *log_page_p, LOG_DATA *log_record_data, MVCCID *mvccid, char **undo_data_ptr, int *undo_data_size, LOG_VACUUM_INFO *vacuum_info, bool *is_file_dropped, bool stop_after_vacuum_info)
Definition: vacuum.c:3818
bool is_index_valid(INT16 index) const
Definition: vacuum.c:8093
INT16 get_index_of_blockid(VACUUM_LOG_BLOCKID blockid) const
Definition: vacuum.c:8099
daemon * create_daemon(const looper &looper_arg, entry_task *exec_p, const char *daemon_name="", entry_manager *context_manager=NULL)
int heap_get_class_info(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid_out, FILE_TYPE *ftype_out, char **classname_out)
Definition: heap_file.c:16733
const void * data
Definition: log_append.hpp:48
static void vacuum_log_vacuum_heap_page(THREAD_ENTRY *thread_p, PAGE_PTR page_p, int n_slots, PGSLOTID *slots, MVCC_SATISFIES_VACUUM_RESULT *results, bool reusable, bool all_vacuumed)
Definition: vacuum.c:2613
static int vacuum_cleanup_dropped_files(THREAD_ENTRY *thread_p)
Definition: vacuum.c:6336
#define vacuum_set_dirty_dropped_entries_page(thread_p, dropped_page, free)
Definition: vacuum.c:616
int vacuum_rv_redo_append_data(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:5321
void increment_blockid()
Definition: vacuum.c:8241
int initial_home_free_space
Definition: vacuum.c:525
bool is_job_in_progress() const
Definition: vacuum.c:8052
VACUUM_WORKER vacuum_Master
Definition: vacuum.c:447
int vacuum_rv_undoredo_data_set_link(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:5271
#define PEEK
Definition: file_io.h:74
#define VACUUM_BLOCK_STATUS_SET_VACUUMED(blockid)
Definition: vacuum.c:165
#define VACUUM_ER_LOG_ERROR
Definition: vacuum.h:46
LOG_LSA mvcc_op_log_lsa
static void vacuum_finalize_worker(THREAD_ENTRY *thread_p, VACUUM_WORKER *worker_info)
Definition: vacuum.c:3601
void set_last_blockid(VACUUM_LOG_BLOCKID blockid)
Definition: vacuum.c:7940
#define VPID_SET_NULL(vpid_ptr)
Definition: dbtype_def.h:906
MVCC_SATISFIES_VACUUM_RESULT mvcc_satisfies_vacuum(THREAD_ENTRY *thread_p, MVCC_REC_HEADER *rec_header, MVCCID oldest_mvccid)
Definition: mvcc.c:309
vacuum_data_page * get_page() const
Definition: vacuum.c:8170
#define VACUUM_ER_LOG_DROPPED_FILES
Definition: vacuum.h:51
bool spage_need_compact(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
bool was_interrupted() const
Definition: vacuum.c:8058
#define MVCCID_IS_VALID(id)
#define PGBUF_PAGE_STATE_ARGS(pg)
Definition: page_buffer.h:57
#define VACUUM_BLOCK_CLEAR_INTERRUPTED(blockid)
Definition: vacuum.c:176
#define VACUUM_BLOCK_STATUS_IS_VACUUMED(blockid)
Definition: vacuum.c:157
#define vacuum_unfix_dropped_entries_page(thread_p, dropped_page)
Definition: vacuum.c:610
bool is_logging_configured(const int logging_flag)
std::int64_t offset
Definition: log_lsa.hpp:37
#define VACUUM_ER_LOG_JOBS
Definition: vacuum.h:58
#define VFID_SET_NULL(vfid_ptr)
Definition: file_manager.h:65
const char ** p
Definition: dynamic_load.c:945
VPID * pgbuf_get_vpid_ptr(PAGE_PTR pgptr)
Definition: page_buffer.c:4609
const vacuum_data_entry & get_current_entry() const
Definition: vacuum.c:8183
DISK_ISVALID
Definition: disk_manager.h:53
int util_bsearch(const void *key, const void *base, int n_elems, unsigned int sizeof_elem, int(*func_compare)(const void *, const void *), bool *out_found)
Definition: util_func.c:764
int vacuum_rv_redo_add_dropped_file(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:6065
static bool is_not_vacuumed_and_lost(THREAD_ENTRY *thread_p, MVCC_REC_HEADER *rec_header)
Definition: vacuum.c:7277
void heap_stats_update(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, const HFID *hfid, int prev_freespace)
Definition: heap_file.c:2936
#define VACUUM_ER_LOG_BTREE
Definition: vacuum.h:49
#define HEAP_ISVALID_OID(thread_p, oid)
Definition: heap_file.h:77
int vacuum_rv_undo_add_dropped_file(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: vacuum.c:6133
bool allocated_resources
Definition: vacuum.h:128
VACUUM_DROPPED_FILES_PAGE dropped_data_page
Definition: vacuum.c:634
VACUUM_LOG_BLOCKID get_blockid() const
Definition: vacuum.c:8034
char rec_buf[IO_MAX_PAGE_SIZE+MAX_ALIGNMENT]
Definition: vacuum.c:506
int search(int &result, const cub_regex_object &reg, const std::string &src, const INTL_CODESET codeset)
#define LOG_READ_ADVANCE_WHEN_DOESNT_FIT(thread_p, length, lsa, log_pgptr)
Definition: log_impl.h:149
#define pthread_mutex_destroy(a)
Definition: area_alloc.c:49
bool does_block_need_vacuum
static void vacuum_convert_thread_to_master(THREAD_ENTRY *thread_p, thread_type &save_type)
Definition: vacuum.c:7705
struct vacuum_worker * vacuum_worker
static int vacuum_heap(THREAD_ENTRY *thread_p, VACUUM_WORKER *worker, MVCCID threshold_mvccid, bool was_interrupted)
Definition: vacuum.c:1463