CUBRID Engine  latest
log_append.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 #include "log_append.hpp"
20 
21 #include "file_manager.h"
22 #include "log_compress.h"
23 #include "log_impl.h"
24 #include "log_manager.h"
25 #include "log_record.hpp"
26 #include "page_buffer.h"
27 #include "perf_monitor.h"
28 #include "thread_entry.hpp"
29 #include "thread_manager.hpp"
30 #include "vacuum.h"
31 
32 static bool log_Zip_support = false;
34 #if !defined(SERVER_MODE)
37 static char *log_data_ptr = NULL;
38 static int log_data_length = 0;
39 #endif
40 
// Returns the size of the usable log area (LOGAREA_SIZE).
// NOTE(review): the defining line (inner line 42, the function name) was lost by
// the doc extraction — presumably LOG_PRIOR_LSA_LAST_APPEND_OFFSET; confirm
// against the original source file.
41 size_t
43 {
44  return LOGAREA_SIZE;
45 }
46 
47 static void log_prior_lsa_append_align ();
48 static void log_prior_lsa_append_advance_when_doesnot_fit (size_t length);
49 static void log_prior_lsa_append_add_align (size_t add);
50 static int prior_lsa_gen_postpone_record (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RCVINDEX rcvindex,
51  LOG_DATA_ADDR *addr, int length, const char *data);
52 static int prior_lsa_gen_dbout_redo_record (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RCVINDEX rcvindex,
53  int length, const char *data);
54 static int prior_lsa_gen_record (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RECTYPE rec_type, int length,
55  const char *data);
57  LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int num_ucrumbs, const LOG_CRUMB *ucrumbs, int num_rcrumbs,
58  const LOG_CRUMB *rcrumbs);
59 static int prior_lsa_gen_2pc_prepare_record (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, int gtran_length,
60  const char *gtran_data, int lock_length, const char *lock_data);
61 static int prior_lsa_gen_end_chkpt_record (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, int tran_length,
62  const char *tran_data, int topop_length, const char *topop_data);
63 static int prior_lsa_copy_undo_data_to_node (LOG_PRIOR_NODE *node, int length, const char *data);
64 static int prior_lsa_copy_redo_data_to_node (LOG_PRIOR_NODE *node, int length, const char *data);
65 static int prior_lsa_copy_undo_crumbs_to_node (LOG_PRIOR_NODE *node, int num_crumbs, const LOG_CRUMB *crumbs);
66 static int prior_lsa_copy_redo_crumbs_to_node (LOG_PRIOR_NODE *node, int num_crumbs, const LOG_CRUMB *crumbs);
67 static void prior_lsa_start_append (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_TDES *tdes);
68 static void prior_lsa_end_append (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node);
69 static void prior_lsa_append_data (int length);
71  int with_lock);
72 static void prior_update_header_mvcc_info (const LOG_LSA &record_lsa, MVCCID mvccid);
73 static LOG_ZIP *log_append_get_zip_undo (THREAD_ENTRY *thread_p);
74 static LOG_ZIP *log_append_get_zip_redo (THREAD_ENTRY *thread_p);
75 static char *log_append_get_data_ptr (THREAD_ENTRY *thread_p);
76 static bool log_append_realloc_data_ptr (THREAD_ENTRY *thread_p, int length);
77 
// Construct a log data address from its three components: the file identifier
// of the logged data, the fixed page pointer, and the offset within that page.
78 log_data_addr::log_data_addr (const VFID *vfid_arg, PAGE_PTR pgptr_arg, PGLENGTH offset_arg)
79  : vfid (vfid_arg)
80  , pgptr (pgptr_arg)
81  , offset (offset_arg)
82 {
83 }
84 
// Default constructor: initialize the append state to "nothing attached" —
// no volume descriptor, null LSAs, no append page, TDE flag cleared.
// NOTE(review): the signature line (inner line 85) was lost by the extraction;
// presumably log_append_info::log_append_info () — confirm against the source.
86  : vdes (NULL_VOLDES)
87  , nxio_lsa (NULL_LSA)
88  , prev_lsa (NULL_LSA)
89  , log_pgptr (NULL)
90  , appending_page_tde_encrypted (false)
91 {
92 
93 }
94 
// Copy constructor: nxio_lsa is an atomic, so it is copied via load() rather
// than member-wise copy; all other members are copied directly.
// NOTE(review): the signature line (inner line 95) was lost by the extraction;
// presumably log_append_info::log_append_info (const log_append_info &other).
96  : vdes (other.vdes)
97  , nxio_lsa {other.nxio_lsa.load ()}
98  , prev_lsa (other.prev_lsa)
99  , log_pgptr (other.log_pgptr)
100  , appending_page_tde_encrypted (other.appending_page_tde_encrypted)
101 {
102 
103 }
104 
// Atomically read the next-I/O LSA.
// NOTE(review): the name line (inner line 106) was lost by the extraction;
// presumably log_append_info::get_nxio_lsa () const.
105 LOG_LSA
107 {
108  return nxio_lsa.load ();
109 }
110 
// Atomically publish a new next-I/O LSA.
// NOTE(review): the name line (inner line 112) was lost by the extraction;
// presumably log_append_info::set_nxio_lsa (const LOG_LSA &next_io_lsa).
111 void
113 {
114  nxio_lsa.store (next_io_lsa);
115 }
116 
// Default constructor: empty prior-node list, null LSAs, zero accumulated size.
// NOTE(review): the signature line (inner line 117) was lost by the extraction;
// presumably log_prior_lsa_info::log_prior_lsa_info ().
118  : prior_lsa (NULL_LSA)
119  , prev_lsa (NULL_LSA)
120  , prior_list_header (NULL)
121  , prior_list_tail (NULL)
122  , list_size (0)
123  , prior_flush_list_header (NULL)
124  , prior_lsa_mutex ()
125 {
126 }
127 
// Reset both the global header append LSA and the prior-list LSA to the given
// value, keeping the two views of "where the next record goes" in sync.
// NOTE(review): the name line (inner line 129) was lost by the extraction;
// presumably LOG_RESET_APPEND_LSA (const LOG_LSA *lsa).
128 void
130 {
131  // todo - concurrency safe-guard
132  log_Gl.hdr.append_lsa = *lsa;
133  log_Gl.prior_info.prior_lsa = *lsa;
134 }
135 
// Reset both copies of the previous-record LSA (append state and prior info).
// NOTE(review): the name line (inner line 137) was lost by the extraction;
// presumably LOG_RESET_PREV_LSA (const LOG_LSA *lsa).
136 void
138 {
139  // todo - concurrency safe-guard
140  log_Gl.append.prev_lsa = *lsa;
141  log_Gl.prior_info.prev_lsa = *lsa;
142 }
143 
// Returns a raw pointer into the current append page.
// NOTE(review): both the name line (inner line 145) and the return statement
// (inner line 148) were lost by the extraction; the body cannot be documented
// further from what is visible — confirm against the original source file.
144 char *
146 {
147  // todo - concurrency safe-guard
149 }
150 
// Returns true when the prior (not yet flushed) list contains at least one log
// record produced by a worker transaction (trid != LOG_SYSTEM_TRANID).
// Scans under both the log critical section and the prior-list mutex.
// NOTE(review): the name line (inner line 152) was lost by the extraction
// (presumably prior_has_worker_log_records), as was inner line 164 — presumably
// node = log_Gl.prior_info.prior_list_header; — confirm against the source.
151 bool
153 {
154  LOG_CS_ENTER (thread_p);
155 
156  std::unique_lock<std::mutex> ulock (log_Gl.prior_info.prior_lsa_mutex);
157  LOG_LSA nxio_lsa = log_Gl.append.get_nxio_lsa ();
158 
// Anything between nxio_lsa and prior_lsa is appended but not yet written.
159  if (!LSA_EQ (&nxio_lsa, &log_Gl.prior_info.prior_lsa))
160  {
161  LOG_PRIOR_NODE *node;
162 
163  assert (LSA_LT (&nxio_lsa, &log_Gl.prior_info.prior_lsa));
165  while (node != NULL)
166  {
167  if (node->log_header.trid != LOG_SYSTEM_TRANID)
168  {
169  ulock.unlock ();
170  LOG_CS_EXIT (thread_p);
171  return true;
172  }
173  node = node->next;
174  }
175  }
176 
177  ulock.unlock ();
178 
179  LOG_CS_EXIT (thread_p);
180 
181  return false;
182 }
183 
// Initialize log-record compression support. In SERVER_MODE per-thread zip
// buffers are used, so only the flag is set; otherwise standalone-mode global
// undo/redo zip buffers and a scratch data buffer are allocated here, and
// log_Zip_support is enabled only if every allocation succeeded.
// NOTE(review): several lines were lost by the extraction: the function name
// (inner 185, presumably log_append_init_zip), the guard condition (inner 187,
// presumably a compression system-parameter check), the log_data_length
// assignment (inner 198), an error-report call (inner 202) and the
// free_and_init of log_data_ptr (inner 220) — confirm against the source.
184 void
186 {
188  {
189  log_Zip_support = false;
190  return;
191  }
192 
193 #if defined(SERVER_MODE)
194  log_Zip_support = true;
195 #else
196  log_zip_undo = log_zip_alloc (IO_PAGESIZE);
197  log_zip_redo = log_zip_alloc (IO_PAGESIZE);
199  log_data_ptr = (char *) malloc (log_data_length);
200  if (log_data_ptr == NULL)
201  {
203  }
204 
// All-or-nothing: if any allocation failed, release the others and disable zip.
205  if (log_zip_undo == NULL || log_zip_redo == NULL || log_data_ptr == NULL)
206  {
207  log_Zip_support = false;
208  if (log_zip_undo)
209  {
210  log_zip_free (log_zip_undo);
211  log_zip_undo = NULL;
212  }
213  if (log_zip_redo)
214  {
215  log_zip_free (log_zip_redo);
216  log_zip_redo = NULL;
217  }
218  if (log_data_ptr)
219  {
221  log_data_length = 0;
222  }
223  }
224  else
225  {
226  log_Zip_support = true;
227  }
228 #endif
229 }
230 
// Tear down compression support set up by the init counterpart above: no-op
// when zip support was never enabled or in SERVER_MODE; otherwise release the
// standalone-mode global zip buffers and scratch data buffer.
// NOTE(review): the name line (inner 232, presumably log_append_final_zip) and
// the free_and_init of log_data_ptr (inner 253) were lost by the extraction.
231 void
233 {
234  if (!log_Zip_support)
235  {
236  return;
237  }
238 
239 #if defined (SERVER_MODE)
240 #else
241  if (log_zip_undo)
242  {
243  log_zip_free (log_zip_undo);
244  log_zip_undo = NULL;
245  }
246  if (log_zip_redo)
247  {
248  log_zip_free (log_zip_redo);
249  log_zip_redo = NULL;
250  }
251  if (log_data_ptr)
252  {
254  log_data_length = 0;
255  }
256 #endif
257 }
258 
259 /*
260  * prior_lsa_alloc_and_copy_data -
261  *
262  * return: new node
263  *
264  * rec_type(in):
265  * rcvindex(in):
266  * addr(in):
267  * ulength(in):
268  * udata(in):
269  * rlength(in):
270  * rdata(in):
271  */
// Allocate a LOG_PRIOR_NODE and build its data header plus undo/redo payload
// for the given record type, dispatching to the matching prior_lsa_gen_*
// helper. Returns the new node, or NULL on failure (with all partially built
// buffers released). Undo/redo record types must go through
// prior_lsa_alloc_and_copy_crumbs instead and are rejected here.
// NOTE(review): the extraction dropped the return-type/signature lines (inner
// 272-273), an error-report call (inner 282) and a number of case labels
// (inner 300, 303-304, 313, 336, 338-340, 343-346, 349-350, 352-353, 356) —
// confirm the full case list against the original source file.
274  LOG_DATA_ADDR *addr, int ulength, const char *udata, int rlength, const char *rdata)
275 {
276  LOG_PRIOR_NODE *node;
277  int error_code = NO_ERROR;
278 
279  node = (LOG_PRIOR_NODE *) malloc (sizeof (LOG_PRIOR_NODE));
280  if (node == NULL)
281  {
283  return NULL;
284  }
285 
286  node->log_header.type = rec_type;
287 
288  node->tde_encrypted = false;
289 
290  node->data_header = NULL;
291  node->ulength = 0;
292  node->udata = NULL;
293  node->rlength = 0;
294  node->rdata = NULL;
295  node->next = NULL;
296 
297  switch (rec_type)
298  {
299  case LOG_UNDOREDO_DATA:
301  case LOG_UNDO_DATA:
302  case LOG_REDO_DATA:
305  case LOG_MVCC_REDO_DATA:
306  case LOG_MVCC_UNDO_DATA:
307  /* We shouldn't be here */
308  /* Use prior_lsa_alloc_and_copy_crumbs instead */
309  assert_release (false);
310  error_code = ER_FAILED;
311  break;
312 
314  error_code = prior_lsa_gen_dbout_redo_record (thread_p, node, rcvindex, rlength, rdata);
315  break;
316 
317  case LOG_POSTPONE:
318  assert (ulength == 0 && udata == NULL);
319 
320  error_code = prior_lsa_gen_postpone_record (thread_p, node, rcvindex, addr, rlength, rdata);
321  break;
322 
323  case LOG_2PC_PREPARE:
324  assert (addr == NULL);
325  error_code = prior_lsa_gen_2pc_prepare_record (thread_p, node, ulength, udata, rlength, rdata);
326  break;
327  case LOG_END_CHKPT:
328  assert (addr == NULL);
329  error_code = prior_lsa_gen_end_chkpt_record (thread_p, node, ulength, udata, rlength, rdata);
330  break;
331 
332  case LOG_RUN_POSTPONE:
333  case LOG_COMPENSATE:
334  case LOG_SAVEPOINT:
335 
337 
341  case LOG_DUMMY_GENERIC:
342 
347  case LOG_COMMIT:
348  case LOG_ABORT:
351  case LOG_SYSOP_END:
354  case LOG_2PC_START:
355  case LOG_START_CHKPT:
357  assert (rlength == 0 && rdata == NULL);
358 
359  error_code = prior_lsa_gen_record (thread_p, node, rec_type, ulength, udata);
360  break;
361 
362  default:
363  break;
364  }
365 
366  if (error_code == NO_ERROR)
367  {
368  return node;
369  }
370  else
371  {
// Failure path: release every buffer a helper may have attached to the node.
372  if (node != NULL)
373  {
374  if (node->data_header != NULL)
375  {
376  free_and_init (node->data_header);
377  }
378  if (node->udata != NULL)
379  {
380  free_and_init (node->udata);
381  }
382  if (node->rdata != NULL)
383  {
384  free_and_init (node->rdata);
385  }
386  free_and_init (node);
387  }
388 
389  return NULL;
390  }
391 }
392 
393 /*
394  * prior_lsa_alloc_and_copy_crumbs -
395  *
396  * return: new node
397  *
398  * rec_type(in):
399  * rcvindex(in):
400  * addr(in):
401  * num_ucrumbs(in):
402  * ucrumbs(in):
403  * num_rcrumbs(in):
404  * rcrumbs(in):
405  */
// Allocate a LOG_PRIOR_NODE for an undo/redo (possibly MVCC) record built from
// crumb arrays, delegating payload construction to
// prior_lsa_gen_undoredo_record_from_crumbs. Returns the node, or NULL on
// failure with any partially built buffers released. Any other record type is
// an API misuse and is rejected with assert_release.
// NOTE(review): the extraction dropped the return-type/signature lines (inner
// 406-407), an error-report call (inner 417) and several case labels (inner
// 436, 439-440) — confirm against the original source file.
408  LOG_DATA_ADDR *addr, const int num_ucrumbs,
409  const LOG_CRUMB *ucrumbs, const int num_rcrumbs, const LOG_CRUMB *rcrumbs)
410 {
411  LOG_PRIOR_NODE *node;
412  int error = NO_ERROR;
413 
414  node = (LOG_PRIOR_NODE *) malloc (sizeof (LOG_PRIOR_NODE));
415  if (node == NULL)
416  {
418  return NULL;
419  }
420 
421  node->log_header.type = rec_type;
422 
423  node->tde_encrypted = false;
424 
425  node->data_header_length = 0;
426  node->data_header = NULL;
427  node->ulength = 0;
428  node->udata = NULL;
429  node->rlength = 0;
430  node->rdata = NULL;
431  node->next = NULL;
432 
433  switch (rec_type)
434  {
435  case LOG_UNDOREDO_DATA:
437  case LOG_UNDO_DATA:
438  case LOG_REDO_DATA:
441  case LOG_MVCC_UNDO_DATA:
442  case LOG_MVCC_REDO_DATA:
443  error = prior_lsa_gen_undoredo_record_from_crumbs (thread_p, node, rcvindex, addr, num_ucrumbs, ucrumbs,
444  num_rcrumbs, rcrumbs);
445  break;
446 
447  default:
448  /* Unhandled */
449  assert_release (false);
450  error = ER_FAILED;
451  break;
452  }
453 
454  if (error == NO_ERROR)
455  {
456  return node;
457  }
458  else
459  {
// Failure path: release every buffer the generator may have attached.
460  if (node != NULL)
461  {
462  if (node->data_header != NULL)
463  {
464  free_and_init (node->data_header);
465  }
466  if (node->udata != NULL)
467  {
468  free_and_init (node->udata);
469  }
470  if (node->rdata != NULL)
471  {
472  free_and_init (node->rdata);
473  }
474  free_and_init (node);
475  }
476  return NULL;
477  }
478 }
479 
480 /*
481  * prior_lsa_copy_undo_data_to_node -
482  *
483  * return: error code or NO_ERROR
484  *
485  * node(in/out):
486  * length(in):
487  * data(in):
488  */
// Copy LENGTH bytes of undo DATA into a freshly malloc'ed node->udata buffer
// and record the size in node->ulength. A non-positive length or NULL data is
// treated as "no undo payload" and succeeds without allocating.
489 static int
490 prior_lsa_copy_undo_data_to_node (LOG_PRIOR_NODE *node, int length, const char *data)
491 {
492  if (length <= 0 || data == NULL)
493  {
494  return NO_ERROR;
495  }
496 
497  node->udata = (char *) malloc (length);
498  if (node->udata == NULL)
499  {
// NOTE(review): inner lines 500-501 were lost by the extraction — presumably
// an ER_OUT_OF_VIRTUAL_MEMORY report and an early error return; confirm
// against the original source file.
502  }
503 
504  memcpy (node->udata, data, length);
505 
506  node->ulength = length;
507 
508  return NO_ERROR;
509 }
510 
511 /*
512  * prior_lsa_copy_redo_data_to_node -
513  *
514  * return: error code or NO_ERROR
515  *
516  * node(in/out):
517  * length(in):
518  * data(in):
519  */
// Copy LENGTH bytes of redo DATA into a freshly malloc'ed node->rdata buffer
// and record the size in node->rlength. A non-positive length or NULL data is
// treated as "no redo payload" and succeeds without allocating.
520 static int
521 prior_lsa_copy_redo_data_to_node (LOG_PRIOR_NODE *node, int length, const char *data)
522 {
523  if (length <= 0 || data == NULL)
524  {
525  return NO_ERROR;
526  }
527 
528  node->rdata = (char *) malloc (length);
529  if (node->rdata == NULL)
530  {
// NOTE(review): inner lines 531-532 were lost by the extraction — presumably
// an ER_OUT_OF_VIRTUAL_MEMORY report and an early error return; confirm
// against the original source file.
533  }
534 
535  memcpy (node->rdata, data, length);
536 
537  node->rlength = length;
538 
539  return NO_ERROR;
540 }
541 
542 /*
543  * prior_lsa_copy_undo_crumbs_to_node -
544  *
545  * return: error code or NO_ERROR
546  *
547  * node(in/out):
548  * num_crumbs(in):
549  * crumbs(in):
550  */
// Concatenate NUM_CRUMBS undo crumbs into one malloc'ed node->udata buffer
// (total size recorded in node->ulength). Zero total length leaves the node
// without an undo buffer and still succeeds.
551 static int
552 prior_lsa_copy_undo_crumbs_to_node (LOG_PRIOR_NODE *node, int num_crumbs, const LOG_CRUMB *crumbs)
553 {
554  int i, length;
555  char *ptr;
556 
557  /* Safe guard: either num_crumbs is 0 or crumbs array is not NULL */
558  assert (num_crumbs == 0 || crumbs != NULL);
559 
// First pass: total size; second pass below copies each crumb back to back.
560  for (i = 0, length = 0; i < num_crumbs; i++)
561  {
562  length += crumbs[i].length;
563  }
564 
565  assert (node->udata == NULL);
566  if (length > 0)
567  {
568  node->udata = (char *) malloc (length);
569  if (node->udata == NULL)
570  {
// NOTE(review): inner lines 571-572 were lost by the extraction — presumably
// an ER_OUT_OF_VIRTUAL_MEMORY report and an early error return; confirm
// against the original source file.
573  }
574 
575  ptr = node->udata;
576  for (i = 0; i < num_crumbs; i++)
577  {
578  memcpy (ptr, crumbs[i].data, crumbs[i].length);
579  ptr += crumbs[i].length;
580  }
581  }
582 
583  node->ulength = length;
584  return NO_ERROR;
585 }
586 
587 /*
588  * prior_lsa_copy_redo_crumbs_to_node -
589  *
590  * return: error code or NO_ERROR
591  *
592  * node(in/out):
593  * num_crumbs(in):
594  * crumbs(in):
595  */
// Concatenate NUM_CRUMBS redo crumbs into one malloc'ed node->rdata buffer
// (total size recorded in node->rlength). Zero total length leaves the node
// without a redo buffer and still succeeds. Mirrors the undo variant above.
596 static int
597 prior_lsa_copy_redo_crumbs_to_node (LOG_PRIOR_NODE *node, int num_crumbs, const LOG_CRUMB *crumbs)
598 {
599  int i, length;
600  char *ptr;
601 
602  /* Safe guard: either num_crumbs is 0 or crumbs array is not NULL */
603  assert (num_crumbs == 0 || crumbs != NULL);
604 
605  for (i = 0, length = 0; i < num_crumbs; i++)
606  {
607  length += crumbs[i].length;
608  }
609 
610  assert (node->rdata == NULL);
611  if (length > 0)
612  {
613  node->rdata = (char *) malloc (length);
614  if (node->rdata == NULL)
615  {
// NOTE(review): inner lines 616-617 were lost by the extraction — presumably
// an ER_OUT_OF_VIRTUAL_MEMORY report and an early error return; confirm
// against the original source file.
618  }
619 
620  ptr = node->rdata;
621  for (i = 0; i < num_crumbs; i++)
622  {
623  memcpy (ptr, crumbs[i].data, crumbs[i].length);
624  ptr += crumbs[i].length;
625  }
626  }
627 
628  node->rlength = length;
629 
630  return NO_ERROR;
631 }
632 
633 /*
634  * prior_lsa_gen_undoredo_record_from_crumbs () - Generate undoredo or MVCC
635  * undoredo log record.
636  *
637  * return : Error code.
638  * thread_p (in) : Thread entry.
639  * node (in) : Log prior node.
640  * rcvindex (in) : Index of recovery function.
641  * addr (in) : Logged data address.
642  * num_ucrumbs (in) : Number of undo data crumbs.
643  * ucrumbs (in) : Undo data crumbs.
644  * num_rcrumbs (in) : Number of redo data crumbs.
645  * rcrumbs (in) : Redo data crumbs.
646  */
// Build the data header and undo/redo payload of an (MVCC) undo/redo/undoredo
// record from crumb arrays: totals the crumb lengths, optionally compresses
// (and diffs) the payloads when zip support is available and the data is large
// enough, sizes and fills the type-specific data header (including MVCCID and
// vacuum info for MVCC records), then attaches the — possibly compressed —
// undo/redo buffers to the node. On any failure all attached buffers are
// released before returning the error.
// NOTE(review): the extraction dropped the signature line (inner 648) and a
// number of statements: guard conditions (inner 698, 712), the tail of an
// assert (inner 770), the diff-type reassignments (inner 804, 808), case
// labels (inner 827-828, 832, 890-891, 903), an er_set call (inner 844) and
// asserts (inner 940, 968-969) — confirm all of them against the source file.
647 static int
649  LOG_DATA_ADDR *addr, int num_ucrumbs, const LOG_CRUMB *ucrumbs,
650  int num_rcrumbs, const LOG_CRUMB *rcrumbs)
651 {
652  LOG_REC_REDO *redo_p = NULL;
653  LOG_REC_UNDO *undo_p = NULL;
654  LOG_REC_UNDOREDO *undoredo_p = NULL;
655  LOG_REC_MVCC_REDO *mvcc_redo_p = NULL;
656  LOG_REC_MVCC_UNDO *mvcc_undo_p = NULL;
657  LOG_REC_MVCC_UNDOREDO *mvcc_undoredo_p = NULL;
658  LOG_DATA *log_data_p = NULL;
659  LOG_VACUUM_INFO *vacuum_info_p = NULL;
660  VPID *vpid = NULL;
661  int error_code = NO_ERROR;
662  int i;
663  int ulength, rlength, *data_header_ulength_p = NULL, *data_header_rlength_p = NULL;
664  int total_length;
665  MVCCID *mvccid_p = NULL;
666  LOG_TDES *tdes = NULL;
667  char *data_ptr = NULL, *tmp_ptr = NULL;
668  char *undo_data = NULL, *redo_data = NULL;
669  LOG_ZIP *zip_undo = NULL, *zip_redo = NULL;
670  bool is_mvcc_op = LOG_IS_MVCC_OP_RECORD_TYPE (node->log_header.type);
671  bool has_undo = false;
672  bool has_redo = false;
673  bool is_undo_zip = false, is_redo_zip = false, is_diff = false;
674  bool can_zip = false;
676 
677  assert (num_ucrumbs == 0 || ucrumbs != NULL);
678  assert (num_rcrumbs == 0 || rcrumbs != NULL);
679 
680  zip_undo = log_append_get_zip_undo (thread_p);
681  zip_redo = log_append_get_zip_redo (thread_p);
682 
// Total undo/redo sizes are the sums of the crumb lengths.
683  ulength = 0;
684  for (i = 0; i < num_ucrumbs; i++)
685  {
686  ulength += ucrumbs[i].length;
687  }
688  assert (0 <= ulength);
689 
690  rlength = 0;
691  for (i = 0; i < num_rcrumbs; i++)
692  {
693  rlength += rcrumbs[i].length;
694  }
695  assert (0 <= rlength);
696 
697  /* Check if we have undo or redo and if we can zip */
699  {
700  has_undo = true;
701  has_redo = true;
702  can_zip = log_Zip_support && (zip_undo != NULL || ulength == 0) && (zip_redo != NULL || rlength == 0);
703  }
704  else if (LOG_IS_REDO_RECORD_TYPE (node->log_header.type))
705  {
706  has_redo = true;
707  can_zip = log_Zip_support && zip_redo;
708  }
709  else
710  {
711  /* UNDO type */
713  has_undo = true;
714  can_zip = log_Zip_support && zip_undo;
715  }
716 
717  if (can_zip == true && (ulength >= log_Zip_min_size_to_compress || rlength >= log_Zip_min_size_to_compress))
718  {
719  /* Try to zip undo and/or redo data */
720  total_length = 0;
721  if (ulength > 0)
722  {
723  total_length += ulength;
724  }
725  if (rlength > 0)
726  {
727  total_length += rlength;
728  }
729 
730  if (log_append_realloc_data_ptr (thread_p, total_length))
731  {
732  data_ptr = log_append_get_data_ptr (thread_p);
733  }
734 
735  if (data_ptr != NULL)
736  {
// Flatten the crumbs into the scratch buffer: undo first, then redo.
737  tmp_ptr = data_ptr;
738 
739  if (ulength >= log_Zip_min_size_to_compress)
740  {
741  assert (has_undo == true);
742 
743  undo_data = data_ptr;
744 
745  for (i = 0; i < num_ucrumbs; i++)
746  {
747  memcpy (tmp_ptr, (char *) ucrumbs[i].data, ucrumbs[i].length);
748  tmp_ptr += ucrumbs[i].length;
749  }
750 
751  assert (CAST_BUFLEN (tmp_ptr - undo_data) == ulength);
752  }
753 
754  if (rlength >= log_Zip_min_size_to_compress)
755  {
756  assert (has_redo == true);
757 
758  redo_data = tmp_ptr;
759 
760  for (i = 0; i < num_rcrumbs; i++)
761  {
762  (void) memcpy (tmp_ptr, (char *) rcrumbs[i].data, rcrumbs[i].length);
763  tmp_ptr += rcrumbs[i].length;
764  }
765 
766  assert (CAST_BUFLEN (tmp_ptr - redo_data) == rlength);
767  }
768 
769  assert (CAST_BUFLEN (tmp_ptr - data_ptr) == total_length
771 
// When both sides are compressible, the redo is diffed against the undo first;
// a successful redo zip then marks the record as a diff-undoredo record.
772  if (ulength >= log_Zip_min_size_to_compress && rlength >= log_Zip_min_size_to_compress)
773  {
774  (void) log_diff (ulength, undo_data, rlength, redo_data);
775 
776  is_undo_zip = log_zip (zip_undo, ulength, undo_data);
777  is_redo_zip = log_zip (zip_redo, rlength, redo_data);
778 
779  if (is_redo_zip)
780  {
781  is_diff = true;
782  }
783  }
784  else
785  {
786  if (ulength >= log_Zip_min_size_to_compress)
787  {
788  is_undo_zip = log_zip (zip_undo, ulength, undo_data);
789  }
790  if (rlength >= log_Zip_min_size_to_compress)
791  {
792  is_redo_zip = log_zip (zip_redo, rlength, redo_data);
793  }
794  }
795  }
796  }
797 
798  if (is_diff)
799  {
800  /* Set diff UNDOREDO type */
801  assert (has_redo && has_undo);
802  if (is_mvcc_op)
803  {
805  }
806  else
807  {
809  }
810  }
811 
812  /* Compute the length of data header */
813  switch (node->log_header.type)
814  {
815  case LOG_MVCC_UNDO_DATA:
816  node->data_header_length = sizeof (LOG_REC_MVCC_UNDO);
817  break;
818  case LOG_UNDO_DATA:
819  node->data_header_length = sizeof (LOG_REC_UNDO);
820  break;
821  case LOG_MVCC_REDO_DATA:
822  node->data_header_length = sizeof (LOG_REC_MVCC_REDO);
823  break;
824  case LOG_REDO_DATA:
825  node->data_header_length = sizeof (LOG_REC_REDO);
826  break;
829  node->data_header_length = sizeof (LOG_REC_MVCC_UNDOREDO);
830  break;
831  case LOG_UNDOREDO_DATA:
833  node->data_header_length = sizeof (LOG_REC_UNDOREDO);
834  break;
835  default:
836  assert (0);
837  break;
838  }
839 
840  /* Allocate memory for data header */
841  node->data_header = (char *) malloc (node->data_header_length);
842  if (node->data_header == NULL)
843  {
845  error_code = ER_OUT_OF_VIRTUAL_MEMORY;
846  goto error;
847  }
848 
849 #if !defined (NDEBUG)
850  /* Suppress valgrind complaint. */
851  memset (node->data_header, 0, node->data_header_length);
852 #endif // DEBUG
853 
854  /* Fill the data header fields */
855  switch (node->log_header.type)
856  {
857  case LOG_MVCC_UNDO_DATA:
858  /* Use undo data from MVCC undo structure */
859  mvcc_undo_p = (LOG_REC_MVCC_UNDO *) node->data_header;
860 
861  /* Must also fill vacuum info */
862  vacuum_info_p = &mvcc_undo_p->vacuum_info;
863 
864  /* Must also fill MVCCID field */
865  mvccid_p = &mvcc_undo_p->mvccid;
866 
867  /* Fall through */
868  case LOG_UNDO_DATA:
869  undo_p = (node->log_header.type == LOG_UNDO_DATA ? (LOG_REC_UNDO *) node->data_header : &mvcc_undo_p->undo);
870 
871  data_header_ulength_p = &undo_p->length;
872  log_data_p = &undo_p->data;
873  break;
874 
875  case LOG_MVCC_REDO_DATA:
876  /* Use redo data from MVCC redo structure */
877  mvcc_redo_p = (LOG_REC_MVCC_REDO *) node->data_header;
878 
879  /* Must also fill MVCCID field */
880  mvccid_p = &mvcc_redo_p->mvccid;
881 
882  /* Fall through */
883  case LOG_REDO_DATA:
884  redo_p = (node->log_header.type == LOG_REDO_DATA ? (LOG_REC_REDO *) node->data_header : &mvcc_redo_p->redo);
885 
886  data_header_rlength_p = &redo_p->length;
887  log_data_p = &redo_p->data;
888  break;
889 
892  /* Use undoredo data from MVCC undoredo structure */
893  mvcc_undoredo_p = (LOG_REC_MVCC_UNDOREDO *) node->data_header;
894 
895  /* Must also fill vacuum info */
896  vacuum_info_p = &mvcc_undoredo_p->vacuum_info;
897 
898  /* Must also fill MVCCID field */
899  mvccid_p = &mvcc_undoredo_p->mvccid;
900 
901  /* Fall through */
902  case LOG_UNDOREDO_DATA:
904  undoredo_p = ((node->log_header.type == LOG_UNDOREDO_DATA || node->log_header.type == LOG_DIFF_UNDOREDO_DATA)
905  ? (LOG_REC_UNDOREDO *) node->data_header : &mvcc_undoredo_p->undoredo);
906 
907  data_header_ulength_p = &undoredo_p->ulength;
908  data_header_rlength_p = &undoredo_p->rlength;
909  log_data_p = &undoredo_p->data;
910  break;
911 
912  default:
913  assert (0);
914  break;
915  }
916 
917  /* Fill log data fields */
918  assert (log_data_p != NULL);
919 
920  log_data_p->rcvindex = rcvindex;
921  log_data_p->offset = addr->offset;
922 
923  if (addr->pgptr != NULL)
924  {
925  vpid = pgbuf_get_vpid_ptr (addr->pgptr);
926  log_data_p->pageid = vpid->pageid;
927  log_data_p->volid = vpid->volid;
928  }
929  else
930  {
931  log_data_p->pageid = NULL_PAGEID;
932  log_data_p->volid = NULL_VOLID;
933  }
934 
935  if (mvccid_p != NULL)
936  {
937  /* Fill mvccid field */
938 
939  /* Must be an MVCC operation */
941  assert (LOG_IS_MVCC_OPERATION (rcvindex));
942 
// Sub-transaction MVCCID takes precedence over the main transaction's id.
943  tdes = LOG_FIND_CURRENT_TDES (thread_p);
944  if (tdes == NULL || !MVCCID_IS_VALID (tdes->mvccinfo.id))
945  {
946  assert_release (false);
947  error_code = ER_FAILED;
948  goto error;
949  }
950  else
951  {
952  if (!tdes->mvccinfo.sub_ids.empty ())
953  {
954  *mvccid_p = tdes->mvccinfo.sub_ids.back ();
955  }
956  else
957  {
958  *mvccid_p = tdes->mvccinfo.id;
959  }
960  }
961  }
962 
963  if (vacuum_info_p != NULL)
964  {
965  /* Fill vacuum info field */
966 
967  /* Must be an UNDO or UNDOREDO MVCC operation */
970  assert (LOG_IS_MVCC_OPERATION (rcvindex));
971 
972  if (addr->vfid != NULL)
973  {
974  VFID_COPY (&vacuum_info_p->vfid, addr->vfid);
975  }
976  else
977  {
978  if (rcvindex == RVES_NOTIFY_VACUUM)
979  {
980  VFID_SET_NULL (&vacuum_info_p->vfid);
981  }
982  else
983  {
984  /* We require VFID for vacuum */
985  assert_release (false);
986  error_code = ER_FAILED;
987  goto error;
988  }
989  }
990 
991  /* Initialize previous MVCC op log lsa - will be completed later */
992  LSA_SET_NULL (&vacuum_info_p->prev_mvcc_op_log_lsa);
993  }
994 
// Attach payloads: compressed size is encoded via MAKE_ZIP_LEN in the header.
995  if (is_undo_zip)
996  {
997  assert (has_undo && (data_header_ulength_p != NULL));
998 
999  *data_header_ulength_p = MAKE_ZIP_LEN (zip_undo->data_length);
1000  error_code = prior_lsa_copy_undo_data_to_node (node, zip_undo->data_length, (char *) zip_undo->log_data);
1001  }
1002  else if (has_undo)
1003  {
1004  assert (data_header_ulength_p != NULL);
1005 
1006  *data_header_ulength_p = ulength;
1007  error_code = prior_lsa_copy_undo_crumbs_to_node (node, num_ucrumbs, ucrumbs);
1008  }
1009 
1010  if (is_redo_zip)
1011  {
1012  assert (has_redo && (data_header_rlength_p != NULL));
1013 
1014  *data_header_rlength_p = MAKE_ZIP_LEN (zip_redo->data_length);
1015  error_code = prior_lsa_copy_redo_data_to_node (node, zip_redo->data_length, (char *) zip_redo->log_data);
1016  }
1017  else if (has_redo)
1018  {
1019  *data_header_rlength_p = rlength;
1020  error_code = prior_lsa_copy_redo_crumbs_to_node (node, num_rcrumbs, rcrumbs);
1021  }
1022 
1023  if (error_code != NO_ERROR)
1024  {
1025  goto error;
1026  }
1027 
1028  return error_code;
1029 
1030 error:
1031  if (node->data_header != NULL)
1032  {
1033  free_and_init (node->data_header);
1034  }
1035  if (node->udata != NULL)
1036  {
1037  free_and_init (node->udata);
1038  }
1039  if (node->rdata != NULL)
1040  {
1041  free_and_init (node->rdata);
1042  }
1043 
1044  return error_code;
1045 }
1046 
1047 /*
1048  * prior_lsa_gen_postpone_record -
1049  *
1050  * return: error code or NO_ERROR
1051  *
1052  * node(in/out):
1053  * rcvindex(in):
1054  * addr(in):
1055  * length(in):
1056  * data(in):
1057  */
// Build a postpone record: its data header is a LOG_REC_REDO describing the
// target page/offset, and the postpone payload is stored as redo data.
// NOTE(review): the extraction dropped the signature line (inner 1059) and an
// error-report call (inner 1070) — confirm against the original source file.
1058 static int
1060  LOG_DATA_ADDR *addr, int length, const char *data)
1061 {
1062  LOG_REC_REDO *redo;
1063  VPID *vpid;
1064  int error_code = NO_ERROR;
1065 
1066  node->data_header_length = sizeof (LOG_REC_REDO);
1067  node->data_header = (char *) malloc (node->data_header_length);
1068  if (node->data_header == NULL)
1069  {
1071  return ER_OUT_OF_VIRTUAL_MEMORY;
1072  }
1073  redo = (LOG_REC_REDO *) node->data_header;
1074 
1075  redo->data.rcvindex = rcvindex;
1076  if (addr->pgptr != NULL)
1077  {
1078  vpid = pgbuf_get_vpid_ptr (addr->pgptr);
1079  redo->data.pageid = vpid->pageid;
1080  redo->data.volid = vpid->volid;
1081  }
1082  else
1083  {
1084  redo->data.pageid = NULL_PAGEID;
1085  redo->data.volid = NULL_VOLID;
1086  }
1087  redo->data.offset = addr->offset;
1088 
1089  redo->length = length;
1090  error_code = prior_lsa_copy_redo_data_to_node (node, redo->length, data);
1091 
1092  return error_code;
1093 }
1094 
1095 /*
1096  * prior_lsa_gen_dbout_redo_record -
1097  *
1098  * return: error code or NO_ERROR
1099  *
1100  * node(in/out):
1101  * rcvindex(in):
1102  * length(in):
1103  * data(in):
1104  */
// Build a database-external redo record: a LOG_REC_DBOUT_REDO header (recovery
// index + length, no page address) with the payload stored as redo data.
// NOTE(review): the extraction dropped the signature line (inner 1106) and an
// error-report call (inner 1116) — confirm against the original source file.
1105 static int
1107  const char *data)
1108 {
1109  LOG_REC_DBOUT_REDO *dbout_redo;
1110  int error_code = NO_ERROR;
1111 
1112  node->data_header_length = sizeof (LOG_REC_DBOUT_REDO);
1113  node->data_header = (char *) malloc (node->data_header_length);
1114  if (node->data_header == NULL)
1115  {
1117  return ER_OUT_OF_VIRTUAL_MEMORY;
1118  }
1119  dbout_redo = (LOG_REC_DBOUT_REDO *) node->data_header;
1120 
1121  dbout_redo->rcvindex = rcvindex;
1122  dbout_redo->length = length;
1123 
1124  error_code = prior_lsa_copy_redo_data_to_node (node, dbout_redo->length, data);
1125 
1126  return error_code;
1127 }
1128 
1129 /*
1130  * prior_lsa_gen_2pc_prepare_record -
1131  *
1132  * return: error code or NO_ERROR
1133  *
1134  * node(in/out):
1135  * gtran_length(in):
1136  * gtran_data(in):
1137  * lock_length(in):
1138  * lock_data(in):
1139  */
// Build a 2PC prepare-to-commit record: the global transaction data travels in
// the node's undo buffer and the acquired-lock data in its redo buffer.
// NOTE(review): the extraction dropped the signature line (inner 1141) and an
// error-report call (inner 1150). Also note that a gtran copy failure would be
// masked by a later successful lock copy (error_code overwritten) — behavior
// kept as-is here; flagging for a follow-up against the original source.
1140 static int
1142  const char *gtran_data, int lock_length, const char *lock_data)
1143 {
1144  int error_code = NO_ERROR;
1145 
1146  node->data_header_length = sizeof (LOG_REC_2PC_PREPCOMMIT);
1147  node->data_header = (char *) malloc (node->data_header_length);
1148  if (node->data_header == NULL)
1149  {
1151  return ER_OUT_OF_VIRTUAL_MEMORY;
1152  }
1153 
1154  if (gtran_length > 0)
1155  {
1156  error_code = prior_lsa_copy_undo_data_to_node (node, gtran_length, gtran_data);
1157  }
1158  if (lock_length > 0)
1159  {
1160  error_code = prior_lsa_copy_redo_data_to_node (node, lock_length, lock_data);
1161  }
1162 
1163  return error_code;
1164 }
1165 
1166 /*
1167  * prior_lsa_gen_end_chkpt_record -
1168  *
1169  * return: error code or NO_ERROR
1170  *
1171  * node(in/out):
1172  * tran_length(in):
1173  * tran_data(in):
1174  * topop_length(in):
1175  * topop_data(in):
1176  */
// Build an end-of-checkpoint record: active-transaction table data travels in
// the node's undo buffer, top-operation data in its redo buffer.
// NOTE(review): the extraction dropped an error-report call (inner 1187). As in
// the 2PC generator above, a tran-data copy failure can be masked by a later
// successful topop copy — behavior kept as-is; verify against the source.
1177 static int
1178 prior_lsa_gen_end_chkpt_record (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, int tran_length, const char *tran_data,
1179  int topop_length, const char *topop_data)
1180 {
1181  int error_code = NO_ERROR;
1182 
1183  node->data_header_length = sizeof (LOG_REC_CHKPT);
1184  node->data_header = (char *) malloc (node->data_header_length);
1185  if (node->data_header == NULL)
1186  {
1188  return ER_OUT_OF_VIRTUAL_MEMORY;
1189  }
1190 
1191  if (tran_length > 0)
1192  {
1193  error_code = prior_lsa_copy_undo_data_to_node (node, tran_length, tran_data);
1194  }
1195  if (topop_length > 0)
1196  {
1197  error_code = prior_lsa_copy_redo_data_to_node (node, topop_length, topop_data);
1198  }
1199 
1200  return error_code;
1201 }
1202 
1203 /*
1204  * prior_lsa_gen_record -
1205  *
1206  * return: error code or NO_ERROR
1207  *
1208  * node(in/out):
1209  * rec_type(in):
1210  * length(in):
1211  * data(in):
1212  */
// Generic record builder for record types without undo/redo crumbs: sizes the
// type-specific data header (zero for dummy/marker records), allocates it, and
// stores any extra payload in the node's undo buffer.
// NOTE(review): the extraction dropped several case labels (inner 1222-1223,
// 1226-1229, 1231, 1243, 1245, 1252, 1256-1257, 1271) and an error-report call
// (inner 1292) — confirm the full case list against the original source file.
1213 static int
1214 prior_lsa_gen_record (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RECTYPE rec_type, int length,
1215  const char *data)
1216 {
1217  int error_code = NO_ERROR;
1218 
1219  node->data_header_length = 0;
1220  switch (rec_type)
1221  {
1224  case LOG_DUMMY_OVF_RECORD:
1225  case LOG_DUMMY_GENERIC:
1230  case LOG_START_CHKPT:
1232  assert (length == 0 && data == NULL);
1233  break;
1234 
1235  case LOG_RUN_POSTPONE:
1236  node->data_header_length = sizeof (LOG_REC_RUN_POSTPONE);
1237  break;
1238 
1239  case LOG_COMPENSATE:
1240  node->data_header_length = sizeof (LOG_REC_COMPENSATE);
1241  break;
1242 
1244  assert (length == 0 && data == NULL);
1246  break;
1247 
1248  case LOG_SAVEPOINT:
1249  node->data_header_length = sizeof (LOG_REC_SAVEPT);
1250  break;
1251 
1253  node->data_header_length = sizeof (LOG_REC_START_POSTPONE);
1254  break;
1255 
1258  break;
1259 
1260  case LOG_COMMIT:
1261  case LOG_ABORT:
1262  assert (length == 0 && data == NULL);
1263  node->data_header_length = sizeof (LOG_REC_DONETIME);
1264  break;
1265 
1266  case LOG_SYSOP_END:
1267  node->data_header_length = sizeof (LOG_REC_SYSOP_END);
1268  break;
1269 
1270  case LOG_REPLICATION_DATA:
1272  node->data_header_length = sizeof (LOG_REC_REPLICATION);
1273  break;
1274 
1275  case LOG_2PC_START:
1276  node->data_header_length = sizeof (LOG_REC_2PC_START);
1277  break;
1278 
1279  case LOG_END_CHKPT:
1280  node->data_header_length = sizeof (LOG_REC_CHKPT);
1281  break;
1282 
1283  default:
1284  break;
1285  }
1286 
1287  if (node->data_header_length > 0)
1288  {
1289  node->data_header = (char *) malloc (node->data_header_length);
1290  if (node->data_header == NULL)
1291  {
1293  return ER_OUT_OF_VIRTUAL_MEMORY;
1294  }
1295 
1296 #if !defined (NDEBUG)
1297  /* Suppress valgrind complaint. */
1298  memset (node->data_header, 0, node->data_header_length);
1299 #endif // DEBUG
1300  }
1301 
// Extra payload (if any) rides in the node's undo buffer for these types.
1302  if (length > 0)
1303  {
1304  error_code = prior_lsa_copy_undo_data_to_node (node, length, data);
1305  }
1306 
1307  return error_code;
1308 }
1309 
// Update the global log header's MVCC tracking for a newly appended MVCC op
// record: advance newest_block_mvccid and remember the record's LSA as the
// latest MVCC op log position.
// NOTE(review): the extraction dropped the branch condition (inner 1314,
// presumably "first record in a new vacuum block" test), a statement at inner
// 1317, sanity-check asserts (inner 1323-1324, 1326-1327) and inner 1335 —
// confirm the exact condition and checks against the original source file.
1310 static void
1311 prior_update_header_mvcc_info (const LOG_LSA &record_lsa, MVCCID mvccid)
1312 {
1313  assert (MVCCID_IS_VALID (mvccid));
1315  {
1316  // first mvcc record for this block
1318  log_Gl.hdr.newest_block_mvccid = mvccid;
1319  }
1320  else
1321  {
1322  // sanity checks
1325  assert (log_Gl.hdr.oldest_visible_mvccid <= mvccid);
1328 
1329  if (log_Gl.hdr.newest_block_mvccid < mvccid)
1330  {
1331  log_Gl.hdr.newest_block_mvccid = mvccid;
1332  }
1333  }
1334  log_Gl.hdr.mvcc_op_log_lsa = record_lsa;
1336 }
1337 
1338 /*
1339  * prior_lsa_next_record_internal -
1340  *
1341  * return: start lsa of log record
1342  *
1343  * node(in/out):
1344  * tdes(in/out):
1345  * with_lock(in):
1346  */
/*
 * NOTE(review): appends the prior node to the global prior list, assigning it
 * the next prior LSA, and performs record-type-specific bookkeeping (MVCC
 * links, sysop/postpone recovery LSAs).  When with_lock is
 * LOG_PRIOR_LSA_WITHOUT_LOCK the mutex is presumably acquired at the dropped
 * line 1358 and released below at line 1504 -- confirm against the repository
 * copy.  Many lines are missing from this extracted listing (1358, 1365-1370,
 * 1379-1382, 1403-1404, 1412-1416, 1427, 1434, 1438, 1447, 1451, 1453, 1457,
 * 1472-1473, 1477, 1482, 1488-1497, 1508, 1513, 1520, 1525), so several
 * conditions and statements cannot be read here.
 */
1347 static LOG_LSA
1348 prior_lsa_next_record_internal (THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_TDES *tdes, int with_lock)
1349 {
1350  LOG_LSA start_lsa;
1351  LOG_REC_MVCC_UNDO *mvcc_undo = NULL;
1352  LOG_REC_MVCC_UNDOREDO *mvcc_undoredo = NULL;
1353  LOG_VACUUM_INFO *vacuum_info = NULL;
1354  MVCCID mvccid = MVCCID_NULL;
1355 
1356  if (with_lock == LOG_PRIOR_LSA_WITHOUT_LOCK)
1357  {
     /* presumably: log_Gl.prior_info.prior_lsa_mutex.lock (); (dropped line) */
1359  }
1360 
1361  prior_lsa_start_append (thread_p, node, tdes);
1362 
1363  LSA_COPY (&start_lsa, &node->start_lsa);
1364 
     /* dropped condition: presumably checks whether the vacuum block of
      * start_lsa advanced, and if so hands finished block data to vacuum */
1366  {
1369  {
1371  <= (vacuum_get_log_blockid (start_lsa.pageid) - 1));
1372 
1373  vacuum_produce_log_block_data (thread_p);
1374  }
1375  }
1376 
1377  /* Is this a valid MVCC operations: 1. node must be undoredo/undo type and must have undo data. 2. record index must
1378  * the index of MVCC operations. */
1381  || (node->log_header.type == LOG_SYSOP_END
1383  {
1384  /* Link the log record to previous MVCC delete/update log record */
1385  /* Will be used by vacuum */
1386  if (node->log_header.type == LOG_MVCC_UNDO_DATA)
1387  {
1388  /* Read from mvcc_undo structure */
1389  mvcc_undo = (LOG_REC_MVCC_UNDO *) node->data_header;
1390  vacuum_info = &mvcc_undo->vacuum_info;
1391  mvccid = mvcc_undo->mvccid;
1392  }
1393  else if (node->log_header.type == LOG_SYSOP_END)
1394  {
1395  /* Read from mvcc_undo structure */
1396  mvcc_undo = & ((LOG_REC_SYSOP_END *) node->data_header)->mvcc_undo;
1397  vacuum_info = &mvcc_undo->vacuum_info;
1398  mvccid = mvcc_undo->mvccid;
1399  }
1400  else
1401  {
1402  /* Read for mvcc_undoredo structure */
1405 
1406  mvcc_undoredo = (LOG_REC_MVCC_UNDOREDO *) node->data_header;
1407  vacuum_info = &mvcc_undoredo->vacuum_info;
1408  mvccid = mvcc_undoredo->mvccid;
1409  }
1410 
1411  /* Save previous mvcc operation log lsa to vacuum info */
     /* dropped lines: presumably copy log_Gl.hdr.mvcc_op_log_lsa into
      * vacuum_info->prev_mvcc_op_log_lsa plus a vacuum_er_log trace */
1413 
1415  "log mvcc op at (%lld, %d) and create link with log_lsa(%lld, %d)",
1417 
1418  prior_update_header_mvcc_info (start_lsa, mvccid);
1419  }
1420  else if (node->log_header.type == LOG_SYSOP_START_POSTPONE)
1421  {
1422  /* we need the system operation start postpone LSA for recovery. we have to save it under prior_lsa_mutex
1423  * protection.
1424  * at the same time, tdes->rcv.atomic_sysop_start_lsa must be reset if it was inside this system op. */
1425  LOG_REC_SYSOP_START_POSTPONE *sysop_start_postpone = NULL;
1426 
1428  tdes->rcv.sysop_start_postpone_lsa = start_lsa;
1429 
1430  sysop_start_postpone = (LOG_REC_SYSOP_START_POSTPONE *) node->data_header;
1431  if (LSA_LT (&sysop_start_postpone->sysop_end.lastparent_lsa, &tdes->rcv.atomic_sysop_start_lsa))
1432  {
1433  /* atomic system operation finished. */
     /* presumably: LSA_SET_NULL (&tdes->rcv.atomic_sysop_start_lsa); (dropped) */
1435  }
1436 
1437  /* for correct checkpoint, this state change must be done under the protection of prior_lsa_mutex */
1439  }
1440  else if (node->log_header.type == LOG_SYSOP_END)
1441  {
1442  /* reset tdes->rcv.sysop_start_postpone_lsa and tdes->rcv.atomic_sysop_start_lsa, if this system op is not nested.
1443  * we'll use lastparent_lsa to check if system op is nested or not. */
1444  LOG_REC_SYSOP_END *sysop_end = NULL;
1445 
1446  sysop_end = (LOG_REC_SYSOP_END *) node->data_header;
     /* dropped condition: presumably !LSA_ISNULL(...atomic_sysop_start_lsa) */
1448  && LSA_LT (&sysop_end->lastparent_lsa, &tdes->rcv.atomic_sysop_start_lsa))
1449  {
1450  /* atomic system operation finished. */
1452  }
1454  && LSA_LT (&sysop_end->lastparent_lsa, &tdes->rcv.sysop_start_postpone_lsa))
1455  {
1456  /* atomic system operation finished. */
1458  }
1459  }
1460  else if (node->log_header.type == LOG_COMMIT_WITH_POSTPONE)
1461  {
1462  /* we need the commit with postpone LSA for recovery. we have to save it under prior_lsa_mutex protection */
1463  tdes->rcv.tran_start_postpone_lsa = start_lsa;
1464  }
1465  else if (node->log_header.type == LOG_SYSOP_ATOMIC_START)
1466  {
1467  /* same as with system op start postpone, we need to save these log records lsa */
1469  tdes->rcv.atomic_sysop_start_lsa = start_lsa;
1470  }
1471 
     /* dropped lines: presumably copy node->data_header into the prior area
      * via prior_lsa_append_data, then the undo/redo payloads below */
1474 
1475  if (node->ulength > 0)
1476  {
     /* presumably: prior_lsa_append_data (node->ulength); (dropped line) */
1478  }
1479 
1480  if (node->rlength > 0)
1481  {
     /* presumably: prior_lsa_append_data (node->rlength); (dropped line) */
1483  }
1484 
1485  /* END append */
1486  prior_lsa_end_append (thread_p, node);
1487 
     /* dropped lines: link node into the prior list (head/tail handling) */
1489  {
1492  }
1493  else
1494  {
1497  }
1498 
1499  /* list_size in bytes */
1500  log_Gl.prior_info.list_size += (sizeof (LOG_PRIOR_NODE) + node->data_header_length + node->ulength + node->rlength);
1501 
1502  if (with_lock == LOG_PRIOR_LSA_WITHOUT_LOCK)
1503  {
1504  log_Gl.prior_info.prior_lsa_mutex.unlock ();
1505 
     /* back-pressure: when the accumulated prior list exceeds the log buffer
      * memory, push it toward the log pages (or wake the flush daemon) */
1506  if (log_Gl.prior_info.list_size >= (INT64) logpb_get_memsize ())
1507  {
1509 
1510 #if defined(SERVER_MODE)
1511  if (!log_is_in_crash_recovery ())
1512  {
     /* presumably: log_wakeup_log_flush_daemon (); (dropped line) */
1514 
1515  thread_sleep (1); /* 1msec */
1516  }
1517  else
1518  {
1519  LOG_CS_ENTER (thread_p);
     /* presumably: logpb_prior_lsa_append_all_list (thread_p); (dropped) */
1521  LOG_CS_EXIT (thread_p);
1522  }
1523 #else
1524  LOG_CS_ENTER (thread_p);
     /* presumably: logpb_prior_lsa_append_all_list (thread_p); (dropped) */
1526  LOG_CS_EXIT (thread_p);
1527 #endif
1528  }
1529  }
1530 
1531  tdes->num_log_records_written++;
1532 
1533  return start_lsa;
1534 }
1535 
1536 LOG_LSA
1538 {
1539  return prior_lsa_next_record_internal (thread_p, node, tdes, LOG_PRIOR_LSA_WITHOUT_LOCK);
1540 }
1541 
1542 LOG_LSA
1544 {
1545  return prior_lsa_next_record_internal (thread_p, node, tdes, LOG_PRIOR_LSA_WITH_LOCK);
1546 }
1547 
/*
 * prior_set_tde_encrypted - mark a prior node so that its log data is
 *   encrypted (TDE) when appended to a log page.
 *
 * return: NO_ERROR on success; an error when the TDE cipher is not loaded
 * node(in/out): prior node to flag
 * recvindex(in): recovery index, used only for the debug trace below
 *
 * NOTE(review): the extracted listing drops the signature line (original
 * 1549: int prior_set_tde_encrypted (log_prior_node *node, LOG_RCVINDEX
 * recvindex) per this file's cross-reference) and the body of the
 * cipher-not-loaded branch (originals 1553-1554) -- presumably an er_set of
 * ER_TDE_CIPHER_IS_NOT_LOADED followed by an error return; confirm against
 * the repository copy.
 */
1548 int
1550 {
1551  if (!tde_Cipher.is_loaded)
1552  {
     /* dropped lines: report ER_TDE_CIPHER_IS_NOT_LOADED and return error */
1555  }
1556 
1557 #if !defined(NDEBUG)
1558  er_log_debug (ARG_FILE_LINE, "TDE: prior_set_tde_encrypted(): rcvindex = %s\n", rv_rcvindex_string (recvindex));
1559 #endif /* !NDEBUG */
1560 
1561  node->tde_encrypted = true;
1562 
1563  return NO_ERROR;
1564 }
1565 
1566 bool
1568 {
1569  return node->tde_encrypted;
1570 }
1571 
1572 /*
1573  * prior_lsa_start_append:
1574  *
1575  * node(in/out):
1576  * tdes(in):
1577  */
/*
 * NOTE(review): fills the node's record header (trid, prev_tranlsa, back_lsa,
 * forw_lsa) and links it into the transaction's head/tail LSA chain before
 * the data is copied.  The extracted listing drops the signature line
 * (original 1579) and several statements (1582, 1590, 1595, 1603, 1613, 1619,
 * 1622, 1627), including the page-fit check and the assignment of
 * node->start_lsa -- confirm details against the repository copy.
 */
1578 static void
1580 {
1581  /* Does the new log record fit in this page ? */
1583 
1584  node->log_header.trid = tdes->trid;
1585 
1586  /*
1587  * Link the record with the previous transaction record for quick undos.
1588  * Link the record backward for backward traversal of the log.
1589  */
1591 
     /* system workers outside a system op keep no per-transaction chain */
1592  if (tdes->is_system_worker_transaction () && !tdes->is_under_sysop ())
1593  {
1594  // lose the link to previous record
1596  LSA_SET_NULL (&tdes->head_lsa);
1597  LSA_SET_NULL (&tdes->tail_lsa);
1598  }
1599  else
1600  {
1601  LSA_COPY (&node->log_header.prev_tranlsa, &tdes->tail_lsa);
1602 
     /* dropped line: presumably updates tdes->tail_lsa to the new record */
1604 
1605  /*
1606  * Is this the first log record of transaction ?
1607  */
1608  if (LSA_ISNULL (&tdes->head_lsa))
1609  {
1610  LSA_COPY (&tdes->head_lsa, &tdes->tail_lsa);
1611  }
1612 
1614  }
1615 
1616  /*
1617  * Remember the address of new append record
1618  */
1620  LSA_SET_NULL (&node->log_header.forw_lsa);
1621 
1623 
1624  /*
1625  * Set the page dirty, increase and align the append offset
1626  */
1628 }
1629 
1630 /*
1631  * prior_lsa_end_append -
1632  *
1633  * return:
1634  *
1635  * node(in/out):
1636  */
/*
 * NOTE(review): finalizes the append of a prior node.  The extracted listing
 * drops the signature line (original 1638) and essentially the whole body
 * (1640-1641, 1643) -- presumably it aligns the prior LSA and records the
 * record's end/forward position; confirm against the repository copy.
 */
1637 static void
1639 {
1642 
1644 }
1645 
/*
 * prior_lsa_append_data - advance log_Gl.prior_info.prior_lsa by 'length'
 *   bytes of record data, spilling across virtual log pages when the data
 *   does not fit in the current page.
 *
 * length(in): number of data bytes being appended (0 is a no-op)
 *
 * NOTE(review): the extracted listing drops the signature line (original
 * 1647) and a few statements (1662: the initial align, 1677-1678: the
 * page-advance inside the loop, 1707: the final align) -- confirm against
 * the repository copy.
 */
1646 static void
1648 {
1649  int copy_length; /* Amount of contiguous data that can be copied */
1650  int current_offset;
1651  int last_offset;
1652 
1653  if (length == 0)
1654  {
1655  return;
1656  }
1657 
1658  /*
1659  * Align if needed,
1660  * don't set it dirty since this function has not updated
1661  */
1663 
1664  current_offset = (int) log_Gl.prior_info.prior_lsa.offset;
1665  last_offset = (int) LOG_PRIOR_LSA_LAST_APPEND_OFFSET ();
1666 
1667  /* Does data fit completely in current page ? */
1668  if ((current_offset + length) >= last_offset)
1669  {
     /* spill the data chunk by chunk across page boundaries */
1670  while (length > 0)
1671  {
1672  if (current_offset >= last_offset)
1673  {
1674  /*
1675  * Get next page and set the current one dirty
1676  */
1679 
1680  current_offset = 0;
1681  last_offset = (int) LOG_PRIOR_LSA_LAST_APPEND_OFFSET ();
1682  }
1683  /* Find the amount of contiguous data that can be copied */
1684  if (current_offset + length >= last_offset)
1685  {
1686  copy_length = CAST_BUFLEN (last_offset - current_offset);
1687  }
1688  else
1689  {
1690  copy_length = length;
1691  }
1692 
1693  current_offset += copy_length;
1694  length -= copy_length;
1695  log_Gl.prior_info.prior_lsa.offset += copy_length;
1696  }
1697  }
1698  else
1699  {
     /* fast path: everything fits in the current page */
1700  log_Gl.prior_info.prior_lsa.offset += length;
1701  }
1702 
1703  /*
1704  * Align the data for future appends.
1705  * Indicate that modifications were done
1706  */
1708 }
1709 
1710 static LOG_ZIP *
1712 {
1713 #if defined (SERVER_MODE)
1714  if (thread_p == NULL)
1715  {
1716  thread_p = thread_get_thread_entry_info ();
1717  }
1718 
1719  if (thread_p == NULL)
1720  {
1721  return NULL;
1722  }
1723  else
1724  {
1725  if (thread_p->log_zip_undo == NULL)
1726  {
1727  thread_p->log_zip_undo = log_zip_alloc (IO_PAGESIZE);
1728  }
1729  return (LOG_ZIP *) thread_p->log_zip_undo;
1730  }
1731 #else
1732  return log_zip_undo;
1733 #endif
1734 }
1735 
1736 static LOG_ZIP *
1738 {
1739 #if defined (SERVER_MODE)
1740  if (thread_p == NULL)
1741  {
1742  thread_p = thread_get_thread_entry_info ();
1743  }
1744 
1745  if (thread_p == NULL)
1746  {
1747  return NULL;
1748  }
1749  else
1750  {
1751  if (thread_p->log_zip_redo == NULL)
1752  {
1753  thread_p->log_zip_redo = log_zip_alloc (IO_PAGESIZE);
1754  }
1755  return (LOG_ZIP *) thread_p->log_zip_redo;
1756  }
1757 #else
1758  return log_zip_redo;
1759 #endif
1760 }
1761 
1762 /*
1763  * log_append_realloc_data_ptr -
1764  *
1765  * return:
1766  *
1767  * data_length(in):
1768  * length(in):
1769  *
1770  * NOTE:
1771  */
/*
 * NOTE(review): grows the scratch log-data buffer (per-thread in SERVER_MODE,
 * file-scope otherwise) to at least 'length' bytes, rounded up to a multiple
 * of IO_PAGESIZE; returns false on allocation failure.  On realloc failure
 * the ORIGINAL buffer is still allocated (C realloc semantics), which is why
 * it is explicitly freed.  The extracted listing drops the signature line
 * (original 1773) and the er_set error-reporting lines (1794-1795,
 * 1817-1818) plus the free call at 1821 -- confirm against the repository.
 */
1772 static bool
1774 {
1775  char *data_ptr;
1776  int alloc_len;
1777 #if defined (SERVER_MODE)
1778  if (thread_p == NULL)
1779  {
1780  thread_p = thread_get_thread_entry_info ();
1781  }
1782 
1783  if (thread_p == NULL)
1784  {
1785  return false;
1786  }
1787 
1788  if (thread_p->log_data_length < length)
1789  {
     /* round the request up to a whole number of I/O pages */
1790  alloc_len = ((int) CEIL_PTVDIV (length, IO_PAGESIZE)) * IO_PAGESIZE;
1791 
1792  data_ptr = (char *) realloc (thread_p->log_data_ptr, alloc_len);
1793  if (data_ptr == NULL)
1794  {
     /* dropped lines: presumably er_set (ER_OUT_OF_VIRTUAL_MEMORY, ...) */
1796  if (thread_p->log_data_ptr)
1797  {
1798  free_and_init (thread_p->log_data_ptr);
1799  }
1800  thread_p->log_data_length = 0;
1801  return false;
1802  }
1803  else
1804  {
1805  thread_p->log_data_ptr = data_ptr;
1806  thread_p->log_data_length = alloc_len;
1807  }
1808  }
1809  return true;
1810 #else
1811  if (log_data_length < length)
1812  {
1813  alloc_len = ((int) CEIL_PTVDIV (length, IO_PAGESIZE)) * IO_PAGESIZE;
1814 
1815  data_ptr = (char *) realloc (log_data_ptr, alloc_len);
1816  if (data_ptr == NULL)
1817  {
     /* dropped lines: error report and presumably free_and_init (log_data_ptr) */
1819  if (log_data_ptr)
1820  {
1822  }
1823  log_data_length = 0;
1824  return false;
1825  }
1826  else
1827  {
1828  log_data_ptr = data_ptr;
1829  log_data_length = alloc_len;
1830  }
1831  }
1832 
1833  return true;
1834 #endif
1835 }
1836 
1837 /*
1838  * log_append_get_data_ptr -
1839  *
1840  * return:
1841  *
1842  */
/*
 * NOTE(review): returns the scratch log-data buffer (per-thread in
 * SERVER_MODE, allocated lazily at two I/O pages; file-scope log_data_ptr
 * otherwise); may return NULL on allocation failure or when no thread entry
 * can be resolved.  The extracted listing drops the signature line (original
 * 1844) and part of the error report at 1866 (presumably an er_set of
 * ER_OUT_OF_VIRTUAL_MEMORY) -- confirm against the repository copy.
 */
1843 static char *
1845 {
1846 #if defined (SERVER_MODE)
1847  if (thread_p == NULL)
1848  {
1849  thread_p = thread_get_thread_entry_info ();
1850  }
1851 
1852  if (thread_p == NULL)
1853  {
1854  return NULL;
1855  }
1856  else
1857  {
1858  if (thread_p->log_data_ptr == NULL)
1859  {
     /* lazy first allocation: two I/O pages of scratch space */
1860  thread_p->log_data_length = IO_PAGESIZE * 2;
1861  thread_p->log_data_ptr = (char *) malloc (thread_p->log_data_length);
1862 
1863  if (thread_p->log_data_ptr == NULL)
1864  {
1865  thread_p->log_data_length = 0;
     /* dropped line 1866: presumably er_set (ER_OUT_OF_VIRTUAL_MEMORY, ...) */
1867  (size_t) thread_p->log_data_length);
1868  }
1869  }
1870  return thread_p->log_data_ptr;
1871  }
1872 #else
1873  return log_data_ptr;
1874 #endif
1875 }
1876 
/*
 * log_prior_lsa_append_align - align log_Gl.prior_info.prior_lsa.offset
 *   (presumably to DOUBLE_ALIGNMENT) and advance to the next page when the
 *   aligned offset runs past the usable log area.
 *
 * NOTE(review): the extracted listing drops the signature line (original
 * 1878), the alignment statement (1880, 1882) and the page-advance body
 * (1885-1886, presumably pageid++ / offset = 0) -- confirm against the
 * repository copy.
 */
1877 static void
1879 {
1881 
1883  if ((size_t) log_Gl.prior_info.prior_lsa.offset >= (size_t) LOGAREA_SIZE)
1884  {
1887  }
1888 }
1889 
/*
 * log_prior_lsa_append_advance_when_doesnot_fit - if appending 'length' more
 *   bytes would overflow the usable log area of the current prior page,
 *   advance the prior LSA to the start of the next page.
 *
 * length(in): size of the data about to be appended
 *
 * NOTE(review): the extracted listing drops the signature line (original
 * 1891), a statement at 1893 and the advance body (1897-1898, presumably
 * pageid++ / offset = 0) -- confirm against the repository copy.
 */
1890 static void
1892 {
1894 
1895  if ((size_t) log_Gl.prior_info.prior_lsa.offset + length >= (size_t) LOGAREA_SIZE)
1896  {
1899  }
1900 }
1901 
/*
 * log_prior_lsa_append_add_align - add 'add' bytes to the prior LSA offset
 *   and then re-align it.
 *
 * add(in): number of bytes just appended
 *
 * NOTE(review): the extracted listing drops the signature line (original
 * 1903) and the statements at 1905/1908 (presumably the trailing call to
 * log_prior_lsa_append_align) -- confirm against the repository copy.
 */
1902 static void
1904 {
1906 
1907  log_Gl.prior_info.prior_lsa.offset += (add);
1909 }
bool is_null() const
Definition: log_lsa.hpp:92
char * PAGE_PTR
LOG_REC_UNDO undo
Definition: log_record.hpp:209
cubthread::entry * thread_get_thread_entry_info(void)
#define NO_ERROR
Definition: error_code.h:46
size_t logpb_get_memsize()
static void log_prior_lsa_append_advance_when_doesnot_fit(size_t length)
static LOG_ZIP * log_zip_redo
Definition: log_append.cpp:36
VACUUM_LOG_BLOCKID vacuum_get_log_blockid(LOG_PAGEID pageid)
Definition: vacuum.c:5612
LOG_PRIOR_NODE * prior_list_header
Definition: log_append.hpp:117
#define IO_PAGESIZE
static int prior_lsa_gen_2pc_prepare_record(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, int gtran_length, const char *gtran_data, int lock_length, const char *lock_data)
size_t LOG_PRIOR_LSA_LAST_APPEND_OFFSET()
Definition: log_append.cpp:42
#define LOG_IS_UNDOREDO_RECORD_TYPE(type)
Definition: log_record.hpp:420
LOG_DATA data
Definition: log_record.hpp:182
const char * rv_rcvindex_string(LOG_RCVINDEX rcvindex)
Definition: recovery.c:846
bool LSA_EQ(const log_lsa *plsa1, const log_lsa *plsa2)
Definition: log_lsa.hpp:160
static void prior_lsa_end_append(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node)
void LSA_COPY(log_lsa *plsa1, const log_lsa *plsa2)
Definition: log_lsa.hpp:139
bool prior_is_tde_encrypted(const log_prior_node *node)
static void log_prior_lsa_append_align()
LOG_VACUUM_INFO vacuum_info
Definition: log_record.hpp:211
#define LOGAREA_SIZE
Definition: log_impl.h:116
LOG_RECORD_HEADER log_header
Definition: log_append.hpp:92
LOG_LSA sysop_start_postpone_lsa
Definition: log_impl.h:448
LOG_RCVINDEX
Definition: recovery.h:36
#define ER_FAILED
Definition: error_code.h:47
#define LOG_IS_MVCC_OP_RECORD_TYPE(type)
Definition: log_record.hpp:428
static int prior_lsa_gen_end_chkpt_record(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, int tran_length, const char *tran_data, int topop_length, const char *topop_data)
LOG_REC_REDO redo
Definition: log_record.hpp:218
static int log_data_length
Definition: log_append.cpp:38
LOG_GLOBAL log_Gl
LOG_HEADER hdr
Definition: log_impl.h:653
MVCCID oldest_visible_mvccid
LOG_LSA forw_lsa
Definition: log_record.hpp:146
static void prior_lsa_append_data(int length)
LOG_LSA prev_tranlsa
Definition: log_record.hpp:144
void thread_sleep(double millisec)
struct log_rec_mvcc_redo LOG_REC_MVCC_REDO
Definition: log_record.hpp:215
#define assert_release(e)
Definition: error_manager.h:96
void LOG_CS_ENTER(THREAD_ENTRY *thread_p)
LOG_PRIOR_NODE * prior_lsa_alloc_and_copy_crumbs(THREAD_ENTRY *thread_p, LOG_RECTYPE rec_type, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, const int num_ucrumbs, const LOG_CRUMB *ucrumbs, const int num_rcrumbs, const LOG_CRUMB *rcrumbs)
Definition: log_append.cpp:407
struct log_rec_replication LOG_REC_REPLICATION
Definition: log_record.hpp:223
LOG_PRIOR_NODE * prior_list_tail
Definition: log_append.hpp:118
#define MVCCID_NULL
VOLID volid
Definition: log_record.hpp:158
LOG_LSA tail_lsa
Definition: log_impl.h:473
static char * log_data_ptr
Definition: log_append.cpp:37
static LOG_LSA prior_lsa_next_record_internal(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_TDES *tdes, int with_lock)
struct log_rec_compensate LOG_REC_COMPENSATE
Definition: log_record.hpp:257
int32_t pageid
Definition: dbtype_def.h:879
#define LSA_AS_ARGS(lsa_ptr)
Definition: log_lsa.hpp:78
struct log_rec_sysop_start_postpone LOG_REC_SYSOP_START_POSTPONE
Definition: log_record.hpp:315
LOG_PRIOR_NODE * prior_lsa_alloc_and_copy_data(THREAD_ENTRY *thread_p, LOG_RECTYPE rec_type, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int ulength, const char *udata, int rlength, const char *rdata)
Definition: log_append.cpp:273
LOG_ZIP_SIZE_T data_length
Definition: log_compress.h:55
static int prior_lsa_copy_redo_data_to_node(LOG_PRIOR_NODE *node, int length, const char *data)
Definition: log_append.cpp:521
MVCCID id
Definition: mvcc.h:197
std::mutex prior_lsa_mutex
Definition: log_append.hpp:125
struct log_rec_start_postpone LOG_REC_START_POSTPONE
Definition: log_record.hpp:266
LOG_VACUUM_INFO vacuum_info
Definition: log_record.hpp:202
bool LSA_LT(const log_lsa *plsa1, const log_lsa *plsa2)
Definition: log_lsa.hpp:174
#define er_log_debug(...)
LOG_ZIP * log_zip_alloc(LOG_ZIP_SIZE_T size)
Definition: log_compress.c:230
LOG_LSA append_lsa
bool appending_page_tde_encrypted
Definition: log_append.hpp:80
#define NULL_VOLDES
Definition: file_io.h:44
static void prior_lsa_start_append(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_TDES *tdes)
void log_wakeup_log_flush_daemon()
Definition: log_manager.c:9715
struct log_rec_mvcc_undo LOG_REC_MVCC_UNDO
Definition: log_record.hpp:206
LOG_LSA back_lsa
Definition: log_record.hpp:145
void THREAD_ENTRY
mvcctable mvcc_table
Definition: log_impl.h:684
#define NULL_PAGEID
char * log_data
Definition: log_compress.h:57
PGLENGTH offset
Definition: log_record.hpp:157
static int prior_lsa_gen_dbout_redo_record(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RCVINDEX rcvindex, int length, const char *data)
struct log_rec_sysop_end LOG_REC_SYSOP_END
Definition: log_record.hpp:292
void LOG_RESET_PREV_LSA(const LOG_LSA *lsa)
Definition: log_append.cpp:137
LOG_RECTYPE type
Definition: log_record.hpp:148
LOG_LSA prior_lsa_next_record(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, log_tdes *tdes)
struct log_rec_donetime LOG_REC_DONETIME
Definition: log_record.hpp:232
char * data_header
Definition: log_append.hpp:99
void er_set(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
struct log_rec_ha_server_state LOG_REC_HA_SERVER_STATE
Definition: log_record.hpp:239
static bool log_Zip_support
Definition: log_append.cpp:32
LOG_REC_SYSOP_END sysop_end
Definition: log_record.hpp:318
LOG_LSA start_lsa
Definition: log_append.hpp:93
#define assert(x)
LOG_APPEND_INFO append
Definition: log_impl.h:651
LOG_PRIOR_NODE * next
Definition: log_append.hpp:107
LOG_PAGE * log_pgptr
Definition: log_append.hpp:78
void set_nxio_lsa(const LOG_LSA &next_io_lsa)
Definition: log_append.cpp:112
LOG_LSA undo_nxlsa
Definition: log_impl.h:474
static LOG_ZIP * log_zip_undo
Definition: log_append.cpp:35
#define ER_OUT_OF_VIRTUAL_MEMORY
Definition: error_code.h:50
void LOG_CS_EXIT(THREAD_ENTRY *thread_p)
void LOG_RESET_APPEND_LSA(const LOG_LSA *lsa)
Definition: log_append.cpp:129
TDE_CIPHER tde_Cipher
Definition: tde.c:69
LOG_LSA prior_lsa_next_record_with_lock(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, log_tdes *tdes)
char area[1]
Definition: log_storage.hpp:85
#define VACUUM_ER_LOG_LOGGING
Definition: vacuum.h:48
int prior_set_tde_encrypted(log_prior_node *node, LOG_RCVINDEX recvindex)
enum log_rectype LOG_RECTYPE
Definition: log_record.hpp:138
MVCC_INFO mvccinfo
Definition: log_impl.h:463
short volid
Definition: dbtype_def.h:880
static void prior_update_header_mvcc_info(const LOG_LSA &record_lsa, MVCCID mvccid)
LOG_LSA prev_mvcc_op_log_lsa
Definition: log_record.hpp:190
static int prior_lsa_copy_redo_crumbs_to_node(LOG_PRIOR_NODE *node, int num_crumbs, const LOG_CRUMB *crumbs)
Definition: log_append.cpp:597
std::int64_t pageid
Definition: log_lsa.hpp:36
void log_append_init_zip()
Definition: log_append.cpp:185
static void log_prior_lsa_append_add_align(size_t add)
LOG_DATA data
Definition: log_record.hpp:174
struct log_rec_redo LOG_REC_REDO
Definition: log_record.hpp:179
#define NULL
Definition: freelistheap.h:34
UINT64 MVCCID
log_data_addr()=default
struct log_rec_run_postpone LOG_REC_RUN_POSTPONE
Definition: log_record.hpp:323
int num_log_records_written
Definition: log_impl.h:532
if(extra_options)
Definition: dynamic_load.c:958
bool log_is_in_crash_recovery(void)
Definition: log_manager.c:476
const VFID * vfid
Definition: log_append.hpp:56
struct log_rec_mvcc_undoredo LOG_REC_MVCC_UNDOREDO
Definition: log_record.hpp:197
#define vacuum_er_log(er_log_level, msg,...)
Definition: vacuum.h:65
struct log_rec_2pc_start LOG_REC_2PC_START
Definition: log_record.hpp:387
bool LSA_ISNULL(const log_lsa *lsa_ptr)
Definition: log_lsa.hpp:153
PAGE_PTR pgptr
Definition: log_append.hpp:57
LOG_LSA lastparent_lsa
Definition: log_record.hpp:295
LOG_LSA head_lsa
Definition: log_impl.h:472
void log_append_final_zip()
Definition: log_append.cpp:232
struct log_rec_chkpt LOG_REC_CHKPT
Definition: log_record.hpp:332
MVCCID newest_block_mvccid
#define LOG_IS_MVCC_OPERATION(rcvindex)
Definition: mvcc.h:255
struct log_rec_undoredo LOG_REC_UNDOREDO
Definition: log_record.hpp:162
#define CEIL_PTVDIV(dividend, divisor)
Definition: memory_alloc.h:50
LOG_PRIOR_LSA_INFO prior_info
Definition: log_impl.h:652
char * LOG_APPEND_PTR()
Definition: log_append.cpp:145
static int prior_lsa_copy_undo_crumbs_to_node(LOG_PRIOR_NODE *node, int num_crumbs, const LOG_CRUMB *crumbs)
Definition: log_append.cpp:552
offset_type offset
Definition: log_append.hpp:58
static char * log_append_get_data_ptr(THREAD_ENTRY *thread_p)
LOG_REC_UNDOREDO undoredo
Definition: log_record.hpp:200
#define VFID_COPY(vfid_ptr1, vfid_ptr2)
Definition: file_manager.h:69
static LOG_ZIP * log_append_get_zip_redo(THREAD_ENTRY *thread_p)
static LOG_ZIP * log_append_get_zip_undo(THREAD_ENTRY *thread_p)
int data_header_length
Definition: log_append.hpp:98
static int prior_lsa_gen_postpone_record(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const char *data)
#define CAST_BUFLEN
Definition: porting.h:471
struct log_prior_node LOG_PRIOR_NODE
Definition: log_append.hpp:89
static void error(const char *msg)
Definition: gencat.c:331
LOG_LSA prev_lsa
Definition: log_append.hpp:77
STATIC_INLINE void perfmon_inc_stat(THREAD_ENTRY *thread_p, PERF_STAT_ID psid) __attribute__((ALWAYS_INLINE))
MVCCID get_global_oldest_visible() const
Definition: mvcc_table.cpp:612
LOG_TDES * LOG_FIND_CURRENT_TDES(THREAD_ENTRY *thread_p=NULL)
Definition: log_impl.h:1115
void vacuum_produce_log_block_data(THREAD_ENTRY *thread_p)
Definition: vacuum.c:2867
#define ARG_FILE_LINE
Definition: error_manager.h:44
int logpb_prior_lsa_append_all_list(THREAD_ENTRY *thread_p)
INT16 PGLENGTH
bool log_zip(LOG_ZIP *log_zip, LOG_ZIP_SIZE_T length, const void *data)
Definition: log_compress.c:43
#define MAKE_ZIP_LEN(length)
Definition: log_compress.h:33
#define DOUBLE_ALIGNMENT
Definition: memory_alloc.h:64
void log_zip_free(LOG_ZIP *log_zip)
Definition: log_compress.c:265
PAGEID pageid
Definition: log_record.hpp:156
#define free_and_init(ptr)
Definition: memory_alloc.h:147
#define LOG_ISRESTARTED()
Definition: log_impl.h:232
#define DB_ALIGN(offset, align)
Definition: memory_alloc.h:84
void LSA_SET_NULL(log_lsa *lsa_ptr)
Definition: log_lsa.hpp:146
static bool log_append_realloc_data_ptr(THREAD_ENTRY *thread_p, int length)
TRANID trid
Definition: log_impl.h:466
LOG_RCV_TDES rcv
Definition: log_impl.h:541
#define LOG_IS_REDO_RECORD_TYPE(type)
Definition: log_record.hpp:416
bool log_diff(LOG_ZIP_SIZE_T undo_length, const void *undo_data, LOG_ZIP_SIZE_T redo_length, void *redo_data)
Definition: log_compress.c:201
bool prm_get_bool_value(PARAM_ID prm_id)
bool is_loaded
Definition: tde.h:148
int i
Definition: dynamic_load.c:954
struct log_rec_savept LOG_REC_SAVEPT
Definition: log_record.hpp:367
#define NULL_VOLID
LOG_RCVINDEX rcvindex
Definition: log_record.hpp:155
TRAN_STATE state
Definition: log_impl.h:469
LOG_LSA get_nxio_lsa() const
Definition: log_append.cpp:106
struct log_rec_2pc_prepcommit LOG_REC_2PC_PREPCOMMIT
Definition: log_record.hpp:375
#define LOG_IS_UNDO_RECORD_TYPE(type)
Definition: log_record.hpp:412
struct log_rec_undo LOG_REC_UNDO
Definition: log_record.hpp:171
static int log_Zip_min_size_to_compress
Definition: log_append.cpp:33
const void * data
Definition: log_append.hpp:48
bool log_prior_has_worker_log_records(THREAD_ENTRY *thread_p)
Definition: log_append.cpp:152
LOG_LSA tran_start_postpone_lsa
Definition: log_impl.h:450
#define ER_TDE_CIPHER_IS_NOT_LOADED
Definition: error_code.h:1613
std::vector< MVCCID > sub_ids
Definition: mvcc.h:205
static int prior_lsa_gen_record(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RECTYPE rec_type, int length, const char *data)
LOG_LSA mvcc_op_log_lsa
LOG_RCVINDEX rcvindex
Definition: log_record.hpp:252
std::atomic< LOG_LSA > nxio_lsa
Definition: log_append.hpp:75
#define MVCCID_IS_VALID(id)
LOG_LSA atomic_sysop_start_lsa
Definition: log_impl.h:454
std::int64_t offset
Definition: log_lsa.hpp:37
#define VFID_SET_NULL(vfid_ptr)
Definition: file_manager.h:65
VPID * pgbuf_get_vpid_ptr(PAGE_PTR pgptr)
Definition: page_buffer.c:4609
static int prior_lsa_gen_undoredo_record_from_crumbs(THREAD_ENTRY *thread_p, LOG_PRIOR_NODE *node, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int num_ucrumbs, const LOG_CRUMB *ucrumbs, int num_rcrumbs, const LOG_CRUMB *rcrumbs)
Definition: log_append.cpp:648
const TRANID LOG_SYSTEM_TRANID
static int prior_lsa_copy_undo_data_to_node(LOG_PRIOR_NODE *node, int length, const char *data)
Definition: log_append.cpp:490
const log_lsa NULL_LSA
Definition: log_lsa.hpp:59
struct log_rec_dbout_redo LOG_REC_DBOUT_REDO
Definition: log_record.hpp:249
bool does_block_need_vacuum