CUBRID Engine  latest
log_postpone_cache.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /*
20  * log_postpone_cache.cpp - log postpone cache module
21  */
22 
23 #include "log_postpone_cache.hpp"
24 
25 #include "memory_alloc.h"
27 #include "object_representation.h"
28 #include "log_manager.h"
29 
30 #include <cstring>
31 
32 void
// log_postpone_cache::reset () — clears the cache back to its empty state.
// NOTE(review): the signature line (orig 33) is missing from this excerpt.
34 {
35  m_cursor = 0;
// NOTE(review): orig lines 36-38 and 40 are missing from this excerpt.
// Presumably m_redo_data_offset is also reset here and a condition (likely
// comparing m_redo_data_buf.get_size () against BUFFER_RESET_SIZE, both of
// which this file references) opens the block below — confirm against the
// full source before relying on this.
39  {
41  }
42 }
43 
50 void
// log_postpone_cache::add_redo_data (const log_prior_node &node) — caches the
// redo header and recovery data of one postpone log record into the
// preallocated buffer, creating a new cache entry at m_cursor.
// NOTE(review): the signature line (orig 51) is missing from this excerpt;
// the parameter type is taken from the cross-reference index at the bottom of
// this page.
52 {
53  assert (node.data_header != NULL);
54  assert (node.rlength == 0 || node.rdata != NULL);
// NOTE(review): orig line 55 is missing from this excerpt (likely another
// assertion on the node) — confirm against the full source.
56 
57  if (is_full ())
58  {
59  // Cannot cache postpones, cache reached max capacity
60  return;
61  }
62 
63  // Check if recovery data fits in preallocated buffer
// Worst case: header + payload, each padded up to MAX_ALIGNMENT (hence the
// 2 * MAX_ALIGNMENT slack — both the header and the payload are aligned below).
64  std::size_t redo_data_size = sizeof (log_rec_redo) + node.rlength + (2 * MAX_ALIGNMENT);
65  std::size_t total_size = redo_data_size + m_redo_data_offset;
66  if (total_size > REDO_DATA_MAX_SIZE)
67  {
68  // Cannot store all recovery data
// NOTE(review): orig line 69 is missing from this excerpt. It likely marks the
// cache unusable (is_full () must become true so do_postpone () rejects a
// partially-cached sequence) — confirm against the full source.
70  return;
71  }
72  else
73  {
74  m_redo_data_buf.extend_to (total_size);
75  }
76 
77  // Cache a new postpone log record entry
78  cache_entry &new_entry = m_cache_entries[m_cursor];
79  new_entry.m_offset = m_redo_data_offset;
80  // first and only first entry has m_offset equal to zero
81  assert ((m_cursor == 0) == (new_entry.m_offset == 0));
// LSA stays null until add_lsa () fills it in after the record is appended.
82  new_entry.m_lsa.set_null ();
83 
84  // Cache log_rec_redo from data_header
85  char *redo_data_ptr = m_redo_data_buf.get_ptr () + m_redo_data_offset;
86  memcpy (redo_data_ptr, node.data_header, sizeof (log_rec_redo));
87  redo_data_ptr += sizeof (log_rec_redo);
// Align so the recovery data copied next starts on a MAX_ALIGNMENT boundary,
// matching the alignment do_postpone () assumes when reading it back.
88  redo_data_ptr = PTR_ALIGN (redo_data_ptr, MAX_ALIGNMENT);
89 
90  // Cache recovery data
91  assert (((log_rec_redo *) node.data_header)->length == node.rlength);
92  if (node.rlength > 0)
93  {
94  memcpy (redo_data_ptr, node.rdata, node.rlength);
95  redo_data_ptr += node.rlength;
96  redo_data_ptr = PTR_ALIGN (redo_data_ptr, MAX_ALIGNMENT);
97  }
98 
// Next entry starts where this one (aligned) ended.
99  m_redo_data_offset = redo_data_ptr - m_redo_data_buf.get_ptr ();
100 }
101 
111 void
// log_postpone_cache::add_lsa (const log_lsa &lsa) — records the LSA assigned
// to the most recently cached postpone record (the entry created by
// add_redo_data ()) and commits the entry by advancing m_cursor.
// NOTE(review): the signature line (orig 112) is missing from this excerpt;
// the parameter type is taken from the cross-reference index at the bottom of
// this page.
113 {
114  assert (!lsa.is_null ());
115 
116  if (is_full ())
117  {
// Cache is at capacity (or was invalidated); nothing was cached for this record.
118  return;
119  }
120 
// NOTE(review): orig line 121 is missing from this excerpt (likely an
// assertion pairing this call with a preceding add_redo_data ()) — confirm
// against the full source.
122 
123  m_cache_entries[m_cursor].m_lsa = lsa;
124 
125  /* Now that all needed data is saved, increment cached entries counter. */
126  m_cursor++;
127 }
128 
136 bool
// do_postpone — replays all cached postpone records starting from the entry
// whose LSA equals start_postpone_lsa, then rewinds the cache to that point.
//
// thread_ref          : executing thread context, forwarded to
//                       log_execute_run_postpone ().
// start_postpone_lsa  : LSA of the first postpone record to run; must be
//                       non-null and present in the cache.
//
// Returns true when the cached entries were found and executed; false when the
// cache is unusable (full) or start_postpone_lsa is not among the cached
// entries — in those cases the caller must fall back to reading the log.
137 log_postpone_cache::do_postpone (cubthread::entry &thread_ref, const log_lsa &start_postpone_lsa)
138 {
139  assert (!start_postpone_lsa.is_null ());
140 
141  if (is_full ())
142  {
143  // Cache is not usable
144  reset ();
145  return false;
146  }
147 
148  // First cached postpone entry at start_postpone_lsa
149  int start_index = -1;
150  for (std::size_t i = 0; i < m_cursor; ++i)
151  {
152  if (m_cache_entries[i].m_lsa == start_postpone_lsa)
153  {
154  // Found start lsa
155  start_index = (int) i;
156  break;
157  }
158  }
159 
160  if (start_index < 0)
161  {
162  // Start LSA was not found. Unexpected situation
163  return false;
164  }
165 
166  const size_t RCV_DATA_DEFAULT_SIZE = 1024;
// NOTE(review): orig line 167 is missing from this excerpt. It must declare
// rcv_data_buffer (used below); presumably a cubmem extensible buffer sized by
// RCV_DATA_DEFAULT_SIZE — possibly constructed with PRIVATE_BLOCK_ALLOCATOR,
// which this file references. Confirm against the full source.
168 
169  // Run all postpones after start_index
170  for (std::size_t i = start_index; i < m_cursor; ++i)
171  {
172  cache_entry &entry = m_cache_entries[i];
173 
174  // Get redo data header
175  char *redo_data = m_redo_data_buf.get_ptr () + entry.m_offset;
176  log_rec_redo redo = * (log_rec_redo *) redo_data;
177 
178  // Get recovery data
// Payload follows the header at the next MAX_ALIGNMENT boundary — mirrors the
// layout written by add_redo_data ().
179  char *data_ptr = redo_data + sizeof (log_rec_redo);
180  data_ptr = PTR_ALIGN (data_ptr, MAX_ALIGNMENT);
181  rcv_data_buffer.extend_to (redo.length);
182  std::memcpy (rcv_data_buffer.get_ptr (), data_ptr, redo.length);
183 
184  (void) log_execute_run_postpone (&thread_ref, &entry.m_lsa, &redo, rcv_data_buffer.get_ptr ());
185  }
186 
187  // Finished running postpones, update the number of entries which should be run on next commit
// NOTE(review): orig line 188 is missing from this excerpt (likely blank).
189  m_cursor = start_index;
190  m_redo_data_offset = m_cache_entries[start_index].m_offset;
191  if (m_cursor == 0)
192  {
193  assert (m_redo_data_offset == 0); // should be 0 when cursor is back to 0
194  reset ();
195  }
196 
197  return true;
198 }
199 
200 bool
// log_postpone_cache::is_full () — reports whether the cache can accept no
// more entries (also used as the "cache invalidated" state by the methods
// above).
// NOTE(review): the signature line (orig 201) and the function body (orig 203)
// are missing from this excerpt; presumably it compares m_cursor against
// MAX_CACHE_ENTRIES — confirm against the full source.
202 {
204 }
std::size_t m_redo_data_offset
bool is_null() const
Definition: log_lsa.hpp:92
static const std::size_t REDO_DATA_MAX_SIZE
void set_null()
Definition: log_lsa.hpp:98
int log_execute_run_postpone(THREAD_ENTRY *thread_p, LOG_LSA *log_lsa, LOG_REC_REDO *redo, char *redo_rcv_data)
Definition: log_manager.c:8213
const block_allocator PRIVATE_BLOCK_ALLOCATOR
#define PTR_ALIGN(addr, boundary)
Definition: memory_alloc.h:77
std::size_t get_size() const
Definition: mem_block.hpp:374
#define MAX_ALIGNMENT
Definition: memory_alloc.h:70
void add_lsa(const log_lsa &lsa)
char * data_header
Definition: log_append.hpp:99
std::array< cache_entry, MAX_CACHE_ENTRIES > m_cache_entries
#define assert(x)
#define NULL
Definition: freelistheap.h:34
bool do_postpone(cubthread::entry &thread_ref, const log_lsa &start_postpone_lsa)
static const std::size_t MAX_CACHE_ENTRIES
log_lsa m_lsa
std::size_t m_offset
static const std::size_t BUFFER_RESET_SIZE
cubmem::extensible_block m_redo_data_buf
int i
Definition: dynamic_load.c:954
void extend_to(size_t total_bytes)
Definition: mem_block.hpp:346
void add_redo_data(const log_prior_node &node)