//
// lockfree_freelist.hpp - lock-free free list of reusable nodes (CUBRID Engine)
//
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
#ifndef _LOCKFREE_FREELIST_HPP_
#define _LOCKFREE_FREELIST_HPP_

// NOTE(review): the project includes originally at lines 22-25 were stripped by
// the documentation extraction; reconstructed from the tran:: types used below
// (tran::index, tran::descriptor, tran::table, tran::reclaimable_node) — confirm
// against the upstream source.
#include "lockfree_transaction_def.hpp"
#include "lockfree_transaction_reclaimable.hpp"
#include "lockfree_transaction_table.hpp"

#include <atomic>
#include <cstddef>
30 namespace lockfree
31 {
32  namespace tran
33  {
34  class system;
35  }
36 }
37 
38 namespace lockfree
39 {
40  // T must have on_reclaim function
41  template <class T>
42  class freelist
43  {
44  public:
45  class free_node;
46 
47  freelist () = delete;
48  freelist (tran::system &transys, size_t block_size, size_t initial_block_count = 1);
49  ~freelist ();
50 
51  free_node *claim (tran::descriptor &tdes);
52  free_node *claim (tran::index tran_index); // claim a free node
53  // note: transaction will remain started!
54 
55  void retire (tran::descriptor &tdes, free_node &node);
56  void retire (tran::index tran_index, free_node &node);
57 
58  size_t get_alloc_count () const;
59  size_t get_available_count () const;
60  size_t get_backbuffer_count () const;
61  size_t get_forced_allocation_count () const;
62  size_t get_retired_count () const;
63  size_t get_claimed_count () const;
64 
65  tran::system &get_transaction_system ();
66  tran::table &get_transaction_table ();
67 
68  private:
71 
72  size_t m_block_size;
73 
74  std::atomic<free_node *> m_available_list; // list of available entries
75 
76  // backbuffer head & tail; when available list is consumed, it is quickly replaced with back-buffer; without
77  // backbuffer, multiple threads can race to allocate multiple blocks at once
78  std::atomic<free_node *> m_backbuffer_head;
79  std::atomic<free_node *> m_backbuffer_tail;
80 
81  // statistics:
82  std::atomic<size_t> m_available_count;
83  std::atomic<size_t> m_alloc_count;
84  std::atomic<size_t> m_bb_count;
85  std::atomic<size_t> m_forced_alloc_count;
86  std::atomic<size_t> m_retired_count;
87 
88  void swap_backbuffer ();
89  void alloc_backbuffer ();
90  void force_alloc_block ();
91 
92  void alloc_list (free_node *&head, free_node *&tail);
93  void dealloc_list (free_node *head);
94 
95  free_node *pop_from_available ();
96  void push_to_list (free_node &head, free_node &tail, std::atomic<free_node *> &dest);
97 
98  void clear_free_nodes (); // not thread safe!
99  void final_sanity_checks () const;
100  void check_my_pointer (free_node *node);
101  };
102 
103  template <class T>
105  {
106  public:
107  free_node ();
108  ~free_node () = default;
109 
110  T &get_data ();
111 
112  void reclaim () final override;
113 
114  private:
115  friend freelist;
116 
117  void set_owner (freelist &m_freelist);
118 
119  void set_freelist_next (free_node *next);
120  void reset_freelist_next (void);
121  free_node *get_freelist_next ();
122 
124  T m_t;
125  };
126 } // namespace lockfree
127 
128 //
129 // implementation
130 //
131 #include <cassert>
132 
133 namespace lockfree
134 {
135  //
136  // freelist
137  //
138  template <class T>
139  freelist<T>::freelist (tran::system &transys, size_t block_size, size_t initial_block_count)
140  : m_transys (transys)
141  , m_trantable (new tran::table (transys))
142  , m_block_size (block_size)
143  , m_available_list { NULL }
144  , m_backbuffer_head { NULL }
145  , m_backbuffer_tail { NULL }
146  , m_available_count { 0 }
147  , m_alloc_count { 0 }
148  , m_bb_count { 0 }
149  , m_forced_alloc_count { 0 }
150  , m_retired_count { 0 }
151  {
152  assert (block_size > 1);
153  // minimum two blocks
154  if (initial_block_count <= 1)
155  {
156  m_block_size /= 2;
157  initial_block_count = 2;
158  }
159 
160  alloc_backbuffer ();
161  for (size_t i = 0; i < initial_block_count; i++)
162  {
163  swap_backbuffer ();
164  }
165  }
166 
167  template <class T>
168  void
170  {
171  free_node *bb_head = m_backbuffer_head;
172  if (bb_head == NULL)
173  {
174  // somebody already allocated block
175  return;
176  }
177  free_node *bb_head_copy = bb_head; // make sure a copy is passed to compare exchange
178  if (!m_backbuffer_head.compare_exchange_strong (bb_head_copy, NULL))
179  {
180  // somebody already changing it
181  return;
182  }
183 
184  free_node *bb_tail = m_backbuffer_tail.exchange (NULL);
185  assert (bb_tail != NULL);
186 
187  m_available_count += m_bb_count.exchange (0);
188  push_to_list (*bb_head, *bb_tail, m_available_list);
189 
190  alloc_backbuffer ();
191  }
192 
193  template <class T>
194  void
196  {
197  free_node *new_bb_head = NULL;
198  free_node *new_bb_tail = NULL;
199 
200  alloc_list (new_bb_head, new_bb_tail);
201 
202  // update backbuffer tail
203  free_node *dummy_null = NULL;
204  m_backbuffer_tail.compare_exchange_strong (dummy_null, new_bb_tail);
205 
206  // update backbuffer head
207  push_to_list (*new_bb_head, *new_bb_tail, m_backbuffer_head);
209  }
210 
211  template <class T>
212  void
214  {
215  free_node *new_head = NULL;
216  free_node *new_tail = NULL;
217  alloc_list (new_head, new_tail);
218 
219  // push directly to available
222  push_to_list (*new_head, *new_tail, m_available_list);
223  }
224 
225  template <class T>
226  void
228  {
229  head = tail = NULL;
230  free_node *node;
231  for (size_t i = 0; i < m_block_size; i++)
232  {
233  node = new free_node ();
234  node->set_owner (*this);
235  if (tail == NULL)
236  {
237  tail = node;
238  }
239  node->set_freelist_next (head);
240  head = node;
241  }
243  }
244 
245  template <class T>
246  void
248  {
249  // free all
250  free_node *save_next = NULL;
251  for (free_node *node = head; node != NULL; node = save_next)
252  {
253  save_next = node->get_freelist_next ();
254  delete node;
255  }
256  }
257 
258  template <class T>
260  {
261  delete m_trantable;
262  clear_free_nodes ();
263  }
264 
265  template <class T>
266  void
268  {
270 
271  // move back-buffer to available
275 
276  dealloc_list (m_available_list.load ());
278 
280  }
281 
282  template<class T>
283  typename freelist<T>::free_node *
285  {
286  return claim (m_trantable->get_descriptor (tran_index));
287  }
288 
289  template<class T>
290  typename freelist<T>::free_node *
292  {
293  tdes.start_tran ();
294  tdes.reclaim_retired_list ();
295 
296  free_node *node;
297  size_t count = 0;
298  for (node = pop_from_available (); node == NULL && count < 100; node = pop_from_available (), ++count)
299  {
300  // if it loops many times, it is probably because the back-buffer allocator was preempted for a very long time.
301  // force allocations
302  swap_backbuffer ();
303  }
304  // if swapping backbuffer didn't work (probably back-buffer allocator was preempted for a long time), force
305  // allocating directly into available list
306  while (node == NULL)
307  {
309  node = pop_from_available ();
310  }
311 
312  assert (node != NULL);
315  check_my_pointer (node);
316 
317  return node;
318  }
319 
320  template<class T>
321  typename freelist<T>::free_node *
323  {
324  free_node *rhead = NULL;
325  free_node *rhead_copy = NULL;
326  free_node *next;
327  do
328  {
329  rhead = m_available_list;
330  if (rhead == NULL)
331  {
332  return NULL;
333  }
334  next = rhead->get_freelist_next ();
335  rhead_copy = rhead;
336  // todo: this is a dangerous preemption point; if I am preempted here, and thread 2 comes and does:
337  // - second thread gets same rhead and successfully moves m_available_list to next
338  // - third thread gets next and successfully moves m_available_list to next->next
339  // - second thread retires rhead. m_available_list becomes rhead and its next becomes next->next
340  // - I wake up, compare exchange m_available_list successfully because it is rhead again, but next will
341  // become the item third thread already claimed.
342  }
343  while (!m_available_list.compare_exchange_weak (rhead_copy, next));
344 
345  rhead->reset_freelist_next ();
346  return rhead;
347  }
348 
349  template<class T>
350  void
352  {
353  retire (m_trantable->get_descriptor (tran_index), node);
354  }
355 
356  template<class T>
357  void
359  {
360  assert (node.get_freelist_next () == NULL);
361  ++m_retired_count;
362  check_my_pointer (&node);
363  tdes.retire_node (node);
364  }
365 
366  template<class T>
367  void
368  freelist<T>::push_to_list (free_node &head, free_node &tail, std::atomic<free_node *> &dest)
369  {
370  free_node *rhead;
371  assert (tail.get_freelist_next () == NULL);
372 
373  do
374  {
375  rhead = dest;
376  tail.set_freelist_next (rhead);
377  }
378  while (!dest.compare_exchange_weak (rhead, &head));
379  }
380 
381  template<class T>
382  size_t
384  {
385  return m_alloc_count;
386  }
387 
388  template<class T>
389  size_t
391  {
392  return m_available_count;
393  }
394 
395  template<class T>
396  size_t
398  {
399  return m_bb_count;
400  }
401 
402  template<class T>
403  size_t
405  {
406  return m_forced_alloc_count;
407  }
408 
409  template<class T>
410  size_t
412  {
413  return m_retired_count;
414  }
415 
416  template<class T>
417  size_t
419  {
420  size_t alloc_count = m_alloc_count;
421  size_t unused_count = m_available_count + m_bb_count + m_retired_count;
422  if (alloc_count > unused_count)
423  {
424  return alloc_count - unused_count;
425  }
426  else
427  {
428  return 0;
429  }
430  }
431 
432  template<class T>
433  tran::system &
435  {
436  return m_transys;
437  }
438 
439  template<class T>
440  tran::table &
442  {
443  return *m_trantable;
444  }
445 
446  template<class T>
447  void
449  {
450 #if !defined (NDEBUG)
452 
453  // check back-buffer
454  size_t list_count = 0;
455  free_node *save_last = NULL;
456  for (free_node *iter = m_backbuffer_head; iter != NULL; iter = iter->get_freelist_next ())
457  {
458  ++list_count;
459  save_last = iter;
460  }
461  assert (list_count == m_bb_count);
462  assert (list_count == m_block_size);
463  assert (save_last == m_backbuffer_tail);
464 
465  // check available
466  list_count = 0;
467  for (free_node *iter = m_available_list; iter != NULL; iter = iter->get_freelist_next ())
468  {
469  ++list_count;
470  }
471  assert (list_count == m_available_count);
472 #endif // DEBUG
473  }
474 
475  template<class T>
476  void
478  {
479  assert (this == node->m_owner);
480  }
481 
482  //
483  // freelist::handle
484  //
485  template<class T>
487  : tran::reclaimable_node ()
488  , m_owner (NULL)
489  , m_t {}
490  {
491  }
492 
493  template<class T>
494  void
496  {
497  m_owner = &fl;
498  }
499 
500  template<class T>
501  void
503  {
504  m_retired_next = next;
505  }
506 
507  template<class T>
508  void
510  {
512  }
513 
514  template<class T>
515  typename freelist<T>::free_node *
517  {
518  return static_cast<free_node *> (m_retired_next);
519  }
520 
521  template<class T>
522  void
524  {
525  m_t.on_reclaim ();
526 
530  m_owner->push_to_list (*this, *this, m_owner->m_available_list);
531  }
532 
533  template<class T>
534  T &
536  {
537  return m_t;
538  }
539 } // namespace lockfree
540 
541 #endif // !_LOCKFREE_FREELIST_HPP_
// NOTE(review): a block of Doxygen cross-reference index residue (duplicated
// member declarations and unrelated symbols from other files) followed the
// include guard here; removed as extraction junk — it was not part of the
// original header.