filter_pred_cache.c
/*
 * Copyright 2008 Search Solution Corporation
 * Copyright 2016 CUBRID Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/*
 * Filter predicate cache.
 */

#ident "$Id$"

#include "binaryheap.h"
#include "filter_pred_cache.h"
#include "lock_free.h"
#include "query_executor.h"
#include "stream_to_xasl.h"
#include "system_parameter.h"
#include "thread_manager.hpp"	// for thread_get_thread_entry_info
#include "xasl.h"
#include "xasl_unpack_info.hpp"

#include <algorithm>
typedef struct fpcache_ent FPCACHE_ENTRY;
struct fpcache_ent
{
  BTID btid;			/* B-tree identifier. */

  /* Latch-free stuff. */
  FPCACHE_ENTRY *stack;		/* used in freelist */
  FPCACHE_ENTRY *next;		/* used in hash table */
  pthread_mutex_t mutex;	/* Mutex. */
  UINT64 del_id;		/* delete transaction ID (for lock free) */

  /* Entry info */
  OID class_oid;		/* Class OID. */
  struct timeval time_last_used;	/* when this entry was last used */

  PRED_EXPR_WITH_CONTEXT **clone_stack;	/* stack of cached filter predicate clones */
  INT32 clone_stack_head;	/* index of the top of clone_stack; -1 when empty */

  // *INDENT-OFF*
  fpcache_ent ();
  ~fpcache_ent ();
  // *INDENT-ON*
};

#define FPCACHE_PTR_TO_KEY(ptr) ((BTID *) ptr)
#define FPCACHE_PTR_TO_ENTRY(ptr) ((FPCACHE_ENTRY *) ptr)

// *INDENT-OFF*
using fpcache_hashmap_type = lockfree::hashmap<BTID, fpcache_ent>;
using fpcache_hashmap_iterator = fpcache_hashmap_type::iterator;
// *INDENT-ON*

static bool fpcache_Enabled = false;
static INT32 fpcache_Soft_capacity = 0;
static LF_FREELIST fpcache_Ht_freelist = LF_FREELIST_INITIALIZER;
static LF_HASH_TABLE fpcache_Ht = LF_HASH_TABLE_INITIALIZER;
static fpcache_hashmap_type fpcache_Hashmap;
/* TODO: Handle counter >= soft capacity. */
static volatile INT32 fpcache_Entry_counter = 0;
static volatile INT32 fpcache_Clone_counter = 0;
static int fpcache_Clone_stack_size;

/* Cleanup */
typedef struct fpcache_cleanup_candidate FPCACHE_CLEANUP_CANDIDATE;
struct fpcache_cleanup_candidate
{
  BTID btid;
  struct timeval time_last_used;
};
BINARY_HEAP *fpcache_Cleanup_bh = NULL;
INT32 fpcache_Cleanup_flag = 0;

#define FPCACHE_CLEANUP_RATIO 0.2

/* Statistics. */
static INT64 fpcache_Stat_lookup;
static INT64 fpcache_Stat_miss;
static INT64 fpcache_Stat_hit;
static INT64 fpcache_Stat_discard;
static INT64 fpcache_Stat_add;
static INT64 fpcache_Stat_clone_hit;
static INT64 fpcache_Stat_clone_miss;
static INT64 fpcache_Stat_clone_add;
static INT64 fpcache_Stat_clone_discard;
static INT64 fpcache_Stat_cleanup;
static INT64 fpcache_Stat_cleanup_entry;

/* fpcache_Entry_descriptor - used for latch-free hash table.
 * we have to declare member functions before instantiating fpcache_Entry_descriptor.
 */
static void *fpcache_entry_alloc (void);
static int fpcache_entry_free (void *entry);
static int fpcache_entry_init (void *entry);
static int fpcache_entry_uninit (void *entry);
static int fpcache_copy_key (void *src, void *dest);
static void fpcache_cleanup (THREAD_ENTRY * thread_p);
static BH_CMP_RESULT fpcache_compare_cleanup_candidates (const void *left, const void *right, BH_CMP_ARG ignore_arg);

static LF_ENTRY_DESCRIPTOR fpcache_Entry_descriptor = {
  offsetof (FPCACHE_ENTRY, stack),
  offsetof (FPCACHE_ENTRY, next),
  offsetof (FPCACHE_ENTRY, del_id),
  offsetof (FPCACHE_ENTRY, btid),
  offsetof (FPCACHE_ENTRY, mutex),

  /* using mutex */
  LF_EM_USING_MUTEX,

  fpcache_entry_alloc,
  fpcache_entry_free,
  fpcache_entry_init,
  fpcache_entry_uninit,
  fpcache_copy_key,
  btree_compare_btids,
  btree_hash_btid,
  NULL,				/* duplicates not accepted. */
};
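
/*
 * Callback lifecycle sketch (illustrative, based on the functions defined in
 * this file): the lock-free hash table and its freelist are expected to drive
 * the descriptor callbacks roughly as follows.
 *
 *   fpcache_entry_alloc ()  - malloc the entry and init its mutex
 *   fpcache_entry_init ()   - allocate the clone_stack when an entry is (re)used
 *   fpcache_entry_uninit () - free all cached clones when an entry is retired
 *   fpcache_entry_free ()   - destroy the mutex and free the entry memory
 *   fpcache_copy_key () / btree_compare_btids () / btree_hash_btid ()
 *                           - copy, compare and hash the BTID key
 */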

/*
 * fpcache_initialize () - Initialize filter predicate cache.
 *
 * return : Error code.
 * thread_p (in) : Thread entry.
 */
int
fpcache_initialize (THREAD_ENTRY * thread_p)
{
  int error_code = NO_ERROR;
  HL_HEAPID save_heapid;

  fpcache_Enabled = false;

  fpcache_Soft_capacity = prm_get_integer_value (PRM_ID_FILTER_PRED_MAX_CACHE_ENTRIES);
  if (fpcache_Soft_capacity <= 0)
    {
      /* Filter predicate cache disabled. */
      return NO_ERROR;
    }

  /* Initialize free list */
  const int freelist_block_count = 2;
  const int freelist_block_size = std::max (1, fpcache_Soft_capacity / freelist_block_count);
  fpcache_Hashmap.init (fpcache_Ts, THREAD_TS_FPCACHE, fpcache_Soft_capacity, freelist_block_size, freelist_block_count,
			fpcache_Entry_descriptor);

  fpcache_Clone_stack_size = prm_get_integer_value (PRM_ID_FILTER_PRED_MAX_CACHE_CLONES);

  /* Cleanup */
  /* Use global heap to allocate binary heap. */
  save_heapid = db_change_private_heap (thread_p, 0);
  fpcache_Cleanup_bh =
    bh_create (thread_p, (int) (fpcache_Soft_capacity * FPCACHE_CLEANUP_RATIO), sizeof (FPCACHE_CLEANUP_CANDIDATE),
	       fpcache_compare_cleanup_candidates, NULL);
  (void) db_change_private_heap (thread_p, save_heapid);
  if (fpcache_Cleanup_bh == NULL)
    {
      lf_freelist_destroy (&fpcache_Ht_freelist);
      lf_hash_destroy (&fpcache_Ht);
      ASSERT_ERROR_AND_SET (error_code);
      return error_code;
    }

  fpcache_Stat_lookup = 0;
  fpcache_Stat_miss = 0;
  fpcache_Stat_hit = 0;
  fpcache_Stat_discard = 0;
  fpcache_Stat_add = 0;
  fpcache_Stat_clone_hit = 0;
  fpcache_Stat_clone_miss = 0;
  fpcache_Stat_clone_add = 0;
  fpcache_Stat_clone_discard = 0;
  fpcache_Stat_cleanup = 0;
  fpcache_Stat_cleanup_entry = 0;

  fpcache_Enabled = true;
  return NO_ERROR;
}

/*
 * fpcache_finalize () - Finalize filter predicate cache.
 *
 * return : Void.
 * thread_p (in) : Thread entry.
 */
void
fpcache_finalize (THREAD_ENTRY * thread_p)
{
  HL_HEAPID save_heapid;

  if (!fpcache_Enabled)
    {
      return;
    }

  fpcache_Hashmap.destroy ();

  /* Use global heap */
  save_heapid = db_change_private_heap (thread_p, 0);
  if (fpcache_Cleanup_bh != NULL)
    {
      bh_destroy (thread_p, fpcache_Cleanup_bh);
      fpcache_Cleanup_bh = NULL;
    }
  (void) db_change_private_heap (thread_p, save_heapid);

  fpcache_Enabled = false;
}
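
/*
 * Usage sketch (illustrative, not part of the original file): the cache is
 * meant to be initialized once at server startup and finalized at shutdown.
 * The boot/shutdown call sites are assumed here.
 *
 *   int error_code = fpcache_initialize (thread_p);
 *   if (error_code != NO_ERROR)
 *     {
 *       ASSERT_ERROR ();
 *       return error_code;
 *     }
 *   // ... server runs; index code uses fpcache_claim () / fpcache_retire () ...
 *   fpcache_finalize (thread_p);
 */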

// *INDENT-OFF*
fpcache_ent::fpcache_ent ()
{
  pthread_mutex_init (&mutex, NULL);
}

fpcache_ent::~fpcache_ent ()
{
  pthread_mutex_destroy (&mutex);
}
// *INDENT-ON*

/*
 * fpcache_entry_alloc () - Allocate a filter predicate cache entry.
 *
 * return : Pointer to allocated memory.
 */
static void *
fpcache_entry_alloc (void)
{
  FPCACHE_ENTRY *fpcache_entry = (FPCACHE_ENTRY *) malloc (sizeof (FPCACHE_ENTRY));
  if (fpcache_entry == NULL)
    {
      return NULL;
    }
  pthread_mutex_init (&fpcache_entry->mutex, NULL);
  return fpcache_entry;
}

/*
 * fpcache_entry_free () - Free filter predicate cache entry.
 *
 * return : NO_ERROR.
 * entry (in) : filter predicate cache entry.
 */
static int
fpcache_entry_free (void *entry)
{
  pthread_mutex_destroy (&((FPCACHE_ENTRY *) entry)->mutex);
  free (entry);
  return NO_ERROR;
}

/*
 * fpcache_entry_init () - Initialize filter predicate cache entry.
 *
 * return : Error code.
 * entry (in) : filter predicate cache entry
 */
static int
fpcache_entry_init (void *entry)
{
  FPCACHE_ENTRY *fpcache_entry = FPCACHE_PTR_TO_ENTRY (entry);
  /* Add here if anything should be initialized. */
  /* Allocate clone stack. */
  fpcache_entry->clone_stack =
    (PRED_EXPR_WITH_CONTEXT **) malloc (fpcache_Clone_stack_size * sizeof (PRED_EXPR_WITH_CONTEXT *));
  if (fpcache_entry->clone_stack == NULL)
    {
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
	      fpcache_Clone_stack_size * sizeof (PRED_EXPR_WITH_CONTEXT *));
      return ER_OUT_OF_VIRTUAL_MEMORY;
    }
  fpcache_entry->clone_stack_head = -1;
  return NO_ERROR;
}

/*
 * fpcache_entry_uninit () - Retire filter predicate cache entry.
 *
 * return : NO_ERROR.
 * entry (in) : filter predicate cache entry.
 */
static int
fpcache_entry_uninit (void *entry)
{
  FPCACHE_ENTRY *fpcache_entry = FPCACHE_PTR_TO_ENTRY (entry);
  THREAD_ENTRY *thread_p = thread_get_thread_entry_info ();
  HL_HEAPID old_private_heap;
  PRED_EXPR_WITH_CONTEXT *pred_expr = NULL;
  int head;

  old_private_heap = db_change_private_heap (thread_p, 0);

  for (head = fpcache_entry->clone_stack_head; head >= 0; head--)
    {
      pred_expr = fpcache_entry->clone_stack[head];
      assert (pred_expr != NULL);

      qexec_clear_pred_context (thread_p, pred_expr, true);
      free_xasl_unpack_info (thread_p, pred_expr->unpack_info);
      db_private_free_and_init (thread_p, pred_expr);
    }

  (void) db_change_private_heap (thread_p, old_private_heap);
  fpcache_entry->clone_stack_head = -1;

  if (fpcache_entry->clone_stack != NULL)
    {
      free_and_init (fpcache_entry->clone_stack);
    }

  return NO_ERROR;
}

/*
 * fpcache_copy_key () - Copy filter predicate cache entry key (b-tree ID).
 *
 * return : NO_ERROR.
 * src (in) : Source b-tree ID.
 * dest (out) : Destination b-tree ID.
 */
static int
fpcache_copy_key (void *src, void *dest)
{
  BTID_COPY ((BTID *) dest, (BTID *) src);
  return NO_ERROR;
}

/*
 * fpcache_claim () - Claim a filter predicate expression from the filter predicate cache. If no expression is
 *		      available in cache, a new one is generated.
 *
 * return : Error code.
 * thread_p (in) : Thread entry.
 * btid (in) : B-tree ID.
 * or_pred (in) : Filter predicate (string and stream).
 * filter_pred (out) : Filter predicate expression (with context - unpack buffer).
 */
int
fpcache_claim (THREAD_ENTRY * thread_p, BTID * btid, or_predicate * or_pred, pred_expr_with_context ** filter_pred)
{
  FPCACHE_ENTRY *fpcache_entry = NULL;
  int error_code = NO_ERROR;

  assert (filter_pred != NULL && *filter_pred == NULL);

  if (fpcache_Enabled)
    {
      /* Try to find an available filter predicate expression in cache. */
      ATOMIC_INC_64 (&fpcache_Stat_lookup, 1);

      fpcache_entry = fpcache_Hashmap.find (thread_p, *btid);
      if (fpcache_entry == NULL)
	{
	  /* Entry not found. */
	  ATOMIC_INC_64 (&fpcache_Stat_miss, 1);
	  ATOMIC_INC_64 (&fpcache_Stat_clone_miss, 1);
	}
      else
	{
	  /* Hash-table entry found. Try to claim a filter predicate expression, if there is any available. */
	  ATOMIC_INC_64 (&fpcache_Stat_hit, 1);
	  if (fpcache_entry->clone_stack_head >= 0)
	    {
	      /* Available filter predicate expression. */
	      *filter_pred = fpcache_entry->clone_stack[fpcache_entry->clone_stack_head--];
	      ATOMIC_INC_64 (&fpcache_Stat_clone_hit, 1);
	      ATOMIC_INC_32 (&fpcache_Clone_counter, -1);
	    }
	  else
	    {
	      /* No filter predicate expression is available. */
	      ATOMIC_INC_64 (&fpcache_Stat_clone_miss, 1);
	    }
	  /* Unlock hash-table entry. */
	  pthread_mutex_unlock (&fpcache_entry->mutex);
	}
    }

  if (*filter_pred == NULL)
    {
      /* Allocate a new filter predicate expression. */
      /* Use global heap as other threads may also use this filter predicate expression. */
      HL_HEAPID old_private_heap = db_change_private_heap (thread_p, 0);
      error_code =
	stx_map_stream_to_filter_pred (thread_p, filter_pred, or_pred->pred_stream, or_pred->pred_stream_size);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	}
      (void) db_change_private_heap (thread_p, old_private_heap);
    }
  return error_code;
}

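/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * evaluates an index filter predicate, for example the b-tree insert/update
 * path, is assumed to pair fpcache_claim () with fpcache_retire () so that
 * the unpacked expression is returned to the cache after use.
 *
 *   PRED_EXPR_WITH_CONTEXT *filter_pred = NULL;
 *
 *   error_code = fpcache_claim (thread_p, btid, or_pred, &filter_pred);
 *   if (error_code != NO_ERROR || filter_pred == NULL)
 *     {
 *       return error_code;
 *     }
 *   // ... evaluate the predicate in filter_pred against the record being indexed ...
 *   (void) fpcache_retire (thread_p, class_oid, btid, filter_pred);
 */
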
/*
 * fpcache_retire () - Retire filter predicate expression; if the filter predicate hash entry is already at maximum
 *		       capacity, the predicate expression must be freed.
 *
 * return : Error code.
 * thread_p (in) : Thread entry.
 * class_oid (in) : Class OID (of index).
 * btid (in) : B-tree ID.
 * filter_pred (in) : Filter predicate expression.
 */
int
fpcache_retire (THREAD_ENTRY * thread_p, OID * class_oid, BTID * btid, pred_expr_with_context * filter_pred)
{
  FPCACHE_ENTRY *fpcache_entry = NULL;
  int error_code = NO_ERROR;
  bool inserted = false;

  if (fpcache_Enabled)
    {
      /* Try to retire into a cache entry. */
      ATOMIC_INC_64 (&fpcache_Stat_add, 1);
      inserted = fpcache_Hashmap.find_or_insert (thread_p, *btid, fpcache_entry);
      if (fpcache_entry != NULL)
	{
	  if (inserted)
	    {
	      /* Newly inserted. We must set class_oid. */
	      COPY_OID (&fpcache_entry->class_oid, class_oid);

	      ATOMIC_INC_32 (&fpcache_Entry_counter, 1);
	      ATOMIC_INC_64 (&fpcache_Stat_add, 1);

	      if (fpcache_Entry_counter > fpcache_Soft_capacity)
		{
		  /* Try cleanup. */
		  fpcache_cleanup (thread_p);
		}
	    }
	  else
	    {
	      /* Entry already existed. Safe-guard: class OID must match. */
	      assert (OID_EQ (&fpcache_entry->class_oid, class_oid));
	    }
	  /* Save filter_pred for later usage. */
	  if (fpcache_entry->clone_stack_head < fpcache_Clone_stack_size - 1)
	    {
	      /* Can save filter predicate expression. */
	      fpcache_entry->clone_stack[++fpcache_entry->clone_stack_head] = filter_pred;
	      filter_pred = NULL;
	      ATOMIC_INC_64 (&fpcache_Stat_clone_add, 1);
	      ATOMIC_INC_32 (&fpcache_Clone_counter, 1);
	    }
	  else
	    {
	      /* No room for another filter predicate expression. */
	      ATOMIC_INC_64 (&fpcache_Stat_clone_discard, 1);
	    }
	  gettimeofday (&fpcache_entry->time_last_used, NULL);
	  pthread_mutex_unlock (&fpcache_entry->mutex);
	}
      else
	{
	  /* Unexpected. */
	  assert (false);
	  error_code = ER_FAILED;
	}
    }

  if (filter_pred != NULL)
    {
      /* Filter predicate expression could not be cached. Free it. */
      HL_HEAPID old_private_heap = db_change_private_heap (thread_p, 0);
      free_xasl_unpack_info (thread_p, filter_pred->unpack_info);
      db_private_free_and_init (thread_p, filter_pred);
      (void) db_change_private_heap (thread_p, old_private_heap);
    }
  return error_code;
}

/*
 * fpcache_remove_by_class () - Remove all filter predicate cache entries belonging to the given class.
 *
 * return : Void.
 * thread_p (in) : Thread entry.
 * class_oid (in) : Class OID.
 */
void
fpcache_remove_by_class (THREAD_ENTRY * thread_p, const OID * class_oid)
{
#define FPCACHE_DELETE_BTIDS_SIZE 1024

  if (!fpcache_Enabled)
    {
      return;
    }

  // *INDENT-OFF*
  fpcache_hashmap_iterator iter { thread_p, fpcache_Hashmap };
  // *INDENT-ON*
  FPCACHE_ENTRY *fpcache_entry;
  int success = 0;
  BTID delete_btids[FPCACHE_DELETE_BTIDS_SIZE];
  int n_delete_btids = 0;
  int btid_index = 0;
  bool finished = false;

  if (!fpcache_Enabled)
    {
      return;
    }

  while (!finished)
    {
      iter.restart ();

      while (true)
	{
	  /* Start by iterating to the next hash entry. */
	  fpcache_entry = iter.iterate ();

	  if (fpcache_entry == NULL)
	    {
	      /* Finished hash. */
	      finished = true;
	      break;
	    }

	  if (OID_EQ (&fpcache_entry->class_oid, class_oid))
	    {
	      /* Save entry to be deleted after the iteration.
	       * We cannot delete from the hash while iterating. The lock-free transaction used by the iterator cannot
	       * be used for delete too (and we have just one transaction for each thread).
	       */
	      delete_btids[n_delete_btids++] = fpcache_entry->btid;

	      if (n_delete_btids == FPCACHE_DELETE_BTIDS_SIZE)
		{
		  /* Free mutex. */
		  pthread_mutex_unlock (&fpcache_entry->mutex);
		  /* Full buffer. Interrupt iteration, delete entries collected so far and then start over. */
		  fpcache_Hashmap.end_tran (thread_p);
		  break;
		}
	    }
	}

      /* Delete collected btids. */
      for (btid_index = 0; btid_index < n_delete_btids; btid_index++)
	{
	  if (fpcache_Hashmap.erase (thread_p, delete_btids[btid_index]))
	    {
	      /* Successfully removed. */
	      ATOMIC_INC_32 (&fpcache_Entry_counter, -1);
	      ATOMIC_INC_64 (&fpcache_Stat_discard, 1);
	    }
	  else
	    {
	      /* Unexpected. */
	      assert (false);
	    }
	}
      n_delete_btids = 0;
    }

#undef FPCACHE_DELETE_BTIDS_SIZE
}

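/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * invalidate a class, for example schema changes or dropping an index with a
 * filter predicate, are assumed to flush the class's cached predicates.
 *
 *   fpcache_remove_by_class (thread_p, &class_oid);
 */
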
/*
 * fpcache_dump () - Dump filter predicate cache info.
 *
 * return : Void.
 * thread_p (in) : Thread entry.
 * fp (out) : Dump output.
 */
void
fpcache_dump (THREAD_ENTRY * thread_p, FILE * fp)
{
  FPCACHE_ENTRY *fpcache_entry = NULL;

  assert (fp != NULL);

  fprintf (fp, "\n");

  if (!fpcache_Enabled)
    {
      fprintf (fp, "Filter predicate cache is disabled.\n");
      return;
    }

  /* NOTE: While dumping information, other threads are still free to modify the existing entries. */

  fprintf (fp, "Filter predicate cache\n");
  fprintf (fp, "Stats: \n");
  fprintf (fp, "Max size: %d\n", fpcache_Soft_capacity);
  fprintf (fp, "Current entry count: %d\n", ATOMIC_INC_32 (&fpcache_Entry_counter, 0));
  fprintf (fp, "Current clone count: %d\n", ATOMIC_INC_32 (&fpcache_Clone_counter, 0));
  fprintf (fp, "Lookups: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_lookup));
  fprintf (fp, "Entry Hits: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_hit));
  fprintf (fp, "Entry Miss: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_miss));
  fprintf (fp, "Entry discards: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_discard));
  fprintf (fp, "Clone Hits: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_clone_hit));
  fprintf (fp, "Clone Miss: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_clone_miss));
  fprintf (fp, "Clone discards: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_clone_discard));
  fprintf (fp, "Adds: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_add));
  fprintf (fp, "Clone adds: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_clone_add));
  fprintf (fp, "Cleanups: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_cleanup));
  fprintf (fp, "Cleaned entries: %lld\n", (long long) ATOMIC_LOAD_64 (&fpcache_Stat_cleanup_entry));

  fpcache_hashmap_iterator iter = { thread_p, fpcache_Hashmap };
  fprintf (fp, "\nEntries:\n");
  while ((fpcache_entry = iter.iterate ()) != NULL)
    {
      fprintf (fp, "\n  BTID = %d, %d|%d\n", fpcache_entry->btid.root_pageid, fpcache_entry->btid.vfid.volid,
	       fpcache_entry->btid.vfid.fileid);
      fprintf (fp, "  Clones = %d\n", fpcache_entry->clone_stack_head + 1);
    }
  /* TODO: add more. */
}

/*
 * fpcache_cleanup () - Cleanup filter predicate cache when soft capacity is exceeded.
 *
 * return : Void.
 * thread_p (in) : Thread entry.
 */
static void
fpcache_cleanup (THREAD_ENTRY * thread_p)
{
  fpcache_hashmap_iterator iter = { thread_p, fpcache_Hashmap };
  FPCACHE_ENTRY *fpcache_entry = NULL;
  FPCACHE_CLEANUP_CANDIDATE candidate;
  int candidate_index;

  /* We can allow only one cleanup process at a time. There is no point in duplicating this work. Therefore, anyone
   * trying to do the cleanup should first try to set fpcache_Cleanup_flag. */
  if (!ATOMIC_CAS_32 (&fpcache_Cleanup_flag, 0, 1))
    {
      /* Somebody else does the cleanup. */
      return;
    }
  if (fpcache_Entry_counter <= fpcache_Soft_capacity)
    {
      /* Already cleaned up. */
      if (!ATOMIC_CAS_32 (&fpcache_Cleanup_flag, 1, 0))
	{
	  assert_release (false);
	}
      return;
    }

  /* Start cleanup. */

  /* The cleanup is a two-step process:
   * 1. Iterate through the hash and select candidates for cleanup. The least recently used entries are sorted into a
   *    binary heap.
   *    NOTE: the binary heap does not store references to hash entries; it stores copies of the candidate keys and
   *    their last-used timestamps, which are used to sort the candidates.
   * 2. Remove the collected candidates from the hash. Entries must be unfixed and no flags may be set.
   */

  assert (fpcache_Cleanup_bh->element_count == 0);
  fpcache_Cleanup_bh->element_count = 0;

  /* Collect candidates for cleanup. */
  while ((fpcache_entry = iter.iterate ()) != NULL)
    {
      candidate.btid = fpcache_entry->btid;
      candidate.time_last_used = fpcache_entry->time_last_used;

      (void) bh_try_insert (fpcache_Cleanup_bh, &candidate, NULL);
    }

  /* Remove candidates from filter predicate cache. */
  for (candidate_index = 0; candidate_index < fpcache_Cleanup_bh->element_count; candidate_index++)
    {
      /* Get candidate at candidate_index. */
      bh_element_at (fpcache_Cleanup_bh, candidate_index, &candidate);

      /* Try delete. */
      if (fpcache_Hashmap.erase (thread_p, candidate.btid))
	{
	  ATOMIC_INC_64 (&fpcache_Stat_cleanup_entry, 1);
	  ATOMIC_INC_64 (&fpcache_Stat_discard, 1);
	  ATOMIC_INC_32 (&fpcache_Entry_counter, -1);
	}
    }

  /* Reset binary heap. */
  fpcache_Cleanup_bh->element_count = 0;

  ATOMIC_INC_64 (&fpcache_Stat_cleanup, 1);
  if (!ATOMIC_CAS_32 (&fpcache_Cleanup_flag, 1, 0))
    {
      assert_release (false);
    }
}

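/*
 * Worked example (illustrative; assumes the cleanup heap is sized to
 * FPCACHE_CLEANUP_RATIO of the soft capacity, as in fpcache_initialize above):
 * with a soft capacity of 1000 entries, the heap holds at most
 * 0.2 * 1000 = 200 candidates, so a single cleanup pass discards roughly the
 * 20% least recently used entries and keeps the rest.
 */
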
/*
 * fpcache_compare_cleanup_candidates () - Compare cleanup candidates by their time_last_used. Older candidates are
 *					    considered "lesser", so they are the ones kept by the cleanup binary heap.
 *
 * return : BH_CMP_RESULT:
 *	    BH_LT if left is older.
 *	    BH_GT if right is older.
 *	    BH_EQ if left and right are equal.
 * left (in) : Left FPCACHE cleanup candidate.
 * right (in) : Right FPCACHE cleanup candidate.
 * ignore_arg (in) : Ignored.
 */
static BH_CMP_RESULT
fpcache_compare_cleanup_candidates (const void *left, const void *right, BH_CMP_ARG ignore_arg)
{
  struct timeval left_timeval = ((FPCACHE_CLEANUP_CANDIDATE *) left)->time_last_used;
  struct timeval right_timeval = ((FPCACHE_CLEANUP_CANDIDATE *) right)->time_last_used;

  /* Lesser means placed in binary heap. So return BH_LT for older timeval. */
  if (left_timeval.tv_sec < right_timeval.tv_sec)
    {
      return BH_LT;
    }
  else if (left_timeval.tv_sec == right_timeval.tv_sec)
    {
      return BH_EQ;
    }
  else
    {
      return BH_GT;
    }
}

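/*
 * Worked example (illustrative): for two candidates last used at tv_sec = 1000
 * (left) and tv_sec = 2000 (right), the comparator returns BH_LT, i.e. the
 * older left candidate orders as "lesser". Candidates last used within the
 * same second compare as BH_EQ because tv_usec is not examined.
 */
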
/*
 * fpcache_drop_all () - Free all filter predicate cache entries.
 *
 * return : Void.
 * thread_p (in) : Thread entry.
 */
void
fpcache_drop_all (THREAD_ENTRY * thread_p)
{
  /* Reset fpcache_Entry_counter and fpcache_Clone_counter.
   * NOTE: If entries/clones are created concurrently with this, the counters may become a little off. However, exact
   *       counters are not mandatory.
   */
  fpcache_Entry_counter = 0;
  fpcache_Clone_counter = 0;

  fpcache_Hashmap.clear (thread_p);
}