CUBRID Engine  latest
xasl_cache.c
Go to the documentation of this file.
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /*
20  * XASL cache.
21  */
22 
23 #ident "$Id$"
24 
25 #include "xasl_cache.h"
26 
27 #include "binaryheap.h"
28 #include "compile_context.h"
29 #include "config.h"
30 #include "system_parameter.h"
31 #include "list_file.h"
32 #include "perf_monitor.h"
33 #include "query_executor.h"
34 #include "query_manager.h"
35 #include "statistics_sr.h"
36 #include "stream_to_xasl.h"
37 #include "thread_entry.hpp"
39 #include "thread_manager.hpp"
40 #include "xasl_unpack_info.hpp"
41 #include "list_file.h"
42 
43 #include <algorithm>
44 #include <assert.h>
45 
/* Layout of XASL_ID::cache_flag: the high byte holds state flags, the low 24
 * bits (XCACHE_ENTRY_FIX_COUNT_MASK) hold the entry's fix (reference) count.
 * The flags below are manipulated with compare-and-swap (see
 * XCACHE_ATOMIC_CAS_CACHE_FLAG and their use in xcache_compare_key). */
46 #define XCACHE_ENTRY_MARK_DELETED ((INT32) 0x80000000)
47 #define XCACHE_ENTRY_TO_BE_RECOMPILED ((INT32) 0x40000000)
48 #define XCACHE_ENTRY_WAS_RECOMPILED ((INT32) 0x20000000)
49 #define XCACHE_ENTRY_SKIP_TO_BE_RECOMPILED ((INT32) 0x10000000)
50 #define XCACHE_ENTRY_CLEANUP ((INT32) 0x08000000)
51 #define XCACHE_ENTRY_RECOMPILED_REQUESTED ((INT32) 0x04000000)
52 #define XCACHE_ENTRY_FLAGS_MASK ((INT32) 0xFF000000)
53 
/* Low 24 bits of cache_flag: count of concurrent fixers (readers) of entry. */
54 #define XCACHE_ENTRY_FIX_COUNT_MASK ((INT32) 0x00FFFFFF)
55 
56 #if defined (SERVER_MODE)
/* Per-transaction "deleted by me" stamp: deleted flag plus (max fix count
 * minus current transaction index), so each concurrent deleter can recognize
 * its own victim entry in xcache_compare_key. */
57 #define XCACHE_ENTRY_DELETED_BY_ME \
58  ((XCACHE_ENTRY_MARK_DELETED | XCACHE_ENTRY_FIX_COUNT_MASK) - logtb_get_current_tran_index ())
59 #else /* !SERVER_MODE */ /* SA_MODE */
60 #define XCACHE_ENTRY_DELETED_BY_ME (XCACHE_ENTRY_MARK_DELETED | XCACHE_ENTRY_FIX_COUNT_MASK)
61 #endif /* SA_MODE */
62 
/* Casts for the latch-free hash table's untyped key/entry pointers. */
63 #define XCACHE_PTR_TO_KEY(ptr) ((XASL_ID *) ptr)
64 #define XCACHE_PTR_TO_ENTRY(ptr) ((XASL_CACHE_ENTRY *) ptr)
66 /* xcache statistics. */
/* Monotonic counters, updated via XCACHE_STAT_INC (atomic 64-bit increment).
 * NOTE(review): this extract elided the "struct xcache_stats" opening line
 * (source line 68) and three members (source lines 74, 77, 81) — the
 * initializer below has 14 zeros but only 11 members are visible here;
 * confirm the full member list against upstream. */
67 typedef struct xcache_stats XCACHE_STATS;
69 {
70  INT64 lookups;
71  INT64 hits;
72  INT64 miss;
73  INT64 recompiles;
75  INT64 deletes;
76  INT64 cleanups;
78  INT64 fix;
79  INT64 unfix;
80  INT64 inserts;
82  INT64 rt_checks;
83  INT64 rt_true;
84 };
85 #define XCACHE_STATS_INITIALIZER { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
86 
87 
/* NOTE(review): a type definition (source lines 88-92, presumably the hashmap
 * type alias block) is almost entirely elided in this extract — only its
 * braces survive below. Confirm against upstream. */
90 {
93 };
94 
95 // *INDENT-OFF*
97 using xcache_hashmap_iterator = xcache_hashmap_type::iterator;
98 // *INDENT-ON*
99 
100 /* Structure to include all xasl cache global variable. It is easier to visualize the entire system when debugging. */
101 typedef struct xcache XCACHE;
102 struct xcache
103 {
104  bool enabled;
106  struct timeval last_cleaned_time;
109  volatile INT32 entry_count;
115 
117 
118  // *INDENT-OFF*
/* Default constructor: cache starts disabled and empty. The initializer list
 * names members (soft_capacity, time_threshold, hashmap, logging_enabled,
 * max_clones, cleanup_flag, cleanup_bh, cleanup_array, stats at source line
 * 131) whose declarations were elided from this extract (source lines 105,
 * 107-114, 116) — confirm declarations against upstream. The global instance
 * "xcache_Global" (source line 137) is also elided. */
119  xcache ()
120  : enabled (false)
121  , soft_capacity (0)
122  , last_cleaned_time { 0, 0 }
123  , time_threshold (360)
124  , hashmap {}
125  , entry_count (0)
126  , logging_enabled (false)
127  , max_clones (0)
128  , cleanup_flag (0)
129  , cleanup_bh (NULL)
130  , cleanup_array (NULL)
132  {
133  }
134  // *INDENT-ON*
135 };
136 
138 
139 /* Create macro's for xcache_Global fields to access them as if they were global variables. */
140 #define xcache_Enabled xcache_Global.enabled
141 #define xcache_Soft_capacity xcache_Global.soft_capacity
142 #define xcache_Time_threshold xcache_Global.time_threshold
143 #define xcache_Last_cleaned_time xcache_Global.last_cleaned_time
144 #define xcache_Hashmap xcache_Global.hashmap
145 #define xcache_Entry_count xcache_Global.entry_count
146 #define xcache_Log xcache_Global.logging_enabled
147 #define xcache_Max_clones xcache_Global.max_clones
148 #define xcache_Cleanup_flag xcache_Global.cleanup_flag
149 #define xcache_Cleanup_bh xcache_Global.cleanup_bh
150 #define xcache_Cleanup_array xcache_Global.cleanup_array
151 
152 /* Statistics */
/* Atomic read / atomic increment of a single XCACHE_STATS counter. */
153 #define XCACHE_STAT_GET(name) ATOMIC_LOAD_64 (&xcache_Global.stats.name)
154 #define XCACHE_STAT_INC(name) ATOMIC_INC_64 (&xcache_Global.stats.name, 1)
155 
/* Difference in whole seconds between two struct timeval values (ignores tv_usec). */
156 #define TIME_DIFF_SEC(t1, t2) (t1.tv_sec - t2.tv_sec)
157 
158 /* xcache_Entry_descriptor - used for latch-free hash table.
159  * we have to declare member functions before instantiating xcache_Entry_descriptor.
160  */
/* Lifecycle callbacks wired into the latch-free hash map: allocation/free of
 * whole entries, init/uninit on claim/retire, and key copy/compare/hash. */
161 static void *xcache_entry_alloc (void);
162 static int xcache_entry_free (void *entry);
163 static int xcache_entry_init (void *entry);
164 static int xcache_entry_uninit (void *entry);
165 static int xcache_copy_key (void *src, void *dest);
166 static int xcache_compare_key (void *key1, void *key2);
167 static unsigned int xcache_hash_key (void *key, int hash_table_size);
168 
/* NOTE(review): the descriptor's opening line (source line 169) and the
 * function-pointer rows binding the callbacks above (source lines 179-185)
 * were elided from this extract — confirm against upstream. */
170  offsetof (XASL_CACHE_ENTRY, stack),
171  offsetof (XASL_CACHE_ENTRY, next),
172  offsetof (XASL_CACHE_ENTRY, del_id),
173  offsetof (XASL_CACHE_ENTRY, xasl_id),
174  0, /* No mutex. */
175 
176  /* using mutex? */
178 
186  NULL, /* duplicates not accepted. */
187 };
188 
189 #define XCACHE_ATOMIC_CAS_CACHE_FLAG(xid, oldcf, newcf) (ATOMIC_CAS_32 (&(xid)->cache_flag, oldcf, newcf))
190 
191 /* Cleanup */
192 #define XCACHE_CLEANUP_RATIO 0.2
193 #define XCACHE_CLEANUP_MIN_NUM_ENTRIES 20
194 #define XCACHE_CLEANUP_NUM_ENTRIES(capacity) \
195  (MAX ((int) (2 * XCACHE_CLEANUP_RATIO * (capacity)), XCACHE_CLEANUP_MIN_NUM_ENTRIES))
196 
197 
198 /* Recompile threshold */
199 #define XCACHE_RT_TIMEDIFF_IN_SEC 360 /* 10 minutes */
200 #define XCACHE_RT_MAX_THRESHOLD 10000 /* 10k pages */
201 #define XCACHE_RT_FACTOR 10 /* 10x or 0.1x cardinal change */
202 #define XCACHE_RT_CLASS_STAT_NEED_UPDATE(class_pages,heap_pages) \
203  (((class_pages) < 100 && (((heap_pages) * 2 < (class_pages)) || ((heap_pages) > (class_pages) * 2))) \
204  || ((heap_pages) < (class_pages) * 0.8f) || ((heap_pages) > (class_pages) * 1.2f))
205 
206 /* Logging macro's */
/* Logging is gated by the PRM_ID_XASL_CACHE_LOGGING system parameter; the
 * *_TEXT macros are printf format fragments and the *_ARGS macros expand to
 * the matching argument lists. */
207 #define xcache_check_logging() (xcache_Log = prm_get_bool_value (PRM_ID_XASL_CACHE_LOGGING))
208 #define xcache_log(...) if (xcache_Log) _er_log_debug (ARG_FILE_LINE, "XASL CACHE: " __VA_ARGS__)
209 #define xcache_log_error(...) if (xcache_Log) _er_log_debug (ARG_FILE_LINE, "XASL CACHE ERROR: " __VA_ARGS__)
210 
211 #define XCACHE_LOG_TRAN_TEXT "\t tran = %d \n"
212 #define XCACHE_LOG_TRAN_ARGS(thrd) LOG_FIND_THREAD_TRAN_INDEX (thrd)
213 
214 #define XCACHE_LOG_ERROR_TEXT "\t error_code = %d \n"
215 
216 #define XCACHE_LOG_ENTRY_PTR_TEXT "\t\t entry ptr = %p \n"
217 
218 #define XCACHE_LOG_SHA1_TEXT "\t\t\t sha1 = %08x | %08x | %08x | %08x | %08x \n"
219 #define XCACHE_LOG_SHA1_ARGS(sha1) SHA1_AS_ARGS (sha1)
220 
221 #define XCACHE_LOG_TIME_STORED_TEXT "\t\t\t time stored = %d sec, %d usec \n"
222 #define XCACHE_LOG_EXEINFO_TEXT "\t\t\t user text = %s \n" \
223  "\t\t\t plan text = %s \n" \
224  "\t\t\t hash text = %s \n"
225 #define XCACHE_LOG_CLONE "\t\t\t xasl = %p \n" \
226  "\t\t\t xasl_buf = %p \n"
227 
228 #define XCACHE_LOG_XASL_ID_TEXT(msg) \
229  "\t\t " msg ": \n" \
230  XCACHE_LOG_SHA1_TEXT \
231  XCACHE_LOG_TIME_STORED_TEXT
232 #define XCACHE_LOG_XASL_ID_ARGS(xid) \
233  SHA1_AS_ARGS (&(xid)->sha1), \
234  CACHE_TIME_AS_ARGS (&(xid)->time_stored)
235 
236 #define XCACHE_LOG_ENTRY_TEXT(msg) \
237  "\t " msg ": \n" \
238  XCACHE_LOG_ENTRY_PTR_TEXT \
239  XCACHE_LOG_XASL_ID_TEXT ("xasl_id") \
240  "\t\t sql_info: \n" \
241  XCACHE_LOG_EXEINFO_TEXT \
242  "\t\t n_oids = %d \n"
243 #define XCACHE_LOG_ENTRY_ARGS(xent) \
244  (xent), \
245  XCACHE_LOG_XASL_ID_ARGS (&(xent)->xasl_id), \
246  EXEINFO_AS_ARGS(&(xent)->sql_info), \
247  (xent)->n_related_objects
248 #define XCACHE_LOG_CLONE_ARGS(xclone) XASL_CLONE_AS_ARGS (xclone)
249 
250 #define XCACHE_LOG_OBJECT_TEXT "\t\t\t oid = %d|%d|%d \n" \
251  "\t\t\t lock = %s \n" \
252  "\t\t\t tcard = %d \n"
253 #define XCACHE_LOG_ENTRY_OBJECT_TEXT(msg) \
254  "\t\t " msg ": \n" \
255  XCACHE_LOG_OBJECT_TEXT
256 #define XCACHE_LOG_ENTRY_OBJECT_ARGS(xent, oidx) \
257  OID_AS_ARGS (&(xent)->related_objects[oidx].oid), \
258  LOCK_TO_LOCKMODE_STRING ((xent)->related_objects[oidx].lock), \
259  (xent)->related_objects[oidx].tcard
260 
/* File-local helpers (bodies appear later in the file or beyond this extract). */
261 static bool xcache_entry_mark_deleted (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry);
262 static bool xcache_entry_set_request_recompile_flag (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry,
263  bool set_flag);
264 static void xcache_clone_decache (THREAD_ENTRY * thread_p, XASL_CLONE * xclone);
265 static void xcache_cleanup (THREAD_ENTRY * thread_p);
266 static BH_CMP_RESULT xcache_compare_cleanup_candidates (const void *left, const void *right, BH_CMP_ARG ignore_arg);
267 static bool xcache_check_recompilation_threshold (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry);
268 static void xcache_invalidate_entries (THREAD_ENTRY * thread_p,
269  bool (*invalidate_check) (XASL_CACHE_ENTRY *, const OID *), const OID * arg);
270 static bool xcache_entry_is_related_to_oid (XASL_CACHE_ENTRY * xcache_entry, const OID * related_to_oid);
/* NOTE(review): one more forward declaration (source line 271) was elided
 * from this extract — confirm against upstream. */
272 
273 /*
274  * xcache_initialize () - Initialize XASL cache.
275  *
276  * return : Error Code.
277  * thread_p (in) : Thread entry.
278  */
279 int
/* NOTE(review): the signature line "xcache_initialize (THREAD_ENTRY *
 * thread_p)" (source line 280) was elided from this extract, as were lines
 * 287-290 (reading soft capacity / time threshold from system parameters),
 * 298 (max_clones setup), 307-309 (creation of the cleanup binary heap into
 * xcache_Cleanup_bh), 319 (allocation of xcache_Cleanup_array) and 322-323
 * (error reporting before ER_OUT_OF_VIRTUAL_MEMORY). Confirm against
 * upstream. */
281 {
282  int error_code = NO_ERROR;
283  HL_HEAPID save_heapid;
284 
/* Start disabled; only flip to enabled after every resource is ready. */
285  xcache_Enabled = false;
286 
288 
291 
/* A non-positive soft capacity disables the XASL cache entirely. */
292  if (xcache_Soft_capacity <= 0)
293  {
294  xcache_log ("disabled.\n");
295  return NO_ERROR;
296  }
297 
299 
/* Initialize the latch-free hash map sized to the soft capacity, with its
 * freelist split into two blocks. */
300  const int freelist_block_count = 2;
301  const int freelist_block_size = std::max (1, xcache_Soft_capacity / freelist_block_count);
302  xcache_Hashmap.init (xcache_Ts, THREAD_TS_XCACHE, xcache_Soft_capacity, freelist_block_size, freelist_block_count,
303  xcache_Entry_descriptor);
304 
305  /* Use global heap */
306  save_heapid = db_change_private_heap (thread_p, 0);
310  (void) db_change_private_heap (thread_p, save_heapid);
/* If the cleanup heap could not be created, roll back the hash map. */
311  if (xcache_Cleanup_bh == NULL)
312  {
313  xcache_Hashmap.destroy ();
314  xcache_log_error ("could not init hash table.\n");
315  ASSERT_ERROR_AND_SET (error_code);
316  return error_code;
317  }
318 
320  if (xcache_Cleanup_array == NULL)
321  {
324  error_code = ER_OUT_OF_VIRTUAL_MEMORY;
325  return error_code;
326  }
327 
328  /* set last_cleaned_time as current */
329  gettimeofday (&xcache_Last_cleaned_time, NULL);
330 
331  xcache_log ("init successful.\n");
332 
333  xcache_Enabled = true;
334  return NO_ERROR;
335 }
336 
337 /*
338  * xcache_finalize () - Finalize XASL cache.
339  *
340  * return : Void.
341  * thread_entry (in) : Thread entry.
342  */
343 void
/* NOTE(review): the signature line "xcache_finalize (THREAD_ENTRY *
 * thread_p)" (source line 344) was elided from this extract, as were lines
 * 353 (xcache_check_logging call) and 363 (resetting xcache_Cleanup_bh to
 * NULL after bh_destroy). Confirm against upstream. */
345 {
346  HL_HEAPID save_heapid;
347 
/* Nothing to tear down if initialization never completed. */
348  if (!xcache_Enabled)
349  {
350  return;
351  }
352 
354  xcache_log ("finalize.\n");
355 
356  xcache_Hashmap.destroy ();
357 
358  /* Use global heap */
359  save_heapid = db_change_private_heap (thread_p, 0);
360  if (xcache_Cleanup_bh != NULL)
361  {
362  bh_destroy (thread_p, xcache_Cleanup_bh);
364  }
365  (void) db_change_private_heap (thread_p, save_heapid);
366 
367  xcache_Enabled = false;
368 }
369 
370 // *INDENT-OFF*
/* NOTE(review): the signature lines of the three member functions below
 * (source lines 371, 377 and 384) were elided from this extract. From the
 * bodies they appear to be the entry's constructor, destructor, and
 * init_clone_cache () — confirm against upstream. */
/* Constructor: prepare the clone-cache mutex and the inline clone slot. */
372 {
373  pthread_mutex_init (&cache_clones_mutex, NULL);
374  init_clone_cache ();
375 }
376 
/* Destructor: the clone array must already have been released (or never
 * grown beyond the inline one_clone slot). */
378 {
379  assert (cache_clones == NULL || cache_clones == &one_clone);
380  pthread_mutex_destroy (&cache_clones_mutex);
381 }
382 
383 void
/* Reset the clone cache to its inline single-slot state with no cached clones. */
385 {
386  cache_clones = &one_clone;
387  one_clone.xasl = NULL;
388  one_clone.xasl_buf = NULL;
389  cache_clones_capacity = 1;
390  n_cache_clones = 0;
391 }
392 // *INDENT-ON*
393 
394 /*
395  * xcache_entry_alloc () - Allocate an XASL cache entry.
396  *
397  * return : Pointer to allocated memory.
398  */
399 static void *
401 {
402  static int xcache_nentries = 0;
403 
404  XASL_CACHE_ENTRY *xcache_entry = (XASL_CACHE_ENTRY *) malloc (sizeof (XASL_CACHE_ENTRY));
405  if (xcache_entry == NULL)
406  {
407  return NULL;
408  }
409  xcache_entry->init_clone_cache ();
410  pthread_mutex_init (&xcache_entry->cache_clones_mutex, NULL);
411 
412  xcache_entry->list_ht_no = xcache_nentries++;
413 
414  return xcache_entry;
415 }
416 
417 /*
418  * xcache_entry_free () - Free an XASL cache entry.
419  *
420  * return : NO_ERROR.
421  * entry (in) : XASL cache entry pointer.
422  */
423 static int
424 xcache_entry_free (void *entry)
425 {
426  XASL_CACHE_ENTRY *xcache_entry = (XASL_CACHE_ENTRY *) entry;
427 
428  if (xcache_entry->cache_clones != &xcache_entry->one_clone)
429  {
430  /* Should be already freed? */
431  assert (false);
432  free (xcache_entry->cache_clones);
433  }
434  pthread_mutex_destroy (&xcache_entry->cache_clones_mutex);
435  free (entry);
436  return NO_ERROR;
437 }
438 
439 /*
440  * xcache_entry_init () - Initialize new XASL cache entry.
441  *
442  * return : NO_ERROR.
443  * entry (in) : XASL cache entry pointer.
444  */
445 static int
446 xcache_entry_init (void *entry)
447 {
448  XASL_CACHE_ENTRY *xcache_entry = XCACHE_PTR_TO_ENTRY (entry);
449  /* Add here if anything should be initialized. */
450  xcache_entry->related_objects = NULL;
451  xcache_entry->ref_count = 0;
452  xcache_entry->clr_count = 0;
453 
454  xcache_entry->sql_info.sql_hash_text = NULL;
455  xcache_entry->sql_info.sql_user_text = NULL;
456  xcache_entry->sql_info.sql_plan_text = NULL;
457 
458  XASL_ID_SET_NULL (&xcache_entry->xasl_id);
459  xcache_entry->stream.xasl_id = NULL;
460  xcache_entry->stream.buffer = NULL;
461 
462  xcache_entry->free_data_on_uninit = false;
463  xcache_entry->initialized = true;
464 
465  assert (xcache_entry->n_cache_clones == 0);
466  return NO_ERROR;
467 }
468 
469 /*
470  * xcache_entry_uninit () - Uninitialize XASL cache entry.
471  *
472  * return : NO_ERROR.
473  * entry (in) : XASL cache entry pointer.
474  */
475 static int
476 xcache_entry_uninit (void *entry)
477 {
478  XASL_CACHE_ENTRY *xcache_entry = XCACHE_PTR_TO_ENTRY (entry);
/* NOTE(review): source line 479 was elided from this extract — given the uses
 * of thread_p below, it is presumably "THREAD_ENTRY *thread_p = NULL;" (the
 * same pattern as xcache_copy_key/xcache_compare_key). Confirm upstream. */
480 
481  /* 1. not fixed
482  * 2. or was deleted
483  * 3. or was claimed & retired immediately. */
484  assert ((xcache_entry->xasl_id.cache_flag & XCACHE_ENTRY_FIX_COUNT_MASK) == 0
485  || (xcache_entry->xasl_id.cache_flag & XCACHE_ENTRY_MARK_DELETED)
486  || ((xcache_entry->xasl_id.cache_flag & XCACHE_ENTRY_FIX_COUNT_MASK) == 1
487  && !xcache_entry->free_data_on_uninit));
488 
489  if (!xcache_entry->initialized)
490  {
491  /* Already uninitialized? */
492  assert (false);
493  return NO_ERROR;
494  }
495 
/* Two uninit modes: the entry either owns its data (free everything) or the
 * data ownership was transferred elsewhere (just clear the pointers). */
496  if (xcache_entry->free_data_on_uninit)
497  {
498  xcache_log ("uninit an entry from cache and free its data: \n"
499  XCACHE_LOG_ENTRY_TEXT ("xasl cache entry") XCACHE_LOG_TRAN_TEXT,
500  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
501 
502  if (xcache_entry->related_objects != NULL)
503  {
504  free_and_init (xcache_entry->related_objects);
505  }
506 
/* sql_hash_text is the head of a single allocation that also holds the user
 * and plan texts — freeing it releases all three (presumed; confirm against
 * the allocation site in xcache_insert). */
507  if (xcache_entry->sql_info.sql_hash_text != NULL)
508  {
509  free_and_init (xcache_entry->sql_info.sql_hash_text);
510  }
511 
512  XASL_ID_SET_NULL (&xcache_entry->xasl_id);
513 
514  /* Free XASL clones. */
515  assert (xcache_entry->n_cache_clones == 0
516  || (xcache_Max_clones > 0 && xcache_entry->n_cache_clones <= xcache_Max_clones))
517  assert (xcache_entry->n_cache_clones == 0 || xcache_entry->cache_clones != NULL);
518  while (xcache_entry->n_cache_clones > 0)
519  {
520  xcache_clone_decache (thread_p, &xcache_entry->cache_clones[--xcache_entry->n_cache_clones]);
521  }
/* If the clone array grew beyond the inline slot, shrink back to one_clone so
 * the entry can be recycled (and so xcache_entry_free's assertion holds). */
522  if (xcache_entry->cache_clones != &xcache_entry->one_clone)
523  {
524  /* Free cache clones. */
525  assert (xcache_entry->cache_clones_capacity > 0);
526  free (xcache_entry->cache_clones);
527  xcache_entry->cache_clones = &xcache_entry->one_clone;
528  xcache_entry->one_clone.xasl = NULL;
529  xcache_entry->one_clone.xasl_buf = NULL;
530  xcache_entry->cache_clones_capacity = 1;
531  }
532  if (xcache_entry->stream.buffer != NULL)
533  {
534  free_and_init (xcache_entry->stream.buffer);
535  }
536  }
537  else
538  {
539  xcache_log ("uninit an entry without freeing its data: \n"
540  XCACHE_LOG_ENTRY_TEXT ("xasl cache entry") XCACHE_LOG_TRAN_TEXT,
541  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
542  xcache_entry->related_objects = NULL;
543  xcache_entry->sql_info.sql_hash_text = NULL;
544  xcache_entry->sql_info.sql_plan_text = NULL;
545  xcache_entry->sql_info.sql_user_text = NULL;
546  XASL_ID_SET_NULL (&xcache_entry->xasl_id);
547 
548  assert (xcache_entry->n_cache_clones == 0);
549  }
550  xcache_entry->initialized = false;
551  return NO_ERROR;
552 }
553 
554 /*
555  * xcache_copy_key () - Dummy copy key function; XASL cache entry and its key are initialized before being inserted.
556  *
557  * return : NO_ERROR.
558  * src (in) : Dummy key source.
559  * dest (in) : Dummy key destination.
560  */
561 static int
562 xcache_copy_key (void *src, void *dest)
563 {
564  /* Key is already set before insert. */
565  XASL_ID *xid = (XASL_ID *) dest;
/* thread_p is NULL on purpose: XCACHE_LOG_TRAN_ARGS/LOG_FIND_THREAD_TRAN_INDEX
 * resolve the current thread themselves when given NULL (presumed from the
 * identical pattern in xcache_compare_key — confirm upstream). */
566  THREAD_ENTRY *thread_p = NULL;
567 
568 #if !defined (NDEBUG)
569  assert (xid->cache_flag == 1); /* One reader, no flags. */
570 #endif /* !NDEBUG */
571 
/* NOTE(review): the xcache_log continuation lines with the format arguments
 * (source lines 573-574) were elided from this extract. */
572  xcache_log ("dummy copy key call: \n"
575 
576  return NO_ERROR;
577 }
578 
579 /*
580  * xcache_compare_key () - XASL hash compare key function.
581  *
582  * return : 0 for match, != 0 for no match.
583  * key1 (in) : Lookup key.
584  * key2 (in) : Entry key.
585  */
/* NOTE(review): this extract elided several lines of this function — notably
 * the cleanup CAS condition (source line 629), the TO_BE_RECOMPILED /
 * SKIP_TO_BE_RECOMPILED condition (source line 706) and a number of
 * xcache_log format-continuation lines (617, 635, 646, 663, 674, 691, 701,
 * 712, 721, 726). Confirm against upstream before modifying. */
586 static int
587 xcache_compare_key (void *key1, void *key2)
588 {
589  XASL_ID *lookup_key = XCACHE_PTR_TO_KEY (key1);
590  XASL_ID *entry_key = XCACHE_PTR_TO_KEY (key2);
591  INT32 cache_flag;
592  THREAD_ENTRY *thread_p = NULL;
593 
594  /* Compare key algorithm depends on sha1 and cache flags.
595  * SHA-1 is generated hash based on query hash text.
596  * If SHA-1 does not match, the entry does not belong to the same query (so clearly no match).
597  *
598  * Even if SHA-1 hash matches, the cache flags can still invalidate the entry.
599  * 1. If this is a cleanup lookup, the entry must be unfixed and unmarked (its cache_flag must be 0). Successful
600  * cleanup can occur if cache-flag is CASed to XCACHE_ENTRY_MARK_DELETED.
601  * 2. Marked deleted entry can only be found with the scope of deleting entry (the lookup key must also be marked as
602  * deleted.
603  * 3. Was recompiled entry can never be found. They are followed by another entry with similar SHA-1 which will can
604  * be found.
605  * 4. To be recompiled entry cannot be found by its recompiler - lookup key is marked as skip to be recompiled.
606  * 5. When all previous flags do not invalidate entry, the thread looking for entry must also increment fix count.
607  * Incrementing fix count can fail if concurrent thread mark entry as deleted.
608  */
609 
610  if (SHA1Compare (&lookup_key->sha1, &entry_key->sha1) != 0)
611  {
612  /* Not the same query. */
613 
614  xcache_log ("compare keys: sha1 mismatch\n"
615  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
616  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
618  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
619  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
620  return -1;
621  }
622  /* SHA-1 hash matched. */
623  /* Now we matched XASL_ID. */
624 
625  if (lookup_key->cache_flag == XCACHE_ENTRY_CLEANUP)
626  {
627  /* Lookup for cleaning the entry from hash. The entry must not be fixed by another and must not have any
628  * flags. */
/* The elided condition (source line 629) CASes entry_key->cache_flag from 0
 * to XCACHE_ENTRY_MARK_DELETED; the entry is cleanable only if unfixed and
 * unflagged. */
630  {
631  /* Successfully marked for delete. */
632  xcache_log ("compare keys: found for cleanup\n"
633  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
634  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
636  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
637  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
638  return 0;
639  }
640  else
641  {
642  /* Entry was fixed or had another flag set. */
643  xcache_log ("compare keys: failed to cleanup\n"
644  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
645  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
647  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
648  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
649  return -1;
650  }
651  }
652 
653  if (lookup_key->cache_flag & XCACHE_ENTRY_MARK_DELETED)
654  {
655  /* we may have multiple threads trying to delete multiple entries at once. each entry is marked with a different
656  * flag so each thread knows what entry to delete. */
657  if (entry_key->cache_flag == XCACHE_ENTRY_DELETED_BY_ME)
658  {
659  /* The deleter found its entry. */
660  xcache_log ("compare keys: found for delete\n"
661  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
662  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
664  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
665  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
666  return 0;
667  }
668  else
669  {
670  /* This is not the deleter. Ignore this entry - it will be removed from hash. */
671  xcache_log ("compare keys: skip not deleted\n"
672  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
673  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
675  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
676  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
677  return -1;
678  }
679  }
680 
681  /* try to fix entry */
/* CAS loop: read cache_flag, reject flagged entries, then atomically bump
 * the fix count; retry if a concurrent writer changed the flag meanwhile. */
682  do
683  {
684  cache_flag = entry_key->cache_flag;
685  if (cache_flag & XCACHE_ENTRY_MARK_DELETED)
686  {
687  /* deleted */
688  xcache_log ("compare keys: skip deleted\n"
689  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
690  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
692  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
693  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
694  return -1;
695  }
696  if (cache_flag & XCACHE_ENTRY_WAS_RECOMPILED)
697  {
698  xcache_log ("compare keys: skip recompiled\n"
699  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
700  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
702  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
703  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
704  return -1;
705  }
/* The elided condition (source line 706) checks XCACHE_ENTRY_TO_BE_RECOMPILED
 * against a lookup key marked XCACHE_ENTRY_SKIP_TO_BE_RECOMPILED (see note 4
 * in the algorithm comment above). */
707  {
708  /* We are trying to insert a new entry to replace the entry to be recompiled. Skip this. */
709  xcache_log ("compare keys: skip to be recompiled\n"
710  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
711  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
713  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
714  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
715  return -1;
716  }
717  }
718  while (!XCACHE_ATOMIC_CAS_CACHE_FLAG (entry_key, cache_flag, cache_flag + 1));
719 
720  /* Successfully marked as reader. */
722 
723  xcache_log ("compare keys: key matched and fixed %s\n"
724  "\t\t lookup key: \n" XCACHE_LOG_SHA1_TEXT
725  "\t\t entry key: \n" XCACHE_LOG_SHA1_TEXT
727  (cache_flag & XCACHE_ENTRY_RECOMPILED_REQUESTED) ? "(recompile requested)" : "",
728  XCACHE_LOG_SHA1_ARGS (&lookup_key->sha1),
729  XCACHE_LOG_SHA1_ARGS (&entry_key->sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
730  return 0;
731 }
732 
733 /*
734  * xcache_hash_key () - Hash index based on key SHA-1.
735  *
736  * return : Hash index.
737  * key (in) : Key value.
738  * hash_table_size (in) : Hash size.
739  */
740 static unsigned int
741 xcache_hash_key (void *key, int hash_table_size)
742 {
/* NOTE(review): source line 743 — presumably "XASL_ID *xasl_id =
 * XCACHE_PTR_TO_KEY (key);" given the uses below — was elided from this
 * extract, as were log continuation lines 747, 749 and 751. */
/* Only the first word of the SHA-1 digest is used for bucket selection. */
744  unsigned int hash_index = ((unsigned int) xasl_id->sha1.h[0]) % hash_table_size;
745 
746  xcache_log ("hash index: \n"
748  "\t\t hash index value = %d \n"
750  XCACHE_LOG_SHA1_ARGS (&xasl_id->sha1),
752 
753  return hash_index;
754 }
755 
756 /*
757  * xcache_find_sha1 () - Lookup XASL cache by SHA-1.
758  *
759  * return : Error code.
760  * thread_p (in) : Thread entry.
761  * sha1 (in) : SHA-1 hash.
762  * search_mode(in) : search mode (for prepare or generic)
763  * xcache_entry (out) : XASL cache entry if found.
764  * rt_check (out) : Set NULL if recompile threshold should not be checked.
765  * Returns recompile result flag (due to recompile threshold).
766  *
767  * Note:
768  *
769  * The scenario flow for XASL recompile is as follows :
770  * 1. Client (CAS) executes a query (execute_query)
771  * 2. Server searches for XASL (xcache_find_sha1 with XASL_CACHE_SEARCH_FOR_EXECUTE);
772  * it detects that XASL needs recompile, sets the XCACHE_ENTRY_RECOMPILED_REQUESTED flag
773  * and returns error ER_QPROC_INVALID_XASLNODE to client
774  * [ While this flag is set, the XASL is still valid and is still being used ]
775  * 3. Client handles this error by performing a prepare_query (without XASL generation)
776  * 4. Server receives the first prepare_query (but still recompile_xasl == false)
777  * It searches using xcache_find_sha1 with XASL_CACHE_SEARCH_FOR_PREPARE mode, and detects that
778  * a recompile is needed (XCACHE_ENTRY_RECOMPILED_REQUESTED) : returns the XASL entry and sets the output
779  * parameter *rt_check = true; The recompile_xasl is set and returned to client
780  * 5. On client (do_prepare_select), the recompile_xasl flag is detected : the query is recompiled
781  * (XASL is regenerated) and send again to server (2nd prepare_query)
782  * 6. Server receives the second prepare_query (this time with recompile_xasl == true);
783  * A new XASL cache entry is created and is attempted to be added.
784  * Since it finds the existing one (having XCACHE_ENTRY_RECOMPILED_REQUESTED flag), the first insert fails,
785  * which is marked as XCACHE_ENTRY_TO_BE_RECOMPILED (the flag XCACHE_ENTRY_RECOMPILED_REQUESTED is cleared).
786  * The xcache_insert loop then proceeds to inserts the new recompiled entry (xcache_find_sha1 ignores the
787  * existing entry), and afterwards marks the old entry as XCACHE_ENTRY_WAS_RECOMPILED.
788  * After last unfix, the state is translated to XCACHE_ENTRY_MARK_DELETED, and then deleted.
789  */
/* NOTE(review): this extract elided a few statistics/logging lines from the
 * body (source lines 804, 806-807, 816-817, 819, 828-829, 871) — presumably
 * xcache_check_logging, XCACHE_STAT_INC (lookups/hits/miss) and log format
 * continuations. Confirm against upstream. */
790 int
791 xcache_find_sha1 (THREAD_ENTRY * thread_p, const SHA1Hash * sha1, const XASL_CACHE_SEARCH_MODE search_mode,
792  XASL_CACHE_ENTRY ** xcache_entry, xasl_cache_rt_check_result * rt_check)
793 {
794  XASL_ID lookup_key;
795  int error_code = NO_ERROR;
796 
797  assert (xcache_entry != NULL && *xcache_entry == NULL);
798 
799  if (!xcache_Enabled)
800  {
801  return NO_ERROR;
802  }
803 
805 
808 
/* Build a lookup key: null XASL_ID carrying only the SHA-1 to match on. */
809  XASL_ID_SET_NULL (&lookup_key);
810  lookup_key.sha1 = *sha1;
811 
/* A successful find already incremented the entry's fix count (done inside
 * xcache_compare_key's CAS loop). */
812  *xcache_entry = xcache_Hashmap.find (thread_p, lookup_key);
813  if (*xcache_entry == NULL)
814  {
815  /* No match! */
818  xcache_log ("could not find cache entry: \n"
820  XCACHE_LOG_SHA1_ARGS (&lookup_key.sha1), XCACHE_LOG_TRAN_ARGS (thread_p));
821 
822  return NO_ERROR;
823  }
824  /* Found a match. */
825  /* We have incremented fix count, we don't need lf_tran anymore. */
826  xcache_Hashmap.end_tran (thread_p);
827 
830 
831  assert (*xcache_entry != NULL);
832 
/* rt_check == NULL means the caller does not want recompile-threshold
 * checking (see function header). */
833  if (rt_check)
834  {
835  /* Check if query should be recompile. */
836  if (search_mode == XASL_CACHE_SEARCH_FOR_PREPARE
837  && ((*xcache_entry)->xasl_id.cache_flag & XCACHE_ENTRY_RECOMPILED_REQUESTED) != 0)
838  {
839  /* this is first prepare_query request after an execute_query detected the recompile case
840  * (the same client which received the execute_query error ER_QPROC_INVALID_XASLNODE, sends this request)
841  * We need to re-ask client to also send the recompiled XASL (this coresponds to step 3 in Notes).
842  */
843  *rt_check = XASL_CACHE_RECOMPILE_PREPARE;
844  }
845  else
846  {
847  bool recompile_needed = xcache_check_recompilation_threshold (thread_p, *xcache_entry);
848  if (recompile_needed)
849  {
850  /* We need to recompile. */
851  /* and we need to clear the list cache entry first */
/* Release our fix before reporting the recompile request; the caller gets no
 * entry in this case. */
852  xcache_unfix (thread_p, *xcache_entry);
853  *xcache_entry = NULL;
854  if (search_mode == XASL_CACHE_SEARCH_FOR_EXECUTE)
855  {
856  *rt_check = XASL_CACHE_RECOMPILE_EXECUTE;
857  }
858  else
859  {
860  assert (search_mode == XASL_CACHE_SEARCH_FOR_PREPARE);
861  *rt_check = XASL_CACHE_RECOMPILE_PREPARE;
862  }
863 
864  return NO_ERROR;
865  }
866  }
867  }
868 
869  assert (*xcache_entry != NULL);
870  xcache_log ("found cache entry by sha1: \n"
872  XCACHE_LOG_ENTRY_ARGS (*xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
873 
874  return NO_ERROR;
875 }
876 
877 /*
878  * xcache_find_xasl_id_for_execute () - Find XASL cache entry by XASL_ID. Besides matching SHA-1, we have to match
879  * time_stored.
880  *
881  * return : NO_ERROR.
882  * thread_p (in) : Thread entry.
883  * xid (in) : XASL_ID.
884  * xcache_entry (out) : XASL cache entry if found.
885  * xclone (out) : XASL_CLONE (obtained from cache or loaded).
886  */
887 int
888 xcache_find_xasl_id_for_execute (THREAD_ENTRY * thread_p, const XASL_ID * xid, XASL_CACHE_ENTRY ** xcache_entry,
889  XASL_CLONE * xclone)
890 {
891  int error_code = NO_ERROR;
892  HL_HEAPID save_heapid = 0;
893  int oid_index;
894  int lock_result;
895  bool use_xasl_clone = false;
896  xasl_cache_rt_check_result recompile_due_to_threshold = XASL_CACHE_RECOMPILE_NOT_NEEDED;
897 
898  assert (xid != NULL);
899  assert (xcache_entry != NULL && *xcache_entry == NULL);
900  assert (xclone != NULL);
901 
902  error_code = xcache_find_sha1 (thread_p, &xid->sha1, XASL_CACHE_SEARCH_FOR_EXECUTE, xcache_entry,
903  &recompile_due_to_threshold);
904  if (error_code != NO_ERROR)
905  {
906  ASSERT_ERROR ();
907  return error_code;
908  }
909  if (*xcache_entry == NULL)
910  {
911  /* No entry was found. */
912  if (recompile_due_to_threshold == XASL_CACHE_RECOMPILE_EXECUTE)
913  {
917  }
918  return NO_ERROR;
919  }
920  if ((*xcache_entry)->xasl_id.time_stored.sec != xid->time_stored.sec
921  || (*xcache_entry)->xasl_id.time_stored.usec != xid->time_stored.usec)
922  {
923  /* We don't know if this XASL cache entry is good for us. We need to restart by recompiling. */
924  xcache_log ("could not get cache entry because time_stored mismatch \n"
925  XCACHE_LOG_ENTRY_TEXT ("entry")
926  XCACHE_LOG_XASL_ID_TEXT ("lookup xasl_id")
928  XCACHE_LOG_ENTRY_ARGS (*xcache_entry),
930  xcache_unfix (thread_p, *xcache_entry);
931  *xcache_entry = NULL;
932 
933  /* TODO:
934  * The one reason we cannot accept this cache entry is because one of the referenced classes might have suffered
935  * a schema change. Or maybe a serial may have been altered, although I am not sure this can actually affect our
936  * plan.
937  * Instead of using time_stored, we could find another way to identify if an XASL cache entry is still usable.
938  * Something that could detect if classes have been modified (and maybe serials).
939  */
940 
941  return NO_ERROR;
942  }
943  else
944  {
945  xcache_log ("found cache entry by xasl_id: \n"
946  XCACHE_LOG_ENTRY_TEXT ("entry")
947  XCACHE_LOG_XASL_ID_TEXT ("lookup xasl_id")
949  XCACHE_LOG_ENTRY_ARGS (*xcache_entry),
951  }
952 
953  assert ((*xcache_entry) != NULL);
954 
955  /* Get lock on all classes in xasl cache entry. */
956  /* The reason we need to do the locking here is to confirm the entry validity. Without the locks, we cannot guarantee
957  * the entry will remain valid (somebody holding SCH_M_LOCK may invalidate it). Moreover, in most cases, the
958  * transaction did not have locks up to this point (because it executes a prepared query).
959  * So, we have to get all locks and then check the entry validity.
960  */
961  for (oid_index = 0; oid_index < (*xcache_entry)->n_related_objects; oid_index++)
962  {
963  if ((*xcache_entry)->related_objects[oid_index].lock <= NULL_LOCK)
964  {
965  /* No lock. */
966  continue;
967  }
968 
969  lock_result = lock_object (thread_p, &(*xcache_entry)->related_objects[oid_index].oid, oid_Root_class_oid,
970  (*xcache_entry)->related_objects[oid_index].lock, LK_UNCOND_LOCK);
971  if (lock_result != LK_GRANTED)
972  {
973  ASSERT_ERROR_AND_SET (error_code);
974  xcache_unfix (thread_p, *xcache_entry);
975  *xcache_entry = NULL;
976  xcache_log ("could not get cache entry because lock on oid failed: \n"
977  XCACHE_LOG_ENTRY_TEXT ("entry")
978  XCACHE_LOG_ENTRY_OBJECT_TEXT ("object that could not be locked")
980  XCACHE_LOG_ENTRY_ARGS (*xcache_entry),
981  XCACHE_LOG_ENTRY_OBJECT_ARGS (*xcache_entry, oid_index), XCACHE_LOG_TRAN_ARGS (thread_p));
982 
983  return error_code;
984  }
985  }
986 
987  /* Check the entry is still valid. Uses atomic to prevent any code reordering */
988  if (ATOMIC_INC_32 (&((*xcache_entry)->xasl_id.cache_flag), 0) & XCACHE_ENTRY_MARK_DELETED)
989  {
990  /* Someone has marked entry as deleted. */
991  xcache_log ("could not get cache entry because it was deleted until locked: \n"
993  XCACHE_LOG_ENTRY_ARGS (*xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
994 
995  xcache_unfix (thread_p, *xcache_entry);
996  *xcache_entry = NULL;
997  return NO_ERROR;
998  }
999 
1000  assert ((*xcache_entry) != NULL);
1001 
1002  if (xcache_uses_clones ())
1003  {
1004  use_xasl_clone = true;
1005  /* Try to fetch a cached clone. */
1006  if ((*xcache_entry)->cache_clones == NULL)
1007  {
1008  assert_release (false);
1009  /* Fall through. */
1010  }
1011  else
1012  {
1013  (void) pthread_mutex_lock (&(*xcache_entry)->cache_clones_mutex);
1014  assert ((*xcache_entry)->n_cache_clones <= xcache_Max_clones);
1015  if ((*xcache_entry)->n_cache_clones > 0)
1016  {
1017  /* A clone is available. */
1018  *xclone = (*xcache_entry)->cache_clones[--(*xcache_entry)->n_cache_clones];
1019  (void) pthread_mutex_unlock (&(*xcache_entry)->cache_clones_mutex);
1020 
1021  assert (xclone->xasl != NULL && xclone->xasl_buf != NULL);
1022 
1023  xcache_log ("found cached clone: \n"
1024  XCACHE_LOG_ENTRY_TEXT ("entry")
1025  XCACHE_LOG_XASL_ID_TEXT ("lookup xasl_id")
1028  XCACHE_LOG_ENTRY_ARGS (*xcache_entry),
1030  XCACHE_LOG_CLONE_ARGS (xclone), XCACHE_LOG_TRAN_ARGS (thread_p));
1031  return NO_ERROR;
1032  }
1033  (void) pthread_mutex_unlock (&(*xcache_entry)->cache_clones_mutex);
1034  }
1035  /* Clone not found. */
1036  /* When clones are activated, we use global heap to generate the XASL's; this way, other threads can use the
1037  * clone. */
1038  save_heapid = db_change_private_heap (thread_p, 0);
1039  }
1040  error_code =
1041  stx_map_stream_to_xasl (thread_p, &xclone->xasl, use_xasl_clone, (*xcache_entry)->stream.buffer,
1042  (*xcache_entry)->stream.buffer_size, &xclone->xasl_buf);
1043  if (save_heapid != 0)
1044  {
1045  /* Restore heap id. */
1046  (void) db_change_private_heap (thread_p, save_heapid);
1047  }
1048  if (error_code != NO_ERROR)
1049  {
1050  ASSERT_ERROR ();
1051  assert (xclone->xasl == NULL && xclone->xasl_buf == NULL);
1052  xcache_unfix (thread_p, *xcache_entry);
1053  *xcache_entry = NULL;
1054 
1055  xcache_log_error ("could not load XASL tree and buffer: \n"
1057  XCACHE_LOG_XASL_ID_ARGS (xid), XCACHE_LOG_TRAN_ARGS (thread_p));
1058 
1059  return error_code;
1060  }
1061  assert (xclone->xasl != NULL && xclone->xasl_buf != NULL);
1062 
1063  xcache_log ("loaded xasl clone: \n"
1064  XCACHE_LOG_ENTRY_TEXT ("entry")
1065  XCACHE_LOG_XASL_ID_TEXT ("lookup xasl_id")
1068  XCACHE_LOG_ENTRY_ARGS (*xcache_entry),
1070 
1071  return NO_ERROR;
1072 }
1073 
1074 /*
1075  * xcache_unfix () - Unfix XASL cache entry by decrementing the fix count in the cache flag. If we are the last
1076  * user of an entry marked as deleted, remove it from the hash.
1077  *
1078  * return : Void.
1079  * thread_p (in) : Thread entry.
1080  * xcache_entry (in/out) : XASL cache entry.
1081  */
1082 void
1083 xcache_unfix (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry)
1084 {
1085  INT32 cache_flag = 0;
1086  INT32 new_cache_flag = 0;
1087  int error_code = NO_ERROR;
1088  struct timeval time_last_used;
1089 
1090  assert (xcache_entry != NULL);
1092 
1093  /* Mark last used. We need to do an atomic operation here. We cannot set both tv_sec and tv_usec and we don't have to.
1094  * Setting tv_sec is enough.
1095  */
1096  (void) gettimeofday (&time_last_used, NULL);
1097  ATOMIC_TAS (&xcache_entry->time_last_used.tv_sec, time_last_used.tv_sec);
1098 
 /* Count one more completed use of this entry (reported as "reference count" by xcache_dump). */
1100  ATOMIC_INC_64 (&xcache_entry->ref_count, 1);
1101 
1102  /* Decrement the number of users. */
 /* CAS loop: compute the new flag from the current one and retry until the compare-and-swap succeeds. */
1103  do
1104  {
1105  cache_flag = xcache_entry->xasl_id.cache_flag;
1106  new_cache_flag = cache_flag;
1107 
1108  /* There should be at least one fix. */
1109  assert ((new_cache_flag & XCACHE_ENTRY_FIX_COUNT_MASK) != 0);
1110 
1111  /* Unfix */
1112  new_cache_flag = cache_flag - 1;
1113  if (new_cache_flag == XCACHE_ENTRY_TO_BE_RECOMPILED)
1114  {
1115  /* We are the last to have fixed to be recompiled entry. This is an invalid state.
1116  * The recompiler should set the entry as "was recompiled" before unfixing it!
1117  */
1118  assert (false);
1119  xcache_log_error ("unexpected cache_flag = XCACHE_ENTRY_TO_BE_RECOMPILED on unfix: \n"
1120  XCACHE_LOG_ENTRY_TEXT ("invalid entry") XCACHE_LOG_TRAN_TEXT,
1121  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1122  /* Delete the entry. */
1123  new_cache_flag = XCACHE_ENTRY_MARK_DELETED;
1124  }
1125  else if (new_cache_flag == XCACHE_ENTRY_MARK_DELETED)
1126  {
1127  /* If entry is marked as deleted and we are the last thread to have fixed this entry, we must remove it. */
1128  }
1129  else if (new_cache_flag == XCACHE_ENTRY_WAS_RECOMPILED)
1130  {
1131  /* This the last thread to have fixed the entry and we should mark it as deleted and remove it. */
1132  new_cache_flag = XCACHE_ENTRY_MARK_DELETED;
1133  }
1134 
1135  if (new_cache_flag == XCACHE_ENTRY_MARK_DELETED)
1136  {
1137  /* we need to mark entry so only me can delete it */
1138  new_cache_flag = XCACHE_ENTRY_DELETED_BY_ME;
1139  }
1140  }
1141  while (!XCACHE_ATOMIC_CAS_CACHE_FLAG (&xcache_entry->xasl_id, cache_flag, new_cache_flag));
1142 
1143  xcache_log ("unfix entry: \n"
1145  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1146 
1147  if (new_cache_flag == XCACHE_ENTRY_DELETED_BY_ME)
1148  {
1149  /* I am last user after object was marked as deleted. */
1150  xcache_log ("delete entry from hash after unfix: \n"
1152  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1153  /* No need to acquire the clone mutex, since I'm the unique user. */
1154  while (xcache_entry->n_cache_clones > 0)
1155  {
1156  xcache_clone_decache (thread_p, &xcache_entry->cache_clones[--xcache_entry->n_cache_clones]);
1157  }
1158 
1159  /* need to clear list-cache first */
1160  (void) qfile_clear_list_cache (thread_p, xcache_entry->list_ht_no);
1161 
1162  if (!xcache_Hashmap.erase (thread_p, xcache_entry->xasl_id))
1163  {
1164  /* Failure is not expected. */
1165  assert (false);
1167  return;
1168  }
1169  }
1170 }
1171 
1172 /*
1173  * xcache_entry_mark_deleted () - Mark XASL cache entry for delete.
1174  *
1175  * return : True if no one else is using the entry and can be removed from hash.
1176  * thread_p (in) : Thread entry.
1177  * xcache_entry (in) : XASL cache entry.
1178  */
1179 static bool
 /* NOTE(review): the signature line is elided in this listing; per the header comment above this is
  * xcache_entry_mark_deleted (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry) - confirm upstream. */
1181 {
1182  INT32 cache_flag = 0;
1183  INT32 new_cache_flag;
1184 
1185  /* Mark for delete. We must successfully set XCACHE_ENTRY_MARK_DELETED flag. */
1186  do
1187  {
1188  cache_flag = xcache_entry->xasl_id.cache_flag;
1189  if (cache_flag & XCACHE_ENTRY_MARK_DELETED)
1190  {
1191  /* Cleanup could have marked this entry for delete. */
1192  xcache_log ("tried to mark entry as deleted, but somebody else already marked it: \n"
1194  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1195  return false;
1196  }
1197  if (cache_flag & XCACHE_ENTRY_TO_BE_RECOMPILED)
1198  {
1199  /* Somebody is compiling the entry? I think the locks have been messed up. */
1200  xcache_log_error ("tried to mark entry as deleted, but it was marked as to be recompiled: \n"
1202  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1203  assert (false);
1205  return false;
1206  }
1207 
1208  new_cache_flag = cache_flag;
1209  if (new_cache_flag & XCACHE_ENTRY_WAS_RECOMPILED)
1210  {
1211  /* This can happen. Somebody recompiled the entry and it was not (yet) removed. We will replace the flag
1212  * with XCACHE_ENTRY_MARK_DELETED. */
1213  new_cache_flag &= ~XCACHE_ENTRY_WAS_RECOMPILED;
1214  }
1215  new_cache_flag = new_cache_flag | XCACHE_ENTRY_MARK_DELETED;
1216 
 /* If no other fixer remains, claim exclusive delete rights (see XCACHE_ENTRY_DELETED_BY_ME). */
1217  if (new_cache_flag == XCACHE_ENTRY_MARK_DELETED)
1218  {
1219  new_cache_flag = XCACHE_ENTRY_DELETED_BY_ME;
1220  }
1221  }
1222  while (!XCACHE_ATOMIC_CAS_CACHE_FLAG (&xcache_entry->xasl_id, cache_flag, new_cache_flag));
1223 
1224  xcache_log ("marked entry as deleted: \n"
1226  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1227 
 /* The entry is now logically deleted: remove it from the global entry count. */
1230  ATOMIC_INC_32 (&xcache_Entry_count, -1);
1231 
1232  /* The entry can be deleted if the only fixer is this transaction. */
1233  return (new_cache_flag == XCACHE_ENTRY_DELETED_BY_ME);
1234 }
1235 
1236 /*
1237  * xcache_entry_set_request_recompile_flag () - Mark XASL cache entry as "request recompile".
1238  *
1239  * return : True if the flag was successfully set (or cleared).
1240  * thread_p (in) : Thread entry.
1241  * xcache_entry (in) : XASL cache entry.
1242  * set_flag (in) : true if flag should be set, false if should be cleared
1243  */
1244 static bool
1245 xcache_entry_set_request_recompile_flag (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry, bool set_flag)
1246 {
1247  INT32 cache_flag = 0;
1248  INT32 new_cache_flag;
1249 
1250  /* Set or clear XCACHE_ENTRY_RECOMPILED_REQUESTED with a CAS loop; bail out if conflicting flags are present. */
1251  do
1252  {
1253  cache_flag = xcache_entry->xasl_id.cache_flag;
1254 
1255  if (cache_flag & XCACHE_ENTRY_MARK_DELETED)
1256  {
 /* A deleted entry can no longer change recompile state. */
1257  xcache_log ("tried to set flag request recompile, but entry is marked for delete: \n"
1259  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1260  return false;
1261  }
1262 
1263  if (set_flag && (cache_flag & XCACHE_ENTRY_TO_BE_RECOMPILED))
1264  {
1265  /* Somebody is compiling the entry already we are too late */
1266  xcache_log_error ("tried to mark entry as request recompile, but it was marked as to be recompiled: \n"
1268  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1269  return false;
1270  }
1271 
1272  if (!set_flag && (cache_flag & XCACHE_ENTRY_TO_BE_RECOMPILED))
1273  {
1274  /* this is allowed; during recompilation, first we set XCACHE_ENTRY_TO_BE_RECOMPILED, and then clear
1275  * XCACHE_ENTRY_RECOMPILED_REQUESTED */
1276  }
1277 
1278  if (set_flag && (cache_flag & XCACHE_ENTRY_RECOMPILED_REQUESTED))
1279  {
 /* Flag already set by a concurrent requester; nothing to do. */
1280  xcache_log ("tried to mark entry as request recompile, but somebody else already marked it: \n"
1282  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1283  return false;
1284  }
1285 
1286  new_cache_flag = cache_flag;
1287 
1288  if (set_flag)
1289  {
1290  new_cache_flag = new_cache_flag | XCACHE_ENTRY_RECOMPILED_REQUESTED;
1291  }
1292  else
1293  {
1294  new_cache_flag = new_cache_flag & (~XCACHE_ENTRY_RECOMPILED_REQUESTED);
1295  }
1296 
1297  }
1298  while (!XCACHE_ATOMIC_CAS_CACHE_FLAG (&xcache_entry->xasl_id, cache_flag, new_cache_flag));
1299 
1300  if (set_flag)
1301  {
1302  xcache_log ("set entry request recompile flag: \n"
1304  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1305  }
1306  else
1307  {
1308  xcache_log ("clear entry request recompile flag: \n"
1310  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1311  }
1312 
1313  return true;
1314 }
1315 
1316 static XCACHE_CLEANUP_REASON
 /* NOTE(review): the function signature and both guard conditions are elided in this listing.
  * From the return values, this decides whether the cache needs cleanup because it is full
  * (XCACHE_CLEANUP_FULL) or because a time threshold elapsed (XCACHE_CLEANUP_TIMEOUT) - confirm upstream. */
1318 {
1319  struct timeval current_time;
 /* Elided condition: presumably compares entry count against capacity - TODO confirm. */
1321  {
1322  return XCACHE_CLEANUP_FULL;
1323  }
1324  else
1325  {
1326  gettimeofday (&current_time, NULL);
 /* Elided condition: presumably compares current_time against a last-cleanup timestamp - TODO confirm. */
1328  {
1329  return XCACHE_CLEANUP_TIMEOUT;
1330  }
1331  }
1332 
 /* No cleanup required. */
1333  return XCACHE_CLEANUP_NONE;
1334 }
1335 
1336 /*
1337  * xcache_insert () - Insert or recompile XASL cache entry.
1338  *
1339  * return : Error code.
1340  * thread_p (in) : Thread entry.
1341  * context (in) : Compile context (sql_info & recompile_xasl).
1342  * stream (in) : XASL stream.
1343  * n_oid (in) : Related objects count.
1344  * class_oids (in) : Related objects OID's.
1345  * class_locks (in) : Related objects locks.
1346  * tcards (in) : Related objects cardinality.
1347  * xcache_entry (out) : XASL cache entry.
1348  */
1349 int
1350 xcache_insert (THREAD_ENTRY * thread_p, const compile_context * context, XASL_STREAM * stream,
1351  int n_oid, const OID * class_oids, const int *class_locks, const int *tcards,
1352  XASL_CACHE_ENTRY ** xcache_entry)
1353 {
1354  int error_code = NO_ERROR;
1355  bool inserted = false;
1356  XASL_ID xid;
1357  INT32 cache_flag;
1358  INT32 new_cache_flag;
1359  XASL_CACHE_ENTRY *to_be_recompiled = NULL;
1360  XCACHE_RELATED_OBJECT *related_objects = NULL;
1361  char *sql_hash_text = NULL;
1362  char *sql_user_text = NULL;
1363  char *sql_plan_text = NULL;
1364  struct timeval time_stored;
1365  size_t sql_hash_text_len = 0, sql_user_text_len = 0, sql_plan_text_len = 0;
1366  char *strbuf = NULL;
1367 
1368  assert (xcache_entry != NULL && *xcache_entry == NULL);
1369  assert (stream != NULL);
1370  assert (stream->buffer != NULL || !context->recompile_xasl);
1371 
1372  if (!xcache_Enabled)
1373  {
1374  return NO_ERROR;
1375  }
1376 
1378 
 /* Build the hash key: a null XASL_ID carrying only the SQL text's SHA1. */
1379  XASL_ID_SET_NULL (&xid);
1380  xid.sha1 = context->sha1;
1381 
1383 
1384  /* Allocate XASL cache entry data. */
1385  if (n_oid > 0)
1386  {
1387  int index;
1388  related_objects = (XCACHE_RELATED_OBJECT *) malloc (n_oid * sizeof (XCACHE_RELATED_OBJECT));
1389  if (related_objects == NULL)
1390  {
 /* NOTE(review): the er_set call line is elided in this listing; these are its trailing arguments. */
1392  n_oid * sizeof (XCACHE_RELATED_OBJECT));
1393  error_code = ER_OUT_OF_VIRTUAL_MEMORY;
1394  goto error;
1395  }
1396  for (index = 0; index < n_oid; index++)
1397  {
1398  related_objects[index].oid = class_oids[index];
1399  related_objects[index].lock = (LOCK) class_locks[index];
1400  related_objects[index].tcard = tcards[index];
1401  }
1402  }
1403 
 /* Copy all SQL texts into one contiguous allocation (hash text, then user text, then plan text). */
1404  sql_hash_text_len = strlen (context->sql_hash_text) + 1;
1405  if (context->sql_user_text != NULL)
1406  {
1407  sql_user_text_len = strlen (context->sql_user_text) + 1;
1408  }
1409  if (context->sql_plan_text != NULL)
1410  {
1411  sql_plan_text_len = strlen (context->sql_plan_text) + 1;
1412  }
1413  strbuf = (char *) malloc (sql_hash_text_len + sql_user_text_len + sql_plan_text_len);
1414  if (strbuf == NULL)
1415  {
 /* NOTE(review): the er_set call line is elided in this listing; these are its trailing arguments. */
1417  sql_hash_text_len + sql_user_text_len + sql_plan_text_len);
1418  error_code = ER_OUT_OF_VIRTUAL_MEMORY;
1419  goto error;
1420  }
1421 
1422  memcpy (strbuf, context->sql_hash_text, sql_hash_text_len);
1423  sql_hash_text = strbuf;
1424  strbuf += sql_hash_text_len;
1425 
1426  if (sql_user_text_len > 0)
1427  {
1428  memcpy (strbuf, context->sql_user_text, sql_user_text_len);
1429  sql_user_text = strbuf;
1430  strbuf += sql_user_text_len;
1431  }
1432 
1433  if (sql_plan_text_len > 0)
1434  {
1435  memcpy (strbuf, context->sql_plan_text, sql_plan_text_len);
1436  sql_plan_text = strbuf;
1437  }
1438 
1439  /* save stored time */
1440  (void) gettimeofday (&time_stored, NULL);
1441  CACHE_TIME_MAKE (&stream->xasl_id->time_stored, &time_stored);
1442 
1443 
1444  /* We need to do a loop here for recompile_xasl case. It will break after the first iteration if recompile_xasl flag
1445  * is false.
1446  *
1447  * When we want to recompile the XASL cache entry, we try to avoid blocking others from using existing cache entry.
1448  * If an entry exists, the recompiler mark it "to be recompiled". Concurrent transactions can find and use this
1449  * entry. After adding new entry, the original is marked as "was recompiled". This entry can no longer be found
1450  * but is still valid if it was previously obtained.
1451  *
1452  * Things get messy if there are at least two concurrent recompilers. We assume that this does not (or should not)
1453  * happen in real-world scenarios. But if it happens, we need to make it work.
1454  * Multiple recompilers can loop here several times. One entry can be recompiled by one thread at a time. Others will
1455  * loop until current recompiler finishes.
1456  */
1457  while (true)
1458  {
1459  /* Claim a new entry from freelist to initialize. */
1460  *xcache_entry = xcache_Hashmap.freelist_claim (thread_p);
1461  if (*xcache_entry == NULL)
1462  {
1463  ASSERT_ERROR_AND_SET (error_code);
1464  return error_code;
1465  }
1466 
1467  /* Initialize xcache_entry stuff. */
1468  XASL_ID_COPY (&(*xcache_entry)->xasl_id, stream->xasl_id);
1469  (*xcache_entry)->xasl_id.sha1 = context->sha1;
1470  (*xcache_entry)->xasl_id.cache_flag = 1; /* Start with fix count = 1. */
1471  (*xcache_entry)->n_related_objects = n_oid;
1472  (*xcache_entry)->related_objects = related_objects;
1473  (*xcache_entry)->sql_info.sql_hash_text = sql_hash_text;
1474  (*xcache_entry)->sql_info.sql_user_text = sql_user_text;
1475  (*xcache_entry)->sql_info.sql_plan_text = sql_plan_text;
1476  (*xcache_entry)->stream = *stream;
1477  (*xcache_entry)->time_last_rt_check = (INT64) time_stored.tv_sec;
1478  (*xcache_entry)->time_last_used = time_stored;
1479 
1480  /* Now that new entry is initialized, we can try to insert it. */
1481 
1482  inserted = xcache_Hashmap.insert_given (thread_p, xid, *xcache_entry);
1483  assert (*xcache_entry != NULL);
1484 
1485  /* We have incremented fix count, we don't need lf_tran anymore. */
1486  xcache_Hashmap.end_tran (thread_p);
1487 
1488  if (inserted)
1489  {
1490  (*xcache_entry)->free_data_on_uninit = true;
1491  perfmon_inc_stat (thread_p, PSTAT_PC_NUM_ADD);
1492  }
1493  else
1494  {
 /* NOTE(review): statement elided in this listing (existing entry found instead of inserting). */
1496  }
1497 
1498  if (inserted || !context->recompile_xasl)
1499  {
1500  /* The entry is accepted. */
1501 
1502  if (to_be_recompiled != NULL)
1503  {
1504  assert (context->recompile_xasl);
1505  /* Now that we inserted new cache entry, we can mark the old entry as recompiled. */
1506  do
1507  {
1508  cache_flag = to_be_recompiled->xasl_id.cache_flag;
 /* NOTE(review): the guard condition for this error block is elided in this listing
  * (presumably detects unexpected flags on the entry being recompiled) - confirm upstream. */
1510  {
1511  /* Unexpected flags. */
1512  assert (false);
1513  xcache_log_error ("unexpected flag for entry to be recompiled: \n"
1514  XCACHE_LOG_ENTRY_TEXT ("entry to be recompiled")
1515  "\t cache_flag = %d\n"
1517  XCACHE_LOG_ENTRY_ARGS (to_be_recompiled),
1518  cache_flag, XCACHE_LOG_TRAN_ARGS (thread_p));
1520  error_code = ER_QPROC_INVALID_XASLNODE;
1521  goto error;
1522  }
1523  new_cache_flag = (cache_flag & XCACHE_ENTRY_FIX_COUNT_MASK) | XCACHE_ENTRY_WAS_RECOMPILED;
1524  }
1525  while (!XCACHE_ATOMIC_CAS_CACHE_FLAG (&to_be_recompiled->xasl_id, cache_flag, new_cache_flag));
1526  /* We marked the entry as recompiled. */
1527  xcache_log ("marked entry as recompiled: \n"
1529  XCACHE_LOG_ENTRY_ARGS (to_be_recompiled), XCACHE_LOG_TRAN_ARGS (thread_p));
1530  xcache_unfix (thread_p, to_be_recompiled);
1531  to_be_recompiled = NULL;
1532  }
1533  else if (inserted)
1534  {
1535  /* new entry added */
1536  ATOMIC_INC_32 (&xcache_Entry_count, 1);
1537  }
1538 
1539  xcache_log ("successful find or insert: \n"
1540  XCACHE_LOG_ENTRY_TEXT ("entry found or inserted")
1541  "\t found or inserted = %s \n"
1542  "\t recompile xasl = %s \n"
1544  XCACHE_LOG_ENTRY_ARGS (*xcache_entry),
1545  inserted ? "inserted" : "found",
1546  context->recompile_xasl ? "true" : "false", XCACHE_LOG_TRAN_ARGS (thread_p));
1547  break;
1548  }
1549 
1550  assert (!inserted && context->recompile_xasl);
1551  assert (to_be_recompiled == NULL);
1552  /* We want to refresh the xasl cache entry, not to use existing. */
1553  /* Mark existing as to be recompiled. */
1554  do
1555  {
1556  cache_flag = (*xcache_entry)->xasl_id.cache_flag;
1557  if (cache_flag & XCACHE_ENTRY_MARK_DELETED)
1558  {
1559  /* Deleted? We certainly did not expect. */
1560  assert (false);
1561  xcache_log_error ("(recompile) entry is marked as deleted: \n"
1563  XCACHE_LOG_ENTRY_ARGS (*xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1564  xcache_unfix (thread_p, *xcache_entry);
1565  *xcache_entry = NULL;
1567  error_code = ER_QPROC_INVALID_XASLNODE;
1568  goto error;
1569  }
 /* NOTE(review): the guard condition for this block is elided in this listing
  * (presumably checks XCACHE_ENTRY_TO_BE_RECOMPILED on the existing entry) - confirm upstream. */
1571  {
1572  /* Somebody else recompiles this entry. Loop again. */
1573  xcache_log ("(recompile) entry is recompiled by somebody else: \n"
1575  XCACHE_LOG_ENTRY_ARGS (*xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
1577  xcache_unfix (thread_p, *xcache_entry);
1578  *xcache_entry = NULL;
1579  break;
1580  }
1581  /* Set XCACHE_ENTRY_TO_BE_RECOMPILED to be recompiled flag. */
1582  new_cache_flag = cache_flag | XCACHE_ENTRY_TO_BE_RECOMPILED;
1583  }
1584  while (!XCACHE_ATOMIC_CAS_CACHE_FLAG (&(*xcache_entry)->xasl_id, cache_flag, new_cache_flag));
1585 
1586  if (*xcache_entry != NULL)
1587  {
1588  /* We have marked this entry to be recompiled. We have to insert new and then we will mark it as recompiled.
1589  */
1590  to_be_recompiled = *xcache_entry;
1591  /* clear request recompile flag */
1592  xcache_entry_set_request_recompile_flag (thread_p, to_be_recompiled, false);
1593  *xcache_entry = NULL;
1595 
1596  xcache_log ("(recompile) we marked entry to be recompiled: \n"
1598  XCACHE_LOG_ENTRY_ARGS (to_be_recompiled), XCACHE_LOG_TRAN_ARGS (thread_p));
1599 
1600  /* We have to "inherit" the time_stored from this entry. The new XASL cache entry should be usable by anyone
1601  * that cached this entry on client. Currently, xcache_find_xasl_id uses time_stored to match the entries.
1602  */
1603  stream->xasl_id->time_stored = to_be_recompiled->xasl_id.time_stored;
1604 
1605  /* On next find or insert, we want to skip the to be recompiled entry. */
1607  /* We don't unfix yet. */
1608  }
1609  else
1610  {
1611 #if defined (SERVER_MODE)
1612  /* Failed marking entry for recompile; we need to try again, but just to avoid burning CPU sleep a little
1613  * first.
1614  */
1615  thread_sleep (1);
1616 #endif /* SERVER_MODE */
1617  }
1618  /* Try to insert again. */
1619  }
1620  /* Found or inserted entry. */
1621  assert (*xcache_entry != NULL);
1622 
1623  if (!inserted)
1624  {
 /* An existing entry was reused: the buffers prepared for the new entry are no longer needed. */
1625  /* Free allocated resources. */
1626  if (related_objects)
1627  {
1628  free (related_objects);
1629  }
1630  if (sql_hash_text)
1631  {
1632  free (sql_hash_text);
1633  }
1634  free_and_init (stream->buffer);
1635  }
1636  else
1637  {
 /* NOTE(review): the guard condition is elided in this listing (presumably a cleanup-needed check). */
1639  {
1640  /* Try to clean up some of the oldest entries. */
1641  xcache_cleanup (thread_p);
1642  }
1643 
1644  /* XASL stream was used. Remove from argument. */
1645  stream->buffer = NULL;
1646  }
1647 
1648  return NO_ERROR;
1649 
1650 error:
1651  assert (error_code != NO_ERROR);
1652  ASSERT_ERROR ();
1653  if ((*xcache_entry) != NULL)
1654  {
1655  xcache_Hashmap.freelist_retire (thread_p, *xcache_entry);
1656  }
1657  if (to_be_recompiled)
1658  {
1659  /* Remove to be recompiled flag. */
1660  do
1661  {
1662  cache_flag = to_be_recompiled->xasl_id.cache_flag;
1663  new_cache_flag = cache_flag & (~XCACHE_ENTRY_TO_BE_RECOMPILED);
1664  }
1665  while (!XCACHE_ATOMIC_CAS_CACHE_FLAG (&to_be_recompiled->xasl_id, cache_flag, new_cache_flag));
1666  xcache_unfix (thread_p, to_be_recompiled);
1667  }
1668  if (related_objects)
1669  {
1670  free (related_objects);
1671  }
1672  if (sql_hash_text)
1673  {
1674  free (sql_hash_text);
1675  }
1676  return error_code;
1677 }
1678 
1679 /*
1680  * xcache_invalidate_qcaches () - Invalidate (clear) the list-file query cache of every XASL cache entry
1681  * related to the given object.
1682  *
1683  * return : NO_ERROR, or the error code returned by qfile_clear_list_cache.
1684  * thread_p (in) : Thread entry.
1685  * oid (in) : Object OID; entries whose related objects include this OID are invalidated.
1687  */
1688 int
1689 xcache_invalidate_qcaches (THREAD_ENTRY * thread_p, const OID * oid)
1690 {
1691  int res = NO_ERROR;
1692  bool finished = false;
1693  XASL_CACHE_ENTRY *xcache_entry = NULL;
1694 
1695  if (!xcache_Enabled)
1696  {
1697  return NO_ERROR;
1698  }
1699 
1700  xcache_hashmap_iterator iter = { thread_p, xcache_Hashmap };
1701 
1702  while (!finished)
1703  {
1704  /* make sure to start from beginning */
1705  iter.restart ();
1706 
1707  /* Iterate through hash, check entry OID's and if one matches the argument, mark the entry for delete and save
1708  * it in delete_xids buffer. We cannot delete them from hash while iterating, because the one lock-free
1709  * transaction can be used for one hash entry only.
1710  */
1711  while (true)
1712  {
1713  int num_entries;
1714 
1715  xcache_entry = iter.iterate ();
1716  if (xcache_entry == NULL)
1717  {
1718  finished = true;
1719  break;
1720  }
1721 
1722  num_entries = qfile_get_list_cache_number_of_entries (xcache_entry->list_ht_no);
1723  if (num_entries > 0 && xcache_entry_is_related_to_oid (xcache_entry, oid))
1724  {
1725  res = qfile_clear_list_cache (thread_p, xcache_entry->list_ht_no);
1726  if (res != NO_ERROR)
1727  {
1728  finished = true;
1729  break;
1730  }
1731  }
1732  }
1733  }
1734 
1735  return res;
1736 }
1737 
1738 /*
1739  * xcache_invalidate_entries () - Invalidate all cache entries which pass the invalidation check. If there is no
1740  * invalidation check, all cache entries are removed.
1741  * return : Void.
1742  * thread_p (in) : Thread entry.
1743  * invalidate_check (in) : Invalidation check function.
1744  * arg (in) : Argument for invalidation check function.
1745  */
1746 static void
1747 xcache_invalidate_entries (THREAD_ENTRY * thread_p, bool (*invalidate_check) (XASL_CACHE_ENTRY *, const OID *),
1748  const OID * arg)
1749 {
1750 #define XCACHE_DELETE_XIDS_SIZE 1024
1751  XASL_CACHE_ENTRY *xcache_entry = NULL;
1752  XASL_ID delete_xids[XCACHE_DELETE_XIDS_SIZE];
1753  int n_delete_xids = 0;
1754  int xid_index = 0;
1755  bool finished = false;
1756 
1757  if (!xcache_Enabled)
1758  {
1759  return;
1760  }
1761 
1762  xcache_hashmap_iterator iter = { thread_p, xcache_Hashmap };
1763 
1764  while (!finished)
1765  {
1766  /* make sure to start from beginning */
1767  iter.restart ();
1768 
1769  /* Iterate through hash, check entry OID's and if one matches the argument, mark the entry for delete and save
1770  * it in delete_xids buffer. We cannot delete them from hash while iterating, because the one lock-free
1771  * transaction can be used for one hash entry only.
1772  */
1773  while (true)
1774  {
1775  xcache_entry = iter.iterate ();
1776  if (xcache_entry == NULL)
1777  {
1778  finished = true;
1779  break;
1780  }
1781 
1782  /* Check invalidation conditions. */
1783  if (invalidate_check == NULL || invalidate_check (xcache_entry, arg))
1784  {
 /* NOTE(review): a guard condition is elided here in this listing (presumably checks that the
  * entry has a valid list cache before clearing it) - confirm upstream. */
1786  {
1787  /* delete query cache from xcache entry */
1788  {
1789  qfile_clear_list_cache (thread_p, xcache_entry->list_ht_no);
1790  }
1791  }
1792 
1793  /* Mark entry as deleted. */
1794  if (xcache_entry_mark_deleted (thread_p, xcache_entry))
1795  {
1796  /*
1797  * Successfully marked for delete. Save it to delete after the iteration.
1798  * No need to acquire the clone mutex, since I'm the unique user.
1799  */
1800  while (xcache_entry->n_cache_clones > 0)
1801  {
1802  xcache_clone_decache (thread_p, &xcache_entry->cache_clones[--xcache_entry->n_cache_clones]);
1803  }
1804  delete_xids[n_delete_xids++] = xcache_entry->xasl_id;
1805  }
1806  }
1807 
1808  if (n_delete_xids == XCACHE_DELETE_XIDS_SIZE)
1809  {
1810  /* Full buffer. Interrupt iteration and we'll start over. */
1811  xcache_Hashmap.end_tran (thread_p);
1812 
1813  xcache_log ("xcache_remove_by_oid full buffer\n" XCACHE_LOG_TRAN_TEXT, XCACHE_LOG_TRAN_ARGS (thread_p));
1814 
1815  break;
1816  }
1817  }
1818 
1819  /* Remove collected entries. */
1820  for (xid_index = 0; xid_index < n_delete_xids; xid_index++)
1821  {
1822  if (!xcache_Hashmap.erase (thread_p, delete_xids[xid_index]))
1823  {
1824  /* I don't think this is expected. */
1825  assert (false);
1826  }
1827  }
1828  n_delete_xids = 0;
1829  }
1830 
1831 #undef XCACHE_DELETE_XIDS_SIZE
1832 }
1833 
1834 /*
1835  * xcache_entry_is_related_to_oid () - Is XASL cache entry related to the OID given as argument.
1836  *
1837  * return : True if entry is related, false otherwise.
1838  * xcache_entry (in) : XASL cache entry.
1839  * arg (in) : Pointer to OID.
1840  */
1841 static bool
1842 xcache_entry_is_related_to_oid (XASL_CACHE_ENTRY * xcache_entry, const OID * related_to_oid)
1843 {
1844  int oid_idx = 0;
1845 
1846  assert (xcache_entry != NULL);
1847  assert (related_to_oid != NULL);
1848 
1849  for (oid_idx = 0; oid_idx < xcache_entry->n_related_objects; oid_idx++)
1850  {
1851  if (OID_EQ (&xcache_entry->related_objects[oid_idx].oid, related_to_oid))
1852  {
1853  /* Found relation. */
1854  return true;
1855  }
1856  }
1857  /* Not related. */
1858  return false;
1859 }
1860 
1861 /*
1862  * xcache_remove_by_oid () - Remove all XASL cache entries related to given object.
1863  *
1864  * return : Void.
1865  * thread_p (in) : Thread entry.
1866  * oid (in) : Object ID.
1867  */
1868 void
1869 xcache_remove_by_oid (THREAD_ENTRY * thread_p, const OID * oid)
1870 {
1871  if (!xcache_Enabled)
1872  {
1873  return;
1874  }
1875 
1877 
1878  xcache_log ("remove all entries: \n"
1879  "\t OID = %d|%d|%d \n" XCACHE_LOG_TRAN_TEXT, OID_AS_ARGS (oid), XCACHE_LOG_TRAN_ARGS (thread_p));
 /* NOTE(review): the statement performing the removal is elided in this listing - presumably
  * xcache_invalidate_entries (thread_p, xcache_entry_is_related_to_oid, oid); confirm upstream. */
1881 }
1882 
1883 /*
1884  * xcache_drop_all () - Remove all entries from XASL cache.
1885  *
1886  * return : Void.
1887  * thread_p (in) : Thread entry.
1888  */
1889 void
 /* NOTE(review): the signature line is elided in this listing; per the header comment above this is
  * xcache_drop_all (THREAD_ENTRY * thread_p) - confirm upstream. */
1891 {
1892  if (!xcache_Enabled)
1893  {
1894  return;
1895  }
1896 
1898 
1899  xcache_log ("drop all queries \n" XCACHE_LOG_TRAN_TEXT, XCACHE_LOG_TRAN_ARGS (thread_p));
 /* A NULL check function makes xcache_invalidate_entries match (and remove) every entry. */
1900  xcache_invalidate_entries (thread_p, NULL, NULL);
1901 }
1902 
1903 /*
1904  * xcache_dump () - Dump XASL cache.
1905  *
1906  * return : Void.
1907  * thread_p (in) : Thread entry.
1908  * fp (out) : Output.
1909  */
1910 void
1911 xcache_dump (THREAD_ENTRY * thread_p, FILE * fp)
1912 {
1913  XASL_CACHE_ENTRY *xcache_entry = NULL;
1914  int oid_index;
1915  char *sql_id = NULL;
1916 
1917  assert (fp);
1918 
1919  fprintf (fp, "\n");
1920 
1921  if (!xcache_Enabled)
1922  {
1923  fprintf (fp, "XASL cache is disabled.\n");
1924  return;
1925  }
1926 
1927  /* NOTE: While dumping information, other threads are still free to modify the existing entries. */
1928 
 /* Global statistics section: counters are read via XCACHE_STAT_GET and atomic reads (no locking). */
1929  fprintf (fp, "XASL cache\n");
1930  fprintf (fp, "Stats: \n");
1931  fprintf (fp, "Max size: %d\n", xcache_Soft_capacity);
1932  fprintf (fp, "Current entry count: %d\n", ATOMIC_INC_32 (&xcache_Entry_count, 0));
1933  fprintf (fp, "Lookups: %lld\n", (long long) XCACHE_STAT_GET (lookups));
1934  fprintf (fp, "Hits: %lld\n", (long long) XCACHE_STAT_GET (hits));
1935  fprintf (fp, "Miss: %lld\n", (long long) XCACHE_STAT_GET (miss));
1936  fprintf (fp, "Inserts: %lld\n", (long long) XCACHE_STAT_GET (inserts));
1937  fprintf (fp, "Found at insert: %lld\n", (long long) XCACHE_STAT_GET (found_at_insert));
1938  fprintf (fp, "Recompiles: %lld\n", (long long) XCACHE_STAT_GET (recompiles));
1939  fprintf (fp, "Failed recompiles: %lld\n", (long long) XCACHE_STAT_GET (failed_recompiles));
1940  fprintf (fp, "Deletes: %lld\n", (long long) XCACHE_STAT_GET (deletes));
1941  fprintf (fp, "Fix: %lld\n", (long long) XCACHE_STAT_GET (fix));
1942  fprintf (fp, "Unfix: %lld\n", (long long) XCACHE_STAT_GET (unfix));
1943  fprintf (fp, "Cache cleanups: %lld\n", (long long) XCACHE_STAT_GET (cleanups));
1944  fprintf (fp, "Deletes at cleanup: %lld\n", (long long) XCACHE_STAT_GET (deletes_at_cleanup));
1945  /* add overflow, RT checks. */
1946 
1947  xcache_hashmap_iterator iter = { thread_p, xcache_Hashmap };
1948 
 /* Per-entry section: iterate the whole hash and print each entry's id, flags, and related objects. */
1949  fprintf (fp, "\nEntries:\n");
1950  while ((xcache_entry = iter.iterate ()) != NULL)
1951  {
1952  fprintf (fp, "\n");
1953  fprintf (fp, " XASL_ID = { \n");
1954  fprintf (fp, " sha1 = { %08x %08x %08x %08x %08x }, \n", SHA1_AS_ARGS (&xcache_entry->xasl_id.sha1));
1955  fprintf (fp, " time_stored = %d sec, %d usec \n",
1956  xcache_entry->xasl_id.time_stored.sec, xcache_entry->xasl_id.time_stored.usec);
1957  fprintf (fp, " } \n");
1958  fprintf (fp, " fix_count = %d \n", xcache_entry->xasl_id.cache_flag & XCACHE_ENTRY_FIX_COUNT_MASK);
1959  fprintf (fp, " cache flags = %08x \n", xcache_entry->xasl_id.cache_flag & XCACHE_ENTRY_FLAGS_MASK);
1960  fprintf (fp, " reference count = %lld \n", (long long) ATOMIC_INC_64 (&xcache_entry->ref_count, 0));
1961  fprintf (fp, " time second last used = %lld \n", (long long) xcache_entry->time_last_used.tv_sec);
1962  if (xcache_uses_clones ())
1963  {
1964  fprintf (fp, " clone count = %d \n", xcache_entry->n_cache_clones);
1965  }
1966  fprintf (fp, " sql info: \n");
1967 
1968  qmgr_get_sql_id (thread_p, &sql_id, xcache_entry->sql_info.sql_hash_text,
1969  strlen (xcache_entry->sql_info.sql_hash_text));
1970  fprintf (fp, " SQL_ID = %s \n", sql_id ? sql_id : "(UNKNOWN)");
1971  if (sql_id != NULL)
1972  {
1973  free_and_init (sql_id);
1974  }
1975 
1976  fprintf (fp, " sql user text = %s \n", xcache_entry->sql_info.sql_user_text);
1977  fprintf (fp, " sql hash text = %s \n", xcache_entry->sql_info.sql_hash_text);
 /* NOTE(review): a guard condition is elided here in this listing (presumably a plan-dump system
  * parameter check) - confirm upstream. */
1979  {
1980  fprintf (fp, " sql plan text = %s \n",
1981  xcache_entry->sql_info.sql_plan_text ? xcache_entry->sql_info.sql_plan_text : "(NONE)");
1982  }
1983 
1984  fprintf (fp, " OID_LIST (count = %d): \n", xcache_entry->n_related_objects);
1985  for (oid_index = 0; oid_index < xcache_entry->n_related_objects; oid_index++)
1986  {
1987  fprintf (fp, " OID = %d|%d|%d, LOCK = %s, TCARD = %8d \n",
1988  OID_AS_ARGS (&xcache_entry->related_objects[oid_index].oid),
1989  LOCK_TO_LOCKMODE_STRING (xcache_entry->related_objects[oid_index].lock),
1990  xcache_entry->related_objects[oid_index].tcard);
1991  }
1992  }
1993 
1994  /* TODO: add more */
1995 }
1996 
1997 /*
1998  * xcache_can_entry_cache_list () - Can entry cache list files?
1999  *
2000  * return : True/false.
2001  * xcache_entry (in) : XASL cache entry.
2002  */
2003 bool
2005 {
2006  if (!xcache_Enabled)
2007  {
2008  return false;
2009  }
2010  return (xcache_entry != NULL && (xcache_entry->xasl_id.cache_flag & XCACHE_ENTRY_FLAGS_MASK) == 0);
2011 }
2012 
2013 /*
2014  * xcache_clone_decache () - Free cached XASL clone resources.
2015  *
2016  * return : Void.
2017  * thread_p (in) : Thread entry.
2018  * xclone (in/out) : XASL cache clone.
2019  */
2020 static void
2022 {
2023  HL_HEAPID save_heapid = db_change_private_heap (thread_p, 0);
2025  qexec_clear_xasl (thread_p, xclone->xasl, true);
2026  free_xasl_unpack_info (thread_p, xclone->xasl_buf);
2027  xclone->xasl = NULL;
2028  (void) db_change_private_heap (thread_p, save_heapid);
2029 }
2030 
2031 /*
2032  * xcache_retire_clone () - Retire XASL clone. If clones caches are enabled, first try to cache it in xcache_entry.
2033  *
2034  * return : Void.
2035  * thread_p (in) : Thread entry.
2036  * xcache_entry (in) : XASL cache entry.
2037  * xclone (in) : XASL clone.
2038  */
2039 void
2040 xcache_retire_clone (THREAD_ENTRY * thread_p, XASL_CACHE_ENTRY * xcache_entry, XASL_CLONE * xclone)
2041 {
2042  /* Free XASL. Be sure that was already cleared to avoid memory leaks. */
2043  assert (xclone->xasl->status == XASL_CLEARED || xclone->xasl->status == XASL_INITIALIZED);
2044 
2045  if (xcache_uses_clones ())
2046  {
2047  pthread_mutex_lock (&xcache_entry->cache_clones_mutex);
2048  if (xcache_entry->n_cache_clones < xcache_Max_clones)
2049  {
2050  if (xcache_entry->n_cache_clones == xcache_entry->cache_clones_capacity
2051  && xcache_entry->cache_clones_capacity < xcache_Max_clones)
2052  {
2053  /* Extend cache clone buffer. */
2054  XASL_CLONE *new_clones = NULL;
2055  int new_capacity = MIN (xcache_Max_clones, xcache_entry->cache_clones_capacity * 2);
2056  if (xcache_entry->cache_clones == &xcache_entry->one_clone)
2057  {
2058  assert (xcache_entry->cache_clones_capacity == 1);
2059  new_clones = (XASL_CLONE *) malloc (new_capacity * sizeof (XASL_CLONE));
2060  if (new_clones != NULL)
2061  {
2062  new_clones[0].xasl = xcache_entry->cache_clones[0].xasl;
2063  new_clones[0].xasl_buf = xcache_entry->cache_clones[0].xasl_buf;
2064  }
2065  }
2066  else
2067  {
2068  new_clones = (XASL_CLONE *) realloc (xcache_entry->cache_clones, new_capacity * sizeof (XASL_CLONE));
2069  }
2070  if (new_clones == NULL)
2071  {
2072  /* Out of memory? */
2074  new_capacity * sizeof (XASL_CLONE));
2075  assert (false);
2076  pthread_mutex_unlock (&xcache_entry->cache_clones_mutex);
2077 
2078  /* Free the clone. */
2079  xcache_clone_decache (thread_p, xclone);
2080  return;
2081  }
2082  xcache_entry->cache_clones = new_clones;
2083  xcache_entry->cache_clones_capacity = new_capacity;
2084  }
2085  assert (xcache_entry->cache_clones_capacity > xcache_entry->n_cache_clones);
2086  xcache_entry->cache_clones[xcache_entry->n_cache_clones++] = *xclone;
2087  pthread_mutex_unlock (&xcache_entry->cache_clones_mutex);
2088 
2089  xclone->xasl = NULL;
2090  xclone->xasl_buf = NULL;
2091  return;
2092  }
2093  pthread_mutex_unlock (&xcache_entry->cache_clones_mutex);
2094 
2095  /* No more room. */
2096  xcache_clone_decache (thread_p, xclone);
2097  return;
2098  }
2099 
2100  free_xasl_unpack_info (thread_p, xclone->xasl_buf);
2101  xclone->xasl = NULL;
2102 }
2103 
2104 /*
2105  * xcache_cleanup () - Cleanup xasl cache when soft capacity is exceeded.
2106  *
2107  * return : Void.
2108  * thread_p (in) : Thread entry.
2109  */
2110 static void
/* NOTE(review): the extraction dropped line 2111 here; per the cross-reference index it is the
 * signature "xcache_cleanup (THREAD_ENTRY * thread_p)" -- confirm against the repository. */
2112 {
/* NOTE(review): line 2113 was dropped by the extraction; its content is unknown -- verify upstream. */
2114 
2115  xcache_hashmap_iterator iter = { thread_p, xcache_Hashmap };
2116  XASL_CACHE_ENTRY *xcache_entry = NULL;
2117  XCACHE_CLEANUP_CANDIDATE candidate;
2118  struct timeval current_time;
2119  int need_cleanup;
2120  int candidate_index;
2121  int count;
2122  int cleanup_count;
2123  BINARY_HEAP *bh = NULL;
2124  int save_max_capacity = 0;
2125 
2126  /* We can allow only one cleanup process at a time. There is no point in duplicating this work. Therefore, anyone
2127  * trying to do the cleanup should first try to set xcache_Cleanup_flag. */
2128  if (!ATOMIC_CAS_32 (&xcache_Cleanup_flag, 0, 1))
2129  {
2130  /* Somebody else does the cleanup. */
2131  return;
2132  }
2133 
2134  need_cleanup = xcache_need_cleanup ();
2135  if (need_cleanup == XCACHE_CLEANUP_NONE)
2136  {
2137  /* Already cleaned up. */
2138  if (!ATOMIC_CAS_32 (&xcache_Cleanup_flag, 1, 0))
2139  {
2140  assert_release (false);
2141  }
2142  return;
2143  }
2144 
2145  xcache_log ("cleanup start: entries = %d \n" XCACHE_LOG_TRAN_TEXT,
/* NOTE(review): line 2146 (the xcache_log argument line, presumably
 * "xcache_Entry_count, XCACHE_LOG_TRAN_ARGS (thread_p));") was dropped -- verify. */
2147 
2148  if (need_cleanup == XCACHE_CLEANUP_FULL) /* cleanup because there are too many entries */
2149  {
/* NOTE(review): line 2150 was dropped; cleanup_count is read below, so it is presumably
 * "cleanup_count = XCACHE_CLEANUP_NUM_ENTRIES (xcache_Soft_capacity);" -- verify. */
2151  /* Start cleanup. */
2152  perfmon_inc_stat (thread_p, PSTAT_PC_NUM_FULL);
2153 
2154  if (cleanup_count <= 0)
2155  {
2156  /* Not enough to cleanup */
2157  if (!ATOMIC_CAS_32 (&xcache_Cleanup_flag, 1, 0))
2158  {
2159  assert_release (false);
2160  }
2161  return;
2162  }
2163  /* Can we use preallocated binary heap? */
2164  if (cleanup_count <= xcache_Cleanup_bh->max_capacity)
2165  {
2166  bh = xcache_Cleanup_bh;
2167  /* Hack binary heap max capacity to the desired cleanup count. */
2168  save_max_capacity = bh->max_capacity;
2169  bh->max_capacity = cleanup_count;
2170  }
2171  else
2172  {
2173  /* We need a larger binary heap. */
2174  bh =
2175  bh_create (thread_p, cleanup_count, sizeof (XCACHE_CLEANUP_CANDIDATE), xcache_compare_cleanup_candidates,
2176  NULL);
2177  if (bh == NULL)
2178  {
2179  /* Not really expected */
2180  assert (false);
2181  if (!ATOMIC_CAS_32 (&xcache_Cleanup_flag, 1, 0))
2182  {
2183  assert_release (false);
2184  }
2185  return;
2186  }
2187  }
2188 
2189  /* The cleanup is a two-step process:
2190  * 1. Iterate through hash and select candidates for cleanup. The least recently used entries are sorted into a binary
2191  * heap.
2192  * NOTE: the binary heap does not story references to hash entries; it stores copies from the candidate keys and
2193  * last used timer of course to sort the candidates.
2194  * 2. Remove collected candidates from hash. Entries must be unfix and no flags must be set.
2195  */
2196 
2197  assert (bh->element_count == 0);
2198  bh->element_count = 0;
2199 
2200  /* Collect candidates for cleanup. */
2201  while ((xcache_entry = iter.iterate ()) != NULL)
2202  {
2203  candidate.xid = xcache_entry->xasl_id;
2204  candidate.xcache = xcache_entry;
/* Skip entries that are currently fixed (cache_flag low bits hold the fix count) or carry any
 * state flag (high bits: deleted / to-be-recompiled / recompiled / cleanup). */
2205  if (candidate.xid.cache_flag > 0 || (candidate.xid.cache_flag & XCACHE_ENTRY_FLAGS_MASK))
2206  {
2207  /* Either marked for delete or recompile, or already recompiled. Not a valid candidate. */
2208  continue;
2209  }
2210 
2211  (void) bh_try_insert (bh, &candidate, NULL);
2212  }
2213 
2214  count = bh->element_count;
2215  }
2216  else
2217  {
2218  /* Collect candidates for cleanup. */
2219  count = 0;
2220  gettimeofday (&current_time, NULL);
2221 
/* Timed cleanup path: collect unflagged, unfixed entries idle longer than xcache_Time_threshold
 * into the preallocated array, up to xcache_Soft_capacity entries. */
2222  while ((xcache_entry = iter.iterate ()) != NULL && count < xcache_Soft_capacity)
2223  {
2224  candidate.xid = xcache_entry->xasl_id;
2225  candidate.xcache = xcache_entry;
2226  if (candidate.xid.cache_flag > 0
2227  || (candidate.xid.cache_flag & XCACHE_ENTRY_FLAGS_MASK)
2228  || TIME_DIFF_SEC (current_time, candidate.xcache->time_last_used) <= xcache_Time_threshold)
2229  {
2230  continue;
2231  }
2232  xcache_Cleanup_array[count] = candidate;
2233  count++;
2234  }
2235  }
2236 
2237  xcache_log ("cleanup collected entries = %d \n" XCACHE_LOG_TRAN_TEXT, count, XCACHE_LOG_TRAN_ARGS (thread_p));
2238 
2239  /* Remove candidates from cache. */
2240  for (candidate_index = 0; candidate_index < count; candidate_index++)
2241  {
2242  if (need_cleanup == XCACHE_CLEANUP_FULL) /* binary heap for candidates */
2243  {
2244  /* Get candidate at candidate_index. */
2245  bh_element_at (bh, candidate_index, &candidate);
2246  }
2247  else
2248  {
2249  candidate = xcache_Cleanup_array[candidate_index];
2250  }
2251  /* Set intention to cleanup the entry. */
2252  candidate.xid.cache_flag = XCACHE_ENTRY_CLEANUP;
2253 
2254  /* clear list cache entries first */
2255  (void) qfile_clear_list_cache (thread_p, candidate.xcache->list_ht_no);
2256 
2257  /* Try delete. Would be better to decache the clones here. For simplicity, since is not an usual case,
2258  * clone decache is postponed - is decached when retired list will be cleared.
2259  */
2260  if (xcache_Hashmap.erase (thread_p, candidate.xid))
2261  {
2262  xcache_log ("cleanup: candidate was removed from hash"
/* NOTE(review): line 2263 (log format continuation, presumably
 * "XCACHE_LOG_XASL_ID_TEXT (...) XCACHE_LOG_TRAN_TEXT,") was dropped -- verify. */
2264  XCACHE_LOG_XASL_ID_ARGS (&candidate.xid), XCACHE_LOG_TRAN_ARGS (thread_p));
2265 
/* NOTE(review): lines 2266-2267 were dropped; presumably the deletes_at_cleanup statistic
 * increment and a perfmon counter update -- verify. */
2268  ATOMIC_INC_32 (&xcache_Entry_count, -1);
2269  }
2270  else
2271  {
2272  xcache_log ("cleanup: candidate was not removed from hash"
/* NOTE(review): line 2273 (log format continuation) was dropped -- verify. */
2274  XCACHE_LOG_XASL_ID_ARGS (&candidate.xid), XCACHE_LOG_TRAN_ARGS (thread_p));
2275  }
2276  }
2277  if (need_cleanup == XCACHE_CLEANUP_FULL)
2278  {
2279  /* Reset binary heap. */
2280  bh->element_count = 0;
2281 
2282  if (bh != xcache_Cleanup_bh)
2283  {
2284  /* Destroy bh */
2285  bh_destroy (thread_p, bh);
2286  }
2287  else
2288  {
2289  /* Reset binary heap max capacity. */
2290  xcache_Cleanup_bh->max_capacity = save_max_capacity;
2291  }
2292  }
2293 
2294  xcache_log ("cleanup finished: entries = %d \n"
/* NOTE(review): lines 2295 and 2297-2298 were dropped (log continuation/arguments and,
 * presumably, the cleanups statistic increment) -- verify. */
2296 
2299  gettimeofday (&xcache_Last_cleaned_time, NULL);
/* Release the single-cleaner flag taken at function entry. */
2300  if (!ATOMIC_CAS_32 (&xcache_Cleanup_flag, 1, 0))
2301  {
2302  assert_release (false);
2303  }
2304 }
2305 
2306 /*
2307  * xcache_compare_cleanup_candidates () - Compare cleanup candidates by their time_last_used. Oldest candidates are
2308  * considered "greater".
2309  *
2310  * return : BH_CMP_RESULT:
2311  * BH_GT if left is older.
2312  * BH_LT if right is older.
2313  * BH_EQ if left and right are equal.
2314  * left (in) : Left XCACHE cleanup candidate.
2315  * right (in) : Right XCACHE cleanup candidate.
2316  * ignore_arg (in) : Ignored.
2317  */
2318 static BH_CMP_RESULT
2319 xcache_compare_cleanup_candidates (const void *left, const void *right, BH_CMP_ARG ignore_arg)
2320 {
2321  struct timeval left_timeval = ((XCACHE_CLEANUP_CANDIDATE *) left)->xcache->time_last_used;
2322  struct timeval right_timeval = ((XCACHE_CLEANUP_CANDIDATE *) right)->xcache->time_last_used;
2323 
2324  /* Lesser means placed in binary heap. So return BH_LT for older timeval. */
2325  if (left_timeval.tv_sec < right_timeval.tv_sec)
2326  {
2327  return BH_LT;
2328  }
2329  else if (left_timeval.tv_sec == right_timeval.tv_sec)
2330  {
2331  return BH_EQ;
2332  }
2333  else
2334  {
2335  return BH_GT;
2336  }
2337 }
2338 
2339 /*
2340  * xcache_check_recompilation_threshold () - Check if one of the related classes suffered big changes and if we should
2341  * try to recompile the query.
2342  *
2343  * return : True to recompile query, false otherwise.
2344  * thread_p (in) : Thread entry.
2345  * xcache_entry (in) : XASL cache entry.
2346  */
2347 static bool
2349 {
2350  INT64 save_secs = xcache_entry->time_last_rt_check;
2351  struct timeval crt_time;
2352  int relobj;
2353  CLS_INFO *cls_info_p = NULL;
2354  int npages;
2355  bool recompile = false;
2356 
2357  (void) gettimeofday (&crt_time, NULL);
2358  if ((INT64) crt_time.tv_sec - xcache_entry->time_last_rt_check < XCACHE_RT_TIMEDIFF_IN_SEC)
2359  {
2360  /* Too soon. */
2361  return false;
2362  }
2363  if (!ATOMIC_CAS_64 (&xcache_entry->time_last_rt_check, save_secs, (INT64) crt_time.tv_sec))
2364  {
2365  /* Somebody else started the check. */
2366  return false;
2367  }
2368 
2369  if ((xcache_entry->xasl_id.cache_flag & XCACHE_ENTRY_RECOMPILED_REQUESTED) != 0)
2370  {
2371  xcache_log ("Unexpected flag found (recompile requested). Maybe the client preparing the XASL crashed !?: \n"
2373  XCACHE_LOG_ENTRY_ARGS (xcache_entry), XCACHE_LOG_TRAN_ARGS (thread_p));
2374 
2375  xcache_entry_set_request_recompile_flag (thread_p, xcache_entry, false);
2376  }
2377 
2378  for (relobj = 0; relobj < xcache_entry->n_related_objects; relobj++)
2379  {
2380  if (xcache_entry->related_objects[relobj].tcard < 0)
2381  {
2382  assert (xcache_entry->related_objects[relobj].tcard == XASL_CLASS_NO_TCARD
2383  || xcache_entry->related_objects[relobj].tcard == XASL_SERIAL_OID_TCARD);
2384  continue;
2385  }
2386 
2387  if (xcache_entry->related_objects[relobj].tcard >= XCACHE_RT_MAX_THRESHOLD)
2388  {
2389  continue;
2390  }
2391 
2392  cls_info_p = catalog_get_class_info (thread_p, &xcache_entry->related_objects[relobj].oid, NULL);
2393  if (cls_info_p == NULL)
2394  {
2395  /* Is this acceptable? */
2396  return false;
2397  }
2398  if (HFID_IS_NULL (&cls_info_p->ci_hfid))
2399  {
2400  /* Is this expected?? */
2401  catalog_free_class_info_and_init (cls_info_p);
2402  continue;
2403  }
2404  assert (!VFID_ISNULL (&cls_info_p->ci_hfid.vfid));
2405 
2407  {
2408  /* Consider recompiling the plan when statistic is updated. */
2409  npages = cls_info_p->ci_tot_pages;
2410  }
2411  else
2412  {
2413  /* Because statistics are automatically updated, number of real pages of file can be used */
2414  /* default of use_stat_estimation is 'false' because btree statistics estimations is so inaccurate. */
2415  if (file_get_num_user_pages (thread_p, &cls_info_p->ci_hfid.vfid, &npages) != NO_ERROR)
2416  {
2417  ASSERT_ERROR ();
2418  catalog_free_class_info_and_init (cls_info_p);
2419  return false;
2420  }
2421  }
2422  if (npages > XCACHE_RT_FACTOR * xcache_entry->related_objects[relobj].tcard
2423  || npages < xcache_entry->related_objects[relobj].tcard / XCACHE_RT_FACTOR)
2424  {
2425  bool try_recompile = true;
2426 
2427  if (XCACHE_RT_CLASS_STAT_NEED_UPDATE (cls_info_p->ci_tot_pages, npages))
2428  {
2429  cls_info_p->ci_time_stamp = stats_get_time_stamp ();
2430  if (catalog_update_class_info (thread_p, &xcache_entry->related_objects[relobj].oid, cls_info_p, NULL,
2431  true) == NULL)
2432  {
2433  try_recompile = false;
2434  }
2435  }
2436 
2437  if (try_recompile)
2438  {
2439  /* mark the entry as requst recompile, the client will request a prepare */
2440  if (xcache_entry_set_request_recompile_flag (thread_p, xcache_entry, true))
2441  {
2442  recompile = true;
2443  }
2444  }
2445  }
2446  catalog_free_class_info_and_init (cls_info_p);
2447  }
2448  return recompile;
2449 }
2450 
2451 /*
2452  * xcache_get_entry_count () - Returns the number of xasl cache entries
2453  *
2454  * return : the number of xasl cache entries
2455  */
2456 int
2458 {
2459  return xcache_Global.entry_count;
2460 }
2461 
2462 /*
2463  * xcache_uses_clones () - Check whether XASL clones are used
2464  *
2465  * return : True, if XASL clones are used, false otherwise
2466  */
2467 bool
2469 {
2470  return xcache_Max_clones > 0;
2471 }
#define XCACHE_RT_CLASS_STAT_NEED_UPDATE(class_pages, heap_pages)
Definition: xasl_cache.c:202
int stx_map_stream_to_xasl(THREAD_ENTRY *thread_p, xasl_node **xasl_tree, bool use_xasl_clone, char *xasl_stream, int xasl_stream_size, XASL_UNPACK_INFO **xasl_unpack_info_ptr)
#define ER_QPROC_XASLNODE_RECOMPILE_REQUESTED
Definition: error_code.h:1595
Definition: sha1.h:50
INT64 inserts
Definition: xasl_cache.c:80
XCACHE_CLEANUP_CANDIDATE * cleanup_array
Definition: xasl_cache.c:114
int ci_tot_pages
OID * oid_Root_class_oid
Definition: oid.c:73
cubthread::entry * thread_get_thread_entry_info(void)
#define NO_ERROR
Definition: error_code.h:46
int xcache_find_sha1(THREAD_ENTRY *thread_p, const SHA1Hash *sha1, const XASL_CACHE_SEARCH_MODE search_mode, XASL_CACHE_ENTRY **xcache_entry, xasl_cache_rt_check_result *rt_check)
Definition: xasl_cache.c:791
CLS_INFO * catalog_get_class_info(THREAD_ENTRY *thread_p, OID *class_id_p, CATALOG_ACCESS_INFO *catalog_access_info_p)
INT32 h[5]
Definition: sha1.h:52
INT64 miss
Definition: xasl_cache.c:72
#define XCACHE_ENTRY_FIX_COUNT_MASK
Definition: xasl_cache.c:54
static int xcache_entry_uninit(void *entry)
Definition: xasl_cache.c:476
#define LF_EM_NOT_USING_MUTEX
Definition: lock_free.h:59
XCACHE xcache_Global
Definition: xasl_cache.c:137
INT64 deletes
Definition: xasl_cache.c:75
xcache_hashmap_type::iterator xcache_hashmap_iterator
Definition: xasl_cache.c:97
XCACHE_RELATED_OBJECT * related_objects
Definition: xasl_cache.h:105
#define ASSERT_ERROR()
char * sql_plan_text
Definition: xasl_cache.h:78
#define XCACHE_CLEANUP_RATIO
Definition: xasl_cache.c:192
#define XCACHE_RT_TIMEDIFF_IN_SEC
Definition: xasl_cache.c:199
static int xcache_copy_key(void *src, void *dest)
Definition: xasl_cache.c:562
SHA1Hash sha1
#define pthread_mutex_init(a, b)
Definition: area_alloc.c:48
CLS_INFO * catalog_update_class_info(THREAD_ENTRY *thread_p, OID *class_id_p, CLS_INFO *class_info_p, CATALOG_ACCESS_INFO *catalog_access_info_p, bool skip_logging)
#define XCACHE_LOG_SHA1_TEXT
Definition: xasl_cache.c:218
#define XCACHE_ENTRY_WAS_RECOMPILED
Definition: xasl_cache.c:48
void bh_element_at(BINARY_HEAP *heap, int index, void *elem)
Definition: binaryheap.c:442
#define pthread_mutex_unlock(a)
Definition: area_alloc.c:51
#define XCACHE_LOG_XASL_ID_TEXT(msg)
Definition: xasl_cache.c:228
#define xcache_check_logging()
Definition: xasl_cache.c:207
bool xcache_uses_clones(void)
Definition: xasl_cache.c:2468
#define ER_QPROC_INVALID_XASLNODE
Definition: error_code.h:532
#define ASSERT_ERROR_AND_SET(error_code)
#define XCACHE_ENTRY_MARK_DELETED
Definition: xasl_cache.c:46
void thread_sleep(double millisec)
XCACHE_STATS stats
Definition: xasl_cache.c:116
Definition: lock_free.h:63
#define XCACHE_LOG_CLONE
Definition: xasl_cache.c:225
#define assert_release(e)
Definition: error_manager.h:96
unsigned int ci_time_stamp
int lock_object(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, int cond_flag)
xasl_cache_rt_check_result
Definition: xasl_cache.h:147
int element_count
Definition: binaryheap.h:71
#define XCACHE_STAT_GET(name)
Definition: xasl_cache.c:153
#define XCACHE_LOG_ENTRY_TEXT(msg)
Definition: xasl_cache.c:236
void xcache_retire_clone(THREAD_ENTRY *thread_p, XASL_CACHE_ENTRY *xcache_entry, XASL_CLONE *xclone)
Definition: xasl_cache.c:2040
volatile INT32 entry_count
Definition: xasl_cache.c:109
void * BH_CMP_ARG
Definition: binaryheap.h:40
void xcache_remove_by_oid(THREAD_ENTRY *thread_p, const OID *oid)
Definition: xasl_cache.c:1869
XASL_CLONE * cache_clones
Definition: xasl_cache.h:119
#define OID_AS_ARGS(oidp)
Definition: oid.h:39
bool enabled
Definition: xasl_cache.c:104
int xcache_insert(THREAD_ENTRY *thread_p, const compile_context *context, XASL_STREAM *stream, int n_oid, const OID *class_oids, const int *class_locks, const int *tcards, XASL_CACHE_ENTRY **xcache_entry)
Definition: xasl_cache.c:1350
static void xcache_cleanup(THREAD_ENTRY *thread_p)
Definition: xasl_cache.c:2111
HL_HEAPID db_change_private_heap(THREAD_ENTRY *thread_p, HL_HEAPID heap_id)
Definition: memory_alloc.c:337
static XCACHE_CLEANUP_REASON xcache_need_cleanup(void)
Definition: xasl_cache.c:1317
#define xcache_Cleanup_flag
Definition: xasl_cache.c:148
#define XCACHE_LOG_ENTRY_OBJECT_TEXT(msg)
Definition: xasl_cache.c:253
INT64 unfix
Definition: xasl_cache.c:79
XASL_CLONE one_clone
Definition: xasl_cache.h:120
#define XCACHE_STAT_INC(name)
Definition: xasl_cache.c:154
INT64 deletes_at_cleanup
Definition: xasl_cache.c:77
#define XCACHE_LOG_XASL_ID_ARGS(xid)
Definition: xasl_cache.c:232
enum xcache_cleanup_reason XCACHE_CLEANUP_REASON
Definition: xasl_cache.h:56
#define VFID_ISNULL(vfid_ptr)
Definition: file_manager.h:72
void THREAD_ENTRY
#define SHA1_AS_ARGS(sha1)
Definition: sha1.h:56
XASL_NODE * xasl
Definition: xasl_cache.h:65
bool xcache_can_entry_cache_list(XASL_CACHE_ENTRY *xcache_entry)
Definition: xasl_cache.c:2004
XASL_STREAM stream
Definition: xasl_cache.h:94
LOCK
int xcache_invalidate_qcaches(THREAD_ENTRY *thread_p, const OID *oid)
Definition: xasl_cache.c:1689
static bool xcache_entry_mark_deleted(THREAD_ENTRY *thread_p, XASL_CACHE_ENTRY *xcache_entry)
Definition: xasl_cache.c:1180
INT64 rt_true
Definition: xasl_cache.c:83
LF_TRAN_SYSTEM xcache_Ts
Definition: lock_free.c:54
int xcache_initialize(THREAD_ENTRY *thread_p)
Definition: xasl_cache.c:280
void er_set(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
INT32 cleanup_flag
Definition: xasl_cache.c:112
void bh_destroy(THREAD_ENTRY *thread_p, BINARY_HEAP *heap)
Definition: binaryheap.c:157
#define XASL_SET_FLAG(x, f)
Definition: xasl.h:496
int max_clones
Definition: xasl_cache.c:111
#define XASL_ID_COPY(X1, X2)
Definition: xasl.h:562
#define assert(x)
#define xcache_Cleanup_array
Definition: xasl_cache.c:150
#define XCACHE_LOG_ENTRY_ARGS(xent)
Definition: xasl_cache.c:243
void init_clone_cache()
Definition: xasl_cache.c:384
int file_get_num_user_pages(THREAD_ENTRY *thread_p, const VFID *vfid, int *n_user_pages_out)
int prm_get_integer_value(PARAM_ID prm_id)
INT64 time_last_rt_check
Definition: xasl_cache.h:126
#define ER_GENERIC_ERROR
Definition: error_code.h:49
CACHE_TIME time_stored
#define ER_OUT_OF_VIRTUAL_MEMORY
Definition: error_code.h:50
#define xcache_log_error(...)
Definition: xasl_cache.c:209
#define XASL_CLASS_NO_TCARD
Definition: xasl.h:1068
static int xcache_compare_key(void *key1, void *key2)
Definition: xasl_cache.c:587
int xcache_find_xasl_id_for_execute(THREAD_ENTRY *thread_p, const XASL_ID *xid, XASL_CACHE_ENTRY **xcache_entry, XASL_CLONE *xclone)
Definition: xasl_cache.c:888
#define xcache_log(...)
Definition: xasl_cache.c:208
void xcache_finalize(THREAD_ENTRY *thread_p)
Definition: xasl_cache.c:344
void xcache_unfix(THREAD_ENTRY *thread_p, XASL_CACHE_ENTRY *xcache_entry)
Definition: xasl_cache.c:1083
bool logging_enabled
Definition: xasl_cache.c:110
#define xcache_Hashmap
Definition: xasl_cache.c:144
#define xcache_Soft_capacity
Definition: xasl_cache.c:141
unsigned int stats_get_time_stamp(void)
#define XCACHE_DELETE_XIDS_SIZE
#define XCACHE_ENTRY_RECOMPILED_REQUESTED
Definition: xasl_cache.c:51
#define XCACHE_PTR_TO_KEY(ptr)
Definition: xasl_cache.c:63
bool free_data_on_uninit
Definition: xasl_cache.h:116
XASL_ID xasl_id
Definition: xasl_cache.h:92
INT64 cleanups
Definition: xasl_cache.c:76
#define OID_EQ(oidp1, oidp2)
Definition: oid.h:92
#define XCACHE_STATS_INITIALIZER
Definition: xasl_cache.c:85
int qexec_clear_xasl(THREAD_ENTRY *thread_p, xasl_node *xasl, bool is_final)
VFID vfid
XASL_CACHE_ENTRY * xcache
Definition: xasl_cache.c:92
#define LOCK_TO_LOCKMODE_STRING(lock)
#define NULL
Definition: freelistheap.h:34
static int xcache_entry_init(void *entry)
Definition: xasl_cache.c:446
static void xcache_clone_decache(THREAD_ENTRY *thread_p, XASL_CLONE *xclone)
Definition: xasl_cache.c:2021
#define XCACHE_ENTRY_CLEANUP
Definition: xasl_cache.c:50
#define XASL_DECACHE_CLONE
Definition: xasl.h:489
#define XCACHE_ENTRY_TO_BE_RECOMPILED
Definition: xasl_cache.c:47
static unsigned int xcache_hash_key(void *key, int hash_table_size)
Definition: xasl_cache.c:741
static bool xcache_entry_is_related_to_oid(XASL_CACHE_ENTRY *xcache_entry, const OID *related_to_oid)
Definition: xasl_cache.c:1842
int max_capacity
Definition: binaryheap.h:69
INT64 lookups
Definition: xasl_cache.c:70
#define XCACHE_LOG_CLONE_ARGS(xclone)
Definition: xasl_cache.c:248
#define TIME_DIFF_SEC(t1, t2)
Definition: xasl_cache.c:156
#define CACHE_TIME_MAKE(CT, TV)
Definition: cache_time.h:48
INT64 failed_recompiles
Definition: xasl_cache.c:74
int count(int &result, const cub_regex_object &reg, const std::string &src, const int position, const INTL_CODESET codeset)
EXECUTION_INFO sql_info
Definition: xasl_cache.h:103
#define XCACHE_CLEANUP_NUM_ENTRIES(capacity)
Definition: xasl_cache.c:194
#define max(a, b)
#define xcache_Cleanup_bh
Definition: xasl_cache.c:149
static int xcache_entry_free(void *entry)
Definition: xasl_cache.c:424
void free_xasl_unpack_info(THREAD_ENTRY *thread_p, REFPTR(XASL_UNPACK_INFO, xasl_unpack_info))
static void error(const char *msg)
Definition: gencat.c:331
STATIC_INLINE void perfmon_inc_stat(THREAD_ENTRY *thread_p, PERF_STAT_ID psid) __attribute__((ALWAYS_INLINE))
int n_related_objects
Definition: xasl_cache.h:110
#define HFID_IS_NULL(hfid)
bool qfile_has_no_cache_entries()
Definition: list_file.c:6494
#define ARG_FILE_LINE
Definition: error_manager.h:44
BINARY_HEAP * cleanup_bh
Definition: xasl_cache.c:113
int xcache_get_entry_count(void)
Definition: xasl_cache.c:2457
INT64 recompiles
Definition: xasl_cache.c:73
char * sql_hash_text
Definition: xasl_cache.h:76
static bool xcache_entry_set_request_recompile_flag(THREAD_ENTRY *thread_p, XASL_CACHE_ENTRY *xcache_entry, bool set_flag)
Definition: xasl_cache.c:1245
enum xasl_cache_search_mode XASL_CACHE_SEARCH_MODE
Definition: xasl_cache.h:145
#define free_and_init(ptr)
Definition: memory_alloc.h:147
#define strlen(s1)
Definition: intl_support.c:43
BH_TRY_INSERT_RESULT bh_try_insert(BINARY_HEAP *heap, void *elem, void *replaced)
Definition: binaryheap.c:249
int qfile_get_list_cache_number_of_entries(int ht_no)
Definition: list_file.c:6486
int cache_clones_capacity
Definition: xasl_cache.h:122
static void * xcache_entry_alloc(void)
Definition: xasl_cache.c:400
void xcache_dump(THREAD_ENTRY *thread_p, FILE *fp)
Definition: xasl_cache.c:1911
INT32 cache_flag
#define QFILE_IS_LIST_CACHE_DISABLED
Definition: list_file.h:54
bool prm_get_bool_value(PARAM_ID prm_id)
BINARY_HEAP * bh_create(THREAD_ENTRY *thread_p, int max_capacity, int elem_size, bh_key_comparator cmp_func, BH_CMP_ARG cmp_arg)
Definition: binaryheap.c:114
#define XCACHE_LOG_TRAN_ARGS(thrd)
Definition: xasl_cache.c:212
int time_threshold
Definition: xasl_cache.c:107
#define XCACHE_ENTRY_SKIP_TO_BE_RECOMPILED
Definition: xasl_cache.c:49
INT64 rt_checks
Definition: xasl_cache.c:82
XASL_ID * xasl_id
Definition: xasl.h:610
#define XCACHE_RT_MAX_THRESHOLD
Definition: xasl_cache.c:200
INT64 hits
Definition: xasl_cache.c:71
INT64 found_at_insert
Definition: xasl_cache.c:81
int soft_capacity
Definition: xasl_cache.c:105
static bool xcache_check_recompilation_threshold(THREAD_ENTRY *thread_p, XASL_CACHE_ENTRY *xcache_entry)
Definition: xasl_cache.c:2348
xasl_unpack_info * xasl_buf
Definition: xasl_cache.h:64
char * buffer
Definition: xasl.h:613
#define xcache_Enabled
Definition: xasl_cache.c:140
int SHA1Compare(void *a, void *b)
Definition: sha1.c:400
#define xcache_Max_clones
Definition: xasl_cache.c:147
static BH_CMP_RESULT xcache_compare_cleanup_candidates(const void *left, const void *right, BH_CMP_ARG ignore_arg)
Definition: xasl_cache.c:2319
#define XCACHE_LOG_SHA1_ARGS(sha1)
Definition: xasl_cache.c:219
#define pthread_mutex_lock(a)
Definition: area_alloc.c:50
#define catalog_free_class_info_and_init(class_info_p)
#define XCACHE_LOG_ENTRY_OBJECT_ARGS(xent, oidx)
Definition: xasl_cache.c:256
int qmgr_get_sql_id(THREAD_ENTRY *thread_p, char **sql_id_buf, char *query, size_t sql_len)
char * sql_user_text
Definition: xasl_cache.h:77
#define XCACHE_ATOMIC_CAS_CACHE_FLAG(xid, oldcf, newcf)
Definition: xasl_cache.c:189
xcache_hashmap_type hashmap
Definition: xasl_cache.c:108
#define xcache_Entry_count
Definition: xasl_cache.c:145
static void xcache_invalidate_entries(THREAD_ENTRY *thread_p, bool(*invalidate_check)(XASL_CACHE_ENTRY *, const OID *), const OID *arg)
Definition: xasl_cache.c:1747
static LF_ENTRY_DESCRIPTOR xcache_Entry_descriptor
Definition: xasl_cache.c:169
struct timeval time_last_used
Definition: xasl_cache.h:111
#define XASL_ID_SET_NULL(X)
Definition: xasl.h:546
pthread_mutex_t cache_clones_mutex
Definition: xasl_cache.h:123
#define xcache_Time_threshold
Definition: xasl_cache.c:142
#define XASL_SERIAL_OID_TCARD
Definition: xasl.h:1069
#define XCACHE_ENTRY_FLAGS_MASK
Definition: xasl_cache.c:52
HFID ci_hfid
void xcache_drop_all(THREAD_ENTRY *thread_p)
Definition: xasl_cache.c:1890
BH_CMP_RESULT
Definition: binaryheap.h:42
#define XCACHE_ENTRY_DELETED_BY_ME
Definition: xasl_cache.c:60
int qfile_clear_list_cache(THREAD_ENTRY *thread_p, int list_ht_no)
Definition: list_file.c:5053
#define XCACHE_PTR_TO_ENTRY(ptr)
Definition: xasl_cache.c:64
#define XCACHE_LOG_TRAN_TEXT
Definition: xasl_cache.c:211
#define xcache_Last_cleaned_time
Definition: xasl_cache.c:143
#define pthread_mutex_destroy(a)
Definition: area_alloc.c:49
#define XCACHE_RT_FACTOR
Definition: xasl_cache.c:201