CUBRID Engine  latest
lock_manager.c
Go to the documentation of this file.
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /*
20  * lock_manager.c - lock management module (at the server)
21  */
22 
23 #ident "$Id$"
24 
25 #include "config.h"
26 
27 #include <array>
28 #include <assert.h>
29 #if defined(SOLARIS)
30 #include <netdb.h>
31 #endif /* SOLARIS */
32 #include <stdio.h>
33 #include <string.h>
34 #include <time.h>
35 
36 #include "boot_sr.h"
37 #include "critical_section.h"
38 #include "environment_variable.h"
39 #include "event_log.h"
40 #include "locator.h"
41 #include "lock_free.h"
42 #include "lock_manager.h"
43 #include "log_impl.h"
44 #include "log_manager.h"
45 #include "memory_alloc.h"
46 #include "memory_hash.h"
47 #include "message_catalog.h"
48 #include "mvcc.h"
50 #include "oid.h"
51 #include "page_buffer.h"
52 #include "perf_monitor.h"
53 #include "porting.h"
54 #if defined(ENABLE_SYSTEMTAP)
55 #include "probes.h"
56 #endif /* ENABLE_SYSTEMTAP */
57 #include "query_manager.h"
58 #include "server_support.h"
59 #include "storage_common.h"
60 #include "system_parameter.h"
61 #include "thread_daemon.hpp"
62 #include "thread_entry_task.hpp"
64 #include "thread_manager.hpp"
65 #include "transaction_sr.h"
66 #include "tsc_timer.h"
67 #include "wait_for_graph.h"
68 #include "xserver_interface.h"
69 #include "xasl.h"
70 
71 #include <array>
72 
73 extern LOCK_COMPATIBILITY lock_Comp[12][12];
74 
75 #if defined (SERVER_MODE)
76 /* object lock hash function */
77 #define LK_OBJ_LOCK_HASH(oid,htsize) \
78  ((OID_ISTEMP(oid)) ? (unsigned int)(-((oid)->pageid) % htsize) :\
79  lock_get_hash_value(oid, htsize))
80 
81 /* thread is lock-waiting ? */
82 #define LK_IS_LOCKWAIT_THREAD(thrd) \
83  ((thrd)->lockwait != NULL \
84  && (thrd)->lockwait_state == (int) LOCK_SUSPENDED)
85 
86 /* transaction wait for only some msecs ? */
87 #define LK_CAN_TIMEOUT(msecs) ((msecs) != LK_INFINITE_WAIT)
88 
89 /* is younger transaction ? */
90 #define LK_ISYOUNGER(young_tranid, old_tranid) (young_tranid > old_tranid)
91 
92 /* Defines for printing lock activity messages */
93 #define LK_MSG_LOCK_HELPER(entry, msgnum) \
94  fprintf(stdout, \
95  msgcat_message (MSGCAT_CATALOG_CUBRID, MSGCAT_SET_LOCK, msgnum)), \
96  (entry)->tran_index, LOCK_TO_LOCKMODE_STRING((entry)->granted_mode), \
97  (entry)->res_head->oid->volid, (entry)->res_head->oid->pageid, \
98  (entry)->oid->slotid)
99 
100 #define LK_MSG_LOCK_ACQUIRED(entry) \
101  LK_MSG_LOCK_HELPER(entry, MSGCAT_LK_OID_LOCK_ACQUIRED)
102 
103 #define LK_MSG_LOCK_CONVERTED(entry) \
104  LK_MSG_LOCK_HELPER(entry, MSGCAT_LK_OID_LOCK_CONVERTED)
105 
106 #define LK_MSG_LOCK_WAITFOR(entry) \
107  LK_MSG_LOCK_HELPER(entry, MSGCAT_LK_OID_LOCK_WAITFOR)
108 
109 #define LK_MSG_LOCK_RELEASE(entry) \
110  LK_MSG_LOCK_HELPER(entry, MSGCAT_LK_OID_LOCK_RELEASE)
111 
112 #define LK_MSG_LOCK_DEMOTE(entry) \
113  LK_MSG_LOCK_HELPER(entry, MSGCAT_LK_OID_LOCK_DEMOTE)
114 
115 #define EXPAND_WAIT_FOR_ARRAY_IF_NEEDED() \
116  do \
117  { \
118  if (nwaits == max_waits) \
119  { \
120  if (wait_for == wait_for_buf) \
121  { \
122  t = (int *) malloc (sizeof (int) * max_waits * 2); \
123  if (t != NULL) \
124  { \
125  memcpy (t, wait_for, sizeof (int) * max_waits); \
126  } \
127  } \
128  else \
129  { \
130  t = (int *) realloc (wait_for, sizeof (int) * max_waits * 2); \
131  } \
132  if (t != NULL) \
133  { \
134  wait_for = t; \
135  max_waits *= 2; \
136  } \
137  else \
138  { \
139  goto set_error; \
140  } \
141  } \
142  } \
143  while (0)
144 
145 #define SET_EMULATE_THREAD_WITH_LOCK_ENTRY(th,lock_entry) \
146  do \
147  { \
148  THREAD_ENTRY *locked_thread_entry_p; \
149  assert ((th)->emulate_tid == thread_id_t ()); \
150  locked_thread_entry_p = logtb_find_thread_by_tran_index ((lock_entry)->tran_index); \
151  if (locked_thread_entry_p != NULL) \
152  { \
153  (th)->emulate_tid = locked_thread_entry_p->get_id (); \
154  } \
155  } \
156  while (0)
157 
158 #define CLEAR_EMULATE_THREAD(th) \
159  do \
160  { \
161  (th)->emulate_tid = thread_id_t (); \
162  } \
163  while (0)
164 
165 #endif /* SERVER_MODE */
166 
167 #define RESOURCE_ALLOC_WAIT_TIME 10 /* 10 msec */
168 #define KEY_LOCK_ESCALATION_THRESHOLD 10 /* key lock escalation threshold */
169 #define MAX_NUM_LOCKS_DUMP_TO_EVENT_LOG 100
170 
171 /* state of suspended threads */
172 typedef enum
173 {
174  LOCK_SUSPENDED, /* Thread has been suspended */
175  LOCK_RESUMED, /* Thread has been resumed */
176  LOCK_RESUMED_TIMEOUT, /* Thread has been resumed and notified of lock timeout */
177  LOCK_RESUMED_DEADLOCK_TIMEOUT, /* Thread has been resumed and notified of lock timeout because the current
178  * transaction is selected as a deadlock victim */
179  LOCK_RESUMED_ABORTED, /* Thread has been resumed, however it must be aborted because of a deadlock */
180  LOCK_RESUMED_ABORTED_FIRST, /* in case of the first aborted thread */
181  LOCK_RESUMED_ABORTED_OTHER, /* in case of other aborted threads */
184 
185 /*
186  * Message id in the set MSGCAT_SET_LOCK
187  * in the message catalog MSGCAT_CATALOG_CUBRID (file cubrid.msg).
188  */
189 #define MSGCAT_LK_NEWLINE 1
190 #define MSGCAT_LK_SUSPEND_TRAN 2
191 #define MSGCAT_LK_RESUME_TRAN 3
192 #define MSGCAT_LK_OID_LOCK_ACQUIRED 4
193 #define MSGCAT_LK_VPID_LOCK_ACQUIRED 5
194 #define MSGCAT_LK_OID_LOCK_CONVERTED 6
195 #define MSGCAT_LK_VPID_LOCK_CONVERTED 7
196 #define MSGCAT_LK_OID_LOCK_WAITFOR 8
197 #define MSGCAT_LK_VPID_LOCK_WAITFOR 9
198 #define MSGCAT_LK_OID_LOCK_RELEASE 10
199 #define MSGCAT_LK_VPID_LOCK_RELEASE 11
200 #define MSGCAT_LK_OID_LOCK_DEMOTE 12
201 #define MSGCAT_LK_VPID_LOCK_DEMOTE 13
202 #define MSGCAT_LK_RES_OID 14
203 #define MSGCAT_LK_RES_ROOT_CLASS_TYPE 15
204 #define MSGCAT_LK_RES_CLASS_TYPE 16
205 #define MSGCAT_LK_RES_INSTANCE_TYPE 17
206 #define MSGCAT_LK_RES_UNKNOWN_TYPE 18
207 #define MSGCAT_LK_RES_TOTAL_MODE 19
208 #define MSGCAT_LK_RES_LOCK_COUNT 20
209 #define MSGCAT_LK_RES_NON_BLOCKED_HOLDER_HEAD 21
210 #define MSGCAT_LK_RES_BLOCKED_HOLDER_HEAD 22
211 #define MSGCAT_LK_RES_BLOCKED_WAITER_HEAD 23
212 #define MSGCAT_LK_RES_NON2PL_RELEASED_HEAD 24
213 #define MSGCAT_LK_RES_NON_BLOCKED_HOLDER_ENTRY 25
214 #define MSGCAT_LK_RES_NON_BLOCKED_HOLDER_ENTRY_WITH_GRANULE 26
215 #define MSGCAT_LK_RES_BLOCKED_HOLDER_ENTRY 27
216 #define MSGCAT_LK_RES_BLOCKED_HOLDER_ENTRY_WITH_GRANULE 28
217 #define MSGCAT_LK_RES_BLOCKED_WAITER_ENTRY 29
218 #define MSGCAT_LK_RES_NON2PL_RELEASED_ENTRY 30
219 #define MSGCAT_LK_RES_VPID 31
220 #define MSGCAT_LK_DUMP_LOCK_TABLE 32
221 #define MSGCAT_LK_DUMP_TRAN_IDENTIFIERS 33
222 #define MSGCAT_LK_DUMP_TRAN_ISOLATION 34
223 #define MSGCAT_LK_DUMP_TRAN_STATE 35
224 #define MSGCAT_LK_DUMP_TRAN_TIMEOUT_PERIOD 36
225 #define MSGCAT_LK_DEADLOCK_ABORT_HDR 37
226 #define MSGCAT_LK_DEADLOCK_ABORT 38
227 #define MSGCAT_LK_DEADLOCK_TIMEOUT_HDR 39
228 #define MSGCAT_LK_DEADLOCK_TIMEOUT 40
229 #define MSGCAT_LK_DEADLOCK_FUN_HDR 41
230 #define MSGCAT_LK_DEADLOCK_FUN 42
231 #define MSGCAT_LK_RES_INDEX_KEY_TYPE 43
232 #define MSGCAT_LK_INDEXNAME 44
233 #define MSGCAT_LK_RES_RR_TYPE 45
234 #define MSGCAT_LK_MVCC_INFO 46
235 #define MSGCAT_LK_LASTONE 47
236 
237 #if defined(SERVER_MODE)
238 
239 typedef struct lk_lockinfo LK_LOCKINFO;
240 struct lk_lockinfo
241 {
242  OID *org_oidp;
243  OID oid;
244  OID class_oid;
245  LOCK lock;
246 };
247 
248 
249 /* TWFG (transaction wait-for graph) entry and edge */
250 typedef struct lk_WFG_node LK_WFG_NODE;
251 struct lk_WFG_node
252 {
253  int first_edge;
254  bool candidate;
255  int current;
256  int ancestor;
257  INT64 thrd_wait_stime;
258  int tran_edge_seq_num;
259  bool checked_by_deadlock_detector;
260  bool DL_victim;
261 };
262 
263 typedef struct lk_WFG_edge LK_WFG_EDGE;
264 struct lk_WFG_edge
265 {
266  int to_tran_index;
267  int edge_seq_num;
268  int holder_flag;
269  int next;
270  INT64 edge_wait_stime;
271 };
272 
273 typedef struct lk_deadlock_victim LK_DEADLOCK_VICTIM;
274 struct lk_deadlock_victim
275 {
276  /* following two fields are used for only global deadlock detection */
277  int (*cycle_fun) (int tran_index, void *args);
278  void *args; /* Arguments to be passed to cycle_fun */
279 
280  int tran_index; /* Index of selected victim */
281  TRANID tranid; /* Transaction identifier */
282  bool can_timeout; /* Is abort or timeout */
283 
284  int num_trans_in_cycle; /* # of transaction in cycle */
285  int *tran_index_in_cycle; /* tran_index array for transaction in cycle */
286 };
287 
288 /*
289  * Lock Entry Block Structure
290  */
291 typedef struct lk_entry_block LK_ENTRY_BLOCK;
292 struct lk_entry_block
293 {
294  LK_ENTRY_BLOCK *next_block; /* next lock entry block */
295  LK_ENTRY *block; /* lk_entry block */
296  int count; /* # of entries in lock entry block */
297 };
298 
299 /*
300  * Lock Resource Block Structure
301  */
302 typedef struct lk_res_block LK_RES_BLOCK;
303 struct lk_res_block
304 {
305  LK_RES_BLOCK *next_block; /* next lock resource block */
306  LK_RES *block; /* lk_res block */
307  int count; /* # of entries in lock res block */
308 };
309 
310 /*
311  * Transaction Lock Entry Structure
312  */
313 typedef struct lk_tran_lock LK_TRAN_LOCK;
314 struct lk_tran_lock
315 {
316  /* transaction lock hold lists */
317  pthread_mutex_t hold_mutex; /* mutex for hold lists */
318  LK_ENTRY *inst_hold_list; /* instance lock hold list */
319  LK_ENTRY *class_hold_list; /* class lock hold list */
320  LK_ENTRY *root_class_hold; /* root class lock hold */
321  LK_ENTRY *lk_entry_pool; /* local pool of lock entries which can be used with no synchronization. */
322  int lk_entry_pool_count; /* Current count of lock entries in local pool. */
323  int inst_hold_count; /* # of entries in inst_hold_list */
324  int class_hold_count; /* # of entries in class_hold_list */
325 
326  LK_ENTRY *waiting; /* waiting lock entry */
327 
328  /* non two phase lock list */
329  pthread_mutex_t non2pl_mutex; /* mutex for non2pl_list */
330  LK_ENTRY *non2pl_list; /* non2pl list */
331  int num_incons_non2pl; /* # of inconsistent non2pl */
332 
333  /* lock escalation related fields */
334  bool lock_escalation_on;
335 
336  /* locking on manual duration */
337  bool is_instant_duration;
338 };
339 /* Max size of transaction local pool of lock entries. */
340 #define LOCK_TRAN_LOCAL_POOL_MAX_SIZE 10
341 
342 /*
343  * Lock Manager Global Data Structure
344  */
345 // *INDENT-OFF*
347 using lk_hashmap_iterator = lk_hashmap_type::iterator;
348 // *INDENT-ON*
349 
350 typedef struct lk_global_data LK_GLOBAL_DATA;
351 struct lk_global_data
352 {
353  /* object lock table including hash table */
354  int max_obj_locks; /* max # of object locks */
355 
356  lk_hashmap_type m_obj_hash_table;
357  LF_FREELIST obj_free_entry_list;
358 
359  /* transaction lock table */
360  int num_trans; /* # of transactions */
361  LK_TRAN_LOCK *tran_lock_table; /* transaction lock hold table */
362 
363  /* deadlock detection related fields */
364  pthread_mutex_t DL_detection_mutex;
365  struct timeval last_deadlock_run; /* last deadlock detection time */
366  LK_WFG_NODE *TWFG_node; /* transaction WFG node */
367  LK_WFG_EDGE *TWFG_edge; /* transaction WFG edge */
368  int max_TWFG_edge;
369  int TWFG_free_edge_idx;
370  int global_edge_seq_num;
371 
372  /* miscellaneous things */
373  short no_victim_case_count;
374  bool verbose_mode;
375  // *INDENT-OFF*
376  std::atomic_int deadlock_and_timeout_detector;
377  // *INDENT-ON*
378 #if defined(LK_DUMP)
379  bool dump_level;
380 #endif /* LK_DUMP */
381 
382  // *INDENT-OFF*
383  lk_global_data ()
384  : max_obj_locks (0)
385  , m_obj_hash_table {}
386  , obj_free_entry_list LF_FREELIST_INITIALIZER
387  , num_trans (0)
388  , tran_lock_table (NULL)
389  , DL_detection_mutex PTHREAD_MUTEX_INITIALIZER
390  , last_deadlock_run { 0, 0 }
391  , TWFG_node (NULL)
392  , TWFG_edge (NULL)
393  , max_TWFG_edge (0)
394  , TWFG_free_edge_idx (0)
395  , global_edge_seq_num (0)
396  , no_victim_case_count (0)
397  , verbose_mode (false)
398  , deadlock_and_timeout_detector { 0 }
399 #if defined(LK_DUMP)
400  , dump_level (0)
401 #endif
402  {
403  }
404  // *INDENT-ON*
405 };
406 
407 LK_GLOBAL_DATA lk_Gl;
408 
409 /* size of each data structure */
410 static const int SIZEOF_LK_LOCKINFO = sizeof (LK_LOCKINFO);
411 static const int SIZEOF_LK_WFG_NODE = sizeof (LK_WFG_NODE);
412 static const int SIZEOF_LK_WFG_EDGE = sizeof (LK_WFG_EDGE);
413 static const int SIZEOF_LK_TRAN_LOCK = sizeof (LK_TRAN_LOCK);
414 
415 static const int SIZEOF_LK_RES = sizeof (LK_RES);
416 static const int SIZEOF_LK_ENTRY_BLOCK = sizeof (LK_ENTRY_BLOCK);
417 static const int SIZEOF_LK_RES_BLOCK = sizeof (LK_RES_BLOCK);
418 static const int SIZEOF_LK_ACQOBJ_LOCK = sizeof (LK_ACQOBJ_LOCK);
419 
420 /* minimum # of locks that are required */
421 /* TODO : change const */
422 #define LK_MIN_OBJECT_LOCKS (MAX_NTRANS * 300)
423 
424 /* the ratio in the number of lock entries for each entry type */
425 static const float LK_RES_RATIO = 0.1f;
426 static const float LK_ENTRY_RATIO = 0.1f;
427 
428 /* the lock entry expansion count */
429 /* TODO : change const */
430 #define LK_MORE_RES_COUNT (MAX_NTRANS * 20 * LK_RES_RATIO)
431 #define LK_MORE_ENTRY_COUNT (MAX_NTRANS * 20 * LK_ENTRY_RATIO)
432 
433 /* miscellaneous constants */
434 static const int LK_SLEEP_MAX_COUNT = 3;
435 #define LK_LOCKINFO_FIXED_COUNT 30
436 /* TODO : change const */
437 #define LK_MAX_VICTIM_COUNT 300
438 
439 /* transaction WFG edge related constants */
440 static const int LK_MIN_TWFG_EDGE_COUNT = 200;
441 /* TODO : change const */
442 #define LK_MID_TWFG_EDGE_COUNT 1000
443 /* TODO : change const */
444 #define LK_MAX_TWFG_EDGE_COUNT (MAX_NTRANS * MAX_NTRANS)
445 
446 #define DEFAULT_WAIT_USERS 10
447 static const int LK_COMPOSITE_LOCK_OID_INCREMENT = 100;
448 #endif /* SERVER_MODE */
449 
450 #if defined(SERVER_MODE)
451 
452 static LK_WFG_EDGE TWFG_edge_block[LK_MID_TWFG_EDGE_COUNT];
453 static LK_DEADLOCK_VICTIM victims[LK_MAX_VICTIM_COUNT];
454 static int victim_count;
455 #else /* !SERVER_MODE */
456 static int lk_Standalone_has_xlock = 0;
457 #define LK_SET_STANDALONE_XLOCK(lock) \
458  do { \
459  if ((lock) == SCH_M_LOCK || (lock) == X_LOCK || lock == IX_LOCK \
460  || lock == SIX_LOCK) \
461  { \
462  lk_Standalone_has_xlock = true; \
463  } \
464  } while (0)
465 #endif /* !SERVER_MODE */
466 
468 // *INDENT-OFF*
469 using tran_lock_waiters_array_type = std::array<THREAD_ENTRY *, DEFAULT_LOCK_WAITING_THREAD_ARRAY_SIZE>;
470 // *INDENT-ON*
471 
472 #if defined(SERVER_MODE)
473 static void lock_initialize_entry (LK_ENTRY * entry_ptr);
474 static void lock_initialize_entry_as_granted (LK_ENTRY * entry_ptr, int tran_index, LK_RES * res, LOCK lock);
475 static void lock_initialize_entry_as_blocked (LK_ENTRY * entry_ptr, THREAD_ENTRY * thread_p, int tran_index,
476  LK_RES * res, LOCK lock);
477 static void lock_initialize_entry_as_non2pl (LK_ENTRY * entry_ptr, int tran_index, LK_RES * res, LOCK lock);
478 static void lock_initialize_resource (LK_RES * res_ptr);
479 static void lock_initialize_resource_as_allocated (LK_RES * res_ptr, LOCK lock);
480 static unsigned int lock_get_hash_value (const OID * oid, int htsize);
481 static int lock_initialize_tran_lock_table (void);
482 static void lock_initialize_object_hash_table (void);
483 static int lock_initialize_object_lock_entry_list (void);
484 static int lock_initialize_deadlock_detection (void);
485 static int lock_remove_resource (THREAD_ENTRY * thread_p, LK_RES * res_ptr);
486 static void lock_insert_into_tran_hold_list (LK_ENTRY * entry_ptr, int owner_tran_index);
487 static int lock_delete_from_tran_hold_list (LK_ENTRY * entry_ptr, int owner_tran_index);
488 static void lock_insert_into_tran_non2pl_list (LK_ENTRY * non2pl, int owner_tran_index);
489 static int lock_delete_from_tran_non2pl_list (LK_ENTRY * non2pl, int owner_tran_index);
490 static LK_ENTRY *lock_find_tran_hold_entry (THREAD_ENTRY * thread_p, int tran_index, const OID * oid, bool is_class);
491 static bool lock_force_timeout_expired_wait_transactions (void *thrd_entry);
493 static void lock_detect_local_deadlock (THREAD_ENTRY * thread_p);
494 static bool lock_is_class_lock_escalated (LOCK class_lock, LOCK lock_escalation);
495 static LK_ENTRY *lock_add_non2pl_lock (THREAD_ENTRY * thread_p, LK_RES * res_ptr, int tran_index, LOCK lock);
496 static void lock_position_holder_entry (LK_RES * res_ptr, LK_ENTRY * entry_ptr);
497 static void lock_set_error_for_timeout (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr);
498 static void lock_set_error_for_aborted (LK_ENTRY * entry_ptr);
499 static void lock_set_tran_abort_reason (int tran_index, TRAN_ABORT_REASON abort_reason);
500 static LOCK_WAIT_STATE lock_suspend (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr, int wait_msecs);
501 static void lock_resume (LK_ENTRY * entry_ptr, int state);
502 static bool lock_wakeup_deadlock_victim_timeout (int tran_index);
503 static bool lock_wakeup_deadlock_victim_aborted (int tran_index);
504 static void lock_grant_blocked_holder (THREAD_ENTRY * thread_p, LK_RES * res_ptr);
505 static int lock_grant_blocked_waiter (THREAD_ENTRY * thread_p, LK_RES * res_ptr);
506 static void lock_grant_blocked_waiter_partial (THREAD_ENTRY * thread_p, LK_RES * res_ptr, LK_ENTRY * from_whom);
507 static bool lock_check_escalate (THREAD_ENTRY * thread_p, LK_ENTRY * class_entry, LK_TRAN_LOCK * tran_lock);
508 static int lock_escalate_if_needed (THREAD_ENTRY * thread_p, LK_ENTRY * class_entry, int tran_index);
509 static int lock_internal_hold_lock_object_instant (THREAD_ENTRY * thread_p, int tran_index, const OID * oid,
510  const OID * class_oid, LOCK lock);
511 static int lock_internal_perform_lock_object (THREAD_ENTRY * thread_p, int tran_index, const OID * oid,
512  const OID * class_oid, LOCK lock, int wait_msecs,
513  LK_ENTRY ** entry_addr_ptr, LK_ENTRY * class_entry);
514 static void lock_internal_perform_unlock_object (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr, bool release_flag,
515  bool move_to_non2pl);
516 static void lock_unlock_object_by_isolation (THREAD_ENTRY * thread_p, int tran_index, TRAN_ISOLATION isolation,
517  const OID * class_oid, const OID * oid);
518 static void lock_unlock_inst_locks_of_class_by_isolation (THREAD_ENTRY * thread_p, int tran_index,
519  TRAN_ISOLATION isolation, const OID * class_oid);
520 static int lock_internal_demote_class_lock (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr, LOCK to_be_lock,
521  LOCK * ex_lock);
522 static void lock_demote_all_shared_class_locks (THREAD_ENTRY * thread_p, int tran_index);
523 static void lock_unlock_shared_inst_lock (THREAD_ENTRY * thread_p, int tran_index, const OID * inst_oid);
524 static void lock_remove_all_class_locks (THREAD_ENTRY * thread_p, int tran_index, LOCK lock);
525 static void lock_remove_non2pl (THREAD_ENTRY * thread_p, LK_ENTRY * non2pl, int tran_index);
526 static void lock_update_non2pl_list (THREAD_ENTRY * thread_p, LK_RES * res_ptr, int tran_index, LOCK lock);
527 static int lock_add_WFG_edge (int from_tran_index, int to_tran_index, int holder_flag, INT64 edge_wait_stime);
528 static void lock_select_deadlock_victim (THREAD_ENTRY * thread_p, int s, int t);
529 static void lock_dump_deadlock_victims (THREAD_ENTRY * thread_p, FILE * outfile);
530 static int lock_compare_lock_info (const void *lockinfo1, const void *lockinfo2);
531 static float lock_wait_msecs_to_secs (int msecs);
532 static void lock_dump_resource (THREAD_ENTRY * thread_p, FILE * outfp, LK_RES * res_ptr);
533 
534 static void lock_increment_class_granules (LK_ENTRY * class_entry);
535 
536 static void lock_decrement_class_granules (LK_ENTRY * class_entry);
537 static LK_ENTRY *lock_find_class_entry (int tran_index, const OID * class_oid);
538 
539 static void lock_event_log_tran_locks (THREAD_ENTRY * thread_p, FILE * log_fp, int tran_index);
540 static void lock_event_log_blocked_lock (THREAD_ENTRY * thread_p, FILE * log_fp, LK_ENTRY * entry);
541 static void lock_event_log_blocking_locks (THREAD_ENTRY * thread_p, FILE * log_fp, LK_ENTRY * wait_entry);
542 static void lock_event_log_lock_info (THREAD_ENTRY * thread_p, FILE * log_fp, LK_ENTRY * entry);
543 static void lock_event_set_tran_wait_entry (int tran_index, LK_ENTRY * entry);
544 static void lock_event_set_xasl_id_to_entry (int tran_index, LK_ENTRY * entry);
545 static LK_RES_KEY lock_create_search_key (OID * oid, OID * class_oid);
546 #if defined (SERVER_MODE)
547 static bool lock_is_safe_lock_with_page (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr);
548 #endif /* SERVER_MODE */
549 
550 static LK_ENTRY *lock_get_new_entry (int tran_index, LF_TRAN_ENTRY * tran_entry, LF_FREELIST * freelist);
551 static void lock_free_entry (int tran_index, LF_TRAN_ENTRY * tran_entry, LF_FREELIST * freelist, LK_ENTRY * lock_entry);
552 
553 static void lock_victimize_first_thread_mapfunc (THREAD_ENTRY & thread_ref, bool & stop_mapper);
554 static void lock_check_timeout_expired_and_count_suspended_mapfunc (THREAD_ENTRY & thread_ref, bool & stop_mapper,
555  size_t & suspend_count);
556 static void lock_get_transaction_lock_waiting_threads_mapfunc (THREAD_ENTRY & thread_ref, bool & stop_mapper,
557  int tran_index,
558  tran_lock_waiters_array_type & tran_lock_waiters,
559  size_t & count);
560 static void lock_get_transaction_lock_waiting_threads (int tran_index, tran_lock_waiters_array_type & tran_lock_waiters,
561  size_t & count);
562 
563 // *INDENT-OFF*
564 static cubthread::daemon *lock_Deadlock_detect_daemon = NULL;
565 
566 static void lock_deadlock_detect_daemon_init ();
567 static void lock_deadlock_detect_daemon_destroy ();
568 // *INDENT-ON*
569 
570 /* object lock entry */
571 static void *lock_alloc_entry (void);
572 static int lock_dealloc_entry (void *res);
573 static int lock_init_entry (void *res);
574 static int lock_uninit_entry (void *res);
575 
576 LF_ENTRY_DESCRIPTOR obj_lock_entry_desc = {
577  offsetof (LK_ENTRY, stack),
578  offsetof (LK_ENTRY, next),
579  offsetof (LK_ENTRY, del_id),
580  0, /* does not have a key, not used in a hash table */
581  0, /* does not have a mutex, protected by resource mutex */
583  lock_alloc_entry,
584  lock_dealloc_entry,
585  lock_init_entry,
586  lock_uninit_entry,
587  NULL,
588  NULL,
589  NULL, /* no key */
590  NULL /* no inserts */
591 };
592 
593 /*
594  * Object lock resource
595  */
596 static void *lock_alloc_resource (void);
597 static int lock_dealloc_resource (void *res);
598 static int lock_init_resource (void *res);
599 static int lock_uninit_resource (void *res);
600 static int lock_res_key_copy (void *src, void *dest);
601 static int lock_res_key_compare (void *k1, void *k2);
602 static unsigned int lock_res_key_hash (void *key, int htsize);
603 
604 LF_ENTRY_DESCRIPTOR lk_Obj_lock_res_desc = {
605  offsetof (LK_RES, stack),
606  offsetof (LK_RES, hash_next),
607  offsetof (LK_RES, del_id),
608  offsetof (LK_RES, key),
609  offsetof (LK_RES, res_mutex),
611  lock_alloc_resource,
612  lock_dealloc_resource,
613  lock_init_resource,
614  lock_uninit_resource,
615  lock_res_key_copy,
616  lock_res_key_compare,
617  lock_res_key_hash,
618  NULL /* no inserts */
619 };
620 #endif /* SERVER_MODE */
621 
622 #if defined(SERVER_MODE)
623 static LK_RES_KEY
624 lock_create_search_key (OID * oid, OID * class_oid)
625 {
626  LK_RES_KEY search_key;
627 
628  /* copy *IDs */
629  if (oid != NULL)
630  {
631  COPY_OID (&search_key.oid, oid);
632  }
633  else
634  {
635  OID_SET_NULL (&search_key.oid);
636  }
637 
638  if (class_oid != NULL)
639  {
640  COPY_OID (&search_key.class_oid, class_oid);
641  }
642  else
643  {
644  OID_SET_NULL (&search_key.class_oid);
645  }
646 
647  /* set correct type */
648  if (oid != NULL && OID_IS_ROOTOID (oid))
649  {
650  search_key.type = LOCK_RESOURCE_ROOT_CLASS;
651  }
652  else
653  {
654  if (class_oid == NULL || OID_IS_ROOTOID (class_oid))
655  {
656  search_key.type = LOCK_RESOURCE_CLASS;
657  }
658  else
659  {
660  search_key.type = LOCK_RESOURCE_INSTANCE;
661  }
662  }
663 
664  /* done! */
665  return search_key;
666 }
667 
668 static void *
669 lock_alloc_entry (void)
670 {
671  return malloc (sizeof (LK_ENTRY));
672 }
673 
674 static int
675 lock_dealloc_entry (void *res)
676 {
677  free (res);
678  return NO_ERROR;
679 }
680 
681 static int
682 lock_init_entry (void *entry)
683 {
684  LK_ENTRY *entry_ptr = (LK_ENTRY *) entry;
685  if (entry_ptr != NULL)
686  {
687  return NO_ERROR;
688  }
689  else
690  {
691  assert (false);
692  return ER_FAILED;
693  }
694 }
695 
696 static int
697 lock_uninit_entry (void *entry)
698 {
699  LK_ENTRY *entry_ptr = (LK_ENTRY *) entry;
700 
701  if (entry_ptr == NULL)
702  {
703  return ER_FAILED;
704  }
705 
706  entry_ptr->tran_index = -1;
707  entry_ptr->thrd_entry = NULL;
708 
709  return NO_ERROR;
710 }
711 
712 // *INDENT-OFF*
714 {
715  pthread_mutex_init (&res_mutex, NULL);
716 }
717 
719 {
720  pthread_mutex_destroy (&res_mutex);
721 }
722 // *INDENT-ON*
723 
724 static void *
725 lock_alloc_resource (void)
726 {
727  LK_RES *res_ptr = (LK_RES *) malloc (sizeof (LK_RES));
728  if (res_ptr != NULL)
729  {
730  pthread_mutex_init (&(res_ptr->res_mutex), NULL);
731  }
732  return res_ptr;
733 }
734 
735 static int
736 lock_dealloc_resource (void *res)
737 {
738  LK_RES *res_ptr = (LK_RES *) res;
739  if (res_ptr != NULL)
740  {
741  pthread_mutex_destroy (&res_ptr->res_mutex);
742  free (res_ptr);
743  return NO_ERROR;
744  }
745  else
746  {
747  assert (false);
748  return ER_FAILED;
749  }
750 }
751 
752 static int
753 lock_init_resource (void *res)
754 {
755  LK_RES *res_ptr = (LK_RES *) res;
756 
757  if (res_ptr == NULL)
758  {
759  return ER_FAILED;
760  }
761 
762  res_ptr->total_holders_mode = NULL_LOCK;
763  res_ptr->total_waiters_mode = NULL_LOCK;
764  res_ptr->holder = NULL;
765  res_ptr->waiter = NULL;
766  res_ptr->non2pl = NULL;
767  res_ptr->hash_next = NULL;
768 
769  return NO_ERROR;
770 }
771 
772 static int
773 lock_uninit_resource (void *res)
774 {
775  LK_RES *res_ptr = (LK_RES *) res;
776 
777  if (res == NULL)
778  {
779  return ER_FAILED;
780  }
781 
782  assert (res_ptr->holder == NULL);
783  assert (res_ptr->waiter == NULL);
784  assert (res_ptr->non2pl == NULL);
785 
786  /* TO BE FILLED IN AS NECESSARY */
787 
788  return NO_ERROR;
789 }
790 
791 static int
792 lock_res_key_copy (void *src, void *dest)
793 {
794  LK_RES_KEY *src_k = (LK_RES_KEY *) src;
795  LK_RES_KEY *dest_k = (LK_RES_KEY *) dest;
796 
797  if (src_k == NULL || dest_k == NULL)
798  {
799  return ER_FAILED;
800  }
801 
802  dest_k->type = src_k->type;
803  switch (src_k->type)
804  {
806  COPY_OID (&dest_k->oid, &src_k->oid);
807  COPY_OID (&dest_k->class_oid, &src_k->class_oid);
808  break;
809 
810  case LOCK_RESOURCE_CLASS:
812  COPY_OID (&dest_k->oid, &src_k->oid);
813  OID_SET_NULL (&dest_k->class_oid);
814  break;
815 
817  /* nothing, it's a free object */
818  break;
819 
820  default:
821  assert (false);
822  return ER_FAILED;
823  }
824 
825  return NO_ERROR;
826 }
827 
828 static int
829 lock_res_key_compare (void *k1, void *k2)
830 {
831  LK_RES_KEY *k1_k = (LK_RES_KEY *) k1;
832  LK_RES_KEY *k2_k = (LK_RES_KEY *) k2;
833 
834  if (k1_k == NULL || k2_k == NULL)
835  {
836  return 1;
837  }
838 
839  switch (k1_k->type)
840  {
842  case LOCK_RESOURCE_CLASS:
844  /* fast and dirty oid comparison */
845  if (OID_EQ (&k1_k->oid, &k2_k->oid))
846  {
847  assert (k1_k->type == k2_k->type);
848 
849  /* equal */
850  return 0;
851  }
852  else
853  {
854  /* not equal */
855  return 1;
856  }
857  break;
858 
860  default:
861  /* unfortunately, there's no error reporting here, but an always-true comparison will generate errors early on
862  * and is easier to spot */
863  assert (false);
864  return 0;
865  }
866 }
867 
868 static unsigned int
869 lock_res_key_hash (void *key, int htsize)
870 {
871  LK_RES_KEY *key_k = (LK_RES_KEY *) key;
872 
873  if (key_k != NULL)
874  {
875  return LK_OBJ_LOCK_HASH (&key_k->oid, htsize);
876  }
877  else
878  {
879  assert (false);
880  return 0;
881  }
882 }
883 
884 /* initialize lock entry as free state */
885 static void
886 lock_initialize_entry (LK_ENTRY * entry_ptr)
887 {
888  entry_ptr->tran_index = -1;
889  entry_ptr->thrd_entry = NULL;
890  entry_ptr->res_head = NULL;
891  entry_ptr->granted_mode = NULL_LOCK;
892  entry_ptr->blocked_mode = NULL_LOCK;
893  entry_ptr->next = NULL;
894  entry_ptr->tran_next = NULL;
895  entry_ptr->tran_prev = NULL;
896  entry_ptr->class_entry = NULL;
897  entry_ptr->ngranules = 0;
898  entry_ptr->instant_lock_count = 0;
899  entry_ptr->bind_index_in_tran = -1;
900  XASL_ID_SET_NULL (&entry_ptr->xasl_id);
901 }
902 
903 /* initialize lock entry as granted state */
904 static void
905 lock_initialize_entry_as_granted (LK_ENTRY * entry_ptr, int tran_index, LK_RES * res, LOCK lock)
906 {
907  entry_ptr->tran_index = tran_index;
908  entry_ptr->thrd_entry = NULL;
909  entry_ptr->res_head = res;
910  entry_ptr->granted_mode = lock;
911  entry_ptr->blocked_mode = NULL_LOCK;
912  entry_ptr->count = 1;
913  entry_ptr->next = NULL;
914  entry_ptr->tran_next = NULL;
915  entry_ptr->tran_prev = NULL;
916  entry_ptr->class_entry = NULL;
917  entry_ptr->ngranules = 0;
918  entry_ptr->instant_lock_count = 0;
919 
920  lock_event_set_xasl_id_to_entry (tran_index, entry_ptr);
921 }
922 
923 /* initialize lock entry as blocked state */
924 static void
925 lock_initialize_entry_as_blocked (LK_ENTRY * entry_ptr, THREAD_ENTRY * thread_p, int tran_index, LK_RES * res,
926  LOCK lock)
927 {
928  entry_ptr->tran_index = tran_index;
929  entry_ptr->thrd_entry = thread_p;
930  entry_ptr->res_head = res;
931  entry_ptr->granted_mode = NULL_LOCK;
932  entry_ptr->blocked_mode = lock;
933  entry_ptr->count = 1;
934  entry_ptr->next = NULL;
935  entry_ptr->tran_next = NULL;
936  entry_ptr->tran_prev = NULL;
937  entry_ptr->class_entry = NULL;
938  entry_ptr->ngranules = 0;
939  entry_ptr->instant_lock_count = 0;
940 
941  lock_event_set_xasl_id_to_entry (tran_index, entry_ptr);
942 }
943 
944 /* initialize lock entry as non2pl state */
945 static void
946 lock_initialize_entry_as_non2pl (LK_ENTRY * entry_ptr, int tran_index, LK_RES * res, LOCK lock)
947 {
948  entry_ptr->tran_index = tran_index;
949  entry_ptr->thrd_entry = NULL;
950  entry_ptr->res_head = res;
951  entry_ptr->granted_mode = lock;
952  entry_ptr->blocked_mode = NULL_LOCK;
953  entry_ptr->count = 0;
954  entry_ptr->next = NULL;
955  entry_ptr->tran_next = NULL;
956  entry_ptr->tran_prev = NULL;
957  entry_ptr->class_entry = NULL;
958  entry_ptr->ngranules = 0;
959  entry_ptr->instant_lock_count = 0;
960 }
961 
962 /* initialize lock resource as free state */
963 static void
964 lock_initialize_resource (LK_RES * res_ptr)
965 {
966  pthread_mutex_init (&(res_ptr->res_mutex), NULL);
967  res_ptr->key.type = LOCK_RESOURCE_OBJECT;
968  OID_SET_NULL (&(res_ptr->key.oid));
969  OID_SET_NULL (&(res_ptr->key.class_oid));
970  res_ptr->total_holders_mode = NULL_LOCK;
971  res_ptr->total_waiters_mode = NULL_LOCK;
972  res_ptr->holder = NULL;
973  res_ptr->waiter = NULL;
974  res_ptr->non2pl = NULL;
975  res_ptr->hash_next = NULL;
976 }
977 
978 /* initialize lock resource as allocated state */
979 static void
980 lock_initialize_resource_as_allocated (LK_RES * res_ptr, LOCK lock)
981 {
982  res_ptr->total_holders_mode = lock;
983  res_ptr->total_waiters_mode = NULL_LOCK;
984  res_ptr->holder = NULL;
985  res_ptr->waiter = NULL;
986  res_ptr->non2pl = NULL;
987 }
988 
989 /*
990  * lock_get_hash_value -
991  *
992  * return:
993  *
994  * oid(in):
995  */
996 static unsigned int
997 lock_get_hash_value (const OID * oid, int htsize)
998 {
999  unsigned int next_base_slotid, addr;
1000 
1001  if (oid->slotid <= 0)
1002  {
1003  /* In an unique index, the OID and ClassOID of the last key are <root page's volid, root page's pageid, -1> and
1004  * <root page's volid, root page's pageid, 0>, recpectively. In a non-unique index, the OID of the last key is
1005  * <root page's volid, root page's pageid, -1> */
1006  addr = oid->pageid - oid->slotid;
1007  }
1008  else
1009  {
1010  next_base_slotid = 2;
1011  while (next_base_slotid <= (unsigned) oid->slotid)
1012  {
1013  next_base_slotid = next_base_slotid * 2;
1014  }
1015 
1016  addr = oid->pageid + (htsize / next_base_slotid) * (2 * oid->slotid - next_base_slotid + 1);
1017  }
1018 
1019  return (addr % htsize);
1020 }
1021 #endif /* SERVER_MODE */
1022 
1023 /*
1024  * Private Functions Group 1: initalize and finalize major structures
1025  *
1026  * - lock_init_tran_lock_table()
1027  * - lock_init_object_hash_table()
1028  * - lock_init_object_lock_res_list()
1029  * - lock_init_object_lock_entry_list()
1030  * - lock_init_deadlock_detection()
1031  */
1032 
1033 #if defined(SERVER_MODE)
1034 /*
1035  * lock_initialize_tran_lock_table - Initialize the transaction lock hold table.
1036  *
1037  * return: error code
1038  *
1039  * Note:This function allocates the transaction lock hold table and
1040  * initializes the table.
1041  */
1042 static int
1043 lock_initialize_tran_lock_table (void)
1044 {
1045  LK_TRAN_LOCK *tran_lock; /* pointer to transaction hold entry */
1046  int i, j; /* loop variable */
1047  LK_ENTRY *entry = NULL;
1048 
1049  /* initialize the number of transactions */
1050  lk_Gl.num_trans = MAX_NTRANS;
1051 
1052  /* allocate memory space for transaction lock table */
1053  lk_Gl.tran_lock_table = (LK_TRAN_LOCK *) malloc (SIZEOF_LK_TRAN_LOCK * lk_Gl.num_trans);
1054  if (lk_Gl.tran_lock_table == NULL)
1055  {
1057  (size_t) (SIZEOF_LK_TRAN_LOCK * lk_Gl.num_trans));
1058  return ER_OUT_OF_VIRTUAL_MEMORY;
1059  }
1060 
1061  /* initialize all the entries of transaction lock table */
1062  memset (lk_Gl.tran_lock_table, 0, SIZEOF_LK_TRAN_LOCK * lk_Gl.num_trans);
1063  for (i = 0; i < lk_Gl.num_trans; i++)
1064  {
1065  tran_lock = &lk_Gl.tran_lock_table[i];
1066  pthread_mutex_init (&tran_lock->hold_mutex, NULL);
1067  pthread_mutex_init (&tran_lock->non2pl_mutex, NULL);
1068 
1069  for (j = 0; j < LOCK_TRAN_LOCAL_POOL_MAX_SIZE; j++)
1070  {
1071  entry = (LK_ENTRY *) malloc (sizeof (LK_ENTRY));
1072  lock_initialize_entry (entry);
1073  entry->next = tran_lock->lk_entry_pool;
1074  tran_lock->lk_entry_pool = entry;
1075  }
1076  tran_lock->lk_entry_pool_count = LOCK_TRAN_LOCAL_POOL_MAX_SIZE;
1077  }
1078 
1079  return NO_ERROR;
1080 }
1081 #endif /* SERVER_MODE */
1082 
1083 #if defined(SERVER_MODE)
1084 /*
1085  * lock_initialize_object_hash_table - Initializes the object lock hash table
1086  *
1087  * return: error code
1088  *
1089  * Note:This function initializes an object lock hash table.
1090  */
1091 static void
1092 lock_initialize_object_hash_table (void)
1093 {
1094 #define LK_INITIAL_OBJECT_LOCK_TABLE_SIZE 10000
1095 
1096  lk_Gl.max_obj_locks = LK_INITIAL_OBJECT_LOCK_TABLE_SIZE;
1097 
1098  const int obj_hash_size = MAX (lk_Gl.max_obj_locks, LK_MIN_OBJECT_LOCKS);
1099 
1100  const int block_count = 2;
1101  const int block_size = (int) MAX ((lk_Gl.max_obj_locks * LK_RES_RATIO) / block_count, 1);
1102 
1103  /* initialize object hash table */
1104  lk_Gl.m_obj_hash_table.init (obj_lock_res_Ts, THREAD_TS_OBJ_LOCK_RES, obj_hash_size, block_size, block_count,
1105  lk_Obj_lock_res_desc);
1106 }
1107 #endif /* SERVER_MODE */
1108 
1109 #if defined(SERVER_MODE)
1110 /*
1111  * lockk_initialize_object_lock_entry_list - Initializes the object lock entry list
1112  *
1113  * return: error code
1114  *
1115  * Note:
1116  * This function initializes following two lists.
1117  * 1. a list of object lock entry block
1118  * => each node has object lock entry block.
1119  * 2. a list of freed object lock entries.
1120  */
1121 static int
1122 lock_initialize_object_lock_entry_list (void)
1123 {
1124  int block_count, block_size, ret;
1125 
1126  /* initialize the entry freelist */
1127  block_count = 1;
1128  block_size = (int) MAX ((lk_Gl.max_obj_locks * LK_ENTRY_RATIO), 1);
1129  ret = lf_freelist_init (&lk_Gl.obj_free_entry_list, block_count, block_size, &obj_lock_entry_desc, &obj_lock_ent_Ts);
1130  if (ret != NO_ERROR)
1131  {
1132  return ER_FAILED;
1133  }
1134 
1135  return NO_ERROR;
1136 }
1137 #endif /* SERVER_MODE */
1138 
1139 #if defined(SERVER_MODE)
1140 /*
1141  * lock_initialize_deadlock_detection - Initializes transaction wait-for graph.
1142  *
1143  * return: error code
1144  *
1145  * Note:This function initializes the transaction waif-for graph.
1146  */
1147 static int
1148 lock_initialize_deadlock_detection (void)
1149 {
1150  int i;
1151 
1152  pthread_mutex_init (&lk_Gl.DL_detection_mutex, NULL);
1153  gettimeofday (&lk_Gl.last_deadlock_run, NULL);
1154 
1155  /* allocate transaction WFG node table */
1156  lk_Gl.TWFG_node = (LK_WFG_NODE *) malloc (SIZEOF_LK_WFG_NODE * lk_Gl.num_trans);
1157  if (lk_Gl.TWFG_node == NULL)
1158  {
1160  (size_t) (SIZEOF_LK_WFG_NODE * lk_Gl.num_trans));
1161  return ER_OUT_OF_VIRTUAL_MEMORY;
1162  }
1163  /* initialize transaction WFG node table */
1164  for (i = 0; i < lk_Gl.num_trans; i++)
1165  {
1166  lk_Gl.TWFG_node[i].DL_victim = false;
1167  lk_Gl.TWFG_node[i].checked_by_deadlock_detector = false;
1168  lk_Gl.TWFG_node[i].thrd_wait_stime = 0;
1169  }
1170 
1171  /* initialize other related fields */
1172  lk_Gl.TWFG_edge = NULL;
1173  lk_Gl.max_TWFG_edge = 0;
1174  lk_Gl.TWFG_free_edge_idx = -1;
1175  lk_Gl.global_edge_seq_num = 0;
1176 
1177  return NO_ERROR;
1178 }
1179 #endif /* SERVER_MODE */
1180 
1181 #if defined(SERVER_MODE)
1182 /*
1183  * lock_remove_resource - Remove lock resource entry
1184  *
1185  * return: error code
1186  *
1187  * res_ptr(in):
1188  *
1189  * Note:This function removes the given lock resource entry from lock hash table.
1190  */
1191 static int
1192 lock_remove_resource (THREAD_ENTRY * thread_p, LK_RES * res_ptr)
1193 {
1194  assert (res_ptr != NULL);
1195 
1196  if (!lk_Gl.m_obj_hash_table.erase_locked (thread_p, res_ptr->key, res_ptr))
1197  {
1198  /* this should not happen, as the hash entry is mutex protected and no clear operations are performed on the hash
1199  * table */
1200  pthread_mutex_unlock (&res_ptr->res_mutex);
1201  assert_release (false);
1202  return ER_FAILED;
1203  }
1204  else
1205  {
1206  return NO_ERROR;
1207  }
1208 }
1209 #endif /* SERVER_MODE */
1210 
1211 /*
1212  * Private Functions Group: transaction lock list related functios
1213  * - lk_insert_into_tran_hold_list()
1214  * - lk_delete_from_tran_hold_list()
1215  * - lk_insert_into_tran_non2pl_list()
1216  * - lk_delete_from_tran_non2pl_list()
1217  */
1218 
1219 #if defined(SERVER_MODE)
1220 /*
1221  * lock_insert_into_tran_hold_list - Insert the given lock entry
1222  * into the transaction lock hold list
1223  *
1224  * return: nothing
1225  *
1226  * entry_ptr(in):
1227  *
1228  * Note:This function inserts the given lock entry into the transaction lock
1229  * hold list. The given lock entry was included in the lock holder
1230  * list. That is, The lock is held by the transaction.
1231  */
1232 static void
1233 lock_insert_into_tran_hold_list (LK_ENTRY * entry_ptr, int owner_tran_index)
1234 {
1235  LK_TRAN_LOCK *tran_lock;
1236  int rv;
1237 
1238  /* The caller is holding a resource mutex */
1239 
1240  if (owner_tran_index != entry_ptr->tran_index)
1241  {
1242  assert (owner_tran_index == entry_ptr->tran_index);
1243  return;
1244  }
1245 
1246  tran_lock = &lk_Gl.tran_lock_table[entry_ptr->tran_index];
1247  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
1248 
1249  switch (entry_ptr->res_head->key.type)
1250  {
1252 #if defined(CUBRID_DEBUG)
1253  if (tran_lock->root_class_hold != NULL)
1254  {
1255  fprintf (stderr, "lk_insert_into_tran_hold_list() error.. (1)\n");
1256  }
1257 #endif /* CUBRID_DEBUG */
1258  entry_ptr->tran_next = tran_lock->root_class_hold;
1259  tran_lock->root_class_hold = entry_ptr;
1260  break;
1261 
1262  case LOCK_RESOURCE_CLASS:
1263 #if defined(CUBRID_DEBUG)
1264  if (tran_lock->class_hold_list != NULL)
1265  {
1266  LK_ENTRY *_ptr;
1267  _ptr = tran_lock->class_hold_list;
1268  while (_ptr != NULL)
1269  {
1270  if (_ptr->res_head == entry_ptr->res_head)
1271  {
1272  break;
1273  }
1274  _ptr = _ptr->tran_next;
1275  }
1276  if (_ptr != NULL)
1277  {
1278  fprintf (stderr, "lk_insert_into_tran_hold_list() error.. (2)\n");
1279  }
1280  }
1281 #endif /* CUBRID_DEBUG */
1282  if (tran_lock->class_hold_list != NULL)
1283  {
1284  tran_lock->class_hold_list->tran_prev = entry_ptr;
1285  }
1286  entry_ptr->tran_next = tran_lock->class_hold_list;
1287  tran_lock->class_hold_list = entry_ptr;
1288  tran_lock->class_hold_count++;
1289  break;
1290 
1292 #if defined(CUBRID_DEBUG)
1293  if (tran_lock->inst_hold_list != NULL)
1294  {
1295  LK_ENTRY *_ptr;
1296  _ptr = tran_lock->inst_hold_list;
1297  while (_ptr != NULL)
1298  {
1299  if (_ptr->res_head == entry_ptr->res_head)
1300  {
1301  break;
1302  }
1303  _ptr = _ptr->tran_next;
1304  }
1305  if (_ptr != NULL)
1306  {
1307  fprintf (stderr, "lk_insert_into_tran_hold_list() error.. (3)\n");
1308  }
1309  }
1310 #endif /* CUBRID_DEBUG */
1311  if (tran_lock->inst_hold_list != NULL)
1312  {
1313  tran_lock->inst_hold_list->tran_prev = entry_ptr;
1314  }
1315  entry_ptr->tran_next = tran_lock->inst_hold_list;
1316  tran_lock->inst_hold_list = entry_ptr;
1317  tran_lock->inst_hold_count++;
1318  break;
1319 
1320  default:
1321  break;
1322  }
1323 
1324  pthread_mutex_unlock (&tran_lock->hold_mutex);
1325 }
1326 #endif /* SERVER_MODE */
1327 
1328 #if defined(SERVER_MODE)
1329 /*
1330  * lock_delete_from_tran_hold_list - Delted the given lock entry
1331  * from the transaction lock hold list
1332  *
1333  * return: error code
1334  *
1335  * entry_ptr(in):
1336  *
1337  * Note:This functions finds the given lock entry in the transaction
1338  * lock hold list and then deletes it from the lock hold list.
1339  */
1340 static int
1341 lock_delete_from_tran_hold_list (LK_ENTRY * entry_ptr, int owner_tran_index)
1342 {
1343  LK_TRAN_LOCK *tran_lock;
1344  int rv;
1345  int error_code = NO_ERROR;
1346 
1347  /* The caller is holding a resource mutex */
1348 
1349  if (owner_tran_index != entry_ptr->tran_index)
1350  {
1351  assert (owner_tran_index == entry_ptr->tran_index);
1352  return ER_FAILED;
1353  }
1354 
1355  tran_lock = &lk_Gl.tran_lock_table[entry_ptr->tran_index];
1356  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
1357 
1358  switch (entry_ptr->res_head->key.type)
1359  {
1361  if (entry_ptr != tran_lock->root_class_hold)
1362  { /* does not exist */
1364  LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode), "ROOT CLASS", entry_ptr->res_head->key.oid.volid,
1365  entry_ptr->res_head->key.oid.pageid, entry_ptr->res_head->key.oid.slotid, entry_ptr->tran_index,
1366  (tran_lock->root_class_hold == NULL ? 0 : 1));
1367  error_code = ER_LK_NOTFOUND_IN_TRAN_HOLD_LIST;
1368  }
1369  else
1370  {
1371  tran_lock->root_class_hold = NULL;
1372  }
1373  break;
1374 
1375  case LOCK_RESOURCE_CLASS:
1376  if (tran_lock->class_hold_list == entry_ptr)
1377  {
1378  tran_lock->class_hold_list = entry_ptr->tran_next;
1379  if (entry_ptr->tran_next)
1380  {
1381  entry_ptr->tran_next->tran_prev = NULL;
1382  }
1383  }
1384  else
1385  {
1386  if (entry_ptr->tran_prev)
1387  {
1388  entry_ptr->tran_prev->tran_next = entry_ptr->tran_next;
1389  }
1390  if (entry_ptr->tran_next)
1391  {
1392  entry_ptr->tran_next->tran_prev = entry_ptr->tran_prev;
1393  }
1394  }
1395  tran_lock->class_hold_count--;
1396  break;
1397 
1399  if (tran_lock->inst_hold_list == entry_ptr)
1400  {
1401  tran_lock->inst_hold_list = entry_ptr->tran_next;
1402  if (entry_ptr->tran_next)
1403  {
1404  entry_ptr->tran_next->tran_prev = NULL;
1405  }
1406  }
1407  else
1408  {
1409  if (entry_ptr->tran_prev)
1410  {
1411  entry_ptr->tran_prev->tran_next = entry_ptr->tran_next;
1412  }
1413  if (entry_ptr->tran_next)
1414  {
1415  entry_ptr->tran_next->tran_prev = entry_ptr->tran_prev;
1416  }
1417  }
1418  tran_lock->inst_hold_count--;
1419  break;
1420 
1421  default:
1422  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_INVALID_OBJECT_TYPE, 4, entry_ptr->res_head->key.type,
1423  entry_ptr->res_head->key.oid.volid, entry_ptr->res_head->key.oid.pageid,
1424  entry_ptr->res_head->key.oid.slotid);
1425  error_code = ER_LK_INVALID_OBJECT_TYPE;
1426  break;
1427  }
1428 
1429  pthread_mutex_unlock (&tran_lock->hold_mutex);
1430 
1431  return error_code;
1432 }
1433 #endif /* SERVER_MODE */
1434 
1435 #if defined(SERVER_MODE)
1436 /*
1437  * lock_insert_into_tran_non2pl_list - Insert the given lock entry
1438  * into the transaction non2pl list
1439  *
1440  * return: nothing
1441  *
1442  * non2pl(in):
1443  *
1444  * Note:This function inserts the given lock entry into the transaction
1445  * non2pl list.
1446  */
1447 static void
1448 lock_insert_into_tran_non2pl_list (LK_ENTRY * non2pl, int owner_tran_index)
1449 {
1450  LK_TRAN_LOCK *tran_lock;
1451  int rv;
1452 
1453  /* The caller is holding a resource mutex */
1454 
1455  if (owner_tran_index != non2pl->tran_index)
1456  {
1457  assert (owner_tran_index == non2pl->tran_index);
1458  return;
1459  }
1460 
1461  tran_lock = &lk_Gl.tran_lock_table[non2pl->tran_index];
1462  rv = pthread_mutex_lock (&tran_lock->non2pl_mutex);
1463 
1464  non2pl->tran_next = tran_lock->non2pl_list;
1465  tran_lock->non2pl_list = non2pl;
1466  if (non2pl->granted_mode == INCON_NON_TWO_PHASE_LOCK)
1467  {
1468  tran_lock->num_incons_non2pl += 1;
1469  }
1470 
1471  pthread_mutex_unlock (&tran_lock->non2pl_mutex);
1472 }
1473 #endif /* SERVER_MODE */
1474 
1475 #if defined(SERVER_MODE)
1476 /*
1477  * lock_delete_from_tran_non2pl_list - Delete the given lock entry
1478  * from the transaction non2pl list
1479  *
1480  * return: error code
1481  *
1482  * non2pl(in):
1483  *
1484  * Note:This function finds the given lock entry in the transaction
1485  * non2pl list and then deletes it from the non2pl list.
1486  */
1487 static int
1488 lock_delete_from_tran_non2pl_list (LK_ENTRY * non2pl, int owner_tran_index)
1489 {
1490  LK_TRAN_LOCK *tran_lock;
1491  LK_ENTRY *prev, *curr;
1492  int rv;
1493  int error_code = NO_ERROR;
1494 
1495  /* The caller is holding a resource mutex */
1496 
1497  if (owner_tran_index != non2pl->tran_index)
1498  {
1499  assert (owner_tran_index == non2pl->tran_index);
1500  return ER_FAILED;
1501  }
1502 
1503  tran_lock = &lk_Gl.tran_lock_table[non2pl->tran_index];
1504  rv = pthread_mutex_lock (&tran_lock->non2pl_mutex);
1505 
1506  /* find the given non2pl entry in transaction non2pl list */
1507  prev = NULL;
1508  curr = tran_lock->non2pl_list;
1509  while (curr != NULL && curr != non2pl)
1510  {
1511  prev = curr;
1512  curr = curr->tran_next;
1513  }
1514  if (curr == NULL)
1515  { /* not found */
1517  LOCK_TO_LOCKMODE_STRING (non2pl->granted_mode),
1518  (non2pl->res_head != NULL ? non2pl->res_head->key.oid.volid : -2),
1519  (non2pl->res_head != NULL ? non2pl->res_head->key.oid.pageid : -2),
1520  (non2pl->res_head != NULL ? non2pl->res_head->key.oid.slotid : -2), non2pl->tran_index);
1522  }
1523  else
1524  { /* found */
1525  /* delete it from the transaction non2pl list */
1526  if (prev == NULL)
1527  {
1528  tran_lock->non2pl_list = curr->tran_next;
1529  }
1530  else
1531  {
1532  prev->tran_next = curr->tran_next;
1533  }
1534 
1535  if (curr->granted_mode == INCON_NON_TWO_PHASE_LOCK)
1536  {
1537  tran_lock->num_incons_non2pl -= 1;
1538  }
1539  }
1540  pthread_mutex_unlock (&tran_lock->non2pl_mutex);
1541 
1542  return error_code;
1543 }
1544 #endif /* SERVER_MODE */
1545 
1546 /*
1547  * Private Functions Group: lock entry addition related functions
1548  * - lk_add_non2pl_lock()
1549  * - lk_position_holder_entry()
1550  */
1551 
1552 #if defined(SERVER_MODE)
1553 /*
1554  * lock_find_class_entry - Find a class lock entry
1555  * in the transaction lock hold list
1556  *
1557  * return:
1558  *
1559  * tran_index(in):
1560  * class_oid(in):
1561  *
1562  * Note:This function finds a class lock entry, whose lock object id
1563  * is the given class_oid, in the transaction lock hold list.
1564  */
1565 static LK_ENTRY *
1566 lock_find_class_entry (int tran_index, const OID * class_oid)
1567 {
1568  LK_TRAN_LOCK *tran_lock;
1569  LK_ENTRY *entry_ptr;
1570  int rv;
1571 
1572  /* The caller is not holding any mutex */
1573 
1574  tran_lock = &lk_Gl.tran_lock_table[tran_index];
1575  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
1576 
1577  if (OID_IS_ROOTOID (class_oid))
1578  {
1579  entry_ptr = tran_lock->root_class_hold;
1580  }
1581  else
1582  {
1583  entry_ptr = tran_lock->class_hold_list;
1584  while (entry_ptr != NULL)
1585  {
1586  assert (tran_index == entry_ptr->tran_index);
1587 
1588  if (OID_EQ (&entry_ptr->res_head->key.oid, class_oid))
1589  {
1590  break;
1591  }
1592  entry_ptr = entry_ptr->tran_next;
1593  }
1594  }
1595 
1596  pthread_mutex_unlock (&tran_lock->hold_mutex);
1597 
1598  return entry_ptr; /* it might be NULL */
1599 }
1600 #endif /* SERVER_MODE */
1601 
1602 #if defined(SERVER_MODE)
1603 /*
1604  * lock_add_non2pl_lock - Add a release lock which has never been acquired
1605  *
1606  * return: pointer to the lock entry in non2pl list.
1607  *
1608  * res_ptr(in): pointer to lock resource
1609  * tran_index(in): transaction table index
1610  * lock(in): the lock mode of non2pl lock
1611  *
1612  * Note:Cache a release lock (which has never been acquired) onto the list
1613  * of non two phase lock to detect future serializable inconsistencies
1614  *
1615  */
1616 static LK_ENTRY *
1617 lock_add_non2pl_lock (THREAD_ENTRY * thread_p, LK_RES * res_ptr, int tran_index, LOCK lock)
1618 {
1620  LK_ENTRY *non2pl;
1621  LK_TRAN_LOCK *tran_lock;
1622  int rv;
1623  LOCK_COMPATIBILITY compat;
1624 
1625  assert (!OID_ISNULL (&res_ptr->key.oid));
1626 
1627  /* The caller is holding a resource mutex */
1628 
1629  /* find the non2pl entry of the given transaction */
1630  non2pl = res_ptr->non2pl;
1631  while (non2pl != NULL)
1632  {
1633  if (non2pl->tran_index == tran_index)
1634  {
1635  break;
1636  }
1637  non2pl = non2pl->next;
1638  }
1639 
1640  if (non2pl != NULL)
1641  {
1642  /* 1. I have a non2pl entry on the lock resource */
1643  /* reflect the current lock acquisition into the non2pl entry */
1644  tran_lock = &lk_Gl.tran_lock_table[tran_index];
1645  rv = pthread_mutex_lock (&tran_lock->non2pl_mutex);
1646 
1647  if (non2pl->granted_mode != INCON_NON_TWO_PHASE_LOCK)
1648  {
1649  if (lock == INCON_NON_TWO_PHASE_LOCK)
1650  {
1651  non2pl->granted_mode = INCON_NON_TWO_PHASE_LOCK;
1652  tran_lock->num_incons_non2pl += 1;
1653  }
1654  else
1655  {
1656  assert (lock >= NULL_LOCK && non2pl->granted_mode >= NULL_LOCK);
1657  compat = lock_Comp[lock][non2pl->granted_mode];
1658  assert (compat != LOCK_COMPAT_UNKNOWN);
1659 
1660  if (compat == LOCK_COMPAT_NO)
1661  {
1662  non2pl->granted_mode = INCON_NON_TWO_PHASE_LOCK;
1663  tran_lock->num_incons_non2pl += 1;
1664  }
1665  else
1666  {
1667  non2pl->granted_mode = lock_Conv[lock][non2pl->granted_mode];
1668  assert (non2pl->granted_mode != NA_LOCK);
1669  }
1670  }
1671  }
1672 
1673  pthread_mutex_unlock (&tran_lock->non2pl_mutex);
1674  }
1675  else
1676  { /* non2pl == (LK_ENTRY *)NULL */
1677  /* 2. I do not have a non2pl entry on the lock resource */
1678  /* allocate a lock entry, initialize it, and connect it */
1679  non2pl = lock_get_new_entry (tran_index, t_entry, &lk_Gl.obj_free_entry_list);
1680  if (non2pl != NULL)
1681  {
1682  lock_initialize_entry_as_non2pl (non2pl, tran_index, res_ptr, lock);
1683  non2pl->next = res_ptr->non2pl;
1684  res_ptr->non2pl = non2pl;
1685  lock_insert_into_tran_non2pl_list (non2pl, tran_index);
1686  }
1687  else
1688  {
1689  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_ALLOC_RESOURCE, 1, "lock heap entry");
1690  }
1691  }
1692  return non2pl; /* it might be NULL */
1693 }
1694 #endif /* SERVER_MODE */
1695 
1696 #if defined(SERVER_MODE)
1697 /*
1698  * lock_position_holder_entry - Position given lock entry in the lock
1699  * holder list of given lock resource
1700  *
1701  * return:
1702  *
1703  * res_ptr(in):
1704  * entry_ptr(in):
1705  *
1706  * Note:This function positions the given lock entry
1707  * in the lock holder list of the given lock resource
1708  * according to Upgrader Positioning Rule(UPR).
1709  *
1710  * NOTE that the granted_mode and blocked_mode of the given lock
1711  * entry must be set before this function is called.
1712  */
1713 static void
1714 lock_position_holder_entry (LK_RES * res_ptr, LK_ENTRY * entry_ptr)
1715 {
1716  LK_ENTRY *prev, *i;
1717  LK_ENTRY *ta, *tap;
1718  LK_ENTRY *tb, *tbp;
1719  LK_ENTRY *tc, *tcp;
1720  LOCK_COMPATIBILITY compat1, compat2;
1721 
1722  /* find the position where the lock entry to be inserted */
1723  if (entry_ptr->blocked_mode == NULL_LOCK)
1724  {
1725  /* case 1: when block_mode is NULL_LOCK */
1726  prev = NULL;
1727  i = res_ptr->holder;
1728  while (i != NULL)
1729  {
1730  if (i->blocked_mode == NULL_LOCK)
1731  {
1732  break;
1733  }
1734  prev = i;
1735  i = i->next;
1736  }
1737  }
1738  else
1739  {
1740  /* case 2: when block_mode is not NULL_LOCK */
1741  /* find ta, tb, tc among other holders */
1742  ta = tb = tc = NULL;
1743  tap = tbp = tcp = NULL;
1744 
1745  prev = NULL;
1746  i = res_ptr->holder;
1747  while (i != NULL)
1748  {
1749  if (i->blocked_mode != NULL_LOCK)
1750  {
1751  assert (entry_ptr->blocked_mode >= NULL_LOCK && entry_ptr->granted_mode >= NULL_LOCK);
1752  assert (i->blocked_mode >= NULL_LOCK && i->granted_mode >= NULL_LOCK);
1753 
1754  compat1 = lock_Comp[entry_ptr->blocked_mode][i->blocked_mode];
1755  assert (compat1 != LOCK_COMPAT_UNKNOWN);
1756 
1757  if (ta == NULL && compat1 == LOCK_COMPAT_YES)
1758  {
1759  ta = i;
1760  tap = prev;
1761  }
1762 
1763  compat1 = lock_Comp[entry_ptr->blocked_mode][i->granted_mode];
1764  assert (compat1 != LOCK_COMPAT_UNKNOWN);
1765 
1766  compat2 = lock_Comp[i->blocked_mode][entry_ptr->granted_mode];
1767  assert (compat2 != LOCK_COMPAT_UNKNOWN);
1768 
1769  if (ta == NULL && tb == NULL && compat1 == LOCK_COMPAT_YES && compat2 == LOCK_COMPAT_NO)
1770  {
1771  tb = i;
1772  tbp = prev;
1773  }
1774  }
1775  else
1776  {
1777  if (tc == NULL)
1778  {
1779  tc = i;
1780  tcp = prev;
1781  }
1782  }
1783  prev = i;
1784  i = i->next;
1785  }
1786  if (ta != NULL)
1787  {
1788  prev = tap;
1789  }
1790  else if (tb != NULL)
1791  {
1792  prev = tbp;
1793  }
1794  else if (tc != NULL)
1795  {
1796  prev = tcp;
1797  }
1798  }
1799 
1800  /* insert the given lock entry into the found position */
1801  if (prev == NULL)
1802  {
1803  entry_ptr->next = res_ptr->holder;
1804  res_ptr->holder = entry_ptr;
1805  }
1806  else
1807  {
1808  entry_ptr->next = prev->next;
1809  prev->next = entry_ptr;
1810  }
1811 }
1812 #endif /* SERVER_MODE */
1813 
1814 
1815 /*
1816  * Private Functions Group: timeout related functions
1817  *
1818  * - lock_set_error_for_timeout()
1819  * - lock_set_error_for_aborted()
1820  * - lock_suspend(), lock_resume()
1821  * - lock_wakeup_deadlock_victim_timeout()
1822  * - lock_wakeup_deadlock_victim_aborted()
1823  */
1824 
1825 #if defined(SERVER_MODE)
1826 /*
1827  * lock_set_error_for_timeout - Set error for lock timeout
1828  *
1829  * return:
1830  *
1831  * entry_ptr(in): pointer to the lock entry for waiting
1832  *
1833  * Note:Set error code for lock timeout
1834  */
1835 static void
1836 lock_set_error_for_timeout (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr)
1837 {
1838  const char *client_prog_name; /* Client program name for transaction */
1839  const char *client_user_name; /* Client user name for transaction */
1840  const char *client_host_name; /* Client host for transaction */
1841  int client_pid; /* Client process id for transaction */
1842  char *waitfor_client_users_default = (char *) "";
1843  char *waitfor_client_users; /* Waitfor users */
1844  char *classname; /* Name of the class */
1845  int n, i, nwaits, max_waits = DEFAULT_WAIT_USERS;
1846  int wait_for_buf[DEFAULT_WAIT_USERS];
1847  int *wait_for = wait_for_buf, *t;
1848  LK_ENTRY *entry;
1849  LK_RES *res_ptr = NULL;
1850  int unit_size = LOG_USERNAME_MAX + CUB_MAXHOSTNAMELEN + PATH_MAX + 20 + 4;
1851  char *ptr;
1852  int rv;
1853  bool is_classname_alloced = false;
1854  bool free_mutex_flag = false;
1855  bool isdeadlock_timeout = false;
1856  int compat1, compat2;
1857  OID *oid_rr;
1858 
1859  /* Find the users that transaction is waiting for */
1860  waitfor_client_users = waitfor_client_users_default;
1861  nwaits = 0;
1862 
1863  assert (entry_ptr->granted_mode >= NULL_LOCK && entry_ptr->blocked_mode >= NULL_LOCK);
1864 
1865  /* Dump all the tran. info. which this tran. is waiting for */
1866  res_ptr = entry_ptr->res_head;
1867  wait_for[0] = NULL_TRAN_INDEX;
1868 
1869  rv = pthread_mutex_lock (&res_ptr->res_mutex);
1870  free_mutex_flag = true;
1871  for (entry = res_ptr->holder; entry != NULL; entry = entry->next)
1872  {
1873  if (entry == entry_ptr)
1874  {
1875  continue;
1876  }
1877 
1878  assert (entry->granted_mode >= NULL_LOCK && entry->blocked_mode >= NULL_LOCK);
1879  compat1 = lock_Comp[entry->granted_mode][entry_ptr->blocked_mode];
1880  compat2 = lock_Comp[entry->blocked_mode][entry_ptr->blocked_mode];
1881  assert (compat1 != LOCK_COMPAT_UNKNOWN && compat2 != LOCK_COMPAT_UNKNOWN);
1882 
1883  if (compat1 == LOCK_COMPAT_NO || compat2 == LOCK_COMPAT_NO)
1884  {
1885  EXPAND_WAIT_FOR_ARRAY_IF_NEEDED ();
1886  wait_for[nwaits++] = entry->tran_index;
1887  }
1888  }
1889 
1890  for (entry = res_ptr->waiter; entry != NULL; entry = entry->next)
1891  {
1892  if (entry == entry_ptr)
1893  {
1894  continue;
1895  }
1896 
1897  assert (entry->granted_mode >= NULL_LOCK && entry->blocked_mode >= NULL_LOCK);
1898  compat1 = lock_Comp[entry->blocked_mode][entry_ptr->blocked_mode];
1899  assert (compat1 != LOCK_COMPAT_UNKNOWN);
1900 
1901  if (compat1 == LOCK_COMPAT_NO)
1902  {
1903  EXPAND_WAIT_FOR_ARRAY_IF_NEEDED ();
1904  wait_for[nwaits++] = entry->tran_index;
1905  }
1906  }
1907 
1908  pthread_mutex_unlock (&res_ptr->res_mutex);
1909  free_mutex_flag = false;
1910 
1911  if (nwaits == 0 || (waitfor_client_users = (char *) malloc (unit_size * nwaits)) == NULL)
1912  {
1913  waitfor_client_users = waitfor_client_users_default;
1914  }
1915  else
1916  {
1917  for (ptr = waitfor_client_users, i = 0; i < nwaits; i++)
1918  {
1919  (void) logtb_find_client_name_host_pid (wait_for[i], &client_prog_name, &client_user_name, &client_host_name,
1920  &client_pid);
1921  n = sprintf (ptr, "%s%s@%s|%s(%d)", ((i == 0) ? "" : ", "), client_user_name, client_host_name,
1922  client_prog_name, client_pid);
1923  ptr += n;
1924  }
1925  }
1926 
1927 set_error:
1928 
1929  if (wait_for != wait_for_buf)
1930  {
1931  free_and_init (wait_for);
1932  }
1933 
1934  if (free_mutex_flag)
1935  {
1936  pthread_mutex_unlock (&res_ptr->res_mutex);
1937  free_mutex_flag = false;
1938  }
1939 
1940  /* get the client information of current transaction */
1941  (void) logtb_find_client_name_host_pid (entry_ptr->tran_index, &client_prog_name, &client_user_name,
1942  &client_host_name, &client_pid);
1943 
1944  if (entry_ptr->thrd_entry != NULL
1945  && ((entry_ptr->thrd_entry->lockwait_state == LOCK_RESUMED_DEADLOCK_TIMEOUT)
1946  || (entry_ptr->thrd_entry->lockwait_state == LOCK_RESUMED_ABORTED_OTHER)))
1947  {
1948  isdeadlock_timeout = true;
1949  }
1950 
1951  switch (entry_ptr->res_head->key.type)
1952  {
1954  case LOCK_RESOURCE_CLASS:
1955  oid_rr = oid_get_rep_read_tran_oid ();
1956  if (oid_rr != NULL && OID_EQ (&entry_ptr->res_head->key.oid, oid_rr))
1957  {
1958  classname = (char *) "Generic object for Repeatable Read consistency";
1959  is_classname_alloced = false;
1960  }
1961  else if (OID_ISTEMP (&entry_ptr->res_head->key.oid))
1962  {
1963  classname = NULL;
1964  }
1965  else
1966  {
1967  OID real_class_oid;
1968 
1969  if (entry_ptr->res_head->key.type == LOCK_RESOURCE_CLASS
1970  && OID_IS_VIRTUAL_CLASS_OF_DIR_OID (&entry_ptr->res_head->key.oid))
1971  {
1972  OID_GET_REAL_CLASS_OF_DIR_OID (&entry_ptr->res_head->key.oid, &real_class_oid);
1973  }
1974  else
1975  {
1976  COPY_OID (&real_class_oid, &entry_ptr->res_head->key.oid);
1977  }
1978  if (heap_get_class_name (thread_p, &real_class_oid, &classname) != NO_ERROR)
1979  {
1980  /* ignore */
1981  er_clear ();
1982  }
1983  else if (classname != NULL)
1984  {
1985  is_classname_alloced = true;
1986  }
1987  }
1988 
1989  if (classname != NULL)
1990  {
1993  entry_ptr->tran_index, client_user_name, client_host_name, client_pid,
1994  LOCK_TO_LOCKMODE_STRING (entry_ptr->blocked_mode), classname, waitfor_client_users);
1995  if (is_classname_alloced)
1996  {
1997  free_and_init (classname);
1998  }
1999  }
2000  else
2001  {
2004  entry_ptr->tran_index, client_user_name, client_host_name, client_pid,
2005  LOCK_TO_LOCKMODE_STRING (entry_ptr->blocked_mode), entry_ptr->res_head->key.oid.volid,
2006  entry_ptr->res_head->key.oid.pageid, entry_ptr->res_head->key.oid.slotid, waitfor_client_users);
2007  }
2008  break;
2009 
2011  if (OID_ISTEMP (&entry_ptr->res_head->key.class_oid))
2012  {
2013  classname = NULL;
2014  }
2015  else
2016  {
2017  OID real_class_oid;
2018  if (OID_IS_VIRTUAL_CLASS_OF_DIR_OID (&entry_ptr->res_head->key.class_oid))
2019  {
2020  OID_GET_REAL_CLASS_OF_DIR_OID (&entry_ptr->res_head->key.class_oid, &real_class_oid);
2021  }
2022  else
2023  {
2024  COPY_OID (&real_class_oid, &entry_ptr->res_head->key.class_oid);
2025  }
2026  if (heap_get_class_name (thread_p, &real_class_oid, &classname) != NO_ERROR)
2027  {
2028  /* ignore */
2029  er_clear ();
2030  }
2031  }
2032 
2033  if (classname != NULL)
2034  {
2037  entry_ptr->tran_index, client_user_name, client_host_name, client_pid,
2038  LOCK_TO_LOCKMODE_STRING (entry_ptr->blocked_mode), entry_ptr->res_head->key.oid.volid,
2039  entry_ptr->res_head->key.oid.pageid, entry_ptr->res_head->key.oid.slotid, classname,
2040  waitfor_client_users);
2041  free_and_init (classname);
2042  }
2043  else
2044  {
2047  entry_ptr->tran_index, client_user_name, client_host_name, client_pid,
2048  LOCK_TO_LOCKMODE_STRING (entry_ptr->blocked_mode), entry_ptr->res_head->key.oid.volid,
2049  entry_ptr->res_head->key.oid.pageid, entry_ptr->res_head->key.oid.slotid, waitfor_client_users);
2050  }
2051  break;
2052  default:
2053  break;
2054  }
2055 
2056  if (waitfor_client_users && waitfor_client_users != waitfor_client_users_default)
2057  {
2058  free_and_init (waitfor_client_users);
2059  }
2060 
2061  if (isdeadlock_timeout == false)
2062  {
2063  FILE *log_fp;
2064 
2065  log_fp = event_log_start (thread_p, "LOCK_TIMEOUT");
2066  if (log_fp == NULL)
2067  {
2068  return;
2069  }
2070 
2071  lock_event_log_blocked_lock (thread_p, log_fp, entry_ptr);
2072  lock_event_log_blocking_locks (thread_p, log_fp, entry_ptr);
2073 
2074  event_log_end (thread_p);
2075  }
2076 }
2077 #endif /* SERVER_MODE */
2078 
2079 #if defined(SERVER_MODE)
2080 /*
2081  * lock_set_tran_abort_reason - Set tran_abort_reason for the tran_index
2082  *
2083  * return: void
2084  * tran_index(in):
2085  * abort_reason(in):
2086  */
2087 static void
2088 lock_set_tran_abort_reason (int tran_index, TRAN_ABORT_REASON abort_reason)
2089 {
2090  LOG_TDES *tdes;
2091 
2092  tdes = LOG_FIND_TDES (tran_index);
2093  assert (tdes != NULL);
2094 
2095  tdes->tran_abort_reason = abort_reason;
2096 }
2097 
2098 /*
2099  * lock_set_error_for_aborted - Set error for unilaterally aborted
2100  *
2101  * return:
2102  *
2103  * entry_ptr(in): pointer to the entry for waiting
2104  *
2105  * Note:set error code for unilaterally aborted deadlock victim
2106  */
2107 static void
2108 lock_set_error_for_aborted (LK_ENTRY * entry_ptr)
2109 {
2110  const char *client_prog_name; /* Client user name for transaction */
2111  const char *client_user_name; /* Client user name for transaction */
2112  const char *client_host_name; /* Client host for transaction */
2113  int client_pid; /* Client process id for transaction */
2114 
2115  (void) logtb_find_client_name_host_pid (entry_ptr->tran_index, &client_prog_name, &client_user_name,
2116  &client_host_name, &client_pid);
2117  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_UNILATERALLY_ABORTED, 4, entry_ptr->tran_index, client_user_name,
2118  client_host_name, client_pid);
2119 }
2120 #endif /* SERVER_MODE */
2121 
2122 #if defined(SERVER_MODE)
2123 /*
2124  * lock_suspend - Suspend current thread (transaction)
2125  *
2126  * return: LOCK_WAIT_STATE (state of resumption)
2127  *
2128  * entry_ptr(in): lock entry for lock waiting
2129  * wait_msecs(in): lock wait milliseconds
2130  */
2131 static LOCK_WAIT_STATE
2132 lock_suspend (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr, int wait_msecs)
2133 {
2134  THREAD_ENTRY *p;
2135  struct timeval tv;
2136  int client_id;
2137  LOG_TDES *tdes;
2138 
2139  /* The threads must not hold a page latch to be blocked on a lock request. */
2140  assert (lock_is_safe_lock_with_page (thread_p, entry_ptr) || !pgbuf_has_perm_pages_fixed (thread_p));
2141 
2142  /* The caller is holding the thread entry mutex */
2143 
2144  if (lk_Gl.verbose_mode)
2145  {
2146  const char *__client_prog_name; /* Client program name for transaction */
2147  const char *__client_user_name; /* Client user name for transaction */
2148  const char *__client_host_name; /* Client host for transaction */
2149  int __client_pid; /* Client process id for transaction */
2150 
2151  fflush (stderr);
2152  fflush (stdout);
2153  logtb_find_client_name_host_pid (entry_ptr->tran_index, &__client_prog_name, &__client_user_name,
2154  &__client_host_name, &__client_pid);
2156  entry_ptr->thrd_entry->index, entry_ptr->tran_index, __client_prog_name, __client_user_name,
2157  __client_host_name, __client_pid);
2158  fflush (stdout);
2159  }
2160 
2161  /* register lock wait info. into the thread entry */
2162  entry_ptr->thrd_entry->lockwait = (void *) entry_ptr;
2163  gettimeofday (&tv, NULL);
2164  entry_ptr->thrd_entry->lockwait_stime = (tv.tv_sec * 1000000LL + tv.tv_usec) / 1000LL;
2165  entry_ptr->thrd_entry->lockwait_msecs = wait_msecs;
2166  entry_ptr->thrd_entry->lockwait_state = (int) LOCK_SUSPENDED;
2167 
2168  lk_Gl.TWFG_node[entry_ptr->tran_index].thrd_wait_stime = entry_ptr->thrd_entry->lockwait_stime;
2169  lk_Gl.deadlock_and_timeout_detector++;
2170 
2171  tdes = LOG_FIND_CURRENT_TDES (thread_p);
2172 
2173  /* I must not be a deadlock-victim thread */
2175 
2176  if (tdes)
2177  {
2178  tdes->waiting_for_res = entry_ptr->res_head;
2179  }
2180 
2181  lock_event_set_tran_wait_entry (entry_ptr->tran_index, entry_ptr);
2182 
2183  /* suspend the worker thread (transaction) */
2185 
2186  lk_Gl.deadlock_and_timeout_detector--;
2187  lk_Gl.TWFG_node[entry_ptr->tran_index].thrd_wait_stime = 0;
2188 
2189  if (tdes)
2190  {
2191  tdes->waiting_for_res = NULL;
2192  }
2193 
2194  lock_event_set_tran_wait_entry (entry_ptr->tran_index, NULL);
2195 
2196  if (entry_ptr->thrd_entry->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT)
2197  {
2198  /* a shutdown thread wakes me up */
2200  return LOCK_RESUMED_INTERRUPT;
2201  }
2202  else if (entry_ptr->thrd_entry->resume_status != THREAD_LOCK_RESUMED)
2203  {
2204  /* wake up with other reason */
2205  assert (false);
2207  return LOCK_RESUMED_INTERRUPT;
2208  }
2209  else
2210  {
2211  assert (entry_ptr->thrd_entry->resume_status == THREAD_LOCK_RESUMED);
2212  }
2213 
2214  thread_lock_entry (entry_ptr->thrd_entry);
2215  while (entry_ptr->thrd_entry->tran_next_wait)
2216  {
2217  p = entry_ptr->thrd_entry->tran_next_wait;
2218  entry_ptr->thrd_entry->tran_next_wait = p->tran_next_wait;
2219  p->tran_next_wait = NULL;
2221  }
2222  thread_unlock_entry (entry_ptr->thrd_entry);
2223 
2224  /* The thread has been awaken Before waking up the thread, the waker cleared the lockwait field of the thread entry
2225  * and set lockwait_state field of it to the resumed state while holding the thread entry mutex. After the wakeup, no
2226  * one can update the lockwait releated fields of the thread entry. Therefore, waken-up thread can read the values of
2227  * lockwait related fields of its own thread entry without holding thread entry mutex. */
2228 
2229  switch ((LOCK_WAIT_STATE) (entry_ptr->thrd_entry->lockwait_state))
2230  {
2231  case LOCK_RESUMED:
2232  /* The lock entry has already been moved to the holder list */
2233  return LOCK_RESUMED;
2234 
2236  /* The lock entry does exist within the blocked holder list or blocked waiter list. Therefore, current thread
2237  * must disconnect it from the list. */
2238  if (logtb_is_current_active (thread_p))
2239  {
2240  /* set error code */
2241  lock_set_error_for_aborted (entry_ptr);
2242  lock_set_tran_abort_reason (entry_ptr->tran_index, TRAN_ABORT_DUE_DEADLOCK);
2243 
2244  /* wait until other threads finish their works A css_server_thread is always running for this transaction.
2245  * so, wait until css_count_transaction_worker_threads () becomes 1 (except me) */
2246  if (css_count_transaction_worker_threads (thread_p, entry_ptr->tran_index, css_get_client_id (thread_p)) >= 1)
2247  {
2248  logtb_set_tran_index_interrupt (thread_p, entry_ptr->tran_index, true);
2249  while (true)
2250  {
2251  thread_sleep (10); /* sleep 10 msec */
2252  logtb_wakeup_thread_with_tran_index (entry_ptr->tran_index, THREAD_RESUME_DUE_TO_INTERRUPT);
2253 
2254  client_id = css_get_client_id (thread_p);
2255  if (css_count_transaction_worker_threads (thread_p, entry_ptr->tran_index, client_id) == 0)
2256  {
2257  break;
2258  }
2259  }
2260  logtb_set_tran_index_interrupt (thread_p, entry_ptr->tran_index, false);
2261  }
2262  }
2263  else
2264  {
2265  /* We are already aborting, fall through. Don't do double aborts that could cause an infinite loop. */
2266  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_ABORT_TRAN_TWICE, 1, entry_ptr->tran_index);
2267  /* er_log_debug(ARG_FILE_LINE, "lk_suspend: Likely a system error. Trying to abort a transaction
2268  * twice.\n"); */
2269 
2270  /* Since we deadlocked during an abort, forcibly remove all page latches of this transaction and hope this
2271  * transaction is the cause of the logjam. We are hoping that this frees things just enough to let other
2272  * transactions continue. Note it is not be safe to unlock object locks this way. */
2273  pgbuf_unfix_all (thread_p);
2274  }
2275  return LOCK_RESUMED_ABORTED;
2276 
2278  /* The lock entry does exist within the blocked holder list or blocked waiter list. Therefore, current thread
2279  * must diconnect it from the list. */
2280  /* If two or more threads, which were executing for one transaction, are selected as deadlock victims, one of
2281  * them is charged of the transaction abortion and the other threads are notified of timeout. */
2282  (void) lock_set_error_for_timeout (thread_p, entry_ptr);
2284 
2286  (void) lock_set_error_for_timeout (thread_p, entry_ptr);
2288 
2289  case LOCK_RESUMED_TIMEOUT:
2290  /* The lock entry does exist within the blocked holder list or blocked waiter list. Therefore, current thread
2291  * must diconnect it from the list. An error is ONLY set when the caller was willing to wait.
2292  * entry_ptr->thrd_entry->lockwait_msecs > 0 */
2293  (void) lock_set_error_for_timeout (thread_p, entry_ptr);
2294  return LOCK_RESUMED_TIMEOUT;
2295 
2298  return LOCK_RESUMED_INTERRUPT;
2299 
2300  case LOCK_SUSPENDED:
2301  default:
2302  /* Probabely, the waiting structure has not been removed from the waiting hash table. May be a system error. */
2303  (void) lock_set_error_for_timeout (thread_p, entry_ptr);
2304  return LOCK_RESUMED_TIMEOUT;
2305  }
2306 }
2307 #endif /* SERVER_MODE */
2308 
2309 #if defined(SERVER_MODE)
2310 /*
2311  * lk_resume - Resume the thread (transaction)
2312  *
2313  * return:
2314  *
2315  * entry_ptr(in):
2316  * state(in): resume state
2317  */
2318 static void
2319 lock_resume (LK_ENTRY * entry_ptr, int state)
2320 {
2321  /* The caller is holding the thread entry mutex */
2322  /* The caller has identified the fact that lockwait is not NULL. that is, the thread is suspended. */
2323  if (lk_Gl.verbose_mode == true)
2324  {
2325  const char *__client_prog_name; /* Client program name for transaction */
2326  const char *__client_user_name; /* Client user name for transaction */
2327  const char *__client_host_name; /* Client host for transaction */
2328  int __client_pid; /* Client process id for transaction */
2329 
2330  fflush (stderr);
2331  fflush (stdout);
2332  (void) logtb_find_client_name_host_pid (entry_ptr->tran_index, &__client_prog_name, &__client_user_name,
2333  &__client_host_name, &__client_pid);
2335  entry_ptr->tran_index, entry_ptr->tran_index, __client_prog_name, __client_user_name, __client_host_name,
2336  __client_pid);
2337  fflush (stdout);
2338  }
2339 
2340  /* Before wake up the thread, clears lockwait field and set lockwait_state with the given state. */
2341  entry_ptr->thrd_entry->lockwait = NULL;
2342  entry_ptr->thrd_entry->lockwait_state = (int) state;
2343 
2344  /* wakes up the thread and release the thread entry mutex */
2345  entry_ptr->thrd_entry->resume_status = THREAD_LOCK_RESUMED;
2346  pthread_cond_signal (&entry_ptr->thrd_entry->wakeup_cond);
2347  thread_unlock_entry (entry_ptr->thrd_entry);
2348 }
2349 #endif /* SERVER_MODE */
2350 
2351 #if defined(SERVER_MODE)
2352 /*
2353  * lock_wakeup_deadlock_victim_timeout - Wake up the deadlock victim while notifying timeout
2354  *
2355  * return: true if the transaction is treated as deadlock victim or
2356  * false if the transaction is not treated as deadlock victim.
2357  * in this case, the transaction has already been waken up
2358  * by other threads with other purposes(ex. lock is granted)
2359  *
2360  * tran_index(in): deadlock victim transaction
2361  *
2362  * Note:The given transaction was selected as a deadlock victim in the last
2363  * deadlock detection. The deadlock victim is waked up and noitified of
2364  * timeout by this function if the deadlock victim is still suspended.
2365  */
2366 static bool
2367 lock_wakeup_deadlock_victim_timeout (int tran_index)
2368 {
2369  tran_lock_waiters_array_type tran_lock_waiters;
2370  THREAD_ENTRY **thrd_array;
2371  size_t thrd_count, i;
2372  THREAD_ENTRY *thrd_ptr;
2373  bool wakeup_first = false;
2374 
2375  thrd_count = 0;
2376  lock_get_transaction_lock_waiting_threads (tran_index, tran_lock_waiters, thrd_count);
2377  thrd_array = tran_lock_waiters.data ();
2378 
2379  for (i = 0; i < thrd_count; i++)
2380  {
2381  thrd_ptr = thrd_array[i];
2382  thread_lock_entry (thrd_ptr);
2383  if (thrd_ptr->tran_index == tran_index && LK_IS_LOCKWAIT_THREAD (thrd_ptr))
2384  {
2385  /* wake up the thread while notifying timeout */
2386  lock_resume ((LK_ENTRY *) thrd_ptr->lockwait, LOCK_RESUMED_DEADLOCK_TIMEOUT);
2387  wakeup_first = true;
2388  }
2389  else
2390  {
2391  if (thrd_ptr->lockwait != NULL || thrd_ptr->lockwait_state == (int) LOCK_SUSPENDED)
2392  {
2393  /* some strange lock wait state.. */
2395  thrd_ptr->lockwait_state, thrd_ptr->index, thrd_ptr->get_posix_id (), thrd_ptr->tran_index);
2396  }
2397  /* The current thread has already been waken up by other threads. The current thread might be granted the
2398  * lock. or with any other reason....... even if it is a thread of the deadlock victim. */
2399  /* release the thread entry mutex */
2400  thread_unlock_entry (thrd_ptr);
2401  }
2402  }
2403  return wakeup_first;
2404 }
2405 #endif /* SERVER_MODE */
2406 
2407 #if defined(SERVER_MODE)
2408 /*
2409  * lock_wakeup_deadlock_victim_aborted - Wake up the deadlock victim while notifying aborted
2410  *
2411  * return: true if the transaction is treated as deadlock victim or
2412  * false if the transaction is not treated as deadlock victim.
2413  * in this case, the transaction has already been waken up
2414  * by other threads with other purposes(ex. lock is granted)
2415  *
2416  * tran_index(in): deadlock victim transaction
2417  *
2418  * Note:The given transaction was selected as a deadlock victim in the last
2419  * deadlock detection. The deadlock victim is waked up and noitified of
2420  * abortion by this function if the deadlock victim is still suspended.
2421  */
2422 static bool
2423 lock_wakeup_deadlock_victim_aborted (int tran_index)
2424 {
2425  tran_lock_waiters_array_type tran_lock_waiters;
2426  THREAD_ENTRY **thrd_array;
2427  size_t thrd_count, i;
2428  THREAD_ENTRY *thrd_ptr;
2429  bool wakeup_first = false;
2430 
2431  thrd_count = 0;
2432  lock_get_transaction_lock_waiting_threads (tran_index, tran_lock_waiters, thrd_count);
2433  thrd_array = tran_lock_waiters.data ();
2434 
2435  for (i = 0; i < thrd_count; i++)
2436  {
2437  thrd_ptr = thrd_array[i];
2438  thread_lock_entry (thrd_ptr);
2439  if (thrd_ptr->tran_index == tran_index && LK_IS_LOCKWAIT_THREAD (thrd_ptr))
2440  {
2441  /* wake up the thread while notifying deadlock victim */
2442  if (wakeup_first == false)
2443  {
2444  /* The current transaction is really aborted. Therefore, other threads of the current transaction must
2445  * quit their executions and return to client. Then the first waken-up thread must be charge of the
2446  * rollback of the current transaction. */
2447  /* set the transaction as deadlock victim */
2448  lk_Gl.TWFG_node[tran_index].DL_victim = true;
2449  lock_resume ((LK_ENTRY *) thrd_ptr->lockwait, LOCK_RESUMED_ABORTED_FIRST);
2450  wakeup_first = true;
2451  }
2452  else
2453  {
2454  lock_resume ((LK_ENTRY *) thrd_ptr->lockwait, LOCK_RESUMED_ABORTED_OTHER);
2455  }
2456  }
2457  else
2458  {
2459  if (thrd_ptr->lockwait != NULL || thrd_ptr->lockwait_state == (int) LOCK_SUSPENDED)
2460  {
2461  /* some strange lock wait state.. */
2463  thrd_ptr->lockwait_state, thrd_ptr->index, thrd_ptr->get_posix_id (), thrd_ptr->tran_index);
2464  }
2465  /* The current thread has already been waken up by other threads. The current thread might have held the
2466  * lock. or with any other reason....... even if it is a thread of the deadlock victim. */
2467  /* release the thread entry mutex */
2468  thread_unlock_entry (thrd_ptr);
2469  }
2470  }
2471  return wakeup_first;
2472 }
2473 #endif /* SERVER_MODE */
2474 
2475 
2476 /*
2477  * Private Functions Group: grant lock requests of blocked threads
2478  * - lock_grant_blocked_holder()
2479  * - lock_grant_blocked_waiter()
2480  * - lock_grant_blocked_waiter_partial()
2481  */
2482 
2483 #if defined(SERVER_MODE)
2484 /*
2485  * lock_grant_blocked_holder - Grant blocked holders
2486  *
2487  * return:
2488  *
2489  * res_ptr(in): This function grants blocked holders whose blocked lock mode is
2490  * compatible with all the granted lock mode of non-blocked holders.
2491  */
2492 static void
2493 lock_grant_blocked_holder (THREAD_ENTRY * thread_p, LK_RES * res_ptr)
2494 {
2495  LK_ENTRY *prev_holder;
2496  LK_ENTRY *holder, *h, *prev;
2497  LOCK mode;
2498  LOCK_COMPATIBILITY compat;
2499 
2500  /* The caller is holding a resource mutex */
2501 
2502  prev_holder = NULL;
2503  holder = res_ptr->holder;
2504  while (holder != NULL && holder->blocked_mode != NULL_LOCK)
2505  {
2506  /* there are some blocked holders */
2507  mode = NULL_LOCK;
2508  for (h = holder->next; h != NULL; h = h->next)
2509  {
2510  assert (h->granted_mode >= NULL_LOCK && mode >= NULL_LOCK);
2511  mode = lock_Conv[h->granted_mode][mode];
2512  assert (mode != NA_LOCK);
2513  }
2514 
2515  assert (holder->blocked_mode >= NULL_LOCK);
2516  compat = lock_Comp[holder->blocked_mode][mode];
2517  assert (compat != LOCK_COMPAT_UNKNOWN);
2518 
2519  if (compat == LOCK_COMPAT_NO)
2520  {
2521  break; /* stop the granting */
2522  }
2523 
2524  /* compatible: grant it */
2525 
2526  /* hold the thread entry mutex */
2527  thread_lock_entry (holder->thrd_entry);
2528 
2529  /* check if the thread is still waiting on a lock */
2530  if (LK_IS_LOCKWAIT_THREAD (holder->thrd_entry))
2531  {
2532  /* the thread is still waiting on a lock */
2533 
2534  /* reposition the lock entry according to UPR */
2535  for (prev = holder, h = holder->next; h != NULL; prev = h, h = h->next)
2536  {
2537  if (h->blocked_mode == NULL_LOCK)
2538  {
2539  break;
2540  }
2541  }
2542  if (prev != holder)
2543  { /* reposition it */
2544  /* remove it */
2545  if (prev_holder == NULL)
2546  {
2547  res_ptr->holder = holder->next;
2548  }
2549  else
2550  {
2551  prev_holder->next = holder->next;
2552  }
2553  /* insert it */
2554  holder->next = prev->next;
2555  prev->next = holder;
2556  }
2557 
2558  /* change granted_mode and blocked_mode */
2559  holder->granted_mode = holder->blocked_mode;
2560  holder->blocked_mode = NULL_LOCK;
2561 
2562  /* reflect the granted lock in the non2pl list */
2563  lock_update_non2pl_list (thread_p, res_ptr, holder->tran_index, holder->granted_mode);
2564 
2565  /* Record number of acquired locks */
2567 #if defined(LK_TRACE_OBJECT)
2568  LK_MSG_LOCK_ACQUIRED (entry_ptr);
2569 #endif /* LK_TRACE_OBJECT */
2570  /* wake up the blocked holder */
2571  lock_resume (holder, LOCK_RESUMED);
2572  }
2573  else
2574  {
2575  if (holder->thrd_entry->lockwait != NULL || holder->thrd_entry->lockwait_state == (int) LOCK_SUSPENDED)
2576  {
2577  /* some strange lock wait state.. */
2578  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, ER_LK_STRANGE_LOCK_WAIT, 5, holder->thrd_entry->lockwait,
2579  holder->thrd_entry->lockwait_state, holder->thrd_entry->index,
2580  holder->thrd_entry->get_posix_id (), holder->thrd_entry->tran_index);
2581  }
2582  /* The thread is not waiting for a lock, currently. That is, the thread has already been waked up by timeout,
2583  * deadlock victim or interrupt. In this case, we have nothing to do since the thread itself will remove this
2584  * lock entry. */
2585  thread_unlock_entry (holder->thrd_entry);
2586  prev_holder = holder;
2587  }
2588 
2589  if (prev_holder == NULL)
2590  {
2591  holder = res_ptr->holder;
2592  }
2593  else
2594  {
2595  holder = prev_holder->next;
2596  }
2597  }
2598 
2599 }
2600 #endif /* SERVER_MODE */
2601 
2602 #if defined(SERVER_MODE)
2603 /*
2604  * lock_grant_blocked_waiter - Grant blocked waiters
2605  *
2606  * return:
2607  *
2608  * res_ptr(in): This function grants blocked waiters whose blocked lock mode is
2609  * compatible with the total mode of lock holders.
2610  */
2611 static int
2612 lock_grant_blocked_waiter (THREAD_ENTRY * thread_p, LK_RES * res_ptr)
2613 {
2614  LK_ENTRY *prev_waiter;
2615  LK_ENTRY *waiter, *w;
2616  LOCK mode;
2617  bool change_total_waiters_mode = false;
2618  int error_code = NO_ERROR;
2619  LOCK_COMPATIBILITY compat;
2620 
2621  /* The caller is holding a resource mutex */
2622 
2623  prev_waiter = NULL;
2624  waiter = res_ptr->waiter;
2625  while (waiter != NULL)
2626  {
2627  assert (waiter->blocked_mode >= NULL_LOCK && res_ptr->total_holders_mode >= NULL_LOCK);
2628  compat = lock_Comp[waiter->blocked_mode][res_ptr->total_holders_mode];
2629  assert (compat != LOCK_COMPAT_UNKNOWN);
2630 
2631  if (compat == LOCK_COMPAT_NO)
2632  {
2633  break; /* stop the granting */
2634  }
2635 
2636  /* compatible: grant it */
2637  /* hold the thread entry mutex */
2638  thread_lock_entry (waiter->thrd_entry);
2639 
2640  /* check if the thread is still waiting for a lock */
2641  if (LK_IS_LOCKWAIT_THREAD (waiter->thrd_entry))
2642  {
2643  int owner_tran_index;
2644 
2645  /* The thread is still waiting for a lock. */
2646  change_total_waiters_mode = true;
2647 
2648  /* remove the lock entry from the waiter */
2649  if (prev_waiter == NULL)
2650  {
2651  res_ptr->waiter = waiter->next;
2652  }
2653  else
2654  {
2655  prev_waiter->next = waiter->next;
2656  }
2657 
2658  /* change granted_mode and blocked_mode of the entry */
2659  waiter->granted_mode = waiter->blocked_mode;
2660  waiter->blocked_mode = NULL_LOCK;
2661 
2662  /* position the lock entry in the holder list */
2663  lock_position_holder_entry (res_ptr, waiter);
2664 
2665  /* change total_holders_mode */
2666  assert (waiter->granted_mode >= NULL_LOCK && res_ptr->total_holders_mode >= NULL_LOCK);
2667  res_ptr->total_holders_mode = lock_Conv[waiter->granted_mode][res_ptr->total_holders_mode];
2668  assert (res_ptr->total_holders_mode != NA_LOCK);
2669 
2670  /* insert the lock entry into transaction hold list. */
2671  owner_tran_index = LOG_FIND_THREAD_TRAN_INDEX (waiter->thrd_entry);
2672  lock_insert_into_tran_hold_list (waiter, owner_tran_index);
2673 
2674  /* reflect the granted lock in the non2pl list */
2675  lock_update_non2pl_list (thread_p, res_ptr, waiter->tran_index, waiter->granted_mode);
2676 
2677  /* Record number of acquired locks */
2679 #if defined(LK_TRACE_OBJECT)
2680  LK_MSG_LOCK_ACQUIRED (entry_ptr);
2681 #endif /* LK_TRACE_OBJECT */
2682 
2683  /* wake up the blocked waiter */
2684  lock_resume (waiter, LOCK_RESUMED);
2685  }
2686  else
2687  {
2688  if (waiter->thrd_entry->lockwait != NULL || waiter->thrd_entry->lockwait_state == (int) LOCK_SUSPENDED)
2689  {
2690  /* some strange lock wait state.. */
2691  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, ER_LK_STRANGE_LOCK_WAIT, 5, waiter->thrd_entry->lockwait,
2692  waiter->thrd_entry->lockwait_state, waiter->thrd_entry->index,
2693  waiter->thrd_entry->get_posix_id (), waiter->thrd_entry->tran_index);
2694  error_code = ER_LK_STRANGE_LOCK_WAIT;
2695  }
2696  /* The thread is not waiting on the lock, currently. That is, the thread has already been waken up by lock
2697  * timeout, deadlock victim or interrupt. In this case, we have nothing to do since the thread itself will
2698  * remove this lock entry. */
2699  thread_unlock_entry (waiter->thrd_entry);
2700  prev_waiter = waiter;
2701  }
2702 
2703  if (prev_waiter == NULL)
2704  {
2705  waiter = res_ptr->waiter;
2706  }
2707  else
2708  {
2709  waiter = prev_waiter->next;
2710  }
2711  }
2712 
2713  if (change_total_waiters_mode == true)
2714  {
2715  mode = NULL_LOCK;
2716  for (w = res_ptr->waiter; w != NULL; w = w->next)
2717  {
2718  assert (w->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
2719  mode = lock_Conv[w->blocked_mode][mode];
2720  assert (mode != NA_LOCK);
2721  }
2722  res_ptr->total_waiters_mode = mode;
2723  }
2724 
2725  return error_code;
2726 }
2727 #endif /* SERVER_MODE */
2728 
2729 #if defined(SERVER_MODE)
2730 /*
2731  * lock_grant_blocked_waiter_partial - Grant blocked waiters partially
2732  *
2733  * return:
2734  *
2735  * res_ptr(in):
2736  * from_whom(in):
2737  *
2738  * Note:This function grants blocked waiters that are located from from_whom
2739  * to the end of waiter list whose blocked lock mode is compatible with
2740  * all the blocked mode of the previous lock waiters and the total mode
2741  * of lock holders.
2742  */
2743 static void
2744 lock_grant_blocked_waiter_partial (THREAD_ENTRY * thread_p, LK_RES * res_ptr, LK_ENTRY * from_whom)
2745 {
2746  LK_ENTRY *prev_check;
2747  LK_ENTRY *check, *i;
2748  LOCK mode;
2749  LOCK_COMPATIBILITY compat;
2750 
2751  /* the caller is holding a resource mutex */
2752 
2753  mode = NULL_LOCK;
2754  prev_check = NULL;
2755  check = res_ptr->waiter;
2756  while (check != from_whom)
2757  {
2758  assert (check->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
2759  mode = lock_Conv[check->blocked_mode][mode];
2760  assert (mode != NA_LOCK);
2761 
2762  prev_check = check;
2763  check = check->next;
2764  }
2765 
2766  /* check = from_whom; */
2767  while (check != NULL)
2768  {
2769  assert (check->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
2770  compat = lock_Comp[check->blocked_mode][mode];
2771  assert (compat != LOCK_COMPAT_UNKNOWN);
2772 
2773  if (compat != LOCK_COMPAT_YES)
2774  {
2775  break;
2776  }
2777 
2778  assert (check->blocked_mode >= NULL_LOCK && res_ptr->total_holders_mode >= NULL_LOCK);
2779  compat = lock_Comp[check->blocked_mode][res_ptr->total_holders_mode];
2780  assert (compat != LOCK_COMPAT_UNKNOWN);
2781 
2782  if (compat == LOCK_COMPAT_NO)
2783  {
2784  assert (check->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
2785  mode = lock_Conv[check->blocked_mode][mode];
2786  assert (mode != NA_LOCK);
2787 
2788  prev_check = check;
2789  check = check->next;
2790  continue;
2791  }
2792 
2793  /* compatible: grant it */
2794  thread_lock_entry (check->thrd_entry);
2795  if (LK_IS_LOCKWAIT_THREAD (check->thrd_entry))
2796  {
2797  int owner_tran_index;
2798 
2799  /* the thread is waiting on a lock */
2800  /* remove the lock entry from the waiter */
2801  if (prev_check == NULL)
2802  {
2803  res_ptr->waiter = check->next;
2804  }
2805  else
2806  {
2807  prev_check->next = check->next;
2808  }
2809 
2810  /* change granted_mode and blocked_mode of the entry */
2811  check->granted_mode = check->blocked_mode;
2812  check->blocked_mode = NULL_LOCK;
2813 
2814  /* position the lock entry into the holder list */
2815  lock_position_holder_entry (res_ptr, check);
2816 
2817  /* change total_holders_mode */
2818  assert (check->granted_mode >= NULL_LOCK && res_ptr->total_holders_mode >= NULL_LOCK);
2819  res_ptr->total_holders_mode = lock_Conv[check->granted_mode][res_ptr->total_holders_mode];
2820  assert (res_ptr->total_holders_mode != NA_LOCK);
2821 
2822  /* insert into transaction lock hold list */
2823  owner_tran_index = LOG_FIND_THREAD_TRAN_INDEX (check->thrd_entry);
2824  lock_insert_into_tran_hold_list (check, owner_tran_index);
2825 
2826  /* reflect the granted lock in the non2pl list */
2827  lock_update_non2pl_list (thread_p, res_ptr, check->tran_index, check->granted_mode);
2828 
2829  /* Record number of acquired locks */
2831 #if defined(LK_TRACE_OBJECT)
2832  LK_MSG_LOCK_ACQUIRED (entry_ptr);
2833 #endif /* LK_TRACE_OBJECT */
2834 
2835  /* wake up the blocked waiter (correctness must be checked) */
2836  lock_resume (check, LOCK_RESUMED);
2837  }
2838  else
2839  {
2840  if (check->thrd_entry->lockwait != NULL || check->thrd_entry->lockwait_state == (int) LOCK_SUSPENDED)
2841  {
2842  /* some strange lock wait state.. */
2843  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, ER_LK_STRANGE_LOCK_WAIT, 5, check->thrd_entry->lockwait,
2844  check->thrd_entry->lockwait_state, check->thrd_entry->index, check->thrd_entry->get_posix_id (),
2845  check->thrd_entry->tran_index);
2846  }
2847  /* The thread is not waiting on the lock. That is, the thread has already been waken up by lock timeout,
2848  * deadlock victim or interrupt. In this case, we have nothing to do since the thread itself will remove this
2849  * lock entry. */
2850  thread_unlock_entry (check->thrd_entry);
2851 
2852  /* change prev_check */
2853  assert (check->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
2854  mode = lock_Conv[check->blocked_mode][mode];
2855  assert (mode != NA_LOCK);
2856 
2857  prev_check = check;
2858  }
2859 
2860  if (prev_check == NULL)
2861  {
2862  check = res_ptr->waiter;
2863  }
2864  else
2865  {
2866  check = prev_check->next;
2867  }
2868  }
2869 
2870  if (check == NULL)
2871  {
2872  res_ptr->total_waiters_mode = mode;
2873  }
2874  else
2875  {
2876  mode = NULL_LOCK;
2877  for (i = res_ptr->waiter; i != NULL; i = i->next)
2878  {
2879  assert (i->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
2880  mode = lock_Conv[i->blocked_mode][mode];
2881  assert (mode != NA_LOCK);
2882  }
2883  res_ptr->total_waiters_mode = mode;
2884  }
2885 
2886 }
2887 #endif /* SERVER_MODE */
2888 
2889 #if defined(SERVER_MODE)
2890 /*
2891  * lock_check_escalate- check if lcok counts over escalation limits or not
2892  *
2893  * return: true if escalation is needed.
2894  * thread_p(in):
2895  * class_entry(in):
2896  * tran_lock(in):
2897  *
2898  */
2899 static bool
2900 lock_check_escalate (THREAD_ENTRY * thread_p, LK_ENTRY * class_entry, LK_TRAN_LOCK * tran_lock)
2901 {
2902  LK_ENTRY *superclass_entry = NULL;
2903 
2904  if (class_entry->granted_mode == BU_LOCK)
2905  {
2906  // disallow lock escalation for bulk updates
2907  return false;
2908  }
2909 
2910  if (tran_lock->lock_escalation_on == true)
2911  {
2912  /* An another thread of current transaction is doing lock escalation. Therefore, the current thread gives up
2913  * doing lock escalation. */
2914  return false;
2915  }
2916 
2917  /* It cannot do lock escalation if class_entry is NULL */
2918  if (class_entry == NULL)
2919  {
2920  return false;
2921  }
2922 
2923  superclass_entry = class_entry->class_entry;
2924 
2925  /* check if the lock escalation is needed. */
2926  if (superclass_entry != NULL && !OID_IS_ROOTOID (&superclass_entry->res_head->key.oid))
2927  {
2928  /* Superclass_entry points to a root class in a class hierarchy. Escalate locks only if the criteria for the
2929  * superclass is met. Superclass keeps a counter for all locks set in the hierarchy. */
2930  if (superclass_entry->ngranules < prm_get_integer_value (PRM_ID_LK_ESCALATION_AT))
2931  {
2932  return false;
2933  }
2934  }
2935  else if (class_entry->ngranules < prm_get_integer_value (PRM_ID_LK_ESCALATION_AT))
2936  {
2937  return false;
2938  }
2939 
2940  return true;
2941 }
2942 
2943 
2944 /*
2945  * lock_escalate_if_needed -
2946  *
2947  * return: one of following values
2948  * LK_GRANTED
2949  * LK_NOTGRANTED_DUE_ABORTED
2950  * LK_NOTGRANTED_DUE_TIMEOUT
2951  * LK_NOTGRANTED_DUE_ERROR
2952  *
2953  * class_entry(in):
2954  * tran_index(in):
2955  *
2956  * Note:This function check if lock escalation is needed at first.
2957  * If lock escalation is needed, that is, an escalation threshold is over,
2958  * this function converts instance lock(s) to a class lock and
2959  * releases unnecessary instance locks.
2960  */
2961 static int
2962 lock_escalate_if_needed (THREAD_ENTRY * thread_p, LK_ENTRY * class_entry, int tran_index)
2963 {
2964  LK_TRAN_LOCK *tran_lock;
2965  LOCK max_class_lock = NULL_LOCK; /* escalated class lock mode */
2966  int granted;
2967  int wait_msecs;
2968  int rv;
2969 
2970  /* check lock escalation count */
2971  tran_lock = &lk_Gl.tran_lock_table[tran_index];
2972  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
2973 
2974  if (lock_check_escalate (thread_p, class_entry, tran_lock) == false)
2975  {
2976  pthread_mutex_unlock (&tran_lock->hold_mutex);
2977  return LK_NOTGRANTED;
2978  }
2979 
2980  /* abort lock escalation if lock_escalation_abort = yes */
2982  {
2985 
2986  lock_set_error_for_aborted (class_entry);
2987  lock_set_tran_abort_reason (class_entry->tran_index, TRAN_ABORT_DUE_ROLLBACK_ON_ESCALATION);
2988 
2989  pthread_mutex_unlock (&tran_lock->hold_mutex);
2991  }
2992 
2993  /* lock escalation should be performed */
2994  tran_lock->lock_escalation_on = true;
2995 
2996  if (class_entry->granted_mode == NULL_LOCK || class_entry->granted_mode == S_LOCK
2997  || class_entry->granted_mode == X_LOCK || class_entry->granted_mode == SCH_M_LOCK)
2998  {
2999  /* The class has no instance lock. */
3000  tran_lock->lock_escalation_on = false;
3001  pthread_mutex_unlock (&tran_lock->hold_mutex);
3002  return LK_GRANTED;
3003  }
3004 
3005  /* class_entry->granted_mode : IS_LOCK, IX_LOCK or SIX_LOCK */
3006 
3007  /* Because to count the shared and exclusive instance locks may cause high CPU usage, we used a simple rule to decide
3008  * the escalated class lock mode */
3009  if (class_entry->granted_mode == IX_LOCK || class_entry->granted_mode == SIX_LOCK)
3010  {
3011  max_class_lock = X_LOCK;
3012  }
3013  else
3014  {
3015  max_class_lock = S_LOCK;
3016  }
3017 
3018  pthread_mutex_unlock (&tran_lock->hold_mutex);
3019 
3020  if (max_class_lock != NULL_LOCK)
3021  {
3022  /*
3023  * lock escalation is performed
3024  * 1. hold a lock on the class with the escalated lock mode
3025  */
3026  wait_msecs = LK_FORCE_ZERO_WAIT; /* Conditional Locking */
3027  granted = lock_internal_perform_lock_object (thread_p, tran_index, &class_entry->res_head->key.oid, NULL,
3028  max_class_lock, wait_msecs, &class_entry, NULL);
3029  if (granted != LK_GRANTED)
3030  {
3031  /* The reason of the lock request failure: 1. interrupt 2. shortage of lock resource entries 3. shortage of
3032  * lock entries */
3033  /* reset lock_escalation_on */
3034  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
3035  tran_lock->lock_escalation_on = false;
3036  pthread_mutex_unlock (&tran_lock->hold_mutex);
3037  return granted;
3038  }
3039 
3040  /* 2. release original class lock only one time in order to maintain original class lock count */
3041  lock_internal_perform_unlock_object (thread_p, class_entry, false, true);
3042  }
3043 
3044  /* reset lock_escalation_on */
3045  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
3046  tran_lock->lock_escalation_on = false;
3047  pthread_mutex_unlock (&tran_lock->hold_mutex);
3048 
3049  return LK_GRANTED;
3050 }
3051 #endif /* SERVER_MODE */
3052 
3053 /*
3054  * Private Functions Group: major functions for locking and unlocking
3055  *
3056  * - lk_internal_lock_object_instant()
3057  * - lk_internal_lock_object()
3058  * - lk_internal_unlock_object()
3059  */
3060 
3061 #if defined(SERVER_MODE)
3062 /*
3063  * lock_internal_hold_lock_object_instant - Hold object lock with instant duration
3064  *
3065  * return: LK_GRANTED/LK_NOTGRANTED/LK_NOTGRANTED_DUE_ERROR
3066  *
3067  * tran_index(in):
3068  * oid(in):
3069  * class_oid(in):
3070  * lock(in):
3071  *
3072  * Note:hold a lock on the given object with instant duration.
3073  */
3074 static int
3075 lock_internal_hold_lock_object_instant (THREAD_ENTRY * thread_p, int tran_index, const OID * oid, const OID * class_oid,
3076  LOCK lock)
3077 {
3078  LK_RES_KEY search_key;
3079  LK_RES *res_ptr;
3080  LK_ENTRY *entry_ptr, *i;
3081  LOCK new_mode;
3082  LOCK group_mode;
3083  int compat1, compat2;
3084 
3085 #if defined(LK_DUMP)
3086  if (lk_Gl.dump_level >= 1)
3087  {
3088  fprintf (stderr,
3089  "LK_DUMP::lk_internal_lock_object_instant()\n"
3090  " tran(%2d) : oid(%2d|%3d|%3d), class_oid(%2d|%3d|%3d), LOCK(%7s)\n", tran_index, oid->volid,
3091  oid->pageid, oid->slotid, class_oid ? class_oid->volid : -1, class_oid ? class_oid->pageid : -1,
3092  class_oid ? class_oid->slotid : -1, LOCK_TO_LOCKMODE_STRING (lock));
3093  }
3094 #endif /* LK_DUMP */
3095 
3096  if (class_oid != NULL && !OID_IS_ROOTOID (class_oid))
3097  {
3098  /* instance lock request */
3099  /* check if an implicit lock has been acquired */
3100  if (lock_is_class_lock_escalated (lock_get_object_lock (class_oid, oid_Root_class_oid), lock) == true)
3101  {
3102  return LK_GRANTED;
3103  }
3104  }
3105 
3106  /* search hash table */
3107  search_key = lock_create_search_key ((OID *) oid, (OID *) class_oid);
3108  res_ptr = lk_Gl.m_obj_hash_table.find (thread_p, search_key);
3109  if (res_ptr == NULL)
3110  {
3111  /* the lockable object is NOT in the hash chain */
3112  /* the request can be granted */
3113  return LK_GRANTED;
3114  }
3115 
3116  /* the lockable object exists in the hash chain */
3117  /* So, check whether I am a holder of the object. */
3118  /* find the lock entry of current transaction */
3119  for (entry_ptr = res_ptr->holder; entry_ptr != NULL; entry_ptr = entry_ptr->next)
3120  {
3121  if (entry_ptr->tran_index == tran_index)
3122  {
3123  break;
3124  }
3125  }
3126 
3127  /* I am not a lock holder of the lockable object. */
3128  if (entry_ptr == NULL)
3129  {
3130  assert (lock >= NULL_LOCK && res_ptr->total_waiters_mode >= NULL_LOCK
3131  && res_ptr->total_holders_mode >= NULL_LOCK);
3132 
3133  compat1 = lock_Comp[lock][res_ptr->total_waiters_mode];
3134  compat2 = lock_Comp[lock][res_ptr->total_holders_mode];
3135  assert (compat1 != LOCK_COMPAT_UNKNOWN && compat2 != LOCK_COMPAT_UNKNOWN);
3136 
3137  if (compat1 == LOCK_COMPAT_YES && compat2 == LOCK_COMPAT_YES)
3138  {
3139  pthread_mutex_unlock (&res_ptr->res_mutex);
3140  return LK_GRANTED;
3141  }
3142  else
3143  {
3144  pthread_mutex_unlock (&res_ptr->res_mutex);
3145  return LK_NOTGRANTED;
3146  }
3147  }
3148 
3149  /* I am a lock holder of the lockable object. */
3150  assert (lock >= NULL_LOCK && entry_ptr->granted_mode >= NULL_LOCK);
3151  new_mode = lock_Conv[lock][entry_ptr->granted_mode];
3152  assert (new_mode != NA_LOCK);
3153 
3154  if (new_mode == entry_ptr->granted_mode)
3155  {
3156  /* a request with either a less exclusive or an equal mode of lock */
3157  pthread_mutex_unlock (&res_ptr->res_mutex);
3158  return LK_GRANTED;
3159  }
3160  else
3161  {
3162  /* check the compatibility with other holders' granted mode */
3163  group_mode = NULL_LOCK;
3164  for (i = res_ptr->holder; i != NULL; i = i->next)
3165  {
3166  if (i != entry_ptr)
3167  {
3168  assert (i->granted_mode >= NULL_LOCK && group_mode >= NULL_LOCK);
3169  group_mode = lock_Conv[i->granted_mode][group_mode];
3170  assert (group_mode != NA_LOCK);
3171  }
3172  }
3173 
3174  assert (new_mode >= NULL_LOCK && group_mode >= NULL_LOCK);
3175  compat1 = lock_Comp[new_mode][group_mode];
3176  assert (compat1 != LOCK_COMPAT_UNKNOWN);
3177 
3178  if (compat1 == LOCK_COMPAT_YES)
3179  {
3180  pthread_mutex_unlock (&res_ptr->res_mutex);
3181  return LK_GRANTED;
3182  }
3183  else
3184  {
3185  pthread_mutex_unlock (&res_ptr->res_mutex);
3186  return LK_NOTGRANTED;
3187  }
3188  }
3189 }
3190 #endif /* SERVER_MODE */
3191 
3192 #if defined(SERVER_MODE)
3193 /*
3194  * lock_internal_perform_lock_object - Performs actual object lock operation
3195  *
3196  * return: one of following values
3197  * LK_GRANTED
3198  * LK_NOTGRANTED_DUE_ABORTED
3199  * LK_NOTGRANTED_DUE_TIMEOUT
3200  * LK_NOTGRANTED_DUE_ERROR
3201  *
3202  * tran_index(in):
3203  * oid(in):
3204  * class_oid(in):
3205  * lock(in):
3206  * wait_msecs(in):
3207  * entry_addr_ptr(in):
3208  * class_entry(in):
3209  *
3210  * Note:lock an object whose id is pointed by oid with given lock mode 'lock'.
3211  *
3212  * If cond_flag is true and the object has already been locked
3213  * by other transaction, then return LK_NOTGRANTED;
3214  * else this transaction is suspended until it can acquire the lock.
3215  */
3216 static int
3217 lock_internal_perform_lock_object (THREAD_ENTRY * thread_p, int tran_index, const OID * oid, const OID * class_oid,
3218  LOCK lock, int wait_msecs, LK_ENTRY ** entry_addr_ptr, LK_ENTRY * class_entry)
3219 {
3220  LF_TRAN_ENTRY *t_entry_ent = thread_get_tran_entry (thread_p, THREAD_TS_OBJ_LOCK_ENT);
3221  LK_RES_KEY search_key;
3222  TRAN_ISOLATION isolation;
3223  int ret_val;
3224  LOCK group_mode, old_mode, new_mode; /* lock mode */
3225  LK_RES *res_ptr;
3226  LK_ENTRY *entry_ptr = NULL;
3227  LK_ENTRY *wait_entry_ptr = NULL;
3228  LK_ENTRY *prev, *curr, *i;
3229  bool lock_conversion = false;
3230  THREAD_ENTRY *thrd_entry;
3231  LK_TRAN_LOCK *tran_lock;
3232  bool is_instant_duration;
3233  LOCK_COMPATIBILITY compat1, compat2;
3234  bool is_res_mutex_locked = false;
3235  TSC_TICKS start_tick, end_tick;
3236  TSCTIMEVAL tv_diff;
3237  UINT64 lock_wait_time;
3238 
3239 #if defined(ENABLE_SYSTEMTAP)
3240  const OID *class_oid_for_marker_p;
3241  const OID *oid_for_marker_p;
3242 #endif /* ENABLE_SYSTEMTAP */
3243 
3244  assert (!OID_ISNULL (oid));
3245  assert (class_oid == NULL || !OID_ISNULL (class_oid));
3246 
3247 #if defined(ENABLE_SYSTEMTAP)
3248  if (class_oid == NULL)
3249  {
3250  class_oid_for_marker_p = &oid_Null_oid;
3251  }
3252  else
3253  {
3254  class_oid_for_marker_p = class_oid;
3255  }
3256 
3257  if (oid == NULL)
3258  {
3259  oid_for_marker_p = &oid_Null_oid;
3260  }
3261  else
3262  {
3263  oid_for_marker_p = oid;
3264  }
3265 
3266  CUBRID_LOCK_ACQUIRE_START (oid_for_marker_p, class_oid_for_marker_p, lock);
3267 #endif /* ENABLE_SYSTEMTAP */
3268 
3269  if (thread_p == NULL)
3270  {
3271  thread_p = thread_get_thread_entry_info ();
3272  }
3273 
3274  assert (thread_p->type != TT_LOADDB);
3275 
3276  thrd_entry = thread_p;
3277 
3278  new_mode = group_mode = old_mode = NULL_LOCK;
3279 #if defined(LK_DUMP)
3280  if (lk_Gl.dump_level >= 1)
3281  {
3282  fprintf (stderr,
3283  "LK_DUMP::lk_internal_lock_object()\n"
3284  " tran(%2d) : oid(%2d|%3d|%3d), class_oid(%2d|%3d|%3d), LOCK(%7s) wait_msecs(%d)\n", tran_index,
3285  oid->volid, oid->pageid, oid->slotid, class_oid ? class_oid->volid : -1,
3286  class_oid ? class_oid->pageid : -1, class_oid ? class_oid->slotid : -1, LOCK_TO_LOCKMODE_STRING (lock),
3287  wait_msecs);
3288  }
3289 #endif /* LK_DUMP */
3290 
3291  /* isolation */
3292  isolation = logtb_find_isolation (tran_index);
3293 
3294  /* initialize */
3295  *entry_addr_ptr = NULL;
3296 
3297  /* get current locking phase */
3298  tran_lock = &lk_Gl.tran_lock_table[tran_index];
3299  is_instant_duration = tran_lock->is_instant_duration;
3300 
3301 start:
3302  assert (!is_res_mutex_locked);
3303 
3304  if (class_oid != NULL && !OID_IS_ROOTOID (class_oid))
3305  {
3306  /* instance lock request */
3307 
3308  /* do lock escalation if it is needed and check if an implicit lock has been acquired. */
3309  ret_val = lock_escalate_if_needed (thread_p, class_entry, tran_index);
3310  if (ret_val == LK_NOTGRANTED_DUE_ABORTED)
3311  {
3312  LOG_TDES *tdes = LOG_FIND_TDES (tran_index);
3314  {
3315  goto end;
3316  }
3317  }
3318 
3319  if (ret_val == LK_GRANTED
3321  {
3322  perfmon_inc_stat (thread_p, PSTAT_LK_NUM_RE_REQUESTED_ON_OBJECTS); /* monitoring */
3323  ret_val = LK_GRANTED;
3324  goto end;
3325  }
3326  }
3327  else
3328  {
3329  /* Class lock request. */
3330  /* Try to find class lock entry if it already exists to avoid using the expensive resource mutex. */
3331  entry_ptr = lock_find_class_entry (tran_index, oid);
3332  if (entry_ptr != NULL)
3333  {
3334  res_ptr = entry_ptr->res_head;
3335  goto lock_tran_lk_entry;
3336  }
3337  }
3338 
3339  /* find or add the lockable object in the lock table */
3340  search_key = lock_create_search_key ((OID *) oid, (OID *) class_oid);
3341  (void) lk_Gl.m_obj_hash_table.find_or_insert (thread_p, search_key, res_ptr);
3342  if (res_ptr == NULL)
3343  {
3344  assert (false);
3345  return ER_FAILED;
3346  }
3347  /* Find or insert also locks the resource mutex. */
3348  is_res_mutex_locked = true;
3349 
3350  if (res_ptr->holder == NULL && res_ptr->waiter == NULL && res_ptr->non2pl == NULL)
3351  {
3352  /* the lockable object was NOT in the hash chain */
3353  /* the lock request can be granted. */
3354 
3355  /* initialize the lock resource entry */
3356  lock_initialize_resource_as_allocated (res_ptr, NULL_LOCK);
3357 
3358  entry_ptr = lock_get_new_entry (tran_index, t_entry_ent, &lk_Gl.obj_free_entry_list);
3359  if (entry_ptr == NULL)
3360  {
3361  assert (is_res_mutex_locked);
3362  pthread_mutex_unlock (&res_ptr->res_mutex);
3363  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_ALLOC_RESOURCE, 1, "lock heap entry");
3364  ret_val = LK_NOTGRANTED_DUE_ERROR;
3365  goto end;
3366  }
3367 
3368  /* initialize the lock entry as granted state */
3369  lock_initialize_entry_as_granted (entry_ptr, tran_index, res_ptr, lock);
3370  if (is_instant_duration)
3371  {
3372  entry_ptr->instant_lock_count++;
3373  assert (entry_ptr->instant_lock_count > 0);
3374  }
3375 
3376  /* add the lock entry into the holder list */
3377  res_ptr->holder = entry_ptr;
3378 
3379  /* to manage granules */
3380  entry_ptr->class_entry = class_entry;
3381  lock_increment_class_granules (class_entry);
3382 
3383  /* add the lock entry into the transaction hold list */
3384  lock_insert_into_tran_hold_list (entry_ptr, tran_index);
3385 
3386  res_ptr->total_holders_mode = lock;
3387 
3388  /* Record number of acquired locks */
3390 #if defined(LK_TRACE_OBJECT)
3391  LK_MSG_LOCK_ACQUIRED (entry_ptr);
3392 #endif /* LK_TRACE_OBJECT */
3393 
3394  /* release all mutexes */
3395  assert (is_res_mutex_locked);
3396  pthread_mutex_unlock (&res_ptr->res_mutex);
3397 
3398  *entry_addr_ptr = entry_ptr;
3399 
3400  ret_val = LK_GRANTED;
3401  goto end;
3402  }
3403 
3404  /* the lockable object existed in the hash chain So, check whether I am a holder of the object. */
3405 
3406  /* find the lock entry of current transaction */
3407  entry_ptr = res_ptr->holder;
3408  while (entry_ptr != NULL)
3409  {
3410  if (entry_ptr->tran_index == tran_index)
3411  {
3412  break;
3413  }
3414  entry_ptr = entry_ptr->next;
3415  }
3416 
3417  if (entry_ptr == NULL)
3418  {
3419  /* The object exists in the hash chain & I am not a lock holder of the lockable object. */
3420 
3421  /* 1. I am not a holder & my request can be granted. */
3422  assert (lock >= NULL_LOCK && res_ptr->total_waiters_mode >= NULL_LOCK
3423  && res_ptr->total_holders_mode >= NULL_LOCK);
3424  compat1 = lock_Comp[lock][res_ptr->total_waiters_mode];
3425  compat2 = lock_Comp[lock][res_ptr->total_holders_mode];
3426  assert (compat1 != LOCK_COMPAT_UNKNOWN && compat2 != LOCK_COMPAT_UNKNOWN);
3427 
3428  if (compat1 == LOCK_COMPAT_YES && compat2 == LOCK_COMPAT_YES)
3429  {
3430  entry_ptr = lock_get_new_entry (tran_index, t_entry_ent, &lk_Gl.obj_free_entry_list);
3431  if (entry_ptr == NULL)
3432  {
3433  pthread_mutex_unlock (&res_ptr->res_mutex);
3434  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_ALLOC_RESOURCE, 1, "lock heap entry");
3435 
3436  ret_val = LK_NOTGRANTED_DUE_ERROR;
3437  goto end;
3438  }
3439 
3440  /* initialize the lock entry as granted state */
3441  lock_initialize_entry_as_granted (entry_ptr, tran_index, res_ptr, lock);
3442  if (is_instant_duration)
3443  {
3444  entry_ptr->instant_lock_count++;
3445  assert (entry_ptr->instant_lock_count > 0);
3446  }
3447 
3448  /* to manage granules */
3449  entry_ptr->class_entry = class_entry;
3450  lock_increment_class_granules (class_entry);
3451 
3452  /* add the lock entry into the holder list */
3453  lock_position_holder_entry (res_ptr, entry_ptr);
3454 
3455  /* change total_holders_mode (total mode of holder list) */
3456  assert (lock >= NULL_LOCK && res_ptr->total_holders_mode >= NULL_LOCK);
3457  res_ptr->total_holders_mode = lock_Conv[lock][res_ptr->total_holders_mode];
3458  assert (res_ptr->total_holders_mode != NA_LOCK);
3459 
3460  /* add the lock entry into the transaction hold list */
3461  lock_insert_into_tran_hold_list (entry_ptr, tran_index);
3462 
3463  lock_update_non2pl_list (thread_p, res_ptr, tran_index, lock);
3464 
3465  /* Record number of acquired locks */
3467 #if defined(LK_TRACE_OBJECT)
3468  LK_MSG_LOCK_ACQUIRED (entry_ptr);
3469 #endif /* LK_TRACE_OBJECT */
3470 
3471  assert (is_res_mutex_locked);
3472  pthread_mutex_unlock (&res_ptr->res_mutex);
3473  *entry_addr_ptr = entry_ptr;
3474 
3475  ret_val = LK_GRANTED;
3476  goto end;
3477  }
3478 
3479  /* 2. I am not a holder & my request cannot be granted. */
3480  if (wait_msecs == LK_ZERO_WAIT || wait_msecs == LK_FORCE_ZERO_WAIT)
3481  {
3482  assert (is_res_mutex_locked);
3483  pthread_mutex_unlock (&res_ptr->res_mutex);
3484  if (wait_msecs == LK_ZERO_WAIT)
3485  {
3486  if (entry_ptr == NULL)
3487  {
3488  entry_ptr = lock_get_new_entry (tran_index, t_entry_ent, &lk_Gl.obj_free_entry_list);
3489  if (entry_ptr == NULL)
3490  {
3491  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_ALLOC_RESOURCE, 1, "lock heap entry");
3492  ret_val = LK_NOTGRANTED_DUE_ERROR;
3493  goto end;
3494  }
3495  lock_initialize_entry_as_blocked (entry_ptr, thread_p, tran_index, res_ptr, lock);
3496  if (is_instant_duration
3497  /* && lock_Comp[lock][NULL_LOCK] == true */ )
3498  {
3499  entry_ptr->instant_lock_count++;
3500  assert (entry_ptr->instant_lock_count > 0);
3501  }
3502  }
3503  (void) lock_set_error_for_timeout (thread_p, entry_ptr);
3504 
3505  lock_free_entry (tran_index, t_entry_ent, &lk_Gl.obj_free_entry_list, entry_ptr);
3506  }
3507 
3508  ret_val = LK_NOTGRANTED_DUE_TIMEOUT;
3509  goto end;
3510  }
3511 
3512  /* check if another thread is waiting for the same resource */
3513  wait_entry_ptr = res_ptr->waiter;
3514  while (wait_entry_ptr != NULL)
3515  {
3516  if (wait_entry_ptr->tran_index == tran_index)
3517  {
3518  break;
3519  }
3520  wait_entry_ptr = wait_entry_ptr->next;
3521  }
3522 
3523  if (wait_entry_ptr != NULL)
3524  {
3526  thread_lock_entry (thrd_entry);
3527  thread_lock_entry (wait_entry_ptr->thrd_entry);
3528  if (wait_entry_ptr->thrd_entry->lockwait == NULL)
3529  {
3530  /* */
3531  thread_unlock_entry (wait_entry_ptr->thrd_entry);
3532  thread_unlock_entry (thrd_entry);
3533  assert (is_res_mutex_locked);
3534  pthread_mutex_unlock (&res_ptr->res_mutex);
3535  is_res_mutex_locked = false;
3536  goto start;
3537  }
3538 
3539  thrd_entry->tran_next_wait = wait_entry_ptr->thrd_entry->tran_next_wait;
3540  wait_entry_ptr->thrd_entry->tran_next_wait = thrd_entry;
3541 
3542  thread_unlock_entry (wait_entry_ptr->thrd_entry);
3543  assert (is_res_mutex_locked);
3544  pthread_mutex_unlock (&res_ptr->res_mutex);
3545  is_res_mutex_locked = false;
3546 
3548  if (entry_ptr)
3549  {
3550  if (entry_ptr->thrd_entry->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT)
3551  {
3552  /* a shutdown thread wakes me up */
3554 
3555  ret_val = LK_NOTGRANTED_DUE_ERROR;
3556  goto end;
3557  }
3558  else if (entry_ptr->thrd_entry->resume_status != THREAD_LOCK_RESUMED)
3559  {
3560  /* wake up with other reason */
3561  assert (0);
3562 
3563  if (er_errid () == NO_ERROR)
3564  {
3566  }
3567  ret_val = LK_NOTGRANTED_DUE_ERROR;
3568  goto end;
3569  }
3570  else
3571  {
3572  assert (entry_ptr->thrd_entry->resume_status == THREAD_LOCK_RESUMED);
3573  }
3574  }
3575 
3576  goto start;
3577  }
3578 
3579  /* allocate a lock entry. */
3580  entry_ptr = lock_get_new_entry (tran_index, t_entry_ent, &lk_Gl.obj_free_entry_list);
3581  if (entry_ptr == NULL)
3582  {
3583  assert (is_res_mutex_locked);
3584  pthread_mutex_unlock (&res_ptr->res_mutex);
3585  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_ALLOC_RESOURCE, 1, "lock heap entry");
3586  ret_val = LK_NOTGRANTED_DUE_ERROR;
3587  goto end;
3588  }
3589  /* initialize the lock entry as blocked state */
3590  lock_initialize_entry_as_blocked (entry_ptr, thread_p, tran_index, res_ptr, lock);
3591  if (is_instant_duration)
3592  {
3593  entry_ptr->instant_lock_count++;
3594  assert (entry_ptr->instant_lock_count > 0);
3595  }
3596 
3597  /* append the lock request at the end of the waiter */
3598  prev = NULL;
3599  for (i = res_ptr->waiter; i != NULL; i = i->next)
3600  {
3601  prev = i;
3602  }
3603  if (prev == NULL)
3604  {
3605  res_ptr->waiter = entry_ptr;
3606  }
3607  else
3608  {
3609  prev->next = entry_ptr;
3610  }
3611 
3612  /* change total_waiters_mode (total mode of waiting waiter) */
3613  assert (lock >= NULL_LOCK && res_ptr->total_waiters_mode >= NULL_LOCK);
3614  res_ptr->total_waiters_mode = lock_Conv[lock][res_ptr->total_waiters_mode];
3615  assert (res_ptr->total_waiters_mode != NA_LOCK);
3616 
3617  goto blocked;
3618  } /* end of a new lock request */
3619 
3620 lock_tran_lk_entry:
3621  /* The object exists in the hash chain & I am a lock holder of the lockable object. */
3622  lock_conversion = true;
3623  old_mode = entry_ptr->granted_mode;
3624  assert (lock >= NULL_LOCK && entry_ptr->granted_mode >= NULL_LOCK);
3625  new_mode = lock_Conv[lock][entry_ptr->granted_mode];
3626  assert (new_mode != NA_LOCK);
3627 
3628  if (new_mode == entry_ptr->granted_mode)
3629  {
3630  /* a request with either a less exclusive or an equal mode of lock */
3631  entry_ptr->count += 1;
3632  if (is_instant_duration)
3633  {
3634  compat1 = lock_Comp[lock][entry_ptr->granted_mode];
3635  assert (compat1 != LOCK_COMPAT_UNKNOWN);
3636 
3637  if ((lock >= IX_LOCK && (entry_ptr->instant_lock_count == 0 && entry_ptr->granted_mode >= IX_LOCK))
3638  && compat1 != LOCK_COMPAT_YES)
3639  {
3640  /* if the lock is already acquired with incompatible mode by current transaction, remove instant instance
3641  * locks */
3642  lock_stop_instant_lock_mode (thread_p, tran_index, false);
3643  }
3644  else
3645  {
3646  entry_ptr->instant_lock_count++;
3647  assert (entry_ptr->instant_lock_count > 0);
3648  }
3649  }
3650 
3651  if (is_res_mutex_locked)
3652  {
3653  pthread_mutex_unlock (&res_ptr->res_mutex);
3654  }
3655  perfmon_inc_stat (thread_p, PSTAT_LK_NUM_RE_REQUESTED_ON_OBJECTS); /* monitoring */
3656  *entry_addr_ptr = entry_ptr;
3657 
3658  ret_val = LK_GRANTED;
3659  goto end;
3660  }
3661 
3662  if (!is_res_mutex_locked)
3663  {
3664  /* We need to lock resource mutex. */
3665  pthread_mutex_lock (&res_ptr->res_mutex);
3666  is_res_mutex_locked = true;
3667  }
3668 
3669  /* check the compatibility with other holders' granted mode */
3670  group_mode = NULL_LOCK;
3671  for (i = res_ptr->holder; i != NULL; i = i->next)
3672  {
3673  if (i != entry_ptr)
3674  {
3675  assert (i->granted_mode >= NULL_LOCK && group_mode >= NULL_LOCK);
3676  group_mode = lock_Conv[i->granted_mode][group_mode];
3677  assert (group_mode != NA_LOCK);
3678  }
3679  }
3680 
3681  assert (new_mode >= NULL_LOCK && group_mode >= NULL_LOCK);
3682  compat1 = lock_Comp[new_mode][group_mode];
3683  assert (compat1 != LOCK_COMPAT_UNKNOWN);
3684 
3685  if (compat1 == LOCK_COMPAT_YES)
3686  {
3687  entry_ptr->granted_mode = new_mode;
3688  entry_ptr->count += 1;
3689  if (is_instant_duration)
3690  {
3691  entry_ptr->instant_lock_count++;
3692  assert (entry_ptr->instant_lock_count > 0);
3693  }
3694 
3695  assert (lock >= NULL_LOCK && res_ptr->total_holders_mode >= NULL_LOCK);
3696  res_ptr->total_holders_mode = lock_Conv[lock][res_ptr->total_holders_mode];
3697  assert (res_ptr->total_holders_mode != NA_LOCK);
3698 
3699  lock_update_non2pl_list (thread_p, res_ptr, tran_index, lock);
3700  assert (is_res_mutex_locked);
3701  pthread_mutex_unlock (&res_ptr->res_mutex);
3702 
3703  goto lock_conversion_treatement;
3704  }
3705 
3706  /* I am a holder & my request cannot be granted. */
3707  if (wait_msecs == LK_ZERO_WAIT || wait_msecs == LK_FORCE_ZERO_WAIT)
3708  {
3709  pthread_mutex_unlock (&res_ptr->res_mutex);
3710  if (wait_msecs == LK_ZERO_WAIT)
3711  {
3712  LK_ENTRY *p = lock_get_new_entry (tran_index, t_entry_ent,
3713  &lk_Gl.obj_free_entry_list);
3714 
3715  if (p != NULL)
3716  {
3717  lock_initialize_entry_as_blocked (p, thread_p, tran_index, res_ptr, lock);
3718  lock_set_error_for_timeout (thread_p, p);
3719  lock_free_entry (tran_index, t_entry_ent, &lk_Gl.obj_free_entry_list, p);
3720  }
3721  }
3722 
3723  ret_val = LK_NOTGRANTED_DUE_TIMEOUT;
3724  goto end;
3725  }
3726 
3727  /* Upgrader Positioning Rule (UPR) */
3728 
3729  /* check if another thread is waiting for the same resource */
3730  if (entry_ptr->blocked_mode != NULL_LOCK)
3731  {
3733  thread_lock_entry (thrd_entry);
3734  thread_lock_entry (entry_ptr->thrd_entry);
3735 
3736  if (entry_ptr->thrd_entry->lockwait == NULL)
3737  {
3738  thread_unlock_entry (entry_ptr->thrd_entry);
3739  thread_unlock_entry (thrd_entry);
3740  assert (is_res_mutex_locked);
3741  pthread_mutex_unlock (&res_ptr->res_mutex);
3742  is_res_mutex_locked = false;
3743  goto start;
3744  }
3745 
3746  thrd_entry->tran_next_wait = entry_ptr->thrd_entry->tran_next_wait;
3747  entry_ptr->thrd_entry->tran_next_wait = thrd_entry;
3748 
3749  thread_unlock_entry (entry_ptr->thrd_entry);
3750 
3751  assert (is_res_mutex_locked);
3752  pthread_mutex_unlock (&res_ptr->res_mutex);
3753  is_res_mutex_locked = false;
3754 
3756  if (thrd_entry->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT)
3757  {
3758  /* a shutdown thread wakes me up */
3760  ret_val = LK_NOTGRANTED_DUE_ERROR;
3761  goto end;
3762  }
3763  else if (thrd_entry->resume_status != THREAD_LOCK_RESUMED)
3764  {
3765  /* wake up with other reason */
3766  assert (0);
3767 
3768  if (er_errid () == NO_ERROR)
3769  {
3771  }
3772  ret_val = LK_NOTGRANTED_DUE_ERROR;
3773  goto end;
3774  }
3775  else
3776  {
3777  assert (thrd_entry->resume_status == THREAD_LOCK_RESUMED);
3778  }
3779 
3780  goto start;
3781  }
3782 
3783  entry_ptr->blocked_mode = new_mode;
3784  entry_ptr->count += 1;
3785  if (is_instant_duration)
3786  {
3787  entry_ptr->instant_lock_count++;
3788  assert (entry_ptr->instant_lock_count > 0);
3789  }
3790 
3791  entry_ptr->thrd_entry = thread_p;
3792 
3793  assert (lock >= NULL_LOCK && res_ptr->total_holders_mode >= NULL_LOCK);
3794  res_ptr->total_holders_mode = lock_Conv[lock][res_ptr->total_holders_mode];
3795  assert (res_ptr->total_holders_mode != NA_LOCK);
3796 
3797  /* remove the lock entry from the holder list */
3798  prev = NULL;
3799  curr = res_ptr->holder;
3800  while ((curr != NULL) && (curr != entry_ptr))
3801  {
3802  prev = curr;
3803  curr = curr->next;
3804  }
3805  if (prev == NULL)
3806  {
3807  res_ptr->holder = entry_ptr->next;
3808  }
3809  else
3810  {
3811  prev->next = entry_ptr->next;
3812  }
3813 
3814  /* position the lock entry in the holder list according to UPR */
3815  lock_position_holder_entry (res_ptr, entry_ptr);
3816 
3817 blocked:
3818 
3820  {
3821  tsc_getticks (&start_tick);
3822  }
3823 
3824  /* LK_CANWAIT(wait_msecs) : wait_msecs > 0 */
3826 #if defined(LK_TRACE_OBJECT)
3827  LK_MSG_LOCK_WAITFOR (entry_ptr);
3828 #endif /* LK_TRACE_OBJECT */
3829 
3830  thread_lock_entry (entry_ptr->thrd_entry);
3831  if (is_res_mutex_locked)
3832  {
3833  pthread_mutex_unlock (&res_ptr->res_mutex);
3834  }
3835  ret_val = lock_suspend (thread_p, entry_ptr, wait_msecs);
3836 
3838  {
3839  tsc_getticks (&end_tick);
3840  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
3841  lock_wait_time = tv_diff.tv_sec * 1000000LL + tv_diff.tv_usec;
3842  perfmon_lk_waited_time_on_objects (thread_p, lock, lock_wait_time);
3843  }
3844 
3845  if (ret_val != LOCK_RESUMED)
3846  {
3847  /* Following three cases are possible. 1. lock timeout 2. deadlock victim 3. interrupt In any case, current
3848  * thread must remove the wait info. */
3849  lock_internal_perform_unlock_object (thread_p, entry_ptr, false, false);
3850 
3851  if (ret_val == LOCK_RESUMED_ABORTED)
3852  {
3853  ret_val = LK_NOTGRANTED_DUE_ABORTED;
3854  goto end;
3855  }
3856  else if (ret_val == LOCK_RESUMED_INTERRUPT)
3857  {
3858  ret_val = LK_NOTGRANTED_DUE_ERROR;
3859  goto end;
3860  }
3861  else /* LOCK_RESUMED_TIMEOUT || LOCK_SUSPENDED */
3862  {
3863  ret_val = LK_NOTGRANTED_DUE_TIMEOUT;
3864  goto end;
3865  }
3866  }
3867 
3868  /* The transaction now got the lock on the object */
3869 lock_conversion_treatement:
3870 
3871  if (entry_ptr->res_head->key.type == LOCK_RESOURCE_CLASS && lock_conversion == true)
3872  {
3873  new_mode = entry_ptr->granted_mode;
3874  switch (old_mode)
3875  {
3876  case IS_LOCK:
3877  if (IS_WRITE_EXCLUSIVE_LOCK (new_mode) || new_mode == S_LOCK || new_mode == SIX_LOCK)
3878  {
3879  lock_remove_all_inst_locks (thread_p, tran_index, oid, S_LOCK);
3880  }
3881  break;
3882 
3883  case IX_LOCK:
3884  if (new_mode == SIX_LOCK)
3885  {
3886  lock_remove_all_inst_locks (thread_p, tran_index, oid, S_LOCK);
3887  }
3888  else if (IS_WRITE_EXCLUSIVE_LOCK (new_mode))
3889  {
3890  lock_remove_all_inst_locks (thread_p, tran_index, oid, X_LOCK);
3891  }
3892  break;
3893 
3894  case SIX_LOCK:
3895  /* new_mode == X_LOCK */
3896  lock_remove_all_inst_locks (thread_p, tran_index, oid, X_LOCK);
3897  break;
3898 
3899  default:
3900  break;
3901  }
3902 
3904 #if defined(LK_TRACE_OBJECT)
3905  LK_MSG_LOCK_CONVERTED (entry_ptr);
3906 #endif /* LK_TRACE_OBJECT */
3907  }
3908 
3909  if (lock_conversion == false)
3910  {
3911  /* to manage granules */
3912  entry_ptr->class_entry = class_entry;
3913  lock_increment_class_granules (class_entry);
3914  }
3915 
3916  *entry_addr_ptr = entry_ptr;
3917  ret_val = LK_GRANTED;
3918 
3919 end:
3920 #if defined(ENABLE_SYSTEMTAP)
3921  CUBRID_LOCK_ACQUIRE_END (oid_for_marker_p, class_oid_for_marker_p, lock, ret_val != LK_GRANTED);
3922 #endif /* ENABLE_SYSTEMTAP */
3923 
3924  return ret_val;
3925 }
3926 #endif /* SERVER_MODE */
3927 
3928 #if defined(SERVER_MODE)
3929 /*
3930  * lock_internal_perform_unlock_object - Performs actual object unlock operation
3931  *
3932  * return:
3933  *
3934  * entry_ptr(in):
3935  * release_flag(in):
3936  * move_to_non2pl(in):
3937  *
3938  * Note:Unlock a lock specified by entry_ptr.
3939  * Therefore, for the 2 phase locking, the caller must unlock from leaf
3940  * to root or atomically all locks to which the transaction is related.
3941  *
3942  * if release_flag is true, release the lock item.
3943  * Otherwise, just decrement the lock count for supporting isolation level.
3944  */
3945 static void
3946 lock_internal_perform_unlock_object (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr, bool release_flag,
3947  bool move_to_non2pl)
3948 {
3950  int tran_index;
3951  LK_RES *res_ptr;
3952  LK_ENTRY *i;
3953  LK_ENTRY *prev, *curr;
3954  LK_ENTRY *from_whom;
3955  LOCK mode;
3956  int rv;
3957 
3958 #if defined(LK_DUMP)
3959  if (lk_Gl.dump_level >= 1)
3960  {
3961  fprintf (stderr,
3962  "LK_DUMP::lk_internal_unlock_object()\n"
3963  " tran(%2d) : oid(%2d|%3d|%3d), class_oid(%2d|%3d|%3d), LOCK(%7s)\n", entry_ptr->tran_index,
3964  entry_ptr->res_head->oid.volid, entry_ptr->res_head->oid.pageid, entry_ptr->res_head->oid.slotid,
3965  entry_ptr->res_head->class_oid.volid, entry_ptr->res_head->class_oid.pageid,
3966  entry_ptr->res_head->class_oid.slotid, LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode));
3967  }
3968 #endif /* LK_DUMP */
3969 
3970  if (entry_ptr == NULL)
3971  {
3972  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_internal_unlock_object",
3973  "NULL entry pointer");
3974  return;
3975  }
3976 
3977  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
3978  if (entry_ptr->tran_index != tran_index)
3979  {
3980  assert (false);
3981  return;
3982  }
3983 
3984  if (release_flag == false)
3985  {
3986  entry_ptr->count--;
3987  if (lock_is_instant_lock_mode (tran_index))
3988  {
3989  entry_ptr->instant_lock_count--;
3990  assert (entry_ptr->instant_lock_count >= 0);
3991  }
3992 
3993  if (entry_ptr->blocked_mode == NULL_LOCK && entry_ptr->count > 0)
3994  {
3995  return;
3996  }
3997  }
3998 
3999  /* hold resource mutex */
4000  res_ptr = entry_ptr->res_head;
4001  rv = pthread_mutex_lock (&res_ptr->res_mutex);
4002 
4003  /* check if the transaction is in the holder list */
4004  prev = NULL;
4005  curr = res_ptr->holder;
4006  while (curr != NULL)
4007  {
4008  if (curr->tran_index == tran_index)
4009  {
4010  break;
4011  }
4012  prev = curr;
4013  curr = curr->next;
4014  }
4015 
4016  if (curr == NULL)
4017  {
4018  /* the transaction is not in the holder list, check the waiter. */
4019  prev = NULL;
4020  curr = res_ptr->waiter;
4021  while (curr != NULL)
4022  {
4023  if (curr->tran_index == tran_index)
4024  {
4025  break;
4026  }
4027  prev = curr;
4028  curr = curr->next;
4029  }
4030 
4031  if (curr != NULL)
4032  {
4033  /* get the next lock waiter */
4034  from_whom = curr->next;
4035 
4036  /* remove the lock entry from the waiter */
4037  if (prev == NULL)
4038  {
4039  res_ptr->waiter = curr->next;
4040  }
4041  else
4042  {
4043  prev->next = curr->next;
4044  }
4045 
4046  /* free the lock entry */
4047  lock_free_entry (tran_index, t_entry, &lk_Gl.obj_free_entry_list, curr);
4048 
4049  if (from_whom != NULL)
4050  {
4051  /* grant blocked waiter & change total_waiters_mode */
4052  lock_grant_blocked_waiter_partial (thread_p, res_ptr, from_whom);
4053  }
4054  else
4055  {
4056  /* change only total_waiters_mode */
4057  mode = NULL_LOCK;
4058  for (i = res_ptr->waiter; i != NULL; i = i->next)
4059  {
4060  assert (i->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
4061  mode = lock_Conv[i->blocked_mode][mode];
4062  assert (mode != NA_LOCK);
4063  }
4064  res_ptr->total_waiters_mode = mode;
4065  }
4066  }
4067  else
4068  {
4069  assert (false);
4070  /* The transaction is neither the lock holder nor the lock waiter */
4072  res_ptr->key.oid.pageid, res_ptr->key.oid.slotid);
4073  }
4074 
4075  pthread_mutex_unlock (&res_ptr->res_mutex);
4076 
4077  return;
4078  }
4079 
4080  /* The transaction is in the holder list. Consult the holder list. */
4081 
4082  /* remove the entry from the holder list */
4083  if (prev == NULL)
4084  {
4085  res_ptr->holder = curr->next;
4086  }
4087  else
4088  {
4089  prev->next = curr->next;
4090  }
4091 
4092  if (release_flag == false && curr->count > 0)
4093  {
4094  /* The current transaction was a blocked holder. lock timeout is called or it is selected as a deadlock victim */
4095  curr->blocked_mode = NULL_LOCK;
4096  lock_position_holder_entry (res_ptr, entry_ptr);
4097  }
4098  else
4099  {
4100  /* remove the lock entry from the transaction lock hold list */
4101  (void) lock_delete_from_tran_hold_list (curr, tran_index);
4102 
4103  /* to manage granules */
4104  lock_decrement_class_granules (curr->class_entry);
4105 
4106  /* If it's not the end of transaction, it's a non2pl lock */
4107  if (release_flag == false && move_to_non2pl == true)
4108  {
4109  (void) lock_add_non2pl_lock (thread_p, res_ptr, tran_index, curr->granted_mode);
4110  }
4111  /* free the lock entry */
4112  lock_free_entry (tran_index, t_entry, &lk_Gl.obj_free_entry_list, curr);
4113  }
4114 
4115  /* change total_holders_mode */
4116  mode = NULL_LOCK;
4117  for (i = res_ptr->holder; i != NULL; i = i->next)
4118  {
4119  assert (i->granted_mode >= NULL_LOCK && mode >= NULL_LOCK);
4120  mode = lock_Conv[i->granted_mode][mode];
4121  assert (mode != NA_LOCK);
4122 
4123  assert (i->blocked_mode >= NULL_LOCK && mode >= NULL_LOCK);
4124  mode = lock_Conv[i->blocked_mode][mode];
4125  assert (mode != NA_LOCK);
4126  }
4127  res_ptr->total_holders_mode = mode;
4128 
4129  if (res_ptr->holder == NULL && res_ptr->waiter == NULL)
4130  {
4131  if (res_ptr->non2pl == NULL)
4132  {
4133  /* if resource entry is empty, remove it. */
4134  (void) lock_remove_resource (thread_p, res_ptr);
4135  }
4136  else
4137  {
4138  pthread_mutex_unlock (&res_ptr->res_mutex);
4139  }
4140  }
4141  else
4142  {
4143  /* grant blocked holders and blocked waiters */
4144  lock_grant_blocked_holder (thread_p, res_ptr);
4145 
4146  (void) lock_grant_blocked_waiter (thread_p, res_ptr);
4147  pthread_mutex_unlock (&res_ptr->res_mutex);
4148  }
4149 }
4150 #endif /* SERVER_MODE */
4151 
4152 /*
4153  * lock_demote_class_lock - Demote the class lock to to_be_lock
4154  *
4155  * return: error code
4156  * oid(in): class oid
4157  * lock(in): lock mode to be set
4158  * ex_lock(out): ex-lock mode
4159  *
4160  * Note: This function demotes the lock mode of given class lock.
4161  * After the demotion, this function grants the blocked requestors if the blocked lock mode is grantable.
4162  *
4163  * WARNING: Demoting a write lock in the middle of a transaction may cause recovery issues.
4164  * It should be carefully used for some particular cases.
4165  * Since I don't see any usage of an instance lock and it is very DANGEROUS,
4166  * the current function only supports demotion of a class lock.
4167  */
4168 int
4169 lock_demote_class_lock (THREAD_ENTRY * thread_p, const OID * oid, LOCK lock, LOCK * ex_lock)
4170 {
4171 #if defined(SERVER_MODE)
4172  LK_ENTRY *entry_ptr;
4173  int tran_index;
4174 
4175  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
4176 
4177  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, oid, true);
4178  if (entry_ptr == NULL)
4179  {
4180  assert (entry_ptr != NULL);
4181  return ER_FAILED;
4182  }
4183 
4184  return lock_internal_demote_class_lock (thread_p, entry_ptr, lock, ex_lock);
4185 #else // SERVER_MODE
4186  return NO_ERROR;
4187 #endif // !SERVER_MODE = SA_MODE
4188 }
4189 
4190 #if defined (SERVER_MODE)
4191 /*
4192  * lock_internal_demote_class_lock - helper function to lock_demote_class_lock
4193  *
4194  * return: error code
4195  * entry_ptr(in):
4196  * to_be_lock(in):
4197  * ex_lock(out): ex-lock mode
4198  *
4199  */
4200 static int
4201 lock_internal_demote_class_lock (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr, LOCK to_be_lock, LOCK * ex_lock)
4202 {
4203  LK_RES *res_ptr; /* lock resource entry pointer */
4204  LK_ENTRY *holder, *h; /* lock entry pointer */
4205  LOCK total_mode;
4206  int rv;
4207 
4208  /* The caller is not holding any mutex */
4209  assert (entry_ptr != NULL);
4210 
4211  res_ptr = entry_ptr->res_head;
4212 
4213  // expects a class lock entry
4214  assert (res_ptr->key.type == LOCK_RESOURCE_CLASS);
4215 
4216  rv = pthread_mutex_lock (&res_ptr->res_mutex);
4217 
4218  /* find the given lock entry in the holder list */
4219  for (holder = res_ptr->holder; holder != NULL; holder = holder->next)
4220  {
4221  if (holder == entry_ptr)
4222  {
4223  break;
4224  }
4225  }
4226 
4227  if (holder == NULL)
4228  {
4229  /* not found */
4230  assert (holder != NULL);
4231  pthread_mutex_unlock (&res_ptr->res_mutex);
4233  LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode), entry_ptr->tran_index,
4234  OID_AS_ARGS (&res_ptr->key.oid));
4236  }
4237 
4238  // to_be_lock mode should be weaker than the current lock.
4239  assert (NULL_LOCK < to_be_lock && to_be_lock != U_LOCK && to_be_lock < holder->granted_mode);
4240 
4241 #if defined(LK_DUMP)
4242  if (lk_Gl.dump_level >= 1)
4243  {
4244  fprintf (stderr, "LK_DUMP::lk_demote_class_lock ()\n"
4245  " tran(%2d) : oid(%d|%d|%d), class_oid(%d|%d|%d), LOCK(%7s -> %7s)\n", entry_ptr->tran_index,
4246  OID_AS_ARGS (&entry_ptr->res_head->key.oid), OID_AS_ARGS (&entry_ptr->res_head->key.class_oid),
4247  LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode), LOCK_TO_LOCKMODE_STRING (to_be_lock));
4248  }
4249 #endif /* LK_DUMP */
4250 
4251  *ex_lock = holder->granted_mode;
4252 
4253  /* demote the class lock(granted mode) of the lock entry */
4254  holder->granted_mode = to_be_lock;
4255 
4256  /* change total_holders_mode */
4257  total_mode = NULL_LOCK;
4258  for (h = res_ptr->holder; h != NULL; h = h->next)
4259  {
4260  assert (h->granted_mode >= NULL_LOCK && total_mode >= NULL_LOCK);
4261  total_mode = lock_Conv[h->granted_mode][total_mode];
4262  assert (total_mode != NA_LOCK);
4263 
4264  assert (h->blocked_mode >= NULL_LOCK && total_mode >= NULL_LOCK);
4265  total_mode = lock_Conv[h->blocked_mode][total_mode];
4266  assert (total_mode != NA_LOCK);
4267  }
4268  res_ptr->total_holders_mode = total_mode;
4269 
4270  /* grant the blocked holders and blocked waiters */
4271  lock_grant_blocked_holder (thread_p, res_ptr);
4272  (void) lock_grant_blocked_waiter (thread_p, res_ptr);
4273 
4274  pthread_mutex_unlock (&res_ptr->res_mutex);
4275 
4276  return NO_ERROR;
4277 }
4278 #endif /* SERVER_MODE */
4279 
4280 /*
4281  * Private Functions Group: demote, unlock and remove locks
4282  *
4283  * - lock_demote_all_shared_class_locks()
4284  * - lock_unlock_shared_inst_lock()
4285  * - lock_remove_all_class_locks()
4286  * - lock_remove_all_inst_locks()
4287  */
4288 
4289 #if defined(SERVER_MODE)
4290 /*
4291  * lock_demote_read_class_lock_for_checksumdb - Demote one shared class lock to intention shared only for checksumdb
4292  *
4293  * return:
4294  *
4295  * tran_index(in):
4296  * class_oid(in):
4297  *
4298  * Note: This is exported ONLY for checksumdb. NEVER consider to use this function for any other clients/threads.
4299  *
4300  * Note:This function finds the lock entry whose lock object id is same with the given class_oid in the transaction
4301  * lock hold list. And then, demote the class lock if the class lock is shared mode.
4302  */
4303 void
4304 lock_demote_read_class_lock_for_checksumdb (THREAD_ENTRY * thread_p, int tran_index, const OID * class_oid)
4305 {
4306  LK_ENTRY *entry_ptr;
4307  LOCK ex_lock;
4308 
4309  /* The caller is not holding any mutex */
4310 
4311  /* demote only one class lock */
4312  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, class_oid, true);
4313  if (entry_ptr == NULL)
4314  {
4315  assert (entry_ptr != NULL);
4316  return;
4317  }
4318 
4319  if (entry_ptr->granted_mode == S_LOCK)
4320  {
4321  (void) lock_internal_demote_class_lock (thread_p, entry_ptr, IS_LOCK, &ex_lock);
4322  }
4323 }
4324 #endif /* SERVER_MODE */
4325 
4326 #if defined(SERVER_MODE)
4327 /*
4328  * lock_demote_all_shared_class_locks - Demote all shared class locks
4329  *
4330  * return:
4331  *
4332  * tran_index(in):
4333  *
4334  * Note:This function demotes all shared class locks that are held
4335  * by the given transaction.
4336  */
4337 static void
4338 lock_demote_all_shared_class_locks (THREAD_ENTRY * thread_p, int tran_index)
4339 {
4340  LK_TRAN_LOCK *tran_lock;
4341  LK_ENTRY *curr, *next;
4342  LOCK ex_lock;
4343 
4344  /* When this function is called, only one thread is executing for the transaction. (transaction : thread = 1 : 1)
4345  * Therefore, there is no need to hold tran_lock->hold_mutex. */
4346 
4347  tran_lock = &lk_Gl.tran_lock_table[tran_index];
4348 
4349  /* 1. demote general class locks */
4350  curr = tran_lock->class_hold_list;
4351  while (curr != NULL)
4352  {
4353  assert (tran_index == curr->tran_index);
4354 
4355  next = curr->tran_next;
4356 
4357  if (curr->granted_mode == S_LOCK)
4358  {
4359  // S -> IS
4360  (void) lock_internal_demote_class_lock (thread_p, curr, IS_LOCK, &ex_lock);
4361  }
4362  else if (curr->granted_mode == SIX_LOCK)
4363  {
4364  // SIX -> IX
4365  (void) lock_internal_demote_class_lock (thread_p, curr, IX_LOCK, &ex_lock);
4366  }
4367 
4368  curr = next;
4369  }
4370 
4371  /* 2. demote root class lock */
4372  curr = tran_lock->root_class_hold;
4373  if (curr != NULL)
4374  {
4375  assert (tran_index == curr->tran_index);
4376 
4377  if (curr->granted_mode == S_LOCK)
4378  {
4379  // S -> IS
4380  (void) lock_internal_demote_class_lock (thread_p, curr, IS_LOCK, &ex_lock);
4381  }
4382  else if (curr->granted_mode == SIX_LOCK)
4383  {
4384  // SIX -> IX
4385  (void) lock_internal_demote_class_lock (thread_p, curr, IX_LOCK, &ex_lock);
4386  }
4387  }
4388 }
4389 #endif /* SERVER_MODE */
4390 
4391 #if defined(SERVER_MODE)
4392 /*
4393  * lk_unlock_shared_inst_lock - Unlock one shared instance lock
4394  *
4395  * return:
4396  *
4397  * tran_index(in):
4398  * inst_oid(in):
4399  *
4400  * Note:This function finds the lock entry whose lock object id is same
4401  * with the given inst_oid in the transaction lock hold list. And then,
4402  * unlock the instance lock if the instance lock is shared lock.
4403  */
4404 static void
4405 lock_unlock_shared_inst_lock (THREAD_ENTRY * thread_p, int tran_index, const OID * inst_oid)
4406 {
4407  LK_ENTRY *entry_ptr;
4408 
4409  /* unlock the shared instance lock (S_LOCK) */
4410  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, inst_oid, false);
4411 
4412  if (entry_ptr != NULL && entry_ptr->granted_mode == S_LOCK)
4413  {
4414  lock_internal_perform_unlock_object (thread_p, entry_ptr, false, true);
4415  }
4416 }
4417 #endif /* SERVER_MODE */
4418 
4419 #if defined(SERVER_MODE)
4420 /*
4421  * lock_remove_all_class_locks - Remove class locks whose lock mode is lower than the given lock mode
4422  *
4423  * return:
4424  *
4425  * tran_index(in):
4426  * lock(in):
4427  *
4428  * Note:This function removes class locks whose lock mode is lower than the given lock mode.
4429  */
4430 static void
4431 lock_remove_all_class_locks (THREAD_ENTRY * thread_p, int tran_index, LOCK lock)
4432 {
4433  LK_TRAN_LOCK *tran_lock;
4434  LK_ENTRY *curr, *next;
4435 
4436  /* When this function is called, only one thread is executing for the transaction. (transaction : thread = 1 : 1)
4437  * Therefore, there is no need to hold tran_lock->hold_mutex. */
4438 
4439  tran_lock = &lk_Gl.tran_lock_table[tran_index];
4440 
4441  /* remove class locks if given condition is satisfied */
4442  curr = tran_lock->class_hold_list;
4443  while (curr != NULL)
4444  {
4445  assert (tran_index == curr->tran_index);
4446 
4447  next = curr->tran_next;
4448  if (curr->granted_mode <= lock)
4449  {
4450  lock_internal_perform_unlock_object (thread_p, curr, true, false);
4451  }
4452  curr = next;
4453  }
4454 
4455  /* remove root class lock if given condition is satisfied */
4456  curr = tran_lock->root_class_hold;
4457  if (curr != NULL)
4458  {
4459  assert (tran_index == curr->tran_index);
4460 
4461  if (curr->granted_mode <= lock)
4462  {
4463  lock_internal_perform_unlock_object (thread_p, curr, true, false);
4464  }
4465  }
4466 
4467 }
4468 #endif /* SERVER_MODE */
4469 
4470 #if defined(SERVER_MODE)
4471 /*
4472  * lock_remove_all_inst_locks - Remove instance locks whose lock mode is lower than the given lock mode
4473  *
4474  * return:
4475  *
4476  * tran_index(in):
4477  * class_oid(in):
4478  * lock(in):
4479  *
4480  * Note:This function removes instance locks whose lock mode is lower than the given lock mode.
4481  */
4482 void
4483 lock_remove_all_inst_locks (THREAD_ENTRY * thread_p, int tran_index, const OID * class_oid, LOCK lock)
4484 {
4485  LK_TRAN_LOCK *tran_lock;
4486  LK_ENTRY *curr, *next;
4487 
4488  tran_lock = &lk_Gl.tran_lock_table[tran_index];
4489 
4490  /* remove instance locks if given condition is satisfied */
4491  curr = tran_lock->inst_hold_list;
4492  while (curr != NULL)
4493  {
4494  assert (tran_index == curr->tran_index);
4495 
4496  next = curr->tran_next;
4497  if (class_oid == NULL || OID_ISNULL (class_oid) || OID_EQ (&curr->res_head->key.class_oid, class_oid))
4498  {
4499  if (curr->granted_mode <= lock || lock == X_LOCK)
4500  {
4501  /* found : the same class_oid and interesting lock mode --> unlock it. */
4502  lock_internal_perform_unlock_object (thread_p, curr, true, false);
4503  }
4504  }
4505  curr = next;
4506  }
4507 
4508 }
4509 #endif /* SERVER_MODE */
4510 
4511 /*
4512  * Private Functions Group: non two pahse locks
4513  *
4514  * - lk_remove_non2pl()
4515  * - lk_update_non2pl_list()
4516  */
4517 
4518 #if defined(SERVER_MODE)
4519 /*
4520  * lock_remove_non2pl -
4521  *
4522  * return:
4523  *
4524  * non2pl(in):
4525  * tran_index(in):
4526  */
4527 static void
4528 lock_remove_non2pl (THREAD_ENTRY * thread_p, LK_ENTRY * non2pl, int tran_index)
4529 {
4531  LK_RES *res_ptr;
4532  LK_ENTRY *prev, *curr;
4533  int rv;
4534 
4535  /* The given non2pl entry has already been removed from the transaction non2pl list. Therefore, This function removes
4536  * the given non2pl entry from the resource non2pl list and then frees the entry. */
4537 
4538  res_ptr = non2pl->res_head;
4539  rv = pthread_mutex_lock (&res_ptr->res_mutex);
4540 
4541  /* find the given non2pl in non2pl list of resource entry */
4542  prev = NULL;
4543  curr = res_ptr->non2pl;
4544  while (curr != NULL)
4545  {
4546  if (curr->tran_index == tran_index)
4547  {
4548  break;
4549  }
4550  prev = curr;
4551  curr = curr->next;
4552  }
4553  if (curr == NULL)
4554  { /* not found */
4555  pthread_mutex_unlock (&res_ptr->res_mutex);
4556  return;
4557  }
4558 
4559  /* found : remove it */
4560  if (prev == NULL)
4561  {
4562  res_ptr->non2pl = curr->next;
4563  }
4564  else
4565  {
4566  prev->next = curr->next;
4567  }
4568  /* (void)lk_delete_from_tran_non2pl_list(curr); */
4569 
4570  /* free the lock entry */
4571  lock_free_entry (tran_index, t_entry, &lk_Gl.obj_free_entry_list, curr);
4572 
4573  if (res_ptr->holder == NULL && res_ptr->waiter == NULL && res_ptr->non2pl == NULL)
4574  {
4575  (void) lock_remove_resource (thread_p, res_ptr);
4576  }
4577  else
4578  {
4579  pthread_mutex_unlock (&res_ptr->res_mutex);
4580  }
4581 
4582 }
4583 #endif /* SERVER_MODE */
4584 
4585 #if defined(SERVER_MODE)
4586 /*
4587  * lock_update_non2pl_list -
4588  *
4589  * return:
4590  *
4591  * res_ptr(in):
4592  * tran_index(in):
4593  * lock(in):
4594  */
4595 static void
4596 lock_update_non2pl_list (THREAD_ENTRY * thread_p, LK_RES * res_ptr, int tran_index, LOCK lock)
4597 {
4599  LK_ENTRY *prev;
4600  LK_ENTRY *curr;
4601  LK_ENTRY *next;
4602  LK_TRAN_LOCK *tran_lock;
4603  int rv;
4604  LOCK_COMPATIBILITY compat;
4605 
4606  /* The caller is holding a resource mutex */
4607 
4608  prev = NULL;
4609  curr = res_ptr->non2pl;
4610  while (curr != NULL)
4611  {
4612  if (curr->tran_index == tran_index)
4613  { /* same transaction */
4614  /* remove current non2pl entry */
4615  next = curr->next;
4616  if (prev == NULL)
4617  {
4618  res_ptr->non2pl = curr->next;
4619  }
4620  else
4621  {
4622  prev->next = curr->next;
4623  }
4624  (void) lock_delete_from_tran_non2pl_list (curr, tran_index);
4625  lock_free_entry (tran_index, t_entry, &lk_Gl.obj_free_entry_list, curr);
4626  curr = next;
4627  }
4628  else
4629  { /* different transaction */
4630  if (curr->granted_mode != INCON_NON_TWO_PHASE_LOCK)
4631  {
4632  /* The transaction with the released lock must decache the lock object since an incompatible locks has
4633  * been acquired. This implies that the transaction with the released lock may not be serializable
4634  * (repeatable read consistent) any longer. */
4635  assert (lock >= NULL_LOCK && curr->granted_mode >= NULL_LOCK);
4636  compat = lock_Comp[lock][curr->granted_mode];
4637  assert (compat != LOCK_COMPAT_UNKNOWN);
4638 
4639  if (compat == LOCK_COMPAT_NO)
4640  {
4641  curr->granted_mode = INCON_NON_TWO_PHASE_LOCK;
4642  tran_lock = &lk_Gl.tran_lock_table[curr->tran_index];
4643  rv = pthread_mutex_lock (&tran_lock->non2pl_mutex);
4644  tran_lock->num_incons_non2pl += 1;
4645  pthread_mutex_unlock (&tran_lock->non2pl_mutex);
4646  }
4647  }
4648  prev = curr;
4649  curr = curr->next;
4650  }
4651  }
4652 
4653 }
4654 #endif /* SERVER_MODE */
4655 
4656 /*
4657  * Private Functions Group: local deadlock detection and resolution
4658  *
4659  * - lk_add_WFG_edge()
4660  */
4661 
4662 #if defined(SERVER_MODE)
4663 /*
4664  * lock_add_WFG_edge -
4665  *
4666  * return: error code
4667  *
4668  * from_tran_index(in): waiting transaction index
4669  * to_tran_index(in): waited transaction index
4670  * holder_flag(in): true(if to_tran_index is Holder), false(otherwise)
4671  * edge_wait_stime(in):
4672  *
4673  * Note:add an edge to WFG which represents that
4674  * 'from_tran_index' transaction waits for 'to_tran_index' transaction.
4675  */
4676 static int
4677 lock_add_WFG_edge (int from_tran_index, int to_tran_index, int holder_flag, INT64 edge_wait_stime)
4678 {
4679  int prev, curr;
4680  int i;
4681  int alloc_idx;
4682  char *temp_ptr;
4683 
4684  /* check if the transactions has been selected as victims */
4685  /* Note that the transactions might be old deadlock victims */
4686  if (lk_Gl.TWFG_node[from_tran_index].DL_victim == true || lk_Gl.TWFG_node[to_tran_index].DL_victim == true)
4687  {
4688  return NO_ERROR;
4689  }
4690 
4691  /* increment global edge sequence number */
4692  lk_Gl.global_edge_seq_num++;
4693 
4694  if (lk_Gl.TWFG_node[from_tran_index].checked_by_deadlock_detector == false)
4695  {
4696  /* a new transaction started */
4697  if (lk_Gl.TWFG_node[from_tran_index].first_edge != -1)
4698  {
4699  prev = -1;
4700  curr = lk_Gl.TWFG_node[from_tran_index].first_edge;
4701  while (curr != -1)
4702  {
4703  prev = curr;
4704  curr = lk_Gl.TWFG_edge[curr].next;
4705  }
4706  lk_Gl.TWFG_edge[prev].next = lk_Gl.TWFG_free_edge_idx;
4707  lk_Gl.TWFG_free_edge_idx = lk_Gl.TWFG_node[from_tran_index].first_edge;
4708  lk_Gl.TWFG_node[from_tran_index].first_edge = -1;
4709  }
4710  lk_Gl.TWFG_node[from_tran_index].checked_by_deadlock_detector = true;
4711  lk_Gl.TWFG_node[from_tran_index].tran_edge_seq_num = lk_Gl.global_edge_seq_num;
4712  }
4713 
4714  if (lk_Gl.TWFG_node[to_tran_index].checked_by_deadlock_detector == false)
4715  {
4716  /* a new transaction started */
4717  if (lk_Gl.TWFG_node[to_tran_index].first_edge != -1)
4718  {
4719  prev = -1;
4720  curr = lk_Gl.TWFG_node[to_tran_index].first_edge;
4721  while (curr != -1)
4722  {
4723  prev = curr;
4724  curr = lk_Gl.TWFG_edge[curr].next;
4725  }
4726  lk_Gl.TWFG_edge[prev].next = lk_Gl.TWFG_free_edge_idx;
4727  lk_Gl.TWFG_free_edge_idx = lk_Gl.TWFG_node[to_tran_index].first_edge;
4728  lk_Gl.TWFG_node[to_tran_index].first_edge = -1;
4729  }
4730  lk_Gl.TWFG_node[to_tran_index].checked_by_deadlock_detector = true;
4731  lk_Gl.TWFG_node[to_tran_index].tran_edge_seq_num = lk_Gl.global_edge_seq_num;
4732  }
4733 
4734  /* NOTE the following description.. According to the above code, whenever it is identified that a transaction has
4735  * been terminated during deadlock detection, the transaction is checked again as a new transaction. And, the current
4736  * edge is based on the current active transactions. */
4737 
4738  if (lk_Gl.TWFG_free_edge_idx == -1)
4739  { /* too many WFG edges */
4740  if (lk_Gl.max_TWFG_edge == LK_MIN_TWFG_EDGE_COUNT)
4741  {
4742  lk_Gl.max_TWFG_edge = LK_MID_TWFG_EDGE_COUNT;
4743  for (i = LK_MIN_TWFG_EDGE_COUNT; i < lk_Gl.max_TWFG_edge; i++)
4744  {
4745  lk_Gl.TWFG_edge[i].to_tran_index = -1;
4746  lk_Gl.TWFG_edge[i].next = (i + 1);
4747  }
4748  lk_Gl.TWFG_edge[lk_Gl.max_TWFG_edge - 1].next = -1;
4749  lk_Gl.TWFG_free_edge_idx = LK_MIN_TWFG_EDGE_COUNT;
4750  }
4751  else if (lk_Gl.max_TWFG_edge == LK_MID_TWFG_EDGE_COUNT)
4752  {
4753  temp_ptr = (char *) lk_Gl.TWFG_edge;
4754  lk_Gl.max_TWFG_edge = LK_MAX_TWFG_EDGE_COUNT;
4755  lk_Gl.TWFG_edge = (LK_WFG_EDGE *) malloc (SIZEOF_LK_WFG_EDGE * lk_Gl.max_TWFG_edge);
4756  if (lk_Gl.TWFG_edge == NULL)
4757  {
4759  (size_t) (SIZEOF_LK_WFG_EDGE * lk_Gl.max_TWFG_edge));
4760  return ER_OUT_OF_VIRTUAL_MEMORY; /* no method */
4761  }
4762  (void) memcpy ((char *) lk_Gl.TWFG_edge, temp_ptr, (SIZEOF_LK_WFG_EDGE * LK_MID_TWFG_EDGE_COUNT));
4763  for (i = LK_MID_TWFG_EDGE_COUNT; i < lk_Gl.max_TWFG_edge; i++)
4764  {
4765  lk_Gl.TWFG_edge[i].to_tran_index = -1;
4766  lk_Gl.TWFG_edge[i].next = (i + 1);
4767  }
4768  lk_Gl.TWFG_edge[lk_Gl.max_TWFG_edge - 1].next = -1;
4769  lk_Gl.TWFG_free_edge_idx = LK_MID_TWFG_EDGE_COUNT;
4770  }
4771  else
4772  {
4773 #if defined(CUBRID_DEBUG)
4774  er_log_debug (ARG_FILE_LINE, "So many TWFG edges are used..\n");
4775 #endif /* CUBRID_DEBUG */
4776  return ER_FAILED; /* no method */
4777  }
4778  }
4779 
4780  /* allocate free WFG edge */
4781  alloc_idx = lk_Gl.TWFG_free_edge_idx;
4782  lk_Gl.TWFG_free_edge_idx = lk_Gl.TWFG_edge[alloc_idx].next;
4783 
4784  /* set WFG edge with given information */
4785  lk_Gl.TWFG_edge[alloc_idx].to_tran_index = to_tran_index;
4786  lk_Gl.TWFG_edge[alloc_idx].edge_seq_num = lk_Gl.global_edge_seq_num;
4787  lk_Gl.TWFG_edge[alloc_idx].holder_flag = holder_flag;
4788  lk_Gl.TWFG_edge[alloc_idx].edge_wait_stime = edge_wait_stime;
4789 
4790  /* connect the WFG edge into WFG */
4791  lk_Gl.TWFG_edge[alloc_idx].next = lk_Gl.TWFG_node[from_tran_index].first_edge;
4792  lk_Gl.TWFG_node[from_tran_index].first_edge = alloc_idx;
4793 
4794  return NO_ERROR;
4795 }
4796 #endif /* SERVER_MODE */
4797 
4798 #if defined(SERVER_MODE)
4799 /*
4800  * lock_select_deadlock_victim -
4801  *
4802  * return:
4803  *
4804  * s(in):
4805  * t(in):
4806  *
4807  * Note:
4808  */
4809 static void
4810 lock_select_deadlock_victim (THREAD_ENTRY * thread_p, int s, int t)
4811 {
4812  LK_WFG_NODE *TWFG_node;
4813  LK_WFG_EDGE *TWFG_edge;
4814  TRANID tranid;
4815  TRANID victim_tranid;
4816  bool can_timeout;
4817  int i, u, v, w, n;
4818  bool false_dd_cycle = false;
4819  bool lock_holder_found = false;
4820  bool inact_trans_found = false;
4821  int tot_WFG_nodes;
4822 #if defined(CUBRID_DEBUG)
4823  int num_WFG_nodes;
4824  int WFG_nidx;
4825  int tran_index_area[20];
4826  int *tran_index_set = &tran_index_area[0];
4827 #endif
4828  char *cycle_info_string;
4829  char *ptr;
4830  int num_tran_in_cycle;
4831  int unit_size = LOG_USERNAME_MAX + CUB_MAXHOSTNAMELEN + PATH_MAX + 10;
4832  const char *client_prog_name, *client_user_name, *client_host_name;
4833  int client_pid;
4834  int next_node;
4835  int *tran_index_in_cycle = NULL;
4836  int victim_tran_index;
4837  int tran_log_count, victim_tran_log_count;
4838 
4839  /* simple notation */
4840  TWFG_node = lk_Gl.TWFG_node;
4841  TWFG_edge = lk_Gl.TWFG_edge;
4842 
4843  /*
4844  * check if current deadlock cycle is false deadlock cycle
4845  */
4846  tot_WFG_nodes = 0;
4847  if (TWFG_node[t].current == -1)
4848  {
4849  /* old WFG edge : remove it */
4850  TWFG_edge[TWFG_node[s].current].to_tran_index = -2;
4851  false_dd_cycle = true;
4852  }
4853  else
4854  {
4855  if (TWFG_node[t].checked_by_deadlock_detector == false || TWFG_node[t].thrd_wait_stime == 0
4856  || (TWFG_node[t].thrd_wait_stime > TWFG_edge[TWFG_node[t].current].edge_wait_stime))
4857  {
4858  /* old transaction, not lockwait state, or incorrect WFG edge */
4859  /* remove all outgoing edges */
4860  TWFG_node[t].first_edge = -1;
4861  TWFG_node[t].current = -1;
4862  /* remove incoming edge */
4863  TWFG_edge[TWFG_node[s].current].to_tran_index = -2;
4864  false_dd_cycle = true;
4865  }
4866  else
4867  {
4868  if (TWFG_edge[TWFG_node[s].current].edge_seq_num < TWFG_node[t].tran_edge_seq_num)
4869  {
4870  /* old WFG edge : remove it */
4871  TWFG_edge[TWFG_node[s].current].to_tran_index = -2;
4872  false_dd_cycle = true;
4873  }
4874  else
4875  {
4876  tot_WFG_nodes += 1;
4877  }
4878  }
4879  }
4880  for (v = s; v != t;)
4881  {
4882  u = lk_Gl.TWFG_node[v].ancestor;
4883  if (TWFG_node[v].current == -1)
4884  {
4885  /* old WFG edge : remove it */
4886  TWFG_edge[TWFG_node[u].current].to_tran_index = -2;
4887  false_dd_cycle = true;
4888  }
4889  else
4890  {
4891  if (TWFG_node[v].checked_by_deadlock_detector == false || TWFG_node[v].thrd_wait_stime == 0
4892  || (TWFG_node[v].thrd_wait_stime > TWFG_edge[TWFG_node[v].current].edge_wait_stime))
4893  {
4894  /* old transaction, not lockwait state, or incorrect WFG edge */
4895  /* remove all outgoing edges */
4896  TWFG_node[v].first_edge = -1;
4897  TWFG_node[v].current = -1;
4898  /* remove incoming edge */
4899  TWFG_edge[TWFG_node[u].current].to_tran_index = -2;
4900  false_dd_cycle = true;
4901  }
4902  else
4903  {
4904  if (TWFG_edge[TWFG_node[u].current].edge_seq_num < TWFG_node[v].tran_edge_seq_num)
4905  {
4906  /* old WFG edge : remove it */
4907  TWFG_edge[TWFG_node[u].current].to_tran_index = -2;
4908  false_dd_cycle = true;
4909  }
4910  else
4911  {
4912  tot_WFG_nodes += 1;
4913  }
4914  }
4915  }
4916  v = u;
4917  }
4918 
4919  if (false_dd_cycle == true)
4920  { /* clear deadlock cycle */
4921  for (v = s; v != t;)
4922  {
4923  w = TWFG_node[v].ancestor;
4924  TWFG_node[v].ancestor = -1;
4925  v = w;
4926  }
4927  return;
4928  }
4929 
4930  /*
4931  * Victim Selection Strategy 1) Must be lock holder. 2) Must be active transaction. 3) Prefer a transaction does not
4932  * have victim priority. 4) Prefer a transaction has written less log records. 5) Prefer a transaction with a closer
4933  * timeout. 6) Prefer the youngest transaction. */
4934 #if defined(CUBRID_DEBUG)
4935  num_WFG_nodes = tot_WFG_nodes;
4936  if (num_WFG_nodes > 20)
4937  {
4938  tran_index_set = (int *) malloc (sizeof (int) * num_WFG_nodes);
4939  if (tran_index_set == NULL)
4940  {
4941  num_WFG_nodes = 20;
4942  tran_index_set = &tran_index_area[0];
4943  }
4944  }
4945  WFG_nidx = 0;
4946 
4947  if (TWFG_node[t].checked_by_deadlock_detector == false)
4948  {
4949  er_log_debug (ARG_FILE_LINE, "transaction(index=%d) is old in deadlock cycle\n", t);
4950  }
4951 #endif /* CUBRID_DEBUG */
4952  if (TWFG_edge[TWFG_node[s].current].holder_flag)
4953  {
4954  tranid = logtb_find_tranid (t);
4955  if (logtb_is_active (thread_p, tranid) == false)
4956  {
4957  victims[victim_count].tran_index = NULL_TRAN_INDEX;
4958  inact_trans_found = true;
4959 #if defined(CUBRID_DEBUG)
4961  "Inactive transaction is found in a deadlock cycle\n(tran_index=%d, tranid=%d, state=%s)\n",
4962  t, tranid, log_state_string (logtb_find_state (t)));
4963  tran_index_set[WFG_nidx] = t;
4964  WFG_nidx += 1;
4965 #endif /* CUBRID_DEBUG */
4966  }
4967  else
4968  {
4969  victims[victim_count].tran_index = t;
4970  victims[victim_count].tranid = tranid;
4971  victims[victim_count].can_timeout = LK_CAN_TIMEOUT (logtb_find_wait_msecs (t));
4972  lock_holder_found = true;
4973  }
4974  }
4975  else
4976  {
4977  victims[victim_count].tran_index = NULL_TRAN_INDEX;
4978 #if defined(CUBRID_DEBUG)
4979  tran_index_set[WFG_nidx] = t;
4980  WFG_nidx += 1;
4981 #endif
4982  }
4983 
4984  victims[victim_count].tran_index_in_cycle = NULL;
4985  victims[victim_count].num_trans_in_cycle = 0;
4986 
4987  num_tran_in_cycle = 1;
4988  for (v = s; v != t; v = TWFG_node[v].ancestor)
4989  {
4990  num_tran_in_cycle++;
4991  }
4992 
4993  cycle_info_string = (char *) malloc (unit_size * num_tran_in_cycle);
4994  tran_index_in_cycle = (int *) malloc (sizeof (int) * num_tran_in_cycle);
4995 
4996  if (cycle_info_string != NULL && tran_index_in_cycle != NULL)
4997  {
4998  int i;
4999 
5000  ptr = cycle_info_string;
5001 
5002  for (i = 0, v = s; i < num_tran_in_cycle; i++, v = TWFG_node[v].ancestor)
5003  {
5004  (void) logtb_find_client_name_host_pid (v, &client_prog_name, &client_user_name, &client_host_name,
5005  &client_pid);
5006 
5007  n =
5008  snprintf (ptr, unit_size, "%s%s@%s|%s(%d)", ((v == s) ? "" : ", "), client_user_name, client_host_name,
5009  client_prog_name, client_pid);
5010  ptr += n;
5011  assert_release (ptr < cycle_info_string + unit_size * num_tran_in_cycle);
5012 
5013  tran_index_in_cycle[i] = v;
5014  }
5015  }
5016 
5018  (cycle_info_string) ? cycle_info_string : "");
5019 
5020  if (cycle_info_string != NULL)
5021  {
5022  free_and_init (cycle_info_string);
5023  }
5024 
5025  for (v = s; v != t;)
5026  {
5027 #if defined(CUBRID_DEBUG)
5028  if (TWFG_node[v].checked_by_deadlock_detector == false)
5029  {
5030  er_log_debug (ARG_FILE_LINE, "transaction(index=%d) is old in deadlock cycle\n", v);
5031  }
5032 #endif /* CUBRID_DEBUG */
5033  if (TWFG_node[v].candidate == true)
5034  {
5035  tranid = logtb_find_tranid (v);
5036  victim_tran_index = victims[victim_count].tran_index;
5037  if (logtb_is_active (thread_p, tranid) == false) /* Must be active transaction. */
5038  {
5039  inact_trans_found = true;
5040 #if defined(CUBRID_DEBUG)
5042  "Inactive transaction is found in a deadlock cycle\n"
5043  "(tran_index=%d, tranid=%d, state=%s)\n", v, tranid,
5045  tran_index_set[WFG_nidx] = v;
5046  WFG_nidx += 1;
5047 #endif /* CUBRID_DEBUG */
5048  }
5049  else
5050  {
5051  victim_tranid = NULL_TRANID;
5052  lock_holder_found = true;
5053  can_timeout = LK_CAN_TIMEOUT (logtb_find_wait_msecs (v));
5054  if (victim_tran_index == NULL_TRAN_INDEX)
5055  {
5056  victim_tranid = tranid;
5057  }
5058  else if (logtb_has_deadlock_priority (victim_tran_index) != logtb_has_deadlock_priority (v))
5059  {
5060  /* Prefer a transaction does not have victim priority. */
5061  if (logtb_has_deadlock_priority (v) == false)
5062  {
5063  victim_tranid = tranid;
5064  }
5065  }
5066  else
5067  {
5068  tran_log_count = logtb_find_log_records_count (v);
5069  victim_tran_log_count = logtb_find_log_records_count (victim_tran_index);
5070 
5071  if (tran_log_count != victim_tran_log_count)
5072  {
5073  if (tran_log_count < victim_tran_log_count)
5074  {
5075  /* Prefer a transaction has written less log records. */
5076  victim_tranid = tranid;
5077  }
5078  }
5079  else
5080  {
5081  /*
5082  * Prefer a transaction with a closer timeout.
5083  * Prefer the youngest transaction.
5084  */
5085  if ((victims[victim_count].can_timeout == false && can_timeout == true)
5086  || (victims[victim_count].can_timeout == can_timeout
5087  && (LK_ISYOUNGER (tranid, victims[victim_count].tranid))))
5088  {
5089  victim_tranid = tranid;
5090  }
5091  }
5092  }
5093 
5094  if (victim_tranid != NULL_TRANID)
5095  {
5096  victims[victim_count].tran_index = v;
5097  victims[victim_count].tranid = victim_tranid;
5098  victims[victim_count].can_timeout = can_timeout;
5099  }
5100  }
5101  }
5102 #if defined(CUBRID_DEBUG)
5103  else
5104  { /* TWFG_node[v].candidate == false */
5105  tran_index_set[WFG_nidx] = v;
5106  WFG_nidx += 1;
5107  }
5108 #endif
5109  v = TWFG_node[v].ancestor;
5110  }
5111 
5112  if (victims[victim_count].tran_index != NULL_TRAN_INDEX)
5113  {
5114 #if defined(CUBRID_DEBUG)
5115  if (TWFG_node[victims[victim_count].tran_index].checked_by_deadlock_detector == false)
5116  {
5117  er_log_debug (ARG_FILE_LINE, "victim(index=%d) is old in deadlock cycle\n", victims[victim_count].tran_index);
5118  }
5119 #endif /* CUBRID_DEBUG */
5120  TWFG_node[victims[victim_count].tran_index].current = -1;
5121  victims[victim_count].tran_index_in_cycle = tran_index_in_cycle;
5122  victims[victim_count].num_trans_in_cycle = num_tran_in_cycle;
5123  victim_count++;
5124  }
5125  else
5126  {
5127  /* We can't find active holder. In this case, this cycle is regarded as a false deadlock. */
5128  for (i = 0, v = s; i < num_tran_in_cycle; v = TWFG_node[v].ancestor, i++)
5129  {
5130  assert_release (TWFG_node[v].current >= 0 && TWFG_node[v].current < lk_Gl.max_TWFG_edge);
5131 
5132  next_node = TWFG_edge[TWFG_node[v].current].to_tran_index;
5133 
5134  if (TWFG_node[next_node].checked_by_deadlock_detector == false || TWFG_node[next_node].thrd_wait_stime == 0
5135  || TWFG_node[next_node].thrd_wait_stime > TWFG_edge[TWFG_node[next_node].current].edge_wait_stime)
5136  {
5137  /* The edge from v to next_node is removed(false edge). */
5138  TWFG_node[next_node].first_edge = -1;
5139  TWFG_node[next_node].current = -1;
5140  TWFG_edge[TWFG_node[v].current].to_tran_index = -2;
5141  TWFG_node[v].current = TWFG_edge[TWFG_node[v].current].next;
5142  break;
5143  }
5144  }
5145 
5146  if (i == num_tran_in_cycle)
5147  {
5148  /* can't find false edge */
5149  TWFG_edge[TWFG_node[s].current].to_tran_index = -2;
5150  TWFG_node[s].current = TWFG_edge[TWFG_node[s].current].next;
5151  }
5152 
5153  if (tran_index_in_cycle != NULL)
5154  {
5155  free_and_init (tran_index_in_cycle);
5156  }
5157 
5158 #if defined(CUBRID_DEBUG)
5159  er_log_debug (ARG_FILE_LINE, "No victim in deadlock cycle....\n");
5160  if (lock_holder_found == false)
5161  {
5162  er_log_debug (ARG_FILE_LINE, "Any Lock holder is not found in deadlock cycle.\n");
5163  }
5164  if (inact_trans_found == true)
5165  {
5166  er_log_debug (ARG_FILE_LINE, "Inactive transactions are found in deadlock cycle.\n");
5167  }
5168  er_log_debug (ARG_FILE_LINE, "total_edges=%d, free_edge_idx=%d, global_edge_seq=%d\n", lk_Gl.max_TWFG_edge,
5169  lk_Gl.TWFG_free_edge_idx, lk_Gl.global_edge_seq_num);
5170  er_log_debug (ARG_FILE_LINE, "# of WFG nodes in deadlock cycle = %d (%d printed)\n", tot_WFG_nodes,
5171  num_WFG_nodes);
5172  for (WFG_nidx = 0; WFG_nidx < num_WFG_nodes; WFG_nidx++)
5173  {
5174  er_log_debug (ARG_FILE_LINE, "%3d ", tran_index_set[WFG_nidx]);
5175  if ((WFG_nidx + 1) == num_WFG_nodes || (WFG_nidx % 10) == 9)
5176  {
5177  er_log_debug (ARG_FILE_LINE, "\n");
5178  }
5179  }
5180 #endif /* CUBRID_DEBUG */
5181  }
5182 
5183  for (v = s; v != t;)
5184  {
5185  w = TWFG_node[v].ancestor;
5186  TWFG_node[v].ancestor = -1;
5187  v = w;
5188  }
5189 }
5190 #endif /* SERVER_MODE */
5191 
5192 #if defined(SERVER_MODE)
5193 /*
5194  * lock_dump_deadlock_victims -
5195  *
5196  * return:
5197  */
5198 static void
5199 lock_dump_deadlock_victims (THREAD_ENTRY * thread_p, FILE * outfile)
5200 {
5201  int k, count;
5202 
5203  fprintf (outfile, "*** Deadlock Victim Information ***\n");
5204  fprintf (outfile, "Victim count = %d\n", victim_count);
5205  /* print aborted transactions (deadlock victims) */
5207  count = 0;
5208  for (k = 0; k < victim_count; k++)
5209  {
5210  if (!victims[k].can_timeout)
5211  {
5213  victims[k].tran_index);
5214  if ((count % 10) == 9)
5215  {
5217  }
5218  count++;
5219  }
5220  }
5222  /* print timeout transactions (deadlock victims) */
5224  count = 0;
5225  for (k = 0; k < victim_count; k++)
5226  {
5227  if (victims[k].can_timeout)
5228  {
5230  victims[k].tran_index);
5231  if ((count % 10) == 9)
5232  {
5234  }
5235  count++;
5236  }
5237  }
5238 
5239  xlock_dump (thread_p, outfile);
5240 }
5241 #endif /* SERVER_MODE */
5242 
5243 /*
5244  * Private Functions Group: miscellaneous functions
5245  *
5246  * - lk_lockinfo_compare()
5247  * - lk_dump_res()
5248  * - lk_consistent_res()
5249  * - lk_consistent_tran_lock()
5250  */
5251 
5252 #if defined(SERVER_MODE)
5253 /*
5254  * lock_compare_lock_info -
5255  *
5256  * return:
5257  *
5258  * lockinfo1(in):
5259  * lockinfo2(in):
5260  *
5261  * Note:compare two OID of lockable objects.
5262  */
5263 static int
5264 lock_compare_lock_info (const void *lockinfo1, const void *lockinfo2)
5265 {
5266  const OID *oid1;
5267  const OID *oid2;
5268 
5269  oid1 = &(((LK_LOCKINFO *) (lockinfo1))->oid);
5270  oid2 = &(((LK_LOCKINFO *) (lockinfo2))->oid);
5271 
5272  return oid_compare (oid1, oid2);
5273 }
5274 #endif /* SERVER_MODE */
5275 
5276 #if defined(SERVER_MODE)
5277 /*
5278  * lock_wait_msecs_to_secs -
5279  *
5280  * return: seconds
5281  *
5282  * msecs(in): milliseconds
5283  */
5284 static float
5285 lock_wait_msecs_to_secs (int msecs)
5286 {
5287  if (msecs > 0)
5288  {
5289  return (float) msecs / 1000;
5290  }
5291 
5292  return (float) msecs;
5293 }
5294 #endif /* SERVER_MODE */
5295 
5296 #if defined(SERVER_MODE)
5297 /*
5298  * lock_dump_resource - Dump locks acquired on a resource
5299  *
5300  * return:
5301  *
5302  * outfp(in): FILE stream where to dump the lock resource entry.
5303  * res_ptr(in): pointer to lock resource entry
5304  *
5305  * Note:Dump contents of the lock resource entry pointed by res_ptr.
5306  */
5307 static void
5308 lock_dump_resource (THREAD_ENTRY * thread_p, FILE * outfp, LK_RES * res_ptr)
5309 {
5310 #define TEMP_BUFFER_SIZE 128
5311  LK_ENTRY *entry_ptr;
5312  char *classname; /* Name of the class */
5313  int num_holders, num_blocked_holders, num_waiters;
5314  char time_val[CTIME_MAX];
5315  size_t time_str_len;
5316  OID *oid_rr = NULL;
5317  HEAP_SCANCACHE scan_cache;
5318  OID real_class_oid;
5319 
5320  memset (time_val, 0, sizeof (time_val));
5321 
5322  /* dump object identifier */
5324  res_ptr->key.oid.pageid, res_ptr->key.oid.slotid);
5325 
5326  /* dump object type related information */
5327  switch (res_ptr->key.type)
5328  {
5331  break;
5332  case LOCK_RESOURCE_CLASS:
5333  oid_rr = oid_get_rep_read_tran_oid ();
5334  if (oid_rr != NULL && OID_EQ (&res_ptr->key.oid, oid_rr))
5335  {
5336  /* This is the generic object for RR transactions */
5338  }
5339  else if (!OID_ISTEMP (&res_ptr->key.oid))
5340  {
5341  if (OID_IS_VIRTUAL_CLASS_OF_DIR_OID (&res_ptr->key.oid))
5342  {
5343  OID_GET_REAL_CLASS_OF_DIR_OID (&res_ptr->key.oid, &real_class_oid);
5344  }
5345  else
5346  {
5347  COPY_OID (&real_class_oid, &res_ptr->key.oid);
5348  }
5349  /* Don't get class names for temporary class objects. */
5350  if (heap_get_class_name (thread_p, &real_class_oid, &classname) != NO_ERROR || classname == NULL)
5351  {
5352  /* We must stop processing if an interrupt occurs */
5353  if (er_errid () == ER_INTERRUPTED)
5354  {
5355  return;
5356  }
5357 
5358  /* Otherwise continue */
5359  er_clear ();
5360  }
5361  else
5362  {
5364  classname);
5365  free_and_init (classname);
5366  }
5367  }
5368  break;
5370  if (!OID_ISTEMP (&res_ptr->key.class_oid))
5371  {
5372  bool is_virtual_directory_oid;
5373 
5374  /* Don't get class names for temporary class objects. */
5376  {
5377  is_virtual_directory_oid = true;
5378  OID_GET_REAL_CLASS_OF_DIR_OID (&res_ptr->key.class_oid, &real_class_oid);
5379  }
5380  else
5381  {
5382  is_virtual_directory_oid = false;
5383  COPY_OID (&real_class_oid, &res_ptr->key.class_oid);
5384  }
5385 
5386  if (heap_get_class_name (thread_p, &real_class_oid, &classname) != NO_ERROR || classname == NULL)
5387  {
5388  /* We must stop processing if an interrupt occurs */
5389  if (er_errid () == ER_INTERRUPTED)
5390  {
5391  return;
5392  }
5393 
5394  /* Otherwise continue */
5395  er_clear ();
5396  }
5397  else
5398  {
5400  res_ptr->key.class_oid.volid, res_ptr->key.class_oid.pageid, res_ptr->key.class_oid.slotid,
5401  classname);
5402  free_and_init (classname);
5403  }
5404 
5405  /* Dump MVCC info */
5406  if (is_virtual_directory_oid == false && heap_scancache_quick_start (&scan_cache) == NO_ERROR)
5407  {
5409 
5410  if (heap_get_visible_version (thread_p, &res_ptr->key.oid, &res_ptr->key.class_oid, &recdes, &scan_cache,
5411  PEEK, NULL_CHN) == S_SUCCESS)
5412  {
5414  if (or_mvcc_get_header (&recdes, &mvcc_rec_header) == NO_ERROR)
5415  {
5416  char str_insid[128], str_delid[128];
5417  if (MVCC_IS_FLAG_SET (&mvcc_rec_header, OR_MVCC_FLAG_VALID_INSID))
5418  {
5419  sprintf (str_insid, "%llu", (unsigned long long int) MVCC_GET_INSID (&mvcc_rec_header));
5420  }
5421  else
5422  {
5423  strcpy (str_insid, "missing");
5424  }
5425  if (MVCC_IS_HEADER_DELID_VALID (&mvcc_rec_header))
5426  {
5427  sprintf (str_delid, "%llu", (unsigned long long int) MVCC_GET_DELID (&mvcc_rec_header));
5428  }
5429  else
5430  {
5431  strcpy (str_delid, "missing");
5432  }
5434  str_insid, str_delid);
5435  }
5436  }
5437  heap_scancache_end (thread_p, &scan_cache);
5438  }
5439  }
5440  break;
5441  default:
5443  }
5444 
5445  /* dump total modes of holders and waiters */
5449 
5450  num_holders = num_blocked_holders = 0;
5451  if (res_ptr->holder != NULL)
5452  {
5453  entry_ptr = res_ptr->holder;
5454  while (entry_ptr != NULL)
5455  {
5456  if (entry_ptr->blocked_mode == NULL_LOCK)
5457  {
5458  num_holders++;
5459  }
5460  else
5461  {
5462  num_blocked_holders++;
5463  }
5464  entry_ptr = entry_ptr->next;
5465  }
5466  }
5467  num_waiters = 0;
5468  if (res_ptr->waiter != NULL)
5469  {
5470  entry_ptr = res_ptr->waiter;
5471  while (entry_ptr != NULL)
5472  {
5473  num_waiters++;
5474  entry_ptr = entry_ptr->next;
5475  }
5476  }
5477 
5479  num_blocked_holders, num_waiters);
5480 
5481  /* dump holders */
5482  if (num_holders > 0)
5483  {
5484  /* dump non blocked holders */
5486  entry_ptr = res_ptr->holder;
5487  while (entry_ptr != NULL)
5488  {
5489  if (entry_ptr->blocked_mode == NULL_LOCK)
5490  {
5491  if (res_ptr->key.type == LOCK_RESOURCE_INSTANCE)
5492  {
5493  fprintf (outfp,
5495  MSGCAT_LK_RES_NON_BLOCKED_HOLDER_ENTRY), "", entry_ptr->tran_index,
5496  LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode), entry_ptr->count);
5497  }
5498  else
5499  {
5500  fprintf (outfp,
5503  entry_ptr->tran_index, LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode), entry_ptr->count,
5504  entry_ptr->ngranules);
5505  }
5506  }
5507  entry_ptr = entry_ptr->next;
5508  }
5509  }
5510 
5511  if (num_blocked_holders > 0)
5512  {
5513  /* dump blocked holders */
5515  entry_ptr = res_ptr->holder;
5516  while (entry_ptr != NULL)
5517  {
5518  if (entry_ptr->blocked_mode != NULL_LOCK)
5519  {
5520  time_t stime = (time_t) (entry_ptr->thrd_entry->lockwait_stime / 1000LL);
5521  if (ctime_r (&stime, time_val) == NULL)
5522  {
5523  strcpy (time_val, "???");
5524  }
5525 
5526  time_str_len = strlen (time_val);
5527  if (time_str_len > 0 && time_val[time_str_len - 1] == '\n')
5528  {
5529  time_val[time_str_len - 1] = 0;
5530  }
5531  if (res_ptr->key.type == LOCK_RESOURCE_INSTANCE)
5532  {
5533  fprintf (outfp,
5535  "", entry_ptr->tran_index, LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode),
5536  entry_ptr->count, "", LOCK_TO_LOCKMODE_STRING (entry_ptr->blocked_mode), "", time_val, "",
5537  lock_wait_msecs_to_secs (entry_ptr->thrd_entry->lockwait_msecs));
5538  }
5539  else
5540  {
5541  fprintf (outfp,
5543  MSGCAT_LK_RES_BLOCKED_HOLDER_ENTRY_WITH_GRANULE), "", entry_ptr->tran_index,
5544  LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode), entry_ptr->count, entry_ptr->ngranules,
5545  "", LOCK_TO_LOCKMODE_STRING (entry_ptr->blocked_mode), "", time_val, "",
5546  lock_wait_msecs_to_secs (entry_ptr->thrd_entry->lockwait_msecs));
5547  }
5548  }
5549  entry_ptr = entry_ptr->next;
5550  }
5551  }
5552 
5553  /* dump blocked waiters */
5554  if (res_ptr->waiter != NULL)
5555  {
5557  entry_ptr = res_ptr->waiter;
5558  while (entry_ptr != NULL)
5559  {
5560  time_t stime = (time_t) (entry_ptr->thrd_entry->lockwait_stime / 1000LL);
5561  (void) ctime_r (&stime, time_val);
5562 
5563  time_str_len = strlen (time_val);
5564  if (time_str_len > 0 && time_val[time_str_len - 1] == '\n')
5565  {
5566  time_val[time_str_len - 1] = 0;
5567  }
5569  "", entry_ptr->tran_index, LOCK_TO_LOCKMODE_STRING (entry_ptr->blocked_mode), "", time_val, "",
5570  lock_wait_msecs_to_secs (entry_ptr->thrd_entry->lockwait_msecs));
5571  entry_ptr = entry_ptr->next;
5572  }
5573  }
5574 
5575  /* dump non two phase locks */
5576  if (res_ptr->non2pl != NULL)
5577  {
5579  entry_ptr = res_ptr->non2pl;
5580  while (entry_ptr != NULL)
5581  {
5583  "", entry_ptr->tran_index,
5584  ((entry_ptr->granted_mode == INCON_NON_TWO_PHASE_LOCK) ? "INCON_NON_TWO_PHASE_LOCK"
5585  : LOCK_TO_LOCKMODE_STRING (entry_ptr->granted_mode)));
5586  entry_ptr = entry_ptr->next;
5587  }
5588  }
5590 
5591 }
5592 #endif /* SERVER_MODE */
5593 
5594 /*
5595  * lock_initialize - Initialize the lock manager
5596  *
5597  * return: error code
5598  *
5599  * estimate_nobj_locks(in): estimate_nobj_locks(useless)
5600  *
5601  * Note:Initialize the lock manager memory structures.
5602  */
5603 int
5605 {
5606 #if !defined (SERVER_MODE)
5607  lk_Standalone_has_xlock = false;
5608  return NO_ERROR;
5609 #else /* !SERVER_MODE */
5610  const char *env_value;
5611  int error_code = NO_ERROR;
5612 
5613  error_code = lock_initialize_tran_lock_table ();
5614  if (error_code != NO_ERROR)
5615  {
5616  goto error;
5617  }
5618  lock_initialize_object_hash_table ();
5619  error_code = lock_initialize_object_lock_entry_list ();
5620  if (error_code != NO_ERROR)
5621  {
5622  goto error;
5623  }
5624  error_code = lock_initialize_deadlock_detection ();
5625  if (error_code != NO_ERROR)
5626  {
5627  goto error;
5628  }
5629 
5630  /* initialize some parameters */
5631 #if defined(CUBRID_DEBUG)
5632  lk_Gl.verbose_mode = true;
5633  lk_Gl.no_victim_case_count = 0;
5634 #else /* !CUBRID_DEBUG */
5635  env_value = envvar_get ("LK_VERBOSE_SUSPENDED");
5636  if (env_value != NULL)
5637  {
5638  lk_Gl.verbose_mode = (bool) atoi (env_value);
5639  if (lk_Gl.verbose_mode != false)
5640  {
5641  lk_Gl.verbose_mode = true;
5642  }
5643  }
5644  lk_Gl.no_victim_case_count = 0;
5645 #endif /* !CUBRID_DEBUG */
5646 
5647 #if defined(LK_DUMP)
5648  lk_Gl.dump_level = 0;
5649  env_value = envvar_get ("LK_DUMP_LEVEL");
5650  if (env_value != NULL)
5651  {
5652  lk_Gl.dump_level = atoi (env_value);
5653  if (lk_Gl.dump_level < 0 || lk_Gl.dump_level > 3)
5654  {
5655  lk_Gl.dump_level = 0;
5656  }
5657  }
5658 #endif /* LK_DUMP */
5659 
5660  lock_deadlock_detect_daemon_init ();
5661 
5662  return error_code;
5663 
5664 error:
5665  (void) lock_finalize ();
5666  return error_code;
5667 #endif /* !SERVER_MODE */
5668 }
5669 
5670 // *INDENT-OFF*
5671 #if defined(SERVER_MODE)
5672 
5673 //
5674 // lock_check_timeout_expired_and_count_suspended_mapfunc - function to map over all thread entries to find timed out
5675 // locks and suspended threads
5676 //
5677 // thread_ref (in) : thread entry
5678 // stop_mapper (out) : ignored
5679 // suspend_count (out) : count suspended threads
5680 //
5681 static void
5682 lock_check_timeout_expired_and_count_suspended_mapfunc (THREAD_ENTRY & thread_ref, bool & stop_mapper,
5683  size_t & suspend_count)
5684 {
5685  (void) stop_mapper; // suppress unused parameter warning
5686 
5687  // skip dead/free threads
5688  if (thread_ref.m_status == cubthread::entry::status::TS_DEAD
5689  || thread_ref.m_status == cubthread::entry::status::TS_FREE)
5690  {
5691  return;
5692  }
5693  if (thread_ref.lockwait == NULL)
5694  {
5695  return;
5696  }
5697  // suspended thread
5698 
5699  /* The transaction, for which the current thread is working, might be interrupted.
5700  * lock_force_timeout_expired_wait_transactions() performs not only interrupt but timeout checking.
5701  */
5703  {
5704  // not timed out. count as suspended
5705  suspend_count++;
5706  }
5707 }
5708 
5709 // class deadlock_detect_task
5710 //
5711 // description:
5712 // deadlock detect daemon task
5713 // It does
5714 // (1) to resume an interrupted lock waiter
5715 // (2) to resume a timedout lock waiter
5716 // (3) to detect and resolve a deadlock.
5717 // It operates (1) and (2) for every 100ms and does (3) for every PRM_ID_LK_RUN_DEADLOCK_INTERVAL.
5718 //
5719 void
5720 deadlock_detect_task_execute (cubthread::entry & thread_ref)
5721 {
5722  if (!BO_IS_SERVER_RESTARTED ())
5723  {
5724  // wait for boot to finish
5725  return;
5726  }
5727 
5728  if (lk_Gl.deadlock_and_timeout_detector == 0)
5729  {
5730  // if none of the threads were suspended then just return
5731  return;
5732  }
5733 
5734  /* check if the lock-wait thread exists */
5735  size_t lock_wait_count = 0;
5736  thread_get_manager ()->map_entries (lock_check_timeout_expired_and_count_suspended_mapfunc, lock_wait_count);
5737 
5738  if (lock_is_local_deadlock_detection_interval_up () && lock_wait_count >= 2)
5739  {
5740  lock_detect_local_deadlock (&thread_ref);
5741  }
5742 }
5743 #endif /* SERVER_MODE */
5744 
5745 #if defined(SERVER_MODE)
5746 /*
5747  * lock_deadlock_detect_daemon_init () - initialize deadlock detect daemon thread
5748  */
5749 void
5750 lock_deadlock_detect_daemon_init ()
5751 {
5752  assert (lock_Deadlock_detect_daemon == NULL);
5753 
5754  cubthread::looper looper = cubthread::looper (std::chrono::milliseconds (100));
5755  cubthread::entry_callable_task *daemon_task = new cubthread::entry_callable_task (deadlock_detect_task_execute);
5756 
5757  // create deadlock detect daemon thread
5758  lock_Deadlock_detect_daemon = cubthread::get_manager ()->create_daemon (looper, daemon_task, "lock_deadlock_detect");
5759 }
5760 #endif /* SERVER_MODE */
5761 
5762 #if defined(SERVER_MODE)
5763 /*
5764  * lock_deadlock_detect_daemon_destroy () - destroy deadlock detect daemon thread
5765  */
5766 void
5767 lock_deadlock_detect_daemon_destroy ()
5768 {
5769  cubthread::get_manager ()->destroy_daemon (lock_Deadlock_detect_daemon);
5770 }
5771 #endif /* SERVER_MODE */
5772 
5773 #if defined(SERVER_MODE)
5774 /*
5775  * lock_deadlock_detect_daemon_get_stats - get deadlock detector daemon thread statistics into statsp
5776  */
5777 void
5778 lock_deadlock_detect_daemon_get_stats (UINT64 * statsp)
5779 {
5780  if (lock_Deadlock_detect_daemon != NULL)
5781  {
5782  lock_Deadlock_detect_daemon->get_stats (statsp);
5783  }
5784 }
5785 #endif /* SERVER_MODE */
5786 // *INDENT-ON*
5787 
5788 /*
5789  * lock_finalize - Finalize the lock manager
5790  *
5791  * return: nothing
5792  *
5793  * Note:This function finalize the lock manager.
5794  * Memory structures of the lock manager are deallocated.
5795  */
5796 void
5798 {
5799 #if !defined (SERVER_MODE)
5800  lk_Standalone_has_xlock = false;
5801 #else /* !SERVER_MODE */
5802  LK_TRAN_LOCK *tran_lock;
5803  int i;
5804 
5805  /* Release all the locks and awake all transactions */
5806  /* TODO: Why ? */
5807  /* transaction deadlock information table */
5808  /* deallocate memory space for the transaction deadlock info. */
5809  if (lk_Gl.TWFG_node != NULL)
5810  {
5811  free_and_init (lk_Gl.TWFG_node);
5812  }
5813 
5814  /* transaction lock information table */
5815  /* deallocate memory space for transaction lock table */
5816  if (lk_Gl.tran_lock_table != NULL)
5817  {
5818  for (i = 0; i < lk_Gl.num_trans; i++)
5819  {
5820  tran_lock = &lk_Gl.tran_lock_table[i];
5821  pthread_mutex_destroy (&tran_lock->hold_mutex);
5822  pthread_mutex_destroy (&tran_lock->non2pl_mutex);
5823  while (tran_lock->lk_entry_pool != NULL)
5824  {
5825  LK_ENTRY *entry = tran_lock->lk_entry_pool;
5826  tran_lock->lk_entry_pool = tran_lock->lk_entry_pool->next;
5827  free (entry);
5828  }
5829  }
5830  free_and_init (lk_Gl.tran_lock_table);
5831  }
5832  /* reset the number of transactions */
5833  lk_Gl.num_trans = 0;
5834  pthread_mutex_destroy (&lk_Gl.DL_detection_mutex);
5835 
5836  /* reset max number of object locks */
5837  lk_Gl.max_obj_locks = 0;
5838 
5839  /* destroy hash table and freelists */
5840  lk_Gl.m_obj_hash_table.destroy ();
5841  lf_freelist_destroy (&lk_Gl.obj_free_entry_list);
5842 
5843  lock_deadlock_detect_daemon_destroy ();
5844 #endif /* !SERVER_MODE */
5845 }
5846 
5847 /*
5848  * lock_hold_object_instant - Hold object lock with instant duration
5849  *
5850  * return: one of following values
5851  * LK_GRANTED
5852  * LK_NOTGRANTED
5853  * LK_NOTGRANTED_DUE_ERROR
5854  *
5855  * oid(in):
5856  * class_oid(in):
5857  * lock(in):
5858  */
5859 int
5860 lock_hold_object_instant (THREAD_ENTRY * thread_p, const OID * oid, const OID * class_oid, LOCK lock)
5861 {
5862 #if !defined (SERVER_MODE)
5863  LK_SET_STANDALONE_XLOCK (lock);
5864  return LK_GRANTED;
5865 #else /* !SERVER_MODE */
5866  int tran_index;
5867  if (oid == NULL)
5868  {
5869  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_object_instant", "NULL OID pointer");
5870  return LK_NOTGRANTED_DUE_ERROR;
5871  }
5872  if (class_oid == NULL)
5873  {
5874  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_object_instant", "NULL ClassOID pointer");
5875  return LK_NOTGRANTED_DUE_ERROR;
5876  }
5877  if (lock == NULL_LOCK)
5878  {
5879  return LK_GRANTED;
5880  }
5881 
5882  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
5883  return lock_internal_hold_lock_object_instant (thread_p, tran_index, oid, class_oid, lock);
5884 
5885 #endif /* !SERVER_MODE */
5886 }
5887 
5888 /*
5889  * lock_object - Lock an object
5890  *
5891  * return: one of following values)
5892  * LK_GRANTED
5893  * LK_NOTGRANTED_DUE_ABORTED
5894  * LK_NOTGRANTED_DUE_TIMEOUT
5895  * LK_NOTGRANTED_DUE_ERROR
5896  *
5897  * oid(in): Identifier of object(instance, class, root class) to lock
5898  * class_oid(in): Identifier of the class instance of the given object
5899  * lock(in): Requested lock mode
5900  * cond_flag(in):
5901  *
5902  */
5903 int
5904 lock_object (THREAD_ENTRY * thread_p, const OID * oid, const OID * class_oid, LOCK lock, int cond_flag)
5905 {
5906 #if !defined (SERVER_MODE)
5907  LK_SET_STANDALONE_XLOCK (lock);
5908  return LK_GRANTED;
5909 #else /* !SERVER_MODE */
5910  int tran_index;
5911  int wait_msecs;
5912  TRAN_ISOLATION isolation;
5913  LOCK new_class_lock;
5914  LOCK old_class_lock;
5915  int granted;
5916  LK_ENTRY *root_class_entry = NULL;
5917  LK_ENTRY *class_entry = NULL, *superclass_entry = NULL;
5918  LK_ENTRY *inst_entry = NULL;
5919 #if defined (EnableThreadMonitoring)
5920  TSC_TICKS start_tick, end_tick;
5921  TSCTIMEVAL elapsed_time;
5922 #endif
5923 
5924  if (oid == NULL)
5925  {
5926  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_object", "NULL OID pointer");
5927  return LK_NOTGRANTED_DUE_ERROR;
5928  }
5929  if (class_oid == NULL)
5930  {
5931  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_object", "NULL ClassOID pointer");
5932  return LK_NOTGRANTED_DUE_ERROR;
5933  }
5934 
5935  if (lock == NULL_LOCK)
5936  {
5937  return LK_GRANTED;
5938  }
5939 
5940 #if defined (EnableThreadMonitoring)
5942  {
5943  tsc_getticks (&start_tick);
5944  }
5945 #endif
5946 
5947  if (thread_p->type == TT_LOADDB)
5948  {
5949  // load worker don't lock; they rely on session transaction locks
5950  if (class_oid != NULL && !OID_IS_ROOTOID (class_oid))
5951  {
5952  // instance lock
5954  {
5955  // no instance locking is required
5956  return LK_GRANTED;
5957  }
5958  else
5959  {
5960  // should be locked
5961  assert (false);
5962  return LK_NOTGRANTED;
5963  }
5964  }
5965  else
5966  {
5967  // class lock
5968  if (lock != SCH_S_LOCK && lock != BU_LOCK)
5969  {
5970  // unacceptable
5971  assert (false);
5972  return LK_NOTGRANTED;
5973  }
5974  if (!lock_has_lock_on_object (oid, class_oid, BU_LOCK))
5975  {
5976  assert (false);
5977  return LK_NOTGRANTED;
5978  }
5979  return LK_GRANTED;
5980  }
5981  // should have returned
5982  assert (false);
5983  }
5984 
5985  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
5986  if (cond_flag == LK_COND_LOCK) /* conditional request */
5987  {
5988  wait_msecs = LK_FORCE_ZERO_WAIT;
5989  }
5990  else
5991  {
5992  wait_msecs = logtb_find_wait_msecs (tran_index);
5993  }
5994  isolation = logtb_find_isolation (tran_index);
5995 
5996  /* check if the given oid is root class oid */
5997  if (OID_IS_ROOTOID (oid))
5998  {
5999  /* case 1 : resource type is LOCK_RESOURCE_ROOT_CLASS acquire a lock on the root class oid. NOTE that in case of
6000  * acquiring a lock on a class object, the higher lock granule of the class object must not be given. */
6001  granted = lock_internal_perform_lock_object (thread_p, tran_index, oid, NULL, lock, wait_msecs,
6002  &root_class_entry, NULL);
6003  goto end;
6004  }
6005 
6006  /* get the intentional lock mode to be acquired on class oid */
6007  if (lock <= S_LOCK)
6008  {
6009  new_class_lock = IS_LOCK;
6010  }
6011  else
6012  {
6013  new_class_lock = IX_LOCK;
6014  }
6015 
6016  /* Check if current transaction has already held the class lock. If the class lock is not held, hold the class lock,
6017  * now. */
6018  class_entry = lock_get_class_lock (thread_p, class_oid);
6019  old_class_lock = (class_entry) ? class_entry->granted_mode : NULL_LOCK;
6020 
6021  if (OID_IS_ROOTOID (class_oid))
6022  {
6023  if (old_class_lock < new_class_lock)
6024  {
6025  granted = lock_internal_perform_lock_object (thread_p, tran_index, class_oid, NULL, new_class_lock,
6026  wait_msecs, &root_class_entry, NULL);
6027  if (granted != LK_GRANTED)
6028  {
6029  goto end;
6030  }
6031  }
6032  /* case 2 : resource type is LOCK_RESOURCE_CLASS */
6033  /* acquire a lock on the given class object */
6034 
6035  /* NOTE that in case of acquiring a lock on a class object, the higher lock granule of the class object must not
6036  * be given. */
6037  granted = lock_internal_perform_lock_object (thread_p, tran_index, oid, NULL, lock, wait_msecs, &class_entry,
6038  root_class_entry);
6039  goto end;
6040  }
6041  else
6042  {
6043  if (old_class_lock < new_class_lock)
6044  {
6045  if (class_entry != NULL && class_entry->class_entry != NULL
6046  && !OID_IS_ROOTOID (&class_entry->class_entry->res_head->key.oid))
6047  {
6048  /* preserve class hierarchy */
6049  superclass_entry = class_entry->class_entry;
6050  }
6051  else
6052  {
6053  superclass_entry = lock_get_class_lock (thread_p, oid_Root_class_oid);
6054  }
6055 
6056  granted =
6057  lock_internal_perform_lock_object (thread_p, tran_index, class_oid, NULL, new_class_lock, wait_msecs,
6058  &class_entry, superclass_entry);
6059  if (granted != LK_GRANTED)
6060  {
6061  goto end;
6062  }
6063  }
6064 
6065  /* case 3 : resource type is LOCK_RESOURCE_INSTANCE */
6066  if (lock_is_class_lock_escalated (old_class_lock, lock) == true)
6067  { /* already granted on the class level */
6068  /* if incompatible old class lock with requested lock, remove instant class locks */
6069  lock_stop_instant_lock_mode (thread_p, tran_index, false);
6070  granted = LK_GRANTED;
6071  goto end;
6072  }
6073  /* acquire a lock on the given instance oid */
6074 
6075  /* NOTE that in case of acquiring a lock on an instance object, the class oid of the instance object must be
6076  * given. */
6077  granted = lock_internal_perform_lock_object (thread_p, tran_index, oid, class_oid, lock, wait_msecs, &inst_entry,
6078  class_entry);
6079  goto end;
6080  }
6081 
6082 end:
6083 #if defined (EnableThreadMonitoring)
6085  {
6086  tsc_getticks (&end_tick);
6087  tsc_elapsed_time_usec (&elapsed_time, end_tick, start_tick);
6088  }
6089  if (MONITOR_WAITING_THREAD (elapsed_time))
6090  {
6091  er_set (ER_NOTIFICATION_SEVERITY, ARG_FILE_LINE, ER_MNT_WAITING_THREAD, 2, "lock object (lock_object)",
6093  er_log_debug (ARG_FILE_LINE, "lock_object: %6d.%06d\n", elapsed_time.tv_sec, elapsed_time.tv_usec);
6094  }
6095 #endif
6096 
6097  return granted;
6098 #endif /* !SERVER_MODE */
6099 }
6100 
6101 /*
6102  * lock_subclass () - Lock a class in a class hierarchy
6103  *
6104  * return: one of following values)
6105  * LK_GRANTED
6106  * LK_NOTGRANTED_DUE_ABORTED
6107  * LK_NOTGRANTED_DUE_TIMEOUT
6108  * LK_NOTGRANTED_DUE_ERROR
6109  *
6110  * subclass_oid(in): Identifier of subclass to lock
6111  * superclass_oid(in): Identifier of the superclass
6112  * lock(in): Requested lock mode
6113  * cond_flag(in):
6114  */
6115 int
6116 lock_subclass (THREAD_ENTRY * thread_p, const OID * subclass_oid, const OID * superclass_oid, LOCK lock, int cond_flag)
6117 {
6118 #if !defined (SERVER_MODE)
6119  LK_SET_STANDALONE_XLOCK (lock);
6120  return LK_GRANTED;
6121 #else /* !SERVER_MODE */
6122  LOCK new_superclass_lock, old_superclass_lock;
6123  LK_ENTRY *superclass_entry = NULL, *subclass_entry = NULL;
6124  int granted;
6125  int tran_index;
6126  int wait_msecs;
6127  TRAN_ISOLATION isolation;
6128 #if defined (EnableThreadMonitoring)
6129  TSC_TICKS start_tick, end_tick;
6130  TSCTIMEVAL elapsed_time;
6131 #endif
6132 
6133  if (subclass_oid == NULL)
6134  {
6135  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_subclass", "NULL subclass OID pointer");
6136  return LK_NOTGRANTED_DUE_ERROR;
6137  }
6138 
6139  if (superclass_oid == NULL)
6140  {
6141  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_subclass", "NULL superclass OID pointer");
6142  return LK_NOTGRANTED_DUE_ERROR;
6143  }
6144 
6145  if (lock == NULL_LOCK)
6146  {
6147  return LK_GRANTED;
6148  }
6149 
6150 #if defined (EnableThreadMonitoring)
6152  {
6153  tsc_getticks (&start_tick);
6154  }
6155 #endif
6156 
6157  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6158  if (cond_flag == LK_COND_LOCK) /* conditional request */
6159  {
6160  wait_msecs = LK_FORCE_ZERO_WAIT;
6161  }
6162  else
6163  {
6164  wait_msecs = logtb_find_wait_msecs (tran_index);
6165  }
6166  isolation = logtb_find_isolation (tran_index);
6167 
6168  /* get the intentional lock mode to be acquired on class oid */
6169  if (lock <= S_LOCK)
6170  {
6171  new_superclass_lock = IS_LOCK;
6172  }
6173  else
6174  {
6175  new_superclass_lock = IX_LOCK;
6176  }
6177 
6178  /* Check if current transaction has already held the class lock. If the class lock is not held, hold the class lock,
6179  * now. */
6180  superclass_entry = lock_get_class_lock (thread_p, superclass_oid);
6181  old_superclass_lock = (superclass_entry) ? superclass_entry->granted_mode : NULL_LOCK;
6182 
6183 
6184  if (old_superclass_lock < new_superclass_lock)
6185  {
6186  /* superclass is already locked, just promote to the new lock */
6187  granted = lock_internal_perform_lock_object (thread_p, tran_index, superclass_oid, NULL, new_superclass_lock,
6188  wait_msecs, &superclass_entry, NULL);
6189  if (granted != LK_GRANTED)
6190  {
6191  goto end;
6192  }
6193  }
6194  /* case 2 : resource type is LOCK_RESOURCE_CLASS */
6195  /* acquire a lock on the given class object */
6196 
6197  /* NOTE that in case of acquiring a lock on a class object, the higher lock granule of the class object must not be
6198  * given. */
6199 
6200  granted = lock_internal_perform_lock_object (thread_p, tran_index, subclass_oid, NULL, lock, wait_msecs,
6201  &subclass_entry, superclass_entry);
6202 end:
6203 #if defined (EnableThreadMonitoring)
6205  {
6206  tsc_getticks (&end_tick);
6207  tsc_elapsed_time_usec (&elapsed_time, end_tick, start_tick);
6208  }
6209  if (MONITOR_WAITING_THREAD (elapsed_time))
6210  {
6211  er_set (ER_NOTIFICATION_SEVERITY, ARG_FILE_LINE, ER_MNT_WAITING_THREAD, 2, "lock object (lock_object)",
6213  er_log_debug (ARG_FILE_LINE, "lock_object: %6d.%06d\n", elapsed_time.tv_sec, elapsed_time.tv_usec);
6214  }
6215 #endif
6216 
6217  return granted;
6218 #endif /* !SERVER_MODE */
6219 }
6220 
6221 /*
6222  * lock_object_wait_msecs - Lock an object
6223  *
6224  * return: one of following values)
6225  * LK_GRANTED
6226  * LK_NOTGRANTED_DUE_ABORTED
6227  * LK_NOTGRANTED_DUE_TIMEOUT
6228  * LK_NOTGRANTED_DUE_ERROR
6229  *
6230  * oid(in): Identifier of object(instance, class, root class) to lock
6231  * class_oid(in): Identifier of the class instance of the given object
6232  * lock(in): Requested lock mode
6233  * cond_flag(in):
6234  * wait_msecs(in):
6235  *
6236  */
6237 int
6238 lock_object_wait_msecs (THREAD_ENTRY * thread_p, const OID * oid, const OID * class_oid, LOCK lock, int cond_flag,
6239  int wait_msecs)
6240 {
6241 #if !defined (SERVER_MODE)
6242  LK_SET_STANDALONE_XLOCK (lock);
6243  return LK_GRANTED;
6244 #else /* !SERVER_MODE */
6245  int old_wait_msecs = xlogtb_reset_wait_msecs (thread_p, wait_msecs);
6246  int lock_result = lock_object (thread_p, oid, class_oid, lock, cond_flag);
6247 
6248  xlogtb_reset_wait_msecs (thread_p, old_wait_msecs);
6249 
6250  return lock_result;
6251 #endif
6252 }
6253 
6254 /*
6255  * lock_scan - Lock for scanning a heap
6256  *
6257  * return: one of following values)
6258  * LK_GRANTED
6259  * LK_NOTGRANTED_DUE_ABORTED
6260  * LK_NOTGRANTED_DUE_TIMEOUT
6261  * LK_NOTGRANTED_DUE_ERROR
6262  *
6263  * class_oid(in): class oid of the instances to be scanned
6264  * current_lock(in): acquired lock
6265  *
6266  */
6267 int
6268 lock_scan (THREAD_ENTRY * thread_p, const OID * class_oid, int cond_flag, LOCK class_lock)
6269 {
6270 #if !defined (SERVER_MODE)
6271  return LK_GRANTED;
6272 #else /* !SERVER_MODE */
6273  int tran_index;
6274  int wait_msecs;
6275  TRAN_ISOLATION isolation;
6276  int granted;
6277  LK_ENTRY *root_class_entry = NULL;
6278  LK_ENTRY *class_entry = NULL;
6279 #if defined (EnableThreadMonitoring)
6280  TSC_TICKS start_tick, end_tick;
6281  TSCTIMEVAL elapsed_time;
6282 #endif
6283 
6284  if (class_oid == NULL)
6285  {
6286  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_scan", "NULL ClassOID pointer");
6287  return LK_NOTGRANTED_DUE_ERROR;
6288  }
6289 
6290 #if defined (EnableThreadMonitoring)
6292  {
6293  tsc_getticks (&start_tick);
6294  }
6295 #endif
6296 
6297  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6298  if (cond_flag == LK_COND_LOCK)
6299  {
6300  wait_msecs = LK_FORCE_ZERO_WAIT;
6301  }
6302  else
6303  {
6304  assert (cond_flag == LK_UNCOND_LOCK);
6305  wait_msecs = logtb_find_wait_msecs (tran_index);
6306  }
6307  isolation = logtb_find_isolation (tran_index);
6308 
6309  /* acquire the lock on the class */
6310  /* NOTE that in case of acquiring a lock on a class object, the higher lock granule of the class object is not given. */
6311  root_class_entry = lock_get_class_lock (thread_p, oid_Root_class_oid);
6312  granted = lock_internal_perform_lock_object (thread_p, tran_index, class_oid, NULL, class_lock, wait_msecs,
6313  &class_entry, root_class_entry);
6314  assert (granted == LK_GRANTED || cond_flag == LK_COND_LOCK || er_errid () != NO_ERROR);
6315 
6316 #if defined (EnableThreadMonitoring)
6318  {
6319  tsc_getticks (&end_tick);
6320  tsc_elapsed_time_usec (&elapsed_time, end_tick, start_tick);
6321  }
6322  if (MONITOR_WAITING_THREAD (elapsed_time))
6323  {
6324  er_set (ER_NOTIFICATION_SEVERITY, ARG_FILE_LINE, ER_MNT_WAITING_THREAD, 2, "lock object (lock_scan)",
6326  er_log_debug (ARG_FILE_LINE, "lock_scan: %6d.%06d\n", elapsed_time.tv_sec, elapsed_time.tv_usec);
6327  }
6328 #endif
6329 
6330  return granted;
6331 #endif /* !SERVER_MODE */
6332 }
6333 
6334 /*
6335  * lock_classes_lock_hint - Lock many classes that has been hinted
6336  *
6337  * return: one of following values
6338  * LK_GRANTED
6339  * LK_NOTGRANTED_DUE_ABORTED
6340  * LK_NOTGRANTED_DUE_TIMEOUT
6341  * LK_NOTGRANTED_DUE_ERROR
6342  *
6343  * lockhint(in): description of hinted classes
6344  *
6345  */
6346 int
6348 {
6349 #if !defined (SERVER_MODE)
6350  int i;
6351 
6352  for (i = 0; i < lockhint->num_classes; i++)
6353  {
6354  if (lockhint->classes[i].lock == SCH_M_LOCK || lockhint->classes[i].lock == X_LOCK
6355  || lockhint->classes[i].lock == IX_LOCK || lockhint->classes[i].lock == SIX_LOCK)
6356  {
6357  lk_Standalone_has_xlock = true;
6358  break;
6359  }
6360  }
6361  return LK_GRANTED;
6362 #else /* !SERVER_MODE */
6363  int tran_index;
6364  int wait_msecs;
6365  TRAN_ISOLATION isolation;
6366  LK_LOCKINFO cls_lockinfo_space[LK_LOCKINFO_FIXED_COUNT];
6367  LK_LOCKINFO *cls_lockinfo;
6368  LK_ENTRY *root_class_entry = NULL;
6369  LK_ENTRY *class_entry = NULL;
6370  OID *root_oidp;
6371  LOCK root_lock;
6372  LOCK intention_mode;
6373  int cls_count;
6374  int granted, i;
6375 #if defined (EnableThreadMonitoring)
6376  TSC_TICKS start_tick, end_tick;
6377  TSCTIMEVAL elapsed_time;
6378 #endif
6379 
6380  if (lockhint == NULL)
6381  {
6382  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_classes_lock_hint",
6383  "NULL lockhint pointer");
6384  return LK_NOTGRANTED_DUE_ERROR;
6385  }
6386 
6387  /* If there is nothing to lock, returns */
6388  if (lockhint->num_classes <= 0)
6389  {
6390  return LK_GRANTED;
6391  }
6392 
6393 #if defined (EnableThreadMonitoring)
6395  {
6396  tsc_getticks (&start_tick);
6397  }
6398 #endif
6399 
6400  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6401  wait_msecs = logtb_find_wait_msecs (tran_index);
6402  isolation = logtb_find_isolation (tran_index);
6403 
6404  /* We do not want to rollback the transaction in the event of a deadlock. For now, let's just wait a long time. If
6405  * deadlock, the transaction is going to be notified of lock timeout instead of aborted. */
6406  if (lockhint->quit_on_errors == false && wait_msecs == LK_INFINITE_WAIT)
6407  {
6408  wait_msecs = INT_MAX; /* to be notified of lock timeout */
6409  }
6410 
6411  /* prepare cls_lockinfo array */
6412  if (lockhint->num_classes <= LK_LOCKINFO_FIXED_COUNT)
6413  {
6414  cls_lockinfo = &cls_lockinfo_space[0];
6415  }
6416  else
6417  { /* num_classes > LK_LOCKINFO_FIXED_COUNT */
6418  cls_lockinfo = (LK_LOCKINFO *) db_private_alloc (thread_p, SIZEOF_LK_LOCKINFO * lockhint->num_classes);
6419  if (cls_lockinfo == NULL)
6420  {
6421  return LK_NOTGRANTED_DUE_ERROR;
6422  }
6423  }
6424 
6425  /* Define the desired locks for all classes */
6426  /* get class_oids and class_locks */
6427  cls_count = 0;
6428  for (i = 0; i < lockhint->num_classes; i++)
6429  {
6430  if (OID_ISNULL (&lockhint->classes[i].oid) || lockhint->classes[i].lock == NULL_LOCK)
6431  {
6432  continue;
6433  }
6434 
6435  if (OID_IS_ROOTOID (&lockhint->classes[i].oid))
6436  {
6437  /* When the given class is the root class */
6438  root_oidp = &lockhint->classes[i].oid;
6439  root_lock = lockhint->classes[i].lock;
6440 
6441  /* hold an explicit lock on the root class */
6442  granted = lock_internal_perform_lock_object (thread_p, tran_index, root_oidp, NULL, root_lock, wait_msecs,
6443  &root_class_entry, NULL);
6444  if (granted != LK_GRANTED)
6445  {
6446  if (lockhint->quit_on_errors == (int) true || granted != LK_NOTGRANTED_DUE_TIMEOUT)
6447  {
6448  goto error;
6449  }
6450  else
6451  {
6452  OID_SET_NULL (root_oidp);
6453  }
6454  }
6455  }
6456  else
6457  {
6458  /* build cls_lockinfo[cls_count] */
6459  COPY_OID (&cls_lockinfo[cls_count].oid, &lockhint->classes[i].oid);
6460  cls_lockinfo[cls_count].org_oidp = &lockhint->classes[i].oid;
6461  cls_lockinfo[cls_count].lock = lockhint->classes[i].lock;
6462 
6463  /* increment cls_count */
6464  cls_count++;
6465  }
6466  }
6467 
6468  /* sort class oids before hold the locks in order to avoid deadlocks */
6469  if (cls_count > 1)
6470  {
6471  (void) qsort (cls_lockinfo, cls_count, SIZEOF_LK_LOCKINFO, lock_compare_lock_info);
6472  }
6473 
6474  /* get root class lock mode */
6475  root_class_entry = lock_get_class_lock (thread_p, oid_Root_class_oid);
6476 
6477  for (i = 0; i < cls_count; i++)
6478  {
6479  /* hold the intentional lock on the root class if needed. */
6480  if (cls_lockinfo[i].lock <= S_LOCK)
6481  {
6482  intention_mode = IS_LOCK;
6483  }
6484  else
6485  {
6486  intention_mode = IX_LOCK;
6487  }
6488 
6489  if (root_class_entry == NULL || root_class_entry->granted_mode < intention_mode)
6490  {
6491  granted = lock_internal_perform_lock_object (thread_p, tran_index, oid_Root_class_oid, NULL, intention_mode,
6492  wait_msecs, &root_class_entry, NULL);
6493  if (granted != LK_GRANTED)
6494  {
6495  if (lockhint->quit_on_errors == false && granted == LK_NOTGRANTED_DUE_TIMEOUT)
6496  {
6497  OID_SET_NULL (cls_lockinfo[i].org_oidp);
6498  continue;
6499  }
6500  goto error;
6501  }
6502  }
6503 
6504  /* hold the lock on the given class. */
6505  granted = lock_internal_perform_lock_object (thread_p, tran_index, &cls_lockinfo[i].oid, NULL,
6506  cls_lockinfo[i].lock, wait_msecs, &class_entry, root_class_entry);
6507 
6508  if (granted != LK_GRANTED)
6509  {
6510  if (lockhint->quit_on_errors == false && granted == LK_NOTGRANTED_DUE_TIMEOUT)
6511  {
6512  OID_SET_NULL (cls_lockinfo[i].org_oidp);
6513  continue;
6514  }
6515  goto error;
6516  }
6517  }
6518 
6519  /* release memory space for cls_lockinfo */
6520  if (cls_lockinfo != &cls_lockinfo_space[0])
6521  {
6522  db_private_free_and_init (thread_p, cls_lockinfo);
6523  }
6524 
6525 #if defined (EnableThreadMonitoring)
6527  {
6528  tsc_getticks (&end_tick);
6529  tsc_elapsed_time_usec (&elapsed_time, end_tick, start_tick);
6530  }
6531  if (MONITOR_WAITING_THREAD (elapsed_time))
6532  {
6533  er_set (ER_NOTIFICATION_SEVERITY, ARG_FILE_LINE, ER_MNT_WAITING_THREAD, 2, "lock object (lock_classes_lock_hint)",
6535  er_log_debug (ARG_FILE_LINE, "lock_classes_lock_hint: %6d.%06d\n", elapsed_time.tv_sec, elapsed_time.tv_usec);
6536  }
6537 #endif
6538 
6539  return LK_GRANTED;
6540 
6541 error:
6542  if (cls_lockinfo != &cls_lockinfo_space[0])
6543  {
6544  db_private_free_and_init (thread_p, cls_lockinfo);
6545  }
6546  return granted;
6547 #endif /* !SERVER_MODE */
6548 }
6549 
6550 static void
6551 lock_unlock_object_lock_internal (THREAD_ENTRY * thread_p, const OID * oid, const OID * class_oid, LOCK lock,
6552  int release_flag, int move_to_non2pl)
6553 {
6554 #if !defined (SERVER_MODE)
6555  return;
6556 #else /* !SERVER_MODE */
6557  LK_ENTRY *entry_ptr = NULL;
6558  int tran_index;
6559  bool is_class;
6560 
6561  is_class = (OID_IS_ROOTOID (oid) || OID_IS_ROOTOID (class_oid)) ? true : false;
6562 
6563  /* get transaction table index */
6564  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6565  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, oid, is_class);
6566 
6567  if (entry_ptr != NULL)
6568  {
6569  lock_internal_perform_unlock_object (thread_p, entry_ptr, release_flag, move_to_non2pl);
6570  }
6571 #endif
6572 }
6573 
6574 /*
6575  * lock_unlock_object_donot_move_to_non2pl - Unlock an object lock on the specified object
6576  * return:
6577  * thread_p(in):
6578  * oid(in): Identifier of instance to unlock from
6579  * class_oid(in): Identifier of the class of the instance
6580  * lock(in): Lock to release
6581  *
6582  */
6583 void
6584 lock_unlock_object_donot_move_to_non2pl (THREAD_ENTRY * thread_p, const OID * oid, const OID * class_oid, LOCK lock)
6585 {
6586  lock_unlock_object_lock_internal (thread_p, oid, class_oid, lock, false, false);
6587 }
6588 
6589 /*
6590  * lock_remove_object_lock - Removes a lock on the specified object
6591  * return:
6592  * thread_p(in):
6593  * oid(in): Identifier of instance to remove lock from
6594  * class_oid(in): Identifier of the class of the instance
6595  * lock(in): Lock to remove
6596  *
6597  */
6598 void
6599 lock_remove_object_lock (THREAD_ENTRY * thread_p, const OID * oid, const OID * class_oid, LOCK lock)
6600 {
6601  lock_unlock_object_lock_internal (thread_p, oid, class_oid, lock, true, false);
6602 }
6603 
6604 /*
6605  * lock_unlock_object - Unlock an object according to transaction isolation level
6606  *
6607  * return: nothing..
6608  *
6609  * oid(in): Identifier of instance to lock
6610  * class_oid(in): Identifier of the class of the instance
6611  * lock(in): Lock to release
6612  * force(in): Unlock the object no matter what it is the isolation level.
6613  *
6614  */
6615 void
6616 lock_unlock_object (THREAD_ENTRY * thread_p, const OID * oid, const OID * class_oid, LOCK lock, bool force)
6617 {
6618 #if !defined (SERVER_MODE)
6619  return;
6620 #else /* !SERVER_MODE */
6621  int tran_index; /* transaction table index */
6622  TRAN_ISOLATION isolation; /* transaction isolation level */
6623  LK_ENTRY *entry_ptr;
6624  bool is_class;
6625 
6626  if (oid == NULL)
6627  {
6628  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_unlock_object", "NULL OID pointer");
6629  return;
6630  }
6631  if (class_oid == NULL)
6632  {
6633  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_unlock_object", "NULL ClassOID pointer");
6634  return;
6635  }
6636 
6637  is_class = (OID_IS_ROOTOID (oid) || OID_IS_ROOTOID (class_oid)) ? true : false;
6638 
6639  /* get transaction table index */
6640  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6641 
6642  if (force == true)
6643  {
6644 #if defined(ENABLE_SYSTEMTAP)
6645  CUBRID_LOCK_RELEASE_START (oid, class_oid, lock);
6646 #endif /* ENABLE_SYSTEMTAP */
6647 
6648  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, oid, is_class);
6649 
6650  if (entry_ptr != NULL)
6651  {
6652  lock_internal_perform_unlock_object (thread_p, entry_ptr, false, true);
6653  }
6654 
6655 #if defined(ENABLE_SYSTEMTAP)
6656  CUBRID_LOCK_RELEASE_END (oid, class_oid, lock);
6657 #endif /* ENABLE_SYSTEMTAP */
6658 
6659  return;
6660  }
6661 
6662  /* force != true */
6663  if (lock != S_LOCK)
6664  {
6665  assert (lock != NULL_LOCK);
6666  /* These will not be released. */
6667  return;
6668  }
6669 
6670  isolation = logtb_find_isolation (tran_index);
6671  switch (isolation)
6672  {
6673  case TRAN_SERIALIZABLE:
6674  case TRAN_REPEATABLE_READ:
6675  return; /* nothing to do */
6676 
6677  case TRAN_READ_COMMITTED:
6678 #if defined(ENABLE_SYSTEMTAP)
6679  CUBRID_LOCK_RELEASE_START (oid, class_oid, lock);
6680 #endif /* ENABLE_SYSTEMTAP */
6681 
6682  /* The intentional lock on the higher lock granule must be kept. */
6683  lock_unlock_object_by_isolation (thread_p, tran_index, isolation, class_oid, oid);
6684 
6685 #if defined(ENABLE_SYSTEMTAP)
6686  CUBRID_LOCK_RELEASE_END (oid, class_oid, lock);
6687 #endif /* ENABLE_SYSTEMTAP */
6688  break;
6689 
6690  default: /* TRAN_UNKNOWN_ISOLATION */
6691  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_UNKNOWN_ISOLATION, 2, isolation, tran_index);
6692  break;
6693  }
6694 
6695  return;
6696 #endif /* !SERVER_MODE */
6697 }
6698 
6699 /*
6700  * lock_unlock_objects_lock_set - Unlock many objects according to isolation level
6701  *
6702  * return: nothing..
6703  *
6704  * lockset(in):
6705  *
6706  */
6707 void
6709 {
6710 #if !defined (SERVER_MODE)
6711  return;
6712 #else /* !SERVER_MODE */
6713  int tran_index; /* transaction table index */
6714  TRAN_ISOLATION isolation; /* transaction isolation level */
6715  LOCK reqobj_class_unlock;
6716  OID *oid, *class_oid;
6717  int i;
6718 
6719  if (lockset == NULL)
6720  {
6721  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_unlock_objects_lockset",
6722  "NULL lockset pointer");
6723  return;
6724  }
6725 
6726  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6727  isolation = logtb_find_isolation (tran_index);
6728 
6729  if (isolation == TRAN_SERIALIZABLE || isolation == TRAN_REPEATABLE_READ)
6730  {
6731  return; /* Nothing to release */
6732  }
6733  else if (isolation != TRAN_READ_COMMITTED)
6734  {
6735  assert (0);
6736  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_UNKNOWN_ISOLATION, 2, isolation, tran_index);
6737  return;
6738  }
6739 
6740  assert (isolation == TRAN_READ_COMMITTED);
6741 
6742  reqobj_class_unlock = lockset->reqobj_class_lock;
6743  if (reqobj_class_unlock == X_LOCK)
6744  {
6745  return; /* Don't release the lock */
6746  }
6747 
6748  for (i = 0; i < lockset->num_reqobjs_processed; i++)
6749  {
6750  oid = &lockset->objects[i].oid;
6751  if (OID_ISNULL (oid) || lockset->objects[i].class_index == -1)
6752  {
6753  continue;
6754  }
6755 
6756  class_oid = &lockset->classes[lockset->objects[i].class_index].oid;
6757  if (OID_ISNULL (class_oid))
6758  {
6759  continue;
6760  }
6761 
6762  /* The intentional lock on the higher lock granule must be kept. */
6763  lock_unlock_object_by_isolation (thread_p, tran_index, isolation, class_oid, oid);
6764  }
6765 #endif /* !SERVER_MODE */
6766 }
6767 
6768 /*
6769  * lock_unlock_classes_lock_hint - Unlock many hinted classes according to
6770  * transaction isolation level
6771  *
6772  * return: nothing..
6773  *
6774  * lockhint(in): Description of hinted classses
6775  *
6776  */
6777 void
6779 {
6780 #if !defined (SERVER_MODE)
6781  return;
6782 #else /* !SERVER_MODE */
6783  int tran_index; /* transaction table index */
6784  TRAN_ISOLATION isolation; /* transaction isolation level */
6785  int i;
6786 
6787  if (lockhint == NULL)
6788  {
6789  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_unlock_classes_lockhint",
6790  "NULL lockhint pointer");
6791  return;
6792  }
6793 
6794  /* If there is nothing to unlock, returns */
6795  if (lockhint->num_classes <= 0)
6796  {
6797  return;
6798  }
6799 
6800  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6801  isolation = logtb_find_isolation (tran_index);
6802 
6803  switch (isolation)
6804  {
6805  case TRAN_SERIALIZABLE:
6806  case TRAN_REPEATABLE_READ:
6807  return; /* nothing to do */
6808 
6809  case TRAN_READ_COMMITTED:
6810  for (i = 0; i < lockhint->num_classes; i++)
6811  {
6812  if (OID_ISNULL (&lockhint->classes[i].oid) || lockhint->classes[i].lock == NULL_LOCK)
6813  {
6814  continue;
6815  }
6816  lock_unlock_inst_locks_of_class_by_isolation (thread_p, tran_index, isolation, &lockhint->classes[i].oid);
6817  }
6818  return;
6819 
6820  default: /* TRAN_UNKNOWN_ISOLATION */
6821  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_UNKNOWN_ISOLATION, 2, isolation, tran_index);
6822  return;
6823  }
6824 #endif /* !SERVER_MODE */
6825 }
6826 
6827 /*
6828  * lock_unlock_all - Release all locks of current transaction
6829  *
6830  * return: nothing
6831  *
6832  * Note:Release all locks acquired by the current transaction.
6833  *
6834  * This function must be called at the end of the transaction.
6835  */
6836 void
6838 {
6839 #if !defined (SERVER_MODE)
6840  lk_Standalone_has_xlock = false;
6841  pgbuf_unfix_all (thread_p);
6842 
6843  return;
6844 #else /* !SERVER_MODE */
6845  int tran_index;
6846  LK_TRAN_LOCK *tran_lock;
6847  LK_ENTRY *entry_ptr;
6848 
6849  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
6850  tran_lock = &lk_Gl.tran_lock_table[tran_index];
6851 
6852  /* remove all instance locks */
6853  entry_ptr = tran_lock->inst_hold_list;
6854  while (entry_ptr != NULL)
6855  {
6856  assert (tran_index == entry_ptr->tran_index);
6857 
6858  lock_internal_perform_unlock_object (thread_p, entry_ptr, true, false);
6859  entry_ptr = tran_lock->inst_hold_list;
6860  }
6861 
6862  /* remove all class locks */
6863  entry_ptr = tran_lock->class_hold_list;
6864  while (entry_ptr != NULL)
6865  {
6866  assert (tran_index == entry_ptr->tran_index);
6867 
6868  lock_internal_perform_unlock_object (thread_p, entry_ptr, true, false);
6869  entry_ptr = tran_lock->class_hold_list;
6870  }
6871 
6872  /* remove root class lock */
6873  entry_ptr = tran_lock->root_class_hold;
6874  if (entry_ptr != NULL)
6875  {
6876  assert (tran_index == entry_ptr->tran_index);
6877 
6878  lock_internal_perform_unlock_object (thread_p, entry_ptr, true, false);
6879  }
6880 
6881  /* remove non2pl locks */
6882  while (tran_lock->non2pl_list != NULL)
6883  {
6884  /* remove the non2pl entry from transaction non2pl list */
6885  entry_ptr = tran_lock->non2pl_list;
6886  tran_lock->non2pl_list = entry_ptr->tran_next;
6887 
6888  assert (tran_index == entry_ptr->tran_index);
6889 
6890  if (entry_ptr->granted_mode == INCON_NON_TWO_PHASE_LOCK)
6891  {
6892  tran_lock->num_incons_non2pl -= 1;
6893  }
6894  /* remove the non2pl entry from resource non2pl list and free it */
6895  lock_remove_non2pl (thread_p, entry_ptr, tran_index);
6896  }
6897 
6898  lock_clear_deadlock_victim (tran_index);
6899 
6900  pgbuf_unfix_all (thread_p);
6901 #endif /* !SERVER_MODE */
6902 }
6903 
6904 static LK_ENTRY *
6905 lock_find_tran_hold_entry (THREAD_ENTRY * thread_p, int tran_index, const OID * oid, bool is_class)
6906 {
6907 #if !defined (SERVER_MODE)
6908  return NULL;
6909 #else /* !SERVER_MODE */
6910  LK_RES_KEY search_key;
6911  LK_RES *res_ptr;
6912  LK_ENTRY *entry_ptr;
6913 
6914  if (is_class)
6915  {
6916  return lock_find_class_entry (tran_index, oid);
6917  }
6918 
6919  /* search hash */
6920  search_key = lock_create_search_key ((OID *) oid, NULL);
6921  if (search_key.type != LOCK_RESOURCE_ROOT_CLASS)
6922  {
6923  /* override type; we don't insert here, so class_oid is neither passed to us nor needed for the search */
6924  search_key.type = (is_class ? LOCK_RESOURCE_CLASS : LOCK_RESOURCE_INSTANCE);
6925  }
6926  res_ptr = lk_Gl.m_obj_hash_table.find (thread_p, search_key);
6927  if (res_ptr == NULL)
6928  {
6929  /* not found */
6930  return NULL;
6931  }
6932 
6933  entry_ptr = res_ptr->holder;
6934  for (; entry_ptr != NULL; entry_ptr = entry_ptr->next)
6935  {
6936  if (entry_ptr->tran_index == tran_index)
6937  {
6938  break;
6939  }
6940  }
6941 
6942  pthread_mutex_unlock (&res_ptr->res_mutex);
6943  return entry_ptr;
6944 #endif
6945 }
6946 
6947 /*
6948  * lock_get_object_lock - Find the acquired lock mode
6949  *
6950  * return:
6951  *
6952  * oid(in): target object identifier
6953  * class_oid(in): class identifier of the target object
6954  *
6955  * Note:Find the acquired lock on the given object by the current transaction.
6956  *
6957  */
6958 LOCK
6959 lock_get_object_lock (const OID * oid, const OID * class_oid)
6960 {
6961 #if !defined (SERVER_MODE)
6962  return X_LOCK;
6963 #else /* !SERVER_MODE */
6964  LOCK lock_mode = NULL_LOCK; /* return value */
6965  LK_TRAN_LOCK *tran_lock;
6966  LK_ENTRY *entry_ptr;
6968  int rv, tran_index;
6969 
6970  if (oid == NULL)
6971  {
6972  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_get_object_lock", "NULL OID pointer");
6973  return NULL_LOCK;
6974  }
6975 
6976  if (thread_p->type == thread_type::TT_LOADDB)
6977  {
6978  /* Loaddb workers does not acquire locks. Get tran_index of loaddb workers manager thread. */
6979  tran_index = thread_p->conn_entry->get_tran_index ();
6980  }
6981  else
6982  {
6983  tran_index = logtb_get_current_tran_index ();
6984  }
6985 
6986  if (tran_index == NULL_TRAN_INDEX)
6987  {
6988  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_get_object_lock", "NULL_TRAN_INDEX");
6989  return NULL_LOCK;
6990  }
6991 
6992  /* get a pointer to transaction lock info entry */
6993  tran_lock = &lk_Gl.tran_lock_table[tran_index];
6994 
6995  /*
6996  * case 1: root class lock
6997  */
6998  /* get the granted lock mode acquired on the root class oid */
6999  if (OID_EQ (oid, oid_Root_class_oid))
7000  {
7001  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
7002  if (tran_lock->root_class_hold != NULL)
7003  {
7004  lock_mode = tran_lock->root_class_hold->granted_mode;
7005  }
7006  pthread_mutex_unlock (&tran_lock->hold_mutex);
7007  return lock_mode; /* might be NULL_LOCK */
7008  }
7009 
7010  /*
7011  * case 2: general class lock
7012  */
7013  /* get the granted lock mode acquired on the given class oid */
7014  if (class_oid == NULL || OID_EQ (class_oid, oid_Root_class_oid))
7015  {
7016  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, oid, true);
7017  if (entry_ptr != NULL)
7018  {
7019  lock_mode = entry_ptr->granted_mode;
7020  }
7021  return lock_mode; /* might be NULL_LOCK */
7022  }
7023 
7024  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, class_oid, true);
7025  if (entry_ptr != NULL)
7026  {
7027  lock_mode = entry_ptr->granted_mode;
7028  }
7029 
7030  /* If the class lock mode is one of S_LOCK, X_LOCK or SCH_M_LOCK, the lock is held on the instance implicitly. In
7031  * this case, there is no need to check instance lock. If the class lock mode is SIX_LOCK, S_LOCK is held on the
7032  * instance implicitly. In this case, we must check for a possible X_LOCK on the instance. In other cases, we must
7033  * check the lock held on the instance. */
7034  if (lock_mode == SCH_M_LOCK)
7035  {
7036  return X_LOCK;
7037  }
7038  else if (lock_mode != S_LOCK && lock_mode != X_LOCK)
7039  {
7040  if (lock_mode == SIX_LOCK)
7041  {
7042  lock_mode = S_LOCK;
7043  }
7044  else
7045  {
7046  lock_mode = NULL_LOCK;
7047  }
7048 
7049  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, oid, false);
7050  if (entry_ptr != NULL)
7051  {
7052  lock_mode = entry_ptr->granted_mode;
7053  }
7054  }
7055 
7056  return lock_mode; /* might be NULL_LOCK */
7057 #endif /* !SERVER_MODE */
7058 }
7059 
7060 /*
7061  * lock_has_lock_on_object -
7062  *
7063  * return:
7064  *
7065  * oid(in): target object ientifier
7066  * class_oid(in): class identifier of the target object
7067  * lock(in): the lock mode
7068  *
7069  * Note: Find whether the transaction holds an enough lock on the object
7070  *
7071  */
7072 int
7073 lock_has_lock_on_object (const OID * oid, const OID * class_oid, LOCK lock)
7074 {
7075 #if !defined (SERVER_MODE)
7076  return 1;
7077 #else /* !SERVER_MODE */
7078  LOCK granted_lock_mode = NULL_LOCK;
7079  LK_TRAN_LOCK *tran_lock;
7080  LK_ENTRY *entry_ptr;
7082  int rv, tran_index;
7083 
7084  if (oid == NULL)
7085  {
7086  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_has_lock_on_object", "NULL OID pointer");
7087  return ER_LK_BAD_ARGUMENT;
7088  }
7089 
7090  if (thread_p->type == thread_type::TT_LOADDB)
7091  {
7092  /* Loaddb workers does not acquire locks. Get tran_index of loaddb workers manager thread. */
7093  tran_index = thread_p->conn_entry->get_tran_index ();
7094 
7095  if (class_oid != NULL && !OID_IS_ROOTOID (class_oid))
7096  {
7097  return lock_has_lock_on_object (class_oid, oid_Root_class_oid, BU_LOCK);
7098  }
7099  else
7100  {
7101  // fall through
7102  }
7103  }
7104  else
7105  {
7106  tran_index = logtb_get_current_tran_index ();
7107  }
7108 
7109  if (tran_index == NULL_TRAN_INDEX)
7110  {
7111  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_has_lock_on_object", "NULL_TRAN_INDEX");
7112  return ER_LK_BAD_ARGUMENT;
7113  }
7114 
7115  /* get a pointer to transaction lock info entry */
7116  tran_lock = &lk_Gl.tran_lock_table[tran_index];
7117 
7118  /*
7119  * case 1: root class lock
7120  */
7121  /* get the granted lock mode acquired on the root class oid */
7122  if (OID_EQ (oid, oid_Root_class_oid))
7123  {
7124  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
7125  if (tran_lock->root_class_hold != NULL)
7126  {
7127  granted_lock_mode = tran_lock->root_class_hold->granted_mode;
7128  }
7129  pthread_mutex_unlock (&tran_lock->hold_mutex);
7130  return (lock_Conv[lock][granted_lock_mode] == granted_lock_mode);
7131  }
7132 
7133  /*
7134  * case 2: general class lock
7135  */
7136  /* get the granted lock mode acquired on the given class oid */
7137  if (class_oid == NULL || OID_EQ (class_oid, oid_Root_class_oid))
7138  {
7139  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, oid, true);
7140  if (entry_ptr != NULL)
7141  {
7142  granted_lock_mode = entry_ptr->granted_mode;
7143  }
7144  return (lock_Conv[lock][granted_lock_mode] == granted_lock_mode);
7145  }
7146 
7147  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, class_oid, true);
7148  if (entry_ptr != NULL)
7149  {
7150  granted_lock_mode = entry_ptr->granted_mode;
7151  if (lock_Conv[lock][granted_lock_mode] == granted_lock_mode)
7152  {
7153  return 1;
7154  }
7155  }
7156 
7157  /*
7158  * case 3: object lock
7159  */
7160  /* get the granted lock mode acquired on the given instance/pseudo oid */
7161  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, oid, false);
7162  if (entry_ptr != NULL)
7163  {
7164  granted_lock_mode = entry_ptr->granted_mode;
7165  return 1;
7166  }
7167 
7168  return 0;
7169 #endif /* !SERVER_MODE */
7170 }
7171 
7172 /*
7173  * lock_has_xlock - Does transaction have an exclusive lock on any resource ?
7174  *
7175  * return:
7176  *
7177  * Note:Find if the current transaction has any kind of exclusive lock
7178  * on any lock resource.
7179  */
7180 bool
7182 {
7183 #if !defined (SERVER_MODE)
7184  return lk_Standalone_has_xlock;
7185 #else /* !SERVER_MODE */
7186  int tran_index;
7187  LK_TRAN_LOCK *tran_lock;
7188  LOCK lock_mode;
7189  LK_ENTRY *entry_ptr;
7190  int rv;
7191 
7192  /*
7193  * Exclusive locks in this context mean IX_LOCK, SIX_LOCK, X_LOCK and
7194  * SCH_M_LOCK. NOTE that U_LOCK are excluded from exclusive locks.
7195  * Because U_LOCK is currently for reading the object.
7196  */
7197  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
7198  tran_lock = &lk_Gl.tran_lock_table[tran_index];
7199  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
7200 
7201  /* 1. check root class lock */
7202  if (tran_lock->root_class_hold != NULL)
7203  {
7204  lock_mode = tran_lock->root_class_hold->granted_mode;
7205  if (lock_mode == X_LOCK || lock_mode == IX_LOCK || lock_mode == SIX_LOCK || lock_mode == SCH_M_LOCK)
7206  {
7207  pthread_mutex_unlock (&tran_lock->hold_mutex);
7208  return true;
7209  }
7210  }
7211 
7212  /* 2. check general class locks */
7213  entry_ptr = tran_lock->class_hold_list;
7214  while (entry_ptr != NULL)
7215  {
7216  lock_mode = entry_ptr->granted_mode;
7217  if (lock_mode == X_LOCK || lock_mode == IX_LOCK || lock_mode == SIX_LOCK || lock_mode == SCH_M_LOCK)
7218  {
7219  pthread_mutex_unlock (&tran_lock->hold_mutex);
7220  return true;
7221  }
7222  entry_ptr = entry_ptr->tran_next;
7223  }
7224 
7225  /* 3. checking instance locks is not needed. According to MGL ptotocol, an exclusive class lock has been acquired
7226  * with intention mode before an exclusive instance is acquired. */
7227 
7228  pthread_mutex_unlock (&tran_lock->hold_mutex);
7229  return false;
7230 #endif /* !SERVER_MODE */
7231 }
7232 
7233 #if defined(ENABLE_UNUSED_FUNCTION)
7234 /*
7235  * lock_has_lock_transaction - Does transaction have any lock on any resource ?
7236  *
7237  * return:
7238  *
7239  * tran_index(in):
7240  *
7241  * Note:Find if given transaction has any kind of lock.
7242  * Used by css_check_for_clients_down() to eliminate needless pinging.
7243  */
7244 bool
7245 lock_has_lock_transaction (int tran_index)
7246 {
7247 #if !defined (SERVER_MODE)
7248  return lk_Standalone_has_xlock;
7249 #else /* !SERVER_MODE */
7250  LK_TRAN_LOCK *tran_lock;
7251  bool lock_hold;
7252  int rv;
7253 
7254  tran_lock = &lk_Gl.tran_lock_table[tran_index];
7255  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
7256  if (tran_lock->root_class_hold != NULL || tran_lock->class_hold_list != NULL || tran_lock->inst_hold_list != NULL
7257  || tran_lock->non2pl_list != NULL)
7258  {
7259  lock_hold = true;
7260  }
7261  else
7262  {
7263  lock_hold = false;
7264  }
7265  pthread_mutex_unlock (&tran_lock->hold_mutex);
7266 
7267  return lock_hold;
7268 #endif /* !SERVER_MODE */
7269 }
7270 #endif
7271 
7272 /*
7273  * lock_is_waiting_transaction -
7274  *
7275  * return:
7276  *
7277  * tran_index(in):
7278  */
7279 bool
7281 {
7282 #if !defined (SERVER_MODE)
7283  return false;
7284 #else /* !SERVER_MODE */
7285  tran_lock_waiters_array_type tran_lock_waiters;
7286  THREAD_ENTRY **thrd_array;
7287  size_t thrd_count, i;
7288  THREAD_ENTRY *thrd_ptr;
7289 
7290  thrd_count = 0;
7291  lock_get_transaction_lock_waiting_threads (tran_index, tran_lock_waiters, thrd_count);
7292  thrd_array = tran_lock_waiters.data ();
7293 
7294  for (i = 0; i < thrd_count; i++)
7295  {
7296  thrd_ptr = thrd_array[i];
7297  thread_lock_entry (thrd_ptr);
7298  if (LK_IS_LOCKWAIT_THREAD (thrd_ptr))
7299  {
7300  thread_unlock_entry (thrd_ptr);
7301  return true;
7302  }
7303  else
7304  {
7305  if (thrd_ptr->lockwait != NULL || thrd_ptr->lockwait_state == (int) LOCK_SUSPENDED)
7306  {
7307  /* some strange lock wait state.. */
7309  thrd_ptr->lockwait_state, thrd_ptr->index, thrd_ptr->get_posix_id (), thrd_ptr->tran_index);
7310  }
7311  }
7312  thread_unlock_entry (thrd_ptr);
7313  }
7314 
7315  return false;
7316 #endif /* !SERVER_MODE */
7317 }
7318 
7319 /*
7320  * lock_get_class_lock - Get a pointer to lock heap entry acquired by
7321  * the current transaction on given class object
7322  *
7323  * return:
7324  *
7325  * class_oid(in): target class object identifier
7326  *
7327  * Note:This function finds lock entry acquired by the current transaction
7328  * on the given class and then return a pointer to the lock entry.
7329  */
7330 LK_ENTRY *
7331 lock_get_class_lock (THREAD_ENTRY * thread_p, const OID * class_oid)
7332 {
7333 #if !defined (SERVER_MODE)
7334  assert (false);
7335 
7336  return NULL;
7337 #else /* !SERVER_MODE */
7338  LK_TRAN_LOCK *tran_lock;
7339  LK_ENTRY *entry_ptr;
7340  int rv, tran_index;
7341 
7342  if (class_oid == NULL)
7343  {
7344  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_get_class_lock_ptr",
7345  "NULL ClassOID pointer");
7346  return NULL;
7347  }
7348  if (OID_ISNULL (class_oid))
7349  {
7350  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_get_class_lock_ptr", "NULL_ClassOID");
7351  return NULL;
7352  }
7353 
7354  if (thread_p->type == thread_type::TT_LOADDB)
7355  {
7356  /* Loaddb workers does not acquire locks. Get tran_index of loaddb workers manager thread. */
7357  tran_index = thread_p->conn_entry->get_tran_index ();
7358  }
7359  else
7360  {
7361  tran_index = logtb_get_current_tran_index ();
7362  }
7363 
7364  if (tran_index == NULL_TRAN_INDEX)
7365  {
7366  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lock_get_class_lock", "NULL_TRAN_INDEX");
7367  return NULL;
7368  }
7369 
7370  /* get a pointer to transaction lock info entry */
7371  tran_lock = &lk_Gl.tran_lock_table[tran_index];
7372 
7373  /* case 1: root class lock */
7374  if (OID_EQ (class_oid, oid_Root_class_oid))
7375  {
7376  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
7377  entry_ptr = tran_lock->root_class_hold;
7378  pthread_mutex_unlock (&tran_lock->hold_mutex);
7379  }
7380  else
7381  {
7382  entry_ptr = lock_find_tran_hold_entry (thread_p, tran_index, class_oid, true);
7383  }
7384 
7385  return entry_ptr;
7386 #endif /* !SERVER_MODE */
7387 }
7388 
7389 void
7391 {
7392 #if defined (SERVER_MODE)
7393  thread_lock_entry (thrd);
7394  if (LK_IS_LOCKWAIT_THREAD (thrd))
7395  {
7396  /* wake up the thread */
7397  lock_resume ((LK_ENTRY *) thrd->lockwait, LOCK_RESUMED_TIMEOUT);
7398  }
7399  else
7400  {
7401  if (thrd->lockwait != NULL || thrd->lockwait_state == (int) LOCK_SUSPENDED)
7402  {
7403  /* some strange lock wait state.. */
7404  assert (false);
7406  thrd->lockwait_state, thrd->index, thrd->get_posix_id (), thrd->tran_index);
7407  }
7408  /* release the thread entry mutex */
7409  thread_unlock_entry (thrd);
7410  }
7411 #endif // SERVER_MODE
7412 }
7413 
7414 /*
7415  * lock_force_timeout_expired_wait_transactions - Transaction is timeout if its waiting time has
7416  * expired or it is interrupted
7417  *
7418  * return: true if the thread was timed out or
7419  * false if the thread was not timed out.
7420  *
7421  * thrd_entry(in): thread entry pointer
7422  *
7423  * Note:If the given thread is waiting on a lock to be granted, and
7424  * either its expiration time has expired or it is interrupted,
7425  * the thread is timed-out.
7426  * If NULL is given, it applies to all threads.
7427  */
7428 static bool
7430 {
7431 #if !defined (SERVER_MODE)
7432  return true;
7433 #else /* !SERVER_MODE */
7434  bool ignore;
7435  THREAD_ENTRY *thrd;
7436 
7437  assert (thrd_entry != NULL);
7438 
7439  thrd = (THREAD_ENTRY *) thrd_entry;
7440 
7441  thread_lock_entry (thrd);
7442  if (LK_IS_LOCKWAIT_THREAD (thrd))
7443  {
7444  if (logtb_is_interrupted_tran (thrd, true, &ignore, thrd->tran_index))
7445  {
7446  /* wake up the thread */
7447  lock_resume ((LK_ENTRY *) thrd->lockwait, LOCK_RESUMED_INTERRUPT);
7448  return true;
7449  }
7450  else if (LK_CAN_TIMEOUT (thrd->lockwait_msecs))
7451  {
7452  struct timeval tv;
7453  INT64 etime;
7454 
7455  (void) gettimeofday (&tv, NULL);
7456  etime = (tv.tv_sec * 1000000LL + tv.tv_usec) / 1000LL;
7457  if (etime - thrd->lockwait_stime > thrd->lockwait_msecs)
7458  {
7459  /* wake up the thread */
7460  lock_resume ((LK_ENTRY *) thrd->lockwait, LOCK_RESUMED_TIMEOUT);
7461  return true;
7462  }
7463  }
7464 
7465  /* release the thread entry mutex */
7466  thread_unlock_entry (thrd);
7467  return false;
7468  }
7469  else
7470  {
7471  if (thrd->lockwait != NULL || thrd->lockwait_state == (int) LOCK_SUSPENDED)
7472  {
7473  /* some strange lock wait state.. */
7474  assert (false);
7476  thrd->lockwait_state, thrd->index, thrd->get_posix_id (), thrd->tran_index);
7477  }
7478 
7479  /* release the thread entry mutex */
7480  thread_unlock_entry (thrd);
7481  return false;
7482  }
7483 #endif /* !SERVER_MODE */
7484 }
7485 
7486 /*
7487  * lock_notify_isolation_incons - Notify of possible inconsistencies (no
7488  * repeatable reads) due to transaction isolation
7489  * level
7490  *
7491  * return: nothing.
7492  *
7493  * fun(in): Function to notify
7494  * args(in): Extra arguments for function
7495  *
7496  * Note:The current transaction is notified of any possible
7497  * inconsistencies due to its isolation level. For each possible
7498  * inconsistency the given function is called to decache any
7499  * copies of the object.
7500  */
7501 void
7502 lock_notify_isolation_incons (THREAD_ENTRY * thread_p, bool (*fun) (const OID * class_oid, const OID * oid, void *args),
7503  void *args)
7504 {
7505 #if !defined (SERVER_MODE)
7506  return;
7507 #else /* !SERVER_MODE */
7508  TRAN_ISOLATION isolation;
7509  int tran_index;
7510  LK_TRAN_LOCK *tran_lock;
7511  LK_ENTRY *curr, *prev, *next;
7512  LK_ENTRY *incon_non2pl_list_header = NULL;
7513  LK_ENTRY *incon_non2pl_list_tail = NULL;
7514  bool ret_val;
7515  int rv;
7516 
7517  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
7518  isolation = logtb_find_isolation (tran_index);
7519  if (isolation == TRAN_REPEATABLE_READ || isolation == TRAN_SERIALIZABLE)
7520  {
7521  return; /* Nothing was released */
7522  }
7523 
7524  tran_lock = &lk_Gl.tran_lock_table[tran_index];
7525  rv = pthread_mutex_lock (&tran_lock->non2pl_mutex);
7526 
7527  prev = NULL;
7528  curr = tran_lock->non2pl_list;
7529  while (tran_lock->num_incons_non2pl > 0 && curr != NULL)
7530  {
7531  if (curr->granted_mode != INCON_NON_TWO_PHASE_LOCK)
7532  {
7533  prev = curr;
7534  curr = curr->tran_next;
7535  continue;
7536  }
7537 
7538  /* curr->granted_mode == INCON_NON_TWO_PHASE_LOCK */
7539  assert (curr->res_head->key.type != LOCK_RESOURCE_INSTANCE || !OID_ISNULL (&curr->res_head->key.class_oid));
7540 
7541  ret_val = (*fun) (&curr->res_head->key.class_oid, &curr->res_head->key.oid, args);
7542  if (ret_val != true)
7543  {
7544  /* the notification area is full */
7545  pthread_mutex_unlock (&(tran_lock->non2pl_mutex));
7546 
7547  goto end;
7548  }
7549 
7550  /* the non-2pl entry should be freed. */
7551  /* 1. remove it from transaction non2pl list */
7552  next = curr->tran_next;
7553  if (prev == NULL)
7554  {
7555  tran_lock->non2pl_list = next;
7556  }
7557  else
7558  {
7559  prev->tran_next = next;
7560  }
7561 
7562  tran_lock->num_incons_non2pl -= 1;
7563 
7564  /* 2. append current entry to incon_non2pl_list */
7565  curr->tran_next = NULL;
7566  if (incon_non2pl_list_header == NULL)
7567  {
7568  incon_non2pl_list_header = curr;
7569  incon_non2pl_list_tail = curr;
7570  }
7571  else
7572  {
7573  incon_non2pl_list_tail->tran_next = curr;
7574  incon_non2pl_list_tail = curr;
7575  }
7576 
7577  curr = next;
7578  }
7579 
7580  /* release transaction non2pl mutex */
7581  pthread_mutex_unlock (&tran_lock->non2pl_mutex);
7582 
7583 end:
7584 
7585  curr = incon_non2pl_list_header;
7586  while (curr != NULL)
7587  {
7588  next = curr->tran_next;
7589 
7590  /* remove it from resource non2pl list and free it */
7591  lock_remove_non2pl (thread_p, curr, tran_index);
7592 
7593  curr = next;
7594  }
7595 
7596  return;
7597 #endif /* !SERVER_MODE */
7598 }
7599 
7600 /*
7601  * lock_is_local_deadlock_detection_interval_up - Check local deadlock detection interval
7602  *
7603  * return:
7604  *
7605  * Note:check if the local deadlock detection should be performed.
7606  */
7607 static bool
7609 {
7610 #if defined (SERVER_MODE)
7611  struct timeval now, elapsed;
7612  double elapsed_sec;
7613 
7614  /* check deadlock detection interval */
7615  gettimeofday (&now, NULL);
7616  perfmon_diff_timeval (&elapsed, &lk_Gl.last_deadlock_run, &now);
7617  elapsed_sec = elapsed.tv_sec + (elapsed.tv_usec / 1000000.0);
7618 
7620  {
7621  return false;
7622  }
7623 
7624  /* update the last deadlock run time */
7625  lk_Gl.last_deadlock_run = now;
7626 
7627  return true;
7628 #else /* !SERVER_MODE */
7629  return false;
7630 #endif /* SERVER_MODE */
7631 }
7632 
7633 //
7634 // lock_victimize_first_thread_mapfunc - map function on all entries until one lock waiter is victimized
7635 //
7636 // thread_ref (in) : current thread
7637 // stop_mapper (in) : output to stop mapper when a thread was victimized
7638 //
7639 static void
7640 lock_victimize_first_thread_mapfunc (THREAD_ENTRY & thread_ref, bool & stop_mapper)
7641 {
7642 #if defined (SERVER_MODE)
7643  if (thread_ref.lockwait == NULL)
7644  {
7645  return;
7646  }
7647  int tran_index = thread_ref.tran_index;
7648  if (lock_wakeup_deadlock_victim_timeout (tran_index))
7649  {
7650  stop_mapper = true;
7653  }
7654 #endif // SERVER_MODE
7655 }
7656 
7657 /*
7658  * lock_detect_local_deadlock - Run the local deadlock detection
7659  *
7660  * return: nothing
7661  *
7662  * Note:Run the deadlock detection. For every cycle either timeout or
7663  * abort a transaction. The timeout option is always preferred over
7664  * the unilaterally abort option. When the unilaterally abort option
7665  * is exercised, the youngest transaction in the cycle is selected.
7666  * The youngest transaction is hopefully the one that has done less work.
7667  *
7668  * First, allocate heaps for WFG table from local memory.
7669  * Check whether deadlock(s) have been occurred or not.
7670  *
7671  * Deadlock detection is performed via exhaustive loop construction
7672  * which indicates the wait-for-relationship.
7673  * If deadlock is detected,
7674  * the first transaction which enables a cycle
7675  * when scanning from the first of object lock table to the last of it.
7676  *
7677  * The deadlock of victims are waken up and aborted by themselves.
7678  *
7679  * Last, free WFG framework.
7680  */
7681 static void
7683 {
7684 #if !defined (SERVER_MODE)
7685  return;
7686 #else /* !SERVER_MODE */
7687  int k, s, t;
7688  LK_RES *res_ptr;
7689  LK_ENTRY *hi, *hj;
7690  LK_WFG_NODE *TWFG_node;
7691  LK_WFG_EDGE *TWFG_edge;
7692  int i, rv;
7693  LOCK_COMPATIBILITY compat1, compat2;
7694  int tran_index;
7695  FILE *log_fp;
7696 
7697  /* initialize deadlock detection related structures */
7698 
7699  /* initialize transaction WFG node table.. The current transaction might be old deadlock victim. And, the transaction
7700  * may have not been aborted, until now. Even if the transaction(old deadlock victim) has not been aborted, set
7701  * checked_by_deadlock_detector of the transaction to true. */
7702  for (i = 1; i < lk_Gl.num_trans; i++)
7703  {
7704  lk_Gl.TWFG_node[i].first_edge = -1;
7705  lk_Gl.TWFG_node[i].tran_edge_seq_num = 0;
7706  lk_Gl.TWFG_node[i].checked_by_deadlock_detector = true;
7707  }
7708 
7709  /* initialize transaction WFG edge table */
7710  lk_Gl.TWFG_edge = &TWFG_edge_block[0];
7711  lk_Gl.max_TWFG_edge = LK_MIN_TWFG_EDGE_COUNT; /* initial value */
7712  for (i = 0; i < LK_MIN_TWFG_EDGE_COUNT; i++)
7713  {
7714  lk_Gl.TWFG_edge[i].to_tran_index = -1;
7715  lk_Gl.TWFG_edge[i].next = (i + 1);
7716  }
7717  lk_Gl.TWFG_edge[lk_Gl.max_TWFG_edge - 1].next = -1;
7718  lk_Gl.TWFG_free_edge_idx = 0;
7719 
7720  /* initialize global_edge_seq_num */
7721  lk_Gl.global_edge_seq_num = 0;
7722 
7723  /* initialize victim count */
7724  victim_count = 0; /* used as index of victims array */
7725 
7726  /* hold the deadlock detection mutex */
7727  rv = pthread_mutex_lock (&lk_Gl.DL_detection_mutex);
7728 
7729  // *INDENT-OFF*
7730  lk_hashmap_iterator iterator { thread_p, lk_Gl.m_obj_hash_table };
7731  // *INDENT-ON*
7732  for (res_ptr = iterator.iterate (); res_ptr != NULL; res_ptr = iterator.iterate ())
7733  {
7734  /* holding resource mutex */
7735  if (res_ptr->holder == NULL)
7736  {
7737  if (res_ptr->waiter == NULL)
7738  {
7739  continue;
7740  }
7741  else
7742  {
7743 #if defined(CUBRID_DEBUG)
7744  FILE *lk_fp;
7745  time_t cur_time;
7746  char time_val[CTIME_MAX];
7747 
7748  lk_fp = fopen ("lock_waiter_only_info.log", "a");
7749  if (lk_fp != NULL)
7750  {
7751  cur_time = time (NULL);
7752  (void) ctime_r (&cur_time, time_val);
7753  fprintf (lk_fp, "##########################################\n");
7754  fprintf (lk_fp, "# current time: %s\n", time_val);
7755  lock_dump_resource (lk_fp, res_ptr);
7756  fprintf (lk_fp, "##########################################\n");
7757  fclose (lk_fp);
7758  }
7759 #endif /* CUBRID_DEBUG */
7760  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, ER_LK_LOCK_WAITER_ONLY, 1, "lock_waiter_only_info.log");
7761 
7762  if (res_ptr->total_holders_mode != NULL_LOCK)
7763  {
7765  res_ptr->total_holders_mode = NULL_LOCK;
7766  }
7767  (void) lock_grant_blocked_waiter (thread_p, res_ptr);
7768  }
7769  }
7770 
7771  /* among holders */
7772  for (hi = res_ptr->holder; hi != NULL; hi = hi->next)
7773  {
7774  if (hi->blocked_mode == NULL_LOCK)
7775  {
7776  break;
7777  }
7778  for (hj = hi->next; hj != NULL; hj = hj->next)
7779  {
7780  assert (hi->granted_mode >= NULL_LOCK && hi->blocked_mode >= NULL_LOCK);
7781  assert (hj->granted_mode >= NULL_LOCK && hj->blocked_mode >= NULL_LOCK);
7782 
7783  compat1 = lock_Comp[hj->blocked_mode][hi->granted_mode];
7784  compat2 = lock_Comp[hj->blocked_mode][hi->blocked_mode];
7785  assert (compat1 != LOCK_COMPAT_UNKNOWN && compat2 != LOCK_COMPAT_UNKNOWN);
7786 
7787  if (compat1 == LOCK_COMPAT_NO || compat2 == LOCK_COMPAT_NO)
7788  {
7789  (void) lock_add_WFG_edge (hj->tran_index, hi->tran_index, true, hj->thrd_entry->lockwait_stime);
7790  }
7791 
7792  compat1 = lock_Comp[hi->blocked_mode][hj->granted_mode];
7793  assert (compat1 != LOCK_COMPAT_UNKNOWN);
7794 
7795  if (compat1 == LOCK_COMPAT_NO)
7796  {
7797  (void) lock_add_WFG_edge (hi->tran_index, hj->tran_index, true, hi->thrd_entry->lockwait_stime);
7798  }
7799  }
7800  }
7801 
7802  /* from waiters in the waiter to holders */
7803  for (hi = res_ptr->holder; hi != NULL; hi = hi->next)
7804  {
7805  for (hj = res_ptr->waiter; hj != NULL; hj = hj->next)
7806  {
7807  assert (hi->granted_mode >= NULL_LOCK && hi->blocked_mode >= NULL_LOCK);
7808  assert (hj->granted_mode >= NULL_LOCK && hj->blocked_mode >= NULL_LOCK);
7809 
7810  compat1 = lock_Comp[hj->blocked_mode][hi->granted_mode];
7811  compat2 = lock_Comp[hj->blocked_mode][hi->blocked_mode];
7812  assert (compat1 != LOCK_COMPAT_UNKNOWN && compat2 != LOCK_COMPAT_UNKNOWN);
7813 
7814  if (compat1 == LOCK_COMPAT_NO || compat2 == LOCK_COMPAT_NO)
7815  {
7816  (void) lock_add_WFG_edge (hj->tran_index, hi->tran_index, true, hj->thrd_entry->lockwait_stime);
7817  }
7818  }
7819  }
7820 
7821  /* from waiters in the waiter to other waiters in the waiter */
7822  for (hi = res_ptr->waiter; hi != NULL; hi = hi->next)
7823  {
7824  for (hj = hi->next; hj != NULL; hj = hj->next)
7825  {
7826  assert (hj->blocked_mode >= NULL_LOCK && hi->blocked_mode >= NULL_LOCK);
7827 
7828  compat1 = lock_Comp[hj->blocked_mode][hi->blocked_mode];
7829  assert (compat1 != LOCK_COMPAT_UNKNOWN);
7830 
7831  if (compat1 == LOCK_COMPAT_NO)
7832  {
7833  (void) lock_add_WFG_edge (hj->tran_index, hi->tran_index, false, hj->thrd_entry->lockwait_stime);
7834  }
7835  }
7836  }
7837  }
7838 
7839  /* release DL detection mutex */
7840  pthread_mutex_unlock (&lk_Gl.DL_detection_mutex);
7841 
7842  /* simple notation for using in the following statements */
7843  TWFG_node = lk_Gl.TWFG_node;
7844  TWFG_edge = lk_Gl.TWFG_edge;
7845 
7846  /*
7847  * deadlock detection and victim selection
7848  */
7849 
7850  for (k = 1; k < lk_Gl.num_trans; k++)
7851  {
7852  TWFG_node[k].current = TWFG_node[k].first_edge;
7853  TWFG_node[k].ancestor = -1;
7854  }
7855  for (k = 1; k < lk_Gl.num_trans; k++)
7856  {
7857  if (TWFG_node[k].current == -1)
7858  {
7859  continue;
7860  }
7861  s = k;
7862  TWFG_node[s].ancestor = -2;
7863  for (; s != -2;)
7864  {
7865  if (TWFG_node[s].checked_by_deadlock_detector == false || TWFG_node[s].thrd_wait_stime == 0
7866  || (TWFG_node[s].current != -1
7867  && (TWFG_node[s].thrd_wait_stime > TWFG_edge[TWFG_node[s].current].edge_wait_stime)))
7868  {
7869  /* A new transaction started */
7870  TWFG_node[s].first_edge = -1;
7871  TWFG_node[s].current = -1;
7872  }
7873 
7874  if (TWFG_node[s].current == -1)
7875  {
7876  t = TWFG_node[s].ancestor;
7877  TWFG_node[s].ancestor = -1;
7878  s = t;
7879  if (s != -2 && TWFG_node[s].current != -1)
7880  {
7881  assert_release (TWFG_node[s].current >= 0 && TWFG_node[s].current < lk_Gl.max_TWFG_edge);
7882  TWFG_node[s].current = TWFG_edge[TWFG_node[s].current].next;
7883  }
7884  continue;
7885  }
7886 
7887  assert_release (TWFG_node[s].current >= 0 && TWFG_node[s].current < lk_Gl.max_TWFG_edge);
7888 
7889  t = TWFG_edge[TWFG_node[s].current].to_tran_index;
7890 
7891  if (t == -2)
7892  { /* old WFG edge */
7893  TWFG_node[s].current = TWFG_edge[TWFG_node[s].current].next;
7894  continue;
7895  }
7896 
7897  if (TWFG_node[t].current == -1)
7898  {
7899  TWFG_edge[TWFG_node[s].current].to_tran_index = -2;
7900  TWFG_node[s].current = TWFG_edge[TWFG_node[s].current].next;
7901  continue;
7902  }
7903 
7904  if (TWFG_node[t].checked_by_deadlock_detector == false || TWFG_node[t].thrd_wait_stime == 0
7905  || TWFG_node[t].thrd_wait_stime > TWFG_edge[TWFG_node[t].current].edge_wait_stime)
7906  {
7907  TWFG_node[t].first_edge = -1;
7908  TWFG_node[t].current = -1;
7909  TWFG_edge[TWFG_node[s].current].to_tran_index = -2;
7910  TWFG_node[s].current = TWFG_edge[TWFG_node[s].current].next;
7911  continue;
7912  }
7913 
7914  if (TWFG_edge[TWFG_node[s].current].edge_seq_num < TWFG_node[t].tran_edge_seq_num)
7915  { /* old WFG edge */
7916  TWFG_edge[TWFG_node[s].current].to_tran_index = -2;
7917  TWFG_node[s].current = TWFG_edge[TWFG_node[s].current].next;
7918  continue;
7919  }
7920 
7921  if (TWFG_node[t].ancestor != -1)
7922  {
7923  /* A deadlock cycle is found */
7924  lock_select_deadlock_victim (thread_p, s, t);
7925  if (victim_count >= LK_MAX_VICTIM_COUNT)
7926  {
7927  goto final_;
7928  }
7929  }
7930  else
7931  {
7932  TWFG_node[t].ancestor = s;
7933  TWFG_node[t].candidate = TWFG_edge[TWFG_node[s].current].holder_flag;
7934  }
7935  s = t;
7936  }
7937  }
7938 
7939 final_:
7940 
7941 #if defined(ENABLE_SYSTEMTAP)
7942  if (victim_count > 0)
7943  {
7944  CUBRID_TRAN_DEADLOCK ();
7945  }
7946 #endif /* ENABLE_SYSTEMTAP */
7947 
7948 #if defined (ENABLE_UNUSED_FUNCTION)
7949  if (victim_count > 0)
7950  {
7951  size_t size_loc;
7952  char *ptr;
7953  FILE *fp = port_open_memstream (&ptr, &size_loc);
7954 
7955  if (fp)
7956  {
7957  lock_dump_deadlock_victims (thread_p, fp);
7958  port_close_memstream (fp, &ptr, &size_loc);
7959 
7961 
7962  if (ptr != NULL)
7963  {
7964  free (ptr);
7965  }
7966  }
7967  }
7968 #endif /* ENABLE_UNUSED_FUNCTION */
7969 
7970  /* dump deadlock cycle to event log file */
7971  for (k = 0; k < victim_count; k++)
7972  {
7973  if (victims[k].tran_index_in_cycle == NULL)
7974  {
7975  continue;
7976  }
7977 
7978  log_fp = event_log_start (thread_p, "DEADLOCK");
7979  if (log_fp != NULL)
7980  {
7981  for (i = 0; i < victims[k].num_trans_in_cycle; i++)
7982  {
7983  tran_index = victims[k].tran_index_in_cycle[i];
7984  event_log_print_client_info (tran_index, 0);
7985  lock_event_log_tran_locks (thread_p, log_fp, tran_index);
7986  }
7987 
7988  event_log_end (thread_p);
7989  }
7990 
7991  free_and_init (victims[k].tran_index_in_cycle);
7992  }
7993 
7994  /* Now solve the deadlocks (cycles) by executing the cycle resolution function (e.g., aborting victim) */
7995  for (k = 0; k < victim_count; k++)
7996  {
7997  if (victims[k].can_timeout)
7998  {
7999  (void) lock_wakeup_deadlock_victim_timeout (victims[k].tran_index);
8000  }
8001  else
8002  {
8003  (void) lock_wakeup_deadlock_victim_aborted (victims[k].tran_index);
8004  }
8005  }
8006 
8007  /* deallocate memory space used for deadlock detection */
8008  if (lk_Gl.max_TWFG_edge > LK_MID_TWFG_EDGE_COUNT)
8009  {
8010  free_and_init (lk_Gl.TWFG_edge);
8011  }
8012 
8013  if (victim_count == 0)
8014  {
8015  if (lk_Gl.no_victim_case_count < 60)
8016  {
8017  lk_Gl.no_victim_case_count += 1;
8018  }
8019  else
8020  {
8021  /* Make sure that we have threads available for another client to execute, otherwise Panic... */
8023  {
8024  /* We must timeout at least one thread, so other clients can execute, otherwise, the server will hang. */
8026  }
8027  lk_Gl.no_victim_case_count = 0;
8028  }
8029  }
8030 
8031  return;
8032 #endif /* !SERVER_MODE */
8033 }
8034 
8035 #if 0 /* NOT_USED */
8036 /*
8037  */
8038 
8039 /*
8040  * lk_global_deadlock_detection: RUN THE GLOBAL DEADLOCK DETECTION
8041  * arguments:
8042  * returns/side-effects: nothing
8043  * Note: Run the deadlock detection. For every cycle either timeout or
8044  * abort a transaction. The timeout option is always preferred
8045  * over the unilaterally abort option. When the unilaterally
8046  * abort option is exercised, the youngest transaction in the
8047  * cycle is selected. The youngest transaction is hopefully the
8048  * one that has done less work.
8049  */
8050 void
8051 lk_global_deadlock_detection (void)
8052 {
8053 #if !defined (SERVER_MODE)
8054  return;
8055 #else /* !SERVER_MODE */
8056  int i, j;
8057  WFG_CYCLE *cycles, *cur_cycle;
8058  WFG_CYCLE_CASE cycle_case;
8059  int num_victims;
8060  int tot_num_victims = 0;
8061  LK_DEADLOCK_VICTIM victims[LK_MAX_VICTIM_COUNT];
8062  LK_DEADLOCK_VICTIM *v_p;
8063  int tran_index;
8064  TRANID tranid;
8065  int can_timeout;
8066  int already_picked;
8067  int ok;
8068  int error;
8069  bool isvictim_tg_waiting;
8070  bool iscandidate_tg_waiting;
8071 
8072  /* 1. Find all the cycles that are currently involved in the system */
8073  cycle_case = WFG_CYCLE_YES_PRUNE;
8074  while (cycle_case == WFG_CYCLE_YES_PRUNE)
8075  {
8076  error = wfg_detect_cycle (&cycle_case, &cycles);
8077 
8078  if (error == NO_ERROR && (cycle_case == WFG_CYCLE_YES_PRUNE || cycle_case == WFG_CYCLE_YES))
8079  {
8080  /* There are deadlocks, we must select a victim for each cycle. We try to break a cycle by timing out a
8081  * transaction whenever is possible. In any other case, we select a victim for an unilaterally abort. */
8082  num_victims = 0;
8083  for (cur_cycle = cycles; cur_cycle != NULL && num_victims < LK_MAX_VICTIM_COUNT; cur_cycle = cur_cycle->next)
8084  {
8085  victims[num_victims].tran_index = NULL_TRAN_INDEX;
8086  victims[num_victims].can_timeout = false;
8087  already_picked = false;
8088 
8089  /* Pick a victim for next cycle */
8090  for (i = 0; i < cur_cycle->num_trans && already_picked == false; i++)
8091  {
8092  tran_index = cur_cycle->waiters[i].tran_index;
8093  for (j = 0; j < num_victims; j++)
8094  {
8095  if (tran_index == victims[j].tran_index)
8096  {
8097  /* A victim for this cycle has already been picked. The index is part of another cycle */
8098  already_picked = true;
8099  break;
8100  }
8101  }
8102  if (already_picked != true)
8103  {
8104  tranid = logtb_find_tranid (tran_index);
8105  can_timeout = LK_CAN_TIMEOUT (logtb_find_wait_msecs (tran_index));
8106  /* Victim selection: 1) Avoid unactive transactions. 2) Prefer a waiter of TG resources. 3)
8107  * Prefer a transaction with a closer tiemout. 4) Prefer the youngest transaction. */
8108  /* Have we selected a victim or the currently victim is inactive (i.e., in rollback or commit
8109  * process), select the new candidate as the victim. */
8110  ok = 0;
8111 
8112  /*
8113  * never consider the unactive one as a victim
8114  */
8115  if (logtb_is_active (tranid) == false)
8116  continue;
8117 
8118  if (victims[num_victims].tran_index == NULL_TRAN_INDEX
8119  || (logtb_is_active (victims[num_victims].tranid) == false
8120  && logtb_is_active (tranid) != false))
8121  {
8122  ok = 1;
8123  }
8124  else
8125  {
8126  isvictim_tg_waiting = wfg_is_tran_group_waiting (victims[num_victims].tran_index);
8127 
8128  iscandidate_tg_waiting = wfg_is_tran_group_waiting (tran_index);
8129 
8130  if (isvictim_tg_waiting != NO_ERROR)
8131  {
8132  if (iscandidate_tg_waiting == NO_ERROR
8133  || (victims[num_victims].can_timeout == false && can_timeout == true)
8134  || (victims[num_victims].can_timeout == can_timeout
8135  && LK_ISYOUNGER (tranid, victims[num_victims].tranid)))
8136  {
8137  ok = 1;
8138  }
8139  }
8140  else
8141  {
8142  if (iscandidate_tg_waiting == NO_ERROR
8143  && ((victims[num_victims].can_timeout == false && can_timeout == true)
8144  || (victims[num_victims].can_timeout == can_timeout
8145  && LK_ISYOUNGER (tranid, victims[num_victims].tranid))))
8146  {
8147  ok = 1;
8148  }
8149  }
8150  }
8151 
8152  if (ok == 1)
8153  {
8154  victims[num_victims].tran_index = tran_index;
8155  victims[num_victims].tranid = tranid;
8156  victims[num_victims].can_timeout = can_timeout;
8157  victims[num_victims].cycle_fun = cur_cycle->waiters[i].cycle_fun;
8158  victims[num_victims].args = cur_cycle->waiters[i].args;
8159  }
8160  }
8161  }
8162  if (already_picked != true && victims[num_victims].tran_index != NULL_TRAN_INDEX)
8163  {
8164  num_victims++;
8165  }
8166  }
8167 
8168  /* Now, solve the deadlocks (cycles) by executing the cycle resolution function (e.g., aborting victim) */
8169  for (i = 0; i < num_victims; i++)
8170  {
8171  *v_p = victims[i];
8172  if (v_p->cycle_fun != NULL)
8173  {
8174  /* There is a function to solve the cycle. */
8175  if ((*v_p->cycle_fun) (v_p->tran_index, v_p->args) == NO_ERROR)
8176  ok = true;
8177  else
8178  ok = false;
8179  }
8180  else
8181  {
8182  ok = false;
8183  }
8184 
8185  /* If a function to break the cycle was not provided or the function failed, the transaction is
8186  * aborted/timed-out */
8187  if (ok == false)
8188  {
8189  if (v_p->can_timeout == false)
8190  {
8191  if (lock_wakeup_deadlock_victim_aborted (v_p->tran_index) == false)
8192  msql_tm_abort_detected (v_p->tran_index, NULL);
8193  }
8194  else
8195  {
8196  if (lock_wakeup_deadlock_victim_timeout (v_p->tran_index) == false)
8197  msql_tm_timeout_detected (v_p->tran_index, NULL);
8198  }
8199  }
8200  }
8201  wfg_free_cycle (cycles);
8202 
8203  tot_num_victims += num_victims;
8204 
8205  if (num_victims >= LK_MAX_VICTIM_COUNT)
8206  cycle_case = WFG_CYCLE_YES_PRUNE;
8207  }
8208  }
8209 #endif /* !SERVER_MODE */
8210 }
8211 #endif /* NOT_USED */
8212 
8213 /*
8214  * lock_reacquire_crash_locks - Reacquire given (exclusive) locks
8215  *
8216  * return: returns one value of following three:
8217  * (LK_GRANTED, LK_NOTGRANTED_DUE_TIMEOUT, LK_NOTGRANTED_DUE_ERROR)
8218  *
8219  * acqlocks(in): list of locks to be acquired
8220  * tran_index(in): transaction index
8221  * whose transaction needs to obtain the given locks
8222  *
8223  * Note:This function acquires locks (likely exclusive locks) which were
8224  * acquired before a crash on behalf of the specified transaction.
8225  *
8226  * Note: This function should only be called during recovery restart
8227  * time. The function does not try to get all or none of the locks
8228  * since they have already been granted to the transaction before
8229  * the crash. If a lock cannot be granted, an error is set and
8230  * returned, however, the fucntion will not stop acquiring the rest
8231  * of the indicated locks.
8232  */
8233 int
8234 lock_reacquire_crash_locks (THREAD_ENTRY * thread_p, LK_ACQUIRED_LOCKS * acqlocks, int tran_index)
8235 {
8236 #if !defined (SERVER_MODE)
8237  return LK_GRANTED;
8238 #else /* !SERVER_MODE */
8239  int granted = LK_GRANTED, r;
8240  unsigned int i;
8241  LK_ENTRY *dummy_ptr;
8242 
8243  if (acqlocks == NULL)
8244  {
8245  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_LK_BAD_ARGUMENT, 2, "lk_reacquire_crash_locks",
8246  "NULL acqlocks pointer");
8247  return LK_NOTGRANTED_DUE_ERROR;
8248  }
8249 
8250  /* reacquire given exclusive locks on behalf of the transaction */
8251  for (i = 0; i < acqlocks->nobj_locks; i++)
8252  {
8253  /*
8254  * lock wait duration : LK_INFINITE_WAIT
8255  * conditional lock request : false
8256  */
8257  r = lock_internal_perform_lock_object (thread_p, tran_index, &acqlocks->obj[i].oid, &acqlocks->obj[i].class_oid,
8258  acqlocks->obj[i].lock, LK_INFINITE_WAIT, &dummy_ptr, NULL);
8259  if (r != LK_GRANTED)
8260  {
8261  er_log_debug (ARG_FILE_LINE, "lk_reacquire_crash_locks: The lock cannot be reacquired...");
8262  granted = r;
8263  continue;
8264  }
8265  }
8266  return granted;
8267 #endif /* !SERVER_MODE */
8268 }
8269 
8270 /*
8271  * lock_unlock_all_shared_get_all_exclusive - Release all shared type locks and
8272  * optionally list the exclusive type locks
8273  *
8274  * return: nothing
8275  *
8276  * acqlocks(in/out):Get the list of acquired exclusive locks or NULL
8277  *
8278  * Note:Release all shared type locks (i.e., S_LOCK, IS_LOCK, SIX_LOCK
8279  * -- demoted to IX_LOCK), and obtain all remianing locks (i.e.,
8280  * exclusive locks such as IX_LOCK, X_LOCK).
8281  *
8282  * Note: This function must be called during the two phase commit
8283  * protocol of a distributed transaction.
8284  */
8285 void
8287 {
8288 #if !defined (SERVER_MODE)
8289  /* No locks in standalone */
8290  if (acqlocks != NULL)
8291  {
8292  acqlocks->nobj_locks = 0;
8293  acqlocks->obj = NULL;
8294  }
8295 #else /* !SERVER_MODE */
8296  int tran_index;
8297  LK_TRAN_LOCK *tran_lock;
8298  int idx;
8299  LK_ENTRY *entry_ptr;
8300  int rv;
8301 
8302  /* some preparation */
8303  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
8304 
8305  /************************************/
8306  /* phase 1: unlock all shared locks */
8307  /************************************/
8308  lock_demote_all_shared_class_locks (thread_p, tran_index);
8309  lock_remove_all_inst_locks (thread_p, tran_index, NULL, S_LOCK);
8310  lock_remove_all_class_locks (thread_p, tran_index, S_LOCK);
8311 
8312  /************************************/
8313  /* phase 2: get all exclusive locks */
8314  /************************************/
8315  if (acqlocks != NULL)
8316  {
8317  /* get a pointer to transaction lock info entry */
8318  tran_lock = &lk_Gl.tran_lock_table[tran_index];
8319 
8320  /* hold transction lock hold mutex */
8321  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
8322 
8323  /* get nobj_locks */
8324  acqlocks->nobj_locks = (unsigned int) (tran_lock->class_hold_count + tran_lock->inst_hold_count);
8325  if (tran_lock->root_class_hold != NULL)
8326  {
8327  acqlocks->nobj_locks += 1;
8328  }
8329 
8330  /* allocate momory space for saving exclusive lock information */
8331  acqlocks->obj = (LK_ACQOBJ_LOCK *) malloc (SIZEOF_LK_ACQOBJ_LOCK * acqlocks->nobj_locks);
8332  if (acqlocks->obj == NULL)
8333  {
8334  pthread_mutex_unlock (&tran_lock->hold_mutex);
8336  (size_t) (SIZEOF_LK_ACQOBJ_LOCK * acqlocks->nobj_locks));
8337  acqlocks->nobj_locks = 0;
8338  return;
8339  }
8340 
8341  /* initialize idx in acqlocks->obj array */
8342  idx = 0;
8343 
8344  /* collect root class lock information */
8345  entry_ptr = tran_lock->root_class_hold;
8346  if (entry_ptr != NULL)
8347  {
8348  assert (tran_index == entry_ptr->tran_index);
8349 
8350  COPY_OID (&acqlocks->obj[idx].oid, oid_Root_class_oid);
8351  OID_SET_NULL (&acqlocks->obj[idx].class_oid);
8352  acqlocks->obj[idx].lock = entry_ptr->granted_mode;
8353  idx += 1;
8354  }
8355 
8356  /* collect general class lock information */
8357  for (entry_ptr = tran_lock->class_hold_list; entry_ptr != NULL; entry_ptr = entry_ptr->tran_next)
8358  {
8359  assert (tran_index == entry_ptr->tran_index);
8360 
8361  COPY_OID (&acqlocks->obj[idx].oid, &entry_ptr->res_head->key.oid);
8362  COPY_OID (&acqlocks->obj[idx].class_oid, oid_Root_class_oid);
8363  acqlocks->obj[idx].lock = entry_ptr->granted_mode;
8364  idx += 1;
8365  }
8366 
8367  /* collect instance lock information */
8368  for (entry_ptr = tran_lock->inst_hold_list; entry_ptr != NULL; entry_ptr = entry_ptr->tran_next)
8369  {
8370  assert (tran_index == entry_ptr->tran_index);
8371 
8372  COPY_OID (&acqlocks->obj[idx].oid, &entry_ptr->res_head->key.oid);
8373  COPY_OID (&acqlocks->obj[idx].class_oid, &entry_ptr->res_head->key.class_oid);
8374  acqlocks->obj[idx].lock = entry_ptr->granted_mode;
8375  idx += 1;
8376  }
8377 
8378  /* release transaction lock hold mutex */
8379  pthread_mutex_unlock (&tran_lock->hold_mutex);
8380  }
8381 #endif /* !SERVER_MODE */
8382 }
8383 
8384 /*
8385  * lock_dump_acquired - Dump structure of acquired locks
8386  *
8387  * return: nothing
8388  *
8389  * acqlocks(in): The acquired locks
8390  *
8391  * Note:Dump the structure of acquired locks
8392  */
8393 void
8394 lock_dump_acquired (FILE * fp, LK_ACQUIRED_LOCKS * acqlocks)
8395 {
8396 #if !defined (SERVER_MODE)
8397  return;
8398 #else /* !SERVER_MODE */
8399  unsigned int i;
8400 
8401  /* Dump object locks */
8402  if (acqlocks->obj != NULL && acqlocks->nobj_locks > 0)
8403  {
8404  fprintf (fp, "Object_locks: count = %d\n", acqlocks->nobj_locks);
8405  for (i = 0; i < acqlocks->nobj_locks; i++)
8406  {
8407  fprintf (fp, " |%d|%d|%d| %s\n", acqlocks->obj[i].oid.volid, acqlocks->obj[i].oid.pageid,
8408  acqlocks->obj[i].oid.slotid, LOCK_TO_LOCKMODE_STRING (acqlocks->obj[i].lock));
8409  }
8410  }
8411 #endif /* !SERVER_MODE */
8412 }
8413 
8414 /*
8415  * xlock_dump - Dump the contents of lock table
8416  *
8417  * return: nothing
8418  *
8419  * outfp(in): FILE stream where to dump the lock table. If NULL is given,
8420  * it is dumped to stdout.
8421  *
8422  * Note:Dump the lock and waiting tables for both objects and pages.
8423  * That is, the lock activity of the datbase. It may be useful
8424  * for finding concurrency problems and locking bottlenecks on
8425  * an application, so that you can set the appropiate isolation
8426  * level or modify the design of the application.
8427  */
8428 void
8429 xlock_dump (THREAD_ENTRY * thread_p, FILE * outfp)
8430 {
8431 #if !defined (SERVER_MODE)
8432  return;
8433 #else /* !SERVER_MODE */
8434  const char *client_prog_name; /* Client program name for tran */
8435  const char *client_user_name; /* Client user name for tran */
8436  const char *client_host_name; /* Client host for tran */
8437  int client_pid; /* Client process id for tran */
8438  TRAN_ISOLATION isolation; /* Isolation for client tran */
8439  TRAN_STATE state;
8440  int wait_msecs;
8441  int old_wait_msecs = 0; /* Old transaction lock wait */
8442  int tran_index;
8443  LK_RES *res_ptr;
8444  int num_locked;
8445  float lock_timeout_sec;
8446  char lock_timeout_string[64];
8447 
8448  if (outfp == NULL)
8449  {
8450  outfp = stdout;
8451  }
8452 
8456 
8457  /* Don't get block from anything when dumping object lock table. */
8458  old_wait_msecs = xlogtb_reset_wait_msecs (thread_p, LK_FORCE_ZERO_WAIT);
8459 
8460  /* Dump some information about all transactions */
8462  for (tran_index = 0; tran_index < lk_Gl.num_trans; tran_index++)
8463  {
8464  if (logtb_find_client_name_host_pid (tran_index, &client_prog_name, &client_user_name, &client_host_name,
8465  &client_pid) != NO_ERROR)
8466  {
8467  /* Likely this index is not assigned */
8468  continue;
8469  }
8470  isolation = logtb_find_isolation (tran_index);
8471  state = logtb_find_state (tran_index);
8472  wait_msecs = logtb_find_wait_msecs (tran_index);
8473  lock_timeout_sec = lock_wait_msecs_to_secs (wait_msecs);
8474 
8475  if (lock_timeout_sec > 0)
8476  {
8477  sprintf (lock_timeout_string, ": %.2f", lock_timeout_sec);
8478  }
8479  else if ((int) lock_timeout_sec == LK_ZERO_WAIT || (int) lock_timeout_sec == LK_FORCE_ZERO_WAIT)
8480  {
8481  sprintf (lock_timeout_string, ": No wait");
8482  }
8483  else if ((int) lock_timeout_sec == LK_INFINITE_WAIT)
8484  {
8485  sprintf (lock_timeout_string, ": Infinite wait");
8486  }
8487  else
8488  {
8489  assert_release (0);
8490  sprintf (lock_timeout_string, ": %d", (int) lock_timeout_sec);
8491  }
8492 
8494  tran_index, client_prog_name, client_user_name, client_host_name, client_pid);
8496  log_isolation_string (isolation));
8498  log_state_string (state));
8500  lock_timeout_string);
8502  }
8503 
8504  /* compute number of lock res entries */
8505  num_locked = (int) lk_Gl.m_obj_hash_table.get_element_count ();
8506 
8507  /* dump object lock table */
8508  fprintf (outfp, "Object Lock Table:\n");
8509  fprintf (outfp, "\tCurrent number of objects which are locked = %d\n", num_locked);
8510  fprintf (outfp, "\tMaximum number of objects which can be locked = %d\n\n", lk_Gl.max_obj_locks);
8511 
8512  // *INDENT-OFF*
8513  lk_hashmap_iterator iterator { thread_p, lk_Gl.m_obj_hash_table };
8514  // *INDENT-ON*
8515  for (res_ptr = iterator.iterate (); res_ptr != NULL; res_ptr = iterator.iterate ())
8516  {
8517  lock_dump_resource (thread_p, outfp, res_ptr);
8518  }
8519 
8520  /* Reset the wait back to the way it was */
8521  (void) xlogtb_reset_wait_msecs (thread_p, old_wait_msecs);
8522 #endif /* !SERVER_MODE */
8523 }
8524 
8525 /*
8526  * lock_initialize_composite_lock -
8527  *
8528  * return: error code
8529  *
8530  * comp_lock(in):
8531  */
8532 int
8534 {
8535 #if !defined (SERVER_MODE)
8536  return NO_ERROR;
8537 #else /* !SERVER_MODE */
8538  LK_LOCKCOMP *lockcomp;
8539 
8540  lockcomp = &(comp_lock->lockcomp);
8541  lockcomp->tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
8542  lockcomp->wait_msecs = logtb_find_wait_msecs (lockcomp->tran_index);
8543  lockcomp->class_list = NULL;
8544  lockcomp->root_class_ptr = NULL;
8545  return NO_ERROR;
8546 #endif /* !SERVER_MODE */
8547 }
8548 
8549 /*
8550  * lock_add_composite_lock -
8551  *
8552  * return: error code
8553  *
8554  * comp_lock(in):
8555  * oid(in):
8556  * class_oid(in):
8557  * lock(in):
8558  */
8559 int
8560 lock_add_composite_lock (THREAD_ENTRY * thread_p, LK_COMPOSITE_LOCK * comp_lock, const OID * oid, const OID * class_oid)
8561 {
8562 #if !defined (SERVER_MODE)
8563  return NO_ERROR;
8564 #else /* !SERVER_MODE */
8565  LK_LOCKCOMP *lockcomp;
8566  LK_LOCKCOMP_CLASS *lockcomp_class;
8567  OID *p;
8568  int max_oids;
8569  bool need_free;
8570  int ret = NO_ERROR;
8571 
8572  need_free = false; /* init */
8573 
8574  lockcomp = &(comp_lock->lockcomp);
8575  for (lockcomp_class = lockcomp->class_list; lockcomp_class != NULL; lockcomp_class = lockcomp_class->next)
8576  {
8577  if (OID_EQ (class_oid, &lockcomp_class->class_oid))
8578  {
8579  break;
8580  }
8581  }
8582 
8583  if (lockcomp_class == NULL)
8584  { /* class is not found */
8585  /* allocate lockcomp_class */
8586  lockcomp_class = (LK_LOCKCOMP_CLASS *) db_private_alloc (thread_p, sizeof (LK_LOCKCOMP_CLASS));
8587  if (lockcomp_class == NULL)
8588  {
8590  goto exit_on_error;
8591  }
8592 
8593  need_free = true;
8594 
8595  lockcomp_class->inst_oid_space = NULL; /* init */
8596 
8597  if (lockcomp->root_class_ptr == NULL)
8598  {
8599  lockcomp->root_class_ptr = lock_get_class_lock (thread_p, oid_Root_class_oid);
8600  }
8601 
8602  /* initialize lockcomp_class */
8603  COPY_OID (&lockcomp_class->class_oid, class_oid);
8604  if (lock_internal_perform_lock_object (thread_p, lockcomp->tran_index, class_oid, NULL, IX_LOCK,
8605  lockcomp->wait_msecs, &lockcomp_class->class_lock_ptr,
8606  lockcomp->root_class_ptr) != LK_GRANTED)
8607  {
8608  ret = ER_FAILED;
8609  goto exit_on_error;
8610  }
8611  if (IS_WRITE_EXCLUSIVE_LOCK (lockcomp_class->class_lock_ptr->granted_mode))
8612  {
8613  lockcomp_class->inst_oid_space = NULL;
8614  }
8615  else
8616  {
8617  if (LK_COMPOSITE_LOCK_OID_INCREMENT < prm_get_integer_value (PRM_ID_LK_ESCALATION_AT))
8618  {
8619  lockcomp_class->max_inst_oids = LK_COMPOSITE_LOCK_OID_INCREMENT;
8620  }
8621  else
8622  {
8624  }
8625 
8626  lockcomp_class->inst_oid_space =
8627  (OID *) db_private_alloc (thread_p, sizeof (OID) * lockcomp_class->max_inst_oids);
8628  if (lockcomp_class->inst_oid_space == NULL)
8629  {
8631  goto exit_on_error;
8632  }
8633  lockcomp_class->num_inst_oids = 0;
8634  }
8635 
8636  /* connect lockcomp_class into the class_list of lockcomp */
8637  lockcomp_class->next = lockcomp->class_list;
8638  lockcomp->class_list = lockcomp_class;
8639 
8640  need_free = false;
8641  }
8642 
8643  if (lockcomp_class->class_lock_ptr->granted_mode < X_LOCK)
8644  {
8645  if (lockcomp_class->num_inst_oids == lockcomp_class->max_inst_oids)
8646  {
8648  {
8649  if ((lockcomp_class->max_inst_oids + LK_COMPOSITE_LOCK_OID_INCREMENT) <
8651  {
8652  max_oids = lockcomp_class->max_inst_oids + LK_COMPOSITE_LOCK_OID_INCREMENT;
8653  }
8654  else
8655  {
8657  }
8658  p = (OID *) db_private_realloc (thread_p, lockcomp_class->inst_oid_space, sizeof (OID) * max_oids);
8659  if (p == NULL)
8660  {
8662  goto exit_on_error;
8663  }
8664 
8665  lockcomp_class->inst_oid_space = p;
8666  lockcomp_class->max_inst_oids = max_oids;
8667  }
8668  }
8669 
8670  if (lockcomp_class->num_inst_oids < lockcomp_class->max_inst_oids)
8671  {
8672  COPY_OID (&lockcomp_class->inst_oid_space[lockcomp_class->num_inst_oids], oid);
8673  lockcomp_class->num_inst_oids++;
8674  }
8675  /* else, lockcomp_class->max_inst_oids equals PRM_LK_ESCALATION_AT. lock escalation will be performed. so no more
8676  * instance OID is stored. */
8677  }
8678 
8679  assert (ret == NO_ERROR);
8680 
8681 end:
8682 
8683  if (need_free)
8684  {
8685  if (lockcomp_class->inst_oid_space)
8686  {
8687  db_private_free_and_init (thread_p, lockcomp_class->inst_oid_space);
8688  }
8689  db_private_free_and_init (thread_p, lockcomp_class);
8690  }
8691 
8692  return ret;
8693 
8694 exit_on_error:
8695 
8696  assert (ret != NO_ERROR);
8697  if (ret == NO_ERROR)
8698  {
8699  ret = ER_FAILED;
8700  }
8701 
8702  goto end;
8703 #endif /* !SERVER_MODE */
8704 }
8705 
8706 /*
8707  * lock_finalize_composite_lock -
8708  *
8709  * return:
8710  *
8711  * comp_lock(in):
8712  */
8713 int
8715 {
8716 #if !defined (SERVER_MODE)
8717  return LK_GRANTED;
8718 #else /* !SERVER_MODE */
8719  LK_LOCKCOMP *lockcomp;
8720  LK_LOCKCOMP_CLASS *lockcomp_class;
8721  LK_ENTRY *dummy;
8722  int i, value = LK_GRANTED;
8723 
8724  lockcomp = &(comp_lock->lockcomp);
8725  for (lockcomp_class = lockcomp->class_list; lockcomp_class != NULL; lockcomp_class = lockcomp_class->next)
8726  {
8727  if (IS_WRITE_EXCLUSIVE_LOCK (lockcomp_class->class_lock_ptr->granted_mode)
8729  {
8730  /* hold X_LOCK on the class object */
8731  value = lock_internal_perform_lock_object (thread_p, lockcomp->tran_index, &lockcomp_class->class_oid, NULL,
8732  X_LOCK, lockcomp->wait_msecs, &dummy, lockcomp->root_class_ptr);
8733  if (value != LK_GRANTED)
8734  {
8735  break;
8736  }
8737  }
8738  else
8739  {
8740  /* hold X_LOCKs on the instance objects */
8741  for (i = 0; i < lockcomp_class->num_inst_oids; i++)
8742  {
8743  value = lock_internal_perform_lock_object (thread_p, lockcomp->tran_index,
8744  &lockcomp_class->inst_oid_space[i],
8745  &lockcomp_class->class_oid, X_LOCK, lockcomp->wait_msecs,
8746  &dummy, lockcomp_class->class_lock_ptr);
8747  if (value != LK_GRANTED)
8748  {
8749  break;
8750  }
8751  }
8752  if (value != LK_GRANTED)
8753  {
8754  break;
8755  }
8756  }
8757  }
8758 
8759  /* free alloced memory for composite locking */
8760  lock_abort_composite_lock (comp_lock);
8761 
8762  return value;
8763 #endif /* !SERVER_MODE */
8764 }
8765 
8766 /*
8767  * lock_abort_composite_lock -
8768  *
8769  * return:
8770  *
8771  * comp_lock(in):
8772  */
8773 void
8775 {
8776 #if !defined (SERVER_MODE)
8777  return;
8778 #else /* !SERVER_MODE */
8779  LK_LOCKCOMP *lockcomp;
8780  LK_LOCKCOMP_CLASS *lockcomp_class;
8781 
8782  lockcomp = &(comp_lock->lockcomp);
8783  lockcomp->tran_index = NULL_TRAN_INDEX;
8784  lockcomp->wait_msecs = 0;
8785  while (lockcomp->class_list != NULL)
8786  {
8787  lockcomp_class = lockcomp->class_list;
8788  lockcomp->class_list = lockcomp_class->next;
8789  if (lockcomp_class->inst_oid_space)
8790  {
8791  db_private_free_and_init (NULL, lockcomp_class->inst_oid_space);
8792  }
8793  db_private_free_and_init (NULL, lockcomp_class);
8794  }
8795 
8796 #endif /* !SERVER_MODE */
8797 }
8798 
8799 /*
8800  * lock_is_class_lock_escalated - check if class lock is escalated
8801  *
8802  * return: true if class lock is escalated, false otherwise
8803  *
8804  * class_lock(in): class lock
8805  * lock_escalation(in): lock escalation
8806  */
8807 static bool
8808 lock_is_class_lock_escalated (LOCK class_lock, LOCK lock_escalation)
8809 {
8810 #if !defined (SERVER_MODE)
8811  return false;
8812 #else
8813  if (class_lock < lock_escalation && !IS_WRITE_EXCLUSIVE_LOCK (class_lock))
8814  {
8815  return false;
8816  }
8817 
8818  if (class_lock == IX_LOCK && lock_escalation == S_LOCK)
8819  {
8820  return false;
8821  }
8822 
8823  return true;
8824 #endif
8825 }
8826 
8827 /*
8828  * lock_get_number_object_locks - Number of object lock entries
8829  *
8830  * return:
8831  *
8832  * Note:Find the number of total object lock entries of all
8833  * transactions
8834  */
8835 unsigned int
8837 {
8838 #if defined(SA_MODE)
8839  return 0;
8840 #else
8841  return (unsigned int) lk_Gl.m_obj_hash_table.get_element_count ();
8842 #endif
8843 }
8844 
8845 /*
8846  * lock_start_instant_lock_mode -
8847  *
8848  * return:
8849  *
8850  * tran_index(in):
8851  */
8852 void
8854 {
8855 #if !defined (SERVER_MODE)
8856  return;
8857 #else /* !SERVER_MODE */
8858  LK_TRAN_LOCK *tran_lock;
8859 
8860  tran_lock = &lk_Gl.tran_lock_table[tran_index];
8861  tran_lock->is_instant_duration = true;
8862  return;
8863 #endif /* !SERVER_MODE */
8864 }
8865 
8866 /*
8867  * lock_stop_instant_lock_mode -
8868  *
8869  * return:
8870  *
8871  * tran_index(in):
8872  * need_unlock(in):
8873  */
8874 void
8875 lock_stop_instant_lock_mode (THREAD_ENTRY * thread_p, int tran_index, bool need_unlock)
8876 {
8877 #if !defined (SERVER_MODE)
8878  return;
8879 #else /* !SERVER_MODE */
8880  LK_TRAN_LOCK *tran_lock;
8881  LK_ENTRY *entry_ptr, *next_ptr;
8882  int count;
8883 
8884  tran_lock = &lk_Gl.tran_lock_table[tran_index];
8885 
8886  if (!tran_lock->is_instant_duration)
8887  {
8888  /* if already stopped, return */
8889  return;
8890  }
8891 
8892  /* remove instance locks */
8893  entry_ptr = tran_lock->inst_hold_list;
8894  while (entry_ptr != NULL)
8895  {
8896  assert (tran_index == entry_ptr->tran_index);
8897 
8898  next_ptr = entry_ptr->tran_next;
8899  count = entry_ptr->instant_lock_count;
8900  assert_release (count >= 0);
8901  if (need_unlock)
8902  {
8903  assert_release (count >= 0);
8904  while (count > 0)
8905  {
8906  lock_internal_perform_unlock_object (thread_p, entry_ptr, false, true);
8907  count--;
8908  }
8909  }
8910  entry_ptr->instant_lock_count = 0;
8911  entry_ptr = next_ptr;
8912  }
8913 
8914  /* remove class locks */
8915  entry_ptr = tran_lock->class_hold_list;
8916  while (entry_ptr != NULL)
8917  {
8918  assert (tran_index == entry_ptr->tran_index);
8919 
8920  next_ptr = entry_ptr->tran_next;
8921  count = entry_ptr->instant_lock_count;
8922  assert_release (count >= 0);
8923  if (need_unlock)
8924  {
8925  assert_release (count >= 0);
8926  while (count > 0)
8927  {
8928  lock_internal_perform_unlock_object (thread_p, entry_ptr, false, true);
8929  count--;
8930  }
8931  }
8932  entry_ptr->instant_lock_count = 0;
8933  entry_ptr = next_ptr;
8934  }
8935 
8936  /* remove root class lock */
8937  entry_ptr = tran_lock->root_class_hold;
8938  if (entry_ptr != NULL)
8939  {
8940  assert (tran_index == entry_ptr->tran_index);
8941 
8942  count = entry_ptr->instant_lock_count;
8943  assert_release (count >= 0);
8944  if (need_unlock)
8945  {
8946  assert_release (count >= 0);
8947  while (count > 0)
8948  {
8949  lock_internal_perform_unlock_object (thread_p, entry_ptr, false, true);
8950  count--;
8951  }
8952  }
8953  entry_ptr->instant_lock_count = 0;
8954  }
8955 
8956  /* change locking phase as normal */
8957  tran_lock->is_instant_duration = false;
8958  return;
8959 #endif /* !SERVER_MODE */
8960 }
8961 
8962 /* lock_clear_deadlock_victim:
8963  *
8964  * tran_index(in):
8965  */
8966 void
8968 {
8969 #if !defined (SERVER_MODE)
8970  return;
8971 #else /* !SERVER_MODE */
8972  int rv;
8973 
8974  /* communication with deadlock detector */
8975  if (lk_Gl.TWFG_node[tran_index].checked_by_deadlock_detector)
8976  {
8977  lk_Gl.TWFG_node[tran_index].checked_by_deadlock_detector = false;
8978  }
8979  if (lk_Gl.TWFG_node[tran_index].DL_victim)
8980  {
8981  rv = pthread_mutex_lock (&lk_Gl.DL_detection_mutex);
8982  lk_Gl.TWFG_node[tran_index].DL_victim = false;
8983  pthread_mutex_unlock (&lk_Gl.DL_detection_mutex);
8984 
8985  // reset its tran_abort_reason
8986  lock_set_tran_abort_reason (tran_index, TRAN_NORMAL);
8987  }
8988 #endif /* !SERVER_MODE */
8989 }
8990 
8991 /*
8992  * lock_is_instant_lock_mode -
8993  *
8994  * return:
8995  *
8996  * tran_index(in):
8997  */
8998 bool
9000 {
9001 #if !defined (SERVER_MODE)
9002  return false;
9003 #else /* !SERVER_MODE */
9004  LK_TRAN_LOCK *tran_lock;
9005 
9006  tran_lock = &lk_Gl.tran_lock_table[tran_index];
9007  return tran_lock->is_instant_duration;
9008 #endif /* !SERVER_MODE */
9009 }
9010 
9011 #if defined(SERVER_MODE)
9012 /*
9013  * lock_increment_class_granules () - increment the lock counter for a class
9014  * return : void
9015  * class_entry (in/out) : class entry
9016  *
9017  */
9018 static void
9019 lock_increment_class_granules (LK_ENTRY * class_entry)
9020 {
9021  if (class_entry == NULL || class_entry->res_head->key.type != LOCK_RESOURCE_CLASS)
9022  {
9023  return;
9024  }
9025 
9026  class_entry->ngranules++;
9027  if (class_entry->class_entry != NULL && !OID_IS_ROOTOID (&class_entry->class_entry->res_head->key.oid))
9028  {
9029  /* This is a class in a class hierarchy so increment the number of granules for the superclass */
9030  class_entry->class_entry->ngranules++;
9031  }
9032 }
9033 
9034 /*
9035  * lock_decrement_class_granules () - decrement the lock counter for a class
9036  * return : void
9037  * class_entry (in/out) : class entry
9038  *
9039  */
9040 static void
9041 lock_decrement_class_granules (LK_ENTRY * class_entry)
9042 {
9043  if (class_entry == NULL || class_entry->res_head->key.type != LOCK_RESOURCE_CLASS)
9044  {
9045  return;
9046  }
9047 
9048  class_entry->ngranules--;
9049  if (class_entry->class_entry != NULL && !OID_IS_ROOTOID (&class_entry->class_entry->res_head->key.oid))
9050  {
9051  /* This is a class in a class hierarchy so decrement the number of granules for the superclass */
9052  class_entry->class_entry->ngranules--;
9053  }
9054 }
9055 #endif /* SERVER_MODE */
9056 
9057 /*
9058  * lock_get_lock_holder_tran_index -
9059  *
9060  * return:
9061  * out_buf(out):
9062  * waiter_index(in):
9063  * res (in):
9064  *
9065  * note : caller must free *out_buf.
9066  */
9067 int
9068 lock_get_lock_holder_tran_index (THREAD_ENTRY * thread_p, char **out_buf, int waiter_index, LK_RES * res)
9069 {
9070 #if !defined (SERVER_MODE)
9071  if (res == NULL)
9072  {
9073  return NO_ERROR;
9074  }
9075 
9076  if (out_buf == NULL)
9077  {
9078  assert_release (0);
9079  return ER_FAILED;
9080  }
9081 
9082  *out_buf = NULL;
9083 
9084  return NO_ERROR;
9085 
9086 #else
9087 
9088 #define HOLDER_ENTRY_LENGTH (12)
9089  int rv;
9090  LK_ENTRY *holder, *waiter;
9091  int holder_number = 0;
9092  int buf_size, n, remained_size;
9093  bool is_valid = false; /* validation check */
9094  char *buf, *p;
9095 
9096  if (res == NULL)
9097  {
9098  return NO_ERROR;
9099  }
9100 
9101  if (out_buf == NULL)
9102  {
9103  assert_release (0);
9104  return ER_FAILED;
9105  }
9106 
9107  *out_buf = NULL;
9108 
9109  rv = pthread_mutex_lock (&res->res_mutex);
9110  if (rv != 0)
9111  {
9112  return ER_FAILED;
9113  }
9114 
9115  if (OID_ISNULL (&res->key.oid))
9116  {
9118  return NO_ERROR;
9119  }
9120 
9121  waiter = res->waiter;
9122  while (waiter != NULL)
9123  {
9124  if (waiter->tran_index == waiter_index)
9125  {
9126  is_valid = true;
9127  break;
9128  }
9129  waiter = waiter->next;
9130  }
9131 
9132  if (is_valid == false)
9133  {
9134  holder = res->holder;
9135  while (holder != NULL)
9136  {
9137  if (holder->blocked_mode != NULL_LOCK && holder->tran_index == waiter_index)
9138  {
9139  is_valid = true;
9140  break;
9141  }
9142  holder = holder->next;
9143  }
9144  }
9145 
9146  if (is_valid == false)
9147  {
9148  /* not a valid waiter of this resource */
9150  return NO_ERROR;
9151  }
9152 
9153  holder = res->holder;
9154  while (holder != NULL)
9155  {
9156  if (holder->tran_index != waiter_index)
9157  {
9158  holder_number++;
9159  }
9160  holder = holder->next;
9161  }
9162 
9163  if (holder_number == 0)
9164  {
9166  return NO_ERROR;
9167  }
9168 
9169  buf_size = holder_number * HOLDER_ENTRY_LENGTH + 1;
9170  buf = (char *) malloc (sizeof (char) * buf_size);
9171 
9172  if (buf == NULL)
9173  {
9175 
9177  return ER_OUT_OF_VIRTUAL_MEMORY;
9178  }
9179 
9180  remained_size = buf_size;
9181  p = buf;
9182 
9183  /* write first holder index */
9184  holder = res->holder;
9185  while (holder && holder->tran_index == waiter_index)
9186  {
9187  holder = holder->next;
9188  }
9189 
9190  assert_release (holder != NULL);
9191 
9192  n = snprintf (p, remained_size, "%d", holder->tran_index);
9193  remained_size -= n;
9194  p += n;
9195  assert_release (remained_size >= 0);
9196 
9197  /* write remained holder index */
9198  holder = holder->next;
9199  while (holder != NULL)
9200  {
9201  if (holder->tran_index != waiter_index)
9202  {
9203  n = snprintf (p, remained_size, ", %d", holder->tran_index);
9204  remained_size -= n;
9205  p += n;
9206  assert_release (remained_size >= 0);
9207  }
9208  holder = holder->next;
9209  }
9210 
9211  *out_buf = buf;
9212 
9214 
9215  return NO_ERROR;
9216 #endif
9217 }
9218 
9219 /*
9220  * lock_wait_state_to_string () - Translate lock wait state into string
9221  * representation
9222  * return:
9223  * state(in): lock wait state
9224  */
9225 const char *
9227 {
9228  switch (state)
9229  {
9230  case LOCK_SUSPENDED:
9231  return "SUSPENDED";
9232  case LOCK_RESUMED:
9233  return "RESUMED";
9234  case LOCK_RESUMED_TIMEOUT:
9235  return "RESUMED_TIMEOUT";
9237  return "RESUMED_DEADLOCK_TIMEOUT";
9238  case LOCK_RESUMED_ABORTED:
9239  return "RESUMED_ABORTED";
9241  return "RESUMED_ABORTED_FIRST";
9243  return "RESUMED_ABORTED_OTHER";
9245  return "RESUMED_INTERRUPT";
9246  }
9247  return "UNKNOWN";
9248 }
9249 
9250 /*
9251  * lock dump to event log file (lock timeout, deadlock)
9252  */
9253 
9254 #if defined(SERVER_MODE)
9255 /*
9256  * lock_event_log_tran_locks - dump transaction locks to event log file
9257  * return:
9258  * thread_p(in):
9259  * log_fp(in):
9260  * tran_index(in):
9261  *
9262  * note: for deadlock
9263  */
9264 static void
9265 lock_event_log_tran_locks (THREAD_ENTRY * thread_p, FILE * log_fp, int tran_index)
9266 {
9267  int rv, i, indent = 2;
9268  LK_TRAN_LOCK *tran_lock;
9269  LK_ENTRY *entry;
9270 
9271  assert (csect_check_own (thread_p, CSECT_EVENT_LOG_FILE) == 1);
9272 
9273  fprintf (log_fp, "hold:\n");
9274 
9275  tran_lock = &lk_Gl.tran_lock_table[tran_index];
9276  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
9277 
9278  entry = tran_lock->inst_hold_list;
9279  for (i = 0; entry != NULL && i < MAX_NUM_LOCKS_DUMP_TO_EVENT_LOG; entry = entry->tran_next, i++)
9280  {
9281  assert (tran_index == entry->tran_index);
9282 
9283  fprintf (log_fp, "%*clock: %s", indent, ' ', LOCK_TO_LOCKMODE_STRING (entry->granted_mode));
9284 
9285  SET_EMULATE_THREAD_WITH_LOCK_ENTRY (thread_p, entry);
9286  lock_event_log_lock_info (thread_p, log_fp, entry);
9287 
9288  event_log_sql_string (thread_p, log_fp, &entry->xasl_id, indent);
9289  event_log_bind_values (thread_p, log_fp, tran_index, entry->bind_index_in_tran);
9290 
9291  fprintf (log_fp, "\n");
9292 
9293  CLEAR_EMULATE_THREAD (thread_p);
9294  }
9295 
9296  if (entry != NULL)
9297  {
9298  fprintf (log_fp, "%*c...\n", indent, ' ');
9299  }
9300 
9301  entry = tran_lock->waiting;
9302  if (entry != NULL)
9303  {
9304  fprintf (log_fp, "wait:\n");
9305  fprintf (log_fp, "%*clock: %s", indent, ' ', LOCK_TO_LOCKMODE_STRING (entry->blocked_mode));
9306 
9307  SET_EMULATE_THREAD_WITH_LOCK_ENTRY (thread_p, entry);
9308 
9309  lock_event_log_lock_info (thread_p, log_fp, entry);
9310 
9311  event_log_sql_string (thread_p, log_fp, &entry->xasl_id, indent);
9312  event_log_bind_values (thread_p, log_fp, tran_index, entry->bind_index_in_tran);
9313 
9314  fprintf (log_fp, "\n");
9315  }
9316  CLEAR_EMULATE_THREAD (thread_p);
9317 
9318  pthread_mutex_unlock (&tran_lock->hold_mutex);
9319 }
9320 
9321 /*
9322  * lock_event_log_blocked_lock - dump lock waiter info to event log file
9323  * return:
9324  * thread_p(in):
9325  * log_fp(in):
9326  * entry(in):
9327  *
9328  * note: for lock timeout
9329  */
9330 static void
9331 lock_event_log_blocked_lock (THREAD_ENTRY * thread_p, FILE * log_fp, LK_ENTRY * entry)
9332 {
9333  int indent = 2;
9334 
9335  assert (csect_check_own (thread_p, CSECT_EVENT_LOG_FILE) == 1);
9336 
9337  SET_EMULATE_THREAD_WITH_LOCK_ENTRY (thread_p, entry);
9338 
9339  fprintf (log_fp, "waiter:\n");
9340  event_log_print_client_info (entry->tran_index, indent);
9341 
9342  fprintf (log_fp, "%*clock: %s", indent, ' ', LOCK_TO_LOCKMODE_STRING (entry->blocked_mode));
9343  lock_event_log_lock_info (thread_p, log_fp, entry);
9344 
9345  event_log_sql_string (thread_p, log_fp, &entry->xasl_id, indent);
9346  event_log_bind_values (thread_p, log_fp, entry->tran_index, entry->bind_index_in_tran);
9347 
9348  CLEAR_EMULATE_THREAD (thread_p);
9349 
9350  fprintf (log_fp, "\n");
9351 }
9352 
9353 /*
9354  * lock_event_log_blocking_locks - dump lock blocker info to event log file
9355  * return:
9356  * thread_p(in):
9357  * log_fp(in):
9358  * wait_entry(in):
9359  *
9360  * note: for lock timeout
9361  */
9362 static void
9363 lock_event_log_blocking_locks (THREAD_ENTRY * thread_p, FILE * log_fp, LK_ENTRY * wait_entry)
9364 {
9365  LK_ENTRY *entry;
9366  LK_RES *res_ptr = NULL;
9367  LOCK_COMPATIBILITY compat1, compat2;
9368  int rv, indent = 2;
9369 
9370  assert (csect_check_own (thread_p, CSECT_EVENT_LOG_FILE) == 1);
9371 
9372  res_ptr = wait_entry->res_head;
9373  rv = pthread_mutex_lock (&res_ptr->res_mutex);
9374 
9375  fprintf (log_fp, "blocker:\n");
9376 
9377  for (entry = res_ptr->holder; entry != NULL; entry = entry->next)
9378  {
9379  if (entry == wait_entry)
9380  {
9381  continue;
9382  }
9383 
9384  compat1 = lock_Comp[entry->granted_mode][wait_entry->blocked_mode];
9385  compat2 = lock_Comp[entry->blocked_mode][wait_entry->blocked_mode];
9386  assert (compat1 != LOCK_COMPAT_UNKNOWN && compat2 != LOCK_COMPAT_UNKNOWN);
9387 
9388  if (compat1 == LOCK_COMPAT_NO || compat2 == LOCK_COMPAT_NO)
9389  {
9390  event_log_print_client_info (entry->tran_index, indent);
9391 
9392  fprintf (log_fp, "%*clock: %s", indent, ' ', LOCK_TO_LOCKMODE_STRING (entry->granted_mode));
9393 
9394  SET_EMULATE_THREAD_WITH_LOCK_ENTRY (thread_p, entry);
9395 
9396  lock_event_log_lock_info (thread_p, log_fp, entry);
9397 
9398  event_log_sql_string (thread_p, log_fp, &entry->xasl_id, indent);
9399  event_log_bind_values (thread_p, log_fp, entry->tran_index, entry->bind_index_in_tran);
9400 
9401  CLEAR_EMULATE_THREAD (thread_p);
9402 
9403  fprintf (log_fp, "\n");
9404  }
9405  }
9406 
9407  for (entry = res_ptr->waiter; entry != NULL; entry = entry->next)
9408  {
9409  if (entry == wait_entry)
9410  {
9411  continue;
9412  }
9413 
9414  compat1 = lock_Comp[entry->blocked_mode][wait_entry->blocked_mode];
9415  assert (compat1 != LOCK_COMPAT_UNKNOWN);
9416 
9417  if (compat1 == LOCK_COMPAT_NO)
9418  {
9419  event_log_print_client_info (entry->tran_index, indent);
9420 
9421  fprintf (log_fp, "%*clock: %s", indent, ' ', LOCK_TO_LOCKMODE_STRING (entry->granted_mode));
9422 
9423  SET_EMULATE_THREAD_WITH_LOCK_ENTRY (thread_p, entry);
9424 
9425  lock_event_log_lock_info (thread_p, log_fp, entry);
9426 
9427  event_log_sql_string (thread_p, log_fp, &entry->xasl_id, indent);
9428  event_log_bind_values (thread_p, log_fp, entry->tran_index, entry->bind_index_in_tran);
9429 
9430  CLEAR_EMULATE_THREAD (thread_p);
9431 
9432  fprintf (log_fp, "\n");
9433  }
9434  }
9435 
9436  pthread_mutex_unlock (&res_ptr->res_mutex);
9437 }
9438 
9439 /*
9440  * lock_event_log_lock_info - dump lock resource info to event log file
9441  * return:
9442  * thread_p(in):
9443  * log_fp(in):
9444  * entry(in):
9445  */
9446 static void
9447 lock_event_log_lock_info (THREAD_ENTRY * thread_p, FILE * log_fp, LK_ENTRY * entry)
9448 {
9449  LK_RES *res_ptr;
9450  char *classname;
9451  OID *oid_rr;
9452 
9453  assert (csect_check_own (thread_p, CSECT_EVENT_LOG_FILE) == 1);
9454 
9455  res_ptr = entry->res_head;
9456 
9457  fprintf (log_fp, " (oid=%d|%d|%d", res_ptr->key.oid.volid, res_ptr->key.oid.pageid, res_ptr->key.oid.slotid);
9458 
9459  switch (res_ptr->key.type)
9460  {
9462  fprintf (log_fp, ", table=db_root");
9463  break;
9464 
9465  case LOCK_RESOURCE_CLASS:
9466  oid_rr = oid_get_rep_read_tran_oid ();
9467  if (oid_rr != NULL && OID_EQ (&res_ptr->key.oid, oid_rr))
9468  {
9469  /* This is the generic object for RR transactions */
9470  fprintf (log_fp, ", Generic object for Repeatable Read consistency");
9471  }
9472  else if (!OID_ISTEMP (&res_ptr->key.oid))
9473  {
9474  OID real_class_oid;
9475 
9476  if (OID_IS_VIRTUAL_CLASS_OF_DIR_OID (&res_ptr->key.oid))
9477  {
9478  OID_GET_REAL_CLASS_OF_DIR_OID (&res_ptr->key.oid, &real_class_oid);
9479  }
9480  else
9481  {
9482  COPY_OID (&real_class_oid, &res_ptr->key.oid);
9483  }
9484 
9485  /* never propagate an error to get class name and keep the existing error if any. */
9486  er_stack_push ();
9487  (void) heap_get_class_name (thread_p, &real_class_oid, &classname);
9488  er_stack_pop ();
9489 
9490  if (classname != NULL)
9491  {
9492  fprintf (log_fp, ", table=%s", classname);
9493  free_and_init (classname);
9494  }
9495  }
9496  break;
9497 
9499  if (!OID_ISTEMP (&res_ptr->key.class_oid))
9500  {
9501  OID real_class_oid;
9502 
9504  {
9505  OID_GET_REAL_CLASS_OF_DIR_OID (&res_ptr->key.class_oid, &real_class_oid);
9506  }
9507  else
9508  {
9509  COPY_OID (&real_class_oid, &res_ptr->key.class_oid);
9510  }
9511 
9512  /* never propagate an error to get class name and keep the existing error if any. */
9513  er_stack_push ();
9514  (void) heap_get_class_name (thread_p, &real_class_oid, &classname);
9515  er_stack_pop ();
9516 
9517  if (classname != NULL)
9518  {
9519  fprintf (log_fp, ", table=%s", classname);
9520  free_and_init (classname);
9521  }
9522  }
9523  break;
9524 
9525  default:
9526  break;
9527  }
9528 
9529  fprintf (log_fp, ")\n");
9530 }
9531 
9532 /*
9533  * lock_event_set_tran_wait_entry - save the lock entry tran is waiting
9534  * return:
9535  * entry(in):
9536  */
9537 static void
9538 lock_event_set_tran_wait_entry (int tran_index, LK_ENTRY * entry)
9539 {
9540  LK_TRAN_LOCK *tran_lock;
9541  int rv;
9542 
9543  tran_lock = &lk_Gl.tran_lock_table[tran_index];
9544  rv = pthread_mutex_lock (&tran_lock->hold_mutex);
9545 
9546  tran_lock->waiting = entry;
9547 
9548  if (entry != NULL)
9549  {
9550  lock_event_set_xasl_id_to_entry (tran_index, entry);
9551  }
9552 
9553  pthread_mutex_unlock (&tran_lock->hold_mutex);
9554 }
9555 
9556 /*
9557  * lock_event_set_xasl_id_to_entry - save the xasl id related lock entry
9558  * return:
9559  * entry(in):
9560  */
9561 static void
9562 lock_event_set_xasl_id_to_entry (int tran_index, LK_ENTRY * entry)
9563 {
9564  LOG_TDES *tdes;
9565 
9566  tdes = LOG_FIND_TDES (tran_index);
9567  if (tdes != NULL && !XASL_ID_IS_NULL (&tdes->xasl_id))
9568  {
9570  {
9571  entry->bind_index_in_tran = tdes->num_exec_queries - 1;
9572  }
9573  else
9574  {
9575  entry->bind_index_in_tran = -1;
9576  }
9577 
9578  XASL_ID_COPY (&entry->xasl_id, &tdes->xasl_id);
9579  }
9580  else
9581  {
9582  XASL_ID_SET_NULL (&entry->xasl_id);
9583  entry->bind_index_in_tran = -1;
9584  }
9585 }
9586 #endif /* SERVER_MODE */
9587 
9588 /*
9589  * lock_rep_read_tran - lock the object used in RR transaction with ALTER TABLE
9590  * ADDCOLUMN NOT NULL scenario
9591  * return:
9592  * thread_p(in):
9593  * lock(in): type of lock
9594  * cond_flag
9595  */
9596 int
9597 lock_rep_read_tran (THREAD_ENTRY * thread_p, LOCK lock, int cond_flag)
9598 {
9599 #if !defined (SERVER_MODE)
9600  LK_SET_STANDALONE_XLOCK (lock);
9601  return NO_ERROR;
9602 #else /* !SERVER_MODE */
9603  int tran_index;
9604  int wait_msecs;
9605  OID *rep_read_oid = oid_get_rep_read_tran_oid ();
9606  LK_ENTRY *entry_addr = NULL;
9607 
9608  if (lock == NULL_LOCK)
9609  {
9610  return NO_ERROR;
9611  }
9612 
9613  tran_index = LOG_FIND_THREAD_TRAN_INDEX (thread_p);
9614  if (cond_flag == LK_COND_LOCK) /* conditional request */
9615  {
9616  wait_msecs = LK_FORCE_ZERO_WAIT;
9617  }
9618  else
9619  {
9620  wait_msecs = logtb_find_wait_msecs (tran_index);
9621  }
9622 
9623  if (lock_internal_perform_lock_object (thread_p, tran_index, rep_read_oid, NULL, lock, wait_msecs, &entry_addr,
9624  NULL) != LK_GRANTED)
9625  {
9626  return ER_FAILED;
9627  }
9628 
9629  return NO_ERROR;
9630 #endif
9631 }
9632 
9633 #if defined (SERVER_MODE)
9634 static bool
9635 lock_is_safe_lock_with_page (THREAD_ENTRY * thread_p, LK_ENTRY * entry_ptr)
9636 {
9637  LK_RES *lock_res;
9638  bool is_safe = true;
9639 
9640  lock_res = entry_ptr->res_head;
9641  if (lock_res != NULL)
9642  {
9643  is_safe = false;
9645  {
9646  is_safe = true;
9647  }
9648  else if (lock_res->key.type == LOCK_RESOURCE_CLASS && OID_IS_VIRTUAL_CLASS_OF_DIR_OID (&lock_res->key.oid))
9649  {
9650  is_safe = true;
9651  }
9652  }
9653  return is_safe;
9654 }
9655 #endif /* SERVER_MODE */
9656 
9657 #if defined (SERVER_MODE)
9658 /*
9659  * lock_get_new_entry () - Get new lock entry. Local pool of free entries is
9660  * first used. When this pool is depleted, a new
9661  * entry is claimed from shared list of lock entries.
9662  *
9663  * return : New lock entry.
9664  * tran_index (in) : Transaction index of requester.
9665  * tran_entry (in) : Lock-free transaction entry.
9666  * freelist (in) : Lock-free shared list of entries.
9667  */
9668 static LK_ENTRY *
9669 lock_get_new_entry (int tran_index, LF_TRAN_ENTRY * tran_entry, LF_FREELIST * freelist)
9670 {
9671  LK_TRAN_LOCK *tran_lock = &lk_Gl.tran_lock_table[tran_index];
9672  LK_ENTRY *lock_entry;
9673 
9674  /* Check if local pool has free entries. */
9675  if (tran_lock->lk_entry_pool)
9676  {
9677  assert (tran_lock->lk_entry_pool_count > 0);
9678  lock_entry = tran_lock->lk_entry_pool;
9679  tran_lock->lk_entry_pool = tran_lock->lk_entry_pool->next;
9680  tran_lock->lk_entry_pool_count--;
9681  return lock_entry;
9682  }
9683 
9684  /* Claim from shared freelist. */
9685  return (LK_ENTRY *) lf_freelist_claim (tran_entry, freelist);
9686 }
9687 
9688 /*
9689  * lock_free_entry () - Free lock entry. Local pool has high priority if its
9690  * maximum size is not reached. Otherwise, the entry
9691  * is "retired" to shared list of free lock entries.
9692  *
9693  * return : Error code.
9694  * tran_index (in) : Transaction index.
9695  * tran_entry (in) : Lock-free transaction entry.
9696  * freelist (in) : Lock-free shared list of lock entries.
9697  * lock_entry (in) : Lock entry being freed.
9698  */
9699 static void
9700 lock_free_entry (int tran_index, LF_TRAN_ENTRY * tran_entry, LF_FREELIST * freelist, LK_ENTRY * lock_entry)
9701 {
9702  LK_TRAN_LOCK *tran_lock = &lk_Gl.tran_lock_table[tran_index];
9703 
9704  assert (tran_lock->lk_entry_pool_count >= 0 && tran_lock->lk_entry_pool_count <= LOCK_TRAN_LOCAL_POOL_MAX_SIZE);
9705 
9706  /* "Free" entry to local pool or shared list. */
9707  if (tran_lock->lk_entry_pool_count < LOCK_TRAN_LOCAL_POOL_MAX_SIZE)
9708  {
9709  lock_uninit_entry (lock_entry);
9710  lock_entry->next = tran_lock->lk_entry_pool;
9711  tran_lock->lk_entry_pool = lock_entry;
9712  tran_lock->lk_entry_pool_count++;
9713  }
9714  else
9715  {
9716  lf_freelist_retire (tran_entry, freelist, lock_entry);
9717  }
9718 }
9719 #endif
9720 
9721 #if defined (SERVER_MODE)
9722 /*
9723  * lock_unlock_object_by_isolation - No lock is unlocked/demoted for MVCC tables.
9724  * Shared instance lock on non-MVCC table is unlocked for TRAN_READ_COMMITTED.
9725  *
9726  * return : nothing
9727  * tran_index(in): Transaction index.
9728  * class_oid(in): class oid.
9729  * oid(in): instance oid.
9730  */
9731 static void
9732 lock_unlock_object_by_isolation (THREAD_ENTRY * thread_p, int tran_index, TRAN_ISOLATION isolation,
9733  const OID * class_oid, const OID * oid)
9734 {
9735  assert (class_oid != NULL && oid != NULL);
9736  assert (!OID_ISNULL (class_oid) && !OID_ISNULL (oid));
9737 
9738  if (isolation != TRAN_READ_COMMITTED)
9739  {
9740  return; /* do nothing */
9741  }
9742 
9743  /* The intentional lock on the higher lock granule must be kept. */
9744  if (OID_IS_ROOTOID (oid) || OID_IS_ROOTOID (class_oid))
9745  {
9746  /* Don't release locks on classes. READ COMMITTED isolation is only applied on instances, classes must
9747  * have at least REPEATABLE READ isolation. */
9748  }
9749  else if (mvcc_is_mvcc_disabled_class (class_oid))
9750  {
9751  /* Release S_LOCK after reading object. */
9752  lock_unlock_shared_inst_lock (thread_p, tran_index, oid);
9753  }
9754  else
9755  {
9756  /* MVCC table. READ COMMITTED isolation uses snapshot instead of locks. We don't have to release anything here. */
9757  }
9758 }
9759 
9760 /*
9761  * lock_unlock_inst_locks_of_class_by_isolation - No lock is unlocked/demoted for MVCC tables.
9762  * Shared instance locks on non-MVCC table is unlocked for
9763  * TRAN_READ_COMMITTED.
9764  *
9765  * return : nothing
9766  * tran_index(in): Transaction index.
9767  * class_oid(in): class oid.
9768  */
9769 static void
9770 lock_unlock_inst_locks_of_class_by_isolation (THREAD_ENTRY * thread_p, int tran_index, TRAN_ISOLATION isolation,
9771  const OID * class_oid)
9772 {
9773  assert (class_oid != NULL);
9774  assert (!OID_ISNULL (class_oid));
9775 
9776  if (isolation != TRAN_READ_COMMITTED)
9777  {
9778  return; /* do nothing */
9779  }
9780 
9781  if (mvcc_is_mvcc_disabled_class (class_oid))
9782  {
9783  /* Release S_LOCKs of non-MVCC tables. */
9784  lock_remove_all_inst_locks (thread_p, tran_index, class_oid, S_LOCK);
9785  }
9786  else
9787  {
9788  /* MVCC table. READ COMMITTED isolation uses snapshot instead of locks. We don't have to release anything here. */
9789  }
9790 }
9791 #endif /* SERVER_MODE */
9792 
9793 //
9794 // lock_get_transaction_lock_waiting_threads_mapfunc -
9795 //
9796 // thread_ref (in) : thread entry
9797 // stop_mapper (out) : ignored
9798 // tran_index (in) : transaction index
9799 // tran_lock_waiters (out) : output lock waiter threads belonging to transaction
9800 // count (out) : output thread count
9801 //
9802 static void
9803 lock_get_transaction_lock_waiting_threads_mapfunc (THREAD_ENTRY & thread_ref, bool & stop_mapper, int tran_index,
9804  tran_lock_waiters_array_type & tran_lock_waiters, size_t & count)
9805 {
9806  (void) stop_mapper; // suppress unused parameter warning
9807 
9808  if (thread_ref.tran_index != tran_index)
9809  {
9810  // not the right transaction
9811  return;
9812  }
9813  if (thread_ref.lockwait == NULL)
9814  {
9815  // not a lock waiter
9816  return;
9817  }
9818  tran_lock_waiters[count++] = &thread_ref;
9819 }
9820 
9821 //
9822 // lock_get_transaction_lock_waiting_threads - gather all threads belonging to transaction and waiting for lock
9823 //
9824 // tran_index (in) : transaction index
9825 // tran_lock_waiters (out) : output lock waiter threads belonging to transaction
9826 // count (out) : output thread count
9827 //
9828 static void
9830  size_t & count)
9831 {
9833  tran_lock_waiters, count);
9834 }
bool lock_has_xlock(THREAD_ENTRY *thread_p)
#define MSGCAT_LK_RES_ROOT_CLASS_TYPE
Definition: lock_manager.c:203
size_t css_get_num_request_workers(void)
#define ER_LK_UNILATERALLY_ABORTED
Definition: error_code.h:130
#define OID_IS_VIRTUAL_CLASS_OF_DIR_OID(oidp)
Definition: oid.h:136
void xlock_dump(THREAD_ENTRY *thread_p, FILE *outfp)
bool pgbuf_has_perm_pages_fixed(THREAD_ENTRY *thread_p)
OID * oid_Root_class_oid
Definition: oid.c:73
LOCK_RESOURCE_TYPE type
Definition: lock_manager.h:166
LC_LOCKSET_CLASSOF * classes
Definition: locator.h:303
LK_LOCKCOMP lockcomp
Definition: lock_manager.h:148
cubthread::entry * thread_get_thread_entry_info(void)
#define NO_ERROR
Definition: error_code.h:46
#define MVCC_IS_HEADER_DELID_VALID(rec_header_p)
Definition: mvcc.h:87
#define ER_LK_INVALID_OBJECT_TYPE
Definition: error_code.h:1057
#define MSGCAT_LK_RES_NON2PL_RELEASED_HEAD
Definition: lock_manager.c:212
void er_stack_push(void)
#define MVCC_GET_INSID(header)
Definition: mvcc.h:51
LK_ENTRY * class_lock_ptr
Definition: lock_manager.h:129
#define LF_EM_NOT_USING_MUTEX
Definition: lock_free.h:59
static LK_ENTRY * lock_find_tran_hold_entry(THREAD_ENTRY *thread_p, int tran_index, const OID *oid, bool is_class)
#define BO_IS_SERVER_RESTARTED()
Definition: boot_sr.h:84
void lock_finalize(void)
#define MAX_NUM_LOCKS_DUMP_TO_EVENT_LOG
Definition: lock_manager.c:169
#define ER_LK_ROLLBACK_ON_LOCK_ESCALATION
Definition: error_code.h:1441
size_t css_count_transaction_worker_threads(THREAD_ENTRY *thread_p, int tran_index, int client_id)
#define MSGCAT_LK_RES_BLOCKED_WAITER_HEAD
Definition: lock_manager.c:211
int TRANID
#define MSGCAT_LK_RES_BLOCKED_WAITER_ENTRY
Definition: lock_manager.c:217
#define pthread_mutex_init(a, b)
Definition: area_alloc.c:48
SCAN_CODE heap_get_visible_version(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, int old_chn)
Definition: heap_file.c:24272
static void lock_detect_local_deadlock(THREAD_ENTRY *thread_p)
#define ER_LK_OBJECT_DL_TIMEOUT_CLASS_MSG
Definition: error_code.h:1204
#define ER_LK_LOST_TRANSACTION
Definition: error_code.h:1062
#define MSGCAT_LK_DEADLOCK_ABORT
Definition: lock_manager.c:226
#define ER_FAILED
Definition: error_code.h:47
#define MSGCAT_LK_RES_LOCK_COUNT
Definition: lock_manager.c:208
void event_log_sql_string(THREAD_ENTRY *thread_p, FILE *log_fp, XASL_ID *xasl_id, int indent)
Definition: event_log.c:362
bool mvcc_is_mvcc_disabled_class(const OID *class_oid)
Definition: mvcc.c:616
LOCK total_waiters_mode
Definition: lock_manager.h:179
#define ER_LK_OBJECT_TIMEOUT_SIMPLE_MSG
Definition: error_code.h:131
#define NULL_TRANID
TRAN_ABORT_REASON tran_abort_reason
Definition: log_impl.h:526
#define pthread_mutex_unlock(a)
Definition: area_alloc.c:51
#define MSGCAT_LK_DUMP_TRAN_ISOLATION
Definition: lock_manager.c:222
int lock_reacquire_crash_locks(THREAD_ENTRY *thread_p, LK_ACQUIRED_LOCKS *acqlocks, int tran_index)
void pgbuf_unfix_all(THREAD_ENTRY *thread_p)
Definition: page_buffer.c:2656
#define LK_SET_STANDALONE_XLOCK(lock)
Definition: lock_manager.c:457
unsigned int lock_get_number_object_locks(void)
void lock_notify_isolation_incons(THREAD_ENTRY *thread_p, bool(*fun)(const OID *class_oid, const OID *oid, void *args), void *args)
bool logtb_set_tran_index_interrupt(THREAD_ENTRY *thread_p, int tran_index, bool set)
void thread_sleep(double millisec)
Definition: lock_free.h:63
#define OR_MVCC_FLAG_VALID_INSID
void get_stats(cubperf::stat_value *stats_out)
#define assert_release(e)
Definition: error_manager.h:96
int lock_demote_class_lock(THREAD_ENTRY *thread_p, const OID *oid, LOCK lock, LOCK *ex_lock)
Definition: lock_free.h:120
#define ER_LK_MANY_LOCK_WAIT_TRAN
Definition: error_code.h:1067
int or_mvcc_get_header(RECDES *record, MVCC_REC_HEADER *mvcc_header)
int lock_object(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, int cond_flag)
void lock_abort_composite_lock(LK_COMPOSITE_LOCK *comp_lock)
cubthread::manager * thread_get_manager(void)
#define ER_LK_NOTFOUND_IN_LOCK_HOLDER_LIST
Definition: error_code.h:1058
void lock_dump_acquired(FILE *fp, LK_ACQUIRED_LOCKS *acqlocks)
int lock_rep_read_tran(THREAD_ENTRY *thread_p, LOCK lock, int cond_flag)
struct timeval TSCTIMEVAL
Definition: tsc_timer.h:40
#define OID_SET_NULL(oidp)
Definition: oid.h:85
static FILE * log_fp
Definition: cas_log.c:91
FILE * event_log_start(THREAD_ENTRY *thread_p, const char *event_name)
Definition: event_log.c:222
int logtb_get_number_assigned_tran_indices(void)
void thread_suspend_wakeup_and_unlock_entry(cubthread::entry *thread_p, thread_resume_suspend_status suspended_reason)
LOG_TDES * LOG_FIND_TDES(int tran_index)
Definition: log_impl.h:1095
void tsc_elapsed_time_usec(TSCTIMEVAL *tv, TSC_TICKS end_tick, TSC_TICKS start_tick)
Definition: tsc_timer.c:101
int lock_scan(THREAD_ENTRY *thread_p, const OID *class_oid, int cond_flag, LOCK class_lock)
#define MAX_NTRANS
LC_LOCKSET_REQOBJ * objects
Definition: locator.h:305
int er_errid(void)
void * lf_freelist_claim(LF_TRAN_ENTRY *tran_entry, LF_FREELIST *freelist)
Definition: lock_free.c:751
const char * log_state_string(TRAN_STATE state)
Definition: log_comm.c:125
#define bool
Definition: dbi_compat.h:31
void lock_unlock_object_donot_move_to_non2pl(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock)
#define OID_AS_ARGS(oidp)
Definition: oid.h:39
TRAN_ISOLATION logtb_find_isolation(int tran_index)
#define er_log_debug(...)
int lock_initialize_composite_lock(THREAD_ENTRY *thread_p, LK_COMPOSITE_LOCK *comp_lock)
int logtb_find_log_records_count(int tran_index)
struct lk_acqobj_lock LK_ACQOBJ_LOCK
Definition: lock_manager.h:103
int heap_scancache_end(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7195
#define MSGCAT_LK_RES_UNKNOWN_TYPE
Definition: lock_manager.c:206
#define ER_LK_NOTFOUND_IN_TRAN_HOLD_LIST
Definition: error_code.h:1059
static int lk_Standalone_has_xlock
Definition: lock_manager.c:456
#define MSGCAT_LK_MVCC_INFO
Definition: lock_manager.c:234
#define COPY_OID(dest_oid_ptr, src_oid_ptr)
Definition: oid.h:63
LOCK lock_get_object_lock(const OID *oid, const OID *class_oid)
int xlogtb_reset_wait_msecs(THREAD_ENTRY *thread_p, int wait_msecs)
void THREAD_ENTRY
void lock_remove_object_lock(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock)
static DB_OBJECT * is_class(OID *obj_oid, OID *class_oid)
Definition: compactdb.c:637
LOCK
#define MSGCAT_LK_RES_INSTANCE_TYPE
Definition: lock_manager.c:205
static void lock_get_transaction_lock_waiting_threads_mapfunc(THREAD_ENTRY &thread_ref, bool &stop_mapper, int tran_index, tran_lock_waiters_array_type &tran_lock_waiters, size_t &count)
void port_close_memstream(FILE *fp, char **ptr, size_t *sizeloc)
Definition: porting.c:2220
static bool lock_is_class_lock_escalated(LOCK class_lock, LOCK lock_escalation)
manager * get_manager(void)
#define MSGCAT_LK_RES_CLASS_TYPE
Definition: lock_manager.c:204
#define RECDES_INITIALIZER
void er_set(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
LF_TRAN_SYSTEM obj_lock_ent_Ts
Definition: lock_free.c:48
void lock_unlock_all(THREAD_ENTRY *thread_p)
#define csect_check_own(a, b)
LOCK total_holders_mode
Definition: lock_manager.h:178
#define MSGCAT_LK_DUMP_TRAN_TIMEOUT_PERIOD
Definition: lock_manager.c:224
#define ER_LK_ALLOC_RESOURCE
Definition: error_code.h:1063
#define XASL_ID_COPY(X1, X2)
Definition: xasl.h:562
#define assert(x)
int quit_on_errors
Definition: locator.h:326
int prm_get_integer_value(PARAM_ID prm_id)
#define OID_IS_ROOTOID(oidp)
Definition: oid.h:82
LOCK_COMPATIBILITY lock_Comp[12][12]
Definition: lock_table.c:67
#define MSGCAT_LK_DEADLOCK_ABORT_HDR
Definition: lock_manager.c:225
LK_ENTRY * waiter
Definition: lock_manager.h:181
#define MSGCAT_LK_SUSPEND_TRAN
Definition: lock_manager.c:190
int num_exec_queries
Definition: log_impl.h:529
#define ER_LK_TOTAL_HOLDERS_MODE
Definition: error_code.h:1064
#define ER_OUT_OF_VIRTUAL_MEMORY
Definition: error_code.h:50
#define OID_ISTEMP(oidp)
Definition: oid.h:80
#define MSGCAT_LK_RES_BLOCKED_HOLDER_HEAD
Definition: lock_manager.c:210
lf_tran_entry * thread_get_tran_entry(cubthread::entry *thread_p, int entry_idx)
bool logtb_is_interrupted_tran(THREAD_ENTRY *thread_p, bool clear, bool *continue_checking, int tran_index)
int lf_freelist_retire(LF_TRAN_ENTRY *tran_entry, LF_FREELIST *freelist, void *entry)
Definition: lock_free.c:864
bool css_are_all_request_handlers_suspended(void)
static bool lock_is_local_deadlock_detection_interval_up(void)
void lock_unlock_object(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, bool force)
LK_ENTRY * non2pl
Definition: lock_manager.h:182
DB_TRAN_ISOLATION
Definition: dbtran_def.h:26
static enum scanner_mode mode
bool logtb_is_current_active(THREAD_ENTRY *thread_p)
int lock_hold_object_instant(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock)
int lock_initialize(void)
void event_log_bind_values(THREAD_ENTRY *thread_p, FILE *log_fp, int tran_index, int bind_index)
Definition: event_log.c:405
#define MSGCAT_LK_DUMP_TRAN_IDENTIFIERS
Definition: lock_manager.c:221
void lock_force_thread_timeout_lock(THREAD_ENTRY *thrd)
#define OID_EQ(oidp1, oidp2)
Definition: oid.h:92
#define MSGCAT_LK_DEADLOCK_TIMEOUT
Definition: lock_manager.c:228
static int rv
Definition: area_alloc.c:52
OID * oid_get_rep_read_tran_oid(void)
Definition: oid.c:386
#define LOCK_TO_LOCKMODE_STRING(lock)
void lock_stop_instant_lock_mode(THREAD_ENTRY *thread_p, int tran_index, bool need_unlock)
#define NULL
Definition: freelistheap.h:34
void event_log_end(THREAD_ENTRY *thread_p)
Definition: event_log.c:290
#define MSGCAT_LK_DUMP_TRAN_STATE
Definition: lock_manager.c:223
const size_t DEFAULT_LOCK_WAITING_THREAD_ARRAY_SIZE
Definition: lock_manager.c:467
#define CTIME_MAX
Definition: porting.h:72
#define MSGCAT_LK_RES_RR_TYPE
Definition: lock_manager.c:233
#define MVCC_IS_FLAG_SET(rec_header_p, flags)
Definition: mvcc.h:84
void tsc_getticks(TSC_TICKS *tck)
Definition: tsc_timer.c:81
#define MSGCAT_LK_RES_NON_BLOCKED_HOLDER_ENTRY_WITH_GRANULE
Definition: lock_manager.c:214
if(extra_options)
Definition: dynamic_load.c:958
int logtb_get_current_tran_index(void)
STATIC_INLINE bool perfmon_is_perf_tracking_and_active(int activation_flag) __attribute__((ALWAYS_INLINE))
int lock_subclass(THREAD_ENTRY *thread_p, const OID *subclass_oid, const OID *superclass_oid, LOCK lock, int cond_flag)
int lock_add_composite_lock(THREAD_ENTRY *thread_p, LK_COMPOSITE_LOCK *comp_lock, const OID *oid, const OID *class_oid)
LK_LOCKCOMP_CLASS * next
Definition: lock_manager.h:133
struct lk_res LK_RES
Definition: lock_manager.h:174
#define ER_LK_UNKNOWN_ISOLATION
Definition: error_code.h:1056
#define db_private_free_and_init(thrd, ptr)
Definition: memory_alloc.h:141
int lf_freelist_init(LF_FREELIST *freelist, int initial_blocks, int block_size, LF_ENTRY_DESCRIPTOR *edesc, LF_TRAN_SYSTEM *tran_system)
Definition: lock_free.c:666
void thread_lock_entry(cubthread::entry *thread_p)
bool logtb_has_deadlock_priority(int tran_index)
#define db_private_alloc(thrd, size)
Definition: memory_alloc.h:227
const OID oid_Null_oid
Definition: oid.c:68
LOCK_COMPATIBILITY
int css_get_client_id(THREAD_ENTRY *thread_p)
int count(int &result, const cub_regex_object &reg, const std::string &src, const int position, const INTL_CODESET codeset)
#define MSGCAT_CATALOG_CUBRID
LK_ACQOBJ_LOCK * obj
Definition: lock_manager.h:114
static void lock_victimize_first_thread_mapfunc(THREAD_ENTRY &thread_ref, bool &stop_mapper)
XASL_ID xasl_id
Definition: log_impl.h:522
static void lock_get_transaction_lock_waiting_threads(int tran_index, tran_lock_waiters_array_type &tran_lock_waiters, size_t &count)
#define ER_MNT_WAITING_THREAD
Definition: error_code.h:1226
#define LF_FREELIST_INITIALIZER
Definition: lock_free.h:248
STATIC_INLINE void perfmon_diff_timeval(struct timeval *elapsed, struct timeval *start, struct timeval *end) __attribute__((ALWAYS_INLINE))
void er_stack_pop(void)
TRANID logtb_find_tranid(int tran_index)
void lock_start_instant_lock_mode(int tran_index)
const char * envvar_get(const char *name)
#define NULL_TRAN_INDEX
#define ER_LK_OBJECT_TIMEOUT_CLASS_MSG
Definition: error_code.h:132
void map_entries(Func &&func, Args &&...args)
static void error(const char *msg)
Definition: gencat.c:331
void thread_unlock_entry(cubthread::entry *thread_p)
#define ER_LK_NOTFOUND_IN_TRAN_NON2PL_LIST
Definition: error_code.h:1060
STATIC_INLINE void perfmon_inc_stat(THREAD_ENTRY *thread_p, PERF_STAT_ID psid) __attribute__((ALWAYS_INLINE))
const char * log_isolation_string(TRAN_ISOLATION isolation)
Definition: log_comm.c:176
#define ER_INTERRUPTED
Definition: error_code.h:51
static bool lock_force_timeout_expired_wait_transactions(void *thrd_entry)
LOG_TDES * LOG_FIND_CURRENT_TDES(THREAD_ENTRY *thread_p=NULL)
Definition: log_impl.h:1115
#define LOG_FIND_THREAD_TRAN_INDEX(thrd)
Definition: perf_monitor.h:158
#define MSGCAT_LK_RESUME_TRAN
Definition: lock_manager.c:191
#define ARG_FILE_LINE
Definition: error_manager.h:44
void lock_unlock_classes_lock_hint(THREAD_ENTRY *thread_p, LC_LOCKHINT *lockhint)
#define MSGCAT_LK_RES_BLOCKED_HOLDER_ENTRY
Definition: lock_manager.c:215
void destroy_daemon(daemon *&daemon_arg)
void event_log_print_client_info(int tran_index, int indent)
Definition: event_log.c:335
#define MSGCAT_SET_LOCK
#define ER_LK_NOTENOUGH_ACTIVE_THREADS
Definition: error_code.h:817
float prm_get_float_value(PARAM_ID prm_id)
int lock_get_lock_holder_tran_index(THREAD_ENTRY *thread_p, char **out_buf, int waiter_index, LK_RES *res)
void lock_demote_read_class_lock_for_checksumdb(THREAD_ENTRY *thread_p, int tran_index, const OID *class_oid)
int lock_object_wait_msecs(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, int cond_flag, int wait_msecs)
#define free_and_init(ptr)
Definition: memory_alloc.h:147
#define strlen(s1)
Definition: intl_support.c:43
#define ER_LK_BAD_ARGUMENT
Definition: error_code.h:1055
int oid_compare(const void *a, const void *b)
Definition: oid.c:243
int lock_classes_lock_hint(THREAD_ENTRY *thread_p, LC_LOCKHINT *lockhint)
#define MVCC_GET_DELID(header)
Definition: mvcc.h:57
void lf_freelist_destroy(LF_FREELIST *freelist)
Definition: lock_free.c:711
bool prm_get_bool_value(PARAM_ID prm_id)
int logtb_find_client_name_host_pid(int tran_index, const char **client_prog_name, const char **client_user_name, const char **client_host_name, int *client_pid)
#define MSGCAT_LK_DUMP_LOCK_TABLE
Definition: lock_manager.c:220
unsigned int nobj_locks
Definition: lock_manager.h:115
TRAN_STATE logtb_find_state(int tran_index)
void er_clear(void)
#define IS_WRITE_EXCLUSIVE_LOCK(lock)
LOCK lock_Conv[12][12]
Definition: lock_table.c:179
#define MSGCAT_LK_NEWLINE
Definition: lock_manager.c:189
const size_t LOG_USERNAME_MAX
int i
Definition: dynamic_load.c:954
char * msgcat_message(int cat_id, int set_id, int msg_id)
LK_ENTRY * lock_get_class_lock(THREAD_ENTRY *thread_p, const OID *class_oid)
#define XASL_ID_IS_NULL(X)
Definition: xasl.h:560
#define MSGCAT_LK_RES_NON_BLOCKED_HOLDER_HEAD
Definition: lock_manager.c:209
#define MSGCAT_LK_RES_TOTAL_MODE
Definition: lock_manager.c:207
LK_ENTRY * holder
Definition: lock_manager.h:180
#define ER_LK_OBJECT_DL_TIMEOUT_CLASSOF_MSG
Definition: error_code.h:1205
LK_LOCKCOMP_CLASS * class_list
Definition: lock_manager.h:142
int lock_has_lock_on_object(const OID *oid, const OID *class_oid, LOCK lock)
LK_ENTRY * root_class_ptr
Definition: lock_manager.h:141
TRAN_STATE
Definition: log_comm.h:36
#define MSGCAT_LK_RES_BLOCKED_HOLDER_ENTRY_WITH_GRANULE
Definition: lock_manager.c:216
#define pthread_mutex_lock(a)
Definition: area_alloc.c:50
LK_RES * waiting_for_res
Definition: log_impl.h:523
std::array< THREAD_ENTRY *, DEFAULT_LOCK_WAITING_THREAD_ARRAY_SIZE > tran_lock_waiters_array_type
Definition: lock_manager.c:469
#define MSGCAT_LK_RES_OID
Definition: lock_manager.c:202
#define ER_LK_LOCK_WAITER_ONLY
Definition: error_code.h:1066
void thread_wakeup(cubthread::entry *thread_p, thread_resume_suspend_status resume_reason)
int heap_scancache_quick_start(HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7040
#define OID_ISNULL(oidp)
Definition: oid.h:81
bool logtb_is_active(THREAD_ENTRY *thread_p, TRANID trid)
LOCK reqobj_class_lock
Definition: locator.h:293
static void lock_unlock_object_lock_internal(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, int release_flag, int move_to_non2pl)
int lock_finalize_composite_lock(THREAD_ENTRY *thread_p, LK_COMPOSITE_LOCK *comp_lock)
daemon * create_daemon(const looper &looper_arg, entry_task *exec_p, const char *daemon_name="", entry_manager *context_manager=NULL)
#define CUB_MAXHOSTNAMELEN
Definition: porting.h:379
void lock_unlock_objects_lock_set(THREAD_ENTRY *thread_p, LC_LOCKSET *lockset)
int num_reqobjs_processed
Definition: locator.h:289
#define ER_LK_ABORT_TRAN_TWICE
Definition: error_code.h:1061
#define MSGCAT_LK_RES_NON_BLOCKED_HOLDER_ENTRY
Definition: lock_manager.c:213
int heap_get_class_name(THREAD_ENTRY *thread_p, const OID *class_oid, char **class_name)
Definition: heap_file.c:9328
#define db_private_realloc(thrd, ptr, size)
Definition: memory_alloc.h:231
#define XASL_ID_SET_NULL(X)
Definition: xasl.h:546
#define ER_LK_OBJECT_TIMEOUT_CLASSOF_MSG
Definition: error_code.h:133
#define PEEK
Definition: file_io.h:74
#define MONITOR_WAITING_THREAD(elapsed)
callable_task< entry > entry_callable_task
#define MSGCAT_LK_DEADLOCK_TIMEOUT_HDR
Definition: lock_manager.c:227
#define MSGCAT_LK_RES_NON2PL_RELEASED_ENTRY
Definition: lock_manager.c:218
Definition: lock_manager.h:79
int num_classes
Definition: locator.h:324
bool lock_is_waiting_transaction(int tran_index)
#define LF_EM_USING_MUTEX
Definition: lock_free.h:60
int logtb_find_wait_msecs(int tran_index)
bool lock_is_instant_lock_mode(int tran_index)
#define ER_LK_DEADLOCK_CYCLE_DETECTED
Definition: error_code.h:1276
LK_RES_KEY key
Definition: lock_manager.h:177
enum tran_abort_reason TRAN_ABORT_REASON
Definition: log_impl.h:360
const char ** p
Definition: dynamic_load.c:945
#define ER_LK_STRANGE_LOCK_WAIT
Definition: error_code.h:1068
LC_LOCKHINT_CLASS * classes
Definition: locator.h:329
pthread_mutex_t res_mutex
Definition: lock_manager.h:183
void lock_clear_deadlock_victim(int tran_index)
void lock_unlock_all_shared_get_all_exclusive(THREAD_ENTRY *thread_p, LK_ACQUIRED_LOCKS *acqlocks)
#define ER_LK_OBJECT_DL_TIMEOUT_SIMPLE_MSG
Definition: error_code.h:1203
#define ER_LK_DEADLOCK_SPECIFIC_INFO
Definition: error_code.h:1358
const char * lock_wait_state_to_string(int state)
#define OID_GET_REAL_CLASS_OF_DIR_OID(virtual_oidp, class_oidp)
Definition: oid.h:150
#define MAX_NUM_EXEC_QUERY_HISTORY
Definition: log_impl.h:275
#define pthread_mutex_destroy(a)
Definition: area_alloc.c:49
FILE * port_open_memstream(char **ptr, size_t *sizeloc)
Definition: porting.c:2198
LK_RES * hash_next
Definition: lock_manager.h:184
LF_TRAN_SYSTEM obj_lock_res_Ts
Definition: lock_free.c:47
LOCK_WAIT_STATE
Definition: lock_manager.c:172