critical_section.c
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /*
20  * critical_section.c - critical section support
21  */
22 
23 #ident "$Id$"
24 
25 #include "critical_section.h"
26 
27 #include "connection_defs.h"
28 #include "connection_error.h"
29 #include "config.h"
31 #include "dbtype.h"
32 #include "numeric_opfunc.h"
33 #include "perf_monitor.h"
34 #include "porting.h"
35 #include "resource_tracker.hpp"
36 #include "show_scan.h"
37 #include "system_parameter.h"
38 #include "thread_entry.hpp"
39 #include "thread_manager.hpp"
40 #include "tsc_timer.h"
41 
42 #include <stdio.h>
43 #include <assert.h>
44 
45 // belongs to cubsync
46 using namespace cubsync;
47 
48 #undef csect_initialize_critical_section
49 #undef csect_finalize_critical_section
50 #undef csect_enter
51 #undef csect_enter_as_reader
52 #undef csect_exit
53 #undef csect_enter_critical_section
54 #undef csect_exit_critical_section
55 
56 #define TOTAL_AND_MAX_TIMEVAL(total, max, elapsed) \
57  do \
58  { \
59  (total).tv_sec += elapsed.tv_sec; \
60  (total).tv_usec += elapsed.tv_usec; \
61  (total).tv_sec += (total).tv_usec / 1000000; \
62  (total).tv_usec %= 1000000; \
63  if (((max).tv_sec < elapsed.tv_sec) || ((max).tv_sec == elapsed.tv_sec && (max).tv_usec < elapsed.tv_usec)) \
64  { \
65  (max).tv_sec = elapsed.tv_sec; \
66  (max).tv_usec = elapsed.tv_usec; \
67  } \
68  } \
69  while (0)
70 
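/*
 * A minimal worked example of TOTAL_AND_MAX_TIMEVAL (illustrative only):
 * the macro accumulates 'elapsed' into 'total', carrying microseconds over
 * into seconds, and raises 'max' whenever 'elapsed' is the largest sample
 * seen so far.
 *
 *   struct timeval total = { 0, 900000 };   // 0.9 s accumulated so far
 *   struct timeval max = { 0, 0 };
 *   struct timeval elapsed = { 0, 200000 }; // new 0.2 s sample
 *
 *   TOTAL_AND_MAX_TIMEVAL (total, max, elapsed);
 *   // total is now { 1, 100000 }  (1.1 s, usec carried into sec)
 *   // max   is now { 0, 200000 }  (largest sample so far)
 */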
71 /* define critical section array */
72 SYNC_CRITICAL_SECTION csectgl_Critical_sections[CRITICAL_SECTION_COUNT];
73 
74 static const char *csect_Names[] = {
75  "WFG",
76  "LOG",
77  "LOCATOR_CLASSNAME_TABLE",
78  "QPROC_QUERY_TABLE",
79  "QPROC_LIST_CACHE",
80  "DISK_CHECK",
81  "CNV_FMT_LEXER",
82  "HEAP_CHNGUESS",
83  "TRAN_TABLE",
84  "CT_OID_TABLE",
85  "HA_SERVER_STATE",
86  "COMPACTDB_ONE_INSTANCE",
87  "ACL",
88  "PARTITION_CACHE",
89  "EVENT_LOG_FILE",
90  "LOG_ARCHIVE",
91  "ACCESS_STATUS"
92 };
93 
94 static const char *
95 csect_name (SYNC_CRITICAL_SECTION * c)
96 {
97  return c != NULL ? c->name : "UNKNOWN";
98 }
99 
100 const char *
101 csect_name_at (int cs_index)
102 {
103  assert (cs_index >= 0 && cs_index < CRITICAL_SECTION_COUNT);
104  return csect_Names[cs_index];
105 }
106 
107 /*
108  * Synchronization Primitives Statistics Monitor
109  */
110 
111 #define NUM_ENTRIES_OF_SYNC_STATS_BLOCK 256
112 
113 typedef struct sync_stats_chunk SYNC_STATS_CHUNK;
114 struct sync_stats_chunk
115 {
116  SYNC_STATS block[NUM_ENTRIES_OF_SYNC_STATS_BLOCK];
117  SYNC_STATS_CHUNK *next;
118  int hint_free_entry_idx;
119  int num_entry_in_use;
120 };
121 
122 SYNC_STATS_CHUNK sync_Stats;
123 pthread_mutex_t sync_Stats_lock;
124 
125 static int csect_wait_on_writer_queue (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int timeout,
126  struct timespec *to);
127 static int csect_wait_on_promoter_queue (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int timeout,
128  struct timespec *to);
129 static int csect_wakeup_waiting_writer (SYNC_CRITICAL_SECTION * csect);
130 static int csect_wakeup_waiting_promoter (SYNC_CRITICAL_SECTION * csect);
131 static int csect_demote_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int wait_secs);
132 static int csect_promote_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int wait_secs);
133 static int csect_check_own_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect);
134 
135 static SYNC_STATS_CHUNK *sync_allocate_sync_stats_chunk (void);
136 static int sync_initialize_sync_stats_chunk (SYNC_STATS_CHUNK * sync_stats_chunk);
137 static SYNC_STATS *sync_consume_sync_stats_from_pool (SYNC_STATS_CHUNK * sync_stats_chunk, int idx,
138  SYNC_PRIMITIVE_TYPE sync_prim_type, const char *name);
139 static int sync_return_sync_stats_to_pool (SYNC_STATS_CHUNK * sync_stats_chunk, int idx);
140 static SYNC_STATS *sync_allocate_sync_stats (SYNC_PRIMITIVE_TYPE sync_prim_type, const char *name);
141 static int sync_deallocate_sync_stats (SYNC_STATS * stats);
142 static void sync_reset_stats_metrics (SYNC_STATS * stats);
143 
144 /*
145  * csect_initialize_critical_section() - initialize critical section
146  * return: 0 if success, or error code
147  * csect(in): critical section
148  */
149 int
150 csect_initialize_critical_section (SYNC_CRITICAL_SECTION * csect, const char *name)
151 {
152  int error_code = NO_ERROR;
153 
154  assert (csect != NULL);
155 
156  csect->cs_index = -1;
157 
158  error_code = pthread_mutex_init (&csect->lock, NULL);
159 
160  if (error_code != NO_ERROR)
161  {
163  assert (0);
165  }
166 
167  error_code = pthread_cond_init (&csect->readers_ok, NULL);
168  if (error_code != NO_ERROR)
169  {
171  assert (0);
173  }
174 
175  csect->name = name;
176  csect->rwlock = 0;
177  csect->owner = thread_id_t ();
178  csect->tran_index = -1;
179  csect->waiting_readers = 0;
180  csect->waiting_writers = 0;
181  csect->waiting_writers_queue = NULL;
182  csect->waiting_promoters_queue = NULL;
183 
185  if (csect->stats == NULL)
186  {
187  ASSERT_ERROR_AND_SET (error_code);
188  return error_code;
189  }
190 
191  return NO_ERROR;
192 }
193 
194 /*
195  * csect_finalize_critical_section() - free critical section
196  * return: 0 if success, or error code
197  * csect(in): critical section
198  */
199 int
200 csect_finalize_critical_section (SYNC_CRITICAL_SECTION * csect)
201 {
202  int error_code = NO_ERROR;
203 
204  error_code = pthread_mutex_destroy (&csect->lock);
205  if (error_code != NO_ERROR)
206  {
208  assert (0);
210  }
211 
212  error_code = pthread_cond_destroy (&csect->readers_ok);
213  if (error_code != NO_ERROR)
214  {
216  assert (0);
218  }
219 
220  csect->name = NULL;
221  csect->rwlock = 0;
222  csect->owner = thread_id_t ();
223  csect->tran_index = -1;
224  csect->waiting_readers = 0;
225  csect->waiting_writers = 0;
226  csect->waiting_writers_queue = NULL;
227  csect->waiting_promoters_queue = NULL;
228 
229  error_code = sync_deallocate_sync_stats (csect->stats);
230  csect->stats = NULL;
231 
232  return NO_ERROR;
233 }
234 
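/*
 * Usage sketch (illustrative; the variable and section name below are
 * hypothetical): a module-local critical section is initialized once and
 * finalized when it is no longer needed.
 *
 *   static SYNC_CRITICAL_SECTION my_Csect;
 *
 *   if (csect_initialize_critical_section (&my_Csect, "MY_CSECT") != NO_ERROR)
 *     {
 *       // handle the error
 *     }
 *   // ... use it via csect_enter_critical_section () / csect_exit_critical_section () ...
 *   csect_finalize_critical_section (&my_Csect);
 */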
235 /*
236  * csect_initialize_static_critical_sections() - initialize all the critical section lock structures
237  * return: 0 if success, or error code
238  */
239 int
240 csect_initialize_static_critical_sections (void)
241 {
242  SYNC_CRITICAL_SECTION *csect;
243  int i, error_code = NO_ERROR;
244 
245  for (i = 0; i < CRITICAL_SECTION_COUNT; i++)
246  {
247  csect = &csectgl_Critical_sections[i];
248 
249  error_code = csect_initialize_critical_section (csect, csect_Names[i]);
250  if (error_code != NO_ERROR)
251  {
252  break;
253  }
254 
255  csect->cs_index = i;
256  }
257 
258  return error_code;
259 }
260 
261 /*
262  * csect_finalize_static_critical_sections() - free all the critical section lock structures
263  * return: 0 if success, or error code
264  */
265 int
266 csect_finalize_static_critical_sections (void)
267 {
268  SYNC_CRITICAL_SECTION *csect;
269  int i, error_code = NO_ERROR;
270 
271  for (i = 0; i < CRITICAL_SECTION_COUNT; i++)
272  {
273  csect = &csectgl_Critical_sections[i];
274  assert (csect->cs_index == i);
275 
276  error_code = csect_finalize_critical_section (csect);
277  if (error_code != NO_ERROR)
278  {
279  break;
280  }
281 
282  csect->cs_index = -1;
283  }
284 
285  return error_code;
286 }
287 
288 static int
289 csect_wait_on_writer_queue (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int timeout, struct timespec *to)
290 {
291  THREAD_ENTRY *prev_thread_p = NULL;
292  int err = NO_ERROR;
293 
294  thread_p->next_wait_thrd = NULL;
295 
296  if (csect->waiting_writers_queue == NULL)
297  {
298  /* nobody is waiting. */
299  csect->waiting_writers_queue = thread_p;
300  }
301  else
302  {
303  /* waits on the rear of the queue */
304  prev_thread_p = csect->waiting_writers_queue;
305  while (prev_thread_p->next_wait_thrd != NULL)
306  {
307  assert (prev_thread_p != thread_p);
308 
309  prev_thread_p = prev_thread_p->next_wait_thrd;
310  }
311 
312  assert (prev_thread_p != thread_p);
313  prev_thread_p->next_wait_thrd = thread_p;
314  }
315 
316  while (true)
317  {
318  err = thread_suspend_with_other_mutex (thread_p, &csect->lock, timeout, to, THREAD_CSECT_WRITER_SUSPENDED);
319 
320  if (thread_p->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT && thread_p->interrupted)
321  {
322  /* check if i'm in the queue */
323  prev_thread_p = csect->waiting_writers_queue;
324  while (prev_thread_p != NULL)
325  {
326  if (prev_thread_p == thread_p)
327  {
328  break;
329  }
330  prev_thread_p = prev_thread_p->next_wait_thrd;
331  }
332 
333  if (prev_thread_p != NULL)
334  {
335  continue;
336  }
337 
338  /* If I was woken up by the shutdown thread and I am no longer in the writers queue, I proceed anyway,
339  * assuming the following happened in order:
340  * 1. The critical section holder removed me from the writers queue and woke me up (the mutex was not
341  * released yet). 2. I woke up and then waited for the mutex. 3. The shutdown thread woke me up with a
342  * server shutdown command (resume_status was changed to THREAD_RESUME_DUE_TO_INTERRUPT). 4. The holder
343  * released the mutex. 5. I woke up holding the mutex. At this point my resume_status is
344  * THREAD_RESUME_DUE_TO_INTERRUPT and I am not in the writers queue. */
345  }
346  else if (thread_p->resume_status != THREAD_CSECT_WRITER_RESUMED)
347  {
348  assert (0);
349  }
350 
351  break;
352  }
353 
354  return err;
355 }
356 
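/*
 * Note on the wait queues (sketch): waiting_writers_queue and
 * waiting_promoters_queue are FIFO lists chained through
 * THREAD_ENTRY::next_wait_thrd. The append above is equivalent to:
 *
 *   thread_p->next_wait_thrd = NULL;
 *   if (csect->waiting_writers_queue == NULL)
 *     {
 *       csect->waiting_writers_queue = thread_p;   // first waiter
 *     }
 *   else
 *     {
 *       THREAD_ENTRY *tail = csect->waiting_writers_queue;
 *       while (tail->next_wait_thrd != NULL)
 *         {
 *           tail = tail->next_wait_thrd;           // find the tail
 *         }
 *       tail->next_wait_thrd = thread_p;
 *     }
 *
 * csect_wakeup_waiting_writer () pops from the head, so waiters are resumed
 * in arrival order.
 */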
357 static int
358 csect_wait_on_promoter_queue (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int timeout, struct timespec *to)
359 {
360  THREAD_ENTRY *prev_thread_p = NULL;
361  int err = NO_ERROR;
362 
363  thread_p->next_wait_thrd = NULL;
364 
365  if (csect->waiting_promoters_queue == NULL)
366  {
367  /* nobody is waiting. */
368  csect->waiting_promoters_queue = thread_p;
369  }
370  else
371  {
372  /* waits on the rear of the queue */
373  prev_thread_p = csect->waiting_promoters_queue;
374  while (prev_thread_p->next_wait_thrd != NULL)
375  {
376  assert (prev_thread_p != thread_p);
377 
378  prev_thread_p = prev_thread_p->next_wait_thrd;
379  }
380 
381  assert (prev_thread_p != thread_p);
382  prev_thread_p->next_wait_thrd = thread_p;
383  }
384 
385  while (1)
386  {
387  err = thread_suspend_with_other_mutex (thread_p, &csect->lock, timeout, to, THREAD_CSECT_PROMOTER_SUSPENDED);
388 
389  if (thread_p->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT && thread_p->interrupted)
390  {
391  /* check if i'm in the queue */
392  prev_thread_p = csect->waiting_promoters_queue;
393  while (prev_thread_p != NULL)
394  {
395  if (prev_thread_p == thread_p)
396  {
397  break;
398  }
399  prev_thread_p = prev_thread_p->next_wait_thrd;
400  }
401 
402  if (prev_thread_p != NULL)
403  {
404  continue;
405  }
406 
407  /* If I was woken up by the shutdown thread and I am no longer in the promoters queue, I proceed anyway,
408  * assuming the following happened in order:
409  * 1. The critical section holder removed me from the promoters queue and woke me up (the mutex was not
410  * released yet). 2. I woke up and then waited for the mutex. 3. The shutdown thread woke me up with a
411  * server shutdown command (resume_status was changed to THREAD_RESUME_DUE_TO_INTERRUPT). 4. The holder
412  * released the mutex. 5. I woke up holding the mutex. At this point my resume_status is
413  * THREAD_RESUME_DUE_TO_INTERRUPT and I am not in the promoters queue. */
414  }
415  else if (thread_p->resume_status != THREAD_CSECT_PROMOTER_RESUMED)
416  {
417  assert (0);
418  }
419 
420  break;
421  }
422 
423  return err;
424 }
425 
426 static int
427 csect_wakeup_waiting_writer (SYNC_CRITICAL_SECTION * csect)
428 {
429  THREAD_ENTRY *waiting_thread_p = NULL;
430  int error_code = NO_ERROR;
431 
432  waiting_thread_p = csect->waiting_writers_queue;
433 
434  if (waiting_thread_p != NULL)
435  {
436  csect->waiting_writers_queue = waiting_thread_p->next_wait_thrd;
437  waiting_thread_p->next_wait_thrd = NULL;
438 
439  thread_wakeup (waiting_thread_p, THREAD_CSECT_WRITER_RESUMED);
440  }
441 
442  return error_code;
443 }
444 
445 static int
446 csect_wakeup_waiting_promoter (SYNC_CRITICAL_SECTION * csect)
447 {
448  THREAD_ENTRY *waiting_thread_p = NULL;
449  int error_code = NO_ERROR;
450 
451  waiting_thread_p = csect->waiting_promoters_queue;
452 
453  if (waiting_thread_p != NULL)
454  {
455  csect->waiting_promoters_queue = waiting_thread_p->next_wait_thrd;
456  waiting_thread_p->next_wait_thrd = NULL;
457 
458  thread_wakeup (waiting_thread_p, THREAD_CSECT_PROMOTER_RESUMED);
459  }
460 
461  return error_code;
462 }
463 
464 /*
465  * csect_enter_critical_section() - lock critical section
466  * return: 0 if success, or error code
467  * csect(in): critical section
468  * wait_secs(in): timeout second
469  */
470 int
471 csect_enter_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int wait_secs)
472 {
473  int error_code = NO_ERROR, r;
474  TSC_TICKS start_tick, end_tick;
475  TSCTIMEVAL tv_diff;
476 
477  TSC_TICKS wait_start_tick, wait_end_tick;
478  TSCTIMEVAL wait_tv_diff;
479 
480  assert (csect != NULL);
481 
482  if (thread_p == NULL)
483  {
484  thread_p = thread_get_thread_entry_info ();
485  }
486 
487  csect->stats->nenter++;
488 
489  tsc_getticks (&start_tick);
490 
491  error_code = pthread_mutex_lock (&csect->lock);
492  if (error_code != NO_ERROR)
493  {
495  assert (0);
497  }
498 
499  while (csect->rwlock != 0 || csect->owner != thread_id_t ())
500  {
501  if (csect->rwlock < 0 && csect->owner == thread_p->get_id ())
502  {
503  /*
504  * I am holding the csect, and reenter it again as writer.
505  * Note that rwlock will be decremented.
506  */
507  csect->stats->nreenter++;
508  break;
509  }
510  else
511  {
512  if (wait_secs == INF_WAIT)
513  {
514  csect->waiting_writers++;
515  csect->stats->nwait++;
516 
517  if (thread_p->event_stats.trace_slow_query == true)
518  {
519  tsc_getticks (&wait_start_tick);
520  }
521 
522  error_code = csect_wait_on_writer_queue (thread_p, csect, INF_WAIT, NULL);
523  if (thread_p->event_stats.trace_slow_query == true)
524  {
525  tsc_getticks (&wait_end_tick);
526  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
527  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
528  }
529 
530  csect->waiting_writers--;
531  if (error_code != NO_ERROR)
532  {
533  r = pthread_mutex_unlock (&csect->lock);
534  if (r != NO_ERROR)
535  {
537  assert (0);
539  }
540  assert (0);
543  }
544  if (csect->owner != thread_id_t () && csect->waiting_writers > 0)
545  {
546  /*
547  * There's one waiting to be promoted.
548  * Note that 'owner' was not reset while demoting.
549  * I have to yield to the waiter
550  */
551  error_code = csect_wakeup_waiting_promoter (csect);
552  if (error_code != NO_ERROR)
553  {
554  r = pthread_mutex_unlock (&csect->lock);
555  if (r != NO_ERROR)
556  {
558  assert (0);
560  }
561  assert (0);
564  }
565  continue;
566  }
567  }
568  else if (wait_secs > 0)
569  {
570  struct timespec to;
571  to.tv_sec = time (NULL) + wait_secs;
572  to.tv_nsec = 0;
573 
574  csect->waiting_writers++;
575 
576  if (thread_p->event_stats.trace_slow_query == true)
577  {
578  tsc_getticks (&wait_start_tick);
579  }
580 
581  error_code = csect_wait_on_writer_queue (thread_p, csect, NOT_WAIT, &to);
582  if (thread_p->event_stats.trace_slow_query == true)
583  {
584  tsc_getticks (&wait_end_tick);
585  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
586  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
587  }
588 
589  csect->waiting_writers--;
590  if (error_code != NO_ERROR)
591  {
592  r = pthread_mutex_unlock (&csect->lock);
593  if (r != NO_ERROR)
594  {
596  assert (0);
598  }
599  if (error_code != ER_CSS_PTHREAD_COND_WAIT)
600  {
602  assert (0);
604  }
605  return error_code;
606  }
607  }
608  else
609  {
610  error_code = pthread_mutex_unlock (&csect->lock);
611  if (error_code != NO_ERROR)
612  {
614  assert (0);
616  }
617  return ETIMEDOUT;
618  }
619  }
620  }
621 
622  /* rwlock will be < 0. It denotes that a writer owns the csect. */
623  csect->rwlock--;
624 
625  /* record that I am the writer of the csect. */
626  csect->owner = thread_p->get_id ();
627  csect->tran_index = thread_p->tran_index;
628 
629  tsc_getticks (&end_tick);
630  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
631  TOTAL_AND_MAX_TIMEVAL (csect->stats->total_elapsed, csect->stats->max_elapsed, tv_diff);
632 
633  error_code = pthread_mutex_unlock (&csect->lock);
634  if (error_code != NO_ERROR)
635  {
637  assert (0);
639  }
640 
641  if (MONITOR_WAITING_THREAD (tv_diff))
642  {
643  if (csect->cs_index > 0)
644  {
647  }
649  "csect_enter_critical_section_as_reader: %6d.%06d"
650  " %s total_enter %d ntotal_elapsed %d max_elapsed %d.%06d total_elapsed %d.06d\n", tv_diff.tv_sec,
651  tv_diff.tv_usec, csect_name (csect), csect->stats->nenter, csect->stats->nwait,
652  csect->stats->max_elapsed.tv_sec, csect->stats->max_elapsed.tv_usec,
653  csect->stats->total_elapsed.tv_sec, csect->stats->total_elapsed.tv_usec);
654  }
655 
656  thread_p->get_csect_tracker ().on_enter_as_writer (csect->cs_index);
657  return NO_ERROR;
658 }
659 
660 /*
661  * csect_enter() - lock out other threads from concurrent execution
662  * through a critical section of code
663  * return: 0 if success, or error code
664  * cs_index(in): identifier of the section to lock
665  * wait_secs(in): timeout second
666  *
667  * Note: locks the critical section, or suspends the thread until the critical
668  * section has been freed by another thread
669  */
670 int
671 csect_enter (THREAD_ENTRY * thread_p, int cs_index, int wait_secs)
672 {
673  SYNC_CRITICAL_SECTION *csect;
674 
675  assert (cs_index >= 0);
676  assert (cs_index < CRITICAL_SECTION_COUNT);
677 
678  csect = &csectgl_Critical_sections[cs_index];
679 #if defined (SERVER_MODE)
680  assert (csect->cs_index == cs_index);
681 #endif
682 
683  return csect_enter_critical_section (thread_p, csect, wait_secs);
684 }
685 
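/*
 * Usage sketch (illustrative): a typical writer-side enter/exit pair. The
 * CSECT_TRAN_TABLE index and ER_FAILED return value are assumed here for
 * illustration; any valid cs_index works the same way.
 *
 *   if (csect_enter (thread_p, CSECT_TRAN_TABLE, INF_WAIT) != NO_ERROR)
 *     {
 *       return ER_FAILED;   // could not acquire the critical section
 *     }
 *   // ... exclusive access ...
 *   csect_exit (thread_p, CSECT_TRAN_TABLE);
 */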
686 /*
687  * csect_enter_critical_section_as_reader () - acquire a read lock
688  * return: 0 if success, or error code
689  * csect(in): critical section
690  * wait_secs(in): timeout second
691  */
692 int
693 csect_enter_critical_section_as_reader (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int wait_secs)
694 {
695  int error_code = NO_ERROR, r;
696  TSC_TICKS start_tick, end_tick;
697  TSCTIMEVAL tv_diff;
698  TSC_TICKS wait_start_tick, wait_end_tick;
699  TSCTIMEVAL wait_tv_diff;
700 
701  assert (csect != NULL);
702 
703  if (thread_p == NULL)
704  {
705  thread_p = thread_get_thread_entry_info ();
706  }
707 
708  csect->stats->nenter++;
709 
710  tsc_getticks (&start_tick);
711 
712  error_code = pthread_mutex_lock (&csect->lock);
713  if (error_code != NO_ERROR)
714  {
716  assert (0);
718  }
719 
720  if (csect->rwlock < 0 && csect->owner == thread_p->get_id ())
721  {
722  /* writer reenters the csect as a reader. treat as writer. */
723  csect->rwlock--;
724  csect->stats->nreenter++;
725  }
726  else
727  {
728  /* a reader can enter this csect without waiting for writer(s) when the csect has been demoted by another thread */
729  while (csect->rwlock < 0 || (csect->waiting_writers > 0 && csect->owner == thread_id_t ()))
730  {
731  /* the reader must wait for writer(s). */
732  if (wait_secs == INF_WAIT)
733  {
734  csect->waiting_readers++;
735  csect->stats->nwait++;
736  thread_p->resume_status = THREAD_CSECT_READER_SUSPENDED;
737 
738  if (thread_p->event_stats.trace_slow_query == true)
739  {
740  tsc_getticks (&wait_start_tick);
741  }
742 
743  error_code = pthread_cond_wait (&csect->readers_ok, &csect->lock);
744  if (thread_p->event_stats.trace_slow_query == true)
745  {
746  tsc_getticks (&wait_end_tick);
747  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
748  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
749  }
750 
751  csect->waiting_readers--;
752 
753  if (error_code != NO_ERROR)
754  {
755  r = pthread_mutex_unlock (&csect->lock);
756  if (r != NO_ERROR)
757  {
759  assert (0);
761  }
763  assert (0);
765  }
766  if (thread_p->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT)
767  {
768  r = pthread_mutex_unlock (&csect->lock);
769  if (r != NO_ERROR)
770  {
772  assert (0);
774  }
775  continue;
776  }
777  }
778  else if (wait_secs > 0)
779  {
780  struct timespec to;
781  to.tv_sec = time (NULL) + wait_secs;
782  to.tv_nsec = 0;
783 
784  csect->waiting_readers++;
785  thread_p->resume_status = THREAD_CSECT_READER_SUSPENDED;
786 
787  if (thread_p->event_stats.trace_slow_query == true)
788  {
789  tsc_getticks (&wait_start_tick);
790  }
791 
792  error_code = pthread_cond_timedwait (&csect->readers_ok, &csect->lock, &to);
793  if (thread_p->event_stats.trace_slow_query == true)
794  {
795  tsc_getticks (&wait_end_tick);
796  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
797  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
798  }
799 
800  csect->waiting_readers--;
801 
802  if (error_code != 0)
803  {
804  r = pthread_mutex_unlock (&csect->lock);
805  if (r != NO_ERROR)
806  {
808  assert (0);
810  }
811  if (error_code != ETIMEDOUT)
812  {
814  assert (0);
816  }
817  return error_code;
818  }
819  if (thread_p->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT)
820  {
821  r = pthread_mutex_unlock (&csect->lock);
822  if (r != NO_ERROR)
823  {
825  assert (0);
827  }
828  continue;
829  }
830  }
831  else
832  {
833  error_code = pthread_mutex_unlock (&csect->lock);
834  if (error_code != NO_ERROR)
835  {
837  assert (0);
839  }
840  return ETIMEDOUT;
841  }
842  }
843 
844  /* rwlock will be > 0. record that a reader enters the csect. */
845  csect->rwlock++;
846  }
847 
848  tsc_getticks (&end_tick);
849  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
850  TOTAL_AND_MAX_TIMEVAL (csect->stats->total_elapsed, csect->stats->max_elapsed, tv_diff);
851 
852  error_code = pthread_mutex_unlock (&csect->lock);
853  if (error_code != NO_ERROR)
854  {
856  assert (0);
858  }
859 
860  if (MONITOR_WAITING_THREAD (tv_diff))
861  {
862  if (csect->cs_index > 0)
863  {
866  }
868  "csect_enter_critical_section: %6d.%06d %s total_enter %d ntotal_elapsed %d max_elapsed %d.%06d"
869  " total_elapsed %d.06d\n", tv_diff.tv_sec, tv_diff.tv_usec, csect_name (csect),
870  csect->stats->nenter, csect->stats->nwait, csect->stats->max_elapsed.tv_sec,
871  csect->stats->max_elapsed.tv_usec, csect->stats->total_elapsed.tv_sec,
872  csect->stats->total_elapsed.tv_usec);
873  }
874 
875  thread_p->get_csect_tracker ().on_enter_as_reader (csect->cs_index);
876  return NO_ERROR;
877 }
878 
879 /*
880  * csect_enter_as_reader() - acquire a read lock
881  * return: 0 if success, or error code
882  * cs_index(in): identifier of the section to lock
883  * wait_secs(in): timeout second
884  *
885  * Note: Multiple readers go if there are no writers.
886  */
887 int
888 csect_enter_as_reader (THREAD_ENTRY * thread_p, int cs_index, int wait_secs)
889 {
890  SYNC_CRITICAL_SECTION *csect;
891 
892  assert (cs_index >= 0);
893  assert (cs_index < CRITICAL_SECTION_COUNT);
894 
895  csect = &csectgl_Critical_sections[cs_index];
896 #if defined (SERVER_MODE)
897  assert (csect->cs_index == cs_index);
898 #endif
899 
900  return csect_enter_critical_section_as_reader (thread_p, csect, wait_secs);
901 }
902 
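/*
 * Usage sketch (illustrative; index name assumed as above): the reader-side
 * counterpart. Multiple readers may hold the section at the same time as
 * long as no writer owns it.
 *
 *   if (csect_enter_as_reader (thread_p, CSECT_TRAN_TABLE, INF_WAIT) == NO_ERROR)
 *     {
 *       // ... read-only access ...
 *       csect_exit (thread_p, CSECT_TRAN_TABLE);
 *     }
 */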
903 /*
904  * csect_demote_critical_section () - acquire a read lock when it has write lock
905  * return: 0 if success, or error code
906  * csect(in): critical section
907  * wait_secs(in): timeout second
908  *
909  * Note: Always successful because I have the write lock.
910  */
911 static int
912 csect_demote_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int wait_secs)
913 {
914  int error_code = NO_ERROR, r;
915  TSC_TICKS start_tick, end_tick;
916  TSCTIMEVAL tv_diff;
917  TSC_TICKS wait_start_tick, wait_end_tick;
918  TSCTIMEVAL wait_tv_diff;
919 
920  assert (csect != NULL);
921 
922  if (thread_p == NULL)
923  {
924  thread_p = thread_get_thread_entry_info ();
925  }
926 
927  csect->stats->nenter++;
928 
929  tsc_getticks (&start_tick);
930 
931  error_code = pthread_mutex_lock (&csect->lock);
932  if (error_code != NO_ERROR)
933  {
935  assert (0);
937  }
938 
939  if (csect->rwlock < 0 && csect->owner == thread_p->get_id ())
940  {
941  /*
942  * I hold the write lock; I entered earlier as a writer.
943  * All other threads are waiting either on 'readers_ok' (as readers) or
944  * on 'writers_queue' with 'waiting_writers++' (as writers).
946  */
947 
948  csect->rwlock++; /* releasing */
949  if (csect->rwlock < 0)
950  {
951  /*
952  * In the middle of an outer critical section, it is not possible
953  * to be a reader. Treat as same as csect_enter_critical_section_as_reader().
954  */
955  csect->rwlock--; /* entering as a writer */
956  }
957  else
958  {
959  /* rwlock == 0 */
960  csect->rwlock++; /* entering as a reader */
961 #if 0
962  csect->owner = thread_id_t ();
963  csect->tran_index = -1;
964 #endif
965  }
966  }
967  else
968  {
969  /*
970  * I don't have write lock. Act like a normal reader request.
971  */
972  while (csect->rwlock < 0 || csect->waiting_writers > 0)
973  {
974  /* the reader must wait for writer(s). */
975  if (wait_secs == INF_WAIT)
976  {
977  csect->waiting_readers++;
978  csect->stats->nwait++;
979  thread_p->resume_status = THREAD_CSECT_READER_SUSPENDED;
980 
981  if (thread_p->event_stats.trace_slow_query == true)
982  {
983  tsc_getticks (&wait_start_tick);
984  }
985 
986  error_code = pthread_cond_wait (&csect->readers_ok, &csect->lock);
987  if (thread_p->event_stats.trace_slow_query == true)
988  {
989  tsc_getticks (&wait_end_tick);
990  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
991  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
992  }
993 
994  csect->waiting_readers--;
995 
996  if (error_code != NO_ERROR)
997  {
998  r = pthread_mutex_unlock (&csect->lock);
999  if (r != NO_ERROR)
1000  {
1002  assert (0);
1004  }
1006  assert (0);
1007  return ER_CSS_PTHREAD_COND_WAIT;
1008  }
1009  if (thread_p->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT)
1010  {
1011  r = pthread_mutex_unlock (&csect->lock);
1012  if (r != NO_ERROR)
1013  {
1015  assert (0);
1017  }
1018  continue;
1019  }
1020  }
1021  else if (wait_secs > 0)
1022  {
1023  struct timespec to;
1024  to.tv_sec = time (NULL) + wait_secs;
1025  to.tv_nsec = 0;
1026 
1027  csect->waiting_readers++;
1028  thread_p->resume_status = THREAD_CSECT_READER_SUSPENDED;
1029 
1030  if (thread_p->event_stats.trace_slow_query == true)
1031  {
1032  tsc_getticks (&wait_start_tick);
1033  }
1034 
1035  error_code = pthread_cond_timedwait (&csect->readers_ok, &csect->lock, &to);
1036  if (thread_p->event_stats.trace_slow_query == true)
1037  {
1038  tsc_getticks (&wait_end_tick);
1039  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
1040  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
1041  }
1042 
1043  csect->waiting_readers--;
1044 
1045  if (error_code != 0)
1046  {
1047  r = pthread_mutex_unlock (&csect->lock);
1048  if (r != NO_ERROR)
1049  {
1051  assert (0);
1053  }
1054  if (error_code != ETIMEDOUT)
1055  {
1057  assert (0);
1058  return ER_CSS_PTHREAD_COND_WAIT;
1059  }
1060  return error_code;
1061  }
1062  if (thread_p->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT)
1063  {
1064  r = pthread_mutex_unlock (&csect->lock);
1065  if (r != NO_ERROR)
1066  {
1068  assert (0);
1070  }
1071  continue;
1072  }
1073  }
1074  else
1075  {
1076  error_code = pthread_mutex_unlock (&csect->lock);
1077  if (error_code != NO_ERROR)
1078  {
1080  assert (0);
1082  }
1083  return ETIMEDOUT;
1084  }
1085  }
1086 
1087  /* rwlock will be > 0. record that a reader enters the csect. */
1088  csect->rwlock++;
1089  }
1090 
1091  tsc_getticks (&end_tick);
1092  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
1093  TOTAL_AND_MAX_TIMEVAL (csect->stats->total_elapsed, csect->stats->max_elapsed, tv_diff);
1094 
1095  /* Someone may be waiting to become a reader. Wake up all readers. */
1096  error_code = pthread_cond_broadcast (&csect->readers_ok);
1097  if (error_code != NO_ERROR)
1098  {
1099  r = pthread_mutex_unlock (&csect->lock);
1100  if (r != NO_ERROR)
1101  {
1103  assert (0);
1105  }
1106 
1108  assert (0);
1110  }
1111 
1112  error_code = pthread_mutex_unlock (&csect->lock);
1113  if (error_code != NO_ERROR)
1114  {
1116  assert (0);
1118  }
1119 
1120  if (MONITOR_WAITING_THREAD (tv_diff))
1121  {
1122  if (csect->cs_index > 0)
1123  {
1126  }
1128  "csect_demote_critical_section: %6d.%06d %s total_enter %d ntotal_elapsed %d max_elapsed %d.%06d"
1129  " total_elapsed %d.06d\n", tv_diff.tv_sec, tv_diff.tv_usec, csect_name (csect),
1130  csect->stats->nenter, csect->stats->nwait, csect->stats->max_elapsed.tv_sec,
1131  csect->stats->max_elapsed.tv_usec, csect->stats->total_elapsed.tv_sec,
1132  csect->stats->total_elapsed.tv_usec);
1133  }
1134  thread_p->get_csect_tracker ().on_demote (csect->cs_index);
1135  return NO_ERROR;
1136 }
1137 
1138 /*
1139  * csect_demote () - acquire a read lock when it has write lock
1140  * return: 0 if success, or error code
1141  * cs_index(in): identifier of the section to lock
1142  * wait_secs(in): timeout second
1143  *
1144  * Note: Always successful because I have the write lock.
1145  */
1146 int
1147 csect_demote (THREAD_ENTRY * thread_p, int cs_index, int wait_secs)
1148 {
1149  SYNC_CRITICAL_SECTION *csect;
1150 
1151  assert (cs_index >= 0);
1152  assert (cs_index < CRITICAL_SECTION_COUNT);
1153 
1154  csect = &csectgl_Critical_sections[cs_index];
1155  return csect_demote_critical_section (thread_p, csect, wait_secs);
1156 }
1157 
1158 /*
1159  * csect_promote_critical_section () - acquire a write lock when it has read lock
1160  * return: 0 if success, or error code
1161  * csect(in): critical section
1162  * wait_secs(in): timeout second
1163  *
1164  * Note: The caller is expected to already hold the read lock.
1165  */
1166 static int
1167 csect_promote_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect, int wait_secs)
1168 {
1169  int error_code = NO_ERROR, r;
1170  TSC_TICKS start_tick, end_tick;
1171  TSCTIMEVAL tv_diff;
1172  TSC_TICKS wait_start_tick, wait_end_tick;
1173  TSCTIMEVAL wait_tv_diff;
1174 
1175  assert (csect != NULL);
1176 
1177  if (thread_p == NULL)
1178  {
1179  thread_p = thread_get_thread_entry_info ();
1180  }
1181 
1182  csect->stats->nenter++;
1183 
1184  tsc_getticks (&start_tick);
1185 
1186  error_code = pthread_mutex_lock (&csect->lock);
1187  if (error_code != NO_ERROR)
1188  {
1190  assert (0);
1192  }
1193 
1194  if (csect->rwlock > 0)
1195  {
1196  /*
1197  * I am a reader so that no writer is in this csect but reader(s) could be.
1198  * All writers are waiting on 'writers_queue' with 'waiting_writers++'.
1199  */
1200  csect->rwlock--; /* releasing */
1201  }
1202  else
1203  {
1204  csect->rwlock++; /* releasing */
1205  /*
1206  * I don't have read lock. Act like a normal writer request.
1207  */
1208  }
1209 
1210  while (csect->rwlock != 0)
1211  {
1212  /* There are other readers, so I have to wait as a writer. */
1213  if (csect->rwlock < 0 && csect->owner == thread_p->get_id ())
1214  {
1215  /*
1216  * I am holding the csect, and reenter it again as writer.
1217  * Note that rwlock will be decremented.
1218  */
1219  break;
1220  }
1221  else
1222  {
1223  if (wait_secs == INF_WAIT)
1224  {
1225  csect->waiting_writers++;
1226  csect->stats->nwait++;
1227 
1228  if (thread_p->event_stats.trace_slow_query == true)
1229  {
1230  tsc_getticks (&wait_start_tick);
1231  }
1232 
1233  error_code = csect_wait_on_promoter_queue (thread_p, csect, INF_WAIT, NULL);
1234  if (thread_p->event_stats.trace_slow_query == true)
1235  {
1236  tsc_getticks (&wait_end_tick);
1237  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
1238  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
1239  }
1240 
1241  csect->waiting_writers--;
1242  if (error_code != NO_ERROR)
1243  {
1244  r = pthread_mutex_unlock (&csect->lock);
1245  if (r != NO_ERROR)
1246  {
1248  assert (0);
1250  }
1252  assert (0);
1253  return ER_CSS_PTHREAD_COND_WAIT;
1254  }
1255  }
1256  else if (wait_secs > 0)
1257  {
1258  struct timespec to;
1259  to.tv_sec = time (NULL) + wait_secs;
1260  to.tv_nsec = 0;
1261 
1262  csect->waiting_writers++;
1263 
1264  if (thread_p->event_stats.trace_slow_query == true)
1265  {
1266  tsc_getticks (&wait_start_tick);
1267  }
1268 
1269  error_code = csect_wait_on_promoter_queue (thread_p, csect, NOT_WAIT, &to);
1270  if (thread_p->event_stats.trace_slow_query == true)
1271  {
1272  tsc_getticks (&wait_end_tick);
1273  tsc_elapsed_time_usec (&wait_tv_diff, wait_end_tick, wait_start_tick);
1274  TSC_ADD_TIMEVAL (thread_p->event_stats.cs_waits, wait_tv_diff);
1275  }
1276 
1277  csect->waiting_writers--;
1278  if (error_code != NO_ERROR)
1279  {
1280  r = pthread_mutex_unlock (&csect->lock);
1281  if (r != NO_ERROR)
1282  {
1284  assert (0);
1286  }
1287  if (error_code != ER_CSS_PTHREAD_COND_WAIT && error_code != ER_CSS_PTHREAD_COND_TIMEDOUT)
1288  {
1290  assert (0);
1291  return ER_CSS_PTHREAD_COND_WAIT;
1292  }
1293  return error_code;
1294  }
1295  }
1296  else
1297  {
1298  error_code = pthread_mutex_unlock (&csect->lock);
1299  if (error_code != NO_ERROR)
1300  {
1302  assert (0);
1304  }
1305  return ETIMEDOUT;
1306  }
1307  }
1308  }
1309 
1310  /* rwlock will be < 0. It denotes that a writer owns the csect. */
1311  csect->rwlock--;
1312  /* record that I am the writer of the csect. */
1313  csect->owner = thread_p->get_id ();
1314  csect->tran_index = thread_p->tran_index;
1315 
1316  tsc_getticks (&end_tick);
1317  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
1318  TOTAL_AND_MAX_TIMEVAL (csect->stats->total_elapsed, csect->stats->max_elapsed, tv_diff);
1319 
1320  error_code = pthread_mutex_unlock (&csect->lock);
1321  if (error_code != NO_ERROR)
1322  {
1324  assert (0);
1326  }
1327 
1328  if (MONITOR_WAITING_THREAD (tv_diff))
1329  {
1330  if (csect->cs_index > 0)
1331  {
1334  }
1336  "csect_promote_critical_section: %6d.%06d %s total_enter %d ntotal_elapsed %d max_elapsed %d.%06d"
1337  " total_elapsed %d.06d\n", tv_diff.tv_sec, tv_diff.tv_usec, csect_name (csect),
1338  csect->stats->nenter, csect->stats->nwait, csect->stats->max_elapsed.tv_sec,
1339  csect->stats->max_elapsed.tv_usec, csect->stats->total_elapsed.tv_sec,
1340  csect->stats->total_elapsed.tv_usec);
1341  }
1342 
1343  thread_p->get_csect_tracker ().on_promote (csect->cs_index);
1344  return NO_ERROR;
1345 }
1346 
1347 /*
1348  * csect_promote () - acquire a write lock when it has read lock
1349  * return: 0 if success, or error code
1350  * cs_index(in): identifier of the section to lock
1351  * wait_secs(in): timeout second
1352  *
1353  * Note: The caller is expected to already hold the read lock.
1354  */
1355 int
1356 csect_promote (THREAD_ENTRY * thread_p, int cs_index, int wait_secs)
1357 {
1358  SYNC_CRITICAL_SECTION *csect;
1359 
1360  assert (cs_index >= 0);
1361  assert (cs_index < CRITICAL_SECTION_COUNT);
1362 
1363  csect = &csectgl_Critical_sections[cs_index];
1364  return csect_promote_critical_section (thread_p, csect, wait_secs);
1365 }
1366 
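/*
 * Usage sketch (illustrative; index name assumed): a writer can demote its
 * write lock to a read lock for a read-only phase and promote it back
 * afterwards, letting other readers in while it only reads.
 *
 *   csect_enter (thread_p, CSECT_TRAN_TABLE, INF_WAIT);    // write lock
 *   // ... modify ...
 *   csect_demote (thread_p, CSECT_TRAN_TABLE, INF_WAIT);   // now a reader
 *   // ... read-only phase; other readers may enter ...
 *   csect_promote (thread_p, CSECT_TRAN_TABLE, INF_WAIT);  // write lock again
 *   // ... modify ...
 *   csect_exit (thread_p, CSECT_TRAN_TABLE);
 */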
1367 /*
1368  * csect_exit_critical_section() - unlock critical section
1369  * return: 0 if success, or error code
1370  * csect(in): critical section
1371  */
1372 int
1373 csect_exit_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect)
1374 {
1375  int error_code = NO_ERROR;
1376  bool ww, wr, wp;
1377 
1378  assert (csect != NULL);
1379 
1380  if (thread_p == NULL)
1381  {
1382  thread_p = thread_get_thread_entry_info ();
1383  }
1384 
1385  error_code = pthread_mutex_lock (&csect->lock);
1386  if (error_code != NO_ERROR)
1387  {
1389  assert (0);
1391  }
1392 
1393  if (csect->rwlock < 0)
1394  { /* rwlock < 0 if locked for writing */
1395  csect->rwlock++;
1396  if (csect->rwlock < 0)
1397  {
1398  /* in the middle of an outer critical section */
1399  error_code = pthread_mutex_unlock (&csect->lock);
1400  if (error_code != NO_ERROR)
1401  {
1403  assert (0);
1405  }
1406  thread_p->get_csect_tracker ().on_exit (csect->cs_index);
1407  return NO_ERROR;
1408  }
1409  else
1410  {
1411  assert (csect->rwlock == 0);
1412  csect->owner = thread_id_t ();
1413  csect->tran_index = -1;
1414  }
1415  }
1416  else if (csect->rwlock > 0)
1417  {
1418  csect->rwlock--;
1419  }
1420  else
1421  {
1422  /* csect->rwlock == 0 */
1423  error_code = pthread_mutex_unlock (&csect->lock);
1424  if (error_code != NO_ERROR)
1425  {
1427  assert (0);
1429  }
1430 
1432  assert (0);
1433  return ER_CS_UNLOCKED_BEFORE;
1434  }
1435 
1436  /*
1437  * Keep flags that show if there are waiting readers or writers
1438  * so that we can wake them up outside the monitor lock.
1439  */
1440  ww = (csect->waiting_writers > 0 && csect->rwlock == 0 && csect->owner == thread_id_t ());
1441  wp = (csect->waiting_writers > 0 && csect->rwlock == 0 && csect->owner != thread_id_t ());
1442  wr = (csect->waiting_writers == 0);
1443 
1444  /* wake up a waiting promoter or writer first. Otherwise wake up all readers. */
1445  if (wp == true)
1446  {
1447  error_code = csect_wakeup_waiting_promoter (csect);
1448  if (error_code != NO_ERROR)
1449  {
1451  pthread_mutex_unlock (&csect->lock);
1452  assert (0);
1454  }
1455  }
1456  else if (ww == true)
1457  {
1458  error_code = csect_wakeup_waiting_writer (csect);
1459  if (error_code != NO_ERROR)
1460  {
1462  pthread_mutex_unlock (&csect->lock);
1463  assert (0);
1465  }
1466  }
1467  else
1468  {
1469  error_code = pthread_cond_broadcast (&csect->readers_ok);
1470  if (error_code != NO_ERROR)
1471  {
1473  pthread_mutex_unlock (&csect->lock);
1474  assert (0);
1476  }
1477  }
1478 
1479  error_code = pthread_mutex_unlock (&csect->lock);
1480  if (error_code != NO_ERROR)
1481  {
1483  assert (0);
1485  }
1486 
1487  thread_p->get_csect_tracker ().on_exit (csect->cs_index);
1488  return NO_ERROR;
1489 }
1490 
1491 /*
1492  * csect_exit() - free a lock that prevents other threads from
1493  * concurrent execution through a critical section of code
1494  * return: 0 if success, or error code
1495  * cs_index(in): identifier of the section to unlock
1496  *
1497  * Note: unlocks the critical section, which may restart another thread that
1498  * is suspended and waiting for the critical section.
1499  */
1500 int
1501 csect_exit (THREAD_ENTRY * thread_p, int cs_index)
1502 {
1503  SYNC_CRITICAL_SECTION *csect;
1504 
1505  assert (cs_index >= 0);
1506  assert (cs_index < CRITICAL_SECTION_COUNT);
1507 
1508  csect = &csectgl_Critical_sections[cs_index];
1509 #if defined (SERVER_MODE)
1510  assert (csect->cs_index == cs_index);
1511 #endif
1512 
1513  return csect_exit_critical_section (thread_p, csect);
1514 }
1515 
1516 /*
1517  * csect_dump_statistics() - dump critical section statistics
1518  * return: void
1519  */
1520 void
1521 csect_dump_statistics (FILE * fp)
1522 {
1523  SYNC_CRITICAL_SECTION *csect;
1524  int i;
1525 
1526  fprintf (fp, " CS Name |Total Enter|Total Wait |Total Reenter| Max elapsed | Total elapsed\n");
1527 
1528  for (i = 0; i < CRITICAL_SECTION_COUNT; i++)
1529  {
1530  csect = &csectgl_Critical_sections[i];
1531 
1532  fprintf (fp, "%-23s |%10d |%10d | %10d | %6ld.%06ld | %6ld.%06ld\n",
1533  csect_name (csect), csect->stats->nenter, csect->stats->nwait,
1534  csect->stats->nreenter, csect->stats->max_elapsed.tv_sec, csect->stats->max_elapsed.tv_usec,
1535  csect->stats->total_elapsed.tv_sec, csect->stats->total_elapsed.tv_usec);
1536 
1538  }
1539 
1540  fflush (fp);
1541 }
1542 
1543 /*
1544  * csect_check_own() - check if current thread is critical section owner
1545  * return: true if cs's owner is me, false if not
1546  * cs_index(in): csectgl_Critical_sections's index
1547  */
1548 int
1549 csect_check_own (THREAD_ENTRY * thread_p, int cs_index)
1550 {
1551  SYNC_CRITICAL_SECTION *csect;
1552 
1553  assert (cs_index >= 0);
1554  assert (cs_index < CRITICAL_SECTION_COUNT);
1555 
1556  csect = &csectgl_Critical_sections[cs_index];
1557 
1558  return csect_check_own_critical_section (thread_p, csect);
1559 }
1560 
1561 /*
1562  * csect_check_own_critical_section() - check if current thread is critical section owner
1563  * return: true if cs's owner is me, false if not
1564  * csect(in): critical section
1565  */
1566 static int
1567 csect_check_own_critical_section (THREAD_ENTRY * thread_p, SYNC_CRITICAL_SECTION * csect)
1568 {
1569  int error_code = NO_ERROR, return_code;
1570 
1571  if (thread_p == NULL)
1572  {
1573  thread_p = thread_get_thread_entry_info ();
1574  }
1575 
1576  error_code = pthread_mutex_lock (&csect->lock);
1577  if (error_code != NO_ERROR)
1578  {
1580  assert (0);
1582  }
1583 
1584  if (csect->rwlock < 0 && csect->owner == thread_p->get_id ())
1585  {
1586  /* has the write lock */
1587  return_code = 1;
1588  }
1589  else if (csect->rwlock > 0)
1590  {
1591  /* has the read lock */
1592  return_code = 2;
1593  }
1594  else
1595  {
1596  return_code = 0;
1597  }
1598 
1599  error_code = pthread_mutex_unlock (&csect->lock);
1600  if (error_code != NO_ERROR)
1601  {
1603  assert (0);
1605  }
1606 
1607  return return_code;
1608 }
1609 
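/*
 * Usage sketch (illustrative; index name assumed): csect_check_own () is
 * mostly used in assertions. It returns 1 when the calling thread holds the
 * write lock, 2 when the section is held by reader(s), and 0 otherwise.
 *
 *   assert (csect_check_own (thread_p, CSECT_TRAN_TABLE) == 1);
 */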
1610 /*
1611  * csect_start_scan () - start scan function for
1612  * show global critical sections
1613  * return: NO_ERROR, or ER_code
1614  *
1615  * thread_p(in):
1616  * show_type(in):
1617  * arg_values(in):
1618  * arg_cnt(in):
1619  * ptr(in/out):
1620  */
1621 int
1622 csect_start_scan (THREAD_ENTRY * thread_p, int show_type, DB_VALUE ** arg_values, int arg_cnt, void **ptr)
1623 {
1624  SHOWSTMT_ARRAY_CONTEXT *ctx = NULL;
1625  int i, idx, error = NO_ERROR;
1626  DB_VALUE *vals = NULL;
1627  SYNC_CRITICAL_SECTION *csect;
1628  char buf[256] = { 0 };
1629  double msec;
1630  DB_VALUE db_val;
1631  DB_DATA_STATUS data_status;
1632  thread_id_t owner_tid;
1633  int ival;
1634  THREAD_ENTRY *thread_entry = NULL;
1635  int num_cols = 12;
1636 
1637  *ptr = NULL;
1638  ctx = showstmt_alloc_array_context (thread_p, CRITICAL_SECTION_COUNT, num_cols);
1639  if (ctx == NULL)
1640  {
1641  error = er_errid ();
1642  goto exit_on_error;
1643  }
1644 
1645  for (i = 0; i < CRITICAL_SECTION_COUNT; i++)
1646  {
1647  idx = 0;
1648  vals = showstmt_alloc_tuple_in_context (thread_p, ctx);
1649  if (vals == NULL)
1650  {
1651  error = er_errid ();
1652  goto exit_on_error;
1653  }
1654 
1655  csect = &csectgl_Critical_sections[i];
1656 
1657  /* The index of the critical section */
1658  db_make_int (&vals[idx], csect->cs_index);
1659  idx++;
1660 
1661  /* The name of the critical section */
1662  db_make_string (&vals[idx], csect_name (csect));
1663  idx++;
1664 
1665  /* 'N readers', '1 writer', 'none' */
1666  ival = csect->rwlock;
1667  if (ival > 0)
1668  {
1669  snprintf (buf, sizeof (buf), "%d readers", ival);
1670  }
1671  else if (ival < 0)
1672  {
1673  snprintf (buf, sizeof (buf), "1 writer");
1674  }
1675  else
1676  {
1677  snprintf (buf, sizeof (buf), "none");
1678  }
1679 
1680  error = db_make_string_copy (&vals[idx], buf);
1681  idx++;
1682  if (error != NO_ERROR)
1683  {
1684  goto exit_on_error;
1685  }
1686 
1687  /* The number of waiting readers */
1688  db_make_int (&vals[idx], csect->waiting_readers);
1689  idx++;
1690 
1691  /* The number of waiting writers */
1692  db_make_int (&vals[idx], csect->waiting_writers);
1693  idx++;
1694 
1695  /* The thread index of CS owner writer, NULL if no owner */
1696  owner_tid = csect->owner;
1697  if (owner_tid == thread_id_t ())
1698  {
1699  db_make_null (&vals[idx]);
1700  }
1701  else
1702  {
1703  thread_entry = thread_get_manager ()->find_by_tid (owner_tid);
1704  if (thread_entry != NULL)
1705  {
1706  db_make_bigint (&vals[idx], thread_entry->index);
1707  }
1708  else
1709  {
1710  db_make_null (&vals[idx]);
1711  }
1712  }
1713  idx++;
1714 
1715  /* Transaction id of CS owner writer, NULL if no owner */
1716  ival = csect->tran_index;
1717  if (ival == -1)
1718  {
1719  db_make_null (&vals[idx]);
1720  }
1721  else
1722  {
1723  db_make_int (&vals[idx], ival);
1724  }
1725  idx++;
1726 
1727  /* Total count of enters */
1728  db_make_bigint (&vals[idx], csect->stats->nenter);
1729  idx++;
1730 
1731  /* Total count of waiters */
1732  db_make_bigint (&vals[idx], csect->stats->nwait);
1733  idx++;
1734 
1735  /* The thread index of waiting promoter, NULL if no waiting promoter */
1736  thread_entry = csect->waiting_promoters_queue;
1737  if (thread_entry != NULL)
1738  {
1739  db_make_int (&vals[idx], thread_entry->index);
1740  }
1741  else
1742  {
1743  db_make_null (&vals[idx]);
1744  }
1745  idx++;
1746 
1747  /* Maximum waiting time (millisecond) */
1748  msec = csect->stats->max_elapsed.tv_sec * 1000 + csect->stats->max_elapsed.tv_usec / 1000.0;
1749  db_make_double (&db_val, msec);
1750  db_value_domain_init (&vals[idx], DB_TYPE_NUMERIC, 10, 3);
1751  error = numeric_db_value_coerce_to_num (&db_val, &vals[idx], &data_status);
1752  idx++;
1753  if (error != NO_ERROR)
1754  {
1755  goto exit_on_error;
1756  }
1757 
1758  /* Total waiting time (millisecond) */
1759  msec = csect->stats->total_elapsed.tv_sec * 1000 + csect->stats->total_elapsed.tv_usec / 1000.0;
1760  db_make_double (&db_val, msec);
1761  db_value_domain_init (&vals[idx], DB_TYPE_NUMERIC, 10, 3);
1762  error = numeric_db_value_coerce_to_num (&db_val, &vals[idx], &data_status);
1763  idx++;
1764  if (error != NO_ERROR)
1765  {
1766  goto exit_on_error;
1767  }
1768 
1769  assert (idx == num_cols);
1770  }
1771 
1772  *ptr = ctx;
1773  return NO_ERROR;
1774 
1775 exit_on_error:
1776 
1777  if (ctx != NULL)
1778  {
1779  showstmt_free_array_context (thread_p, ctx);
1780  }
1781 
1782  return error;
1783 }
1784 
1785 
1786 /*
1787  * rwlock_initialize () - initialize a rwlock
1788  * return: NO_ERROR, or ER_code
1789  *
1790  * rwlock(in/out):
1791  * name(in):
1792  */
1793 int
1794 rwlock_initialize (SYNC_RWLOCK * rwlock, const char *name)
1795 {
1796  int error_code = NO_ERROR;
1797 
1798  assert (rwlock != NULL && name != NULL);
1799 
1800  rwlock->stats = NULL;
1801 
1802  error_code = pthread_mutex_init (&rwlock->read_lock, NULL);
1803  if (error_code != NO_ERROR)
1804  {
1806  assert (0);
1808  }
1809 
1810  error_code = pthread_mutex_init (&rwlock->global_lock, NULL);
1811  if (error_code != NO_ERROR)
1812  {
1814  assert (0);
1816  }
1817 
1818  rwlock->name = name;
1819  rwlock->num_readers = 0;
1820 
1821  rwlock->stats = sync_allocate_sync_stats (SYNC_TYPE_RWLOCK, rwlock->name);
1822  if (rwlock->stats == NULL)
1823  {
1824  ASSERT_ERROR_AND_SET (error_code);
1825  return error_code;
1826  }
1827 
1828  return error_code;
1829 }
1830 
1831 /*
1832  * rwlock_finalize () - finalize a rwlock
1833  * return: NO_ERROR, or ER_code
1834  *
1835  * rwlock(in/out):
1836  */
1837 int
1838 rwlock_finalize (SYNC_RWLOCK * rwlock)
1839 {
1840  int error_code = NO_ERROR;
1841 
1842  assert (rwlock != NULL && rwlock->num_readers == 0);
1843 
1844  rwlock->num_readers = 0;
1845  rwlock->name = NULL;
1846 
1847  error_code = pthread_mutex_destroy (&rwlock->read_lock);
1848  if (error_code != NO_ERROR)
1849  {
1851  assert (0);
1853  }
1854 
1855  error_code = pthread_mutex_destroy (&rwlock->global_lock);
1856  if (error_code != NO_ERROR)
1857  {
1859  assert (0);
1861  }
1862 
1863  error_code = sync_deallocate_sync_stats (rwlock->stats);
1864  rwlock->stats = NULL;
1865 
1866  return error_code;
1867 }
1868 
1869 /*
1870  * rwlock_read_lock () - acquire a read-lock of the given rwlock
1871  * return: NO_ERROR, or ER_code
1872  *
1873  * rwlock(in/out):
1874  */
1875 int
1876 rwlock_read_lock (SYNC_RWLOCK * rwlock)
1877 {
1878  TSC_TICKS start_tick, end_tick;
1879  TSCTIMEVAL tv_diff;
1880  int error_code;
1881 
1882  assert (rwlock != NULL);
1883 
1884  tsc_getticks (&start_tick);
1885 
1886  /* hold the reader lock */
1887  error_code = pthread_mutex_lock (&rwlock->read_lock);
1888  if (error_code != NO_ERROR)
1889  {
1891  assert (0);
1893  }
1894 
1895  /* increment the number of readers */
1896  rwlock->num_readers++;
1897 
1898  /* hold the global lock if it is the first reader */
1899  if (rwlock->num_readers == 1)
1900  {
1901  error_code = pthread_mutex_lock (&rwlock->global_lock);
1902  if (error_code != NO_ERROR)
1903  {
1905  assert (0);
1906 
1907  (void) pthread_mutex_unlock (&rwlock->read_lock);
1908 
1910  }
1911  }
1912 
1913  /* collect statistics */
1914  tsc_getticks (&end_tick);
1915  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
1916  TOTAL_AND_MAX_TIMEVAL (rwlock->stats->total_elapsed, rwlock->stats->max_elapsed, tv_diff);
1917 
1918  rwlock->stats->nenter++;
1919 
1920  /* release the reader lock */
1921  error_code = pthread_mutex_unlock (&rwlock->read_lock);
1922  if (error_code != NO_ERROR)
1923  {
1925  assert (0);
1927  }
1928 
1929  return NO_ERROR;
1930 }
1931 
1932 /*
1933  * rwlock_read_unlock () - release a read-lock of the given rwlock
1934  * return: NO_ERROR, or ER_code
1935  *
1936  * rwlock(in/out):
1937  */
1938 int
1939 rwlock_read_unlock (SYNC_RWLOCK * rwlock)
1940 {
1941  int error_code;
1942 
1943  assert (rwlock != NULL);
1944 
1945  /* hold the reader lock */
1946  error_code = pthread_mutex_lock (&rwlock->read_lock);
1947  if (error_code != NO_ERROR)
1948  {
1950  assert (0);
1952  }
1953 
1954  /* decrement the number of readers */
1955  rwlock->num_readers--;
1956 
1957  /* release the global lock if it is the last reader */
1958  if (rwlock->num_readers == 0)
1959  {
1960  error_code = pthread_mutex_unlock (&rwlock->global_lock);
1961  if (error_code != NO_ERROR)
1962  {
1964  assert (0);
1965 
1966  (void) pthread_mutex_unlock (&rwlock->read_lock);
1967 
1969  }
1970  }
1971 
1972  /* release the reader lock */
1973  error_code = pthread_mutex_unlock (&rwlock->read_lock);
1974  if (error_code != NO_ERROR)
1975  {
1977  assert (0);
1979  }
1980 
1981  return NO_ERROR;
1982 }
1983 
1984 /*
1985  * rwlock_write_lock () - acquire write-lock of the given rwlock
1986  * return: NO_ERROR, or ER_code
1987  *
1988  * rwlock(in/out):
1989  */
1990 int
1991 rwlock_write_lock (SYNC_RWLOCK * rwlock)
1992 {
1993  TSC_TICKS start_tick, end_tick;
1994  TSCTIMEVAL tv_diff;
1995  int error_code;
1996 
1997  assert (rwlock != NULL);
1998 
1999  tsc_getticks (&start_tick);
2000 
2001  error_code = pthread_mutex_lock (&rwlock->global_lock);
2002  if (error_code != NO_ERROR)
2003  {
2005  assert (0);
2007  }
2008 
2009  /* collect statistics */
2010  tsc_getticks (&end_tick);
2011  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
2012  TOTAL_AND_MAX_TIMEVAL (rwlock->stats->total_elapsed, rwlock->stats->max_elapsed, tv_diff);
2013 
2014  rwlock->stats->nenter++;
2015 
2016  return NO_ERROR;
2017 }
2018 
2019 /*
2020  * rwlock_write_unlock () - release write-lock of the given rwlock
2021  * return: NO_ERROR, or ER_code
2022  *
2023  * rwlock(in/out):
2024  */
2025 int
2026 rwlock_write_unlock (SYNC_RWLOCK * rwlock)
2027 {
2028  int error_code;
2029 
2030  assert (rwlock != NULL);
2031 
2032  error_code = pthread_mutex_unlock (&rwlock->global_lock);
2033  if (error_code != NO_ERROR)
2034  {
2036  assert (0);
2038  }
2039 
2040  return NO_ERROR;
2041 }
2042 
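/*
 * Usage sketch (illustrative; the variable and name are hypothetical):
 * SYNC_RWLOCK is a readers-writer lock built from two mutexes. The first
 * reader acquires global_lock and the last reader releases it, so writers
 * are excluded while any reader is inside.
 *
 *   static SYNC_RWLOCK my_Rwlock;
 *
 *   rwlock_initialize (&my_Rwlock, "MY_RWLOCK");
 *
 *   rwlock_read_lock (&my_Rwlock);     // shared access
 *   // ... read ...
 *   rwlock_read_unlock (&my_Rwlock);
 *
 *   rwlock_write_lock (&my_Rwlock);    // exclusive access
 *   // ... modify ...
 *   rwlock_write_unlock (&my_Rwlock);
 *
 *   rwlock_finalize (&my_Rwlock);
 */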
2043 /*
2044  * rwlock_dump_statistics() - dump rwlock statistics
2045  * return: void
2046  */
2047 void
2048 rwlock_dump_statistics (FILE * fp)
2049 {
2050  SYNC_STATS_CHUNK *p;
2051  SYNC_STATS *stats;
2052  int i, cnt;
2053 
2054  fprintf (fp, "\n RWlock Name |Total Enter| Max elapsed | Total elapsed\n");
2055 
2057 
2058  p = &sync_Stats;
2059  while (p != NULL)
2060  {
2061  for (i = 0, cnt = 0; cnt < p->num_entry_in_use && i < NUM_ENTRIES_OF_SYNC_STATS_BLOCK; i++)
2062  {
2063  stats = &p->block[i];
2064  if (stats->type == SYNC_TYPE_RWLOCK)
2065  {
2066  cnt++;
2067 
2068  fprintf (fp, "%-29s |%10d | %6ld.%06ld | %6ld.%06ld\n", stats->name, stats->nenter,
2069  stats->max_elapsed.tv_sec, stats->max_elapsed.tv_usec,
2070  stats->total_elapsed.tv_sec, stats->total_elapsed.tv_usec);
2071 
2072  /* reset statistics */
2073  sync_reset_stats_metrics (stats);
2074  }
2075  }
2076 
2077  p = p->next;
2078  }
2079 
2081 
2082  fflush (fp);
2083 }
2084 
2085 /*
2086  * rmutex_initialize () - initialize a reentrant mutex
2087  * return: NO_ERROR, or ER_code
2088  *
2089  * rmutex(in/out):
2090  * name(in):
2091  */
2092 int
2093 rmutex_initialize (SYNC_RMUTEX * rmutex, const char *name)
2094 {
2095  int error_code = NO_ERROR;
2096 
2097  assert (rmutex != NULL);
2098 
2099  error_code = pthread_mutex_init (&rmutex->lock, NULL);
2100  if (error_code != NO_ERROR)
2101  {
2103  assert (0);
2105  }
2106 
2107  rmutex->owner = thread_id_t ();
2108  rmutex->lock_cnt = 0;
2109 
2111  if (rmutex->stats == NULL)
2112  {
2113  ASSERT_ERROR_AND_SET (error_code);
2114  return error_code;
2115  }
2116 
2117  return NO_ERROR;
2118 }
2119 
2120 /*
2121  * rmutex_finalize () - finalize a rmutex
2122  * return: NO_ERROR, or ER_code
2123  *
2124  * rmutex(in/out):
2125  */
2126 int
2127 rmutex_finalize (SYNC_RMUTEX * rmutex)
2128 {
2129  int err;
2130 
2131  assert (rmutex != NULL);
2132 
2133  err = pthread_mutex_destroy (&rmutex->lock);
2134  if (err != NO_ERROR)
2135  {
2137  assert (0);
2139  }
2140 
2141  err = sync_deallocate_sync_stats (rmutex->stats);
2142  rmutex->stats = NULL;
2143 
2144  return err;
2145 }
2146 
2147 /*
2148  * rmutex_lock () - acquire lock of the given rmutex. The owner is allowed to hold it again.
2149  * return: NO_ERROR, or ER_code
2150  *
2151  * thread_p(in):
2152  * rmutex(in/out):
2153  */
2154 int
2155 rmutex_lock (THREAD_ENTRY * thread_p, SYNC_RMUTEX * rmutex)
2156 {
2157  TSC_TICKS start_tick, end_tick;
2158  TSCTIMEVAL tv_diff;
2159 
2161  {
2162  /* Regard the resource is available, since system is working as a single thread. */
2163  return NO_ERROR;
2164  }
2165 
2166  assert (rmutex != NULL);
2167 
2168  if (thread_p == NULL)
2169  {
2170  thread_p = thread_get_thread_entry_info ();
2171  }
2172  assert (thread_p->get_id () != thread_id_t ());
2173 
2174  if (rmutex->owner == thread_p->get_id ())
2175  {
2176  assert (rmutex->lock_cnt > 0);
2177  rmutex->lock_cnt++;
2178 
2179  rmutex->stats->nenter++;
2180  rmutex->stats->nreenter++;
2181  }
2182  else
2183  {
2184  tsc_getticks (&start_tick);
2185 
2186  if (pthread_mutex_lock (&rmutex->lock) != NO_ERROR)
2187  {
2189  assert (0);
2191  }
2192 
2193  /* collect statistics */
2194  tsc_getticks (&end_tick);
2195  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
2196  TOTAL_AND_MAX_TIMEVAL (rmutex->stats->total_elapsed, rmutex->stats->max_elapsed, tv_diff);
2197 
2198  rmutex->stats->nenter++;
2199 
2200  assert (rmutex->lock_cnt == 0);
2201  rmutex->lock_cnt++;
2202 
2203  rmutex->owner = thread_p->get_id ();
2204  }
2205 
2206  return NO_ERROR;
2207 }
2208 
2209 /*
2210  * rmutex_unlock () - decrement lock_cnt and release the given rmutex when lock_cnt returns to 0
2211  * return: NO_ERROR, or ER_code
2212  *
2213  * rmutex(in/out):
2214  */
2215 int
2216 rmutex_unlock (THREAD_ENTRY * thread_p, SYNC_RMUTEX * rmutex)
2217 {
2219  {
2220  /* Regard the resource is available, since system is working as a single thread. */
2221  return NO_ERROR;
2222  }
2223 
2224  assert (rmutex != NULL && rmutex->lock_cnt > 0);
2225 
2226  if (thread_p == NULL)
2227  {
2228  thread_p = thread_get_thread_entry_info ();
2229  }
2230 
2231  assert (rmutex->owner == thread_p->get_id ());
2232 
2233  rmutex->lock_cnt--;
2234 
2235  if (rmutex->lock_cnt == 0)
2236  {
2237  rmutex->owner = thread_id_t ();
2238 
2239  pthread_mutex_unlock (&rmutex->lock);
2240  }
2241 
2242  return NO_ERROR;
2243 }
2244 
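/*
 * Usage sketch (illustrative; the variable and name are hypothetical):
 * SYNC_RMUTEX is reentrant for its owner. Each rmutex_lock () must be
 * matched by an rmutex_unlock (); the underlying pthread mutex is released
 * only when lock_cnt drops back to 0.
 *
 *   static SYNC_RMUTEX my_Rmutex;
 *
 *   rmutex_initialize (&my_Rmutex, "MY_RMUTEX");
 *
 *   rmutex_lock (thread_p, &my_Rmutex);     // lock_cnt = 1
 *   rmutex_lock (thread_p, &my_Rmutex);     // reentry by owner, lock_cnt = 2
 *   // ... critical work ...
 *   rmutex_unlock (thread_p, &my_Rmutex);   // lock_cnt = 1
 *   rmutex_unlock (thread_p, &my_Rmutex);   // lock_cnt = 0, mutex released
 */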
2245 /*
2246  * sync_reset_stats_metrics () - reset stats metrics
2247  * return: void
2248  *
2249  */
2250 static void
2251 sync_reset_stats_metrics (SYNC_STATS * stats)
2252 {
2253  assert (stats != NULL);
2254 
2255  stats->total_elapsed.tv_sec = 0;
2256  stats->total_elapsed.tv_usec = 0;
2257 
2258  stats->max_elapsed.tv_sec = 0;
2259  stats->max_elapsed.tv_usec = 0;
2260 
2261  stats->nenter = 0;
2262  stats->nwait = 0;
2263  stats->nreenter = 0;
2264 }
2265 
2266 /*
2267  * sync_initialize_sync_stats () - initialize synchronization primitives stats monitor
2268  * return: NO_ERROR
2269  *
2270  * called during server startup
2271  */
2272 int
2273 sync_initialize_sync_stats (void)
2274 {
2275  int error_code = NO_ERROR;
2276 
2277  error_code = pthread_mutex_init (&sync_Stats_lock, NULL);
2278  if (error_code != NO_ERROR)
2279  {
2281  assert (0);
2283  }
2284 
2285  return sync_initialize_sync_stats_chunk (&sync_Stats);
2286 }
2287 
2288 /*
2289  * sync_finalize_sync_stats () - finalize synchronization primitives stats monitor
2290  * return: NO_ERROR
2291  *
2292  * called during server shutdown
2293  */
2294 int
2295 sync_finalize_sync_stats (void)
2296 {
2297  SYNC_STATS_CHUNK *p, *next;
2298 
2299  p = &sync_Stats;
2300 
2301  /* the head entry will be kept. */
2302  for (p = p->next; p != NULL; p = next)
2303  {
2304  next = p->next;
2305 
2306  /* may require assertions on the chunk entry here. */
2307  free_and_init (p);
2308  }
2309 
2310  /* clear the head entry */
2311  (void) sync_initialize_sync_stats_chunk (&sync_Stats);
2312 
2313  if (pthread_mutex_destroy (&sync_Stats_lock) != NO_ERROR)
2314  {
2316  assert (0);
2318  }
2319 
2320  return NO_ERROR;
2321 }
2322 
2323 /*
2324  * sync_allocate_sync_stats_chunk () - allocate a sync stats chunk
2325  * return: the allocated sync stats entry or NULL
2326  *
2327  */
2328 static SYNC_STATS_CHUNK *
2329 sync_allocate_sync_stats_chunk (void)
2330 {
2331  SYNC_STATS_CHUNK *p;
2332 
2333  p = (SYNC_STATS_CHUNK *) malloc (sizeof (SYNC_STATS_CHUNK));
2334  if (p == NULL)
2335  {
2337  return NULL;
2338  }
2339 
2340  sync_initialize_sync_stats_chunk (p);
2341 
2342  return p;
2343 }
2344 
2345 /*
2346  * sync_initialize_sync_stats_chunk () - initialize a sync stats chunk
2347  * return: NO_ERROR
2348  *
2349  */
2350 static int
2351 sync_initialize_sync_stats_chunk (SYNC_STATS_CHUNK * sync_stats_chunk)
2352 {
2353  assert (sync_stats_chunk != NULL);
2354 
2355  memset (sync_stats_chunk->block, 0, sizeof (SYNC_STATS) * NUM_ENTRIES_OF_SYNC_STATS_BLOCK);
2356  sync_stats_chunk->next = NULL;
2357  sync_stats_chunk->hint_free_entry_idx = 0;
2358  sync_stats_chunk->num_entry_in_use = 0;
2359 
2360  return NO_ERROR;
2361 }
2362 
2363 /*
2364  * sync_consume_sync_stats_from_pool () -
2365  * return: stats buffer
2366  *
2367  */
2368 static SYNC_STATS *
2369 sync_consume_sync_stats_from_pool (SYNC_STATS_CHUNK * sync_stats_chunk, int idx, SYNC_PRIMITIVE_TYPE sync_prim_type,
2370  const char *name)
2371 {
2372  SYNC_STATS *stats;
2373 
2374  assert (sync_stats_chunk != NULL);
2375  assert (SYNC_TYPE_NONE < sync_prim_type && sync_prim_type <= SYNC_TYPE_LAST);
2376  assert (0 <= idx && idx < NUM_ENTRIES_OF_SYNC_STATS_BLOCK);
2377  assert (sync_stats_chunk->block[idx].type == SYNC_TYPE_NONE);
2378  assert (0 <= sync_stats_chunk->num_entry_in_use
2379  && sync_stats_chunk->num_entry_in_use < NUM_ENTRIES_OF_SYNC_STATS_BLOCK);
2380 
2381  stats = &sync_stats_chunk->block[idx];
2382 
2383  stats->name = name;
2384  stats->type = sync_prim_type;
2385  sync_reset_stats_metrics (stats);
2386 
2387  sync_stats_chunk->num_entry_in_use++;
2388  sync_stats_chunk->hint_free_entry_idx = (idx + 1) % NUM_ENTRIES_OF_SYNC_STATS_BLOCK;
2389 
2390  return stats;
2391 }
2392 
2393 /*
2394  * sync_return_sync_stats_to_pool () - mark the entry at idx of the given chunk as free again
2395  * return: NO_ERROR
2396  *
2397  */
2398 static int
2399 sync_return_sync_stats_to_pool (SYNC_STATS_CHUNK * sync_stats_chunk, int idx)
2400 {
2401  assert (sync_stats_chunk != NULL);
2402  assert (0 <= idx && idx < NUM_ENTRIES_OF_SYNC_STATS_BLOCK);
2403  assert (SYNC_TYPE_NONE < sync_stats_chunk->block[idx].type && sync_stats_chunk->block[idx].type <= SYNC_TYPE_LAST);
2404  assert (0 < sync_stats_chunk->num_entry_in_use
2405  && sync_stats_chunk->num_entry_in_use <= NUM_ENTRIES_OF_SYNC_STATS_BLOCK);
2406 
2407  sync_stats_chunk->block[idx].type = SYNC_TYPE_NONE;
2408  sync_stats_chunk->block[idx].name = NULL;
2409 
2410  sync_stats_chunk->num_entry_in_use--;
2411  sync_stats_chunk->hint_free_entry_idx = idx;
2412 
2413  return NO_ERROR;
2414 }
2415 
2416 /*
2417  * sync_allocate_sync_stats () - allocate a stats entry from the pool, growing the chunk list when it is full
2418  * return: the allocated stats entry or NULL
2419  *
2420  */
2421 static SYNC_STATS *
2422 sync_allocate_sync_stats (SYNC_PRIMITIVE_TYPE sync_prim_type, const char *name)
2423 {
2424  SYNC_STATS_CHUNK *p, *last_chunk, *new_chunk;
2425  SYNC_STATS *stats = NULL;
2426  int i, idx;
2427 
2428  pthread_mutex_lock (&sync_Stats_lock);
2429 
2430  p = &sync_Stats;
2431  while (p != NULL)
2432  {
2433  if (p->num_entry_in_use < NUM_ENTRIES_OF_SYNC_STATS_BLOCK)
2434  {
2435  assert (0 <= p->hint_free_entry_idx && p->hint_free_entry_idx < NUM_ENTRIES_OF_SYNC_STATS_BLOCK);
2436 
2437  for (i = 0, idx = p->hint_free_entry_idx; i < NUM_ENTRIES_OF_SYNC_STATS_BLOCK; i++)
2438  {
2439  if (p->block[idx].type == SYNC_TYPE_NONE)
2440  {
2441  stats = sync_consume_sync_stats_from_pool (p, idx, sync_prim_type, name);
2442 
2444  return stats;
2445  }
2446 
2447  idx = (idx + 1) % NUM_ENTRIES_OF_SYNC_STATS_BLOCK;
2448  }
2449  }
2450 
2451  last_chunk = p;
2452 
2453  p = p->next;
2454  }
2455 
2456  /* none is available. allocate a block */
2457  new_chunk = sync_allocate_sync_stats_chunk ();
2458  if (new_chunk == NULL)
2459  {
2460  /* error was set */
2461  pthread_mutex_unlock (&sync_Stats_lock);
2462  return NULL;
2463  }
2464 
2465  last_chunk->next = new_chunk;
2466 
2467  stats = sync_consume_sync_stats_from_pool (new_chunk, 0, sync_prim_type, name);
2468 
2469  pthread_mutex_unlock (&sync_Stats_lock);
2470 
2471  return stats;
2472 }
2473 
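/*
 * Editorial sketch -- not part of the original critical_section.c source.
 * It shows how a synchronization primitive would obtain a SYNC_STATS slot from
 * the chunked pool above and hand it back later: sync_allocate_sync_stats ()
 * reuses a free entry of an existing chunk and appends a new chunk only when
 * every entry is in use, while sync_deallocate_sync_stats () finds the owning
 * chunk by pointer range and marks the entry free. The wrapper names below are
 * hypothetical; the initialize/finalize paths of this file are expected to
 * follow the same pattern.
 */
static SYNC_STATS *
example_acquire_stats_slot (const char *name)
{
  /* returns NULL when a new chunk cannot be malloc'ed (the error is already set) */
  return sync_allocate_sync_stats (SYNC_TYPE_RMUTEX, name);
}

static void
example_release_stats_slot (SYNC_STATS * stats)
{
  if (stats != NULL)
    {
      (void) sync_deallocate_sync_stats (stats);
    }
}
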
2474 /*
2475  * sync_deallocate_sync_stats () - return the given stats entry to the pool
2476  * return: NO_ERROR
2477  *
2478  */
2479 static int
2480 sync_deallocate_sync_stats (SYNC_STATS * stats)
2481 {
2482  SYNC_STATS_CHUNK *p;
2483  int idx;
2484  bool found = false;
2485 
2486  assert (stats != NULL);
2487 
2488  pthread_mutex_lock (&sync_Stats_lock);
2489 
2490  p = &sync_Stats;
2491  while (p != NULL)
2492  {
2493  if (0 < p->num_entry_in_use && p->block <= stats && stats <= p->block + NUM_ENTRIES_OF_SYNC_STATS_BLOCK)
2494  {
2495  idx = (int) (stats - p->block);
2496 
2497  assert (SYNC_TYPE_NONE < p->block[idx].type && p->block[idx].type <= SYNC_TYPE_LAST);
2498 
2499  sync_return_sync_stats_to_pool (p, idx);
2500 
2501  found = true;
2502  break;
2503  }
2504 
2505  p = p->next;
2506  }
2507 
2508  pthread_mutex_unlock (&sync_Stats_lock);
2509 
2510  assert (found == true);
2511 
2512  return NO_ERROR;
2513 }
2514 
2515 /*
2516  * rmutex_dump_statistics() - dump rmutex statistics
2517  * return: void
2518  */
2519 void
2520 rmutex_dump_statistics (FILE * fp)
2521 {
2522  SYNC_STATS_CHUNK *p;
2523  SYNC_STATS *stats;
2524  int i, cnt;
2525 
2526  fprintf (fp, "\n RMutex Name |Total Enter|Total Reenter| Max elapsed | Total elapsed\n");
2527 
2528  pthread_mutex_lock (&sync_Stats_lock);
2529 
2530  p = &sync_Stats;
2531  while (p != NULL)
2532  {
2533  for (i = 0, cnt = 0; cnt < p->num_entry_in_use && i < NUM_ENTRIES_OF_SYNC_STATS_BLOCK; i++)
2534  {
2535  stats = &p->block[i];
2536  if (stats->type == SYNC_TYPE_RMUTEX)
2537  {
2538  cnt++;
2539 
2540  fprintf (fp, "%-28s |%10d | %10d | %6ld.%06ld | %6ld.%06ld\n", stats->name, stats->nenter,
2541  stats->nreenter, stats->max_elapsed.tv_sec, stats->max_elapsed.tv_usec,
2542  stats->total_elapsed.tv_sec, stats->total_elapsed.tv_usec);
2543 
2544  /* reset statistics */
2545  sync_reset_stats_metrics (stats);
2546  }
2547  }
2548 
2549  p = p->next;
2550  }
2551 
2552  pthread_mutex_unlock (&sync_Stats_lock);
2553 
2554  fflush (fp);
2555 }
2556 
2557 /*
2558  * sync_dump_statistics() - dump statistics of synchronization primitives
2559  * return: void
2560  */
2561 void
2562 sync_dump_statistics (FILE * fp, SYNC_PRIMITIVE_TYPE type)
2563 {
2564  if (type == SYNC_TYPE_ALL || type == SYNC_TYPE_CSECT)
2565  {
2566  csect_dump_statistics (fp);
2567  }
2568 
2569  if (type == SYNC_TYPE_ALL || type == SYNC_TYPE_RWLOCK)
2570  {
2571  rwlock_dump_statistics (fp);
2572  }
2573 
2574  if (type == SYNC_TYPE_ALL || type == SYNC_TYPE_RMUTEX)
2575  {
2576  rmutex_dump_statistics (fp);
2577  }
2578 }
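/*
 * Editorial sketch -- not part of the original critical_section.c source.
 * It shows how the collected statistics are dumped: sync_dump_statistics ()
 * fans out to the per-primitive dump routines, and SYNC_TYPE_ALL selects all
 * of them at once; rmutex_dump_statistics () above also resets the counters it
 * prints. The function name example_dump_sync_stats is hypothetical.
 */
static void
example_dump_sync_stats (FILE * out_fp)
{
  /* dump every kind of primitive ... */
  sync_dump_statistics (out_fp, SYNC_TYPE_ALL);

  /* ... or a single kind, e.g. only the recursive mutexes */
  sync_dump_statistics (out_fp, SYNC_TYPE_RMUTEX);
}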