heap_file.c
1 /*
2  * Copyright 2008 Search Solution Corporation
3  * Copyright 2016 CUBRID Corporation
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /*
20  * heap_file.c - heap file manager
21  */
22 
23 #ident "$Id$"
24 
25 #if !defined(WINDOWS)
26 #define __STDC_FORMAT_MACROS
27 #include <inttypes.h>
28 #endif
29 
30 #include "config.h"
31 
32 #include <stdio.h>
33 #include <string.h>
34 #include <errno.h>
35 
36 #include "heap_file.h"
37 
38 #include "porting.h"
39 #include "porting_inline.hpp"
40 #include "record_descriptor.hpp"
41 #include "slotted_page.h"
42 #include "overflow_file.h"
43 #include "boot_sr.h"
44 #include "locator_sr.h"
45 #include "btree.h"
46 #include "btree_unique.hpp"
47 #include "transform.h" /* for CT_SERIAL_NAME */
48 #include "serial.h"
49 #include "object_primitive.h"
50 #include "object_representation.h"
52 #include "xserver_interface.h"
53 #include "chartype.h"
54 #include "query_executor.h"
55 #include "fetch.h"
56 #include "server_interface.h"
57 #include "elo.h"
58 #include "db_elo.h"
59 #include "string_opfunc.h"
60 #include "xasl.h"
61 #include "xasl_unpack_info.hpp"
62 #include "stream_to_xasl.h"
63 #include "query_opfunc.h"
64 #include "set_object.h"
65 #if defined(ENABLE_SYSTEMTAP)
66 #include "probes.h"
67 #endif /* ENABLE_SYSTEMTAP */
68 #include "dbtype.h"
69 #include "thread_manager.hpp" // for thread_get_thread_entry_info
70 #include "db_value_printer.hpp"
71 #include "log_append.hpp"
72 #include "string_buffer.hpp"
73 #include "tde.h"
74 
75 #include <set>
76 
77 #if !defined(SERVER_MODE)
78 #define pthread_mutex_init(a, b)
79 #define pthread_mutex_destroy(a)
80 #define pthread_mutex_lock(a) 0
81 #define pthread_mutex_trylock(a) 0
82 #define pthread_mutex_unlock(a)
83 static int rv;
84 #endif /* not SERVER_MODE */
85 
86 #define HEAP_BESTSPACE_SYNC_THRESHOLD (0.1f)
87 
88 /* ATTRIBUTE LOCATION */
89 
90 #define OR_FIXED_ATTRIBUTES_OFFSET_BY_OBJ(obj, nvars) \
91  (OR_HEADER_SIZE(obj) + OR_VAR_TABLE_SIZE_INTERNAL(nvars, OR_GET_OFFSET_SIZE(obj)))
92 
93 #define HEAP_GUESS_NUM_ATTRS_REFOIDS 100
94 #define HEAP_GUESS_NUM_INDEXED_ATTRS 100
95 
96 #define HEAP_CLASSREPR_MAXCACHE 1024
97 
98 #define HEAP_STATS_ENTRY_MHT_EST_SIZE 1000
99 #define HEAP_STATS_ENTRY_FREELIST_SIZE 1000
100 
101 /* A good amount of free space for accepting insertions */
102 #define HEAP_DROP_FREE_SPACE (int)(DB_PAGESIZE * 0.3)
103 
104 #define HEAP_DEBUG_SCANCACHE_INITPATTERN (12345)
105 
106 #if defined(CUBRID_DEBUG)
107 #define HEAP_DEBUG_ISVALID_SCANRANGE(scan_range) \
108  heap_scanrange_isvalid(scan_range)
109 #else /* CUBRID_DEBUG */
110 #define HEAP_DEBUG_ISVALID_SCANRANGE(scan_range) (DISK_VALID)
111 #endif /* !CUBRID_DEBUG */
112 
113 #define HEAP_IS_PAGE_OF_OID(thread_p, pgptr, oid) \
114  (((pgptr) != NULL) \
115  && pgbuf_get_volume_id (pgptr) == (oid)->volid \
116  && pgbuf_get_page_id (pgptr) == (oid)->pageid)
117 
118 #define MVCC_SET_DELETE_INFO(mvcc_delete_info_p, row_delete_id, \
119  satisfies_del_result) \
120  do \
121  { \
122  assert ((mvcc_delete_info_p) != NULL); \
123  (mvcc_delete_info_p)->row_delid = (row_delete_id); \
124  (mvcc_delete_info_p)->satisfies_delete_result = (satisfies_del_result); \
125  } \
126  while (0)
127 
128 #define HEAP_MVCC_SET_HEADER_MAXIMUM_SIZE(mvcc_rec_header_p) \
129  do \
130  { \
131  if (!MVCC_IS_FLAG_SET (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_INSID)) \
132  { \
133  MVCC_SET_FLAG_BITS (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_INSID); \
134  MVCC_SET_INSID (mvcc_rec_header_p, MVCCID_ALL_VISIBLE); \
135  } \
136  if (!MVCC_IS_FLAG_SET (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_DELID)) \
137  { \
138  MVCC_SET_FLAG_BITS (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_DELID); \
139  MVCC_SET_DELID (mvcc_rec_header_p, MVCCID_NULL); \
140  } \
141  if (!MVCC_IS_FLAG_SET (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_PREV_VERSION)) \
142  { \
143  MVCC_SET_FLAG_BITS (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_PREV_VERSION); \
144  LSA_SET_NULL(&(mvcc_rec_header_p)->prev_version_lsa); \
145  } \
146  } \
147  while (0)
148 
149 #if defined (SERVER_MODE)
150 #define HEAP_UPDATE_IS_MVCC_OP(is_mvcc_class, update_style) \
151  ((is_mvcc_class) && (!HEAP_IS_UPDATE_INPLACE (update_style)) ? (true) : (false))
152 #else
153 #define HEAP_UPDATE_IS_MVCC_OP(is_mvcc_class, update_style) (false)
154 #endif
155 
156 #define HEAP_SCAN_ORDERED_HFID(scan) \
157  (((scan) != NULL) ? (&(scan)->node.hfid) : (PGBUF_ORDERED_NULL_HFID))
158 
159 typedef enum
160 {
165 
166 /*
167  * Prefetching directions
168  */
169 
170 typedef enum
171 {
172  HEAP_DIRECTION_NONE, /* No prefetching */
173  HEAP_DIRECTION_LEFT, /* Prefetching at the left */
174  HEAP_DIRECTION_RIGHT, /* Prefetching at the right */
175  HEAP_DIRECTION_BOTH /* Prefetching in both directions: left and right */
176 } HEAP_DIRECTION;
177 
178 /*
179  * Heap file header
180  */
181 
182 #define HEAP_NUM_BEST_SPACESTATS 10
183 
184 /* calculate an index of best array */
185 #define HEAP_STATS_NEXT_BEST_INDEX(i) \
186  (((i) + 1) % HEAP_NUM_BEST_SPACESTATS)
187 #define HEAP_STATS_PREV_BEST_INDEX(i) \
188  (((i) == 0) ? (HEAP_NUM_BEST_SPACESTATS - 1) : ((i) - 1));
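/*
 * Illustration (not part of the original file): a minimal standalone sketch of the wrap-around
 * arithmetic behind HEAP_STATS_NEXT_BEST_INDEX / HEAP_STATS_PREV_BEST_INDEX for a circular array
 * of HEAP_NUM_BEST_SPACESTATS (10) hints. The sketch_* names are local to this example only.
 */
#if 0				/* illustration only */
#include <assert.h>

#define SKETCH_NUM_BEST 10

static int
sketch_next_best_index (int i)
{
  return (i + 1) % SKETCH_NUM_BEST;	/* 9 wraps around to 0 */
}

static int
sketch_prev_best_index (int i)
{
  return (i == 0) ? (SKETCH_NUM_BEST - 1) : (i - 1);	/* 0 wraps around to 9 */
}

static void
sketch_check_best_index (void)
{
  assert (sketch_next_best_index (9) == 0);
  assert (sketch_prev_best_index (0) == 9);
  assert (sketch_prev_best_index (sketch_next_best_index (4)) == 4);
}
#endif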
189 
190 typedef struct heap_hdr_stats HEAP_HDR_STATS;
191 struct heap_hdr_stats
192 {
193  /* the first must be class_oid */
194  OID class_oid;
195  VFID ovf_vfid; /* Overflow file identifier (if any) */
196  VPID next_vpid; /* Next page (i.e., the 2nd page of heap file) */
197  int unfill_space; /* Stop inserting when a page's free space drops below this; leave it for updates */
198  struct
199  {
200  int num_pages; /* Estimation of number of heap pages. Consult file manager if accurate number is
201  * needed */
202  int num_recs; /* Estimation of number of objects in heap */
203  float recs_sumlen; /* Estimated total length of records */
204  int num_other_high_best; /* Count of other pages believed to be best pages that are not included in the best
205  * array; they are assumed to have at least HEAP_DROP_FREE_SPACE free space */
206  int num_high_best; /* Number of pages in the best array that we believe have at least
207  * HEAP_DROP_FREE_SPACE. When this number reaches zero and at least
208  * HEAP_NUM_BEST_SPACESTATS other best pages are believed to exist, we search for them. */
209  int num_substitutions; /* Number of page substitutions. This will be used to insert a new second best page
210  * into second best hints. */
211  int num_second_best; /* Number of second best hints. The hints are in "second_best" array. They are used
212  * when finding new best pages. See the function "heap_stats_sync_bestspace". */
213  int head_second_best; /* Index of head of second best hints. */
214  int tail_second_best; /* Index of tail of second best hints. A new second best hint will be stored on this
215  * index. */
216  int head; /* Head of best circular array */
217  VPID last_vpid; /* todo: move out of estimates */
219  VPID second_best[HEAP_NUM_BEST_SPACESTATS]; /* Second best hints. See num_second_best. */
220  HEAP_BESTSPACE best[HEAP_NUM_BEST_SPACESTATS]; /* Best hints for pages with free space */
221  } estimates; /* Likely the set of pages with the most free space in the heap. Changes to the values
222  * of this array (either the page or its free space) are not logged, since these values
223  * are only used as hints. They may be inaccurate at any given time and the entries
224  * may contain duplicate pages. */
225 
226  int reserve0_for_future; /* Nothing reserved for future */
227  int reserve1_for_future; /* Nothing reserved for future */
228  int reserve2_for_future; /* Nothing reserved for future */
229 };
230 
231 typedef struct heap_stats_entry HEAP_STATS_ENTRY;
232 struct heap_stats_entry
233 {
234  HFID hfid; /* heap file identifier */
235  HEAP_BESTSPACE best; /* best space info */
236  HEAP_STATS_ENTRY *next; /* next entry in the free list */
237 };
238 
239 /* Define heap page flags. */
240 #define HEAP_PAGE_FLAG_VACUUM_STATUS_MASK 0xC0000000
241 #define HEAP_PAGE_FLAG_VACUUM_ONCE 0x80000000
242 #define HEAP_PAGE_FLAG_VACUUM_UNKNOWN 0x40000000
243 
244 #define HEAP_PAGE_SET_VACUUM_STATUS(chain, status) \
245  do \
246  { \
247  assert ((status) == HEAP_PAGE_VACUUM_NONE \
248  || (status) == HEAP_PAGE_VACUUM_ONCE \
249  || (status) == HEAP_PAGE_VACUUM_UNKNOWN); \
250  (chain)->flags &= ~HEAP_PAGE_FLAG_VACUUM_STATUS_MASK; \
251  if ((status) == HEAP_PAGE_VACUUM_ONCE) \
252  { \
253  (chain)->flags |= HEAP_PAGE_FLAG_VACUUM_ONCE; \
254  } \
255  else if ((status) == HEAP_PAGE_VACUUM_UNKNOWN) \
256  { \
257  (chain)->flags |= HEAP_PAGE_FLAG_VACUUM_UNKNOWN; \
258  } \
259  } \
260  while (false)
261 
262 #define HEAP_PAGE_GET_VACUUM_STATUS(chain) \
263  (((chain)->flags & HEAP_PAGE_FLAG_VACUUM_STATUS_MASK) == 0 \
264  ? HEAP_PAGE_VACUUM_NONE \
265  : ((((chain)->flags & HEAP_PAGE_FLAG_VACUUM_STATUS_MASK) \
266  == HEAP_PAGE_FLAG_VACUUM_ONCE) \
267  ? HEAP_PAGE_VACUUM_ONCE : HEAP_PAGE_VACUUM_UNKNOWN))
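/*
 * Illustration (not part of the original file): a standalone sketch of how the two high bits of a
 * heap page's 'flags' word encode the vacuum status, mirroring HEAP_PAGE_SET_VACUUM_STATUS and
 * HEAP_PAGE_GET_VACUUM_STATUS above. The sketch_* names are local to this example only.
 */
#if 0				/* illustration only */
#include <assert.h>
#include <stdint.h>

#define SKETCH_VACUUM_STATUS_MASK 0xC0000000u
#define SKETCH_VACUUM_ONCE        0x80000000u
#define SKETCH_VACUUM_UNKNOWN     0x40000000u

enum sketch_vacuum_status
{ SKETCH_STATUS_NONE, SKETCH_STATUS_ONCE, SKETCH_STATUS_UNKNOWN };

static void
sketch_set_vacuum_status (uint32_t * flags, enum sketch_vacuum_status status)
{
  *flags &= ~SKETCH_VACUUM_STATUS_MASK;	/* clear the two status bits, keep the rest */
  if (status == SKETCH_STATUS_ONCE)
    {
      *flags |= SKETCH_VACUUM_ONCE;
    }
  else if (status == SKETCH_STATUS_UNKNOWN)
    {
      *flags |= SKETCH_VACUUM_UNKNOWN;
    }
}

static enum sketch_vacuum_status
sketch_get_vacuum_status (uint32_t flags)
{
  uint32_t bits = flags & SKETCH_VACUUM_STATUS_MASK;
  return (bits == 0) ? SKETCH_STATUS_NONE : (bits == SKETCH_VACUUM_ONCE ? SKETCH_STATUS_ONCE : SKETCH_STATUS_UNKNOWN);
}

static void
sketch_check_vacuum_status (void)
{
  uint32_t flags = 0x0000ABCDu;	/* low bits are unrelated page flags */
  sketch_set_vacuum_status (&flags, SKETCH_STATUS_ONCE);
  assert (sketch_get_vacuum_status (flags) == SKETCH_STATUS_ONCE);
  assert ((flags & ~SKETCH_VACUUM_STATUS_MASK) == 0x0000ABCDu);	/* other bits untouched */
}
#endif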
268 
269 typedef struct heap_chain HEAP_CHAIN;
270 struct heap_chain
271 { /* Double-linked */
272  /* the first must be class_oid */
273  OID class_oid;
274  VPID prev_vpid; /* Previous page */
275  VPID next_vpid; /* Next page */
276  MVCCID max_mvccid; /* Max MVCCID of any MVCC operations in page. */
277  INT32 flags; /* Flags for heap page. 2 bits are used for vacuum state. */
278 };
279 
280 #define HEAP_CHK_ADD_UNFOUND_RELOCOIDS 100
281 
284 {
287 };
288 
291 {
292  MHT_TABLE *ht; /* Hash table used to keep track of relocated records. The key of the hash table
293  * is the relocation OID; the data is the real OID */
294  bool verify;
295  bool verify_not_vacuumed; /* if true, each record is also checked for not having been vacuumed even though it
296  * should have been */
297  DISK_ISVALID not_vacuumed_res; /* The validation result of the "not vacuumed" objects */
300  OID *unfound_reloc_oids; /* The relocation OIDs that have not been found in hash table */
301 };
302 
303 #define DEFAULT_REPR_INCREMENT 16
304 
305 enum
306 { ZONE_VOID = 1, ZONE_FREE = 2, ZONE_LRU = 3 };
307 
310 {
311  pthread_mutex_t mutex;
312  int idx; /* Cache index. Used to pass the index when a class representation is in the cache */
313  int fcnt; /* How many times this structure has been fixed. It cannot be deallocated until this
314  * value is zero. */
315  int zone; /* ZONE_VOID, ZONE_LRU, ZONE_FREE */
317 
320  HEAP_CLASSREPR_ENTRY *prev; /* prev. entry in LRU list */
321  HEAP_CLASSREPR_ENTRY *next; /* next entry in LRU or free list */
322 
323  /* real data */
324  OID class_oid; /* Identifier of the class representation */
325 
326  OR_CLASSREP **repr; /* A particular representation of the class */
327  int last_reprid;
328  int max_reprid; /* number of OR_CLASSREP pointers allocated in repr */
329 };
330 
333 {
337 };
338 
341 {
342  pthread_mutex_t hash_mutex;
343  int idx;
346 };
347 
350 {
351  pthread_mutex_t LRU_mutex;
354 };
355 
358 {
359  pthread_mutex_t free_mutex;
361  int free_cnt;
362 };
363 
366 {
369  int num_hash;
375 #ifdef DEBUG_CLASSREPR_CACHE
376  int num_fix_entries;
377  pthread_mutex_t num_fix_entries_mutex;
378 #endif /* DEBUG_CLASSREPR_CACHE */
379 };
380 
382  -1,
383  NULL,
384  -1,
385  NULL,
386  NULL,
387  {
388  PTHREAD_MUTEX_INITIALIZER,
389  NULL,
390  NULL},
391  {
392  PTHREAD_MUTEX_INITIALIZER,
393  NULL,
394  -1},
395  {{NULL_FILEID, NULL_VOLID}, NULL_PAGEID} /* rootclass_hfid */
396 #ifdef DEBUG_CLASSREPR_CACHE
397  , 0, PTHREAD_MUTEX_INITIALIZER
398 #endif /* DEBUG_CLASSREPR_CACHE */
399 };
400 
401 #define CLASSREPR_REPR_INCREMENT 10
402 #define CLASSREPR_HASH_SIZE (heap_Classrepr_cache.num_entries * 2)
403 #define REPR_HASH(class_oid) (OID_PSEUDO_KEY(class_oid)%CLASSREPR_HASH_SIZE)
404 
405 #define HEAP_MAYNEED_DECACHE_GUESSED_LASTREPRS(class_oid, hfid) \
406  do \
407  { \
408  if (heap_Classrepr != NULL && (hfid) != NULL) \
409  { \
410  if (HFID_IS_NULL (&(heap_Classrepr->rootclass_hfid))) \
411  (void) boot_find_root_heap (&(heap_Classrepr->rootclass_hfid)); \
412  if (HFID_EQ ((hfid), &(heap_Classrepr->rootclass_hfid))) \
413  (void) heap_classrepr_decache_guessed_last (class_oid); \
414  } \
415  } \
416  while (0)
417 
418 #define HEAP_CHNGUESS_FUDGE_MININDICES (100)
419 #define HEAP_NBITS_IN_BYTE (8)
420 #define HEAP_NSHIFTS (3) /* For multiplication/division by 8 */
421 #define HEAP_BITMASK (HEAP_NBITS_IN_BYTE - 1)
422 #define HEAP_NBITS_TO_NBYTES(bit_cnt) \
423  ((unsigned int)((bit_cnt) + HEAP_BITMASK) >> HEAP_NSHIFTS)
424 #define HEAP_NBYTES_TO_NBITS(byte_cnt) ((unsigned int)(byte_cnt) << HEAP_NSHIFTS)
425 #define HEAP_NBYTES_CLEARED(byte_ptr, byte_cnt) \
426  memset((byte_ptr), '\0', (byte_cnt))
427 #define HEAP_BYTEOFFSET_OFBIT(bit_num) ((unsigned int)(bit_num) >> HEAP_NSHIFTS)
428 #define HEAP_BYTEGET(byte_ptr, bit_num) \
429  ((unsigned char *)(byte_ptr) + HEAP_BYTEOFFSET_OFBIT(bit_num))
430 
431 #define HEAP_BITMASK_INBYTE(bit_num) \
432  (1 << ((unsigned int)(bit_num) & HEAP_BITMASK))
433 #define HEAP_BIT_GET(byte_ptr, bit_num) \
434  (*HEAP_BYTEGET(byte_ptr, bit_num) & HEAP_BITMASK_INBYTE(bit_num))
435 #define HEAP_BIT_SET(byte_ptr, bit_num) \
436  (*HEAP_BYTEGET(byte_ptr, bit_num) = \
437  *HEAP_BYTEGET(byte_ptr, bit_num) | HEAP_BITMASK_INBYTE(bit_num))
438 #define HEAP_BIT_CLEAR(byte_ptr, bit_num) \
439  (*HEAP_BYTEGET(byte_ptr, bit_num) = \
440  *HEAP_BYTEGET(byte_ptr, bit_num) & ~HEAP_BITMASK_INBYTE(bit_num))
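/*
 * Illustration (not part of the original file): a standalone sketch of the byte/bit arithmetic used
 * by the HEAP_BIT_* macros above; bit n lives in byte (n >> 3) under mask (1 << (n & 7)).
 * The sketch_* names are local to this example only.
 */
#if 0				/* illustration only */
#include <assert.h>
#include <string.h>

static void
sketch_bit_set (unsigned char *bytes, unsigned int bit_num)
{
  bytes[bit_num >> 3] |= (unsigned char) (1 << (bit_num & 7));
}

static int
sketch_bit_get (const unsigned char *bytes, unsigned int bit_num)
{
  return (bytes[bit_num >> 3] >> (bit_num & 7)) & 1;
}

static void
sketch_bit_clear (unsigned char *bytes, unsigned int bit_num)
{
  bytes[bit_num >> 3] &= (unsigned char) ~(1 << (bit_num & 7));
}

static void
sketch_check_bits (void)
{
  unsigned char map[4];

  memset (map, 0, sizeof (map));	/* the HEAP_NBYTES_CLEARED equivalent */
  sketch_bit_set (map, 13);		/* byte 1, mask 0x20 */
  assert (sketch_bit_get (map, 13) == 1);
  assert (map[1] == 0x20);
  sketch_bit_clear (map, 13);
  assert (sketch_bit_get (map, 13) == 0);
}
#endif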
441 
444 { /* Currently, only classes are cached */
445  int idx; /* Index number of this entry */
446  int chn; /* Cache coherence number of object */
447  bool recently_accessed; /* Reference value 0/1 used by replacement clock algorithm */
448  OID oid; /* Identifier of object */
449  unsigned char *bits; /* Bit index array describing client transaction indices. Bit n corresponds to client
450  * tran index n. If the bit is ON, we guess that the object is cached in the workspace
451  * of that client. */
452 };
453 
456 {
457  MHT_TABLE *ht; /* Hash table for guessing chn */
458  HEAP_CHNGUESS_ENTRY *entries; /* Pointers to entry structures. More than one entry */
459  unsigned char *bitindex; /* Bit index array for each entry. Describe all entries. Each entry is subdivided into
460  * nbytes. */
461  bool schema_change; /* Has the schema been changed */
462  int clock_hand; /* Clock hand for replacement */
463  int num_entries; /* Number of guesschn entries */
464  int num_clients; /* Number of clients in bitindex for each entry */
465  int nbytes; /* Number of bytes in bitindex. It must be aligned to multiples of 4 bytes (integers) */
466 };
467 
470 {
471  int num_stats_entries; /* number of cache entries in use */
472  MHT_TABLE *hfid_ht; /* HFID Hash table for best space */
473  MHT_TABLE *vpid_ht; /* VPID Hash table for best space */
474  int num_alloc; /* number of entries allocated with malloc */
475  int num_free; /* number of entries released back to the allocator */
476  int free_list_count; /* number of entries in the free list */
477  HEAP_STATS_ENTRY *free_list; /* list of reusable entries */
478  pthread_mutex_t bestspace_mutex;
479 };
480 
483 {
484  HFID *hfids; /* Array of class HFID */
485  int hfids_count; /* Count of above hfids array */
486 };
487 
489 static int heap_Slotted_overhead = 4; /* sizeof (SPAGE_SLOT) */
490 static const int heap_Find_best_page_limit = 100;
491 
494  0, 0, 0
495 };
496 
498 
500  { 0, NULL, NULL, 0, 0, 0, NULL, PTHREAD_MUTEX_INITIALIZER };
501 
503 
506 };
507 
509 
510 #define heap_hfid_table_log(thp, oidp, msg, ...) \
511  if (heap_Hfid_table->logging) \
512  er_print_callstack (ARG_FILE_LINE, "HEAP_INFO_CACHE[thr(%d),tran(%d,%d),OID(%d|%d|%d)]: " msg "\n", \
513  (thp)->index, LOG_FIND_CURRENT_TDES (thp)->tran_index, LOG_FIND_CURRENT_TDES (thp)->trid, \
514  OID_AS_ARGS (oidp), __VA_ARGS__)
515 
516 /* Recovery. */
517 #define HEAP_RV_FLAG_VACUUM_STATUS_CHANGE 0x8000
518 
519 #define HEAP_PERF_START(thread_p, context) \
520  PERF_UTIME_TRACKER_START (thread_p, (context)->time_track)
521 #define HEAP_PERF_TRACK_PREPARE(thread_p, context) \
522  do \
523  { \
524  if ((context)->time_track == NULL) break; \
525  switch ((context)->type) { \
526  case HEAP_OPERATION_INSERT: \
527  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_INSERT_PREPARE); \
528  break; \
529  case HEAP_OPERATION_DELETE: \
530  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_DELETE_PREPARE); \
531  break; \
532  case HEAP_OPERATION_UPDATE: \
533  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_UPDATE_PREPARE); \
534  break; \
535  default: \
536  assert (false); \
537  } \
538  } \
539  while (false)
540 #define HEAP_PERF_TRACK_EXECUTE(thread_p, context) \
541  do \
542  { \
543  if ((context)->time_track == NULL) break; \
544  switch ((context)->type) { \
545  case HEAP_OPERATION_INSERT: \
546  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, \
547  (context)->time_track,\
548  PSTAT_HEAP_INSERT_EXECUTE); \
549  break; \
550  case HEAP_OPERATION_DELETE: \
551  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_DELETE_EXECUTE); \
552  break; \
553  case HEAP_OPERATION_UPDATE: \
554  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_UPDATE_EXECUTE); \
555  break; \
556  default: \
557  assert (false); \
558  } \
559  } \
560  while (false)
561 #define HEAP_PERF_TRACK_LOGGING(thread_p, context) \
562  do \
563  { \
564  if ((context)->time_track == NULL) break; \
565  switch ((context)->type) { \
566  case HEAP_OPERATION_INSERT: \
567  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_INSERT_LOG); \
568  break; \
569  case HEAP_OPERATION_DELETE: \
570  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_DELETE_LOG); \
571  break; \
572  case HEAP_OPERATION_UPDATE: \
573  PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_UPDATE_LOG); \
574  break; \
575  default: \
576  assert (false); \
577  } \
578  } \
579  while (false)
580 
581 #define heap_bestspace_log(...) \
582  if (prm_get_bool_value (PRM_ID_DEBUG_BESTSPACE)) _er_log_debug (ARG_FILE_LINE, __VA_ARGS__)
583 
584 #if defined (NDEBUG)
585 static PAGE_PTR heap_scan_pb_lock_and_fetch (THREAD_ENTRY * thread_p, const VPID * vpid_ptr, PAGE_FETCH_MODE fetch_mode,
586  LOCK lock, HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher);
587 #else /* !NDEBUG */
588 #define heap_scan_pb_lock_and_fetch(...) \
589  heap_scan_pb_lock_and_fetch_debug (__VA_ARGS__, ARG_FILE_LINE)
590 
591 static PAGE_PTR heap_scan_pb_lock_and_fetch_debug (THREAD_ENTRY * thread_p, const VPID * vpid_ptr,
592  PAGE_FETCH_MODE fetch_mode, LOCK lock, HEAP_SCANCACHE * scan_cache,
593  PGBUF_WATCHER * pg_watcher, const char *caller_file,
594  const int caller_line);
595 #endif /* !NDEBUG */
596 
597 static int heap_classrepr_initialize_cache (void);
598 static int heap_classrepr_finalize_cache (void);
600 #ifdef SERVER_MODE
601 static int heap_classrepr_lock_class (THREAD_ENTRY * thread_p, HEAP_CLASSREPR_HASH * hash_anchor,
602  const OID * class_oid);
603 static int heap_classrepr_unlock_class (HEAP_CLASSREPR_HASH * hash_anchor, const OID * class_oid, int need_hash_mutex);
604 #endif
605 
606 static int heap_classrepr_dump (THREAD_ENTRY * thread_p, FILE * fp, const OID * class_oid, const OR_CLASSREP * repr);
607 #ifdef DEBUG_CLASSREPR_CACHE
608 static int heap_classrepr_dump_cache (bool simple_dump);
609 #endif /* DEBUG_CLASSREPR_CACHE */
610 
611 static int heap_classrepr_entry_reset (HEAP_CLASSREPR_ENTRY * cache_entry);
614 static int heap_classrepr_entry_free (HEAP_CLASSREPR_ENTRY * cache_entry);
615 
616 static OR_CLASSREP *heap_classrepr_get_from_record (THREAD_ENTRY * thread_p, REPR_ID * last_reprid,
617  const OID * class_oid, RECDES * class_recdes, REPR_ID reprid);
618 static int heap_stats_get_min_freespace (HEAP_HDR_STATS * heap_hdr);
619 static int heap_stats_update_internal (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * lotspace_vpid,
620  int free_space);
621 static void heap_stats_put_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid);
622 static int heap_stats_get_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid);
623 #if defined(ENABLE_UNUSED_FUNCTION)
624 static int heap_stats_quick_num_fit_in_bestspace (HEAP_BESTSPACE * bestspace, int num_entries, int unit_size,
625  int unfill_space);
626 #endif
628  HEAP_BESTSPACE * bestspace, int *idx_badspace,
629  int record_length, int needed_space,
630  HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher);
631 static PAGE_PTR heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int needed_space, bool isnew_rec,
632  int newrec_size, HEAP_SCANCACHE * space_cache, PGBUF_WATCHER * pg_watcher);
633 static int heap_stats_sync_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr,
634  VPID * hdr_vpid, bool scan_all, bool can_cycle);
635 
636 static int heap_get_last_page (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr,
637  HEAP_SCANCACHE * scan_cache, VPID * last_vpid, PGBUF_WATCHER * pg_watcher);
638 
639 static int heap_vpid_init_new (THREAD_ENTRY * thread_p, PAGE_PTR page, void *args);
640 static int heap_vpid_alloc (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR hdr_pgptr, HEAP_HDR_STATS * heap_hdr,
641  HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * new_pg_watcher);
642 static VPID *heap_vpid_remove (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, VPID * rm_vpid);
643 
644 static int heap_create_internal (THREAD_ENTRY * thread_p, HFID * hfid, const OID * class_oid, const bool reuse_oid);
645 static const HFID *heap_reuse (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid, const bool reuse_oid);
646 static bool heap_delete_all_page_records (THREAD_ENTRY * thread_p, const VPID * vpid, PAGE_PTR pgptr);
647 static int heap_reinitialize_page (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, const bool is_header_page);
648 #if defined(CUBRID_DEBUG)
649 static DISK_ISVALID heap_hfid_isvalid (HFID * hfid);
650 static DISK_ISVALID heap_scanrange_isvalid (HEAP_SCANRANGE * scan_range);
651 #endif /* CUBRID_DEBUG */
652 static OID *heap_ovf_insert (THREAD_ENTRY * thread_p, const HFID * hfid, OID * ovf_oid, RECDES * recdes);
653 static const OID *heap_ovf_update (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * ovf_oid, RECDES * recdes);
654 static int heap_ovf_flush (THREAD_ENTRY * thread_p, const OID * ovf_oid);
655 static int heap_ovf_get_length (THREAD_ENTRY * thread_p, const OID * ovf_oid);
656 static SCAN_CODE heap_ovf_get (THREAD_ENTRY * thread_p, const OID * ovf_oid, RECDES * recdes, int chn,
658 static int heap_ovf_get_capacity (THREAD_ENTRY * thread_p, const OID * ovf_oid, int *ovf_len, int *ovf_num_pages,
659  int *ovf_overhead, int *ovf_free_space);
660 
662  HEAP_SCANCACHE ** scan_cache);
663 static int heap_scancache_start_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
664  const OID * class_oid, int cache_last_fix_page, bool is_queryscan,
665  int is_indexscan, MVCC_SNAPSHOT * mvcc_snapshot);
666 static int heap_scancache_force_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache);
667 static int heap_scancache_reset_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
668  const OID * class_oid);
669 static int heap_scancache_quick_start_internal (HEAP_SCANCACHE * scan_cache, const HFID * hfid);
670 static int heap_scancache_quick_end (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache);
671 static int heap_scancache_end_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool scan_state);
672 static SCAN_CODE heap_get_if_diff_chn (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, INT16 slotid, RECDES * recdes,
673  bool ispeeking, int chn, MVCC_SNAPSHOT * mvcc_snapshot);
674 static int heap_estimate_avg_length (THREAD_ENTRY * thread_p, const HFID * hfid, int &avg_reclen);
675 static int heap_get_capacity (THREAD_ENTRY * thread_p, const HFID * hfid, INT64 * num_recs, INT64 * num_recs_relocated,
676  INT64 * num_recs_inovf, INT64 * num_pages, int *avg_freespace, int *avg_freespace_nolast,
677  int *avg_reclength, int *avg_overhead);
678 #if 0 /* TODO: remove unused */
679 static int heap_moreattr_attrinfo (int attrid, HEAP_CACHE_ATTRINFO * attr_info);
680 #endif
681 
682 static int heap_attrinfo_recache_attrepr (HEAP_CACHE_ATTRINFO * attr_info, bool islast_reset);
683 static int heap_attrinfo_recache (THREAD_ENTRY * thread_p, REPR_ID reprid, HEAP_CACHE_ATTRINFO * attr_info);
684 static int heap_attrinfo_check (const OID * inst_oid, HEAP_CACHE_ATTRINFO * attr_info);
685 static int heap_attrinfo_set_uninitialized (THREAD_ENTRY * thread_p, OID * inst_oid, RECDES * recdes,
686  HEAP_CACHE_ATTRINFO * attr_info);
687 static int heap_attrinfo_start_refoids (THREAD_ENTRY * thread_p, OID * class_oid, HEAP_CACHE_ATTRINFO * attr_info);
688 static int heap_attrinfo_get_disksize (HEAP_CACHE_ATTRINFO * attr_info, bool is_mvcc_class, int *offset_size_ptr);
689 
690 static int heap_attrvalue_read (RECDES * recdes, HEAP_ATTRVALUE * value, HEAP_CACHE_ATTRINFO * attr_info);
691 
692 static int heap_midxkey_get_value (RECDES * recdes, OR_ATTRIBUTE * att, DB_VALUE * value,
693  HEAP_CACHE_ATTRINFO * attr_info);
694 static OR_ATTRIBUTE *heap_locate_attribute (ATTR_ID attrid, HEAP_CACHE_ATTRINFO * attr_info);
695 
697  HEAP_CACHE_ATTRINFO * attrinfo, DB_VALUE * func_res, TP_DOMAIN * func_domain,
698  TP_DOMAIN ** key_domain);
699 static DB_MIDXKEY *heap_midxkey_key_generate (THREAD_ENTRY * thread_p, RECDES * recdes, DB_MIDXKEY * midxkey,
700  int *att_ids, HEAP_CACHE_ATTRINFO * attrinfo, DB_VALUE * func_res,
701  int func_col_id, int func_attr_index_start, TP_DOMAIN * midxkey_domain);
702 
703 static int heap_dump_hdr (FILE * fp, HEAP_HDR_STATS * heap_hdr);
704 
705 static int heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_index_info, int n_atts,
706  int *att_ids, HEAP_CACHE_ATTRINFO * attr_info, RECDES * recdes, int btid_index,
707  DB_VALUE * result, FUNC_PRED_UNPACK_INFO * func_pred, TP_DOMAIN ** fi_domain);
708 
710  HEAP_CHKALL_RELOCOIDS * chk_objs, INT32 * num_checked);
711 
712 #if defined (SA_MODE)
713 static DISK_ISVALID heap_check_all_pages_by_file_table (THREAD_ENTRY * thread_p, HFID * hfid,
714  HEAP_CHKALL_RELOCOIDS * chk_objs);
715 static int heap_file_map_chkreloc (THREAD_ENTRY * thread_p, PAGE_PTR * page, bool * stop, void *args);
716 #endif /* SA_MODE */
717 
720 static int heap_chkreloc_print_notfound (const void *ignore_reloc_oid, void *ent, void *xchk);
722 
723 static int heap_chnguess_initialize (void);
724 static int heap_chnguess_realloc (void);
725 static int heap_chnguess_finalize (void);
726 static int heap_chnguess_decache (const OID * oid);
727 static int heap_chnguess_remove_entry (const void *oid_key, void *ent, void *xignore);
728 
729 static int heap_stats_bestspace_initialize (void);
730 static int heap_stats_bestspace_finalize (void);
731 
732 static int heap_get_spage_type (void);
733 static bool heap_is_reusable_oid (const FILE_TYPE file_type);
734 
736  RECDES * old_recdes, record_descriptor * new_recdes,
737  int lob_create_flag);
738 static int heap_stats_del_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid);
739 static int heap_stats_del_bestspace_by_hfid (THREAD_ENTRY * thread_p, const HFID * hfid);
740 #if defined (ENABLE_UNUSED_FUNCTION)
741 static HEAP_BESTSPACE heap_stats_get_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid);
742 #endif /* #if defined (ENABLE_UNUSED_FUNCTION) */
744  int freespace);
745 static int heap_stats_entry_free (THREAD_ENTRY * thread_p, void *data, void *args);
746 static int heap_get_partitions_from_subclasses (THREAD_ENTRY * thread_p, const OID * subclasses, int *parts_count,
747  OR_PARTITION * partitions);
748 static int heap_class_get_partition_info (THREAD_ENTRY * thread_p, const OID * class_oid, OR_PARTITION * partition_info,
749  HFID * class_hfid, REPR_ID * repr_id, int *has_partition_info);
750 static int heap_get_partition_attributes (THREAD_ENTRY * thread_p, const OID * cls_oid, ATTR_ID * type_id,
751  ATTR_ID * values_id);
752 static int heap_get_class_subclasses (THREAD_ENTRY * thread_p, const OID * class_oid, int *count, OID ** subclasses);
753 static unsigned int heap_hash_vpid (const void *key_vpid, unsigned int htsize);
754 static int heap_compare_vpid (const void *key_vpid1, const void *key_vpid2);
755 static unsigned int heap_hash_hfid (const void *key_hfid, unsigned int htsize);
756 static int heap_compare_hfid (const void *key_hfid1, const void *key_hfid2);
757 
758 static char *heap_bestspace_to_string (char *buf, int buf_size, const HEAP_BESTSPACE * hb);
759 
760 static int fill_string_to_buffer (char **start, char *end, const char *str);
761 
762 static SCAN_CODE heap_get_record_info (THREAD_ENTRY * thread_p, const OID oid, RECDES * recdes, RECDES forward_recdes,
763  PGBUF_WATCHER * page_watcher, HEAP_SCANCACHE * scan_cache, bool ispeeking,
764  DB_VALUE ** record_info);
765 static SCAN_CODE heap_next_internal (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid,
766  RECDES * recdes, HEAP_SCANCACHE * scan_cache, bool ispeeking,
767  bool reversed_direction, DB_VALUE ** cache_recordinfo);
768 
769 static SCAN_CODE heap_get_page_info (THREAD_ENTRY * thread_p, const OID * cls_oid, const HFID * hfid, const VPID * vpid,
770  const PAGE_PTR pgptr, DB_VALUE ** page_info);
771 static SCAN_CODE heap_get_bigone_content (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool ispeeking,
772  OID * forward_oid, RECDES * recdes);
773 static void heap_mvcc_log_insert (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr);
774 static void heap_mvcc_log_delete (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr, LOG_RCVINDEX rcvindex);
775 static int heap_rv_mvcc_redo_delete_internal (THREAD_ENTRY * thread_p, PAGE_PTR page, PGSLOTID slotid, MVCCID mvccid);
776 static void heap_mvcc_log_home_change_on_delete (THREAD_ENTRY * thread_p, RECDES * old_recdes, RECDES * new_recdes,
777  LOG_DATA_ADDR * p_addr);
778 static void heap_mvcc_log_home_no_change (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr);
779 
780 static void heap_mvcc_log_redistribute (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr);
781 
782 #if defined(ENABLE_UNUSED_FUNCTION)
783 static INLINE int heap_try_fetch_header_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p,
784  const VPID * home_vpid_p, const OID * oid_p, PAGE_PTR * hdr_pgptr_p,
785  const VPID * hdr_vpid_p, HEAP_SCANCACHE * scan_cache, int *again_count,
786  int again_max) __attribute__ ((ALWAYS_INLINE));
787 static INLINE int heap_try_fetch_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p,
788  const VPID * home_vpid_p, const OID * oid_p, PAGE_PTR * fwd_pgptr_p,
789  const VPID * fwd_vpid_p, const OID * fwd_oid_p,
790  HEAP_SCANCACHE * scan_cache, int *again_count, int again_max)
792 static INLINE int heap_try_fetch_header_with_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p,
793  const VPID * home_vpid_p, const OID * oid_p,
794  PAGE_PTR * hdr_pgptr_p, const VPID * hdr_vpid_p,
795  PAGE_PTR * fwd_pgptr_p, const VPID * fwd_vpid_p,
796  const OID * fwd_oid_p, HEAP_SCANCACHE * scan_cache,
797  int *again_count, int again_max)
799 #endif /* ENABLE_UNUSED_FUNCTION */
800 
801 /* common */
803 static void heap_unfix_watchers (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
804 static void heap_clear_operation_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p);
805 static int heap_mark_class_as_modified (THREAD_ENTRY * thread_p, OID * oid_p, int chn, bool decache);
806 static FILE_TYPE heap_get_file_type (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
807 static int heap_is_valid_oid (THREAD_ENTRY * thread_p, OID * oid);
808 static int heap_fix_header_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
809 static int heap_fix_forward_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, OID * forward_oid_hint);
810 static void heap_build_forwarding_recdes (RECDES * recdes_p, INT16 rec_type, OID * forward_oid);
811 
812 /* heap insert related functions */
814  bool is_mvcc_class);
815 static int heap_update_adjust_recdes_header (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * update_context,
816  bool is_mvcc_class);
819  PGBUF_WATCHER * home_hint_p);
821 static int heap_insert_newhome (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * parent_context, RECDES * recdes_p,
822  OID * out_oid_p, PGBUF_WATCHER * newhome_pg_watcher);
823 static int heap_insert_physical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
824 static void heap_log_insert_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p,
825  RECDES * recdes_p, bool is_mvcc_op, bool is_redistribute_op);
826 
827 /* heap delete related functions */
828 static void heap_delete_adjust_header (MVCC_REC_HEADER * header_p, MVCCID mvcc_id, bool need_mvcc_header_max_size);
829 static int heap_get_record_location (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
830 static int heap_delete_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
831 static int heap_delete_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
832 static int heap_delete_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
833 static int heap_delete_physical (THREAD_ENTRY * thread_p, HFID * hfid_p, PAGE_PTR page_p, OID * oid_p);
834 static void heap_log_delete_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p,
835  RECDES * recdes_p, bool mark_reusable, LOG_LSA * undo_lsa);
836 
837 /* heap update related functions */
838 static int heap_update_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
839 static int heap_update_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
840 static int heap_update_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
841 static int heap_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, short slot_id, RECDES * recdes_p);
842 static void heap_log_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p,
843  RECDES * old_recdes_p, RECDES * new_recdes_p, LOG_RCVINDEX rcvindex);
844 
845 static void *heap_hfid_table_entry_alloc (void);
846 static int heap_hfid_table_entry_free (void *unique_stat);
847 static int heap_hfid_table_entry_init (void *unique_stat);
848 static int heap_hfid_table_entry_uninit (void *entry);
849 static int heap_hfid_table_entry_key_copy (void *src, void *dest);
850 static unsigned int heap_hfid_table_entry_key_hash (void *key, int hash_table_size);
851 static int heap_hfid_table_entry_key_compare (void *k1, void *k2);
852 static int heap_hfid_cache_get (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid, FILE_TYPE * ftype_out,
853  char **classname_out);
854 static int heap_get_class_info_from_record (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid,
855  char **classname_out);
856 
857 static void heap_page_update_chain_after_mvcc_op (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid);
858 static void heap_page_rv_chain_update (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid,
859  bool vacuum_status_change);
860 
861 static int heap_scancache_add_partition_node (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache,
862  OID * partition_oid);
864  LOG_LSA * previous_version_lsa, HEAP_SCANCACHE * scan_cache,
865  int has_chn);
866 static int heap_update_set_prev_version (THREAD_ENTRY * thread_p, const OID * oid, PGBUF_WATCHER * home_pg_watcher,
867  PGBUF_WATCHER * fwd_pg_watcher, LOG_LSA * prev_version_lsa);
868 static int heap_scan_cache_allocate_recdes_data (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache_p,
869  RECDES * recdes_p, int size);
870 
871 static int heap_get_header_page (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * header_vpid);
872 
875 STATIC_INLINE int heap_copy_header_stats (THREAD_ENTRY * thread_p, PAGE_PTR page_header, HEAP_HDR_STATS * header_stats)
879 STATIC_INLINE int heap_copy_chain (THREAD_ENTRY * thread_p, PAGE_PTR page_heap, HEAP_CHAIN * chain)
883 
884 // *INDENT-OFF*
885 static void heap_scancache_block_allocate (cubmem::block &b, size_t size);
887 
890 // *INDENT-ON*
891 
892 static int heap_get_page_with_watcher (THREAD_ENTRY * thread_p, const VPID * page_vpid, PGBUF_WATCHER * pg_watcher);
893 static int heap_add_chain_links (THREAD_ENTRY * thread_p, const HFID * hfid, const VPID * vpid, const VPID * next_link,
894  const VPID * prev_link, PGBUF_WATCHER * page_watcher, bool keep_page_fixed,
895  bool is_page_watcher_inited);
896 
897 static int heap_update_and_log_header (THREAD_ENTRY * thread_p, const HFID * hfid,
898  const PGBUF_WATCHER heap_header_watcher, HEAP_HDR_STATS * heap_hdr,
899  const VPID new_next_vpid, const VPID new_last_vpid, const int new_num_pages);
900 
901 /*
902  * heap_hash_vpid () - Hash a page identifier
903  * return: hash value
904  * key_vpid(in): VPID to hash
905  * htsize(in): Size of hash table
906  */
907 static unsigned int
908 heap_hash_vpid (const void *key_vpid, unsigned int htsize)
909 {
910  const VPID *vpid = (VPID *) key_vpid;
911 
912  return ((vpid->pageid | ((unsigned int) vpid->volid) << 24) % htsize);
913 }
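/*
 * Illustration (not part of the original file): a standalone sketch of the hashing scheme used by
 * heap_hash_vpid/heap_hash_hfid above; the volume id is folded into the high byte of the page id
 * (or heap header page id) before taking the remainder. The sketch_* name is local to this example.
 */
#if 0				/* illustration only */
static unsigned int
sketch_hash_page (int pageid, short volid, unsigned int htsize)
{
  /* same shape as the functions above: page id in the low bits, volume id shifted into the top byte */
  return (((unsigned int) pageid) | (((unsigned int) volid) << 24)) % htsize;
}
#endif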
914 
915 /*
916  * heap_compare_vpid () - Compare two VPID keys for hashing
917  * return: int (key_vpid1 == key_vpid2 ?)
918  * key_vpid1(in): First key
919  * key_vpid2(in): Second key
920  */
921 static int
922 heap_compare_vpid (const void *key_vpid1, const void *key_vpid2)
923 {
924  const VPID *vpid1 = (VPID *) key_vpid1;
925  const VPID *vpid2 = (VPID *) key_vpid2;
926 
927  return VPID_EQ (vpid1, vpid2);
928 }
929 
930 /*
931  * heap_hash_hfid () - Hash a file identifier
932  * return: hash value
933  * key_hfid(in): HFID to hash
934  * htsize(in): Size of hash table
935  */
936 static unsigned int
937 heap_hash_hfid (const void *key_hfid, unsigned int htsize)
938 {
939  const HFID *hfid = (HFID *) key_hfid;
940 
941  return ((hfid->hpgid | ((unsigned int) hfid->vfid.volid) << 24) % htsize);
942 }
943 
944 /*
945  * heap_compare_hfid () - Compare two HFID keys for hashing
946  * return: int (key_hfid1 == key_hfid2 ?)
947  * key_hfid1(in): First key
948  * key_hfid2(in): Second key
949  */
950 static int
951 heap_compare_hfid (const void *key_hfid1, const void *key_hfid2)
952 {
953  const HFID *hfid1 = (HFID *) key_hfid1;
954  const HFID *hfid2 = (HFID *) key_hfid2;
955 
956  return HFID_EQ (hfid1, hfid2);
957 }
958 
959 /*
960  * heap_stats_entry_free () - release the memory occupied by a best-space entry
961  * return: NO_ERROR
962  * data(in): a best-space entry associated with the key
963  * args(in): NULL (not used here, but needed by mht_map)
964  */
965 static int
966 heap_stats_entry_free (THREAD_ENTRY * thread_p, void *data, void *args)
967 {
968  HEAP_STATS_ENTRY *ent;
969 
970  ent = (HEAP_STATS_ENTRY *) data;
971  assert_release (ent != NULL);
972 
973  if (ent)
974  {
975  if (heap_Bestspace->free_list_count < HEAP_STATS_ENTRY_FREELIST_SIZE)
976  {
977  ent->next = heap_Bestspace->free_list;
978  heap_Bestspace->free_list = ent;
979 
980  heap_Bestspace->free_list_count++;
981  }
982  else
983  {
984  free_and_init (ent);
985 
986  heap_Bestspace->num_free++;
987  }
988  }
989 
990  return NO_ERROR;
991 }
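/*
 * Illustration (not part of the original file): a standalone sketch of the bounded free-list reuse
 * pattern used by heap_stats_entry_free above; released entries are pushed onto a free list up to a
 * cap and only given back to the allocator beyond that. The sketch_* names and cap are local to
 * this example only.
 */
#if 0				/* illustration only */
#include <stdlib.h>

#define SKETCH_FREELIST_CAP 1000

struct sketch_entry
{
  struct sketch_entry *next;
};

struct sketch_pool
{
  struct sketch_entry *free_list;
  int free_list_count;
};

static void
sketch_release_entry (struct sketch_pool *pool, struct sketch_entry *ent)
{
  if (pool->free_list_count < SKETCH_FREELIST_CAP)
    {
      ent->next = pool->free_list;	/* keep it around for reuse */
      pool->free_list = ent;
      pool->free_list_count++;
    }
  else
    {
      free (ent);			/* cache is full; give it back to the allocator */
    }
}
#endif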
992 
993 /*
994  * heap_stats_add_bestspace () -
995  */
996 static HEAP_STATS_ENTRY *
997 heap_stats_add_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * vpid, int freespace)
998 {
999  HEAP_STATS_ENTRY *ent;
1000  int rc;
1001  PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER;
1002 
1004 
1005  PERF_UTIME_TRACKER_START (thread_p, &time_best_space);
1006 
1007  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
1008 
1009  ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid);
1010 
1011  if (ent)
1012  {
1013  ent->best.freespace = freespace;
1014  goto end;
1015  }
1016 
1018  {
1021 
1023 
1024  ent = NULL;
1025  goto end;
1026  }
1027 
1028  if (heap_Bestspace->free_list_count > 0)
1029  {
1030  assert_release (heap_Bestspace->free_list != NULL);
1031 
1032  ent = heap_Bestspace->free_list;
1033  if (ent == NULL)
1034  {
1035  goto end;
1036  }
1037  heap_Bestspace->free_list = ent->next;
1038  ent->next = NULL;
1039 
1040  heap_Bestspace->free_list_count--;
1041  }
1042  else
1043  {
1044  ent = (HEAP_STATS_ENTRY *) malloc (sizeof (HEAP_STATS_ENTRY));
1045  if (ent == NULL)
1046  {
1048 
1049  goto end;
1050  }
1051 
1052  heap_Bestspace->num_alloc++;
1053  }
1054 
1055  HFID_COPY (&ent->hfid, hfid);
1056  ent->best.vpid = *vpid;
1057  ent->best.freespace = freespace;
1058  ent->next = NULL;
1059 
1060  if (mht_put (heap_Bestspace->vpid_ht, &ent->best.vpid, ent) == NULL)
1061  {
1062  assert_release (false);
1063  (void) heap_stats_entry_free (thread_p, ent, NULL);
1064  ent = NULL;
1065  goto end;
1066  }
1067 
1068  if (mht_put_new (heap_Bestspace->hfid_ht, &ent->hfid, ent) == NULL)
1069  {
1070  assert_release (false);
1071  (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
1072  (void) heap_stats_entry_free (thread_p, ent, NULL);
1073  ent = NULL;
1074  goto end;
1075  }
1076 
1077  heap_Bestspace->num_stats_entries++;
1078 
1079 end:
1080 
1081  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1082 
1083  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1084 
1085  PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_ADD);
1086 
1087  return ent;
1088 }
1089 
1090 /*
1091  * heap_stats_del_bestspace_by_hfid () -
1092  * return: deleted count
1093  *
1094  * hfid(in):
1095  */
1096 static int
1097 heap_stats_del_bestspace_by_hfid (THREAD_ENTRY * thread_p, const HFID * hfid)
1098 {
1099  HEAP_STATS_ENTRY *ent;
1100  int del_cnt = 0;
1101  int rc;
1102  PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER;
1103 
1104  PERF_UTIME_TRACKER_START (thread_p, &time_best_space);
1105 
1106  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
1107 
1108  while ((ent = (HEAP_STATS_ENTRY *) mht_get2 (heap_Bestspace->hfid_ht, hfid, NULL)) != NULL)
1109  {
1110  (void) mht_rem2 (heap_Bestspace->hfid_ht, &ent->hfid, ent, NULL, NULL);
1111  (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
1112  (void) heap_stats_entry_free (thread_p, ent, NULL);
1113  ent = NULL;
1114 
1115  del_cnt++;
1116  }
1117 
1118  assert (del_cnt <= heap_Bestspace->num_stats_entries);
1119 
1120  heap_Bestspace->num_stats_entries -= del_cnt;
1121 
1122  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1123  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1124 
1125  PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_DEL);
1126 
1127  return del_cnt;
1128 }
1129 
1130 /*
1131  * heap_stats_del_bestspace_by_vpid () -
1132  * return: NO_ERROR
1133  *
1134  * vpid(in):
1135  */
1136 static int
1137 heap_stats_del_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid)
1138 {
1139  HEAP_STATS_ENTRY *ent;
1140  int rc;
1141  PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER;
1142 
1143  PERF_UTIME_TRACKER_START (thread_p, &time_best_space);
1144  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
1145 
1146  ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid);
1147  if (ent == NULL)
1148  {
1149  goto end;
1150  }
1151 
1152  (void) mht_rem2 (heap_Bestspace->hfid_ht, &ent->hfid, ent, NULL, NULL);
1153  (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
1154  (void) heap_stats_entry_free (thread_p, ent, NULL);
1155  ent = NULL;
1156 
1157  heap_Bestspace->num_stats_entries -= 1;
1158 
1159 end:
1160  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1161 
1162  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1163 
1164  PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_DEL);
1165 
1166  return NO_ERROR;
1167 }
1168 
1169 #if defined (ENABLE_UNUSED_FUNCTION)
1170 /*
1171  * heap_stats_get_bestspace_by_vpid () -
1172  * return: NO_ERROR
1173  *
1174  * vpid(in):
1175  */
1176 static HEAP_BESTSPACE
1177 heap_stats_get_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid)
1178 {
1179  HEAP_STATS_ENTRY *ent;
1180  HEAP_BESTSPACE best;
1181  int rc;
1182 
1183  best.freespace = -1;
1184  VPID_SET_NULL (&best.vpid);
1185 
1186  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
1187 
1188  ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid);
1189  if (ent == NULL)
1190  {
1191  goto end;
1192  }
1193 
1194  best = ent->best;
1195 
1196 end:
1197  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1198 
1199  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1200 
1201  return best;
1202 }
1203 #endif /* ENABLE_UNUSED_FUNCTION */
1204 
1205 /*
1206  * Scan page buffer and latch page manipulation
1207  */
1208 
1209 /*
1210  * heap_scan_pb_lock_and_fetch () -
1211  * return:
1212  * vpid_ptr(in):
1213  * fetch_mode(in):
1214  * lock(in):
1215  * scan_cache(in):
1216  *
1217  * NOTE: Because this function is called from many places and it is useful
1218  * to know where a page was fixed when debugging, we pass the
1219  * caller file/line arguments to pgbuf_fix.
1220  */
1221 #if defined (NDEBUG)
1222 static PAGE_PTR
1223 heap_scan_pb_lock_and_fetch (THREAD_ENTRY * thread_p, const VPID * vpid_ptr, PAGE_FETCH_MODE fetch_mode, LOCK lock,
1224  HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher)
1225 #else /* !NDEBUG */
1226 static PAGE_PTR
1227 heap_scan_pb_lock_and_fetch_debug (THREAD_ENTRY * thread_p, const VPID * vpid_ptr, PAGE_FETCH_MODE fetch_mode,
1228  LOCK lock, HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher,
1229  const char *caller_file, const int caller_line)
1230 #endif /* !NDEBUG */
1231 {
1232  PAGE_PTR pgptr = NULL;
1233  LOCK page_lock;
1234  PGBUF_LATCH_MODE page_latch_mode;
1235 
1236  if (scan_cache != NULL)
1237  {
1238  if (scan_cache->page_latch == NULL_LOCK)
1239  {
1240  page_lock = NULL_LOCK;
1241  }
1242  else
1243  {
1244  assert (scan_cache->page_latch >= NULL_LOCK);
1245  assert (lock >= NULL_LOCK);
1246  page_lock = lock_Conv[scan_cache->page_latch][lock];
1247  assert (page_lock != NA_LOCK);
1248  }
1249  }
1250  else
1251  {
1252  page_lock = lock;
1253  }
1254 
1255  if (page_lock == S_LOCK)
1256  {
1257  page_latch_mode = PGBUF_LATCH_READ;
1258  }
1259  else
1260  {
1261  page_latch_mode = PGBUF_LATCH_WRITE;
1262  }
1263 
1264  if (pg_watcher != NULL)
1265  {
1266 #if defined (NDEBUG)
1267  if (pgbuf_ordered_fix_release (thread_p, vpid_ptr, fetch_mode, page_latch_mode, pg_watcher) != NO_ERROR)
1268 #else /* !NDEBUG */
1269  if (pgbuf_ordered_fix_debug (thread_p, vpid_ptr, fetch_mode, page_latch_mode, pg_watcher,
1270  caller_file, caller_line) != NO_ERROR)
1271 #endif /* !NDEBUG */
1272  {
1273  return NULL;
1274  }
1275  pgptr = pg_watcher->pgptr;
1276  }
1277  else
1278  {
1279 #if defined (NDEBUG)
1280  pgptr = pgbuf_fix_release (thread_p, vpid_ptr, fetch_mode, page_latch_mode, PGBUF_UNCONDITIONAL_LATCH);
1281 #else /* !NDEBUG */
1282  pgptr =
1283  pgbuf_fix_debug (thread_p, vpid_ptr, fetch_mode, page_latch_mode, PGBUF_UNCONDITIONAL_LATCH, caller_file,
1284  caller_line);
1285 #endif /* !NDEBUG */
1286  }
1287 
1288  if (pgptr != NULL)
1289  {
1290  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
1291  }
1292 
1293  return pgptr;
1294 }
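/*
 * Illustration (not part of the original file): a standalone sketch of the lock-to-latch decision
 * made in heap_scan_pb_lock_and_fetch above; only a shared (S) page lock maps to a read latch, and
 * every other value, including NULL_LOCK, takes a write latch. The sketch_* names are local to this
 * example and are not CUBRID's LOCK/PGBUF latch enumerations.
 */
#if 0				/* illustration only */
enum sketch_lock
{ SKETCH_NULL_LOCK, SKETCH_S_LOCK, SKETCH_X_LOCK };

enum sketch_latch
{ SKETCH_LATCH_READ, SKETCH_LATCH_WRITE };

static enum sketch_latch
sketch_latch_for_lock (enum sketch_lock page_lock)
{
  /* mirrors the branch above: shared lock -> read latch, everything else -> write latch */
  return (page_lock == SKETCH_S_LOCK) ? SKETCH_LATCH_READ : SKETCH_LATCH_WRITE;
}
#endif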
1295 
1296 /*
1297  * heap_is_big_length () -
1298  * return: true/false
1299  * length(in):
1300  */
1301 bool
1302 heap_is_big_length (int length)
1303 {
1304  return (length > heap_Maxslotted_reclength) ? true : false;
1305 }
1306 
1307 /*
1308  * heap_get_spage_type () -
1309  * return: the type of the slotted page of the heap file.
1310  */
1311 static int
1313 {
1315 }
1316 
1317 /*
1318  * heap_is_reusable_oid () -
1319  * return: true if the heap file is a reuse_oid table
1320  * file_type(in): the file type of the heap file
1321  */
1322 static bool
1323 heap_is_reusable_oid (const FILE_TYPE file_type)
1324 {
1325  if (file_type == FILE_HEAP)
1326  {
1327  return false;
1328  }
1329  else if (file_type == FILE_HEAP_REUSE_SLOTS)
1330  {
1331  return true;
1332  }
1333  else
1334  {
1335  assert (false);
1337  }
1338  return false;
1339 }
1340 
1341 //
1342 // heap class representation cache
1343 // todo: move out of heap
1344 // todo: STL::list for _cache.area
1345 //
1346 
1347 // *INDENT-OFF*
1348 template <typename ErF, typename ... Args>
1349 void
1350 heap_classrepr_logging_template (const char *filename, const int line, ErF && er_f, const char *msg, Args &&... args)
1351 {
1352  cubthread::entry *thread_p = &cubthread::get_entry ();
1353  string_buffer er_input_str;
1354  er_input_str ("HEAP_CLASSREPR[tran=%d,thrd=%d]: %s\n", msg);
1355  er_f (filename, line, er_input_str.get_buffer (), thread_p->tran_index, thread_p->index,
1356  std::forward<Args> (args)...);
1357 }
1358 #define heap_classrepr_log_er(msg, ...) \
1359  if (prm_get_bool_value (PRM_ID_REPR_CACHE_LOG)) \
1360  heap_classrepr_logging_template (ARG_FILE_LINE, _er_log_debug, msg, __VA_ARGS__)
1361 #define heap_classrepr_log_stack(msg, ...) \
1362  if (prm_get_bool_value (PRM_ID_REPR_CACHE_LOG)) \
1363  heap_classrepr_logging_template (ARG_FILE_LINE, er_print_callstack, msg, __VA_ARGS__)
1364 // *INDENT-ON*
1365 
1366 /*
1367  * heap_classrepr_initialize_cache () - Initialize the class representation cache
1368  * return: NO_ERROR
1369  */
1370 static int
1371 heap_classrepr_initialize_cache (void)
1372 {
1373  HEAP_CLASSREPR_ENTRY *cache_entry;
1374  HEAP_CLASSREPR_LOCK *lock_entry;
1375  HEAP_CLASSREPR_HASH *hash_entry;
1376  int i, ret = NO_ERROR;
1377  size_t size;
1378 
1379  if (heap_Classrepr != NULL)
1380  {
1382  if (ret != NO_ERROR)
1383  {
1384  goto exit_on_error;
1385  }
1386  }
1387 
1388  /* initialize hash entries table */
1389  heap_Classrepr_cache.num_entries = HEAP_CLASSREPR_MAXCACHE;
1390 
1391  heap_Classrepr_cache.area =
1392  (HEAP_CLASSREPR_ENTRY *) malloc (sizeof (HEAP_CLASSREPR_ENTRY) * heap_Classrepr_cache.num_entries);
1393  if (heap_Classrepr_cache.area == NULL)
1394  {
1397  sizeof (HEAP_CLASSREPR_ENTRY) * heap_Classrepr_cache.num_entries);
1398  goto exit_on_error;
1399  }
1400 
1401  cache_entry = heap_Classrepr_cache.area;
1402  for (i = 0; i < heap_Classrepr_cache.num_entries; i++)
1403  {
1404  pthread_mutex_init (&cache_entry[i].mutex, NULL);
1405 
1406  cache_entry[i].idx = i;
1407  cache_entry[i].fcnt = 0;
1408  cache_entry[i].zone = ZONE_FREE;
1409  cache_entry[i].next_wait_thrd = NULL;
1410  cache_entry[i].hash_next = NULL;
1411  cache_entry[i].prev = NULL;
1412  cache_entry[i].next = (i < heap_Classrepr_cache.num_entries - 1) ? &cache_entry[i + 1] : NULL;
1413 
1414  cache_entry[i].force_decache = false;
1415 
1416  OID_SET_NULL (&cache_entry[i].class_oid);
1417  cache_entry[i].max_reprid = DEFAULT_REPR_INCREMENT;
1418  cache_entry[i].repr = (OR_CLASSREP **) malloc (cache_entry[i].max_reprid * sizeof (OR_CLASSREP *));
1419  if (cache_entry[i].repr == NULL)
1420  {
1422  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, cache_entry[i].max_reprid * sizeof (OR_CLASSREP *));
1423  goto exit_on_error;
1424  }
1425  memset (cache_entry[i].repr, 0, cache_entry[i].max_reprid * sizeof (OR_CLASSREP *));
1426 
1427  cache_entry[i].last_reprid = NULL_REPRID;
1428  }
1429 
1430  /* initialize hash bucket table */
1431  heap_Classrepr_cache.num_hash = CLASSREPR_HASH_SIZE;
1432  heap_Classrepr_cache.hash_table =
1433  (HEAP_CLASSREPR_HASH *) malloc (heap_Classrepr_cache.num_hash * sizeof (HEAP_CLASSREPR_HASH));
1434  if (heap_Classrepr_cache.hash_table == NULL)
1435  {
1437  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, heap_Classrepr_cache.num_hash * sizeof (HEAP_CLASSREPR_HASH));
1438  goto exit_on_error;
1439  }
1440 
1441  hash_entry = heap_Classrepr_cache.hash_table;
1442  for (i = 0; i < heap_Classrepr_cache.num_hash; i++)
1443  {
1444  pthread_mutex_init (&hash_entry[i].hash_mutex, NULL);
1445  hash_entry[i].idx = i;
1446  hash_entry[i].hash_next = NULL;
1447  hash_entry[i].lock_next = NULL;
1448  }
1449 
1450  /* initialize hash lock table */
1451  size = thread_num_total_threads () * sizeof (HEAP_CLASSREPR_LOCK);
1452  heap_Classrepr_cache.lock_table = (HEAP_CLASSREPR_LOCK *) malloc (size);
1453  if (heap_Classrepr_cache.lock_table == NULL)
1454  {
1456  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, size);
1457  goto exit_on_error;
1458  }
1459  lock_entry = heap_Classrepr_cache.lock_table;
1460  for (i = 0; i < (int) thread_num_total_threads (); i++)
1461  {
1462  OID_SET_NULL (&lock_entry[i].class_oid);
1463  lock_entry[i].lock_next = NULL;
1464  lock_entry[i].next_wait_thrd = NULL;
1465  }
1466 
1467  /* initialize LRU list */
1468 
1469  pthread_mutex_init (&heap_Classrepr_cache.LRU_list.LRU_mutex, NULL);
1470  heap_Classrepr_cache.LRU_list.LRU_top = NULL;
1471  heap_Classrepr_cache.LRU_list.LRU_bottom = NULL;
1472 
1473  /* initialize free list */
1474  pthread_mutex_init (&heap_Classrepr_cache.free_list.free_mutex, NULL);
1475  heap_Classrepr_cache.free_list.free_top = &heap_Classrepr_cache.area[0];
1476  heap_Classrepr_cache.free_list.free_cnt = heap_Classrepr_cache.num_entries;
1477 
1478  heap_Classrepr = &heap_Classrepr_cache;
1479 
1480  return ret;
1481 
1482 exit_on_error:
1483 
1484  heap_Classrepr_cache.num_entries = 0;
1485 
1486  return (ret == NO_ERROR) ? ER_FAILED : ret;
1487 }
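/*
 * Illustration (not part of the original file): a standalone sketch of how
 * heap_classrepr_initialize_cache above threads the preallocated entry array into a free list;
 * each slot points to the next array slot and the free-list head starts at slot 0.
 * The sketch_* names are local to this example only.
 */
#if 0				/* illustration only */
#include <stddef.h>

struct sketch_cache_entry
{
  struct sketch_cache_entry *next;
  /* ... payload ... */
};

struct sketch_free_list
{
  struct sketch_cache_entry *free_top;
  int free_cnt;
};

static void
sketch_thread_free_list (struct sketch_cache_entry *area, int num_entries, struct sketch_free_list *fl)
{
  int i;

  for (i = 0; i < num_entries; i++)
    {
      area[i].next = (i < num_entries - 1) ? &area[i + 1] : NULL;
    }
  fl->free_top = &area[0];	/* allocation pops entries from the head */
  fl->free_cnt = num_entries;
}
#endif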
1488 
1489 /* TODO: STL::list for _cache.area */
1490 /*
1491  * heap_classrepr_finalize_cache () - Destroy any cached structures
1492  * return: NO_ERROR
1493  *
1494  * Note: Any cached representations are deallocated at this moment and
1495  * the hash table is also removed.
1496  */
1497 static int
1498 heap_classrepr_finalize_cache (void)
1499 {
1500  HEAP_CLASSREPR_ENTRY *cache_entry;
1501  HEAP_CLASSREPR_HASH *hash_entry;
1502  int i, j;
1503  int ret = NO_ERROR;
1504 
1505  if (heap_Classrepr == NULL)
1506  {
1507  return NO_ERROR; /* nop */
1508  }
1509 
1510 #ifdef DEBUG_CLASSREPR_CACHE
1511  ret = heap_classrepr_dump_anyfixed ();
1512  if (ret != NO_ERROR)
1513  {
1514  return ret;
1515  }
1516 #endif /* DEBUG_CLASSREPR_CACHE */
1517 
1518  /* finalize hash entries table */
1519  cache_entry = heap_Classrepr_cache.area;
1520  for (i = 0; cache_entry != NULL && i < heap_Classrepr_cache.num_entries; i++)
1521  {
1522  pthread_mutex_destroy (&cache_entry[i].mutex);
1523 
1524  if (cache_entry[i].repr == NULL)
1525  {
1526  assert (cache_entry[i].repr != NULL);
1527  continue;
1528  }
1529 
1530  for (j = 0; j <= cache_entry[i].last_reprid; j++)
1531  {
1532  if (cache_entry[i].repr[j] != NULL)
1533  {
1534  or_free_classrep (cache_entry[i].repr[j]);
1535  cache_entry[i].repr[j] = NULL;
1536  }
1537  }
1538  free_and_init (cache_entry[i].repr);
1539  }
1540  if (heap_Classrepr_cache.area != NULL)
1541  {
1542  free_and_init (heap_Classrepr_cache.area);
1543  }
1544  heap_Classrepr_cache.num_entries = -1;
1545 
1546  /* finalize hash bucket table */
1547  hash_entry = heap_Classrepr_cache.hash_table;
1548  for (i = 0; hash_entry != NULL && i < heap_Classrepr_cache.num_hash; i++)
1549  {
1550  pthread_mutex_destroy (&hash_entry[i].hash_mutex);
1551  }
1552  heap_Classrepr_cache.num_hash = -1;
1553  if (heap_Classrepr_cache.hash_table != NULL)
1554  {
1555  free_and_init (heap_Classrepr_cache.hash_table);
1556  }
1557 
1558  /* finalize hash lock table */
1559  if (heap_Classrepr_cache.lock_table != NULL)
1560  {
1561  free_and_init (heap_Classrepr_cache.lock_table);
1562  }
1563 
1564  /* finalize LRU list */
1565 
1566  pthread_mutex_destroy (&heap_Classrepr_cache.LRU_list.LRU_mutex);
1567 
1568  /* initialize free list */
1569  pthread_mutex_destroy (&heap_Classrepr_cache.free_list.free_mutex);
1570 
1571  heap_Classrepr = NULL;
1572 
1573  return ret;
1574 }
1575 
1576 /*
1577  * heap_classrepr_entry_reset () -
1578  * return: NO_ERROR
1579  * cache_entry(in):
1580  *
1581  * Note: Reset the given class representation entry.
1582  */
1583 static int
1584 heap_classrepr_entry_reset (HEAP_CLASSREPR_ENTRY * cache_entry)
1585 {
1586  int i;
1587  int ret = NO_ERROR;
1588 
1589  if (cache_entry == NULL)
1590  {
1591  return NO_ERROR; /* nop */
1592  }
1593 
1594  /* free all classrepr */
1595  for (i = 0; i <= cache_entry->last_reprid; i++)
1596  {
1597  if (cache_entry->repr[i] != NULL)
1598  {
1599  or_free_classrep (cache_entry->repr[i]);
1600  cache_entry->repr[i] = NULL;
1601  }
1602  }
1603 
1604  cache_entry->force_decache = false;
1605  OID_SET_NULL (&cache_entry->class_oid);
1606  if (cache_entry->max_reprid > DEFAULT_REPR_INCREMENT)
1607  {
1608  OR_CLASSREP **t;
1609 
1610  t = cache_entry->repr;
1611  cache_entry->repr = (OR_CLASSREP **) malloc (DEFAULT_REPR_INCREMENT * sizeof (OR_CLASSREP *));
1612  if (cache_entry->repr == NULL)
1613  {
1616  cache_entry->repr = t;
1617  }
1618  else
1619  {
1620  free_and_init (t);
1621  cache_entry->max_reprid = DEFAULT_REPR_INCREMENT;
1622  memset (cache_entry->repr, 0, DEFAULT_REPR_INCREMENT * sizeof (OR_CLASSREP *));
1623  }
1624 
1625  }
1626  cache_entry->last_reprid = NULL_REPRID;
1627 
1628  return ret;
1629 }
1630 
1631 /*
1632  * heap_classrepr_entry_remove_from_LRU () -
1633  * return: NO_ERROR
1634  * cache_entry(in):
1635  */
1636 static int
1637 heap_classrepr_entry_remove_from_LRU (HEAP_CLASSREPR_ENTRY * cache_entry)
1638 {
1639  if (cache_entry)
1640  {
1641  if (cache_entry == heap_Classrepr_cache.LRU_list.LRU_top)
1642  {
1643  heap_Classrepr_cache.LRU_list.LRU_top = cache_entry->next;
1644  }
1645  else
1646  {
1647  cache_entry->prev->next = cache_entry->next;
1648  }
1649 
1650  if (cache_entry == heap_Classrepr_cache.LRU_list.LRU_bottom)
1651  {
1652  heap_Classrepr_cache.LRU_list.LRU_bottom = cache_entry->prev;
1653  }
1654  else
1655  {
1656  cache_entry->next->prev = cache_entry->prev;
1657  }
1658  }
1659 
1660  return NO_ERROR;
1661 }
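
The unlink above assumes the entry sits at the top, at the bottom, or in the middle of a doubly linked LRU list, and patches the neighbour pointers accordingly. A minimal standalone sketch of the same pattern, using hypothetical simplified types (node_t, lru_t) rather than the real cache structures:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node
{
  struct node *prev, *next;
  int id;
} node_t;

typedef struct lru
{
  node_t *top, *bottom;
} lru_t;

/* Same unlink pattern as heap_classrepr_entry_remove_from_LRU: fix the
 * neighbour pointers, or the list anchors when the node is at either end. */
static void
lru_remove (lru_t * lru, node_t * n)
{
  if (n == lru->top)
    {
      lru->top = n->next;
    }
  else
    {
      n->prev->next = n->next;
    }

  if (n == lru->bottom)
    {
      lru->bottom = n->prev;
    }
  else
    {
      n->next->prev = n->prev;
    }
  n->prev = n->next = NULL;
}

int
main (void)
{
  node_t a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 }, c = { NULL, NULL, 3 };
  lru_t lru = { &a, &c };

  a.next = &b;
  b.prev = &a;
  b.next = &c;
  c.prev = &b;

  lru_remove (&lru, &b);	/* middle node */
  assert (a.next == &c && c.prev == &a);

  lru_remove (&lru, &a);	/* top node */
  assert (lru.top == &c && c.prev == NULL);

  printf ("remaining top id = %d\n", lru.top->id);
  return 0;
}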
1662 
1663 /* TODO: STL::list for ->prev */
1664 /*
1665  * heap_classrepr_decache_guessed_last () -
1666  * return: NO_ERROR
1667  * class_oid(in):
1668  *
1669  * Note: Decache the guessed last representations (i.e., the one with -1)
1670  * from the given class.
1671  *
1672  * Note: This function should be called when a class is updated.
1673  * 1: During normal update
1674  */
1675 static int
1676 heap_classrepr_decache_guessed_last (const OID * class_oid)
1677 {
1678  HEAP_CLASSREPR_ENTRY *cache_entry, *prev_entry, *cur_entry;
1679  HEAP_CLASSREPR_HASH *hash_anchor;
1680  int rv;
1681  int ret = NO_ERROR;
1682 
1683  heap_classrepr_log_er ("heap_classrepr_decache_guessed_last %d|%d|%d\n", OID_AS_ARGS (class_oid));
1684 
1685  if (class_oid != NULL)
1686  {
1687  hash_anchor = &heap_Classrepr->hash_table[REPR_HASH (class_oid)];
1688 
1689  search_begin:
1690  rv = pthread_mutex_lock (&hash_anchor->hash_mutex);
1691 
1692  for (cache_entry = hash_anchor->hash_next; cache_entry != NULL; cache_entry = cache_entry->hash_next)
1693  {
1694  if (OID_EQ (class_oid, &cache_entry->class_oid))
1695  {
1696  rv = pthread_mutex_trylock (&cache_entry->mutex);
1697  if (rv == 0)
1698  {
1699  goto delete_begin;
1700  }
1701 
1702  if (rv != EBUSY)
1703  {
1706  pthread_mutex_unlock (&hash_anchor->hash_mutex);
1707  return ret;
1708  }
1709 
1710  pthread_mutex_unlock (&hash_anchor->hash_mutex);
1711  rv = pthread_mutex_lock (&cache_entry->mutex);
1712 
1713  /* cache_entry can be used by others. check again */
1714  if (!OID_EQ (class_oid, &cache_entry->class_oid))
1715  {
1716  pthread_mutex_unlock (&cache_entry->mutex);
1717  goto search_begin;
1718  }
1719  break;
1720  }
1721  }
1722 
1723  /* class_oid cache_entry is not found */
1724  if (cache_entry == NULL)
1725  {
1726  pthread_mutex_unlock (&hash_anchor->hash_mutex);
1727  return NO_ERROR;
1728  }
1729 
1730  /* hash anchor lock has been released */
1731  rv = pthread_mutex_lock (&hash_anchor->hash_mutex);
1732 
1733  delete_begin:
1734 
1735  /* delete classrepr from hash chain */
1736  prev_entry = NULL;
1737  cur_entry = hash_anchor->hash_next;
1738  while (cur_entry != NULL)
1739  {
1740  if (cur_entry == cache_entry)
1741  {
1742  break;
1743  }
1744  prev_entry = cur_entry;
1745  cur_entry = cur_entry->hash_next;
1746  }
1747 
1748  /* class_oid cache_entry is not found */
1749  if (cur_entry == NULL)
1750  {
1751  /* This cannot happen */
1752  pthread_mutex_unlock (&hash_anchor->hash_mutex);
1753  pthread_mutex_unlock (&cache_entry->mutex);
1754 
1755  return NO_ERROR;
1756  }
1757 
1758  if (prev_entry == NULL)
1759  {
1760  hash_anchor->hash_next = cur_entry->hash_next;
1761  }
1762  else
1763  {
1764  prev_entry->hash_next = cur_entry->hash_next;
1765  }
1766  cur_entry->hash_next = NULL;
1767 
1768  pthread_mutex_unlock (&hash_anchor->hash_mutex);
1769 
1770  cache_entry->force_decache = true;
1771 
1772  /* Remove from LRU list */
1773  if (cache_entry->zone == ZONE_LRU)
1774  {
1775  rv = pthread_mutex_lock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
1776  (void) heap_classrepr_entry_remove_from_LRU (cache_entry);
1777  pthread_mutex_unlock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
1778  cache_entry->zone = ZONE_VOID;
1779  }
1780  cache_entry->prev = NULL;
1781  cache_entry->next = NULL;
1782 
1783  int save_fcnt = cache_entry->fcnt;
1784  if (cache_entry->fcnt == 0)
1785  {
1786  /* move cache_entry to free_list */
1787  ret = heap_classrepr_entry_reset (cache_entry);
1788  if (ret == NO_ERROR)
1789  {
1790  ret = heap_classrepr_entry_free (cache_entry);
1791  }
1792  }
1793 
1794  pthread_mutex_unlock (&cache_entry->mutex);
1795 
1796  heap_classrepr_log_er ("heap_classrepr_decache_guessed_last %d|%d|%d cache_entry=%p fcnt=%d",
1797  OID_AS_ARGS (class_oid), cache_entry, save_fcnt);
1798  }
1799  return ret;
1800 }
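
The hash-chain removal performed here walks a singly linked hash_next chain with a trailing previous pointer before unlinking the victim. A self-contained sketch of that pattern, with hypothetical simplified types and no locking:

#include <assert.h>
#include <stddef.h>

typedef struct entry
{
  struct entry *hash_next;
  int key;
} entry_t;

/* Unlink 'victim' from the chain anchored at *anchor, keeping a pointer to
 * the previous element, like the removal in heap_classrepr_decache_guessed_last.
 * Returns 1 when the victim was found on the chain. */
static int
chain_remove (entry_t ** anchor, entry_t * victim)
{
  entry_t *prev = NULL, *cur = *anchor;

  while (cur != NULL && cur != victim)
    {
      prev = cur;
      cur = cur->hash_next;
    }
  if (cur == NULL)
    {
      return 0;			/* not on this chain */
    }
  if (prev == NULL)
    {
      *anchor = cur->hash_next;	/* victim was the chain head */
    }
  else
    {
      prev->hash_next = cur->hash_next;
    }
  cur->hash_next = NULL;
  return 1;
}

int
main (void)
{
  entry_t e3 = { NULL, 3 }, e2 = { &e3, 2 }, e1 = { &e2, 1 };
  entry_t *anchor = &e1;

  assert (chain_remove (&anchor, &e2) == 1);
  assert (anchor == &e1 && e1.hash_next == &e3);
  assert (chain_remove (&anchor, &e1) == 1);
  assert (anchor == &e3);
  return 0;
}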
1801 
1802 /*
1803  * heap_classrepr_decache () - Decache any unfixed class representations of
1804  * a given class
1805  * return: NO_ERROR
1806  * class_oid(in):
1807  *
1808  * Note: Decache all class representations of the given class. If no
1809  * class is given, all class representations are decached.
1810  *
1811  * Note: This function should be called when a class is updated.
1812  * 1: At the end/beginning of rollback, since we do not know the
1813  * heap identifier of rolled-back objects and we would spend too
1814  * much time searching for the OID every time we rolled back an
1815  * updated object.
1816  */
1817 int
1818 heap_classrepr_decache (THREAD_ENTRY * thread_p, const OID * class_oid)
1819 {
1820  int ret;
1821 
1822  ret = heap_classrepr_decache_guessed_last (class_oid);
1823  if (ret != NO_ERROR)
1824  {
1825  return ret;
1826  }
1827 
1828  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
1829  {
1830  return ER_FAILED;
1831  }
1832  if (heap_Guesschn != NULL && heap_Guesschn->schema_change == false)
1833  {
1834  ret = heap_chnguess_decache (class_oid);
1835  }
1836  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
1837 
1838  return ret;
1839 }
1840 
1841 /*
1842  * heap_classrepr_restart_cache () - Restart classrepr recache.
1843  *
1844  * return: error code
1845  *
1846  * Note: This function is called at recovery.
1847  */
1848 int
1849 heap_classrepr_restart_cache (void)
1850 {
1851  int ret;
1852 
1853  if (!log_is_in_crash_recovery ())
1854  {
1856  return ER_FAILED;
1857  }
1858 
1859  ret = heap_classrepr_finalize_cache ();
1860  if (ret != NO_ERROR)
1861  {
1862  return ret;
1863  }
1864 
1865  ret = heap_classrepr_initialize_cache ();
1866  if (ret != NO_ERROR)
1867  {
1868  return ret;
1869  }
1870 
1871  return NO_ERROR;
1872 }
1873 
1874 /* TODO: STL::list for _cache.area */
1875 /*
1876  * heap_classrepr_free () - Free a class representation
1877  * return: NO_ERROR
1878  * classrep(in): The class representation structure
1879  * idx_incache(in): An index if the desired class representation is part of
1880  * the cache, otherwise -1 (no part of cache)
1881  *
1882  * Note: Free a class representation. If the class representation was
1883  * part of the class representation cache, the fix count is
1884  * decremented and the class representation will continue to be
1885  * cached. The representation entry becomes subject to
1886  * replacement when the fix count is zero (no one is using it).
1887  * If the class representation was not part of the cache, it is
1888  * freed.
1889  *
1890  * NOTE: consider to use heap_classrepr_free_and_init.
1891  */
1892 int
1893 heap_classrepr_free (OR_CLASSREP * classrep, int *idx_incache)
1894 {
1895  HEAP_CLASSREPR_ENTRY *cache_entry;
1896  int rv;
1897  int ret = NO_ERROR;
1898 
1899  if (*idx_incache < 0)
1900  {
1901  or_free_classrep (classrep);
1902  return NO_ERROR;
1903  }
1904 
1905  cache_entry = &heap_Classrepr_cache.area[*idx_incache];
1906 
1907  rv = pthread_mutex_lock (&cache_entry->mutex);
1908  cache_entry->fcnt--;
1909  if (cache_entry->fcnt == 0)
1910  {
1911  /*
1912  * Is this entry declared to be decached
1913  */
1914 #ifdef DEBUG_CLASSREPR_CACHE
1915  rv = pthread_mutex_lock (&heap_Classrepr_cache.num_fix_entries_mutex);
1916  heap_Classrepr_cache.num_fix_entries--;
1917  pthread_mutex_unlock (&heap_Classrepr_cache.num_fix_entries_mutex);
1918 #endif /* DEBUG_CLASSREPR_CACHE */
1919  if (cache_entry->force_decache != 0)
1920  {
1921  /* cache_entry is already removed from LRU list. */
1922 
1923  /* move cache_entry to free_list */
1924  ret = heap_classrepr_entry_free (cache_entry);
1925  if (ret == NO_ERROR)
1926  {
1927  ret = heap_classrepr_entry_reset (cache_entry);
1928  }
1929  }
1930  else
1931  {
1932  /* relocate entry to the top of LRU list */
1933  if (cache_entry != heap_Classrepr_cache.LRU_list.LRU_top)
1934  {
1935  rv = pthread_mutex_lock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
1936  if (cache_entry->zone == ZONE_LRU)
1937  {
1938  /* remove from LRU list */
1939  (void) heap_classrepr_entry_remove_from_LRU (cache_entry);
1940  }
1941 
1942  /* insert into LRU top */
1943  cache_entry->prev = NULL;
1944  cache_entry->next = heap_Classrepr_cache.LRU_list.LRU_top;
1945  if (heap_Classrepr_cache.LRU_list.LRU_top == NULL)
1946  {
1947  heap_Classrepr_cache.LRU_list.LRU_bottom = cache_entry;
1948  }
1949  else
1950  {
1951  heap_Classrepr_cache.LRU_list.LRU_top->prev = cache_entry;
1952  }
1953  heap_Classrepr_cache.LRU_list.LRU_top = cache_entry;
1954  cache_entry->zone = ZONE_LRU;
1955 
1956  pthread_mutex_unlock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
1957  }
1958  }
1959  }
1960  pthread_mutex_unlock (&cache_entry->mutex);
1961  *idx_incache = -1;
1962 
1963  return ret;
1964 }
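
The release path above is essentially a reference-count drop: only when the fix count reaches zero is the entry either retired to the free list (when it was marked for decache) or promoted to the LRU top. A single-threaded sketch of that decision with hypothetical simplified fields:

#include <assert.h>
#include <stdbool.h>

typedef struct cached_repr
{
  int fcnt;			/* fix (reference) count */
  bool force_decache;		/* marked for decache while still fixed */
  bool on_free_list;		/* stand-ins for the zone/list moves */
  bool at_lru_top;
} cached_repr_t;

/* Same decision as heap_classrepr_free: the last unfix either retires the
 * entry or promotes it to the LRU top for future reuse. */
static void
repr_unfix (cached_repr_t * e)
{
  assert (e->fcnt > 0);
  e->fcnt--;
  if (e->fcnt > 0)
    {
      return;			/* still fixed by someone else */
    }
  if (e->force_decache)
    {
      e->on_free_list = true;	/* reset + move to the free list */
    }
  else
    {
      e->at_lru_top = true;	/* relocate to the LRU top */
    }
}

int
main (void)
{
  cached_repr_t a = { 2, false, false, false };
  cached_repr_t b = { 1, true, false, false };

  repr_unfix (&a);		/* 2 -> 1: nothing else happens */
  assert (!a.at_lru_top && !a.on_free_list);
  repr_unfix (&a);		/* 1 -> 0: goes to the LRU top */
  assert (a.at_lru_top);

  repr_unfix (&b);		/* marked entry is retired instead */
  assert (b.on_free_list);
  return 0;
}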
1965 
1966 #ifdef SERVER_MODE
1967 
1968 enum
1969 { NEED_TO_RETRY = 0, LOCK_ACQUIRED };
1970 
1971 /*
1972  * heap_classrepr_lock_class () - Prevent other threads accessing class_oid
1973  * class representation.
1974  * return: ER_FAILED, NEED_TO_RETRY or LOCK_ACQUIRED
1975  * hash_anchor(in):
1976  * class_oid(in):
1977  */
1978 static int
1979 heap_classrepr_lock_class (THREAD_ENTRY * thread_p, HEAP_CLASSREPR_HASH * hash_anchor, const OID * class_oid)
1980 {
1981  HEAP_CLASSREPR_LOCK *cur_lock_entry;
1982  THREAD_ENTRY *cur_thrd_entry;
1983 
1984  if (thread_p == NULL)
1985  {
1986  thread_p = thread_get_thread_entry_info ();
1987  if (thread_p == NULL)
1988  {
1989  return ER_FAILED;
1990  }
1991  }
1992  cur_thrd_entry = thread_p;
1993 
1994  for (cur_lock_entry = hash_anchor->lock_next; cur_lock_entry != NULL; cur_lock_entry = cur_lock_entry->lock_next)
1995  {
1996  if (OID_EQ (&cur_lock_entry->class_oid, class_oid))
1997  {
1998  cur_thrd_entry->next_wait_thrd = cur_lock_entry->next_wait_thrd;
1999  cur_lock_entry->next_wait_thrd = cur_thrd_entry;
2000 
2001  thread_lock_entry (cur_thrd_entry);
2002  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2004 
2005  if (cur_thrd_entry->resume_status == THREAD_HEAP_CLSREPR_RESUMED)
2006  {
2007  return NEED_TO_RETRY; /* traverse hash chain again */
2008  }
2009  else
2010  {
2011  /* probably due to an interrupt */
2012  assert ((cur_thrd_entry->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT));
2013  return ER_FAILED;
2014  }
2015  }
2016  }
2017 
2018  cur_lock_entry = &heap_Classrepr_cache.lock_table[cur_thrd_entry->index];
2019  cur_lock_entry->class_oid = *class_oid;
2020  cur_lock_entry->next_wait_thrd = NULL;
2021  cur_lock_entry->lock_next = hash_anchor->lock_next;
2022  hash_anchor->lock_next = cur_lock_entry;
2023 
2024  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2025 
2026  return LOCK_ACQUIRED; /* lock acquired. */
2027 }
2028 
2029 /*
2030  * heap_classrepr_unlock_class () -
2031  * return: NO_ERROR
2032  * hash_anchor(in):
2033  * class_oid(in):
2034  * need_hash_mutex(in):
2035  */
2036 static int
2037 heap_classrepr_unlock_class (HEAP_CLASSREPR_HASH * hash_anchor, const OID * class_oid, int need_hash_mutex)
2038 {
2039  HEAP_CLASSREPR_LOCK *prev_lock_entry, *cur_lock_entry;
2040  THREAD_ENTRY *cur_thrd_entry;
2041  int rv;
2042 
2043  /* acquire the hash mutex if the caller does not already hold it */
2044  if (need_hash_mutex)
2045  {
2046  rv = pthread_mutex_lock (&hash_anchor->hash_mutex);
2047  }
2048 
2049  prev_lock_entry = NULL;
2050  cur_lock_entry = hash_anchor->lock_next;
2051  while (cur_lock_entry != NULL)
2052  {
2053  if (OID_EQ (&cur_lock_entry->class_oid, class_oid))
2054  {
2055  break;
2056  }
2057  prev_lock_entry = cur_lock_entry;
2058  cur_lock_entry = cur_lock_entry->lock_next;
2059  }
2060 
2061  /* if lock entry is found, remove it from lock list */
2062  if (cur_lock_entry == NULL)
2063  { /* this cannot happen */
2064  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2065  return ER_FAILED;
2066  }
2067 
2068  if (prev_lock_entry == NULL)
2069  {
2070  hash_anchor->lock_next = cur_lock_entry->lock_next;
2071  }
2072  else
2073  {
2074  prev_lock_entry->lock_next = cur_lock_entry->lock_next;
2075  }
2076  cur_lock_entry->lock_next = NULL;
2077  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2078  for (cur_thrd_entry = cur_lock_entry->next_wait_thrd; cur_thrd_entry != NULL;
2079  cur_thrd_entry = cur_lock_entry->next_wait_thrd)
2080  {
2081  cur_lock_entry->next_wait_thrd = cur_thrd_entry->next_wait_thrd;
2082  cur_thrd_entry->next_wait_thrd = NULL;
2083 
2084  thread_wakeup (cur_thrd_entry, THREAD_HEAP_CLSREPR_RESUMED);
2085  }
2086 
2087  return NO_ERROR;
2088 }
2089 #endif /* SERVER_MODE */
2090 
2091 /* TODO: STL::list for ->prev */
2092 /*
2093  * heap_classrepr_entry_alloc () -
2094  * return:
2095  */
2096 static HEAP_CLASSREPR_ENTRY *
2097 heap_classrepr_entry_alloc (void)
2098 {
2099  HEAP_CLASSREPR_HASH *hash_anchor;
2100  HEAP_CLASSREPR_ENTRY *cache_entry, *prev_entry, *cur_entry;
2101  int rv;
2102 
2103  cache_entry = NULL;
2104 
2105 /* check_free_list: */
2106 
2107  /* 1. Get entry from free list */
2108  if (heap_Classrepr_cache.free_list.free_top == NULL)
2109  {
2110  goto check_LRU_list;
2111  }
2112 
2113  rv = pthread_mutex_lock (&heap_Classrepr_cache.free_list.free_mutex);
2114  if (heap_Classrepr_cache.free_list.free_top == NULL)
2115  {
2116  pthread_mutex_unlock (&heap_Classrepr_cache.free_list.free_mutex);
2117  cache_entry = NULL;
2118  }
2119  else
2120  {
2121  cache_entry = heap_Classrepr_cache.free_list.free_top;
2122  heap_Classrepr_cache.free_list.free_top = cache_entry->next;
2123  heap_Classrepr_cache.free_list.free_cnt--;
2124  pthread_mutex_unlock (&heap_Classrepr_cache.free_list.free_mutex);
2125 
2126  rv = pthread_mutex_lock (&cache_entry->mutex);
2127  cache_entry->next = NULL;
2128  cache_entry->zone = ZONE_VOID;
2129 
2130  return cache_entry;
2131  }
2132 
2133 check_LRU_list:
2134  /* 2. Get entry from LRU list */
2135  if (heap_Classrepr_cache.LRU_list.LRU_bottom == NULL)
2136  {
2137  goto expand_list;
2138  }
2139 
2140  rv = pthread_mutex_lock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
2141  for (cache_entry = heap_Classrepr_cache.LRU_list.LRU_bottom; cache_entry != NULL; cache_entry = cache_entry->prev)
2142  {
2143  if (cache_entry->fcnt == 0)
2144  {
2145  /* remove from LRU list */
2146  (void) heap_classrepr_entry_remove_from_LRU (cache_entry);
2147  cache_entry->zone = ZONE_VOID;
2148  cache_entry->next = cache_entry->prev = NULL;
2149  break;
2150  }
2151  }
2152  pthread_mutex_unlock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
2153 
2154  if (cache_entry == NULL)
2155  {
2156  goto expand_list;
2157  }
2158 
2159  rv = pthread_mutex_lock (&cache_entry->mutex);
2160  /* if someone has referenced it meanwhile, retry */
2161  if (cache_entry->fcnt != 0)
2162  {
2163  pthread_mutex_unlock (&cache_entry->mutex);
2164  goto check_LRU_list;
2165  }
2166 
2167  /* delete classrepr from hash chain */
2168  hash_anchor = &heap_Classrepr->hash_table[REPR_HASH (&cache_entry->class_oid)];
2169  rv = pthread_mutex_lock (&hash_anchor->hash_mutex);
2170  prev_entry = NULL;
2171  cur_entry = hash_anchor->hash_next;
2172  while (cur_entry != NULL)
2173  {
2174  if (cur_entry == cache_entry)
2175  {
2176  break;
2177  }
2178  prev_entry = cur_entry;
2179  cur_entry = cur_entry->hash_next;
2180  }
2181 
2182  if (cur_entry == NULL)
2183  {
2184  /* This cannot happen */
2185  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2186  pthread_mutex_unlock (&cache_entry->mutex);
2187 
2188  return NULL;
2189  }
2190  if (prev_entry == NULL)
2191  {
2192  hash_anchor->hash_next = cur_entry->hash_next;
2193  }
2194  else
2195  {
2196  prev_entry->hash_next = cur_entry->hash_next;
2197  }
2198  cur_entry->hash_next = NULL;
2199  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2200 
2201  (void) heap_classrepr_entry_reset (cache_entry);
2202 
2203 end:
2204 
2205  return cache_entry;
2206 
2207 expand_list:
2208 
2209  /* not supported */
2210  cache_entry = NULL;
2211  goto end;
2212 }
2213 
2214 /* TODO: STL::list for ->next */
2215 /*
2216  * heap_classrepr_entry_free () -
2217  * return: NO_ERROR
2218  * cache_entry(in):
2219  */
2220 static int
2221 heap_classrepr_entry_free (HEAP_CLASSREPR_ENTRY * cache_entry)
2222 {
2223  int rv;
2224  rv = pthread_mutex_lock (&heap_Classrepr_cache.free_list.free_mutex);
2225 
2226  cache_entry->next = heap_Classrepr_cache.free_list.free_top;
2227  heap_Classrepr_cache.free_list.free_top = cache_entry;
2228  cache_entry->zone = ZONE_FREE;
2229  heap_Classrepr_cache.free_list.free_cnt++;
2230 
2231  pthread_mutex_unlock (&heap_Classrepr_cache.free_list.free_mutex);
2232 
2233  return NO_ERROR;
2234 }
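
heap_classrepr_entry_free pushes the entry onto a LIFO free list, and the free-list branch of heap_classrepr_entry_alloc above pops from the same list. A minimal single-threaded sketch of that push/pop pair (mutexes omitted; slot_t and the zone values are hypothetical simplifications):

#include <assert.h>
#include <stddef.h>

enum zone
{ ZONE_VOID_S, ZONE_FREE_S, ZONE_LRU_S };

typedef struct slot
{
  struct slot *next;
  enum zone zone;
} slot_t;

typedef struct free_list
{
  slot_t *top;
  int cnt;
} free_list_t;

/* cf. heap_classrepr_entry_free: push on top of the free list */
static void
free_list_push (free_list_t * fl, slot_t * s)
{
  s->next = fl->top;
  fl->top = s;
  s->zone = ZONE_FREE_S;
  fl->cnt++;
}

/* cf. the free-list branch of heap_classrepr_entry_alloc: pop from the top */
static slot_t *
free_list_pop (free_list_t * fl)
{
  slot_t *s = fl->top;

  if (s == NULL)
    {
      return NULL;
    }
  fl->top = s->next;
  fl->cnt--;
  s->next = NULL;
  s->zone = ZONE_VOID_S;
  return s;
}

int
main (void)
{
  slot_t a = { NULL, ZONE_VOID_S }, b = { NULL, ZONE_VOID_S };
  free_list_t fl = { NULL, 0 };

  free_list_push (&fl, &a);
  free_list_push (&fl, &b);
  assert (free_list_pop (&fl) == &b);	/* LIFO order */
  assert (free_list_pop (&fl) == &a);
  assert (free_list_pop (&fl) == NULL && fl.cnt == 0);
  return 0;
}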
2235 
2236 /*
2237  * heap_classrepr_get_from_record ()
2238  * return: classrepr
2239  *
2240  * last_reprid(out):
2241  * class_oid(in): The class identifier
2242  * class_recdes(in): The class recdes (when known) or NULL
2243  * reprid(in): Representation of the class or NULL_REPRID for last one
2244  */
2245 static OR_CLASSREP *
2246 heap_classrepr_get_from_record (THREAD_ENTRY * thread_p, REPR_ID * last_reprid, const OID * class_oid,
2247  RECDES * class_recdes, REPR_ID reprid)
2248 {
2249  RECDES peek_recdes;
2250  RECDES *recdes = NULL;
2251  HEAP_SCANCACHE scan_cache;
2252  OR_CLASSREP *repr = NULL;
2253 
2254  if (last_reprid != NULL)
2255  {
2256  *last_reprid = NULL_REPRID;
2257  }
2258 
2259  if (class_recdes != NULL)
2260  {
2261  recdes = class_recdes;
2262  }
2263  else
2264  {
2265  heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
2266  if (heap_get_class_record (thread_p, class_oid, &peek_recdes, &scan_cache, PEEK) != S_SUCCESS)
2267  {
2268  goto end;
2269  }
2270  recdes = &peek_recdes;
2271  }
2272 
2273  repr = or_get_classrep (recdes, reprid);
2274  if (last_reprid != NULL)
2275  {
2276  *last_reprid = or_rep_id (recdes);
2277  }
2278 
2279 end:
2280  if (class_recdes == NULL)
2281  {
2282  heap_scancache_end (thread_p, &scan_cache);
2283  }
2284  return repr;
2285 }
2286 
2287 /*
2288  * heap_classrepr_get () - Obtain the desired class representation
2289  * return: classrepr
2290  * class_oid(in): The class identifier
2291  * class_recdes(in): The class recdes (when known) or NULL
2292  * reprid(in): Representation of the class or NULL_REPRID for last one
2293  * idx_incache(in): An index if the desired class representation is part
2294  * of the cache
2295  *
2296  * Note: Obtain the desired class representation for the given class.
2297  */
2298 OR_CLASSREP *
2299 heap_classrepr_get (THREAD_ENTRY * thread_p, const OID * class_oid, RECDES * class_recdes, REPR_ID reprid,
2300  int *idx_incache)
2301 {
2302  HEAP_CLASSREPR_ENTRY *cache_entry;
2303  HEAP_CLASSREPR_HASH *hash_anchor;
2304  OR_CLASSREP *repr = NULL;
2305  OR_CLASSREP *repr_from_record = NULL;
2306  OR_CLASSREP *repr_last = NULL;
2307  REPR_ID last_reprid;
2308  int r;
2309 
2310  *idx_incache = -1;
2311 
2312  hash_anchor = &heap_Classrepr->hash_table[REPR_HASH (class_oid)];
2313 
2314  /* search entry with class_oid from hash chain */
2315 search_begin:
2316  r = pthread_mutex_lock (&hash_anchor->hash_mutex);
2317 
2318  for (cache_entry = hash_anchor->hash_next; cache_entry != NULL; cache_entry = cache_entry->hash_next)
2319  {
2320  if (OID_EQ (class_oid, &cache_entry->class_oid))
2321  {
2322  r = pthread_mutex_trylock (&cache_entry->mutex);
2323  if (r == 0)
2324  {
2325  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2326  }
2327  else
2328  {
2329  if (r != EBUSY)
2330  {
2331  /* some error code */
2333  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2334  goto exit;
2335  }
2336  /* if cache_entry lock is busy. release hash mutex lock and lock cache_entry lock unconditionally */
2337  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2338  r = pthread_mutex_lock (&cache_entry->mutex);
2339  }
2340  /* check if cache_entry is used by others */
2341  if (!OID_EQ (class_oid, &cache_entry->class_oid))
2342  {
2343  pthread_mutex_unlock (&cache_entry->mutex);
2344  goto search_begin;
2345  }
2346 
2347  break;
2348  }
2349  }
2350 
2351  if (cache_entry == NULL)
2352  {
2353  if (repr_from_record == NULL)
2354  {
2355  /* note: we need to read class record from heap page. however, latching a page and holding mutex is never a
2356  * good idea, and it can generate ugly deadlocks. but in most cases, we won't have concurrency here,
2357  * so let's try a conditional latch on page of class. if that doesn't work, release the hash mutex,
2358  * read representation from heap and restart the process to ensure consistency. */
2359  VPID vpid_of_class;
2360  PAGE_PTR page_of_class = NULL;
2361  VPID_GET_FROM_OID (&vpid_of_class, class_oid);
2362  page_of_class = pgbuf_fix (thread_p, &vpid_of_class, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_CONDITIONAL_LATCH);
2363  if (page_of_class == NULL)
2364  {
2365  /* we cannot hold mutex */
2366  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2367  }
2368  else if (spage_get_record_type (page_of_class, class_oid->slotid) != REC_HOME)
2369  {
2370  /* things get too complicated when we need to do ordered fix. */
2371  pgbuf_unfix_and_init (thread_p, page_of_class);
2372  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2373  }
2374  repr_from_record = heap_classrepr_get_from_record (thread_p, &last_reprid, class_oid, class_recdes, reprid);
2375  if (repr_from_record == NULL)
2376  {
2377  ASSERT_ERROR ();
2378 
2379  if (page_of_class != NULL)
2380  {
2381  pthread_mutex_unlock (&hash_anchor->hash_mutex);
2382  pgbuf_unfix_and_init (thread_p, page_of_class);
2383  }
2384  goto exit;
2385  }
2386  if (reprid == NULL_REPRID)
2387  {
2388  reprid = last_reprid;
2389  }
2390  if (reprid != last_reprid && repr_last == NULL)
2391  {
2392  repr_last = heap_classrepr_get_from_record (thread_p, &last_reprid, class_oid, class_recdes, last_reprid);
2393  if (repr_last == NULL)
2394  {
2395  /* can we accept this case? */
2396  }
2397  }
2398  if (page_of_class == NULL)
2399  {
2400  /* hash mutex was released, we need to restart search. */
2401  goto search_begin;
2402  }
2403  else
2404  {
2405  pgbuf_unfix_and_init (thread_p, page_of_class);
2406  /* hash mutex was kept */
2407  /* fall through */
2408  }
2409  }
2410  assert (repr_from_record != NULL);
2411  assert (last_reprid != NULL_REPRID);
2412 
2413 #ifdef SERVER_MODE
2414  /* class_oid was not found. Lock class_oid. heap_classrepr_lock_class () release hash_anchor->hash_lock */
2415  r = heap_classrepr_lock_class (thread_p, hash_anchor, class_oid);
2416  if (r != LOCK_ACQUIRED)
2417  {
2418  if (r == NEED_TO_RETRY)
2419  {
2420  goto search_begin;
2421  }
2422  else
2423  {
2424  assert (r == ER_FAILED);
2425  goto exit;
2426  }
2427  }
2428 #endif
2429 
2430  /* Get free entry */
2431  cache_entry = heap_classrepr_entry_alloc ();
2432  if (cache_entry == NULL)
2433  {
2434  /* if all cache entries are busy, return the disk repr. */
2435 
2436 #ifdef SERVER_MODE
2437  /* free lock for class_oid */
2438  (void) heap_classrepr_unlock_class (hash_anchor, class_oid, true);
2439 #endif
2440 
2441  if (repr_last != NULL)
2442  {
2443  or_free_classrep (repr_last);
2444  }
2445 
2446  /* return disk repr when repr cache is full */
2447  return repr_from_record;
2448  }
2449 
2450  /* check if cache_entry->repr[last_reprid] is valid. */
2451  if (last_reprid >= cache_entry->max_reprid)
2452  {
2453  free_and_init (cache_entry->repr);
2454 
2455  cache_entry->repr = (OR_CLASSREP **) malloc ((last_reprid + 1) * sizeof (OR_CLASSREP *));
2456  if (cache_entry->repr == NULL)
2457  {
2458  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
2459  (last_reprid + 1) * sizeof (OR_CLASSREP *));
2460 
2461  pthread_mutex_unlock (&cache_entry->mutex);
2462  (void) heap_classrepr_entry_free (cache_entry);
2463 #ifdef SERVER_MODE
2464  (void) heap_classrepr_unlock_class (hash_anchor, class_oid, true);
2465 #endif
2466  if (repr != NULL)
2467  {
2468  or_free_classrep (repr);
2469  repr = NULL;
2470  }
2471  goto exit;
2472  }
2473  cache_entry->max_reprid = last_reprid + 1;
2474 
2475  memset (cache_entry->repr, 0, cache_entry->max_reprid * sizeof (OR_CLASSREP *));
2476  }
2477 
2478  if (reprid <= NULL_REPRID || reprid > last_reprid || reprid > cache_entry->max_reprid)
2479  {
2480  assert (false);
2481 
2482  pthread_mutex_unlock (&cache_entry->mutex);
2483  (void) heap_classrepr_entry_free (cache_entry);
2484 #ifdef SERVER_MODE
2485  (void) heap_classrepr_unlock_class (hash_anchor, class_oid, true);
2486 #endif
2487 
2488  if (repr != NULL)
2489  {
2490  or_free_classrep (repr);
2491  repr = NULL;
2492  }
2493 
2495  goto exit;
2496  }
2497 
2498  cache_entry->repr[reprid] = repr_from_record;
2499  repr = cache_entry->repr[reprid];
2500  repr_from_record = NULL;
2501  cache_entry->last_reprid = last_reprid;
2502  if (reprid != last_reprid)
2503  { /* if last repr is not cached */
2504  /* normally, we should not access heap record while keeping mutex in cache entry. however, this entry was not
2505  * yet attached to cache, so no one will get its mutex yet */
2506  cache_entry->repr[last_reprid] = repr_last;
2507  repr_last = NULL;
2508  }
2509 
2510  cache_entry->fcnt = 1;
2511  cache_entry->class_oid = *class_oid;
2512 #ifdef DEBUG_CLASSREPR_CACHE
2513  r = pthread_mutex_lock (&heap_Classrepr_cache.num_fix_entries_mutex);
2514  heap_Classrepr_cache.num_fix_entries++;
2515  pthread_mutex_unlock (&heap_Classrepr_cache.num_fix_entries_mutex);
2516 
2517 #endif /* DEBUG_CLASSREPR_CACHE */
2518  *idx_incache = cache_entry->idx;
2519 
2520  /* Add to hash chain, and remove lock for class_oid */
2521  r = pthread_mutex_lock (&hash_anchor->hash_mutex);
2522  cache_entry->hash_next = hash_anchor->hash_next;
2523  hash_anchor->hash_next = cache_entry;
2524 
2525 #ifdef SERVER_MODE
2526  (void) heap_classrepr_unlock_class (hash_anchor, class_oid, false);
2527 #endif
2528 
2529  heap_classrepr_log_stack ("heap_classrepr_get %d|%d|%d add repr %p to cache_entry %p", OID_AS_ARGS (class_oid),
2530  repr, cache_entry);
2531  }
2532  else
2533  {
2534  /* now we already have a cache_entry for class_oid. if it contains the repr info for reprid, return it. else
2535  * load the classrepr info for it */
2536  assert (!cache_entry->force_decache);
2537 
2538  if (reprid == NULL_REPRID)
2539  {
2540  reprid = cache_entry->last_reprid;
2541  }
2542 
2543  if (reprid <= NULL_REPRID || reprid > cache_entry->last_reprid || reprid > cache_entry->max_reprid)
2544  {
2545  assert (false);
2546 
2547  pthread_mutex_unlock (&cache_entry->mutex);
2548 
2550  goto exit;
2551  }
2552 
2553  /* reprid cannot be greater than cache_entry->last_reprid. */
2554  repr = cache_entry->repr[reprid];
2555  if (repr == NULL)
2556  {
2557  /* load repr. info. for reprid of class_oid */
2558  if (repr_from_record == NULL)
2559  {
2560  /* we need to read record from its page. we cannot hold cache mutex and latch a page. */
2561  pthread_mutex_unlock (&cache_entry->mutex);
2562  repr_from_record =
2563  heap_classrepr_get_from_record (thread_p, &last_reprid, class_oid, class_recdes, reprid);
2564  if (repr_from_record == NULL)
2565  {
2566  goto exit;
2567  }
2568  /* we need to start over */
2569  goto search_begin;
2570  }
2571  else
2572  {
2573  /* use the representation loaded from the record */
2574  cache_entry->repr[reprid] = repr_from_record;
2575  repr = repr_from_record;
2576  repr_from_record = NULL;
2577 
2578  /* fall through */
2579  }
2580  }
2581 
2582  cache_entry->fcnt++;
2583  *idx_incache = cache_entry->idx;
2584  }
2585  pthread_mutex_unlock (&cache_entry->mutex);
2586 
2587 exit:
2588  if (repr_from_record != NULL)
2589  {
2590  or_free_classrep (repr_from_record);
2591  }
2592  if (repr_last != NULL)
2593  {
2594  or_free_classrep (repr_last);
2595  }
2596  return repr;
2597 }
2598 
2599 #ifdef DEBUG_CLASSREPR_CACHE
2600 /*
2601  * heap_classrepr_dump_cache () - Dump the class representation cache
2602  * return: NO_ERROR
2603  * simple_dump(in):
2604  *
2605  * Note: Dump the class representation cache.
2606  */
2607 static int
2608 heap_classrepr_dump_cache (bool simple_dump)
2609 {
2610  OR_CLASSREP *classrepr;
2611  HEAP_CLASSREPR_ENTRY *cache_entry;
2612  int i, j;
2613  int rv;
2614  int ret = NO_ERROR;
2615 
2616  if (heap_Classrepr == NULL)
2617  {
2618  return NO_ERROR; /* nop */
2619  }
2620 
2621  (void) fflush (stderr);
2622  (void) fflush (stdout);
2623 
2624  fprintf (stdout, "*** Class Representation cache dump *** \n");
2625  fprintf (stdout, " Number of entries = %d, Number of used entries = %d\n", heap_Classrepr->num_entries,
2626  heap_Classrepr->num_entries - heap_Classrepr->free_list.free_cnt);
2627 
2628  for (cache_entry = heap_Classrepr->area, i = 0; i < heap_Classrepr->num_entries; cache_entry++, i++)
2629  {
2630  fprintf (stdout, " \nEntry_id %d\n", cache_entry->idx);
2631 
2632  rv = pthread_mutex_lock (&cache_entry->mutex);
2633  for (j = 0; j <= cache_entry->last_reprid; j++)
2634  {
2635  classrepr = cache_entry->repr[j];
2636  if (classrepr == NULL)
2637  {
2638  fprintf (stdout, ".....\n");
2639  continue;
2640  }
2641  fprintf (stdout, " Fix count = %d, force_decache = %d\n", cache_entry->fcnt, cache_entry->force_decache);
2642 
2643  if (simple_dump == true)
2644  {
2645  fprintf (stdout, " Class_oid = %d|%d|%d, Reprid = %d\n", (int) cache_entry->class_oid.volid,
2646  cache_entry->class_oid.pageid, (int) cache_entry->class_oid.slotid, cache_entry->repr[j]->id);
2647  fprintf (stdout, " Representation address = %p\n", classrepr);
2648 
2649  }
2650  else
2651  {
2652  ret = heap_classrepr_dump (&cache_entry->class_oid, classrepr);
2653  }
2654  }
2655 
2656  pthread_mutex_unlock (&cache_entry->mutex);
2657  }
2658 
2659  return ret;
2660 }
2661 #endif /* DEBUG_CLASSREPR_CACHE */
2662 
2663 /*
2664  * heap_classrepr_dump () - Dump schema of a given class representation
2665  * return: NO_ERROR
2666  * class_oid(in):
2667  * repr(in): The class representation
2668  *
2669  * Note: Dump the schema of the given class representation.
2670  */
2671 static int
2672 heap_classrepr_dump (THREAD_ENTRY * thread_p, FILE * fp, const OID * class_oid, const OR_CLASSREP * repr)
2673 {
2674  OR_ATTRIBUTE *volatile attrepr;
2675  volatile int i;
2676  int k, j;
2677  char *classname;
2678  const char *attr_name;
2679  DB_VALUE def_dbvalue;
2680  PR_TYPE *pr_type;
2681  int disk_length;
2682  OR_BUF buf;
2683  bool copy;
2684  RECDES recdes = RECDES_INITIALIZER; /* Used to obtain attrnames */
2685  volatile int ret = NO_ERROR;
2686  char *index_name = NULL;
2687  char *string = NULL;
2688  int alloced_string = 0;
2689  HEAP_SCANCACHE scan_cache;
2690 
2691  /*
2692  * The class is fetched to print the attribute names.
2693  *
2694  * This is needed since the name of the attributes is not contained
2695  * in the class representation structure.
2696  */
2697  (void) heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
2698 
2699  if (repr == NULL)
2700  {
2701  goto exit_on_error;
2702  }
2703 
2704  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, COPY) != S_SUCCESS)
2705  {
2706  goto exit_on_error;
2707  }
2708 
2709  classname = or_class_name (&recdes);
2710  assert (classname != NULL);
2711 
2712  fprintf (fp, "\n");
2713  fprintf (fp,
2714  " Class-OID = %d|%d|%d, Classname = %s, reprid = %d,\n"
2715  " Attrs: Tot = %d, Nfix = %d, Nvar = %d, Nshare = %d, Nclass = %d,\n Total_length_of_fixattrs = %d\n",
2716  (int) class_oid->volid, class_oid->pageid, (int) class_oid->slotid, classname, repr->id, repr->n_attributes,
2717  (repr->n_attributes - repr->n_variable - repr->n_shared_attrs - repr->n_class_attrs), repr->n_variable,
2718  repr->n_shared_attrs, repr->n_class_attrs, repr->fixed_length);
2719 
2720  if (repr->n_attributes > 0)
2721  {
2722  fprintf (fp, "\n");
2723  fprintf (fp, " Attribute Specifications:\n");
2724  }
2725 
2726  for (i = 0, attrepr = repr->attributes; i < repr->n_attributes; i++, attrepr++)
2727  {
2728  string = NULL;
2729  alloced_string = 0;
2730  ret = or_get_attrname (&recdes, attrepr->id, &string, &alloced_string);
2731  if (ret != NO_ERROR)
2732  {
2733  ASSERT_ERROR ();
2734  goto exit_on_error;
2735  }
2736 
2737  attr_name = string;
2738  if (attr_name == NULL)
2739  {
2740  attr_name = "?????";
2741  }
2742 
2743  fprintf (fp, "\n Attrid = %d, Attrname = %s, type = %s,\n location = %d, position = %d,\n", attrepr->id,
2744  attr_name, pr_type_name (attrepr->type), attrepr->location, attrepr->position);
2745 
2746  if (string != NULL && alloced_string == 1)
2747  {
2748  db_private_free_and_init (thread_p, string);
2749  }
2750 
2751  if (!OID_ISNULL (&attrepr->classoid) && !OID_EQ (&attrepr->classoid, class_oid))
2752  {
2753  if (heap_get_class_name (thread_p, &attrepr->classoid, &classname) != NO_ERROR || classname == NULL)
2754  {
2755  ASSERT_ERROR_AND_SET (ret);
2756  goto exit_on_error;
2757  }
2758  fprintf (fp, " Inherited from Class: oid = %d|%d|%d, Name = %s\n", (int) attrepr->classoid.volid,
2759  attrepr->classoid.pageid, (int) attrepr->classoid.slotid, classname);
2760  free_and_init (classname);
2761  }
2762 
2763  if (attrepr->n_btids > 0)
2764  {
2765  fprintf (fp, " Number of Btids = %d,\n", attrepr->n_btids);
2766  for (k = 0; k < attrepr->n_btids; k++)
2767  {
2768  index_name = NULL;
2769  /* find index_name */
2770  for (j = 0; j < repr->n_indexes; ++j)
2771  {
2772  if (BTID_IS_EQUAL (&(repr->indexes[j].btid), &(attrepr->btids[k])))
2773  {
2774  index_name = repr->indexes[j].btname;
2775  break;
2776  }
2777  }
2778 
2779  fprintf (fp, " BTID: VFID %d|%d, Root_PGID %d, %s\n", (int) attrepr->btids[k].vfid.volid,
2780  attrepr->btids[k].vfid.fileid, attrepr->btids[k].root_pageid,
2781  (index_name == NULL) ? "unknown" : index_name);
2782  }
2783  }
2784 
2785  /*
2786  * Dump the default value if any.
2787  */
2788  fprintf (fp, " Default disk value format:\n");
2789  fprintf (fp, " length = %d, value = ", attrepr->default_value.val_length);
2790 
2791  if (attrepr->default_value.val_length <= 0)
2792  {
2793  fprintf (fp, "NULL");
2794  }
2795  else
2796  {
2797  or_init (&buf, (char *) attrepr->default_value.value, attrepr->default_value.val_length);
2798  buf.error_abort = 1;
2799 
2800  switch (_setjmp (buf.env))
2801  {
2802  case 0:
2803  /* Do not copy the string--just use the pointer. The pr_ routines for strings and sets have different
2804  * semantics for length. A negative length value for strings means "don't copy the string, just use the
2805  * pointer". */
2806 
2807  disk_length = attrepr->default_value.val_length;
2808  copy = (pr_is_set_type (attrepr->type)) ? true : false;
2809  pr_type = pr_type_from_id (attrepr->type);
2810  if (pr_type)
2811  {
2812  pr_type->data_readval (&buf, &def_dbvalue, attrepr->domain, disk_length, copy, NULL, 0);
2813 
2814  db_fprint_value (stdout, &def_dbvalue);
2815  (void) pr_clear_value (&def_dbvalue);
2816  }
2817  else
2818  {
2819  fprintf (fp, "PR_TYPE is NULL");
2820  }
2821  break;
2822  default:
2823  /*
2824  * An error was found during the reading of the attribute value
2825  */
2826  fprintf (fp, "Error transforming the default value\n");
2827  break;
2828  }
2829  }
2830  fprintf (fp, "\n");
2831  }
2832 
2833  (void) heap_scancache_end (thread_p, &scan_cache);
2834 
2835  return ret;
2836 
2837 exit_on_error:
2838 
2839  (void) heap_scancache_end (thread_p, &scan_cache);
2840 
2841  fprintf (fp, "Dump has been aborted...");
2842 
2843  return (ret == NO_ERROR) ? ER_FAILED : ret;
2844 }
2845 
2846 #ifdef DEBUG_CLASSREPR_CACHE
2847 /*
2848  * heap_classrepr_dump_anyfixed() - Dump class representation cache if
2849  * any entry is fixed
2850  * return: NO_ERROR
2851  *
2852  * Note: The class representation cache is dumped if any cache entry is fixed
2853  *
2854  * This is a debugging function that can be used to verify if
2855  * entries were freed after a set of operations (e.g., a
2856  * transaction or an API function).
2857  *
2858  * Note:
2859  * This function will not give you good results when there are
2860  * multiple users in the system (multiprocessing). However, it
2861  * can be used during shutdown.
2862  */
2863 int
2864 heap_classrepr_dump_anyfixed (void)
2865 {
2866  int ret = NO_ERROR;
2867 
2868  if (heap_Classrepr->num_fix_entries > 0)
2869  {
2870  er_log_debug (ARG_FILE_LINE, "heap_classrepr_dump_anyfixed: Some entries are fixed\n");
2871  ret = heap_classrepr_dump_cache (true);
2872  }
2873 
2874  return ret;
2875 }
2876 #endif /* DEBUG_CLASSREPR_CACHE */
2877 
2878 /*
2879  * heap_stats_get_min_freespace () - Minimal space to consider a page for statistics
2880  * return: int minspace
2881  * heap_hdr(in): Current header of heap
2882  *
2883  * Note: Find the minimal free space required to keep caching a page
2884  * for statistics.
2885  */
2886 static int
2887 heap_stats_get_min_freespace (HEAP_HDR_STATS * heap_hdr)
2888 {
2889  int min_freespace;
2890  int header_size;
2891 
2892  header_size = OR_MVCC_MAX_HEADER_SIZE;
2893 
2894  /*
2895  * Don't cache the page as a good space page if it does not have at
2896  * least unfill_space + room for one record
2897  */
2898 
2899  if (heap_hdr->estimates.num_recs > 0)
2900  {
2901  min_freespace = (int) (heap_hdr->estimates.recs_sumlen / heap_hdr->estimates.num_recs);
2902 
2903  if (min_freespace < (header_size + 20))
2904  {
2905  min_freespace = header_size + 20; /* Assume very small records */
2906  }
2907  }
2908  else
2909  {
2910  min_freespace = header_size + 20; /* Assume very small records */
2911  }
2912 
2913  min_freespace += heap_hdr->unfill_space;
2914 
2915  min_freespace = MIN (min_freespace, HEAP_DROP_FREE_SPACE);
2916 
2917  return min_freespace;
2918 }
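
The threshold computed above is the average record length (or a small-record floor), plus the unfill space, capped at HEAP_DROP_FREE_SPACE. A standalone sketch of the same arithmetic; the header size, drop threshold, and input numbers below are made-up samples, not the real configuration:

#include <stdio.h>

#define SAMPLE_MVCC_HEADER_SIZE 32	/* stand-in for OR_MVCC_MAX_HEADER_SIZE */
#define SAMPLE_DROP_FREE_SPACE 4800	/* stand-in for HEAP_DROP_FREE_SPACE (~30% of a 16KB page) */
#define SAMPLE_MIN(a, b) ((a) < (b) ? (a) : (b))

/* Same shape as heap_stats_get_min_freespace: average record size (or a
 * small-record floor), plus the unfill space, capped at the drop threshold. */
static int
sample_min_freespace (float recs_sumlen, int num_recs, int unfill_space)
{
  int min_freespace;

  if (num_recs > 0)
    {
      min_freespace = (int) (recs_sumlen / num_recs);
      if (min_freespace < SAMPLE_MVCC_HEADER_SIZE + 20)
	{
	  min_freespace = SAMPLE_MVCC_HEADER_SIZE + 20;	/* assume very small records */
	}
    }
  else
    {
      min_freespace = SAMPLE_MVCC_HEADER_SIZE + 20;
    }

  min_freespace += unfill_space;
  return SAMPLE_MIN (min_freespace, SAMPLE_DROP_FREE_SPACE);
}

int
main (void)
{
  /* 1000 records totalling 200000 bytes, with 1638 bytes of unfill space */
  printf ("min_freespace = %d\n", sample_min_freespace (200000.0f, 1000, 1638));
  return 0;
}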
2919 
2920 /*
2921  * heap_stats_update () - Update one header hinted page space statistics
2922  * return: NO_ERROR
2923  * pgptr(in): Page pointer
2924  * hfid(in): Object heap file identifier
2925  * prev_freespace(in):
2926  *
2927  * NOTE: A page should have at least HEAP_DROP_FREE_SPACE free space in
2928  * order to be inserted into the best hint array.
2929  * If we cannot fix the heap header page because it is held by
2930  * others, we postpone this update until the next deletion.
2931  * In that case, unfortunately, if no record is deleted from this
2932  * page in the future, we may not use this page until
2933  * heap_stats_sync_bestspace searches all pages.
2934  */
2935 void
2936 heap_stats_update (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, const HFID * hfid, int prev_freespace)
2937 {
2938  VPID *vpid;
2939  int freespace, error;
2940  bool need_update;
2941 
2942  freespace = spage_get_free_space_without_saving (thread_p, pgptr, &need_update);
2943  if (prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0)
2944  {
2945  if (prev_freespace < freespace)
2946  {
2947  vpid = pgbuf_get_vpid_ptr (pgptr);
2948  assert_release (vpid != NULL);
2949 
2950  (void) heap_stats_add_bestspace (thread_p, hfid, vpid, freespace);
2951  }
2952  }
2953 
2954  if (need_update || prev_freespace <= HEAP_DROP_FREE_SPACE)
2955  {
2956  if (freespace > HEAP_DROP_FREE_SPACE)
2957  {
2958  vpid = pgbuf_get_vpid_ptr (pgptr);
2959  assert_release (vpid != NULL);
2960 
2961  error = heap_stats_update_internal (thread_p, hfid, vpid, freespace);
2962  if (error != NO_ERROR)
2963  {
2964  spage_set_need_update_best_hint (thread_p, pgptr, true);
2965  }
2966  else if (need_update == true)
2967  {
2968  spage_set_need_update_best_hint (thread_p, pgptr, false);
2969  }
2970  }
2971  else if (need_update == true)
2972  {
2973  spage_set_need_update_best_hint (thread_p, pgptr, false);
2974  }
2975  }
2976 }
2977 
2978 /*
2979  * heap_stats_update_internal () - Update one header hinted page space statistics
2980  * return: NO_ERROR
2981  * hfid(in): Object heap file identifier
2982  * lotspace_vpid(in): Page which has a lot of free space
2983  * free_space(in): The free space on the page
2984  *
2985  * Note: Update header hinted best space page information. This
2986  * function is used during deletions and updates when the free
2987  * space on the page is greater than HEAP_DROP_FREE_SPACE.
2988  */
2989 static int
2990 heap_stats_update_internal (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * lotspace_vpid, int free_space)
2991 {
2992  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
2993  PAGE_PTR hdr_pgptr = NULL; /* Page pointer to header page */
2994  VPID vpid; /* Page-volume identifier */
2995  RECDES recdes; /* Header record descriptor */
2996  LOG_DATA_ADDR addr; /* Address of logging data */
2997  int i, best;
2998  int ret = NO_ERROR;
2999 
3000  /* Retrieve the header of heap */
3001  vpid.volid = hfid->vfid.volid;
3002  vpid.pageid = hfid->hpgid;
3003 
3004  /*
3005  * We do not want to wait for the following operation.
3006  * So, if we cannot lock the page return.
3007  */
3008  hdr_pgptr = pgbuf_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
3009  if (hdr_pgptr == NULL)
3010  {
3011  /* Page is busy or other type of error */
3012  goto exit_on_error;
3013  }
3014 
3015  (void) pgbuf_check_page_ptype (thread_p, hdr_pgptr, PAGE_HEAP);
3016 
3017  /*
3018  * Peek the header record to find statistics for insertion.
3019  * Update the statistics directly.
3020  */
3021  if (spage_get_record (thread_p, hdr_pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
3022  {
3023  goto exit_on_error;
3024  }
3025 
3026  heap_hdr = (HEAP_HDR_STATS *) recdes.data;
3027  best = heap_hdr->estimates.head;
3028 
3029  if (free_space >= heap_stats_get_min_freespace (heap_hdr))
3030  {
3031  /*
3032  * We do not compare with the currently stored values since those values
3033  * may not be accurate at all, whereas the given one is supposed to be
3034  * accurate.
3035  */
3036 
3037  /*
3038  * Find a good place to insert this page
3039  */
3040  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
3041  {
3042  if (VPID_ISNULL (&heap_hdr->estimates.best[best].vpid)
3043  || heap_hdr->estimates.best[best].freespace <= HEAP_DROP_FREE_SPACE)
3044  {
3045  break;
3046  }
3047 
3048  best = HEAP_STATS_NEXT_BEST_INDEX (best);
3049  }
3050 
3051  if (VPID_ISNULL (&heap_hdr->estimates.best[best].vpid))
3052  {
3053  heap_hdr->estimates.num_high_best++;
3054  assert (heap_hdr->estimates.num_high_best <= HEAP_NUM_BEST_SPACESTATS);
3055  }
3056  else if (heap_hdr->estimates.best[best].freespace > HEAP_DROP_FREE_SPACE)
3057  {
3058  heap_hdr->estimates.num_other_high_best++;
3059 
3060  heap_stats_put_second_best (heap_hdr, &heap_hdr->estimates.best[best].vpid);
3061  }
3062  /*
3063  * Now substitute the entry with the new information
3064  */
3065 
3066  heap_hdr->estimates.best[best].freespace = free_space;
3067  heap_hdr->estimates.best[best].vpid = *lotspace_vpid;
3068 
3069  heap_hdr->estimates.head = HEAP_STATS_NEXT_BEST_INDEX (best);
3070 
3071  /*
3072  * The changes to the statistics are not logged. They are fixed
3073  * automatically sooner or later
3074  */
3075 
3076  addr.vfid = &hfid->vfid;
3077  addr.pgptr = hdr_pgptr;
3079  log_skip_logging (thread_p, &addr);
3080  pgbuf_set_dirty (thread_p, hdr_pgptr, FREE);
3081  hdr_pgptr = NULL;
3082  }
3083  else
3084  {
3085  pgbuf_unfix_and_init (thread_p, hdr_pgptr);
3086  }
3087 
3088  return ret;
3089 
3090 exit_on_error:
3091  if (hdr_pgptr)
3092  {
3093  pgbuf_unfix_and_init (thread_p, hdr_pgptr);
3094  }
3095 
3096  return (ret == NO_ERROR) ? ER_FAILED : ret;
3097 }
3098 
3099 /*
3100  * heap_stats_put_second_best () - Put a free page into second best hint array
3101  * return: void
3102  * heap_hdr(in): Statistics of heap file
3103  * vpid(in): VPID to be added
3104  *
3105  * NOTE: A free page is not always inserted into the second best hint array.
3106  * Second best hints are collected only once per 1000 substitutions in
3107  * order to increase randomness for the "emptying contiguous pages" scenario.
3108  */
3109 static void
3110 heap_stats_put_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid)
3111 {
3112  int tail;
3113 
3114  if (heap_hdr->estimates.num_substitutions++ % 1000 == 0)
3115  {
3116  tail = heap_hdr->estimates.tail_second_best;
3117 
3118  heap_hdr->estimates.second_best[tail] = *vpid;
3119  heap_hdr->estimates.tail_second_best = HEAP_STATS_NEXT_BEST_INDEX (tail);
3120 
3121  if (heap_hdr->estimates.num_second_best == HEAP_NUM_BEST_SPACESTATS)
3122  {
3123  assert (heap_hdr->estimates.head_second_best == tail);
3124  heap_hdr->estimates.head_second_best = heap_hdr->estimates.tail_second_best;
3125  }
3126  else
3127  {
3129  heap_hdr->estimates.num_second_best++;
3130  }
3131 
3132  /* If both head and tail refer to the same index, the number of second best hints is
3133  * HEAP_NUM_BEST_SPACESTATS(10). */
3134  assert (heap_hdr->estimates.num_second_best != 0);
3135  assert ((heap_hdr->estimates.tail_second_best > heap_hdr->estimates.head_second_best)
3136  ? ((heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
3137  == heap_hdr->estimates.num_second_best)
3138  : ((10 + heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
3139  == heap_hdr->estimates.num_second_best));
3140 
3141  heap_hdr->estimates.num_substitutions = 1;
3142  }
3143 }
3144 
3145 /*
3146  * heap_stats_get_second_best () - Get a free page from the second best hint array
3147  * return: NO_ERROR or ER_FAILED
3148  * heap_hdr(in): Statistics of heap file
3149  * vpid(out): VPID to get
3150  */
3151 static int
3152 heap_stats_get_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid)
3153 {
3154  int head;
3155 
3156  assert (vpid != NULL);
3157 
3158  if (heap_hdr->estimates.num_second_best == 0)
3159  {
3160  assert (heap_hdr->estimates.tail_second_best == heap_hdr->estimates.head_second_best);
3161  VPID_SET_NULL (vpid);
3162  return ER_FAILED;
3163  }
3164 
3165  head = heap_hdr->estimates.head_second_best;
3166 
3167  heap_hdr->estimates.num_second_best--;
3168  heap_hdr->estimates.head_second_best = HEAP_STATS_NEXT_BEST_INDEX (head);
3169 
3170  /* If both head and tail refer to the same index, the number of second best hints is 0. */
3172  assert ((heap_hdr->estimates.tail_second_best >= heap_hdr->estimates.head_second_best)
3173  ? ((heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
3174  == heap_hdr->estimates.num_second_best)
3175  : ((10 + heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
3176  == heap_hdr->estimates.num_second_best));
3177 
3178  *vpid = heap_hdr->estimates.second_best[head];
3179  return NO_ERROR;
3180 }
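
Taken together, the put/get pair implements a fixed-size circular queue over second_best[], where head == tail can mean either empty or full and num_second_best disambiguates. A standalone sketch of that ring discipline with a hypothetical capacity of 10:

#include <assert.h>

#define RING_CAPACITY 10	/* stands in for HEAP_NUM_BEST_SPACESTATS */
#define RING_NEXT(i) (((i) + 1) % RING_CAPACITY)

typedef struct ring
{
  int items[RING_CAPACITY];
  int head, tail, count;
} ring_t;

/* Overwrite-on-full put, like heap_stats_put_second_best: when the ring is
 * already full, the oldest entry is dropped by advancing the head. */
static void
ring_put (ring_t * r, int item)
{
  r->items[r->tail] = item;
  r->tail = RING_NEXT (r->tail);
  if (r->count == RING_CAPACITY)
    {
      r->head = r->tail;	/* the oldest hint was just overwritten */
    }
  else
    {
      r->count++;
    }
}

/* Pop from the head, like heap_stats_get_second_best. Returns 0 when empty. */
static int
ring_get (ring_t * r, int *item)
{
  if (r->count == 0)
    {
      return 0;
    }
  *item = r->items[r->head];
  r->head = RING_NEXT (r->head);
  r->count--;
  return 1;
}

int
main (void)
{
  ring_t r = { {0}, 0, 0, 0 };
  int v, i;

  for (i = 1; i <= 12; i++)	/* 12 puts into a 10-slot ring drop the 2 oldest */
    {
      ring_put (&r, i);
    }
  assert (ring_get (&r, &v) && v == 3);
  assert (ring_get (&r, &v) && v == 4);
  return 0;
}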
3181 
3182 #if defined(ENABLE_UNUSED_FUNCTION)
3183 /*
3184  * heap_stats_quick_num_fit_in_bestspace () - Guess the number of unit_size entries that
3185  * can fit in best space
3186  * return: number of units
3187  * bestspace(in): Array of best pages along with their freespace
3188  * (The freespace fields may be updated as a SIDE EFFECT)
3189  * num_entries(in): Number of estimated entries in best space.
3190  * unit_size(in): Units of this size
3191  * unfill_space(in): Unfill space on the pages
3192  *
3193  * Note: Find the number of units of "unit_size" that can fit in
3194  * current bestspace.
3195  */
3196 static int
3197 heap_stats_quick_num_fit_in_bestspace (HEAP_BESTSPACE * bestspace, int num_entries, int unit_size, int unfill_space)
3198 {
3199  int total_nunits = 0;
3200  int i;
3201 
3202  if (unit_size <= 0)
3203  {
3204  return ER_FAILED;
3205  }
3206 
3207  for (i = 0; i < num_entries; i++)
3208  {
3209  if ((bestspace[i].freespace - unfill_space) >= unit_size)
3210  {
3211  /*
3212  * How many min_spaces can fit in this page
3213  */
3214  total_nunits += (bestspace[i].freespace - unfill_space) / unit_size;
3215  }
3216  }
3217 
3218  return total_nunits;
3219 }
3220 #endif
3221 
3222 /*
3223  * heap_stats_find_page_in_bestspace () - Find a page within best space
3224  * statistics with the needed space
3225  * return: HEAP_FINDSPACE (found, not found, or error)
3226  * hfid(in): Object heap file identifier
3227  * bestspace(in): Array of best pages along with their freespace
3228  * (The freespace fields may be updated as a SIDE EFFECT)
3229  * idx_badspace(in/out): An index into best space with not-so-good space.
3230  * needed_space(in): The needed space.
3231  * scan_cache(in): Scan cache if any
3232  * pgptr(out): Best page with enough space or NULL
3233  *
3234  * Note: Search for a page within the best space cache which has the
3235  * needed space. The free space fields of best space cache along
3236  * with some other index information are updated (as a side
3237  * effect) as the best space cache is accessed.
3238  */
3239 static HEAP_FINDSPACE
3240 heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_BESTSPACE * bestspace,
3241  int *idx_badspace, int record_length, int needed_space, HEAP_SCANCACHE * scan_cache,
3242  PGBUF_WATCHER * pg_watcher)
3243 {
3244 #define BEST_PAGE_SEARCH_MAX_COUNT 100
3245 
3246  HEAP_FINDSPACE found;
3247  int old_wait_msecs;
3248  int notfound_cnt;
3249  HEAP_STATS_ENTRY *ent;
3250  HEAP_BESTSPACE best;
3251  int rc;
3252  int idx_worstspace;
3253  int i, best_array_index = -1;
3254  bool hash_is_available;
3255  bool best_hint_is_used;
3256  PERF_UTIME_TRACKER time_best_space = PERF_UTIME_TRACKER_INITIALIZER;
3257  PERF_UTIME_TRACKER time_find_page_best_space = PERF_UTIME_TRACKER_INITIALIZER;
3258 
3259  assert (PGBUF_IS_CLEAN_WATCHER (pg_watcher));
3260 
3261  PERF_UTIME_TRACKER_START (thread_p, &time_find_page_best_space);
3262 
3263  /*
3264  * If a page is busy, don't wait; continue looking for other pages in our
3265  * statistics. This reduces some contention on the heap at the
3266  * expense of storage.
3267  */
3268 
3269  /* LK_FORCE_ZERO_WAIT doesn't set error when deadlock occurs */
3270  old_wait_msecs = xlogtb_reset_wait_msecs (thread_p, LK_FORCE_ZERO_WAIT);
3271 
3272  found = HEAP_FINDSPACE_NOTFOUND;
3273  notfound_cnt = 0;
3274  best_array_index = 0;
3275  hash_is_available = prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0;
3276 
3277  while (found == HEAP_FINDSPACE_NOTFOUND)
3278  {
3279  best.freespace = -1; /* init */
3280  best_hint_is_used = false;
3281 
3282  if (hash_is_available)
3283  {
3284  PERF_UTIME_TRACKER_START (thread_p, &time_best_space);
3285  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
3286 
3287  while (notfound_cnt < BEST_PAGE_SEARCH_MAX_COUNT
3288  && (ent = (HEAP_STATS_ENTRY *) mht_get2 (heap_Bestspace->hfid_ht, hfid, NULL)) != NULL)
3289  {
3290  if (ent->best.freespace >= needed_space)
3291  {
3292  best = ent->best;
3293  assert (best.freespace > 0 && best.freespace <= PGLENGTH_MAX);
3294  break;
3295  }
3296 
3297  /* remove in memory bestspace */
3298  (void) mht_rem2 (heap_Bestspace->hfid_ht, &ent->hfid, ent, NULL, NULL);
3299  (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
3300  (void) heap_stats_entry_free (thread_p, ent, NULL);
3301  ent = NULL;
3302 
3303  heap_Bestspace->num_stats_entries--;
3304 
3305  notfound_cnt++;
3306  }
3307 
3308  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
3309  PERF_UTIME_TRACKER_TIME (thread_p, &time_best_space, PSTAT_HF_BEST_SPACE_FIND);
3310  }
3311 
3312  if (best.freespace == -1)
3313  {
3314  /* Either PRM_ID_HF_MAX_BESTSPACE_ENTRIES <= 0 or there is no best space in the heap_Bestspace hash table. We
3315  * will use the bestspace hints in the heap header. */
3316  while (best_array_index < HEAP_NUM_BEST_SPACESTATS)
3317  {
3318  if (bestspace[best_array_index].freespace >= needed_space)
3319  {
3320  best.vpid = bestspace[best_array_index].vpid;
3321  best.freespace = bestspace[best_array_index].freespace;
3322  assert (best.freespace > 0 && best.freespace <= PGLENGTH_MAX);
3323  best_hint_is_used = true;
3324  break;
3325  }
3326  best_array_index++;
3327  }
3328  }
3329 
3330  if (best.freespace == -1)
3331  {
3332  break; /* not found, exit loop */
3333  }
3334 
3335  /* If page could not be fixed, we will interrogate er_errid () to see the error type. If an error is already
3336  * set, the interrogation will be corrupted.
3337  * Make sure an error is not set.
3338  */
3339  if (er_errid () != NO_ERROR)
3340  {
3341  if (er_errid () == ER_INTERRUPTED)
3342  {
3343  /* interrupt arrives at any time */
3344  break;
3345  }
3346 #if defined (SERVER_MODE)
3347  // ignores a warning and expects no other errors
3349 #endif /* SERVER_MODE */
3350  er_clear ();
3351  }
3352 
3353  pg_watcher->pgptr = heap_scan_pb_lock_and_fetch (thread_p, &best.vpid, OLD_PAGE, X_LOCK, scan_cache, pg_watcher);
3354  if (pg_watcher->pgptr == NULL)
3355  {
3356  /*
3357  * Either we timed out, in which case we want to continue, or
3358  * we have another kind of problem.
3359  */
3360  switch (er_errid ())
3361  {
3362  case NO_ERROR:
3363  /* In case of latch-timeout in pgbuf_fix, the timeout error(ER_LK_PAGE_TIMEOUT) is not set, because lock
3364  * wait time is LK_FORCE_ZERO_WAIT. So we will just continue to find another page. */
3365  break;
3366 
3367  case ER_INTERRUPTED:
3368  found = HEAP_FINDSPACE_ERROR;
3369  break;
3370 
3371  default:
3372  /*
3373  * Something went wrong, we are unable to fetch this page.
3374  */
3375  if (best_hint_is_used == true)
3376 
3377  {
3378  assert (best_array_index < HEAP_NUM_BEST_SPACESTATS);
3379  bestspace[best_array_index].freespace = 0;
3380  }
3381  else
3382  {
3383  (void) heap_stats_del_bestspace_by_vpid (thread_p, &best.vpid);
3384  }
3385  found = HEAP_FINDSPACE_ERROR;
3386 
3387  /* Do not allow unexpected errors. */
3388  assert (false);
3389  break;
3390  }
3391  }
3392  else
3393  {
3394  best.freespace = spage_max_space_for_new_record (thread_p, pg_watcher->pgptr);
3395  if (best.freespace >= needed_space)
3396  {
3397  /*
3398  * Decrement by only the amount space needed by the caller. Don't
3399  * include the unfill factor
3400  */
3401  best.freespace -= record_length + heap_Slotted_overhead;
3402  found = HEAP_FINDSPACE_FOUND;
3403  }
3404 
3405  if (hash_is_available)
3406  {
3407  /* Add or refresh the free space of the page */
3408  (void) heap_stats_add_bestspace (thread_p, hfid, &best.vpid, best.freespace);
3409  }
3410 
3411  if (best_hint_is_used == true)
3412  {
3413  assert (VPID_EQ (&best.vpid, &(bestspace[best_array_index].vpid)));
3414  assert (best_array_index < HEAP_NUM_BEST_SPACESTATS);
3415 
3416  bestspace[best_array_index].freespace = best.freespace;
3417  }
3418 
3419  if (found != HEAP_FINDSPACE_FOUND)
3420  {
3421  pgbuf_ordered_unfix (thread_p, pg_watcher);
3422  }
3423  }
3424 
3425  if (found == HEAP_FINDSPACE_NOTFOUND)
3426  {
3427  if (best_hint_is_used)
3428  {
3429  /* Increment best_array_index for next search */
3430  best_array_index++;
3431  }
3432  else
3433  {
3434  notfound_cnt++;
3435  }
3436  }
3437  }
3438 
3439  idx_worstspace = 0;
3440  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
3441  {
3442  /* find worst space in bestspace */
3443  if (bestspace[idx_worstspace].freespace > bestspace[i].freespace)
3444  {
3445  idx_worstspace = i;
3446  }
3447 
3448  /* update bestspace of heap header page if found best page at memory hash table */
3449  if (best_hint_is_used == false && found == HEAP_FINDSPACE_FOUND && VPID_EQ (&best.vpid, &bestspace[i].vpid))
3450  {
3451  bestspace[i].freespace = best.freespace;
3452  }
3453  }
3454 
3455  /*
3456  * Set the idx_badspace to the index with the smallest free space
3457  * which may not be accurate. This is used for future lookups (where to
3458  * start) into the findbest space ring.
3459  */
3460  *idx_badspace = idx_worstspace;
3461 
3462  /*
3463  * Reset back the timeout value of the transaction
3464  */
3465  (void) xlogtb_reset_wait_msecs (thread_p, old_wait_msecs);
3466  PERF_UTIME_TRACKER_TIME (thread_p, &time_find_page_best_space, PSTAT_HF_HEAP_FIND_PAGE_BEST_SPACE);
3467 
3468  return found;
3469 }
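
Stripped of page latching and the in-memory hash table, the hint-array part of the search above is a linear scan that also remembers the slot with the least free space for the next lookup. A simplified standalone sketch (hypothetical best_t entries, no page fixing):

#include <assert.h>

#define NUM_BEST 10		/* stands in for HEAP_NUM_BEST_SPACESTATS */

typedef struct best
{
  int pageid;			/* stand-in for the VPID */
  int freespace;
} best_t;

/* Return the index of the first hint with enough space (or -1), and report
 * the index holding the worst space so the caller can start future searches
 * there, as heap_stats_find_page_in_bestspace does through idx_badspace. */
static int
find_in_best (const best_t best[NUM_BEST], int needed_space, int *idx_worst)
{
  int i, found = -1;

  *idx_worst = 0;
  for (i = 0; i < NUM_BEST; i++)
    {
      if (found < 0 && best[i].freespace >= needed_space)
	{
	  found = i;
	}
      if (best[i].freespace < best[*idx_worst].freespace)
	{
	  *idx_worst = i;
	}
    }
  return found;
}

int
main (void)
{
  best_t best[NUM_BEST] = {
    {101, 300}, {102, 90}, {103, 1200}, {104, 40}, {105, 700},
    {106, 0}, {107, 0}, {108, 0}, {109, 0}, {110, 0}
  };
  int worst, hit = find_in_best (best, 600, &worst);

  assert (hit == 2);		/* page 103 has 1200 bytes free */
  assert (best[worst].freespace == 0);
  return 0;
}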
3470 
3471 /*
3472  * heap_stats_find_best_page () - Find a page with the needed space.
3473  * return: pointer to page with enough space or NULL
3474  * hfid(in): Object heap file identifier
3475  * needed_space(in): The minimal space needed
3476  * isnew_rec(in): Are we inserting a new record to the heap ?
3477  * newrec_size(in): Size of the new record
3478  * scan_cache(in/out): Scan cache used to estimate the best space pages
3479  *
3480  * Note: Find a page among the set of best pages of the heap which has
3481  * the needed space. If we do not find any page, a new page is
3482  * allocated. The heap header and the scan cache may be updated
3483  * as a side effect to reflect more accurate space on some of the
3484  * set of best pages.
3485  */
3486 static PAGE_PTR
3487 heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int needed_space, bool isnew_rec,
3488  int newrec_size, HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher)
3489 {
3490  VPID vpid; /* Volume and page identifiers */
3491  LOG_DATA_ADDR addr_hdr; /* Address of logging data */
3492  RECDES hdr_recdes; /* Record descriptor to point to space statistics */
3493  HEAP_HDR_STATS *heap_hdr; /* Heap header */
3494  VPID *hdr_vpidp;
3495  int total_space;
3496  int try_find, try_sync;
3497  int num_pages_found;
3498  float other_high_best_ratio;
3499  PGBUF_WATCHER hdr_page_watcher;
3500  int error_code = NO_ERROR;
3501  PERF_UTIME_TRACKER time_find_best_page = PERF_UTIME_TRACKER_INITIALIZER;
3502 
3503  PERF_UTIME_TRACKER_START (thread_p, &time_find_best_page);
3504  /*
3505  * Try to use the space cache for as much information as possible to avoid
3506  * fetching and updating the header page a lot.
3507  */
3508 
3509  assert (scan_cache == NULL || scan_cache->cache_last_fix_page == false || scan_cache->page_watcher.pgptr == NULL);
3510  PGBUF_INIT_WATCHER (&hdr_page_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);
3511 
3512  /*
3513  * Get the heap header in exclusive mode since it is going to be changed.
3514  *
3515  * Note: to avoid any possibility of deadlock, we should not hold any locks
3516  * on the heap at this moment.
3517  * That is, we must assume that by locking the header of the heap in
3518  * exclusive mode, the rest of the heap is locked.
3519  */
3520 
3521  vpid.volid = hfid->vfid.volid;
3522  vpid.pageid = hfid->hpgid;
3523 
3524  addr_hdr.vfid = &hfid->vfid;
3525  addr_hdr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
3526 
3527  error_code = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &hdr_page_watcher);
3528  if (error_code != NO_ERROR)
3529  {
3530  /* something went wrong. Unable to fetch header page */
3531  ASSERT_ERROR ();
3532  goto error;
3533  }
3534  assert (hdr_page_watcher.pgptr != NULL);
3535 
3536  (void) pgbuf_check_page_ptype (thread_p, hdr_page_watcher.pgptr, PAGE_HEAP);
3537 
3538  if (spage_get_record (thread_p, hdr_page_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
3539  {
3540  assert (false);
3541  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
3542  goto error;
3543  }
3544 
3545  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
3546 
3547  if (isnew_rec == true)
3548  {
3549  heap_hdr->estimates.num_recs += 1;
3550  if (newrec_size > DB_PAGESIZE)
3551  {
3552  heap_hdr->estimates.num_pages += CEIL_PTVDIV (newrec_size, DB_PAGESIZE);
3553  }
3554  }
3555  heap_hdr->estimates.recs_sumlen += (float) newrec_size;
3556 
3557  assert (!heap_is_big_length (needed_space));
3558  /* Take into consideration the unfill factor for pages with objects */
3559  total_space = needed_space + heap_Slotted_overhead + heap_hdr->unfill_space;
3560  if (heap_is_big_length (total_space))
3561  {
3562  total_space = needed_space + heap_Slotted_overhead;
3563  }
3564 
3565  try_find = 0;
3566  while (true)
3567  {
3568  try_find++;
3569  assert (pg_watcher->pgptr == NULL);
3570  if (heap_stats_find_page_in_bestspace (thread_p, hfid, heap_hdr->estimates.best, &(heap_hdr->estimates.head),
3571  needed_space, total_space, scan_cache, pg_watcher) == HEAP_FINDSPACE_ERROR)
3572  {
3573  ASSERT_ERROR ();
3574  assert (pg_watcher->pgptr == NULL);
3575  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
3576  goto error;
3577  }
3578  if (pg_watcher->pgptr != NULL)
3579  {
3580  /* found the page */
3581  break;
3582  }
3583 
3584  assert (hdr_page_watcher.page_was_unfixed == false);
3585 
3586  if (heap_hdr->estimates.num_other_high_best <= 0 || heap_hdr->estimates.num_pages <= 0)
3587  {
3588  assert (heap_hdr->estimates.num_pages > 0);
3589  other_high_best_ratio = 0;
3590  }
3591  else
3592  {
3593  other_high_best_ratio =
3594  (float) heap_hdr->estimates.num_other_high_best / (float) heap_hdr->estimates.num_pages;
3595  }
3596 
3597  if (try_find >= 2 || other_high_best_ratio < HEAP_BESTSPACE_SYNC_THRESHOLD)
3598  {
3599  /* We stop searching for free pages if: (1) we have already tried twice, or (2) this is the first try but we
3600  * have no hints. Regarding (2), we look for free pages with heap_stats_sync_bestspace only if we know that a
3601  * free page exists somewhere, i.e. (num_other_high_best / total pages) > HEAP_BESTSPACE_SYNC_THRESHOLD.
3602  * num_other_high_best is the number of free pages existing somewhere in the heap file. */
3603  break;
3604  }
3605 
3606  /*
3607  * The following will try to find free pages and fill the best hints with them.
3608  */
3609 
3610  if (scan_cache != NULL)
3611  {
3612  assert (HFID_EQ (hfid, &scan_cache->node.hfid));
3613  assert (scan_cache->file_type != FILE_UNKNOWN_TYPE);
3614  }
3615 
3616  hdr_vpidp = pgbuf_get_vpid_ptr (hdr_page_watcher.pgptr);
3617 
3618  try_sync = 0;
3619  do
3620  {
3621  try_sync++;
3622  heap_bestspace_log ("heap_stats_find_best_page: call heap_stats_sync_bestspace() "
3623  "hfid { vfid { fileid %d volid %d } hpgid %d } hdr_vpid { pageid %d volid %d } "
3624  "scan_all %d ", hfid->vfid.fileid, hfid->vfid.volid, hfid->hpgid, hdr_vpidp->pageid,
3625  hdr_vpidp->volid, 0);
3626 
3627  num_pages_found = heap_stats_sync_bestspace (thread_p, hfid, heap_hdr, hdr_vpidp, false, true);
3628  if (num_pages_found < 0)
3629  {
3630  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
3631  ASSERT_ERROR ();
3632  goto error;
3633  }
3634  }
3635  while (num_pages_found == 0 && try_sync <= 2);
3636 
3637  /* If we cannot find free pages, give up. */
3638  if (num_pages_found <= 0)
3639  {
3640  break;
3641  }
3642  }
3643 
3644  if (pg_watcher->pgptr == NULL)
3645  {
3646  /*
3647  * None of the best pages has the needed space, allocate a new page.
3648  * Set the head to the index with the smallest free space, which may not
3649  * be accurate.
3650  */
3651  if (heap_vpid_alloc (thread_p, hfid, hdr_page_watcher.pgptr, heap_hdr, scan_cache, pg_watcher) != NO_ERROR)
3652  {
3653  ASSERT_ERROR ();
3654  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
3655  goto error;
3656  }
3657  assert (pg_watcher->pgptr != NULL || er_errid () == ER_INTERRUPTED);
3659  }
3660 
3661  addr_hdr.pgptr = hdr_page_watcher.pgptr;
3662  log_skip_logging (thread_p, &addr_hdr);
3663  pgbuf_ordered_set_dirty_and_free (thread_p, &hdr_page_watcher);
3664 
3665  PERF_UTIME_TRACKER_TIME (thread_p, &time_find_best_page, PSTAT_HF_HEAP_FIND_BEST_PAGE);
3666 
3667  return pg_watcher->pgptr;
3668 
3669 error:
3670  PERF_UTIME_TRACKER_TIME (thread_p, &time_find_best_page, PSTAT_HF_HEAP_FIND_BEST_PAGE);
3671 
3672  return NULL;
3673 }
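The loop above gives up on the hinted-page search after two attempts, or when the share of known-good pages outside the hint array is too small to justify a heap_stats_sync_bestspace pass. A small sketch of that decision, assuming only what the comments above state; the threshold constant is a stand-in for HEAP_BESTSPACE_SYNC_THRESHOLD:

#include <stdbool.h>

#define SYNC_THRESHOLD 0.1f   /* stand-in for HEAP_BESTSPACE_SYNC_THRESHOLD */

/* Decide whether another bestspace synchronization round is worthwhile:
 * give up after two find attempts, or when the fraction of "other high best"
 * pages over the total page estimate is below the threshold. */
static bool
should_sync_bestspace (int try_find, int num_other_high_best, int num_pages)
{
  float ratio;

  if (try_find >= 2)
    {
      return false;
    }
  if (num_other_high_best <= 0 || num_pages <= 0)
    {
      return false;
    }
  ratio = (float) num_other_high_best / (float) num_pages;
  return ratio >= SYNC_THRESHOLD;
}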
3674 
3675 /*
3676  * heap_stats_sync_bestspace () - Synchronize the statistics of best space
3677  * return: the number of pages found
3678  * hfid(in): Heap file identifier
3679  * heap_hdr(in): Heap header (Heap header page should be acquired in
3680  * exclusive mode)
3681  * hdr_vpid(in):
3682  * scan_all(in): Scan the whole heap or stop after HEAP_NUM_BEST_SPACESTATS
3683  * best pages have been found.
3684  * can_cycle(in): TRUE, allow going back to the beginning of the heap.
3685  * FALSE, don't go back to the beginning of the heap. FALSE is used
3686  * when it is known that there is no free space at the
3687  * beginning of the heap. For example, it can be used when we
3688  * pre-allocate pages.
3689  *
3690  * Note: Synchronize for best space, so that we can reuse heap space as
3691  * much as possible.
3692  *
3693  * Note: This function does not do any logging.
3694  */
3695 static int
3696 heap_stats_sync_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, VPID * hdr_vpid,
3697  bool scan_all, bool can_cycle)
3698 {
3699  int i, best, num_high_best, num_other_best, start_pos;
3700  VPID vpid = { NULL_PAGEID, NULL_VOLID };
3701  VPID start_vpid = { NULL_PAGEID, NULL_VOLID };
3702  VPID next_vpid = { NULL_PAGEID, NULL_VOLID };
3703  VPID stopat_vpid = { NULL_PAGEID, NULL_VOLID };
3704  int num_pages = 0;
3705  int num_recs = 0;
3706  float recs_sumlen = 0.0;
3707  int free_space = 0;
3708  int min_freespace;
3709  int ret = NO_ERROR;
3710  int npages = 0, nrecords = 0, rec_length;
3711  int num_iterations = 0, max_iterations;
3712  HEAP_BESTSPACE *best_pages_hint_p;
3713  bool iterate_all = false;
3714  bool search_all = false;
3715  PGBUF_WATCHER pg_watcher;
3716  PGBUF_WATCHER old_pg_watcher;
3717  PERF_UTIME_TRACKER timer_sync_best_space = PERF_UTIME_TRACKER_INITIALIZER;
3718 
3719  PERF_UTIME_TRACKER_START (thread_p, &timer_sync_best_space);
3720 
3721  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
3722  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
3723 
3724  min_freespace = heap_stats_get_min_freespace (heap_hdr);
3725 
3726  best = 0;
3727  start_pos = -1;
3728  num_high_best = num_other_best = 0;
3729 
3730  if (scan_all != true)
3731  {
3732  if (prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0)
3733  {
3734  search_all = true;
3735  start_pos = -1;
3736  next_vpid = heap_hdr->estimates.full_search_vpid;
3737  }
3738  else
3739  {
3740  if (heap_hdr->estimates.num_high_best > 0)
3741  {
3742  /* Use recently inserted one first. */
3743  start_pos = HEAP_STATS_PREV_BEST_INDEX (heap_hdr->estimates.head);
3744  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
3745  {
3746  if (!VPID_ISNULL (&heap_hdr->estimates.best[start_pos].vpid))
3747  {
3748  next_vpid = heap_hdr->estimates.best[start_pos].vpid;
3749  start_vpid = next_vpid;
3750  break;
3751  }
3752 
3753  start_pos = HEAP_STATS_PREV_BEST_INDEX (start_pos);
3754  }
3755  }
3756  else
3757  {
3758  /* If there are hint pages in the second best array, we will try to use them first. Otherwise, we will search
3759  * all pages in the file. */
3760  if (heap_hdr->estimates.num_second_best > 0)
3761  {
3762  if (heap_stats_get_second_best (heap_hdr, &next_vpid) != NO_ERROR)
3763  {
3764  /* This should not happen. */
3765  assert (false);
3766  search_all = true;
3767  }
3768  }
3769  else
3770  {
3771  search_all = true;
3772  }
3773 
3774  if (search_all == true)
3775  {
3776  assert (VPID_ISNULL (&next_vpid));
3777  next_vpid = heap_hdr->estimates.full_search_vpid;
3778  }
3779 
3780  start_vpid = next_vpid;
3781  start_pos = -1;
3782  }
3783  }
3784 
3785  if (can_cycle == true)
3786  {
3787  stopat_vpid = next_vpid;
3788  }
3789  }
3790 
3791  if (VPID_ISNULL (&next_vpid))
3792  {
3793  /*
3794  * Start from beginning of heap due to lack of statistics.
3795  */
3796  next_vpid.volid = hfid->vfid.volid;
3797  next_vpid.pageid = hfid->hpgid;
3798  start_vpid = next_vpid;
3799  start_pos = -1;
3800  can_cycle = false;
3801  }
3802 
3803  /*
3804  * Note that we do not put any locks on the pages that we are scanning
3805  * since the best space array is only used for hints, and it is OK
3806  * if it is a little bit wrong.
3807  */
3808  best_pages_hint_p = heap_hdr->estimates.best;
3809 
3810  num_iterations = 0;
3811  max_iterations = MIN ((int) (heap_hdr->estimates.num_pages * 0.2), heap_Find_best_page_limit);
3812  max_iterations = MAX (max_iterations, HEAP_NUM_BEST_SPACESTATS);
3813 
3814  while (!VPID_ISNULL (&next_vpid) || can_cycle == true)
3815  {
3816  if (can_cycle == true && VPID_ISNULL (&next_vpid))
3817  {
3818  /*
3819  * Go back to beginning of heap looking for good pages with a lot of
3820  * free space
3821  */
3822  next_vpid.volid = hfid->vfid.volid;
3823  next_vpid.pageid = hfid->hpgid;
3824  can_cycle = false;
3825  }
3826 
3827  while ((scan_all == true || num_high_best < HEAP_NUM_BEST_SPACESTATS) && !VPID_ISNULL (&next_vpid)
3828  && (can_cycle == true || !VPID_EQ (&next_vpid, &stopat_vpid)))
3829  {
3830  if (scan_all == false)
3831  {
3832  if (++num_iterations > max_iterations)
3833  {
3834  heap_bestspace_log ("heap_stats_sync_bestspace: num_iterations %d best %d "
3835  "next_vpid { pageid %d volid %d }\n", num_iterations, num_high_best,
3836  next_vpid.pageid, next_vpid.volid);
3837 
3838  /* TODO: Do we really need to update the last scanned position */
3839  /* in case we found fewer than 10 pages? */
3840  /* It is obvious we didn't find any pages. */
3841  if (start_pos != -1 && num_high_best == 0)
3842  {
3843  /* Delete a starting VPID. */
3844  VPID_SET_NULL (&best_pages_hint_p[start_pos].vpid);
3845  best_pages_hint_p[start_pos].freespace = 0;
3846 
3847  heap_hdr->estimates.num_high_best--;
3848  }
3849  iterate_all = true;
3850  break;
3851  }
3852  }
3853 
3854  vpid = next_vpid;
3855  ret = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE_PREVENT_DEALLOC, PGBUF_LATCH_READ, &pg_watcher);
3856  if (ret != NO_ERROR)
3857  {
3858  break;
3859  }
3860  (void) pgbuf_check_page_ptype (thread_p, pg_watcher.pgptr, PAGE_HEAP);
3861 
3862  if (old_pg_watcher.pgptr != NULL)
3863  {
3864  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
3865  }
3866 
3867  ret = heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &next_vpid);
3868  if (ret != NO_ERROR)
3869  {
3870  assert (false);
3871  pgbuf_ordered_unfix (thread_p, &pg_watcher);
3872  break;
3873  }
3874  if (search_all)
3875  {
3876  /* Save the last position to be searched next time. */
3877  heap_hdr->estimates.full_search_vpid = next_vpid;
3878  }
3879 
3880  spage_collect_statistics (pg_watcher.pgptr, &npages, &nrecords, &rec_length);
3881 
3882  num_pages += npages;
3883  num_recs += nrecords;
3884  recs_sumlen += rec_length;
3885 
3886  free_space = spage_max_space_for_new_record (thread_p, pg_watcher.pgptr);
3887 
3888  if (free_space >= min_freespace && free_space > HEAP_DROP_FREE_SPACE)
3889  {
3890  if (prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0)
3891  {
3892  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, free_space);
3893  }
3894 
3895  if (num_high_best < HEAP_NUM_BEST_SPACESTATS)
3896  {
3897  best_pages_hint_p[best].vpid = vpid;
3898  best_pages_hint_p[best].freespace = free_space;
3899 
3900  best = HEAP_STATS_NEXT_BEST_INDEX (best);
3901  num_high_best++;
3902  }
3903  else
3904  {
3905  num_other_best++;
3906  }
3907  }
3908 
3909  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
3910  }
3911 
3912  assert (pg_watcher.pgptr == NULL);
3913  if (old_pg_watcher.pgptr != NULL)
3914  {
3915  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
3916  }
3917 
3918  if (scan_all == false
3919  && (iterate_all == true || num_high_best == HEAP_NUM_BEST_SPACESTATS
3920  || (can_cycle == false && VPID_EQ (&next_vpid, &stopat_vpid))))
3921  {
3922  break;
3923  }
3924 
3925  VPID_SET_NULL (&next_vpid);
3926  }
3927 
3928  heap_bestspace_log ("heap_stats_sync_bestspace: scans from {%d|%d} to {%d|%d}, num_iterations(%d) "
3929  "max_iterations(%d) num_high_best(%d)\n", start_vpid.volid, start_vpid.pageid, vpid.volid,
3930  vpid.pageid, num_iterations, max_iterations, num_high_best);
3931 
3932  /* If we have scanned all pages, we should update all statistics even if we have not found any hints. This logic is
3933  * used to handle "select count(*) from table". */
3934  if (scan_all == false && num_high_best == 0 && heap_hdr->estimates.num_second_best == 0)
3935  {
3936  goto end;
3937  }
3938 
3939  if (num_high_best < HEAP_NUM_BEST_SPACESTATS)
3940  {
3941  for (i = best; i < HEAP_NUM_BEST_SPACESTATS; i++)
3942  {
3943  VPID_SET_NULL (&best_pages_hint_p[i].vpid);
3944  best_pages_hint_p[i].freespace = 0;
3945  }
3946  }
3947 
3948  heap_hdr->estimates.head = best; /* reinit */
3949  heap_hdr->estimates.num_high_best = num_high_best;
3950  assert (heap_hdr->estimates.head >= 0 && heap_hdr->estimates.head < HEAP_NUM_BEST_SPACESTATS
3951  && heap_hdr->estimates.num_high_best <= HEAP_NUM_BEST_SPACESTATS);
3952 
3953  if (scan_all == true || heap_hdr->estimates.num_pages <= num_pages)
3954  {
3955  /*
3956  * We scan the whole heap.
3957  * Reset its statistics with new found statistics
3958  */
3959  heap_hdr->estimates.num_other_high_best = num_other_best;
3960  heap_hdr->estimates.num_pages = num_pages;
3961  heap_hdr->estimates.num_recs = num_recs;
3962  heap_hdr->estimates.recs_sumlen = recs_sumlen;
3963  }
3964  else
3965  {
3966  /*
3967  * We did not scan the whole heap.
3968  * We reset only some of its statistics since we do not have any idea
3969  * which ones are better: the ones that are currently recorded or the ones
3970  * just found.
3971  */
3972  heap_hdr->estimates.num_other_high_best -= heap_hdr->estimates.num_high_best;
3973 
3974  if (heap_hdr->estimates.num_other_high_best < num_other_best)
3975  {
3976  heap_hdr->estimates.num_other_high_best = num_other_best;
3977  }
3978 
3979  if (num_recs > heap_hdr->estimates.num_recs || recs_sumlen > heap_hdr->estimates.recs_sumlen)
3980  {
3981  heap_hdr->estimates.num_pages = num_pages;
3982  heap_hdr->estimates.num_recs = num_recs;
3983  heap_hdr->estimates.recs_sumlen = recs_sumlen;
3984  }
3985  }
3986 
3987 end:
3988  PERF_UTIME_TRACKER_TIME (thread_p, &timer_sync_best_space, PSTAT_HEAP_STATS_SYNC_BESTSPACE);
3989 
3990  return num_high_best;
3991 }
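The best[] hints are maintained as a circular ring: HEAP_STATS_NEXT_BEST_INDEX and HEAP_STATS_PREV_BEST_INDEX (defined elsewhere in this file) presumably advance and rewind the head modulo HEAP_NUM_BEST_SPACESTATS. A minimal sketch of that ring arithmetic under that assumption; NUM_BEST is an assumed stand-in value:

#define NUM_BEST 10   /* stand-in for HEAP_NUM_BEST_SPACESTATS (assumed value) */

/* Advance the ring head; wraps to 0 after the last slot. */
static int
next_best_index (int i)
{
  return (i + 1) % NUM_BEST;
}

/* Rewind the ring head; wraps to the last slot before 0. */
static int
prev_best_index (int i)
{
  return (i + NUM_BEST - 1) % NUM_BEST;
}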
3992 
3993 /*
3994  * heap_get_last_page () - Get the last page pointer.
3995  * return: error code
3996  * hfid(in): Object heap file identifier
3997  * heap_hdr(in): The heap header structure
3998  * scan_cache(in): Scan cache
3999  * last_vpid(out): VPID of the last page
4000  *
4001  * Note: The last VPID is saved in the heap header. It is logged, so it should be the right VPID.
4002  */
4003 static int
4004 heap_get_last_page (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, HEAP_SCANCACHE * scan_cache,
4005  VPID * last_vpid, PGBUF_WATCHER * pg_watcher)
4006 {
4007  int error_code = NO_ERROR;
4008 
4009  assert (pg_watcher != NULL);
4010  assert (last_vpid != NULL);
4011  assert (!VPID_ISNULL (&heap_hdr->estimates.last_vpid));
4012 
4013  *last_vpid = heap_hdr->estimates.last_vpid;
4014  pg_watcher->pgptr = heap_scan_pb_lock_and_fetch (thread_p, last_vpid, OLD_PAGE, X_LOCK, scan_cache, pg_watcher);
4015  if (pg_watcher->pgptr == NULL)
4016  {
4017  ASSERT_ERROR_AND_SET (error_code);
4018  return error_code;
4019  }
4020 
4021 #if !defined (NDEBUG)
4022  {
4023  RECDES recdes;
4024  HEAP_CHAIN *chain;
4025  if (spage_get_record (thread_p, pg_watcher->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4026  {
4027  assert (false);
4028  pgbuf_ordered_unfix (thread_p, pg_watcher);
4029  return ER_FAILED;
4030  }
4031  chain = (HEAP_CHAIN *) recdes.data;
4032  assert (VPID_ISNULL (&chain->next_vpid));
4033  }
4034 #endif /* !NDEBUG */
4035 
4036  return NO_ERROR;
4037 }
4038 
4039 /*
4040  * heap_get_last_vpid () - Get last heap page VPID from heap file header
4041  *
4042  * return : Error code
4043  * thread_p (in) : Thread entry
4044  * hfid (in) : Heap file identifier
4045  * last_vpid (out) : Last heap page VPID
4046  */
4047 STATIC_INLINE int
4048 heap_get_last_vpid (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * last_vpid)
4049 {
4050  PGBUF_WATCHER watcher_heap_header;
4051  VPID vpid_heap_header;
4052  HEAP_HDR_STATS *hdr_stats = NULL;
4053 
4054  int error_code = NO_ERROR;
4055 
4056  PGBUF_INIT_WATCHER (&watcher_heap_header, PGBUF_ORDERED_HEAP_HDR, hfid);
4057 
4058  VPID_SET_NULL (last_vpid);
4059 
4060  vpid_heap_header.volid = hfid->vfid.volid;
4061  vpid_heap_header.pageid = hfid->hpgid;
4062  error_code = pgbuf_ordered_fix (thread_p, &vpid_heap_header, OLD_PAGE, PGBUF_LATCH_READ, &watcher_heap_header);
4063  if (error_code != NO_ERROR)
4064  {
4065  ASSERT_ERROR ();
4066  return error_code;
4067  }
4068 
4069  hdr_stats = heap_get_header_stats_ptr (thread_p, watcher_heap_header.pgptr);
4070  if (hdr_stats == NULL)
4071  {
4072  assert_release (false);
4073  pgbuf_ordered_unfix (thread_p, &watcher_heap_header);
4074  return ER_FAILED;
4075  }
4076  *last_vpid = hdr_stats->estimates.last_vpid;
4077  pgbuf_ordered_unfix (thread_p, &watcher_heap_header);
4078  return NO_ERROR;
4079 }
4080 
4081 /*
4082  * heap_get_header_stats_ptr () - Get pointer to heap header statistics.
4083  *
4084  * return : Pointer to heap header statistics
4085  * page_header (in) : Heap header page
4086  */
4087 STATIC_INLINE HEAP_HDR_STATS *
4088 heap_get_header_stats_ptr (THREAD_ENTRY * thread_p, PAGE_PTR page_header)
4089 {
4090  RECDES recdes;
4091 
4092  if (spage_get_record (thread_p, page_header, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4093  {
4094  assert_release (false);
4095  return NULL;
4096  }
4097  return (HEAP_HDR_STATS *) recdes.data;
4098 }
4099 
4100 /*
4101  * heap_copy_header_stats () - Copy heap header statistics
4102  *
4103  * return : Error code
4104  * page_header (in) : Heap header page
4105  * header_stats (out) : Heap header statistics
4106  */
4107 STATIC_INLINE int
4108 heap_copy_header_stats (THREAD_ENTRY * thread_p, PAGE_PTR page_header, HEAP_HDR_STATS * header_stats)
4109 {
4110  RECDES recdes;
4111 
4112  recdes.data = (char *) header_stats;
4113  recdes.area_size = sizeof (*header_stats);
4114  if (spage_get_record (thread_p, page_header, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, COPY) != S_SUCCESS)
4115  {
4116  assert_release (false);
4117  return ER_FAILED;
4118  }
4119  return NO_ERROR;
4120 }
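heap_get_header_stats_ptr peeks at the header record in place, so the returned pointer is only valid while the page stays fixed, whereas heap_copy_header_stats copies the record into caller-owned storage. A generic, self-contained sketch of the peek-versus-copy distinction, with a toy record standing in for the slotted-page record:

#include <string.h>

struct stats { int num_pages; int num_recs; };

/* A toy "page" that owns one stats record. */
struct page { struct stats hdr; };

/* PEEK-style access: return a pointer into the page; cheap, but the caller
 * must not use it after the page is unfixed/freed. */
static struct stats *
peek_stats (struct page *pg)
{
  return &pg->hdr;
}

/* COPY-style access: copy into caller storage; survives unfixing the page. */
static void
copy_stats (const struct page *pg, struct stats *out)
{
  memcpy (out, &pg->hdr, sizeof (*out));
}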
4121 
4122 /*
4123  * heap_get_chain_ptr () - Get pointer to chain in heap page
4124  *
4125  * return : Pointer to chain in heap page
4126  * page_heap (in) : Heap page
4127  */
4128 STATIC_INLINE HEAP_CHAIN *
4129 heap_get_chain_ptr (THREAD_ENTRY * thread_p, PAGE_PTR page_heap)
4130 {
4131  RECDES recdes;
4132 
4133  if (spage_get_record (thread_p, page_heap, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4134  {
4135  assert_release (false);
4136  return NULL;
4137  }
4138  return (HEAP_CHAIN *) recdes.data;
4139 }
4140 
4141 /*
4142  * heap_copy_chain () - Copy chain from heap page
4143  *
4144  * return : Error code
4145  * page_heap (in) : Heap page
4146  * chain (out) : Heap chain
4147  */
4148 STATIC_INLINE int
4149 heap_copy_chain (THREAD_ENTRY * thread_p, PAGE_PTR page_heap, HEAP_CHAIN * chain)
4150 {
4151  RECDES recdes;
4152 
4153  if (spage_get_record (thread_p, page_heap, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4154  {
4155  assert_release (false);
4156  return ER_FAILED;
4157  }
4158  assert (recdes.length >= (int) sizeof (*chain));
4159  memcpy (chain, recdes.data, sizeof (*chain));
4160  return NO_ERROR;
4161 }
4162 
4163 /*
4164  * heap_vpid_init_new () - FILE_INIT_PAGE_FUNC for heap non-header pages
4165  *
4166  * return : Error code
4167  * thread_p (in) : Thread entry
4168  * page (in) : New heap file page
4169  * args (in) : HEAP_CHAIN *
4170  */
4171 static int
4172 heap_vpid_init_new (THREAD_ENTRY * thread_p, PAGE_PTR page, void *args)
4173 {
4174  LOG_DATA_ADDR addr = LOG_DATA_ADDR_INITIALIZER;
4175  HEAP_CHAIN chain;
4176  RECDES recdes;
4177  INT16 slotid;
4178  int sp_success;
4179 
4180  assert (page != NULL);
4181  assert (args != NULL);
4182 
4183  chain = *(HEAP_CHAIN *) args; /* get chain from args. it is already initialized */
4184 
4185  /* initialize new page. */
4186  addr.pgptr = page;
4187  pgbuf_set_page_ptype (thread_p, addr.pgptr, PAGE_HEAP);
4188 
4189  /* initialize the page and chain it with the previous last allocated page */
4191 
4192  recdes.area_size = recdes.length = sizeof (chain);
4193  recdes.type = REC_HOME;
4194  recdes.data = (char *) &chain;
4195 
4196  sp_success = spage_insert (thread_p, addr.pgptr, &recdes, &slotid);
4197  if (sp_success != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
4198  {
4199  assert (false);
4200 
4201  /* initialization has failed !! */
4202  if (sp_success != SP_SUCCESS)
4203  {
4204  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_GENERIC_ERROR, 0);
4205  }
4206  return ER_FAILED;
4207  }
4208 
4209  log_append_undoredo_data (thread_p, RVHF_NEWPAGE, &addr, 0, recdes.length, NULL, recdes.data);
4210  pgbuf_set_dirty (thread_p, addr.pgptr, DONT_FREE);
4211  return NO_ERROR;
4212 }
4213 
4214 /*
4215  * heap_vpid_alloc () - allocate, fetch, and initialize a new page
4216  * return: error code
4217  * hfid(in): Object heap file identifier
4218  * hdr_pgptr(in): The heap page header
4219  * heap_hdr(in): The heap header structure
4220  * scan_cache(in): Scan cache
4221  * new_pg_watcher(out): watcher for new page.
4222  *
4223  * Note: Allocate and initialize a new heap page. The heap header is
4224  * updated to reflect a newly allocated best space page and
4225  * the set of best space pages information may be updated to
4226  * include the previous best1 space page.
4227  */
4228 static int
4229 heap_vpid_alloc (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR hdr_pgptr, HEAP_HDR_STATS * heap_hdr,
4230  HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * new_pg_watcher)
4231 {
4232  VPID vpid; /* Volume and page identifiers */
4233  LOG_DATA_ADDR addr = LOG_DATA_ADDR_INITIALIZER; /* Address of logging data */
4234  int best;
4235  VPID last_vpid;
4236  PGBUF_WATCHER last_pg_watcher;
4237  HEAP_CHAIN new_page_chain;
4238  HEAP_HDR_STATS heap_hdr_prev = *heap_hdr;
4239 
4240  int error_code = NO_ERROR;
4241 
4242  assert (PGBUF_IS_CLEAN_WATCHER (new_pg_watcher));
4243 
4244  PGBUF_INIT_WATCHER (&last_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4245  addr.vfid = &hfid->vfid;
4246  addr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
4247 
4248  error_code = heap_get_last_page (thread_p, hfid, heap_hdr, scan_cache, &last_vpid, &last_pg_watcher);
4249  if (error_code != NO_ERROR)
4250  {
4251  ASSERT_ERROR ();
4252  return error_code;
4253  }
4254  if (last_pg_watcher.pgptr == NULL)
4255  {
4256  /* something went wrong, return error */
4257  assert_release (false);
4258  return ER_FAILED;
4259  }
4260  assert (!VPID_ISNULL (&last_vpid));
4261 
4262  log_sysop_start (thread_p);
4263 
4264  /* init chain for new page */
4265  new_page_chain.class_oid = heap_hdr->class_oid;
4266  new_page_chain.prev_vpid = last_vpid;
4267  VPID_SET_NULL (&new_page_chain.next_vpid);
4268  new_page_chain.max_mvccid = MVCCID_NULL;
4269  new_page_chain.flags = 0;
4270  HEAP_PAGE_SET_VACUUM_STATUS (&new_page_chain, HEAP_PAGE_VACUUM_NONE);
4271 
4272  /* allocate new page and initialize it */
4273  error_code = file_alloc (thread_p, &hfid->vfid, heap_vpid_init_new, &new_page_chain, &vpid, NULL);
4274  if (error_code != NO_ERROR)
4275  {
4276  ASSERT_ERROR ();
4277  goto error;
4278  }
4279 
4280  /* add link from previous last page */
4282 
4283  if (last_pg_watcher.pgptr == hdr_pgptr)
4284  {
4285  heap_hdr->next_vpid = vpid;
4286  /* will be logged later */
4287  }
4288  else
4289  {
4290  HEAP_CHAIN *chain, chain_prev;
4291 
4292  /* get chain */
4293  chain = heap_get_chain_ptr (thread_p, last_pg_watcher.pgptr);
4294  if (chain == NULL)
4295  {
4296  assert_release (false);
4297  error_code = ER_FAILED;
4298  goto error;
4299  }
4300  /* update chain */
4301  /* save old chain for logging */
4302  chain_prev = *chain;
4303  /* change next link */
4304  chain->next_vpid = vpid;
4305 
4306  /* log change */
4307  addr.pgptr = last_pg_watcher.pgptr;
4308  log_append_undoredo_data (thread_p, RVHF_CHAIN, &addr, sizeof (HEAP_CHAIN), sizeof (HEAP_CHAIN), &chain_prev,
4309  chain);
4310  pgbuf_set_dirty (thread_p, addr.pgptr, DONT_FREE);
4311  }
4312 
4313  pgbuf_ordered_unfix (thread_p, &last_pg_watcher);
4314 
4315  /* now update header statistics for best1 space page. the changes to the statistics are not logged. */
4316  /* last page hint */
4317  heap_hdr->estimates.last_vpid = vpid;
4318  heap_hdr->estimates.num_pages++;
4319 
4320  best = heap_hdr->estimates.head;
4321  heap_hdr->estimates.head = HEAP_STATS_NEXT_BEST_INDEX (best);
4322  if (VPID_ISNULL (&heap_hdr->estimates.best[best].vpid))
4323  {
4324  heap_hdr->estimates.num_high_best++;
4325  assert (heap_hdr->estimates.num_high_best <= HEAP_NUM_BEST_SPACESTATS);
4326  }
4327  else
4328  {
4329  if (heap_hdr->estimates.best[best].freespace > HEAP_DROP_FREE_SPACE)
4330  {
4331  heap_hdr->estimates.num_other_high_best++;
4332  heap_stats_put_second_best (heap_hdr, &heap_hdr->estimates.best[best].vpid);
4333  }
4334  }
4335 
4336  heap_hdr->estimates.best[best].vpid = vpid;
4337  heap_hdr->estimates.best[best].freespace = DB_PAGESIZE;
4338 
4339  if (prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0)
4340  {
4341  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, heap_hdr->estimates.best[best].freespace);
4342  }
4343 
4344  /* we really have nothing to lose from logging stats here and also it is good to have a certain last VPID. */
4345  addr.pgptr = hdr_pgptr;
4346  log_append_undoredo_data (thread_p, RVHF_STATS, &addr, sizeof (HEAP_HDR_STATS), sizeof (HEAP_HDR_STATS),
4347  &heap_hdr_prev, heap_hdr);
4348  log_sysop_commit (thread_p);
4349 
4350  /* fix new page */
4351  new_pg_watcher->pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, scan_cache, new_pg_watcher);
4352  if (new_pg_watcher->pgptr == NULL)
4353  {
4354  ASSERT_ERROR_AND_SET (error_code);
4355  return error_code;
4356  }
4357 
4358  return NO_ERROR;
4359 
4360 error:
4361  assert (error_code != NO_ERROR);
4362 
4363  if (last_pg_watcher.pgptr != NULL)
4364  {
4365  pgbuf_ordered_unfix (thread_p, &last_pg_watcher);
4366  }
4367  log_sysop_abort (thread_p);
4368 
4369  return error_code;
4370 }
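When the freshly allocated page is registered in the statistics, the hint slot at the ring head is overwritten; if the evicted entry still advertised useful free space, it is demoted to the second-best list instead of being lost. Below is a simplified, self-contained sketch of that eviction policy; page ids and a plain counter stand in for VPIDs and the second-best queue, and the constants are assumed stand-ins:

#include <stdbool.h>

#define NUM_BEST 10          /* stand-in for HEAP_NUM_BEST_SPACESTATS */
#define DROP_FREE_SPACE 4800 /* stand-in for HEAP_DROP_FREE_SPACE (roughly 30% of a page) */

struct hint { int page; int freespace; bool used; };

struct estimates
{
  struct hint best[NUM_BEST];
  int head;                  /* next slot to overwrite */
  int num_high_best;
  int num_other_high_best;   /* pages evicted from the ring but still good */
};

/* Register a newly allocated page in the hint ring, demoting the entry it
 * replaces when that entry still advertises useful free space. */
static void
register_new_page (struct estimates *est, int new_page, int page_size)
{
  int slot = est->head;

  est->head = (slot + 1) % NUM_BEST;
  if (!est->best[slot].used)
    {
      est->num_high_best++;
    }
  else if (est->best[slot].freespace > DROP_FREE_SPACE)
    {
      est->num_other_high_best++;      /* remember it outside the ring */
    }

  est->best[slot].page = new_page;
  est->best[slot].freespace = page_size;   /* the whole new page is free */
  est->best[slot].used = true;
}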
4371 
4372 /*
4373  * heap_vpid_remove () - Deallocate a heap page
4374  * return: rm_vpid on success or NULL on error
4375  * hfid(in): Object heap file identifier
4376  * heap_hdr(in): The heap header stats
4377  * rm_vpid(in): Page to remove
4378  *
4379  * Note: The given page is removed from the heap. The linked list of heap
4380  * pages is updated to remove this page, and the heap header may
4381  * be updated if this page was part of the statistics.
4382  */
4383 static VPID *
4384 heap_vpid_remove (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, VPID * rm_vpid)
4385 {
4386  RECDES rm_recdes; /* Record descriptor which holds the chain of the page to be removed */
4387  HEAP_CHAIN *rm_chain; /* Chain information of the page to be removed */
4388  VPID vpid; /* Real identifier of previous page */
4389  LOG_DATA_ADDR addr; /* Log address of previous page */
4390  RECDES recdes; /* Record descriptor to page header */
4391  HEAP_CHAIN chain; /* Chain to next and prev page */
4392  int sp_success;
4393  int i;
4394  PGBUF_WATCHER rm_pg_watcher;
4395  PGBUF_WATCHER prev_pg_watcher;
4396 
4397  PGBUF_INIT_WATCHER (&rm_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4398  PGBUF_INIT_WATCHER (&prev_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4399 
4400  /*
4401  * Make sure that this is not the header page since the header page cannot
4402  * be removed. If the header page is removed.. the heap is gone
4403  */
4404 
4405  if (rm_vpid->pageid == hfid->hpgid && rm_vpid->volid == hfid->vfid.volid)
4406  {
4407  er_log_debug (ARG_FILE_LINE, "heap_vpid_remove: Trying to remove header page = %d|%d of heap file = %d|%d|%d",
4408  (int) rm_vpid->volid, rm_vpid->pageid, (int) hfid->vfid.volid, hfid->vfid.fileid, hfid->hpgid);
4409  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_GENERIC_ERROR, 0);
4410  goto error;
4411  }
4412 
4413  /* Get the chain record */
4414  rm_pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, rm_vpid, OLD_PAGE, X_LOCK, NULL, &rm_pg_watcher);
4415  if (rm_pg_watcher.pgptr == NULL)
4416  {
4417  /* Looks like a system error. Unable to obtain chain header record */
4418  goto error;
4419  }
4420 
4421  if (spage_get_record (thread_p, rm_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &rm_recdes, PEEK) != S_SUCCESS)
4422  {
4423  /* Looks like a system error. Unable to obtain chain header record */
4424  goto error;
4425  }
4426 
4427  rm_chain = (HEAP_CHAIN *) rm_recdes.data;
4428 
4429  /*
4430  * UPDATE PREVIOUS PAGE
4431  *
4432  * Update chain next field of previous last page
4433  * If previous page is the heap header page, it contains a heap header
4434  * instead of a chain.
4435  */
4436 
4437  vpid = rm_chain->prev_vpid;
4438  addr.vfid = &hfid->vfid;
4440 
4441  prev_pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, NULL, &prev_pg_watcher);
4442  if (prev_pg_watcher.pgptr == NULL)
4443  {
4444  /* something went wrong, return */
4445  goto error;
4446  }
4447 
4448  if (rm_pg_watcher.page_was_unfixed)
4449  {
4450  /* TODO : unexpected: need to reconsider the algorithm, if this is an ordinary case */
4452  vpid.volid, vpid.pageid);
4453  goto error;
4454  }
4455 
4456  /*
4457  * Make sure that the page to be removed is not referenced on the heap
4458  * statistics
4459  */
4460 
4461  assert (heap_hdr != NULL);
4462 
4463  /*
4464  * We cannot break in the following loop since a best page could be
4465  * duplicated
4466  */
4467  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
4468  {
4469  if (VPID_EQ (&heap_hdr->estimates.best[i].vpid, rm_vpid))
4470  {
4471  VPID_SET_NULL (&heap_hdr->estimates.best[i].vpid);
4472  heap_hdr->estimates.best[i].freespace = 0;
4473  heap_hdr->estimates.head = i;
4474  }
4475  }
4476 
4477  if (VPID_EQ (&heap_hdr->estimates.last_vpid, rm_vpid))
4478  {
4479  /* If the page is the last page of the heap file, update the hint */
4480  heap_hdr->estimates.last_vpid = rm_chain->prev_vpid;
4481  }
4482 
4483  /*
4484  * Is previous page the header page ?
4485  */
4486  if (vpid.pageid == hfid->hpgid && vpid.volid == hfid->vfid.volid)
4487  {
4488  /*
4489  * PREVIOUS PAGE IS THE HEADER PAGE.
4490  * It contains a heap header instead of a chain record
4491  */
4492  heap_hdr->next_vpid = rm_chain->next_vpid;
4493  }
4494  else
4495  {
4496  /*
4497  * PREVIOUS PAGE IS NOT THE HEADER PAGE.
4498  * It contains a chain...
4499  * We need to make sure that there is not references to the page to delete
4500  * in the statistics of the heap header
4501  */
4502 
4503  /* NOW check the PREVIOUS page */
4504  /* Get the chain record */
4505  if (spage_get_record (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4506  {
4507  /* Looks like a system error. Unable to obtain header record */
4508  goto error;
4509  }
4510 
4511  /* Copy the chain record to memory.. so we can log the changes */
4512  memcpy (&chain, recdes.data, sizeof (chain));
4513 
4514  /* Modify the chain of the previous page in memory */
4515  chain.next_vpid = rm_chain->next_vpid;
4516 
4517  /* Log the desired changes.. and then change the header */
4518  addr.pgptr = prev_pg_watcher.pgptr;
4519  log_append_undoredo_data (thread_p, RVHF_CHAIN, &addr, sizeof (chain), sizeof (chain), recdes.data, &chain);
4520 
4521  /* Now change the record */
4522  recdes.area_size = recdes.length = sizeof (chain);
4523  recdes.type = REC_HOME;
4524  recdes.data = (char *) &chain;
4525 
4526  sp_success = spage_update (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes);
4527  if (sp_success != SP_SUCCESS)
4528  {
4529  /*
4530  * This looks like a system error; the size did not change, so why did it
4531  * fail?
4532  */
4533  if (sp_success != SP_ERROR)
4534  {
4535  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_GENERIC_ERROR, 0);
4536  }
4537  goto error;
4538  }
4539 
4540  }
4541 
4542  /* Now set dirty, free and unlock the previous page */
4543  pgbuf_ordered_set_dirty_and_free (thread_p, &prev_pg_watcher);
4544 
4545  /*
4546  * UPDATE NEXT PAGE
4547  *
4548  * Update chain previous field of next page
4549  */
4550 
4551  if (!(VPID_ISNULL (&rm_chain->next_vpid)))
4552  {
4553  vpid = rm_chain->next_vpid;
4555 
4556  prev_pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, NULL, &prev_pg_watcher);
4557  if (prev_pg_watcher.pgptr == NULL)
4558  {
4559  /* something went wrong, return */
4560  goto error;
4561  }
4562 
4563  /* Get the chain record */
4564  if (spage_get_record (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4565  {
4566  /* Looks like a system error. Unable to obtain header record */
4567  goto error;
4568  }
4569 
4570  /* Copy the chain record to memory.. so we can log the changes */
4571  memcpy (&chain, recdes.data, sizeof (chain));
4572 
4573  /* Modify the chain of the next page in memory */
4574  chain.prev_vpid = rm_chain->prev_vpid;
4575 
4576  /* Log the desired changes.. and then change the header */
4577  addr.pgptr = prev_pg_watcher.pgptr;
4578  log_append_undoredo_data (thread_p, RVHF_CHAIN, &addr, sizeof (chain), sizeof (chain), recdes.data, &chain);
4579 
4580  /* Now change the record */
4581  recdes.area_size = recdes.length = sizeof (chain);
4582  recdes.type = REC_HOME;
4583  recdes.data = (char *) &chain;
4584 
4585  sp_success = spage_update (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes);
4586  if (sp_success != SP_SUCCESS)
4587  {
4588  /*
4589  * This looks like a system error; the size did not change, so why did it
4590  * fail?
4591  */
4592  if (sp_success != SP_ERROR)
4593  {
4594  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_GENERIC_ERROR, 0);
4595  }
4596  goto error;
4597  }
4598 
4599  /* Now set dirty, free and unlock the next page */
4600 
4601  pgbuf_ordered_set_dirty_and_free (thread_p, &prev_pg_watcher);
4602  }
4603 
4604  /* Free the page to be deallocated and deallocate the page */
4605  pgbuf_ordered_unfix (thread_p, &rm_pg_watcher);
4606 
4607  if (file_dealloc (thread_p, &hfid->vfid, rm_vpid, FILE_HEAP) != NO_ERROR)
4608  {
4609  ASSERT_ERROR ();
4610  goto error;
4611  }
4612 
4613  (void) heap_stats_del_bestspace_by_vpid (thread_p, rm_vpid);
4614 
4615  return rm_vpid;
4616 
4617 error:
4618  if (rm_pg_watcher.pgptr != NULL)
4619  {
4620  pgbuf_ordered_unfix (thread_p, &rm_pg_watcher);
4621  }
4622  if (addr.pgptr != NULL)
4623  {
4624  pgbuf_ordered_unfix (thread_p, &prev_pg_watcher);
4625  }
4626 
4627  return NULL;
4628 }
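heap_vpid_remove is essentially an unlink from a doubly-linked list whose nodes are heap pages: the previous page (or the header) takes over the removed page's next_vpid, and the next page, if any, takes over its prev_vpid. A minimal in-memory sketch of the same unlink, with plain pointers standing in for VPIDs:

#include <stddef.h>

struct node
{
  struct node *prev;
  struct node *next;
};

/* Unlink 'victim' from the doubly-linked list headed by '*head'.
 * Mirrors updating prev->next_vpid and next->prev_vpid in the heap chain. */
static void
unlink_node (struct node **head, struct node *victim)
{
  if (victim->prev != NULL)
    {
      victim->prev->next = victim->next;
    }
  else
    {
      *head = victim->next;       /* victim was the first node (the "header" case) */
    }
  if (victim->next != NULL)
    {
      victim->next->prev = victim->prev;
    }
  victim->prev = victim->next = NULL;
}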
4629 
4630 /*
4631  * heap_remove_page_on_vacuum () - Remove heap page from heap file during
4632  * vacuum process. Function is trying to
4633  * be as least intrusive as possible and all
4634  * required pages are latched conditionally.
4635  * Give up on any failed operation.
4636  *
4637  * return : True if page was deallocated, false if not.
4638  * thread_p (in) : Thread entry.
4639  * page_ptr (in) : Pointer to page being deallocated.
4640  * hfid (in) : Heap file identifier.
4641  */
4642 bool
4643 heap_remove_page_on_vacuum (THREAD_ENTRY * thread_p, PAGE_PTR * page_ptr, HFID * hfid)
4644 {
4645  VPID page_vpid = VPID_INITIALIZER; /* VPID of page being removed. */
4646  VPID prev_vpid = VPID_INITIALIZER; /* VPID of previous page. */
4647  VPID next_vpid = VPID_INITIALIZER; /* VPID of next page. */
4648  VPID header_vpid = VPID_INITIALIZER; /* VPID of heap header page. */
4649  HEAP_HDR_STATS heap_hdr; /* Heap header & stats. */
4650  HEAP_CHAIN chain; /* Heap page header used to read and update page links. */
4651  RECDES copy_recdes; /* Record to copy header from pages. */
4652  /* Buffer used for copy record. */
4653  char copy_recdes_buffer[MAX (sizeof (HEAP_CHAIN), sizeof (HEAP_HDR_STATS)) + MAX_ALIGNMENT];
4654  RECDES update_recdes; /* Record containing updated header data. */
4655  int i = 0; /* Iterator. */
4656  bool is_system_op_started = false; /* Set to true once system operation is started. */
4657  PGBUF_WATCHER crt_watcher; /* Watcher for current page. */
4658  PGBUF_WATCHER header_watcher; /* Watcher for header page. */
4659  PGBUF_WATCHER prev_watcher; /* Watcher for previous page. */
4660  PGBUF_WATCHER next_watcher; /* Watcher for next page. */
4661 
4662  /* Assert expected arguments. */
4663  /* Page to remove must be fixed. */
4664  assert (page_ptr != NULL && *page_ptr != NULL);
4665  /* Page to remove must be empty. */
4666  assert (spage_number_of_records (*page_ptr) <= 1);
4667  /* Heap file identifier must be known. */
4668  assert (hfid != NULL && !HFID_IS_NULL (hfid));
4669 
4670  /* Get VPID of page to be removed. */
4671  pgbuf_get_vpid (*page_ptr, &page_vpid);
4672 
4673  if (page_vpid.pageid == hfid->hpgid && page_vpid.volid == hfid->vfid.volid)
4674  {
4675  /* Cannot remove heap file header page. */
4676  return false;
4677  }
4678 
4679  /* Use page watchers to do the ordered fix. */
4680  PGBUF_INIT_WATCHER (&crt_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4681  PGBUF_INIT_WATCHER (&header_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);
4682  PGBUF_INIT_WATCHER (&prev_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4683  PGBUF_INIT_WATCHER (&next_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4684 
4685  /* Current page is already fixed. Just attach watcher. */
4686  pgbuf_attach_watcher (thread_p, *page_ptr, PGBUF_LATCH_WRITE, hfid, &crt_watcher);
4687 
4688  /* Header vpid. */
4689  header_vpid.volid = hfid->vfid.volid;
4690  header_vpid.pageid = hfid->hpgid;
4691 
4692  /* Fix required pages: Heap header page. Previous page (always exists). Next page (if exists). */
4693 
4694  /* Fix header page first, because it has higher priority. */
4695  if (pgbuf_ordered_fix (thread_p, &header_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &header_watcher) != NO_ERROR)
4696  {
4697  /* Give up. */
4699  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4700  goto error;
4701  }
4702  assert (header_watcher.pgptr != NULL);
4703 
4704  if (crt_watcher.page_was_unfixed)
4705  {
4706  *page_ptr = crt_watcher.pgptr; /* home was refixed */
4707  }
4708 
4709  /* Get previous and next page VPID's. */
4710  if (heap_vpid_prev (thread_p, hfid, *page_ptr, &prev_vpid) != NO_ERROR
4711  || heap_vpid_next (thread_p, hfid, *page_ptr, &next_vpid) != NO_ERROR)
4712  {
4713  /* Give up. */
4715  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4716  goto error;
4717  }
4718 
4719  /* Fix previous page if it is not same as header. */
4720  if (!VPID_ISNULL (&prev_vpid) && !VPID_EQ (&prev_vpid, &header_vpid))
4721  {
4722  if (pgbuf_ordered_fix (thread_p, &prev_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &prev_watcher) != NO_ERROR)
4723  {
4724  /* Give up. */
4726  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4727  page_vpid.pageid);
4728  goto error;
4729  }
4730  }
4731 
4732  /* Fix next page if current page is not last in heap file. */
4733  if (!VPID_ISNULL (&next_vpid))
4734  {
4735  if (pgbuf_ordered_fix (thread_p, &next_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &next_watcher) != NO_ERROR)
4736  {
4737  /* Give up. */
4739  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4740  page_vpid.pageid);
4741  goto error;
4742  }
4743  }
4744 
4745  /* All pages are fixed. */
4746 
4747  if (crt_watcher.page_was_unfixed)
4748  {
4749  *page_ptr = crt_watcher.pgptr; /* home was refixed */
4750 
4751  if (spage_number_of_records (crt_watcher.pgptr) > 1)
4752  {
4753  /* Current page has new data. It is no longer a candidate for removal. */
4755  "Candidate heap page %d|%d to remove was changed and has new data.", page_vpid.volid,
4756  page_vpid.pageid);
4757  goto error;
4758  }
4759  }
4760 
4761  /* recheck the dealloc flag after all latches are acquired */
4762  if (pgbuf_has_prevent_dealloc (crt_watcher.pgptr))
4763  {
4764  /* Even though we have fixed all required pages, somebody was doing a heap scan, and already reached our page. We
4765  * cannot deallocate it. */
4767  "Candidate heap page %d|%d to remove has waiters.", page_vpid.volid, page_vpid.pageid);
4768  goto error;
4769  }
4770 
4771  /* if we are here, the page should not be accessed by any active or vacuum workers. Active workers are prevented
4772  * from accessing it through heap scan, and direct references should not exist.
4773  * The function would not be called if any other vacuum worker tried to access the page. */
4774  if (pgbuf_has_any_waiters (crt_watcher.pgptr))
4775  {
4776  assert (false);
4777  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "%s", "Unexpected page waiters");
4778  goto error;
4779  }
4780  /* all good, we can deallocate the page */
4781 
4782  /* Start changes under the protection of system operation. */
4783  log_sysop_start (thread_p);
4784  is_system_op_started = true;
4785 
4786  /* Remove page from statistics in header page. */
4787  copy_recdes.data = PTR_ALIGN (copy_recdes_buffer, MAX_ALIGNMENT);
4788  copy_recdes.area_size = sizeof (heap_hdr);
4789  if (spage_get_record (thread_p, header_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &copy_recdes, COPY) != S_SUCCESS)
4790  {
4791  assert_release (false);
4793  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4794  goto error;
4795  }
4796  memcpy (&heap_hdr, copy_recdes.data, sizeof (heap_hdr));
4797 
4798  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
4799  {
4800  if (VPID_EQ (&heap_hdr.estimates.best[i].vpid, &page_vpid))
4801  {
4802  VPID_SET_NULL (&heap_hdr.estimates.best[i].vpid);
4803  heap_hdr.estimates.best[i].freespace = 0;
4804  heap_hdr.estimates.head = i;
4805  heap_hdr.estimates.num_high_best--;
4806  }
4807  if (VPID_EQ (&heap_hdr.estimates.second_best[i], &page_vpid))
4808  {
4809  VPID_SET_NULL (&heap_hdr.estimates.second_best[i]);
4810  }
4811  }
4812  if (VPID_EQ (&heap_hdr.estimates.last_vpid, &page_vpid))
4813  {
4814  VPID_COPY (&heap_hdr.estimates.last_vpid, &prev_vpid);
4815  }
4816  if (VPID_EQ (&prev_vpid, &header_vpid))
4817  {
4818  /* Update next link. */
4819  VPID_COPY (&heap_hdr.next_vpid, &next_vpid);
4820  }
4821  if (VPID_EQ (&heap_hdr.estimates.full_search_vpid, &page_vpid))
4822  {
4824  }
4825 
4826  /* Update header and log changes. */
4827  update_recdes.data = (char *) &heap_hdr;
4828  update_recdes.length = sizeof (heap_hdr);
4829  if (spage_update (thread_p, header_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &update_recdes) != SP_SUCCESS)
4830  {
4831  assert_release (false);
4833  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4834  goto error;
4835  }
4836  log_append_undoredo_data2 (thread_p, RVHF_STATS, &hfid->vfid, header_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID,
4837  sizeof (heap_hdr), sizeof (heap_hdr), copy_recdes.data, update_recdes.data);
4838  pgbuf_set_dirty (thread_p, header_watcher.pgptr, DONT_FREE);
4839 
4840  /* Update links in previous and next page. */
4841 
4842  if (prev_watcher.pgptr != NULL)
4843  {
4844  /* Next link in previous page. */
4845  assert (!VPID_EQ (&header_vpid, &prev_vpid));
4846  copy_recdes.area_size = sizeof (chain);
4847  if (spage_get_record (thread_p, prev_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &copy_recdes, COPY) !=
4848  S_SUCCESS)
4849  {
4850  assert_release (false);
4852  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4853  page_vpid.pageid);
4854  goto error;
4855  }
4856  memcpy (&chain, copy_recdes.data, copy_recdes.length);
4857  VPID_COPY (&chain.next_vpid, &next_vpid);
4858  update_recdes.data = (char *) &chain;
4859  update_recdes.length = sizeof (chain);
4860  if (spage_update (thread_p, prev_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &update_recdes) != SP_SUCCESS)
4861  {
4862  assert_release (false);
4864  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4865  page_vpid.pageid);
4866  goto error;
4867  }
4868  log_append_undoredo_data2 (thread_p, RVHF_CHAIN, &hfid->vfid, prev_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID,
4869  sizeof (chain), sizeof (chain), copy_recdes.data, update_recdes.data);
4870  pgbuf_set_dirty (thread_p, prev_watcher.pgptr, DONT_FREE);
4871  }
4872 
4873  if (next_watcher.pgptr != NULL)
4874  {
4875  /* Previous link in next page. */
4876  copy_recdes.area_size = sizeof (chain);
4877  if (spage_get_record (thread_p, next_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &copy_recdes, COPY) !=
4878  S_SUCCESS)
4879  {
4880  assert_release (false);
4882  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4883  page_vpid.pageid);
4884  goto error;
4885  }
4886  memcpy (&chain, copy_recdes.data, sizeof (chain));
4887  VPID_COPY (&chain.prev_vpid, &prev_vpid);
4888  update_recdes.data = (char *) &chain;
4889  update_recdes.length = sizeof (chain);
4890 
4891  if (spage_update (thread_p, next_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &update_recdes) != SP_SUCCESS)
4892  {
4893  assert_release (false);
4895  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4896  page_vpid.pageid);
4897  goto error;
4898  }
4899  log_append_undoredo_data2 (thread_p, RVHF_CHAIN, &hfid->vfid, next_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID,
4900  sizeof (chain), sizeof (chain), copy_recdes.data, update_recdes.data);
4901  pgbuf_set_dirty (thread_p, next_watcher.pgptr, DONT_FREE);
4902  }
4903 
4904  /* Unfix current page. */
4905  pgbuf_ordered_unfix_and_init (thread_p, *page_ptr, &crt_watcher);
4906  /* Deallocate current page. */
4907  if (file_dealloc (thread_p, &hfid->vfid, &page_vpid, FILE_HEAP) != NO_ERROR)
4908  {
4909  ASSERT_ERROR ();
4911  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4912  goto error;
4913  }
4914 
4915  /* Remove page from best space cached statistics. */
4916  (void) heap_stats_del_bestspace_by_vpid (thread_p, &page_vpid);
4917 
4918  /* Finished. */
4919  log_sysop_commit (thread_p);
4920  is_system_op_started = false;
4921 
4922  /* Unfix all pages. */
4923  if (next_watcher.pgptr != NULL)
4924  {
4925  pgbuf_ordered_unfix (thread_p, &next_watcher);
4926  }
4927  if (prev_watcher.pgptr != NULL)
4928  {
4929  pgbuf_ordered_unfix (thread_p, &prev_watcher);
4930  }
4931  pgbuf_ordered_unfix (thread_p, &header_watcher);
4932 
4933  /* Page removed successfully. */
4934  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Successfully remove heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4935  return true;
4936 
4937 error:
4938  if (is_system_op_started)
4939  {
4940  log_sysop_abort (thread_p);
4941  }
4942  if (next_watcher.pgptr != NULL)
4943  {
4944  pgbuf_ordered_unfix (thread_p, &next_watcher);
4945  }
4946  if (prev_watcher.pgptr != NULL)
4947  {
4948  pgbuf_ordered_unfix (thread_p, &prev_watcher);
4949  }
4950  if (header_watcher.pgptr != NULL)
4951  {
4952  pgbuf_ordered_unfix (thread_p, &header_watcher);
4953  }
4954  if (*page_ptr != NULL)
4955  {
4956  if (*page_ptr != crt_watcher.pgptr)
4957  {
4958  /* jumped to here while fixing pages */
4959  assert (crt_watcher.page_was_unfixed);
4960  *page_ptr = crt_watcher.pgptr;
4961  }
4962  assert (crt_watcher.pgptr == *page_ptr);
4963  pgbuf_ordered_unfix_and_init (thread_p, *page_ptr, &crt_watcher);
4964  }
4965  else
4966  {
4967  assert (crt_watcher.pgptr == NULL);
4968  }
4969  /* Page was not removed. */
4970  return false;
4971 }
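As the note above says, page removal during vacuum only proceeds when every required page can be latched without waiting, and gives up otherwise. The same all-or-nothing, never-block pattern can be sketched with POSIX trylock primitives; pthread mutexes are only an analogy for page latches here, not the page buffer API:

#include <pthread.h>
#include <stdbool.h>

/* Try to acquire every lock without blocking; on the first failure release
 * what was taken and report that the caller should give up (like the
 * conditional page latches taken during vacuum page removal). */
static bool
try_lock_all (pthread_mutex_t **locks, int n)
{
  for (int i = 0; i < n; i++)
    {
      if (pthread_mutex_trylock (locks[i]) != 0)
        {
          while (--i >= 0)
            {
              pthread_mutex_unlock (locks[i]);
            }
          return false;         /* give up; retry later */
        }
    }
  return true;                  /* all locks held; safe to proceed */
}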
4972 
4973 /*
4974  * heap_vpid_next () - Find next page of heap
4975  * return: NO_ERROR
4976  * hfid(in): Object heap file identifier
4977  * pgptr(in): Current page pointer
4978  * next_vpid(in/out): Next volume-page identifier
4979  *
4980  * Note: Find the next page of heap file.
4981  */
4982 int
4983 heap_vpid_next (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR pgptr, VPID * next_vpid)
4984 {
4985  HEAP_CHAIN *chain; /* Chain to next and prev page */
4986  HEAP_HDR_STATS *heap_hdr; /* Header of heap file */
4987  RECDES recdes; /* Record descriptor to page header */
4988  int ret = NO_ERROR;
4989 
4990  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
4991 
4992  /* Get either the heap header or chain record */
4993  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4994  {
4995  /* Unable to get header/chain record for the given page */
4996  VPID_SET_NULL (next_vpid);
4997  ret = ER_FAILED;
4998  }
4999  else
5000  {
5001  pgbuf_get_vpid (pgptr, next_vpid);
5002  /* Is this the header page ? */
5003  if (next_vpid->pageid == hfid->hpgid && next_vpid->volid == hfid->vfid.volid)
5004  {
5005  heap_hdr = (HEAP_HDR_STATS *) recdes.data;
5006  *next_vpid = heap_hdr->next_vpid;
5007  }
5008  else
5009  {
5010  chain = (HEAP_CHAIN *) recdes.data;
5011  *next_vpid = chain->next_vpid;
5012  }
5013  }
5014 
5015  return ret;
5016 }
5017 
5018 /*
5019  * heap_vpid_prev () - Find previous page of heap
5020  * return: NO_ERROR
5021  * hfid(in): Object heap file identifier
5022  * pgptr(in): Current page pointer
5023  * prev_vpid(in/out): Previous volume-page identifier
5024  *
5025  * Note: Find the previous page of heap file.
5026  */
5027 int
5028 heap_vpid_prev (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR pgptr, VPID * prev_vpid)
5029 {
5030  HEAP_CHAIN *chain; /* Chain to next and prev page */
5031  RECDES recdes; /* Record descriptor to page header */
5032  int ret = NO_ERROR;
5033 
5034  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
5035 
5036  /* Get either the header or chain record */
5037  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
5038  {
5039  /* Unable to get header/chain record for the given page */
5040  VPID_SET_NULL (prev_vpid);
5041  ret = ER_FAILED;
5042  }
5043  else
5044  {
5045  pgbuf_get_vpid (pgptr, prev_vpid);
5046  /* Is this the header page ? */
5047  if (prev_vpid->pageid == hfid->hpgid && prev_vpid->volid == hfid->vfid.volid)
5048  {
5049  VPID_SET_NULL (prev_vpid);
5050  }
5051  else
5052  {
5053  chain = (HEAP_CHAIN *) recdes.data;
5054  *prev_vpid = chain->prev_vpid;
5055  }
5056  }
5057 
5058  return ret;
5059 }
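Together, heap_vpid_next and heap_vpid_prev expose the page chain as a forward/backward iterator: a full scan starts at the header page and follows next links until the VPID comes back NULL. A toy iteration sketch, with integer page ids and a lookup array standing in for VPIDs and fixed pages:

#include <stdio.h>

#define NULL_PAGE (-1)

/* next[i] holds the id of the page after page i, or NULL_PAGE at the end;
 * stands in for reading next_vpid from the header/chain record. */
static int
toy_vpid_next (const int *next, int page)
{
  return next[page];
}

int
main (void)
{
  /* header page 0 -> 1 -> 2 -> end */
  int next[] = { 1, 2, NULL_PAGE };

  for (int page = 0; page != NULL_PAGE; page = toy_vpid_next (next, page))
    {
      printf ("visit heap page %d\n", page);
    }
  return 0;
}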
5060 
5061 /*
5062  * heap_manager_initialize () -
5063  * return: NO_ERROR
5064  *
5065  * Note: Initialization process of the heap file module. Find the
5066  * maximum size of an object that can be inserted in the heap.
5067  * Objects that exceed this size are stored in overflow.
5068  */
5069 int
5070 heap_manager_initialize (void)
5071 {
5072  int ret;
5073 
5074 #define HEAP_MAX_FIRSTSLOTID_LENGTH (sizeof (HEAP_HDR_STATS))
5075 
5078 
5079  /* Initialize the class representation cache */
5080  ret = heap_chnguess_initialize ();
5081  if (ret != NO_ERROR)
5082  {
5083  return ret;
5084  }
5085 
5087  if (ret != NO_ERROR)
5088  {
5089  return ret;
5090  }
5091 
5092  /* Initialize best space cache */
5094  if (ret != NO_ERROR)
5095  {
5096  return ret;
5097  }
5098 
5099  /* Initialize class OID->HFID cache */
5100  ret = heap_initialize_hfid_table ();
5101 
5102  return ret;
5103 }
5104 
5105 /*
5106  * heap_manager_finalize () - Terminate the heap manager
5107  * return: NO_ERROR
5108  * Note: Deallocate any cached structure.
5109  */
5110 int
5111 heap_manager_finalize (void)
5112 {
5113  int ret;
5114 
5115  ret = heap_chnguess_finalize ();
5116  if (ret != NO_ERROR)
5117  {
5118  return ret;
5119  }
5120 
5122  if (ret != NO_ERROR)
5123  {
5124  return ret;
5125  }
5126 
5128  if (ret != NO_ERROR)
5129  {
5130  return ret;
5131  }
5132 
5134 
5135  return ret;
5136 }
5137 
5138 /*
5139  * heap_create_internal () - Create a heap file
5140  * return: HFID * (hfid on success and NULL on failure)
5141  * hfid(in/out): Object heap file identifier.
5142  * All fields in the identifier are set, except the volume
5143  * identifier which should have already been set by the caller.
5144  * exp_npgs(in): Expected number of pages
5145  * class_oid(in): OID of the class for which the heap will be created.
5146  * reuse_oid(in): if true, the OIDs of deleted instances will be reused
5147  *
5148  * Note: Creates a heap file on the disk volume associated with
5149  * hfid->vfid->volid.
5150  *
5151  * A set of sectors is allocated to improve locality of the heap.
5152  * The number of sectors to allocate is estimated from the number
5153  * of expected pages. The maximum number of allocated sectors is
5154  * 25% of the total number of sectors in disk. When the number of
5155  * pages cannot be estimated, a negative value can be passed to
5156  * indicate so. In this case, no sectors are allocated. The
5157  * expected pages are not allocated at this moment;
5158  * they are allocated as the need arises.
5159  */
5160 static int
5161 heap_create_internal (THREAD_ENTRY * thread_p, HFID * hfid, const OID * class_oid, const bool reuse_oid)
5162 {
5163  HEAP_HDR_STATS heap_hdr; /* Heap file header */
5164  VPID vpid; /* Volume and page identifiers */
5165  RECDES recdes; /* Record descriptor */
5166  LOG_DATA_ADDR addr_hdr; /* Address of logging data */
5167  INT16 slotid;
5168  int sp_success;
5169  int i;
5170  FILE_DESCRIPTORS des;
5171  const FILE_TYPE file_type = reuse_oid ? FILE_HEAP_REUSE_SLOTS : FILE_HEAP;
5172  PAGE_TYPE ptype = PAGE_HEAP;
5173  OID null_oid = OID_INITIALIZER;
5174  TDE_ALGORITHM tde_algo = TDE_ALGORITHM_NONE;
5175 
5176  int error_code = NO_ERROR;
5177 
5178  addr_hdr.pgptr = NULL;
5179  log_sysop_start (thread_p);
5180 
5181  if (class_oid == NULL)
5182  {
5183  class_oid = &null_oid;
5184  }
5185  memset (hfid, 0, sizeof (HFID));
5186  HFID_SET_NULL (hfid);
5187 
5188  memset (&des, 0, sizeof (des));
5189 
5190  if (prm_get_bool_value (PRM_ID_DONT_REUSE_HEAP_FILE) == false && file_type == FILE_HEAP)
5191  {
5192  /*
5193  * Try to reuse an already mark deleted heap file
5194  */
5195 
5196  error_code = file_tracker_reuse_heap (thread_p, class_oid, hfid);
5197  if (error_code != NO_ERROR)
5198  {
5199  ASSERT_ERROR ();
5200  goto error;
5201  }
5202 
5203  if (!HFID_IS_NULL (hfid))
5204  {
5205  /* reuse heap file */
5206  if (heap_reuse (thread_p, hfid, class_oid, reuse_oid) == NULL)
5207  {
5208  ASSERT_ERROR_AND_SET (error_code);
5209  goto error;
5210  }
5211 
5212  error_code = heap_cache_class_info (thread_p, class_oid, hfid, file_type, NULL);
5213  if (error_code != NO_ERROR)
5214  {
5215  /* could not cache */
5216  ASSERT_ERROR ();
5217  goto error;
5218  }
5219  /* reuse successful */
5220  goto end;
5221  }
5222  }
5223 
5224  /*
5225  * Create the unstructured file for the heap
5226  * Create the header for the heap file. The header is used to speed
5227  * up insertions of objects and to find some simple information about the
5228  * heap.
5229  * We do not initialize the page during the allocation since the file is
5230  * new, and the file is going to be removed in the event of a crash.
5231  */
5232 
5233  error_code = file_create_heap (thread_p, reuse_oid, class_oid, &hfid->vfid);
5234  if (error_code != NO_ERROR)
5235  {
5236  ASSERT_ERROR ();
5237  goto error;
5238  }
5239 
5240  error_code = file_alloc_sticky_first_page (thread_p, &hfid->vfid, file_init_page_type, &ptype, &vpid,
5241  &addr_hdr.pgptr);
5242  if (error_code != NO_ERROR)
5243  {
5244  ASSERT_ERROR ();
5245  goto error;
5246  }
5247  if (vpid.volid != hfid->vfid.volid)
5248  {
5249  /* we got problems */
5250  assert_release (false);
5251  error_code = ER_FAILED;
5252  goto error;
5253  }
5254  if (addr_hdr.pgptr == NULL)
5255  {
5256  /* something went wrong, destroy the file, and return */
5257  assert_release (false);
5258  error_code = ER_FAILED;
5259  goto error;
5260  }
5261 
5262  hfid->hpgid = vpid.pageid;
5263 
5264  /* update file descriptor to include class and hfid */
5265  des.heap.class_oid = *class_oid;
5266  des.heap.hfid = *hfid;
5267  error_code = file_descriptor_update (thread_p, &hfid->vfid, &des);
5268  if (error_code != NO_ERROR)
5269  {
5270  ASSERT_ERROR ();
5271  goto error;
5272  }
5273 
5274  error_code = heap_cache_class_info (thread_p, class_oid, hfid, file_type, NULL);
5275  if (error_code != NO_ERROR)
5276  {
5277  /* Failed to cache HFID. */
5278  ASSERT_ERROR ();
5279  goto error;
5280  }
5281 
5282  (void) heap_stats_del_bestspace_by_hfid (thread_p, hfid);
5283 
5284  pgbuf_set_page_ptype (thread_p, addr_hdr.pgptr, PAGE_HEAP);
5285 
5286  /* Initialize header page */
5287  spage_initialize (thread_p, addr_hdr.pgptr, heap_get_spage_type (), HEAP_MAX_ALIGN, SAFEGUARD_RVSPACE);
5288 
5289  /* Now insert header */
5290  memset (&heap_hdr, 0, sizeof (heap_hdr));
5291  heap_hdr.class_oid = *class_oid;
5292  VFID_SET_NULL (&heap_hdr.ovf_vfid);
5293  VPID_SET_NULL (&heap_hdr.next_vpid);
5294 
5295  heap_hdr.unfill_space = (int) ((float) DB_PAGESIZE * prm_get_float_value (PRM_ID_HF_UNFILL_FACTOR));
5296 
5297  heap_hdr.estimates.num_pages = 1;
5298  heap_hdr.estimates.num_recs = 0;
5299  heap_hdr.estimates.recs_sumlen = 0.0;
5300 
5301  heap_hdr.estimates.best[0].vpid.volid = hfid->vfid.volid;
5302  heap_hdr.estimates.best[0].vpid.pageid = hfid->hpgid;
5303  heap_hdr.estimates.best[0].freespace = spage_max_space_for_new_record (thread_p, addr_hdr.pgptr);
5304 
5305  heap_hdr.estimates.head = 1;
5306  for (i = heap_hdr.estimates.head; i < HEAP_NUM_BEST_SPACESTATS; i++)
5307  {
5308  VPID_SET_NULL (&heap_hdr.estimates.best[i].vpid);
5309  heap_hdr.estimates.best[i].freespace = 0;
5310  }
5311 
5312  heap_hdr.estimates.num_high_best = 1;
5313  heap_hdr.estimates.num_other_high_best = 0;
5314 
5315  heap_hdr.estimates.num_second_best = 0;
5316  heap_hdr.estimates.head_second_best = 0;
5317  heap_hdr.estimates.tail_second_best = 0;
5318  heap_hdr.estimates.num_substitutions = 0;
5319 
5320  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
5321  {
5322  VPID_SET_NULL (&heap_hdr.estimates.second_best[i]);
5323  }
5324 
5325  heap_hdr.estimates.last_vpid.volid = hfid->vfid.volid;
5326  heap_hdr.estimates.last_vpid.pageid = hfid->hpgid;
5327 
5328  heap_hdr.estimates.full_search_vpid.volid = hfid->vfid.volid;
5329  heap_hdr.estimates.full_search_vpid.pageid = hfid->hpgid;
5330 
5331  recdes.area_size = recdes.length = sizeof (HEAP_HDR_STATS);
5332  recdes.type = REC_HOME;
5333  recdes.data = (char *) &heap_hdr;
5334 
5335  sp_success = spage_insert (thread_p, addr_hdr.pgptr, &recdes, &slotid);
5336  if (sp_success != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
5337  {
5338  assert (false);
5339  /* something went wrong, destroy file and return error */
5340  if (sp_success != SP_SUCCESS)
5341  {
5344  }
5345 
5346  /* Free the page and release the lock */
5347  error_code = ER_HEAP_UNABLE_TO_CREATE_HEAP;
5348  goto error;
5349  }
5350  else
5351  {
5352  /*
5353  * Don't need to log before image (undo) since file and pages of the heap
5354  * are deallocated during undo (abort).
5355  */
5356  addr_hdr.vfid = &hfid->vfid;
5357  addr_hdr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
5358  log_append_redo_data (thread_p, RVHF_CREATE_HEADER, &addr_hdr, sizeof (heap_hdr), &heap_hdr);
5359  pgbuf_set_dirty (thread_p, addr_hdr.pgptr, FREE);
5360  addr_hdr.pgptr = NULL;
5361  }
5362 
5363 end:
5364  /* apply TDE to created heap file if needed */
5365  if (heap_get_class_tde_algorithm (thread_p, class_oid, &tde_algo) == NO_ERROR)
5366  {
5367  error_code = file_apply_tde_algorithm (thread_p, &hfid->vfid, tde_algo);
5368  if (error_code != NO_ERROR)
5369  {
5370  ASSERT_ERROR ();
5371  goto error;
5372  }
5373  }
5374  /* if heap_get_class_tde_algorithm() fails, just skip applying it, expecting that a higher layer will do this later */
5375 
5376  assert (error_code == NO_ERROR);
5377 
5378  log_sysop_attach_to_outer (thread_p);
5379  vacuum_log_add_dropped_file (thread_p, &hfid->vfid, class_oid, VACUUM_LOG_ADD_DROPPED_FILE_UNDO);
5380 
5381  logpb_force_flush_pages (thread_p);
5382 
5383  return NO_ERROR;
5384 
5385 error:
5386  assert (error_code != NO_ERROR);
5387 
5388  if (addr_hdr.pgptr != NULL)
5389  {
5390  pgbuf_unfix_and_init (thread_p, addr_hdr.pgptr);
5391  }
5392 
5393  hfid->vfid.fileid = NULL_FILEID;
5394  hfid->hpgid = NULL_PAGEID;
5395 
5396  log_sysop_abort (thread_p);
5397  return error_code;
5398 }
5399 
5400 /*
5401  * heap_delete_all_page_records () -
5402  * return: false if nothing is deleted, otherwise true
5403  * vpid(in): the vpid of the page
5404  * pgptr(in): PAGE_PTR to the page
5405  */
5406 static bool
5407 heap_delete_all_page_records (THREAD_ENTRY * thread_p, const VPID * vpid, PAGE_PTR pgptr)
5408 {
5409  bool something_deleted = false;
5410  OID oid;
5411  RECDES recdes;
5412 
5413  assert (pgptr != NULL);
5414  assert (vpid != NULL);
5415 
5416  oid.volid = vpid->volid;
5417  oid.pageid = vpid->pageid;
5418  oid.slotid = NULL_SLOTID;
5419 
5420  while (true)
5421  {
5422  if (spage_next_record (pgptr, &oid.slotid, &recdes, PEEK) != S_SUCCESS)
5423  {
5424  break;
5425  }
5426  if (oid.slotid == HEAP_HEADER_AND_CHAIN_SLOTID)
5427  {
5428  continue;
5429  }
5430  (void) spage_delete (thread_p, pgptr, oid.slotid);
5431  something_deleted = true;
5432  }
5433 
5434  return something_deleted;
5435 }
5436 
5437 /*
5438  * heap_reinitialize_page () -
5439  * return: NO_ERROR if succeed, otherwise error code
5440  * pgptr(in): PAGE_PTR to the page
5441  * is_header_page(in): true if the page is the header page
5442  */
5443 static int
5444 heap_reinitialize_page (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, const bool is_header_page)
5445 {
5446  HEAP_CHAIN tmp_chain;
5447  HEAP_HDR_STATS tmp_hdr_stats;
5448  PGSLOTID slotid = NULL_SLOTID;
5449  RECDES recdes;
5450  int error_code = NO_ERROR;
5451 
5452  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
5453  {
5455  error_code = ER_GENERIC_ERROR;
5456  goto error_exit;
5457  }
5458 
5459  if (is_header_page)
5460  {
5461  assert (recdes.length == sizeof (HEAP_HDR_STATS));
5462  tmp_hdr_stats = *(HEAP_HDR_STATS *) recdes.data;
5463  recdes.data = (char *) &tmp_hdr_stats;
5464  recdes.area_size = recdes.length = sizeof (tmp_hdr_stats);
5465  recdes.type = REC_HOME;
5466  }
5467  else
5468  {
5469  assert (recdes.length == sizeof (HEAP_CHAIN));
5470  tmp_chain = *(HEAP_CHAIN *) recdes.data;
5471  recdes.data = (char *) &tmp_chain;
5472  recdes.area_size = recdes.length = sizeof (tmp_chain);
5473  recdes.type = REC_HOME;
5474  }
5475 
5476  (void) pgbuf_set_page_ptype (thread_p, pgptr, PAGE_HEAP);
5477 
5478  /* Initialize header page */
5479  spage_initialize (thread_p, pgptr, heap_get_spage_type (), HEAP_MAX_ALIGN, SAFEGUARD_RVSPACE);
5480 
5481  if (spage_insert (thread_p, pgptr, &recdes, &slotid) != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
5482  {
5484  error_code = ER_GENERIC_ERROR;
5485  goto error_exit;
5486  }
5487  else
5488  {
5489  /* All is well and the page is now empty. */
5490  }
5491 
5492  return error_code;
5493 
5494 error_exit:
5495  if (error_code == NO_ERROR)
5496  {
5497  error_code = ER_GENERIC_ERROR;
5498  }
5499  return error_code;
5500 }
5501 
5502 /*
5503  * heap_reuse () - Reuse a heap
5504  * return: HFID * (hfid on success and NULL on failure)
5505  * hfid(in): Object heap file identifier.
5506  * class_oid(in): OID of the class for which the heap will be created.
5507  *
5508  * Note: Clean the given heap file so that it can be reused.
5509  * Note: The heap file must have been permanently marked as deleted.
5510  */
5511 static const HFID *
5512 heap_reuse (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid, const bool reuse_oid)
5513 {
5514  VPID vpid; /* Volume and page identifiers */
5515  PAGE_PTR hdr_pgptr = NULL; /* Page pointer to header page */
5516  PAGE_PTR pgptr = NULL; /* Page pointer */
5517  LOG_DATA_ADDR addr; /* Address of logging data */
5518  HEAP_HDR_STATS *heap_hdr = NULL; /* Header of heap structure */
5519  HEAP_CHAIN *chain; /* Chain to next and prev page */
5520  RECDES recdes;
5521  VPID last_vpid;
5522  int is_header_page;
5523  int npages = 0;
5524  int i;
5525  bool need_update;
5526 
5527  assert (class_oid != NULL);
5528  assert (!OID_ISNULL (class_oid));
5529 
5530  VPID_SET_NULL (&last_vpid);
5531  addr.vfid = &hfid->vfid;
5532 
5533  /*
5534  * Read the header page.
5535  * We lock the header page in exclusive mode.
5536  */
5537 
5538  vpid.volid = hfid->vfid.volid;
5539  vpid.pageid = hfid->hpgid;
5540  hdr_pgptr = pgbuf_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
5541  if (hdr_pgptr == NULL)
5542  {
5543  return NULL;
5544  }
5545 
5546  (void) pgbuf_check_page_ptype (thread_p, hdr_pgptr, PAGE_HEAP);
5547 
5548  /*
5549  * Start scanning every page of the heap and removing the objects.
5550  * Note that, for normal heap files, the slot is not removed since we do not
5551  * know if the objects are pointed to by some other objects in the database.
5552  * For reusable OID heap files we are certain there can be no references to
5553  * the objects so we can simply initialize the slotted page.
5554  */
5555  /*
5556  * Note Because the objects of reusable OID heaps are not referenced,
5557  * reusing such heaps provides no actual benefit. We might consider
5558  * giving up the reuse heap mechanism for reusable OID heaps in the
5559  * future.
5560  */
5561 
5562  while (!(VPID_ISNULL (&vpid)))
5563  {
5564  /*
5565  * Fetch the page
5566  */
5567  pgptr = pgbuf_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
5568  if (pgptr == NULL)
5569  {
5570  goto error;
5571  }
5572 
5573  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
5574 
5575  is_header_page = (hdr_pgptr == pgptr) ? 1 : 0;
5576 
5577  /*
5578  * Remove all the objects in this page
5579  */
5580  if (!reuse_oid)
5581  {
5582  (void) heap_delete_all_page_records (thread_p, &vpid, pgptr);
5583 
5584  addr.pgptr = pgptr;
5585  addr.offset = is_header_page;
5586  log_append_redo_data (thread_p, RVHF_REUSE_PAGE, &addr, sizeof (*class_oid), class_oid);
5587  }
5588  else
5589  {
5590  if (spage_number_of_slots (pgptr) > 1)
5591  {
5592  if (heap_reinitialize_page (thread_p, pgptr, is_header_page) != NO_ERROR)
5593  {
5594  goto error;
5595  }
5596  }
5597 
5598  addr.pgptr = pgptr;
5599  addr.offset = is_header_page;
5600  log_append_redo_data (thread_p, RVHF_REUSE_PAGE_REUSE_OID, &addr, sizeof (*class_oid), class_oid);
5601  }
5602 
5603  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
5604  {
5605  goto error;
5606  }
5607  if (recdes.data == NULL)
5608  {
5609  goto error;
5610  }
5611 
5612  /* save new class oid in the page. it dirties the page. */
5613  if (is_header_page)
5614  {
5615  heap_hdr = (HEAP_HDR_STATS *) recdes.data;
5616  COPY_OID (&(heap_hdr->class_oid), class_oid);
5617  }
5618  else
5619  {
5620  chain = (HEAP_CHAIN *) recdes.data;
5621  COPY_OID (&(chain->class_oid), class_oid);
5622  chain->max_mvccid = MVCCID_NULL;
5623  chain->flags = 0;
5625  }
5626 
5627  if (npages < HEAP_NUM_BEST_SPACESTATS)
5628  {
5629  heap_hdr->estimates.best[npages].vpid = vpid;
5630  heap_hdr->estimates.best[npages].freespace =
5631  spage_get_free_space_without_saving (thread_p, pgptr, &need_update);
5632 
5633  }
5634 
5635  if (prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0)
5636  {
5637  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, DB_PAGESIZE);
5638  }
5639 
5640  npages++;
5641  last_vpid = vpid;
5642 
5643  /*
5644  * Find next page to scan and free the current page
5645  */
5646  if (heap_vpid_next (thread_p, hfid, pgptr, &vpid) != NO_ERROR)
5647  {
5648  goto error;
5649  }
5650 
5651  pgbuf_set_dirty (thread_p, pgptr, FREE);
5652  pgptr = NULL;
5653  }
5654 
5655  /*
5656  * Reset the statistics. Set statistics for insertion back to first page
5657  * and reset unfill space according to new parameters
5658  */
5659  VFID_SET_NULL (&heap_hdr->ovf_vfid);
5660  heap_hdr->unfill_space = (int) ((float) DB_PAGESIZE * prm_get_float_value (PRM_ID_HF_UNFILL_FACTOR));
5661  heap_hdr->estimates.num_pages = npages;
5662  heap_hdr->estimates.num_recs = 0;
5663  heap_hdr->estimates.recs_sumlen = 0.0;
5664 
5665  if (npages < HEAP_NUM_BEST_SPACESTATS)
5666  {
5667  heap_hdr->estimates.num_high_best = npages;
5668  heap_hdr->estimates.num_other_high_best = 0;
5669  }
5670  else
5671  {
5672  heap_hdr->estimates.num_high_best = HEAP_NUM_BEST_SPACESTATS;
5673  heap_hdr->estimates.num_other_high_best = npages - HEAP_NUM_BEST_SPACESTATS;
5674  }
5675 
5676  heap_hdr->estimates.head = 0;
5677  for (i = npages; i < HEAP_NUM_BEST_SPACESTATS; i++)
5678  {
5679  VPID_SET_NULL (&heap_hdr->estimates.best[i].vpid);
5680  heap_hdr->estimates.best[i].freespace = 0;
5681  }
5682 
5683  heap_hdr->estimates.last_vpid = last_vpid;
5684 
5685  addr.pgptr = hdr_pgptr;
5686  addr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
5687  log_append_redo_data (thread_p, RVHF_STATS, &addr, sizeof (*heap_hdr), heap_hdr);
5688  pgbuf_set_dirty (thread_p, hdr_pgptr, FREE);
5689  hdr_pgptr = NULL;
5690 
5691  return hfid;
5692 
5693 error:
5694  if (pgptr != NULL)
5695  {
5696  pgbuf_unfix_and_init (thread_p, pgptr);
5697  }
5698  if (hdr_pgptr != NULL)
5699  {
5700  pgbuf_unfix_and_init (thread_p, hdr_pgptr);
5701  }
5702 
5703  return NULL;
5704 }
5705 
5706 #if defined(CUBRID_DEBUG)
5707 /*
5708  * heap_hfid_isvalid () -
5709  * return:
5710  * hfid(in):
5711  */
5712 static DISK_ISVALID
5713 heap_hfid_isvalid (HFID * hfid)
5714 {
5715  DISK_ISVALID valid_pg = DISK_VALID;
5716 
5717  if (hfid == NULL || HFID_IS_NULL (hfid))
5718  {
5719  return DISK_INVALID;
5720  }
5721 
5722  valid_pg = disk_is_page_sector_reserved (hfid->vfid.volid, hfid->vfid.fileid);
5723  if (valid_pg == DISK_VALID)
5724  {
5725  valid_pg = disk_is_page_sector_reserved (hfid->vfid.volid, hfid->hpgid);
5726  }
5727 
5728  return valid_pg;
5729 }
5730 
5731 /*
5732  * heap_scanrange_isvalid () -
5733  * return:
5734  * scan_range(in):
5735  */
5736 static DISK_ISVALID
5737 heap_scanrange_isvalid (HEAP_SCANRANGE * scan_range)
5738 {
5739  DISK_ISVALID valid_pg = DISK_INVALID;
5740 
5741  if (scan_range != NULL)
5742  {
5743  valid_pg = heap_hfid_isvalid (&scan_range->scan_cache.hfid);
5744  }
5745 
5746  if (valid_pg != DISK_VALID)
5747  {
5748  if (valid_pg != DISK_ERROR)
5749  {
5750  er_log_debug (ARG_FILE_LINE, " ** SYSTEM ERROR scanrange has not been initialized");
5752  }
5753  }
5754 
5755  return valid_pg;
5756 }
5757 #endif /* CUBRID_DEBUG */
5758 
5759 /*
5760  * xheap_create () - Create a heap file
5761  * return: int
5762  * hfid(in/out): Object heap file identifier.
5763  * All fields in the identifier are set, except the volume
5764  * identifier which should have already been set by the caller.
5765  * class_oid(in): OID of the class for which the heap will be created.
5766  * reuse_oid(in):
5767  *
5768  * Note: Creates an object heap file on the disk volume associated with
5769  * hfid->vfid->volid.
5770  */
5771 int
5772 xheap_create (THREAD_ENTRY * thread_p, HFID * hfid, const OID * class_oid, bool reuse_oid)
5773 {
5774  return heap_create_internal (thread_p, hfid, class_oid, reuse_oid);
5775 }
5776 
5777 /*
5778  * xheap_destroy () - Destroy a heap file
5779  * return: int
5780  * hfid(in): Object heap file identifier.
5781  * class_oid(in):
5782  *
5783  * Note: Destroy the heap file associated with the given heap identifier.
5784  */
5785 int
5786 xheap_destroy (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid)
5787 {
5788  VFID vfid;
5789  LOG_DATA_ADDR addr;
5790 
5792 
5793  addr.vfid = NULL;
5794  addr.pgptr = NULL;
5795  addr.offset = -1;
5796  if (heap_ovf_find_vfid (thread_p, hfid, &vfid, false, PGBUF_UNCONDITIONAL_LATCH) != NULL)
5797  {
5798  file_postpone_destroy (thread_p, &vfid);
5799  }
5800 
5801  file_postpone_destroy (thread_p, &hfid->vfid);
5802 
5803  (void) heap_stats_del_bestspace_by_hfid (thread_p, hfid);
5804 
5805  return NO_ERROR;
5806 }
5807 
5808 /*
5809  * xheap_destroy_newly_created () - Destroy heap if it is a newly created heap
5810  * return: NO_ERROR
5811  * hfid(in): Object heap file identifier.
5812  * class_oid(in): class OID
5813  *
5814  * Note: Destroy the heap file associated with the given heap
5815  * identifier if it is a newly created heap file.
5816  */
5817 int
5818 xheap_destroy_newly_created (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid)
5819 {
5820  VFID vfid;
5821  FILE_TYPE file_type;
5822  int ret;
5824 
5825  ret = file_get_type (thread_p, &hfid->vfid, &file_type);
5826  if (ret != NO_ERROR)
5827  {
5828  ASSERT_ERROR ();
5829  return ret;
5830  }
5831  if (file_type == FILE_HEAP_REUSE_SLOTS)
5832  {
5833  ret = xheap_destroy (thread_p, hfid, class_oid);
5834  return ret;
5835  }
5836 
5838 
5839  if (heap_ovf_find_vfid (thread_p, hfid, &vfid, false, PGBUF_UNCONDITIONAL_LATCH) != NULL)
5840  {
5841  file_postpone_destroy (thread_p, &vfid);
5842  }
5843 
5844  log_append_postpone (thread_p, RVHF_MARK_DELETED, &addr, sizeof (hfid->vfid), &hfid->vfid);
5845 
5846  (void) heap_stats_del_bestspace_by_hfid (thread_p, hfid);
5847 
5848  return ret;
5849 }
5850 
5851 /*
5852  * heap_rv_mark_deleted_on_undo () - mark heap file as deleted on undo
5853  *
5854  * return : error code
5855  * thread_p (in) : thread entry
5856  * rcv (in) : recovery data
5857  */
5858 int
5859 heap_rv_mark_deleted_on_undo (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
5860 {
5861  int error_code = file_rv_tracker_mark_heap_deleted (thread_p, rcv, true);
5862  if (error_code != NO_ERROR)
5863  {
5864  assert_release (false);
5865  }
5866  return error_code;
5867 }
5868 
5869 /*
5870  * heap_rv_mark_deleted_on_postpone () - mark heap file as deleted on postpone
5871  *
5872  * return : error code
5873  * thread_p (in) : thread entry
5874  * rcv (in) : recovery data
5875  */
5876 int
5877 heap_rv_mark_deleted_on_postpone (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
5878 {
5879  int error_code = file_rv_tracker_mark_heap_deleted (thread_p, rcv, false);
5880  if (error_code != NO_ERROR)
5881  {
5882  assert_release (false);
5883  }
5884  return error_code;
5885 }
5886 
5887 /*
5888  * heap_assign_address () - Assign a new location
5889  * return: NO_ERROR / ER_FAILED
5890  * hfid(in): Object heap file identifier
5891  * class_oid(in): class identifier
5892  * oid(out): Object identifier.
5893  * expected_length(in): Expected length
5894  *
5895  * Note: Assign an OID to an object and reserve the expected length for
5896  * the object. The following rules are observed for the expected length.
5897  * 1. A negative value is passed when only an approximation of
5898  * the length of the object is known. This approximation is
5899  * taken as the minimal length by this module. This case is
5900  * used when the transformer module (tfcl) skips some fields
5901  * while walking through the object to find out its length.
5902  * a) The heap manager finds the average length of objects in the
5903  * heap.
5904  * If the average length > abs(expected_length),
5905  * the average length is used instead.
5906  * 2. If a zero value is given, the heap manager uses the average length of the
5907  * objects in the heap.
5908  * 3. If length is larger than one page, the size of an OID is
5909  * used since the object is going to be stored in overflow
5910  * 4. If length is > 0 and smaller than OID_SIZE
5911  * OID_SIZE is used as the expected length.
5912  */
5913 int
5914 heap_assign_address (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * oid, int expected_length)
5915 {
5916  HEAP_OPERATION_CONTEXT insert_context;
5917  RECDES recdes;
5918  int rc;
5919 
5920  if (expected_length <= 0)
5921  {
5922  rc = heap_estimate_avg_length (thread_p, hfid, recdes.length);
5923  if (rc != NO_ERROR)
5924  {
5925  return rc;
5926  }
5927 
5928  if (recdes.length > (-expected_length))
5929  {
5930  expected_length = recdes.length;
5931  }
5932  else
5933  {
5934  expected_length = -expected_length;
5935  }
5936  }
5937 
5938  /*
5939  * Use the expected length only when it is larger than the size of an OID
5940  * and it is smaller than the maximum size of an object that can be stored
5941  * in the primary area (not in overflow). In any other case, use the size
5942  * of an OID as the length.
5943  */
5944 
5945  recdes.length =
5946  ((expected_length > SSIZEOF (OID) && !heap_is_big_length (expected_length)) ? expected_length : SSIZEOF (OID));
5947 
5948  recdes.data = NULL;
5949  recdes.type = REC_ASSIGN_ADDRESS;
5950 
5951  /* create context */
5952  heap_create_insert_context (&insert_context, (HFID *) hfid, class_oid, &recdes, NULL);
5953 
5954  /* insert */
5955  rc = heap_insert_logical (thread_p, &insert_context, NULL);
5956  if (rc != NO_ERROR)
5957  {
5958  return rc;
5959  }
5960 
5961  /* get result and exit */
5962  COPY_OID (oid, &insert_context.res_oid);
5963  return NO_ERROR;
5964 }
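/*
 * Illustrative sketch, not part of the original file: how the length
 * reserved by heap_assign_address () is derived from expected_length,
 * following the rules documented above.  example_reserved_length () and
 * its parameters (avg_len from the heap statistics, oid_size standing in
 * for SSIZEOF (OID), and a heap_is_big_length ()-style threshold) are
 * hypothetical names introduced only for this example.
 */
static int
example_reserved_length (int expected_length, int avg_len, int oid_size, int max_home_length)
{
  if (expected_length <= 0)
    {
      /* only an approximation (or nothing) is known; prefer the larger of
       * the heap's average object length and abs (expected_length) */
      expected_length = (avg_len > -expected_length) ? avg_len : -expected_length;
    }

  if (expected_length > oid_size && expected_length <= max_home_length)
    {
      return expected_length;	/* fits in the primary area */
    }

  /* too small, or destined for overflow: reserve just the size of an OID */
  return oid_size;
}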
5965 
5966 /*
5967  * heap_flush () - Flush all dirty pages where the object resides
5968  * return:
5969  * oid(in): Object identifier
5970  *
5971  * Note: Flush all dirty pages where the object resides.
5972  */
5973 void
5974 heap_flush (THREAD_ENTRY * thread_p, const OID * oid)
5975 {
5976  VPID vpid; /* Volume and page identifiers */
5977  PAGE_PTR pgptr = NULL; /* Page pointer */
5978  INT16 type;
5979  OID forward_oid;
5980  RECDES forward_recdes;
5981  int ret = NO_ERROR;
5982 
5983  if (HEAP_ISVALID_OID (thread_p, oid) != DISK_VALID)
5984  {
5985  return;
5986  }
5987 
5988  /*
5989  * Lock and fetch the page where the object is stored
5990  */
5991  vpid.volid = oid->volid;
5992  vpid.pageid = oid->pageid;
5993  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
5994  if (pgptr == NULL)
5995  {
5996  if (er_errid () == ER_PB_BAD_PAGEID)
5997  {
5998  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid->volid, oid->pageid, oid->slotid);
5999  }
6000  /* something went wrong, return */
6001  return;
6002  }
6003 
6004  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
6005 
6006  type = spage_get_record_type (pgptr, oid->slotid);
6007  if (type == REC_UNKNOWN)
6008  {
6009  goto end;
6010  }
6011 
6012  /* If this page is dirty flush it */
6013  (void) pgbuf_flush_with_wal (thread_p, pgptr);
6014 
6015  switch (type)
6016  {
6017  case REC_RELOCATION:
6018  /*
6019  * The object stored on the page is a relocation record. The relocation
6020  * record is used as a map to find the actual location of the content of
6021  * the object.
6022  */
6023 
6024  forward_recdes.data = (char *) &forward_oid;
6025  forward_recdes.area_size = OR_OID_SIZE;
6026 
6027  if (spage_get_record (thread_p, pgptr, oid->slotid, &forward_recdes, COPY) != S_SUCCESS)
6028  {
6029  /* Unable to get relocation record of the object */
6030  goto end;
6031  }
6032  pgbuf_unfix_and_init (thread_p, pgptr);
6033 
6034  /* Fetch the new home page */
6035  vpid.volid = forward_oid.volid;
6036  vpid.pageid = forward_oid.pageid;
6037 
6038  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
6039  if (pgptr == NULL)
6040  {
6041  if (er_errid () == ER_PB_BAD_PAGEID)
6042  {
6043  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, forward_oid.volid,
6044  forward_oid.pageid, forward_oid.slotid);
6045  }
6046 
6047  return;
6048  }
6049 
6050  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
6051 
6052  (void) pgbuf_flush_with_wal (thread_p, pgptr);
6053  break;
6054 
6055  case REC_BIGONE:
6056  /*
6057  * The object stored in the heap page is a relocation_overflow record,
6058  * get the overflow address of the object
6059  */
6060  forward_recdes.data = (char *) &forward_oid;
6061  forward_recdes.area_size = OR_OID_SIZE;
6062 
6063  if (spage_get_record (thread_p, pgptr, oid->slotid, &forward_recdes, COPY) != S_SUCCESS)
6064  {
6065  /* Unable to peek overflow address of multipage object */
6066  goto end;
6067  }
6068  pgbuf_unfix_and_init (thread_p, pgptr);
6069  ret = heap_ovf_flush (thread_p, &forward_oid);
6070  break;
6071 
6072  case REC_ASSIGN_ADDRESS:
6073  case REC_HOME:
6074  case REC_NEWHOME:
6075  case REC_MARKDELETED:
6076  case REC_DELETED_WILL_REUSE:
6077  default:
6078  break;
6079  }
6080 
6081 end:
6082  if (pgptr != NULL)
6083  {
6084  pgbuf_unfix_and_init (thread_p, pgptr);
6085  }
6086 }
6087 
6088 /*
6089  * xheap_reclaim_addresses () - Reclaim addresses/OIDs and delete empty pages
6090  * return: NO_ERROR
6091  * hfid(in): Heap file identifier
6092  *
6093  * Note: Reclaim the addresses (OIDs) of deleted objects of the given heap and
6094  * delete all the heap pages that are left empty.
6095  *
6096  * This function can be called:
6097  * a: When there are no more references to deleted objects of the given
6098  * heap. This happens during offline compactdb execution after all the
6099  * classes in the schema have been processed by the process_class ()
6100  * function that sets the references to deleted objects to NULL.
6101  * b: When we are sure there can be no references to any object of the
6102  * associated class. This happens during online compactdb execution when
6103  * all the classes in the schema are checked to see if they can point to
6104  * instances of the current class by checking all their attributes'
6105  * domains.
6106  *
6107  * If references to deleted objects were nulled by the current
6108  * transaction some recovery problems may happen in the case of a crash
6109  * since the reclaiming of the addresses is done without logging (or
6110  * with very little logging) and thus it cannot be fully undone. Some logging is
6111  * done to make sure that media recovery will not be impacted. This was
6112  * done to avoid a lot of unneeded logging. Thus, if the caller was
6113  * setting references to deleted objects to NULL, the caller must commit
6114  * his transaction before this function is invoked.
6115  *
6116  * This function must be run:
6117  * a: offline, that is, when the user is the only one using the database
6118  * system.
6119  * b: online while holding an exclusive lock on the associated class.
6120  */
6121 int
6122 xheap_reclaim_addresses (THREAD_ENTRY * thread_p, const HFID * hfid)
6123 {
6124  VPID vpid;
6125  VPID prv_vpid;
6126  int best, i;
6127  HEAP_HDR_STATS initial_heap_hdr;
6128  HEAP_HDR_STATS heap_hdr;
6129  RECDES hdr_recdes;
6130  LOG_DATA_ADDR addr;
6131  int ret = NO_ERROR;
6132  int free_space;
6133  int npages, nrecords, rec_length;
6134  bool need_update;
6135  PGBUF_WATCHER hdr_page_watcher;
6136  PGBUF_WATCHER curr_page_watcher;
6137 
6138  PGBUF_INIT_WATCHER (&hdr_page_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);
6139  PGBUF_INIT_WATCHER (&curr_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
6140 
6141  addr.vfid = &hfid->vfid;
6142  addr.pgptr = NULL;
6143  addr.offset = 0;
6144 
6145  vpid.volid = hfid->vfid.volid;
6146  vpid.pageid = hfid->hpgid;
6147 
6148  ret = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &hdr_page_watcher);
6149  if (ret != NO_ERROR)
6150  {
6151  goto exit_on_error;
6152  }
6153 
6154  (void) pgbuf_check_page_ptype (thread_p, hdr_page_watcher.pgptr, PAGE_HEAP);
6155 
6156  hdr_recdes.data = (char *) &heap_hdr;
6157  hdr_recdes.area_size = sizeof (heap_hdr);
6158 
6159  if (spage_get_record (thread_p, hdr_page_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, COPY) != S_SUCCESS)
6160  {
6161  goto exit_on_error;
6162  }
6163  prv_vpid = heap_hdr.estimates.last_vpid;
6164 
6165  /* Copy the header to memory.. so we can log the changes */
6166  memcpy (&initial_heap_hdr, hdr_recdes.data, sizeof (initial_heap_hdr));
6167 
6168  /*
6169  * Initialize best estimates
6170  */
6171  heap_hdr.estimates.num_pages = 0;
6172  heap_hdr.estimates.num_recs = 0;
6173  heap_hdr.estimates.recs_sumlen = 0.0;
6174  heap_hdr.estimates.num_high_best = 0;
6175  heap_hdr.estimates.num_other_high_best = 0;
6176  heap_hdr.estimates.head = 0;
6177 
6178  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
6179  {
6180  VPID_SET_NULL (&heap_hdr.estimates.best[i].vpid);
6181  heap_hdr.estimates.best[0].freespace = 0;
6182  }
6183 
6184  /* Initialize second best estimates */
6185  heap_hdr.estimates.num_second_best = 0;
6186  heap_hdr.estimates.head_second_best = 0;
6187  heap_hdr.estimates.tail_second_best = 0;
6188  heap_hdr.estimates.num_substitutions = 0;
6189 
6190  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
6191  {
6192  VPID_SET_NULL (&heap_hdr.estimates.second_best[i]);
6193  }
6194 
6195  /* initialize full_search_vpid */
6196  heap_hdr.estimates.full_search_vpid.volid = hfid->vfid.volid;
6197  heap_hdr.estimates.full_search_vpid.pageid = hfid->hpgid;
6198 
6199  best = 0;
6200 
6201  while (!(VPID_ISNULL (&prv_vpid)))
6202  {
6203  vpid = prv_vpid;
6204  curr_page_watcher.pgptr =
6205  heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, NULL, &curr_page_watcher);
6206  if (curr_page_watcher.pgptr == NULL)
6207  {
6208  goto exit_on_error;
6209  }
6210 
6211  (void) pgbuf_check_page_ptype (thread_p, curr_page_watcher.pgptr, PAGE_HEAP);
6212 
6213  if (heap_vpid_prev (thread_p, hfid, curr_page_watcher.pgptr, &prv_vpid) != NO_ERROR)
6214  {
6215  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
6216 
6217  goto exit_on_error;
6218  }
6219 
6220  /*
6221  * Are there any objects in this page ?
6222  * Compare against > 1 since every heap page contains a header record
6223  * (heap header or chain).
6224  */
6225 
6226  if (spage_number_of_records (curr_page_watcher.pgptr) > 1
6227  || (vpid.pageid == hfid->hpgid && vpid.volid == hfid->vfid.volid))
6228  {
6229  if (spage_reclaim (thread_p, curr_page_watcher.pgptr) == true)
6230  {
6231  addr.pgptr = curr_page_watcher.pgptr;
6232  /*
6233  * If this function is called correctly (see the notes in the
6234  * header comment about the preconditions) we can skip the
6235  * logging of spage_reclaim (). Logging for REDO would add many
6236  * log records for any compactdb operation and would only
6237  * benefit the infrequent scenario of compactdb operations that
6238  * crash right at the end. UNDO operations are not absolutely
6239  * required because the deleted OIDs should be unreferenced
6240  * anyway; there should be no harm in reusing them. Basically,
6241  * since the call to spage_reclaim () should leave the database
6242  * logically unmodified, neither REDO nor UNDO is required.
6243  */
6244  log_skip_logging (thread_p, &addr);
6245  pgbuf_set_dirty (thread_p, curr_page_watcher.pgptr, DONT_FREE);
6246  }
6247  }
6248 
6249  /*
6250  * Throw away the page if it doesn't contain any object. The header of
6251  * the heap cannot be thrown.
6252  */
6253 
6254  if (!(vpid.pageid == hfid->hpgid && vpid.volid == hfid->vfid.volid)
6255  && spage_number_of_records (curr_page_watcher.pgptr) <= 1
6256  /* Is any vacuum required? */
6257  && vacuum_is_mvccid_vacuumed (heap_page_get_max_mvccid (thread_p, curr_page_watcher.pgptr)))
6258  {
6259  /*
6260  * This page can be thrown away
6261  */
6262  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
6263  if (heap_vpid_remove (thread_p, hfid, &heap_hdr, &vpid) == NULL)
6264  {
6265  goto exit_on_error;
6266  }
6267  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Compactdb removed page %d|%d from heap file (%d, %d|%d).\n",
6268  vpid.volid, vpid.pageid, hfid->hpgid, hfid->vfid.volid, hfid->vfid.fileid);
6269  }
6270  else
6271  {
6272  spage_collect_statistics (curr_page_watcher.pgptr, &npages, &nrecords, &rec_length);
6273 
6274  heap_hdr.estimates.num_pages += npages;
6275  heap_hdr.estimates.num_recs += nrecords;
6276  heap_hdr.estimates.recs_sumlen += rec_length;
6277 
6278  free_space = spage_get_free_space_without_saving (thread_p, curr_page_watcher.pgptr, &need_update);
6279 
6280  if (free_space > HEAP_DROP_FREE_SPACE)
6281  {
6282  if (best < HEAP_NUM_BEST_SPACESTATS)
6283  {
6284  heap_hdr.estimates.best[best].vpid = vpid;
6285  heap_hdr.estimates.best[best].freespace = free_space;
6286  best++;
6287  }
6288  else
6289  {
6290  heap_hdr.estimates.num_other_high_best++;
6291  heap_stats_put_second_best (&heap_hdr, &vpid);
6292  }
6293 
6294  if (prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0)
6295  {
6296  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, free_space);
6297  }
6298  }
6299 
6300  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
6301  }
6302  }
6303 
6304  heap_hdr.estimates.num_high_best = best;
6305  /*
6306  * Set the rest of the statistics to NULL
6307  */
6308  for (; best < HEAP_NUM_BEST_SPACESTATS; best++)
6309  {
6310  VPID_SET_NULL (&heap_hdr.estimates.best[best].vpid);
6311  heap_hdr.estimates.best[best].freespace = 0;
6312  }
6313 
6314  /* Log the desired changes.. and then change the header We need to log the header changes in order to always benefit
6315  * from the updated statistics and in order to avoid referencing deleted pages in the statistics. */
6316  addr.pgptr = hdr_page_watcher.pgptr;
6317  addr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
6318  log_append_undoredo_data (thread_p, RVHF_STATS, &addr, sizeof (HEAP_HDR_STATS), sizeof (HEAP_HDR_STATS),
6319  &initial_heap_hdr, hdr_recdes.data);
6320 
6321  /* Now update the statistics */
6322  if (spage_update (thread_p, hdr_page_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes) != SP_SUCCESS)
6323  {
6324  goto exit_on_error;
6325  }
6326 
6327  pgbuf_ordered_set_dirty_and_free (thread_p, &hdr_page_watcher);
6328 
6329  return ret;
6330 
6331 exit_on_error:
6332 
6333  if (hdr_page_watcher.pgptr != NULL)
6334  {
6335  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
6336  }
6337 
6338  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
6339 }
6340 
6341 /*
6342  * heap_ovf_find_vfid () - Find overflow file identifier
6343  * return: ovf_vfid or NULL
6344  * hfid(in): Object heap file identifier
6345  * ovf_vfid(in/out): Overflow file identifier.
6346  * docreate(in): true/false. If true and the overflow file does not
6347  * exist, it is created.
6348  *
6349  * Note: Find overflow file identifier. If the overflow file does not
6350  * exist, it may be created depending on the value of the docreate argument.
6351  */
6352 VFID *
6353 heap_ovf_find_vfid (THREAD_ENTRY * thread_p, const HFID * hfid, VFID * ovf_vfid, bool docreate,
6354  PGBUF_LATCH_CONDITION latch_cond)
6355 {
6356  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
6357  LOG_DATA_ADDR addr_hdr; /* Address of logging data */
6358  VPID vpid; /* Page-volume identifier */
6359  RECDES hdr_recdes; /* Header record descriptor */
6360  PGBUF_LATCH_MODE mode;
6361 
6362  addr_hdr.vfid = &hfid->vfid;
6363  addr_hdr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
6364 
6365  /* Read the header page */
6366  vpid.volid = hfid->vfid.volid;
6367  vpid.pageid = hfid->hpgid;
6368 
6369  mode = (docreate == true ? PGBUF_LATCH_WRITE : PGBUF_LATCH_READ);
6370  addr_hdr.pgptr = pgbuf_fix (thread_p, &vpid, OLD_PAGE, mode, latch_cond);
6371  if (addr_hdr.pgptr == NULL)
6372  {
6373  /* something went wrong, return */
6374  return NULL;
6375  }
6376 
6377  (void) pgbuf_check_page_ptype (thread_p, addr_hdr.pgptr, PAGE_HEAP);
6378 
6379  /* Peek the header record */
6380 
6381  if (spage_get_record (thread_p, addr_hdr.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
6382  {
6383  pgbuf_unfix_and_init (thread_p, addr_hdr.pgptr);
6384  return NULL;
6385  }
6386 
6387  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
6388  if (VFID_ISNULL (&heap_hdr->ovf_vfid))
6389  {
6390  if (docreate == true)
6391  {
6392  FILE_DESCRIPTORS des;
6393  TDE_ALGORITHM tde_algo = TDE_ALGORITHM_NONE;
6394  /* Create the overflow file. Try to create the overflow file in the same volume where the heap was defined */
6395 
6396  /* START A TOP SYSTEM OPERATION */
6397  log_sysop_start (thread_p);
6398 
6399  /* Initialize description of overflow heap file */
6400  memset (&des, 0, sizeof (des));
6401  HFID_COPY (&des.heap_overflow.hfid, hfid);
6402  des.heap_overflow.class_oid = heap_hdr->class_oid;
6403  if (file_create_with_npages (thread_p, FILE_MULTIPAGE_OBJECT_HEAP, 1, &des, ovf_vfid) != NO_ERROR)
6404  {
6405  log_sysop_abort (thread_p);
6406  ovf_vfid = NULL;
6407  goto exit;
6408  }
6409 
6410  if (heap_get_class_tde_algorithm (thread_p, &heap_hdr->class_oid, &tde_algo) != NO_ERROR)
6411  {
6412  log_sysop_abort (thread_p);
6413  ovf_vfid = NULL;
6414  goto exit;
6415  }
6416 
6417  if (file_apply_tde_algorithm (thread_p, ovf_vfid, tde_algo) != NO_ERROR)
6418  {
6419  log_sysop_abort (thread_p);
6420  ovf_vfid = NULL;
6421  goto exit;
6422  }
6423 
6424  /* Log undo, then redo */
6425  log_append_undo_data (thread_p, RVHF_STATS, &addr_hdr, sizeof (*heap_hdr), heap_hdr);
6426  VFID_COPY (&heap_hdr->ovf_vfid, ovf_vfid);
6427  log_append_redo_data (thread_p, RVHF_STATS, &addr_hdr, sizeof (*heap_hdr), heap_hdr);
6428  pgbuf_set_dirty (thread_p, addr_hdr.pgptr, DONT_FREE);
6429 
6430  log_sysop_commit (thread_p);
6431  }
6432  else
6433  {
6434  ovf_vfid = NULL;
6435  }
6436  }
6437  else
6438  {
6439  VFID_COPY (ovf_vfid, &heap_hdr->ovf_vfid);
6440  }
6441 
6442 exit:
6443  pgbuf_unfix_and_init (thread_p, addr_hdr.pgptr);
6444 
6445  return ovf_vfid;
6446 }
6447 
6448 /*
6449  * heap_ovf_insert () - Insert the content of a multipage object in overflow
6450  * return: OID *(ovf_oid on success or NULL on failure)
6451  * hfid(in): Object heap file identifier
6452  * ovf_oid(in/out): Overflow address
6453  * recdes(in): Record descriptor
6454  *
6455  * Note: Insert the content of a multipage object in overflow.
6456  */
6457 static OID *
6458 heap_ovf_insert (THREAD_ENTRY * thread_p, const HFID * hfid, OID * ovf_oid, RECDES * recdes)
6459 {
6460  VFID ovf_vfid;
6461  VPID ovf_vpid; /* Address of overflow insertion */
6462 
6463  if (heap_ovf_find_vfid (thread_p, hfid, &ovf_vfid, true, PGBUF_UNCONDITIONAL_LATCH) == NULL
6464  || overflow_insert (thread_p, &ovf_vfid, &ovf_vpid, recdes, FILE_MULTIPAGE_OBJECT_HEAP) != NO_ERROR)
6465  {
6466  return NULL;
6467  }
6468 
6469  ovf_oid->pageid = ovf_vpid.pageid;
6470  ovf_oid->volid = ovf_vpid.volid;
6471  ovf_oid->slotid = NULL_SLOTID; /* Irrelevant */
6472 
6473  return ovf_oid;
6474 }
6475 
6476 /*
6477  * heap_ovf_update () - Update the content of a multipage object
6478  * return: OID *(ovf_oid on success or NULL on failure)
6479  * hfid(in): Object heap file identifier
6480  * ovf_oid(in): Overflow address
6481  * recdes(in): Record descriptor
6482  *
6483  * Note: Update the content of a multipage object.
6484  */
6485 static const OID *
6486 heap_ovf_update (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * ovf_oid, RECDES * recdes)
6487 {
6488  VFID ovf_vfid;
6489  VPID ovf_vpid;
6490 
6491  if (heap_ovf_find_vfid (thread_p, hfid, &ovf_vfid, false, PGBUF_UNCONDITIONAL_LATCH) == NULL)
6492  {
6493  return NULL;
6494  }
6495 
6496  ovf_vpid.pageid = ovf_oid->pageid;
6497  ovf_vpid.volid = ovf_oid->volid;
6498 
6499  if (overflow_update (thread_p, &ovf_vfid, &ovf_vpid, recdes, FILE_MULTIPAGE_OBJECT_HEAP) != NO_ERROR)
6500  {
6501  ASSERT_ERROR ();
6502  return NULL;
6503  }
6504  else
6505  {
6506  return ovf_oid;
6507  }
6508 }
6509 
6510 /*
6511  * heap_ovf_delete () - Delete the content of a multipage object
6512  * return: OID *(ovf_oid on success or NULL on failure)
6513  * hfid(in): Object heap file identifier
6514  * ovf_oid(in): Overflow address
6515  * ovf_vfid_p(in): Overflow file identifier. If given argument is NULL,
6516  * it must be obtained from heap file header.
6517  *
6518  * Note: Delete the content of a multipage object.
6519  */
6520 const OID *
6521 heap_ovf_delete (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * ovf_oid, VFID * ovf_vfid_p)
6522 {
6523  VFID ovf_vfid;
6524  VPID ovf_vpid;
6525 
6526  if (ovf_vfid_p == NULL || VFID_ISNULL (ovf_vfid_p))
6527  {
6528  /* Get overflow file VFID from heap file header. */
6529  ovf_vfid_p = (ovf_vfid_p != NULL) ? ovf_vfid_p : &ovf_vfid;
6530  if (heap_ovf_find_vfid (thread_p, hfid, ovf_vfid_p, false, PGBUF_UNCONDITIONAL_LATCH) == NULL)
6531  {
6532  return NULL;
6533  }
6534  }
6535 
6536  ovf_vpid.pageid = ovf_oid->pageid;
6537  ovf_vpid.volid = ovf_oid->volid;
6538 
6539  if (overflow_delete (thread_p, ovf_vfid_p, &ovf_vpid) == NULL)
6540  {
6541  return NULL;
6542  }
6543  else
6544  {
6545  return ovf_oid;
6546  }
6547 
6548 }
6549 
6550 /*
6551  * heap_ovf_flush () - Flush all overflow dirty pages where the object resides
6552  * return: NO_ERROR
6553  * ovf_oid(in): Overflow address
6554  *
6555  * Note: Flush all overflow dirty pages where the object resides.
6556  */
6557 static int
6558 heap_ovf_flush (THREAD_ENTRY * thread_p, const OID * ovf_oid)
6559 {
6560  VPID ovf_vpid;
6561 
6562  ovf_vpid.pageid = ovf_oid->pageid;
6563  ovf_vpid.volid = ovf_oid->volid;
6564  overflow_flush (thread_p, &ovf_vpid);
6565 
6566  return NO_ERROR;
6567 }
6568 
6569 /*
6570  * heap_ovf_get_length () - Find length of overflow object
6571  * return: length
6572  * ovf_oid(in): Overflow address
6573  *
6574  * Note: The length of the content of a multipage object associated
6575  * with the given overflow address is returned. In the case of
6576  * any error, -1 is returned.
6577  */
6578 static int
6579 heap_ovf_get_length (THREAD_ENTRY * thread_p, const OID * ovf_oid)
6580 {
6581  VPID ovf_vpid;
6582 
6583  ovf_vpid.pageid = ovf_oid->pageid;
6584  ovf_vpid.volid = ovf_oid->volid;
6585 
6586  return overflow_get_length (thread_p, &ovf_vpid);
6587 }
6588 
6589 /*
6590  * heap_ovf_get () - get/retrieve the content of a multipage object from overflow
6591  * return: SCAN_CODE
6592  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END)
6593  * ovf_oid(in): Overflow address
6594  * recdes(in): Record descriptor
6595  * chn(in):
6596  *
6597  * Note: The content of a multipage object associated with the given
6598  * overflow address(oid) is placed into the area pointed to by
6599  * the record descriptor. If the content of the object does not
6600  * fit in such an area (i.e., recdes->area_size), an error is
6601  * returned and a hint of its length is returned as a negative
6602  * value in recdes->length. The length of the retrieved object is
6603  * set in the record descriptor (i.e., recdes->length).
6604  */
6605 static SCAN_CODE
6606 heap_ovf_get (THREAD_ENTRY * thread_p, const OID * ovf_oid, RECDES * recdes, int chn, MVCC_SNAPSHOT * mvcc_snapshot)
6607 {
6608  VPID ovf_vpid;
6609  int rest_length;
6610  SCAN_CODE scan;
6611 
6612  ovf_vpid.pageid = ovf_oid->pageid;
6613  ovf_vpid.volid = ovf_oid->volid;
6614 
6615  if (chn != NULL_CHN)
6616  {
6617  /*
6618  * This assumes that most of the time we have the right cache coherency
6619  * number, and that it would be expensive to copy the overflow object only
6620  * to throw it away. Thus, it is OK to do some extra page lookups
6621  * when the check fails (this should be OK since the overflow page should
6622  * already be in the page buffer pool).
6623  */
6624 
6625  scan = overflow_get_nbytes (thread_p, &ovf_vpid, recdes, 0, OR_MVCC_MAX_HEADER_SIZE, &rest_length, mvcc_snapshot);
6626  if (scan == S_SUCCESS && chn == or_chn (recdes))
6627  {
6628  return S_SUCCESS_CHN_UPTODATE;
6629  }
6630  }
6631  scan = overflow_get (thread_p, &ovf_vpid, recdes, mvcc_snapshot);
6632 
6633  return scan;
6634 }
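/*
 * Illustrative sketch, not part of the original file: how an internal
 * caller can pass a cache coherency number to heap_ovf_get () so that an
 * unchanged overflow object is not copied again.  cached_chn is assumed to
 * come from a previously fetched copy of the object; passing NULL_CHN
 * disables the fast path.
 */
static SCAN_CODE
example_refetch_overflow (THREAD_ENTRY * thread_p, const OID * ovf_oid, RECDES * recdes, int cached_chn)
{
  SCAN_CODE scan = heap_ovf_get (thread_p, ovf_oid, recdes, cached_chn, NULL /* no MVCC snapshot */ );

  if (scan == S_SUCCESS_CHN_UPTODATE)
    {
      /* only the header was read; the caller's cached copy is still valid */
    }

  return scan;
}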
6635 
6636 /*
6637  * heap_ovf_get_capacity () - Find space consumed by an overflow object
6638  * return: NO_ERROR
6639  * ovf_oid(in): Overflow address
6640  * ovf_len(out): Length of overflow object
6641  * ovf_num_pages(out): Total number of overflow pages
6642  * ovf_overhead(out): System overhead for overflow record
6643  * ovf_free_space(out): Free space for expansion of the overflow rec
6644  *
6645  * Note: Find the current storage facts/capacity of given overflow rec
6646  */
6647 static int
6648 heap_ovf_get_capacity (THREAD_ENTRY * thread_p, const OID * ovf_oid, int *ovf_len, int *ovf_num_pages,
6649  int *ovf_overhead, int *ovf_free_space)
6650 {
6651  VPID ovf_vpid;
6652 
6653  ovf_vpid.pageid = ovf_oid->pageid;
6654  ovf_vpid.volid = ovf_oid->volid;
6655 
6656  return overflow_get_capacity (thread_p, &ovf_vpid, ovf_len, ovf_num_pages, ovf_overhead, ovf_free_space);
6657 }
6658 
6659 /*
6660  * heap_scancache_check_with_hfid () - Check if scancache is on provided HFID
6661  * and reinitialize it otherwise
6662  * thread_p(in): thread entry
6663  * hfid(in): heap file identifier to check the scancache against
6664  * scan_cache(in/out): pointer to scancache pointer
6665  * returns: error code or NO_ERROR
6666  *
6667  * NOTE: Function may alter the scan cache address. Caller must make sure it
6668  * doesn't pass its only reference to the object OR it is not the owner
6669  * of the object.
6670  * NOTE: Function may alter the members of (*scan_cache).
6671  */
6672 static int
6673 heap_scancache_check_with_hfid (THREAD_ENTRY * thread_p, HFID * hfid, OID * class_oid, HEAP_SCANCACHE ** scan_cache)
6674 {
6675  if (*scan_cache != NULL)
6676  {
6677  if ((*scan_cache)->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
6678  {
6679  er_log_debug (ARG_FILE_LINE, "heap_insert: Your scancache is not initialized");
6680  *scan_cache = NULL;
6681  }
6682  else if (!HFID_EQ (&(*scan_cache)->node.hfid, hfid) || OID_ISNULL (&(*scan_cache)->node.class_oid))
6683  {
6684  int r;
6685 
6686  /* scancache is not on our heap file, reinitialize it */
6687  /* this is a very dangerous and risky thing to do; the caller may have made a big mistake.
6688  * we keep it as a fallback for release builds, but we should catch it in debug builds.
6689  * todo: add assert (false); here
6690  */
6691  r = heap_scancache_reset_modify (thread_p, *scan_cache, hfid, class_oid);
6692  if (r != NO_ERROR)
6693  {
6694  return r;
6695  }
6696  }
6697  }
6698 
6699  /* all ok */
6700  return NO_ERROR;
6701 }
6702 
6703 /*
6704  * heap_scancache_start_internal () - Start caching information for a heap scan
6705  * return: NO_ERROR
6706  * scan_cache(in/out): Scan cache
6707  * hfid(in): Heap file identifier of the scan cache or NULL
6708  * If NULL is given heap_get is the only function that can
6709  * be used with the scan cache.
6710  * class_oid(in): Class identifier of scan cache
6711  * For any class, NULL or NULL_OID can be given
6712  * cache_last_fix_page(in): Whether or not to cache the last fetched page
6713  * between scan objects
6714  * is_queryscan(in):
6715  * is_indexscan(in):
6716  *
6717  */
6718 static int
6719 heap_scancache_start_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
6720  const OID * class_oid, int cache_last_fix_page, bool is_queryscan, int is_indexscan,
6721  MVCC_SNAPSHOT * mvcc_snapshot)
6722 {
6723  int ret = NO_ERROR;
6724 
6725  if (class_oid != NULL)
6726  {
6727  /*
6728  * Scanning the instances of a specific class
6729  */
6730  scan_cache->node.class_oid = *class_oid;
6731 
6732  if (is_queryscan == true)
6733  {
6734  /*
6735  * Acquire a lock for the heap scan so that the class is not updated
6736  * during the scan of the heap. This can happen in transaction isolation
6737  * levels that release the locks of the class when the class is read.
6738  */
6739  if (lock_scan (thread_p, class_oid, LK_UNCOND_LOCK, IS_LOCK) != LK_GRANTED)
6740  {
6741  goto exit_on_error;
6742  }
6743  }
6744 
6745  ret = heap_get_class_info (thread_p, class_oid, &scan_cache->node.hfid, &scan_cache->file_type, NULL);
6746  if (ret != NO_ERROR)
6747  {
6748  ASSERT_ERROR ();
6749  return ret;
6750  }
6751  assert (hfid == NULL || HFID_EQ (hfid, &scan_cache->node.hfid));
6752  assert (scan_cache->file_type == FILE_HEAP || scan_cache->file_type == FILE_HEAP_REUSE_SLOTS);
6753  }
6754  else
6755  {
6756  /*
6757  * Scanning the instances of any class in the heap
6758  */
6759  OID_SET_NULL (&scan_cache->node.class_oid);
6760 
6761  if (hfid == NULL)
6762  {
6763  HFID_SET_NULL (&scan_cache->node.hfid);
6764  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
6765  scan_cache->file_type = FILE_UNKNOWN_TYPE;
6766  }
6767  else
6768  {
6769  scan_cache->node.hfid.vfid.volid = hfid->vfid.volid;
6770  scan_cache->node.hfid.vfid.fileid = hfid->vfid.fileid;
6771  scan_cache->node.hfid.hpgid = hfid->hpgid;
6772  if (file_get_type (thread_p, &hfid->vfid, &scan_cache->file_type) != NO_ERROR)
6773  {
6774  ASSERT_ERROR ();
6775  goto exit_on_error;
6776  }
6777  if (scan_cache->file_type == FILE_UNKNOWN_TYPE)
6778  {
6779  assert_release (false);
6780  goto exit_on_error;
6781  }
6782  }
6783  }
6784 
6785  scan_cache->page_latch = S_LOCK;
6786 
6787  scan_cache->node.classname = NULL;
6788  scan_cache->cache_last_fix_page = cache_last_fix_page;
6790  scan_cache->start_area ();
6791  scan_cache->num_btids = 0;
6792  scan_cache->m_index_stats = NULL;
6793  scan_cache->debug_initpattern = HEAP_DEBUG_SCANCACHE_INITPATTERN;
6794  scan_cache->mvcc_snapshot = mvcc_snapshot;
6795  scan_cache->partition_list = NULL;
6796 
6797  return ret;
6798 
6799 exit_on_error:
6800 
6801  HFID_SET_NULL (&scan_cache->node.hfid);
6802  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
6803  OID_SET_NULL (&scan_cache->node.class_oid);
6804  scan_cache->node.classname = NULL;
6805  scan_cache->page_latch = NULL_LOCK;
6806  scan_cache->cache_last_fix_page = false;
6808  scan_cache->num_btids = 0;
6809  scan_cache->m_index_stats = NULL;
6810  scan_cache->file_type = FILE_UNKNOWN_TYPE;
6811  scan_cache->debug_initpattern = 0;
6812  scan_cache->mvcc_snapshot = NULL;
6813  scan_cache->partition_list = NULL;
6814 
6815  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
6816 }
6817 
6818 /*
6819  * heap_scancache_start () - Start caching information for a heap scan
6820  * return: NO_ERROR
6821  * scan_cache(in/out): Scan cache
6822  * hfid(in): Heap file identifier of the scan cache or NULL
6823  * If NULL is given heap_get is the only function that can
6824  * be used with the scan cache.
6825  * class_oid(in): Class identifier of scan cache
6826  * For any class, NULL or NULL_OID can be given
6827  * cache_last_fix_page(in): Whether or not to cache the last fetched page
6828  * between scan objects
6829  * is_indexscan(in):
6830  *
6831  */
6832 int
6833 heap_scancache_start (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid, const OID * class_oid,
6834  int cache_last_fix_page, int is_indexscan, MVCC_SNAPSHOT * mvcc_snapshot)
6835 {
6836  return heap_scancache_start_internal (thread_p, scan_cache, hfid, class_oid, cache_last_fix_page, true, is_indexscan,
6837  mvcc_snapshot);
6838 }
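/*
 * Illustrative usage sketch, not part of the original file: a read-only
 * scan over one class using a scan cache.  The heap_next ()-style iterator
 * and its exact signature are assumptions made for this example; only
 * heap_scancache_start () and heap_scancache_end () are defined in this
 * file.
 */
static void
example_scan_class (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid)
{
  HEAP_SCANCACHE scan_cache;
  RECDES recdes;
  OID oid;

  OID_SET_NULL (&oid);

  if (heap_scancache_start (thread_p, &scan_cache, hfid, class_oid, true /* cache last fixed page */ ,
			    false /* not an index scan */ , NULL /* no MVCC snapshot */ ) != NO_ERROR)
    {
      return;
    }

  /* assumed iterator: returns S_SUCCESS until the heap is exhausted */
  while (heap_next (thread_p, hfid, class_oid, &oid, &recdes, &scan_cache, PEEK) == S_SUCCESS)
    {
      /* recdes peeks into the page cached by scan_cache; consume it before the next call */
    }

  (void) heap_scancache_end (thread_p, &scan_cache);
}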
6839 
6840 /*
6841  * heap_scancache_start_modify () - Start caching information for heap
6842  * modifications
6843  * return: NO_ERROR
6844  * scan_cache(in/out): Scan cache
6845  * hfid(in): Heap file identifier of the scan cache or NULL
6846  * If NULL is given heap_get is the only function that can
6847  * be used with the scan cache.
6848  * class_oid(in): Class identifier of scan cache
6849  * For any class, NULL or NULL_OID can be given
6850  * op_type(in):
6851  *
6852  * Note: A scancache structure is started for heap modifications.
6853  * The scan_cache structure is used to modify objects of the heap
6854  * with heap_insert, heap_update, and heap_delete. The scan structure
6855  * is used to cache information about the latest used page which
6856  * can be used by the following function to guess where to insert
6857  * objects, or other updates and deletes on the same page.
6858  * Good when we are updating things in a sequential way.
6859  *
6860  * The heap manager automatically resets the scan_cache structure
6861  * when it is used with a different heap. That is, the scan_cache
6862  * is reset with the heap and class of the insertion, update, and
6863  * delete. Therefore, you could pass NULLs to hfid, and class_oid
6864  * to this function, but that is not recommended.
6865  */
6866 int
6867 heap_scancache_start_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
6868  const OID * class_oid, int op_type, MVCC_SNAPSHOT * mvcc_snapshot)
6869 {
6870  OR_CLASSREP *classrepr = NULL;
6871  int classrepr_cacheindex = -1;
6872  int i;
6873  int ret = NO_ERROR;
6874 
6875  if (heap_scancache_start_internal (thread_p, scan_cache, hfid, NULL, false, false, false, mvcc_snapshot) != NO_ERROR)
6876  {
6877  goto exit_on_error;
6878  }
6879 
6880  if (class_oid != NULL)
6881  {
6882  ret = heap_scancache_reset_modify (thread_p, scan_cache, hfid, class_oid);
6883  if (ret != NO_ERROR)
6884  {
6885  goto exit_on_error;
6886  }
6887  }
6888  else
6889  {
6890  scan_cache->page_latch = X_LOCK;
6891  }
6892 
6893  if (BTREE_IS_MULTI_ROW_OP (op_type) && class_oid != NULL && !OID_EQ (class_oid, oid_Root_class_oid))
6894  {
6895  /* get class representation to find the total number of indexes */
6896  classrepr = heap_classrepr_get (thread_p, (OID *) class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
6897  if (classrepr == NULL)
6898  {
6899  goto exit_on_error;
6900  }
6901  scan_cache->num_btids = classrepr->n_indexes;
6902 
6903  if (scan_cache->num_btids > 0)
6904  {
6905  delete scan_cache->m_index_stats;
6906  scan_cache->m_index_stats = new multi_index_unique_stats ();
6907  /* initialize the structure */
6908  for (i = 0; i < scan_cache->num_btids; i++)
6909  {
6910  scan_cache->m_index_stats->add_empty (classrepr->indexes[i].btid);
6911  }
6912  }
6913 
6914  /* free class representation */
6915  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
6916  }
6917 
6918  /* In case of SINGLE_ROW_INSERT, SINGLE_ROW_UPDATE, SINGLE_ROW_DELETE, or SINGLE_ROW_MODIFY, the 'num_btids' and
6919  * 'm_index_stats' of scan cache structure have to be set as 0 and NULL, respectively. */
6920 
6921  return ret;
6922 
6923 exit_on_error:
6924 
6925  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
6926 }
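/*
 * Illustrative usage sketch, not part of the original file: the intended
 * lifecycle of a modification scan cache, mirroring the calls made by
 * heap_assign_address () above.  SINGLE_ROW_INSERT is a placeholder
 * op_type, and passing the scan cache as the last argument of
 * heap_create_insert_context () is assumed here for the example.
 */
static int
example_insert_one_record (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, RECDES * recdes, OID * oid_out)
{
  HEAP_SCANCACHE scan_cache;
  HEAP_OPERATION_CONTEXT insert_context;
  int error;

  error = heap_scancache_start_modify (thread_p, &scan_cache, hfid, class_oid, SINGLE_ROW_INSERT, NULL);
  if (error != NO_ERROR)
    {
      return error;
    }

  /* build the insert context and perform the logical insert */
  heap_create_insert_context (&insert_context, (HFID *) hfid, class_oid, recdes, &scan_cache);
  error = heap_insert_logical (thread_p, &insert_context, NULL);
  if (error == NO_ERROR)
    {
      COPY_OID (oid_out, &insert_context.res_oid);
    }

  heap_scancache_end_modify (thread_p, &scan_cache);

  return error;
}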
6927 
6928 /*
6929  * heap_scancache_force_modify () -
6930  * return: NO_ERROR
6931  * scan_cache(in):
6932  */
6933 static int
6934 heap_scancache_force_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
6935 {
6936  if (scan_cache == NULL || scan_cache->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
6937  {
6938  return NO_ERROR;
6939  }
6940 
6941  /* Free fetched page */
6942  if (scan_cache->page_watcher.pgptr != NULL)
6943  {
6944  pgbuf_ordered_unfix (thread_p, &(scan_cache->page_watcher));
6945  }
6946 
6947  return NO_ERROR;
6948 }
6949 
6950 /*
6951  * heap_scancache_reset_modify () - Reset the current caching information
6952  * return: NO_ERROR
6953  * scan_cache(in/out): Scan cache
6954  * hfid(in): Heap file identifier of the scan cache
6955  * class_oid(in): Class identifier of scan cache
6956  *
6957  * Note: Any page that has been cached under the current scan cache is
6958  * freed and the scancache structure is reinitialized with the
6959  * new information.
6960  */
6961 static int
6962 heap_scancache_reset_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
6963  const OID * class_oid)
6964 {
6965  int ret;
6966 
6967  ret = heap_scancache_force_modify (thread_p, scan_cache);
6968  if (ret != NO_ERROR)
6969  {
6970  return ret;
6971  }
6972 
6973  if (class_oid != NULL)
6974  {
6975  if (!OID_EQ (class_oid, &scan_cache->node.class_oid))
6976  {
6977  ret = heap_get_class_info (thread_p, class_oid, &scan_cache->node.hfid, &scan_cache->file_type, NULL);
6978  if (ret != NO_ERROR)
6979  {
6980  ASSERT_ERROR ();
6981  return ret;
6982  }
6983  assert (HFID_EQ (&scan_cache->node.hfid, hfid));
6984  scan_cache->node.class_oid = *class_oid;
6985  }
6986  }
6987  else
6988  {
6989  OID_SET_NULL (&scan_cache->node.class_oid);
6990 
6991  if (!HFID_EQ (&scan_cache->node.hfid, hfid))
6992  {
6993  scan_cache->node.hfid.vfid.volid = hfid->vfid.volid;
6994  scan_cache->node.hfid.vfid.fileid = hfid->vfid.fileid;
6995  scan_cache->node.hfid.hpgid = hfid->hpgid;
6996 
6997  ret = file_get_type (thread_p, &hfid->vfid, &scan_cache->file_type);
6998  if (ret != NO_ERROR)
6999  {
7000  ASSERT_ERROR ();
7001  return ret;
7002  }
7003  if (scan_cache->file_type == FILE_UNKNOWN_TYPE)
7004  {
7005  assert_release (false);
7006  return ER_FAILED;
7007  }
7008  }
7009  }
7010  scan_cache->page_latch = X_LOCK;
7011  scan_cache->node.classname = NULL;
7012 
7013  return ret;
7014 }
7015 
7016 /*
7017  * heap_scancache_quick_start () - Start caching information for a heap scan
7018  * return: NO_ERROR
7019  * scan_cache(in/out): Scan cache
7020  *
7021  * Note: This is a quick way to initialize a scancache structure. It
7022  * should be used only when we would like to peek only one object
7023  * (heap_get). This function will cache the last fetched page by default.
7024  *
7025  * This function was created to avoid some of the overhead
7026  * associated with a scancache (e.g., finding best pages, locking the heap)
7027  * since we are not really scanning the heap.
7028  *
7029  * For other needs/uses, please refer to heap_scancache_start ().
7030  *
7031  * Note: Using many scancaches with the cached_fix page option at the
7032  * same time should be avoided since page buffers are fixed and
7033  * locked for future references and there is a limit of buffers
7034  * in the page buffer pool. This is analogous to fetching many
7035  * pages at the same time. The page buffer pool is expanded when
7036  * needed, however, developers must pay special attention to
7037  * avoid this situation.
7038  */
7039 int
7040 heap_scancache_quick_start (HEAP_SCANCACHE * scan_cache)
7041 {
7042  heap_scancache_quick_start_internal (scan_cache, NULL);
7043 
7044  scan_cache->page_latch = S_LOCK;
7045 
7046  return NO_ERROR;
7047 }
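/*
 * Illustrative usage sketch, not part of the original file: the pairing of
 * heap_scancache_quick_start () with heap_scancache_end () around a single
 * object lookup.  The lookup itself is elided, since the heap_get-style
 * readers live outside this excerpt.
 */
static void
example_peek_single_object (THREAD_ENTRY * thread_p)
{
  HEAP_SCANCACHE scan_cache;

  (void) heap_scancache_quick_start (&scan_cache);

  /* ... fetch exactly one object here with a heap_get-style reader ... */

  (void) heap_scancache_end (thread_p, &scan_cache);
}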
7048 
7049 /*
7050  * heap_scancache_quick_start_modify () - Start caching information
7051  * for a heap modifications
7052  * return: NO_ERROR
7053  * scan_cache(in/out): Scan cache
7054  */
7055 int
7056 heap_scancache_quick_start_modify (HEAP_SCANCACHE * scan_cache)
7057 {
7058  heap_scancache_quick_start_internal (scan_cache, NULL);
7059 
7060  scan_cache->page_latch = X_LOCK;
7061 
7062  return NO_ERROR;
7063 }
7064 
7065 /*
7066  * heap_scancache_quick_start_internal () -
7067  *
7068  * return: NO_ERROR
7069  * scan_cache(in/out): Scan cache
7070  */
7071 static int
7072 heap_scancache_quick_start_internal (HEAP_SCANCACHE * scan_cache, const HFID * hfid)
7073 {
7074  HFID_SET_NULL (&scan_cache->node.hfid);
7075  if (hfid == NULL)
7076  {
7077  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
7079  }
7080  else
7081  {
7082  HFID_COPY (&scan_cache->node.hfid, hfid);
7084  }
7085  OID_SET_NULL (&scan_cache->node.class_oid);
7086  scan_cache->node.classname = NULL;
7087  scan_cache->page_latch = S_LOCK;
7088  scan_cache->cache_last_fix_page = true;
7089  scan_cache->start_area ();
7090  scan_cache->num_btids = 0;
7091  scan_cache->m_index_stats = NULL;
7092  scan_cache->file_type = FILE_UNKNOWN_TYPE;
7093  scan_cache->debug_initpattern = HEAP_DEBUG_SCANCACHE_INITPATTERN;
7094  scan_cache->mvcc_snapshot = NULL;
7095  scan_cache->partition_list = NULL;
7096 
7097  return NO_ERROR;
7098 }
7099 
7100 /*
7101  * heap_scancache_quick_end () - Stop caching information for a heap scan
7102  * return: NO_ERROR
7103  * scan_cache(in/out): Scan cache
7104  *
7105  * Note: Any fixed heap page on the given scan is freed and any memory
7106  * allocated by this scan is also freed. The scan_cache structure
7107  * is undefined. This function does not update any space statistics.
7108  */
7109 static int
7110 heap_scancache_quick_end (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
7111 {
7112  int ret = NO_ERROR;
7113 
7114  if (scan_cache->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
7115  {
7116  er_log_debug (ARG_FILE_LINE, "heap_scancache_quick_end: Your scancache is not initialized");
7117  ret = ER_GENERIC_ERROR;
7119  }
7120  else
7121  {
7122  delete scan_cache->m_index_stats;
7123  scan_cache->m_index_stats = NULL;
7124  scan_cache->num_btids = 0;
7125 
7126  if (scan_cache->cache_last_fix_page == true)
7127  {
7128  /* Free fetched page */
7129  if (scan_cache->page_watcher.pgptr != NULL)
7130  {
7131  pgbuf_ordered_unfix (thread_p, &scan_cache->page_watcher);
7132  }
7133  }
7134 
7135  if (scan_cache->partition_list)
7136  {
7137  HEAP_SCANCACHE_NODE_LIST *next_node = NULL;
7138  HEAP_SCANCACHE_NODE_LIST *curr_node = NULL;
7139 
7140  curr_node = scan_cache->partition_list;
7141 
7142  while (curr_node != NULL)
7143  {
7144  next_node = curr_node->next;
7145  db_private_free_and_init (thread_p, curr_node);
7146  curr_node = next_node;
7147  }
7148  }
7149  }
7150 
7151  HFID_SET_NULL (&scan_cache->node.hfid);
7152  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
7153  scan_cache->node.classname = NULL;
7154  OID_SET_NULL (&scan_cache->node.class_oid);
7155  scan_cache->page_latch = NULL_LOCK;
7156  assert (PGBUF_IS_CLEAN_WATCHER (&(scan_cache->page_watcher)));
7157  scan_cache->end_area ();
7158  scan_cache->file_type = FILE_UNKNOWN_TYPE;
7159  scan_cache->debug_initpattern = 0;
7160 
7161  return ret;
7162 }
7163 
7164 /*
7165  * heap_scancache_end_internal () -
7166  * return: NO_ERROR
7167  * scan_cache(in):
7168  * scan_state(in):
7169  */
7170 static int
7171 heap_scancache_end_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool scan_state)
7172 {
7173  int ret = NO_ERROR;
7174 
7175  if (scan_cache->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
7176  {
7177  er_log_debug (ARG_FILE_LINE, "heap_scancache_end_internal: Your scancache is not initialized");
7178  return ER_FAILED;
7179  }
7180 
7181  ret = heap_scancache_quick_end (thread_p, scan_cache);
7182 
7183  return ret;
7184 }
7185 
7186 /*
7187  * heap_scancache_end () - Stop caching information for a heap scan
7188  * return: NO_ERROR
7189  * scan_cache(in/out): Scan cache
7190  *
7191  * Note: Any fixed heap page on the given scan is freed and any memory
7192  * allocated by this scan is also freed. The scan_cache structure is undefined.
7193  */
7194 int
7195 heap_scancache_end (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
7196 {
7197  int ret;
7198 
7199  ret = heap_scancache_end_internal (thread_p, scan_cache, END_SCAN);
7200 
7201  return NO_ERROR;
7202 }
7203 
7204 /*
7205  * heap_scancache_end_when_scan_will_resume () -
7206  * return:
7207  * scan_cache(in):
7208  */
7209 int
7210 heap_scancache_end_when_scan_will_resume (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
7211 {
7212  int ret;
7213 
7214  ret = heap_scancache_end_internal (thread_p, scan_cache, CONTINUE_SCAN);
7215 
7216  return NO_ERROR;
7217 }
7218 
7219 /*
7220  * heap_scancache_end_modify () - End caching information for a heap
7221  * modification cache
7222  * return:
7223  * scan_cache(in/out): Scan cache
7224  *
7225  * Note: Any fixed heap page on the given scan is freed. The
7226  * best-space statistics for the heap are updated with the ones
7227  * stored in the scan cache.
7228  */
7229 void
7230 heap_scancache_end_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
7231 {
7232  int ret;
7233 
7234  ret = heap_scancache_force_modify (thread_p, scan_cache);
7235  if (ret == NO_ERROR)
7236  {
7237  ret = heap_scancache_quick_end (thread_p, scan_cache);
7238  }
7239 }
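/*
 * Usage sketch for the modify path: pair heap_scancache_quick_start_modify
 * with heap_scancache_end_modify around a set of heap modifications. The
 * helper is illustrative; the actual insert/update/delete entry points are
 * elided here.
 */
static void
example_modify_with_scancache (THREAD_ENTRY * thread_p)
{
  HEAP_SCANCACHE scan_cache;

  heap_scancache_quick_start_modify (&scan_cache);

  /* ... perform the heap modifications, passing &scan_cache to each operation ... */

  /* unfixes any cached page and pushes the cached best-space statistics */
  heap_scancache_end_modify (thread_p, &scan_cache);
}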
7240 
7241 /*
7242  * heap_get_if_diff_chn () - Get specified object of the given slotted page when
7243  * its cache coherency number is different
7244  * return: SCAN_CODE
7245  * (Either of S_SUCCESS,
7246  * S_SUCCESS_CHN_UPTODATE,
7247  * S_DOESNT_FIT,
7248  * S_DOESNT_EXIST)
7249  * pgptr(in): Pointer to slotted page
7250  * slotid(in): Slot identifier of current record.
7251  * recdes(in/out): Pointer to a record descriptor. Will be modified to
7252  * describe the desired record.
7253  * ispeeking(in): Indicates whether the record is going to be copied
7254  * (COPY) or peeked (read directly at the buffer).
7255  * chn(in): Cache coherency number or NULL_CHN
7256  *
7257  * Note: If the given CHN is the same as the chn of the specified
7258  * object in the slotted page, the object may not be placed in
7259  * the given record descriptor. If the given CHN is NULL_CHN or
7260  * is not given, then the following process is followed depending
7261  * on whether we are peeking or not:
7262  * When ispeeking is PEEK, the desired record is peeked onto the
7263  * buffer pool. The address of the record descriptor is set
7264  * to the portion of the buffer pool where the record is stored.
7265  * For more information on peeking description, see the slotted module.
7266  *
7267  * When ispeeking is COPY, the desired record is read
7268  * into the area pointed to by the record descriptor. If the record
7269  * does not fit in such an area, the length of the record is
7270  * returned as a negative value in recdes->length and an error
7271  * condition is indicated.
7272  */
7273 static SCAN_CODE
7274 heap_get_if_diff_chn (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, INT16 slotid, RECDES * recdes, bool ispeeking, int chn,
7275  MVCC_SNAPSHOT * mvcc_snapshot)
7276 {
7277  RECDES chn_recdes; /* Used when we need to compare the cache coherency number and we are not peeking */
7278  SCAN_CODE scan;
7279  MVCC_REC_HEADER mvcc_header;
7280 
7281  /*
7282  * Don't retrieve the object when the object has the same cache
7283  * coherency number given by the caller. That is, the caller has the
7284  * valid cached object.
7285  */
7286 
7287  if (ispeeking == PEEK)
7288  {
7289  scan = spage_get_record (thread_p, pgptr, slotid, recdes, PEEK);
7290  if (scan != S_SUCCESS)
7291  {
7292  return scan;
7293  }
7294 
7295  /* For MVCC we need to obtain header and verify header */
7296  or_mvcc_get_header (recdes, &mvcc_header);
7297  if (scan == S_SUCCESS && mvcc_snapshot != NULL && mvcc_snapshot->snapshot_fnc != NULL)
7298  {
7299  if (mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, mvcc_snapshot) == TOO_OLD_FOR_SNAPSHOT)
7300  {
7301  /* consider the snapshot not satisfied only in case of TOO_OLD_FOR_SNAPSHOT;
7302  * TOO_NEW_FOR_SNAPSHOT records should be accepted, e.g. a recently updated record, locked at select */
7303  return S_SNAPSHOT_NOT_SATISFIED;
7304  }
7305  }
7306  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, chn))
7307  {
7308  /* Test chn if MVCC is disabled for record or if delete MVCCID is invalid and the record is inserted by
7309  * current transaction. */
7310  /* When testing chn is not required, the result is considered up-to-date. */
7311  scan = S_SUCCESS_CHN_UPTODATE;
7312  }
7313  }
7314  else
7315  {
7316  scan = spage_get_record (thread_p, pgptr, slotid, &chn_recdes, PEEK);
7317  if (scan != S_SUCCESS)
7318  {
7319  return scan;
7320  }
7321 
7322  /* For MVCC we need to obtain header and verify header */
7323  or_mvcc_get_header (&chn_recdes, &mvcc_header);
7324  if (scan == S_SUCCESS && mvcc_snapshot != NULL && mvcc_snapshot->snapshot_fnc != NULL)
7325  {
7326  if (mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, mvcc_snapshot) == TOO_OLD_FOR_SNAPSHOT)
7327  {
7328  /* consider the snapshot not satisfied only in case of TOO_OLD_FOR_SNAPSHOT;
7329  * TOO_NEW_FOR_SNAPSHOT records should be accepted, e.g. a recently updated record, locked at select */
7330  return S_SNAPSHOT_NOT_SATISFIED;
7331  }
7332  }
7333  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, chn))
7334  {
7335  /* Test chn if MVCC is disabled for record or if delete MVCCID is invalid and the record is inserted by
7336  * current transaction. */
7337  /* When testing chn is not required, the result is considered up-to-date. */
7338  scan = S_SUCCESS_CHN_UPTODATE;
7339  }
7340 
7341  if (scan != S_SUCCESS_CHN_UPTODATE)
7342  {
7343  /*
7344  * Note that we could copy the recdes.data from chn_recdes.data, but
7345  * I don't think it makes much difference here, and we will have to deal
7346  * with all not fit conditions and so on, so we decide to use
7347  * spage_get_record instead.
7348  */
7349  scan = spage_get_record (thread_p, pgptr, slotid, recdes, COPY);
7350  }
7351  }
7352 
7353  return scan;
7354 }
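/*
 * Usage sketch for the CHN check above: a caller holding a cached copy of the
 * object passes its coherency number and only refreshes the copy when the
 * on-page version differs. pgptr, slotid, recdes, cached_chn and mvcc_snapshot
 * are assumed to be set up by the caller.
 */
  SCAN_CODE scan;

  scan = heap_get_if_diff_chn (thread_p, pgptr, slotid, &recdes, COPY, cached_chn, mvcc_snapshot);
  if (scan == S_SUCCESS_CHN_UPTODATE)
    {
      /* the cached copy identified by cached_chn is still current; recdes was not refreshed */
    }
  else if (scan == S_SUCCESS)
    {
      /* recdes now describes the newer version of the object */
    }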
7355 
7356 /*
7357  * heap_prepare_get_context () - Prepare for obtaining/processing heap object.
7358  * It may get class_oid, record_type, home page
7359  * and also forward_oid and forward_page in some
7360  * cases.
7361  *
7362  * return : SCAN_CODE: S_ERROR, S_DOESNT_EXIST and S_SUCCESS.
7363  * thread_p (in) : Thread entry.
7364  * context (in/out) : Heap get context used to store the information required for heap objects processing.
7365  * is_heap_scan (in) : Used to decide if it is acceptable to reach deleted objects or not.
7366  * non_ex_handling_type (in): Handling type for deleted objects
7367  * - LOG_ERROR_IF_DELETED: write the
7368  * ER_HEAP_UNKNOWN_OBJECT error to log
7369  * - LOG_WARNING_IF_DELETED: set only warning
7370  *
7371  * Note : the caller should manage the page unfix of both home and forward
7372  * pages (even in case of error, there may be pages latched).
7373  * The function uses a multiple page latch; in some extreme cases,
7374  * if the home page was unfixed during fwd page fix, we need to recheck
7375  * that the home page OID is still valid and re-PEEK the home record. We
7376  * allow this to repeat once.
7377  * For performance:
7378  * Make sure page unfix is performed in order fwd page, then home page.
7379  * Normal fix sequence (first attempt) is home page, then fwd page; if
7380  * the fwd page is unfixed before home, another thread will attempt to
7381  * fix fwd page, after having home fix; first try (CONDITIONAL) will
7382  * fail, and will trigger an ordered fix + UNCONDITIONAL.
7383  */
7384 SCAN_CODE
7385 heap_prepare_get_context (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context, bool is_heap_scan,
7386  NON_EXISTENT_HANDLING non_ex_handling_type)
7387 {
7388  SPAGE_SLOT *slot_p = NULL;
7389  RECDES peek_recdes;
7390  SCAN_CODE scan = S_SUCCESS;
7391  int try_count = 0;
7392  int try_max = 1;
7393  int ret;
7394 
7395  assert (context->oid_p != NULL);
7396 
7397 try_again:
7398 
7399  /* First make sure object home_page is fixed. */
7400  ret = heap_prepare_object_page (thread_p, context->oid_p, &context->home_page_watcher, context->latch_mode);
7401  if (ret != NO_ERROR)
7402  {
7403  if (ret == ER_HEAP_UNKNOWN_OBJECT)
7404  {
7405  /* bad page id, consider the object does not exist and let the caller handle the case */
7406  return S_DOESNT_EXIST;
7407  }
7408 
7409  goto error;
7410  }
7411 
7412  /* Output class_oid if necessary. */
7413  if (context->class_oid_p != NULL && OID_ISNULL (context->class_oid_p)
7414  && heap_get_class_oid_from_page (thread_p, context->home_page_watcher.pgptr, context->class_oid_p) != NO_ERROR)
7415  {
7416  /* Unexpected. */
7417  assert_release (false);
7418  goto error;
7419  }
7420 
7421  /* Get slot. */
7422  slot_p = spage_get_slot (context->home_page_watcher.pgptr, context->oid_p->slotid);
7423  if (slot_p == NULL)
7424  {
7425  /* Slot doesn't exist. */
7426  if (!is_heap_scan)
7427  {
7428  /* Do not set error for heap scan and get record info. */
7430  context->oid_p->pageid, context->oid_p->slotid);
7431  }
7432 
7433  /* Output record type as REC_UNKNOWN. */
7434  context->record_type = REC_UNKNOWN;
7435 
7436  return S_DOESNT_EXIST;
7437  }
7438 
7439  /* Output record type. */
7440  context->record_type = slot_p->record_type;
7441 
7442  if (context->fwd_page_watcher.pgptr != NULL && slot_p->record_type != REC_RELOCATION
7443  && slot_p->record_type != REC_BIGONE)
7444  {
7445  /* Forward page no longer required. */
7446  pgbuf_ordered_unfix (thread_p, &context->fwd_page_watcher);
7447  }
7448 
7449  /* Fix required pages. */
7450  switch (slot_p->record_type)
7451  {
7452  case REC_RELOCATION:
7453  /* Need to get forward_oid and fix forward page */
7454  scan = spage_get_record (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid, &peek_recdes, PEEK);
7455  if (scan != S_SUCCESS)
7456  {
7457  /* Unexpected. */
7458  assert_release (false);
7459  goto error;
7460  }
7461  /* Output forward_oid. */
7462  COPY_OID (&context->forward_oid, (OID *) peek_recdes.data);
7463 
7464  /* Try to latch forward_page. */
7466  ret = heap_prepare_object_page (thread_p, &context->forward_oid, &context->fwd_page_watcher, context->latch_mode);
7467  if (ret == NO_ERROR)
7468  {
7469  /* Pages successfully fixed. */
7470  if (context->home_page_watcher.page_was_unfixed)
7471  {
7472  /* Home_page/forward_page are both fixed. However, since home page was unfixed, record may have changed
7473  * (record type has changed or just the relocation link). Go back and repeat steps (if nothing was
7474  * changed, pages are already fixed). */
7475  if (try_count++ < try_max)
7476  {
7477  context->home_page_watcher.page_was_unfixed = false;
7478  goto try_again;
7479  }
7480  else
7481  {
7483  context->forward_oid.pageid);
7484  }
7485 
7486  goto error;
7487  }
7488  return S_SUCCESS;
7489  }
7490 
7491  goto error;
7492 
7493  case REC_BIGONE:
7494  /* Need to get forward_oid and forward_page (first overflow page). */
7495  scan = spage_get_record (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid, &peek_recdes, PEEK);
7496  if (scan != S_SUCCESS)
7497  {
7498  /* Unexpected. */
7499  assert_release (false);
7500  goto error;
7501  }
7502  /* Output forward_oid. */
7503  COPY_OID (&context->forward_oid, (OID *) peek_recdes.data);
7504 
7505  /* Fix overflow page. Since overflow pages should always be accessed with their home pages latched, an unconditional
7506  * latch should work; however, we need to use the same ordered_fix approach. */
7509  ret = heap_prepare_object_page (thread_p, &context->forward_oid, &context->fwd_page_watcher, context->latch_mode);
7510  if (ret == NO_ERROR)
7511  {
7512  /* Pages successfully fixed. */
7513  if (context->home_page_watcher.page_was_unfixed)
7514  {
7515  /* This is not expected. */
7516  assert (false);
7517  goto error;
7518  }
7519  return S_SUCCESS;
7520  }
7521 
7522  goto error;
7523 
7524  case REC_ASSIGN_ADDRESS:
7525  /* Object without content.. only the address has been assigned */
7526  if (is_heap_scan)
7527  {
7528  /* Just ignore record. */
7529  return S_DOESNT_EXIST;
7530  }
7531  if (spage_check_slot_owner (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid))
7532  {
7534  context->oid_p->pageid, context->oid_p->slotid);
7535  return S_DOESNT_EXIST;
7536  }
7537  else
7538  {
7540  context->oid_p->pageid, context->oid_p->slotid);
7541  goto error;
7542  }
7543 
7544  case REC_HOME:
7545  /* Only home page is needed. */
7546  return S_SUCCESS;
7547 
7548  case REC_DELETED_WILL_REUSE:
7549  case REC_MARKDELETED:
7550  /* Vacuumed/deleted record. */
7551  if (is_heap_scan)
7552  {
7553  /* Just ignore record. */
7554  return S_DOESNT_EXIST;
7555  }
7556 #if defined(SA_MODE)
7557  /* Accessing a REC_MARKDELETED record from a system class can happen in SA mode, when no MVCC operations have
7558  * been performed on the system class. */
7559  if (oid_is_system_class (context->class_oid_p))
7560  {
7562  context->oid_p->pageid, context->oid_p->slotid);
7563  return S_DOESNT_EXIST;
7564  }
7565 #endif /* SA_MODE */
7566 
7568  || non_ex_handling_type == LOG_WARNING_IF_DELETED)
7569  {
7570  /* A deleted class record, corresponding to a deleted class can be accessed through catalog update operations
7571  * on another class. This is possible if a class has an attribute holding a domain that references the
7572  * dropped class. Another situation is the client request for authentication, which fetches the object (an
7573  * instance of db_user) using dirty version. If it has been removed, it will be found as a deleted record. */
7574  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, context->oid_p->volid,
7575  context->oid_p->pageid, context->oid_p->slotid);
7576  }
7577  else
7578  {
7579  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, context->oid_p->volid,
7580  context->oid_p->pageid, context->oid_p->slotid);
7581  }
7582  return S_DOESNT_EXIST;
7583 
7584  case REC_NEWHOME:
7585  if (is_heap_scan)
7586  {
7587  /* Just ignore record. */
7588  return S_DOESNT_EXIST;
7589  }
7590  /* REC_NEWHOME are only allowed to be accessed through REC_RELOCATION slots. */
7591  /* FALLTHRU */
7592  default:
7593  /* Unexpected case. */
7595  context->oid_p->pageid, context->oid_p->slotid);
7596  goto error;
7597  }
7598 
7599  /* Impossible */
7600  assert_release (false);
7601 error:
7602  assert (ret == ER_LK_PAGE_TIMEOUT || er_errid () != NO_ERROR);
7603 
7604  heap_clean_get_context (thread_p, context);
7605  return S_ERROR;
7606 }
7607 
7608 /*
7609  * heap_get_mvcc_header () - Get record MVCC header.
7610  *
7611  * return : SCAN_CODE: S_SUCCESS, S_ERROR or S_DOESNT_EXIST.
7612  * thread_p (in) : Thread entry.
7613  * context (in) : Heap get context.
7614  * mvcc_header (out) : Record MVCC header.
7615  *
7616  * NOTE: This function gets MVCC header, if it has everything needed already
7617  * obtained: pages latched, forward OID (if the case), record type.
7618  */
7619 SCAN_CODE
7620 heap_get_mvcc_header (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context, MVCC_REC_HEADER * mvcc_header)
7621 {
7622  RECDES peek_recdes;
7623  SCAN_CODE scan_code;
7624  PAGE_PTR home_page, forward_page;
7625  const OID *oid;
7626 
7627  assert (context != NULL && context->oid_p != NULL);
7628 
7629  oid = context->oid_p;
7630  home_page = context->home_page_watcher.pgptr;
7631  forward_page = context->fwd_page_watcher.pgptr;
7632 
7633  assert (home_page != NULL);
7634  assert (pgbuf_get_page_id (home_page) == oid->pageid && pgbuf_get_volume_id (home_page) == oid->volid);
7635  assert (context->record_type == REC_HOME || context->record_type == REC_RELOCATION
7636  || context->record_type == REC_BIGONE);
7637  assert (context->record_type == REC_HOME
7638  || (forward_page != NULL && pgbuf_get_page_id (forward_page) == context->forward_oid.pageid
7639  && pgbuf_get_volume_id (forward_page) == context->forward_oid.volid));
7640  assert (mvcc_header != NULL);
7641 
7642  /* Get header and verify snapshot. */
7643  switch (context->record_type)
7644  {
7645  case REC_HOME:
7646  scan_code = spage_get_record (thread_p, home_page, oid->slotid, &peek_recdes, PEEK);
7647  if (scan_code != S_SUCCESS)
7648  {
7649  /* Unexpected. */
7650  assert (false);
7651  return S_ERROR;
7652  }
7653  if (or_mvcc_get_header (&peek_recdes, mvcc_header) != NO_ERROR)
7654  {
7655  /* Unexpected. */
7656  assert (false);
7657  return S_ERROR;
7658  }
7659  return S_SUCCESS;
7660  case REC_BIGONE:
7661  assert (forward_page != NULL);
7662  if (heap_get_mvcc_rec_header_from_overflow (forward_page, mvcc_header, &peek_recdes) != NO_ERROR)
7663  {
7664  /* Unexpected. */
7665  assert (false);
7666  return S_ERROR;
7667  }
7668  return S_SUCCESS;
7669  case REC_RELOCATION:
7670  assert (forward_page != NULL);
7671  scan_code = spage_get_record (thread_p, forward_page, context->forward_oid.slotid, &peek_recdes, PEEK);
7672  if (scan_code != S_SUCCESS)
7673  {
7674  /* Unexpected. */
7675  assert (false);
7676  return S_ERROR;
7677  }
7678  if (or_mvcc_get_header (&peek_recdes, mvcc_header) != NO_ERROR)
7679  {
7680  /* Unexpected. */
7681  assert (false);
7682  return S_ERROR;
7683  }
7684  return S_SUCCESS;
7685  default:
7686  /* Unexpected. */
7687  assert (false);
7688  return S_ERROR;
7689  }
7690 
7691  /* Impossible. */
7692  assert (false);
7693  return S_ERROR;
7694 }
7695 
7696 /*
7697  * heap_get_record_data_when_all_ready () - Get record data when all required information is known. This can work only
7698  * for record types that actually have data: REC_HOME, REC_RELOCATION and
7699  * REC_BIGONE. Required information: home_page, forward_oid and forward page
7700  * for REC_RELOCATION and REC_BIGONE, and record type.
7701  *
7702  * return : SCAN_CODE: S_SUCCESS, S_ERROR, S_DOESNT_FIT.
7703  * thread_p (in) : Thread entry.
7704  * context (in/out) : Heap get context. Should contain all required information for object retrieving
7705  */
7706 SCAN_CODE
7707 heap_get_record_data_when_all_ready (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context)
7708 {
7709  HEAP_SCANCACHE *scan_cache_p = context->scan_cache;
7710 
7711  /* We have everything set up to get record data. */
7712  assert (context != NULL);
7713 
7714  /* Assert ispeeking, scan_cache and recdes are compatible. If ispeeking is PEEK, it is the caller's responsibility to
7715  * keep the page latched while the recdes is still in use. If ispeeking is COPY, we must have a preallocated
7716  * area to copy to. This means either scan_cache is not NULL (and scan_cache->area can be used) or recdes->data is
7717  * not NULL (and recdes->area_size defines how much can be copied). */
7718  assert ((context->ispeeking == PEEK)
7719  || (context->ispeeking == COPY && (scan_cache_p != NULL || context->recdes_p->data != NULL)));
7720 
7721  switch (context->record_type)
7722  {
7723  case REC_RELOCATION:
7724  /* Don't peek REC_RELOCATION. */
7725  if (scan_cache_p != NULL && (context->ispeeking != 0 || context->recdes_p->data == NULL)
7726  && heap_scan_cache_allocate_recdes_data (thread_p, scan_cache_p, context->recdes_p,
7727  DB_PAGESIZE * 2) != NO_ERROR)
7728  {
7729  ASSERT_ERROR ();
7730  return S_ERROR;
7731  }
7732 
7733  return spage_get_record (thread_p, context->fwd_page_watcher.pgptr, context->forward_oid.slotid,
7734  context->recdes_p, COPY);
7735  case REC_BIGONE:
7736  return heap_get_bigone_content (thread_p, scan_cache_p, context->ispeeking, &context->forward_oid,
7737  context->recdes_p);
7738  case REC_HOME:
7739  if (scan_cache_p != NULL && context->ispeeking == COPY && context->recdes_p->data == NULL
7740  && heap_scan_cache_allocate_recdes_data (thread_p, scan_cache_p, context->recdes_p,
7741  DB_PAGESIZE * 2) != NO_ERROR)
7742  {
7743  ASSERT_ERROR ();
7744  return S_ERROR;
7745  }
7746  return spage_get_record (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid, context->recdes_p,
7747  context->ispeeking);
7748  default:
7749  break;
7750  }
7751  /* Shouldn't be here. */
7752  return S_ERROR;
7753 }
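/*
 * Usage sketch of the get pipeline formed by heap_prepare_get_context,
 * heap_get_mvcc_header and heap_get_record_data_when_all_ready. The context
 * is assumed to have been initialized beforehand (oid_p, class_oid_p,
 * recdes_p, scan_cache, ispeeking and latch_mode filled in); the
 * snapshot/visibility decision is only hinted at.
 */
  SCAN_CODE scan;
  MVCC_REC_HEADER mvcc_header;

  scan = heap_prepare_get_context (thread_p, &context, false, LOG_ERROR_IF_DELETED);
  if (scan == S_SUCCESS)
    {
      scan = heap_get_mvcc_header (thread_p, &context, &mvcc_header);
    }
  if (scan == S_SUCCESS)
    {
      /* ... decide visibility from mvcc_header, then fetch the data ... */
      scan = heap_get_record_data_when_all_ready (thread_p, &context);
    }
  heap_clean_get_context (thread_p, &context);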
7754 
7755 /*
7756  * heap_next_internal () - Retrieve or peek the next object.
7757  *
7758  * return : SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT,
7759  * S_END, S_ERROR).
7760  * thread_p (in) : Thread entry.
7761  * hfid (in) : Heap file identifier.
7762  * class_oid (in) : Class object identifier.
7763  * next_oid (in/out) : Object identifier of current record. Will be
7764  * set to next available record or NULL_OID
7765  * when there is not one.
7766  * recdes (in) : Pointer to a record descriptor. Will be
7767  * modified to describe the new record.
7768  * scan_cache (in) : Scan cache or NULL
7769  * ispeeking (in) : PEEK when the object is peeked (scan_cache cannot
7770  * be NULL); COPY when the object is copied.
7771  * cache_recordinfo (in/out) : DB_VALUE pointer array that caches record
7772  * information values.
7773  */
7774 static SCAN_CODE
7775 heap_next_internal (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
7776  HEAP_SCANCACHE * scan_cache, bool ispeeking, bool reversed_direction, DB_VALUE ** cache_recordinfo)
7777 {
7778  VPID vpid;
7779  VPID *vpidptr_incache;
7780  INT16 type = REC_UNKNOWN;
7781  OID oid;
7782  RECDES forward_recdes;
7783  SCAN_CODE scan = S_ERROR;
7784  int get_rec_info = cache_recordinfo != NULL;
7785  bool is_null_recdata;
7786  PGBUF_WATCHER curr_page_watcher;
7787  PGBUF_WATCHER old_page_watcher;
7788 
7789  assert (scan_cache != NULL);
7790 
7791 #if defined(CUBRID_DEBUG)
7792  if (scan_cache != NULL && scan_cache->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
7793  {
7794  er_log_debug (ARG_FILE_LINE, "heap_next: Your scancache is not initialized");
7796  return S_ERROR;
7797  }
7798  if (scan_cache != NULL && HFID_IS_NULL (&scan_cache->hfid))
7799  {
7801  "heap_next: scan_cache without heap.. heap file must be given to heap_scancache_start () when"
7802  " scan_cache is used with heap_first, heap_next, heap_prev heap_last");
7804  return S_ERROR;
7805  }
7806 #endif /* CUBRID_DEBUG */
7807 
7808  hfid = &scan_cache->node.hfid;
7809  if (!OID_ISNULL (&scan_cache->node.class_oid))
7810  {
7811  class_oid = &scan_cache->node.class_oid;
7812  }
7813 
7814  PGBUF_INIT_WATCHER (&curr_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
7815  PGBUF_INIT_WATCHER (&old_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
7816 
7817  if (OID_ISNULL (next_oid))
7818  {
7819  if (reversed_direction)
7820  {
7821  /* Retrieve the last record of the file. */
7822  if (heap_get_last_vpid (thread_p, hfid, &vpid) != NO_ERROR)
7823  {
7824  ASSERT_ERROR ();
7825  return S_ERROR;
7826  }
7827  oid.volid = vpid.volid;
7828  oid.pageid = vpid.pageid;
7829  oid.slotid = NULL_SLOTID;
7830  }
7831  else
7832  {
7833  /* Retrieve the first object of the heap */
7834  oid.volid = hfid->vfid.volid;
7835  oid.pageid = hfid->hpgid;
7836  oid.slotid = 0; /* i.e., will get slot 1 */
7837  }
7838  }
7839  else
7840  {
7841  oid = *next_oid;
7842  }
7843 
7844  is_null_recdata = (recdes->data == NULL);
7845 
7846  /* Start looking for next object */
7847  while (true)
7848  {
7849  /* Start looking for next object in current page. If we reach the end of this page without finding a new object,
7850  * fetch next page and continue looking there. If no objects are found, end scanning */
7851  while (true)
7852  {
7853  vpid.volid = oid.volid;
7854  vpid.pageid = oid.pageid;
7855 
7856  /*
7857  * Fetch the page where the object of OID is stored. Use previous
7858  * scan page whenever possible, otherwise, deallocate the page.
7859  */
7860  if (scan_cache->cache_last_fix_page == true && scan_cache->page_watcher.pgptr != NULL)
7861  {
7862  vpidptr_incache = pgbuf_get_vpid_ptr (scan_cache->page_watcher.pgptr);
7863  if (VPID_EQ (&vpid, vpidptr_incache))
7864  {
7865  /* replace with local watcher, scan cache watcher will be changed by called functions */
7866  pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &curr_page_watcher);
7867  }
7868  else
7869  {
7870  /* Keep previous scan page fixed until we fixed the current one */
7871  pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &old_page_watcher);
7872  }
7873  }
7874  if (curr_page_watcher.pgptr == NULL)
7875  {
7876  curr_page_watcher.pgptr =
7877  heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, scan_cache,
7878  &curr_page_watcher);
7879  if (old_page_watcher.pgptr != NULL)
7880  {
7881  pgbuf_ordered_unfix (thread_p, &old_page_watcher);
7882  }
7883  if (curr_page_watcher.pgptr == NULL)
7884  {
7885  if (er_errid () == ER_PB_BAD_PAGEID)
7886  {
7887  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid.volid, oid.pageid,
7888  oid.slotid);
7889  }
7890 
7891  /* something went wrong, return */
7892  assert (scan_cache->page_watcher.pgptr == NULL);
7893  return S_ERROR;
7894  }
7895  }
7896 
7897  if (get_rec_info)
7898  {
7899  /* Getting record information means that we need to scan all slots even if they store no object. */
7900  if (reversed_direction)
7901  {
7902  scan =
7903  spage_previous_record_dont_skip_empty (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
7904  }
7905  else
7906  {
7907  scan =
7908  spage_next_record_dont_skip_empty (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
7909  }
7910  if (oid.slotid == HEAP_HEADER_AND_CHAIN_SLOTID)
7911  {
7912  /* skip the header */
7913  scan =
7914  spage_next_record_dont_skip_empty (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
7915  }
7916  }
7917  else
7918  {
7919  /* Find the next object. Skip relocated records (i.e., new_home records). These records must be accessed
7920  * through the relocation record (i.e., the object). */
7921 
7922  while (true)
7923  {
7924  if (reversed_direction)
7925  {
7926  scan = spage_previous_record (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
7927  }
7928  else
7929  {
7930  scan = spage_next_record (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
7931  }
7932  if (scan != S_SUCCESS)
7933  {
7934  /* stop */
7935  break;
7936  }
7937  if (oid.slotid == HEAP_HEADER_AND_CHAIN_SLOTID)
7938  {
7939  /* skip the header */
7940  continue;
7941  }
7942  type = spage_get_record_type (curr_page_watcher.pgptr, oid.slotid);
7943  if (type == REC_NEWHOME || type == REC_ASSIGN_ADDRESS || type == REC_UNKNOWN)
7944  {
7945  /* skip */
7946  continue;
7947  }
7948 
7949  break;
7950  }
7951  }
7952 
7953  if (scan != S_SUCCESS)
7954  {
7955  if (scan == S_END)
7956  {
7957  /* Find next page of heap and continue scanning */
7958  if (reversed_direction)
7959  {
7960  (void) heap_vpid_prev (thread_p, hfid, curr_page_watcher.pgptr, &vpid);
7961  }
7962  else
7963  {
7964  (void) heap_vpid_next (thread_p, hfid, curr_page_watcher.pgptr, &vpid);
7965  }
7966  pgbuf_replace_watcher (thread_p, &curr_page_watcher, &old_page_watcher);
7967  oid.volid = vpid.volid;
7968  oid.pageid = vpid.pageid;
7969  oid.slotid = -1;
7970  if (oid.pageid == NULL_PAGEID)
7971  {
7972  /* must be last page, end scanning */
7973  OID_SET_NULL (next_oid);
7974  if (old_page_watcher.pgptr != NULL)
7975  {
7976  pgbuf_ordered_unfix (thread_p, &old_page_watcher);
7977  }
7978  return scan;
7979  }
7980  }
7981  else
7982  {
7983  /* Error, stop scanning */
7984  if (old_page_watcher.pgptr != NULL)
7985  {
7986  pgbuf_ordered_unfix (thread_p, &old_page_watcher);
7987  }
7988  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
7989  return scan;
7990  }
7991  }
7992  else
7993  {
7994  /* found a new object */
7995  break;
7996  }
7997  }
7998 
7999  /* A record was found */
8000  if (get_rec_info)
8001  {
8002  scan =
8003  heap_get_record_info (thread_p, oid, recdes, forward_recdes, &curr_page_watcher, scan_cache, ispeeking,
8004  cache_recordinfo);
8005  }
8006  else
8007  {
8008  int cache_last_fix_page_save = scan_cache->cache_last_fix_page;
8009 
8010  scan_cache->cache_last_fix_page = true;
8011  pgbuf_replace_watcher (thread_p, &curr_page_watcher, &scan_cache->page_watcher);
8012 
8013  scan = heap_scan_get_visible_version (thread_p, &oid, class_oid, recdes, scan_cache, ispeeking, NULL_CHN);
8014  scan_cache->cache_last_fix_page = cache_last_fix_page_save;
8015 
8016  if (!cache_last_fix_page_save && scan_cache->page_watcher.pgptr)
8017  {
8018  /* restore into curr_page_watcher and unfix later */
8019  pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &curr_page_watcher);
8020  }
8021  }
8022 
8023  if (scan == S_SUCCESS)
8024  {
8025  /*
8026  * Make sure that the found object is an instance of the desired
8027  * class. If it isn't then continue looking.
8028  */
8029  if (class_oid == NULL || OID_ISNULL (class_oid) || !OID_IS_ROOTOID (&oid))
8030  {
8031  /* stop */
8032  *next_oid = oid;
8033  break;
8034  }
8035  else
8036  {
8037  /* continue looking */
8038  if (is_null_recdata)
8039  {
8040  /* reset recdes->data before getting next record */
8041  recdes->data = NULL;
8042  }
8043  continue;
8044  }
8045  }
8046  else if (scan == S_SNAPSHOT_NOT_SATISFIED || scan == S_DOESNT_EXIST)
8047  {
8048  /* the record does not satisfy the snapshot or was deleted - continue */
8049  if (is_null_recdata)
8050  {
8051  /* reset recdes->data before getting next record */
8052  recdes->data = NULL;
8053  }
8054  continue;
8055  }
8056 
8057  /* scan was not successful, stop scanning */
8058  break;
8059  }
8060 
8061  if (old_page_watcher.pgptr != NULL)
8062  {
8063  pgbuf_ordered_unfix (thread_p, &old_page_watcher);
8064  }
8065 
8066  if (curr_page_watcher.pgptr != NULL)
8067  {
8068  if (!scan_cache->cache_last_fix_page)
8069  {
8070  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
8071  }
8072  else
8073  {
8074  pgbuf_replace_watcher (thread_p, &curr_page_watcher, &scan_cache->page_watcher);
8075  }
8076  }
8077 
8078  return scan;
8079 }
8080 
8081 /*
8082  * heap_first () - Retrieve or peek first object of heap
8083  * return: SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT, S_END, S_ERROR)
8084  * hfid(in):
8085  * class_oid(in):
8086  * oid(in/out): Object identifier of current record.
8087  * Will be set to first available record or NULL_OID when there
8088  * is not one.
8089  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8090  * describe the new record.
8091  * scan_cache(in/out): Scan cache or NULL
8092  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
8093  * COPY when the object is copied
8094  *
8095  */
8096 SCAN_CODE
8097 heap_first (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * oid, RECDES * recdes,
8098  HEAP_SCANCACHE * scan_cache, int ispeeking)
8099 {
8100  /* Retrieve the first record of the file */
8101  OID_SET_NULL (oid);
8102  oid->volid = hfid->vfid.volid;
8103 
8104  return heap_next (thread_p, hfid, class_oid, oid, recdes, scan_cache, ispeeking);
8105 }
8106 
8107 /*
8108  * heap_last () - Retrieve or peek last object of heap
8109  * return: SCAN_CODE
8110  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8111  * S_ERROR)
8112  * hfid(in):
8113  * class_oid(in):
8114  * oid(in/out): Object identifier of current record.
8115  * Will be set to last available record or NULL_OID when there is
8116  * not one.
8117  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8118  * describe the new record.
8119  * scan_cache(in/out): Scan cache or NULL
8120  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
8121  * COPY when the object is copied
8122  *
8123  */
8124 SCAN_CODE
8125 heap_last (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * oid, RECDES * recdes,
8126  HEAP_SCANCACHE * scan_cache, int ispeeking)
8127 {
8128  /* Retrieve the last record of the file */
8129  OID_SET_NULL (oid);
8130  oid->volid = hfid->vfid.volid;
8131 
8132  return heap_prev (thread_p, hfid, class_oid, oid, recdes, scan_cache, ispeeking);
8133 }
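/*
 * Usage sketch: a forward scan over a heap using heap_first/heap_next with a
 * regular scan cache. The helper is illustrative; hfid, class_oid and
 * mvcc_snapshot are assumed valid and error handling is minimal.
 */
static void
example_scan_heap (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, MVCC_SNAPSHOT * mvcc_snapshot)
{
  HEAP_SCANCACHE scan_cache;
  RECDES recdes = RECDES_INITIALIZER;
  OID oid;
  SCAN_CODE scan;

  if (heap_scancache_start (thread_p, &scan_cache, hfid, class_oid, true, false, mvcc_snapshot) != NO_ERROR)
    {
      return;
    }

  scan = heap_first (thread_p, hfid, class_oid, &oid, &recdes, &scan_cache, PEEK);
  while (scan == S_SUCCESS)
    {
      /* ... consume the peeked recdes for oid ... */
      scan = heap_next (thread_p, hfid, class_oid, &oid, &recdes, &scan_cache, PEEK);
    }

  heap_scancache_end (thread_p, &scan_cache);
}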
8134 
8135 #if defined (ENABLE_UNUSED_FUNCTION)
8136 /*
8137  * heap_cmp () - Compare heap object with current content
8138  * return: int (> 0 recdes is larger,
8139  * < 0 recdes is smaller, and
8140  * = 0 same)
8141  * oid(in): The object to compare
8142  * recdes(in): Compare object against this content
8143  *
8144  * Note: Compare the heap object against given content in ASCII format.
8145  */
8146 int
8147 heap_cmp (THREAD_ENTRY * thread_p, const OID * oid, RECDES * recdes)
8148 {
8149  HEAP_SCANCACHE scan_cache;
8150  RECDES peek_recdes;
8151  int compare;
8152 
8153  heap_scancache_quick_start (&scan_cache);
8154  if (heap_get (thread_p, oid, &peek_recdes, &scan_cache, PEEK, NULL_CHN) != S_SUCCESS)
8155  {
8156  compare = 1;
8157  }
8158  else if (recdes->length > peek_recdes.length)
8159  {
8160  compare = memcmp (recdes->data, peek_recdes.data, peek_recdes.length);
8161  if (compare == 0)
8162  {
8163  compare = 1;
8164  }
8165  }
8166  else
8167  {
8168  compare = memcmp (recdes->data, peek_recdes.data, recdes->length);
8169  if (compare == 0 && recdes->length != peek_recdes.length)
8170  {
8171  compare = -1;
8172  }
8173  }
8174 
8175  heap_scancache_end (thread_p, &scan_cache);
8176 
8177  return compare;
8178 }
8179 #endif /* ENABLE_UNUSED_FUNCTION */
8180 
8181 /*
8182  * heap_scanrange_start () - Initialize a scanrange cursor
8183  * return: NO_ERROR
8184  * scan_range(in/out): Scan range
8185  * hfid(in): Heap file identifier
8186  * class_oid(in): Class identifier
8187  * For any class, NULL or NULL_OID can be given
8188  *
8189  * Note: A scanrange structure is initialized. The scanrange structure
8190  * is used to define a scan range (set of objects) and to cache
8191  * information about the latest fetched page and memory allocated
8192  * by the scan functions. This information is used in future
8193  * scans, for example, to avoid hashing for the same page in the
8194  * page buffer pool or defining another allocation area.
8195  * The caller is responsible for declaring the end of a scan
8196  * range so that the fixed pages and allocated memory are freed.
8197  * Using many scans at the same time should be avoided since page
8198  * buffers are fixed and locked for future references and there
8199  * is a limit of buffers in the page buffer pool. This is
8200  * analogous to fetching many pages at the same time.
8201  */
8202 int
8203 heap_scanrange_start (THREAD_ENTRY * thread_p, HEAP_SCANRANGE * scan_range, const HFID * hfid, const OID * class_oid,
8204  MVCC_SNAPSHOT * mvcc_snapshot)
8205 {
8206  int ret = NO_ERROR;
8207 
8208  /* Start the scan cache */
8209  ret = heap_scancache_start (thread_p, &scan_range->scan_cache, hfid, class_oid, true, false, mvcc_snapshot);
8210  if (ret != NO_ERROR)
8211  {
8212  goto exit_on_error;
8213  }
8214 
8215  OID_SET_NULL (&scan_range->first_oid);
8216  scan_range->first_oid.volid = hfid->vfid.volid;
8217  scan_range->last_oid = scan_range->first_oid;
8218 
8219  return ret;
8220 
8221 exit_on_error:
8222 
8223  OID_SET_NULL (&scan_range->first_oid);
8224  OID_SET_NULL (&scan_range->last_oid);
8225 
8226  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
8227 }
8228 
8229 /*
8230  * heap_scanrange_end () - End of a scanrange
8231  * return:
8232  * scan_range(in/out): Scanrange
8233  *
8234  * Note: Any fixed heap page on the given scan is freed and any memory
8235  * allocated by this scan is also freed. The scan_range structure is undefined.
8236  */
8237 void
8239 {
8240  /* Finish the scan cache */
8241  heap_scancache_end (thread_p, &scan_range->scan_cache);
8242  OID_SET_NULL (&scan_range->first_oid);
8243  OID_SET_NULL (&scan_range->last_oid);
8244 }
8245 
8246 /*
8247  * heap_scanrange_to_following () - Define the following scanrange
8248  * return: SCAN_CODE
8249  * (Either of S_SUCCESS, S_END, S_ERROR)
8250  * scan_range(in/out): Scanrange
8251  * start_oid(in): Desired OID for first element in the scanrange or NULL
8252  *
8253  * Note: The range of a scanrange is defined. The scanrange is defined
8254  * as follows:
8255  * a: When start_oid == NULL, the first scanrange object is the
8256  * next object after the last object in the previous scanrange
8257  * b: When start_oid is the same as a NULL_OID, the first object
8258  * is the first heap object.
8259  * c: The first object in the scanrange is the given object.
8260  * The last object in the scanrange is either the first object in
8261  * the scanrange or the one after the first object which is not a
8262  * relocated or multipage object.
8263  */
8264 SCAN_CODE
8265 heap_scanrange_to_following (THREAD_ENTRY * thread_p, HEAP_SCANRANGE * scan_range, OID * start_oid)
8266 {
8267  SCAN_CODE scan;
8268  RECDES recdes = RECDES_INITIALIZER;
8269  INT16 slotid;
8270  VPID *vpid;
8271 
8272  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8273  {
8274  return S_ERROR;
8275  }
8276 
8277  if (start_oid != NULL)
8278  {
8279  if (OID_ISNULL (start_oid))
8280  {
8281  /* Scanrange starts at first heap object */
8282  scan =
8283  heap_first (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
8284  &scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
8285  if (scan != S_SUCCESS)
8286  {
8287  return scan;
8288  }
8289  }
8290  else
8291  {
8292  /* Scanrange starts with the given object */
8293  scan_range->first_oid = *start_oid;
8294  scan = heap_get_visible_version (thread_p, &scan_range->last_oid, &scan_range->scan_cache.node.class_oid,
8295  &recdes, &scan_range->scan_cache, PEEK, NULL_CHN);
8296  if (scan != S_SUCCESS)
8297  {
8298  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8299  {
8300  scan =
8301  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
8302  &scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
8303  if (scan != S_SUCCESS)
8304  {
8305  return scan;
8306  }
8307  }
8308  else
8309  {
8310  return scan;
8311  }
8312  }
8313  }
8314  }
8315  else
8316  {
8317  /*
8318  * Scanrange starts with the object that follows the last object of
8319  * the previous scanrange
8320  */
8321  scan_range->first_oid = scan_range->last_oid;
8322  scan =
8323  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
8324  &scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
8325  if (scan != S_SUCCESS)
8326  {
8327  return scan;
8328  }
8329  }
8330 
8331  scan_range->last_oid = scan_range->first_oid;
8332  if (scan_range->scan_cache.page_watcher.pgptr != NULL
8333  && (vpid = pgbuf_get_vpid_ptr (scan_range->scan_cache.page_watcher.pgptr)) != NULL
8334  && (vpid->pageid == scan_range->last_oid.pageid) && (vpid->volid == scan_range->last_oid.volid)
8335  && spage_get_record_type (scan_range->scan_cache.page_watcher.pgptr, scan_range->last_oid.slotid) == REC_HOME)
8336  {
8337  slotid = scan_range->last_oid.slotid;
8338  while (true)
8339  {
8340  if (spage_next_record (scan_range->scan_cache.page_watcher.pgptr, &slotid, &recdes, PEEK) != S_SUCCESS
8341  || spage_get_record_type (scan_range->scan_cache.page_watcher.pgptr, slotid) != REC_HOME)
8342  {
8343  break;
8344  }
8345  else
8346  {
8347  scan_range->last_oid.slotid = slotid;
8348  }
8349  }
8350  }
8351 
8352  return scan;
8353 }
8354 
8355 /*
8356  * heap_scanrange_to_prior () - Define the prior scanrange
8357  * return: SCAN_CODE
8358  * (Either of S_SUCCESS, S_END, S_ERROR)
8359  * scan_range(in/out): Scanrange
8360  * last_oid(in): Desired OID for first element in the scanrange or NULL
8361  *
8362  * Note: The range of a scanrange is defined. The scanrange is defined
8363  * as follows:
8364  * a: When last_oid == NULL, the last scanrange object is the
8365  * prior object after the first object in the previous
8366  * scanrange.
8367  * b: When last_oid is the same as a NULL_OID, the last object is
8368  * the last heap object.
8369  * c: The last object in the scanrange is the given object.
8370  * The first object in the scanrange is either the last object in
8371  * the scanrange or the one before the first object which is not
8372  * a relocated or multipage object.
8373  */
8374 SCAN_CODE
8375 heap_scanrange_to_prior (THREAD_ENTRY * thread_p, HEAP_SCANRANGE * scan_range, OID * last_oid)
8376 {
8377  SCAN_CODE scan;
8378  RECDES recdes = RECDES_INITIALIZER;
8379  INT16 slotid;
8380 
8381  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8382  {
8383  return S_ERROR;
8384  }
8385 
8386  if (last_oid != NULL)
8387  {
8388  if (OID_ISNULL (last_oid))
8389  {
8390  /* Scanrange ends at last heap object */
8391  scan =
8392  heap_last (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
8393  &scan_range->last_oid, &recdes, &scan_range->scan_cache, PEEK);
8394  if (scan != S_SUCCESS)
8395  {
8396  return scan;
8397  }
8398  }
8399  else
8400  {
8401  /* Scanrange ends with the given object */
8402  scan_range->last_oid = *last_oid;
8403  scan =
8404  heap_get_visible_version (thread_p, &scan_range->last_oid, &scan_range->scan_cache.node.class_oid, &recdes,
8405  &scan_range->scan_cache, PEEK, NULL_CHN);
8406  if (scan != S_SUCCESS)
8407  {
8408  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8409  {
8410  scan =
8411  heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
8412  &scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
8413  if (scan != S_SUCCESS)
8414  {
8415  return scan;
8416  }
8417  }
8418  }
8419  }
8420  }
8421  else
8422  {
8423  /*
8424  * Scanrange ends with the object prior to the first object of
8425  * the previous scanrange
8426  */
8427  scan_range->last_oid = scan_range->first_oid;
8428  scan =
8429  heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
8430  &scan_range->last_oid, &recdes, &scan_range->scan_cache, PEEK);
8431  if (scan != S_SUCCESS)
8432  {
8433  return scan;
8434  }
8435  }
8436 
8437  /*
8438  * Now define the first object for the scanrange. A scanrange starts
8439  * when a relocated or multipage object is found or when the last object
8440  * in the page is found.
8441  */
8442 
8443  scan_range->first_oid = scan_range->last_oid;
8444  if (scan_range->scan_cache.page_watcher.pgptr != NULL)
8445  {
8446  slotid = scan_range->first_oid.slotid;
8447  while (true)
8448  {
8449  if (spage_previous_record (scan_range->scan_cache.page_watcher.pgptr, &slotid, &recdes, PEEK) != S_SUCCESS
8450  || slotid == HEAP_HEADER_AND_CHAIN_SLOTID
8451  || spage_get_record_type (scan_range->scan_cache.page_watcher.pgptr, slotid) != REC_HOME)
8452  {
8453  break;
8454  }
8455  else
8456  {
8457  scan_range->first_oid.slotid = slotid;
8458  }
8459  }
8460  }
8461 
8462  return scan;
8463 }
8464 
8465 /*
8466  * heap_scanrange_next () - Retrieve or peek next object in the scanrange
8467  * return: SCAN_CODE
8468  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8469  * S_ERROR)
8470  * next_oid(in/out): Object identifier of current record.
8471  * Will be set to next available record or NULL_OID when
8472  * there is not one.
8473  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8474  * describe the new record.
8475  * scan_range(in/out): Scan range ... Cannot be NULL
8476  * ispeeking(in): PEEK when the object is peeked,
8477  * COPY when the object is copied
8478  *
8479  */
8480 SCAN_CODE
8481 heap_scanrange_next (THREAD_ENTRY * thread_p, OID * next_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
8482  int ispeeking)
8483 {
8484  SCAN_CODE scan;
8485 
8486  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8487  {
8488  return S_ERROR;
8489  }
8490 
8491  /*
8492  * If next_oid is less than the first OID in the scanrange.. get the first
8493  * object
8494  */
8495 
8496  if (OID_ISNULL (next_oid) || OID_LT (next_oid, &scan_range->first_oid))
8497  {
8498  /* Retrieve the first object in the scanrange */
8499  *next_oid = scan_range->first_oid;
8500  scan =
8501  heap_get_visible_version (thread_p, next_oid, &scan_range->scan_cache.node.class_oid, recdes,
8502  &scan_range->scan_cache, ispeeking, NULL_CHN);
8503  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8504  {
8505  scan =
8506  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, next_oid,
8507  recdes, &scan_range->scan_cache, ispeeking);
8508  }
8509  /* Make sure that we did not go overboard */
8510  if (scan == S_SUCCESS && OID_GT (next_oid, &scan_range->last_oid))
8511  {
8512  OID_SET_NULL (next_oid);
8513  scan = S_END;
8514  }
8515  }
8516  else
8517  {
8518  /* Make sure that this is not the last OID in the scanrange */
8519  if (OID_EQ (next_oid, &scan_range->last_oid))
8520  {
8521  OID_SET_NULL (next_oid);
8522  scan = S_END;
8523  }
8524  else
8525  {
8526  scan =
8527  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, next_oid,
8528  recdes, &scan_range->scan_cache, ispeeking);
8529  /* Make sure that we did not go overboard */
8530  if (scan == S_SUCCESS && OID_GT (next_oid, &scan_range->last_oid))
8531  {
8532  OID_SET_NULL (next_oid);
8533  scan = S_END;
8534  }
8535  }
8536  }
8537 
8538  return scan;
8539 }
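/*
 * Usage sketch: iterating a heap in scanrange units as described above. Each
 * heap_scanrange_to_following call defines the next range and
 * heap_scanrange_next walks the objects inside it. The helper is
 * illustrative; error handling is minimal.
 */
static void
example_iterate_by_scanrange (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid,
			      MVCC_SNAPSHOT * mvcc_snapshot)
{
  HEAP_SCANRANGE scan_range;
  RECDES recdes = RECDES_INITIALIZER;
  OID oid;

  if (heap_scanrange_start (thread_p, &scan_range, hfid, class_oid, mvcc_snapshot) != NO_ERROR)
    {
      return;
    }

  while (heap_scanrange_to_following (thread_p, &scan_range, NULL) == S_SUCCESS)
    {
      OID_SET_NULL (&oid);
      while (heap_scanrange_next (thread_p, &oid, &recdes, &scan_range, PEEK) == S_SUCCESS)
	{
	  /* ... consume the peeked recdes for oid ... */
	}
    }

  heap_scanrange_end (thread_p, &scan_range);
}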
8540 
8541 #if defined (ENABLE_UNUSED_FUNCTION)
8542 /*
8543  * heap_scanrange_prev () - Retrieve or peek the previous object in the scanrange
8544  * return:
8545  * returns/side-effects: SCAN_CODE
8546  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8547  * S_ERROR)
8548  * prev_oid(in/out): Object identifier of current record.
8549  * Will be set to previous available record or NULL_OID when
8550  * there is not one.
8551  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8552  * describe the new record.
8553  * scan_range(in/out): Scan range ... Cannot be NULL
8554  * ispeeking(in): PEEK when the object is peeked,
8555  * COPY when the object is copied
8556  *
8557  */
8558 SCAN_CODE
8559 heap_scanrange_prev (THREAD_ENTRY * thread_p, OID * prev_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
8560  int ispeeking)
8561 {
8562  SCAN_CODE scan;
8563 
8564  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8565  {
8566  return S_ERROR;
8567  }
8568 
8569  if (OID_ISNULL (prev_oid) || OID_GT (prev_oid, &scan_range->last_oid))
8570  {
8571  /* Retrieve the last object in the scanrange */
8572  *prev_oid = scan_range->last_oid;
8573  scan = heap_get (thread_p, prev_oid, recdes, &scan_range->scan_cache, ispeeking, NULL_CHN);
8574  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8575  {
8576  scan =
8577  heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, prev_oid,
8578  recdes, &scan_range->scan_cache, ispeeking);
8579  }
8580  /* Make sure that we did not go underboard */
8581  if (scan == S_SUCCESS && OID_LT (prev_oid, &scan_range->last_oid))
8582  {
8583  OID_SET_NULL (prev_oid);
8584  scan = S_END;
8585  }
8586  }
8587  else
8588  {
8589  /* Make sure that this is not the first OID in the scanrange */
8590  if (OID_EQ (prev_oid, &scan_range->first_oid))
8591  {
8592  OID_SET_NULL (prev_oid);
8593  scan = S_END;
8594  }
8595  else
8596  {
8597  scan =
8598  heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, prev_oid,
8599  recdes, &scan_range->scan_cache, ispeeking);
8600  if (scan == S_SUCCESS && OID_LT (prev_oid, &scan_range->last_oid))
8601  {
8602  OID_SET_NULL (prev_oid);
8603  scan = S_END;
8604  }
8605  }
8606  }
8607 
8608  return scan;
8609 }
8610 
8611 /*
8612  * heap_scanrange_first () - Retrieve or peek first object in the scanrange
8613  * return: SCAN_CODE
8614  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8615  * S_ERROR)
8616  * first_oid(in/out): Object identifier.
8617  * Set to first available record or NULL_OID when there
8618  * is not one.
8619  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8620  * describe the new record.
8621  * scan_range(in/out): Scan range ... Cannot be NULL
8622  * ispeeking(in): PEEK when the object is peeked,
8623  * COPY when the object is copied
8624  *
8625  */
8626 SCAN_CODE
8627 heap_scanrange_first (THREAD_ENTRY * thread_p, OID * first_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
8628  int ispeeking)
8629 {
8630  SCAN_CODE scan;
8631 
8632  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8633  {
8634  return S_ERROR;
8635  }
8636 
8637  /* Retrieve the first object in the scanrange */
8638  *first_oid = scan_range->first_oid;
8639  scan = heap_get (thread_p, first_oid, recdes, &scan_range->scan_cache, ispeeking, NULL_CHN);
8640  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8641  {
8642  scan =
8643  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, first_oid,
8644  recdes, &scan_range->scan_cache, ispeeking);
8645  }
8646  /* Make sure that we did not go overboard */
8647  if (scan == S_SUCCESS && OID_GT (first_oid, &scan_range->last_oid))
8648  {
8649  OID_SET_NULL (first_oid);
8650  scan = S_END;
8651  }
8652 
8653  return scan;
8654 }
8655 
8656 /*
8657  * heap_scanrange_last () - Retrieve or peek last object in the scanrange
8658  * return: SCAN_CODE
8659  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8660  * S_ERROR)
8661  * last_oid(in/out): Object identifier.
8662  * Set to last available record or NULL_OID when there is
8663  * not one
8664  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8665  * describe the new record.
8666  * scan_range(in/out): Scan range ... Cannot be NULL
8667  * ispeeking(in): PEEK when the object is peeked,
8668  * COPY when the object is copied
8669  *
8670  */
8671 SCAN_CODE
8672 heap_scanrange_last (THREAD_ENTRY * thread_p, OID * last_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
8673  int ispeeking)
8674 {
8675  SCAN_CODE scan;
8676 
8677  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8678  {
8679  return S_ERROR;
8680  }
8681 
8682  /* Retrieve the last object in the scanrange */
8683  *last_oid = scan_range->last_oid;
8684  scan = heap_get (thread_p, last_oid, recdes, &scan_range->scan_cache, ispeeking, NULL_CHN);
8685  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8686  {
8687  scan =
8688  heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, last_oid,
8689  recdes, &scan_range->scan_cache, ispeeking);
8690  }
8691  /* Make sure that we did not go underboard */
8692  if (scan == S_SUCCESS && OID_LT (last_oid, &scan_range->last_oid))
8693  {
8694  OID_SET_NULL (last_oid);
8695  scan = S_END;
8696  }
8697 
8698  return scan;
8699 }
8700 #endif
8701 
8702 /*
8703  * heap_does_exist () - Does object exist?
8704  * return: true/false
8705  * class_oid(in): Class identifier of object or NULL
8706  * oid(in): Object identifier
8707  *
8708  * Note: Check if the object associated with the given OID exists.
8709  * If the class of the object does not exist, the object does not
8710  * exist either. If the class is not given or a NULL_OID is
8711  * passed, the function finds the class oid.
8712  */
8713 bool
8714 heap_does_exist (THREAD_ENTRY * thread_p, OID * class_oid, const OID * oid)
8715 {
8716  VPID vpid;
8717  OID tmp_oid;
8718  PGBUF_WATCHER pg_watcher;
8719  bool doesexist = true;
8720  INT16 rectype;
8721  bool old_check_interrupt;
8722  int old_wait_msec;
8723 
8725 
8726  old_check_interrupt = logtb_set_check_interrupt (thread_p, false);
8727  old_wait_msec = xlogtb_reset_wait_msecs (thread_p, LK_INFINITE_WAIT);
8728 
8729  if (HEAP_ISVALID_OID (thread_p, oid) != DISK_VALID)
8730  {
8731  doesexist = false;
8732  goto exit_on_end;
8733  }
8734 
8735  /*
8736  * If the class is not NULL and it is different from the Rootclass,
8737  * make sure that it exists. The Rootclass always exists, so there is
8738  * no need to check for it.
8739  */
8740  if (class_oid != NULL && !OID_EQ (class_oid, oid_Root_class_oid)
8741  && HEAP_ISVALID_OID (thread_p, class_oid) != DISK_VALID)
8742  {
8743  doesexist = false;
8744  goto exit_on_end;
8745  }
8746 
8747  while (doesexist)
8748  {
8749  if (oid->slotid == HEAP_HEADER_AND_CHAIN_SLOTID || oid->slotid < 0 || oid->pageid < 0 || oid->volid < 0)
8750  {
8751  doesexist = false;
8752  goto exit_on_end;
8753  }
8754 
8755  vpid.volid = oid->volid;
8756  vpid.pageid = oid->pageid;
8757 
8758  /* Fetch the page where the record is stored */
8759 
8760  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
8761  if (pg_watcher.pgptr == NULL)
8762  {
8763  if (er_errid () == ER_PB_BAD_PAGEID)
8764  {
8765  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid->volid, oid->pageid,
8766  oid->slotid);
8767  }
8768 
8769  /* something went wrong, give up */
8770  doesexist = false;
8771  goto exit_on_end;
8772  }
8773 
8774  doesexist = spage_is_slot_exist (pg_watcher.pgptr, oid->slotid);
8775  rectype = spage_get_record_type (pg_watcher.pgptr, oid->slotid);
8776 
8777  /*
8778  * Check the class
8779  */
8780 
8781  if (doesexist && rectype != REC_ASSIGN_ADDRESS)
8782  {
8783  if (class_oid == NULL)
8784  {
8785  class_oid = &tmp_oid;
8786  OID_SET_NULL (class_oid);
8787  }
8788 
8789  if (OID_ISNULL (class_oid))
8790  {
8791  /*
8792  * Caller does not know the class of the object. Get the class
8793  * identifier from disk
8794  */
8795  if (heap_get_class_oid_from_page (thread_p, pg_watcher.pgptr, class_oid) != NO_ERROR)
8796  {
8797  assert_release (false);
8798  doesexist = false;
8799  goto exit_on_end;
8800  }
8801  assert (!OID_ISNULL (class_oid));
8802  }
8803 
8804  pgbuf_ordered_unfix (thread_p, &pg_watcher);
8805 
8806  /* If doesexist is true, then check its class */
8807  if (!OID_IS_ROOTOID (class_oid))
8808  {
8809  /*
8810  * Make sure that the class exist too. Loop with this
8811  */
8812  oid = class_oid;
8813  class_oid = oid_Root_class_oid;
8814  }
8815  else
8816  {
8817  break;
8818  }
8819  }
8820  else
8821  {
8822  break;
8823  }
8824  }
8825 
8826 exit_on_end:
8827 
8828  if (pg_watcher.pgptr != NULL)
8829  {
8830  pgbuf_ordered_unfix (thread_p, &pg_watcher);
8831  }
8832 
8833  (void) logtb_set_check_interrupt (thread_p, old_check_interrupt);
8834  (void) xlogtb_reset_wait_msecs (thread_p, old_wait_msec);
8835 
8836  return doesexist;
8837 }
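/*
 * Usage sketch: existence check with an unknown class. Passing NULL (or a
 * NULL_OID) for class_oid makes heap_does_exist resolve the class from the
 * object's page; thread_p and oid are assumed to be available.
 */
  if (heap_does_exist (thread_p, NULL, &oid))
    {
      /* both the object and its class exist */
    }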
8838 
8839 /*
8840  * heap_is_object_not_null () - Check if object should be considered not NULL.
8841  *
8842  * return : True if object is visible or too new, false if it is deleted or if errors occur.
8843  * thread_p (in) : Thread entry.
8844  * class_oid (in) : Class OID.
8845  * oid (in) : Instance OID.
8846  */
8847 bool
8848 heap_is_object_not_null (THREAD_ENTRY * thread_p, OID * class_oid, const OID * oid)
8849 {
8850  bool old_check_interrupt = logtb_set_check_interrupt (thread_p, false);
8851  bool doesexist = false;
8852  HEAP_SCANCACHE scan_cache;
8853  SCAN_CODE scan = S_SUCCESS;
8854  OID local_class_oid = OID_INITIALIZER;
8855  MVCC_SNAPSHOT *mvcc_snapshot_ptr;
8856  MVCC_SNAPSHOT copy_mvcc_snapshot;
8857  bool is_scancache_started = false;
8858 
8859  er_stack_push ();
8860 
8861  if (HEAP_ISVALID_OID (thread_p, oid) != DISK_VALID)
8862  {
8863  goto exit_on_end;
8864  }
8865 
8866  /*
8867  * If the class is not NULL and it is different from the Root class,
8868  * make sure that it exists. The Root class always exists, so there is no need to check for it
8869  */
8870  if (class_oid != NULL && !OID_EQ (class_oid, oid_Root_class_oid)
8871  && HEAP_ISVALID_OID (thread_p, class_oid) != DISK_VALID)
8872  {
8873  goto exit_on_end;
8874  }
8875  if (class_oid == NULL)
8876  {
8877  class_oid = &local_class_oid;
8878  }
8879 
8880  if (heap_scancache_quick_start (&scan_cache) != NO_ERROR)
8881  {
8882  goto exit_on_end;
8883  }
8884  is_scancache_started = true;
8885 
8886  mvcc_snapshot_ptr = logtb_get_mvcc_snapshot (thread_p);
8887  if (mvcc_snapshot_ptr == NULL)
8888  {
8889  assert (false);
8890  goto exit_on_end;
8891  }
8892  /* Make a copy of snapshot. We need all MVCC information, but we also want to change the visibility function. */
8893  mvcc_snapshot_ptr->copy_to (copy_mvcc_snapshot);
8894  copy_mvcc_snapshot.snapshot_fnc = mvcc_is_not_deleted_for_snapshot;
8895  scan_cache.mvcc_snapshot = &copy_mvcc_snapshot;
8896 
8897  /* Check only if the last version of the object is not deleted, see mvcc_is_not_deleted_for_snapshot return values */
8898  scan = heap_get_visible_version (thread_p, oid, class_oid, NULL, &scan_cache, PEEK, NULL_CHN);
8899  if (scan != S_SUCCESS)
8900  {
8901  goto exit_on_end;
8902  }
8903  assert (!OID_ISNULL (class_oid));
8904 
8905  /* Check class exists. */
8906  doesexist = heap_does_exist (thread_p, oid_Root_class_oid, class_oid);
8907 
8908 exit_on_end:
8909  (void) logtb_set_check_interrupt (thread_p, old_check_interrupt);
8910 
8911  if (is_scancache_started)
8912  {
8913  heap_scancache_end (thread_p, &scan_cache);
8914  }
8915 
8916  /* We don't need to propagate errors from here. */
8917  er_stack_pop ();
8918 
8919  return doesexist;
8920 }
8921 
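/*
 * Illustrative usage sketch (not part of the original source): deciding whether
 * a referenced instance counts as not NULL (visible or too new) without
 * propagating errors. Assumes thread_p and ref_oid are available; class_oid is
 * passed as NULL so the class is resolved from the heap page.
 *
 *   if (heap_is_object_not_null (thread_p, NULL, &ref_oid))
 *     {
 *       ... the reference is considered not NULL ...
 *     }
 */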
8922 /*
8923  * heap_get_num_objects () - Count the number of objects
8924  * return: number of records or -1 in case of an error
8925  * hfid(in): Object heap file identifier
8926  * npages(out): Total number of heap pages
8927  * nobjs(out): Number of objects (records) in the heap
8928  * avg_length(out): Average object length
8929  *
8930  * Note: Count the number of objects stored on the given heap.
8931  * This function is expensive since all pages of the heap are
8932  * fetched to find the number of objects.
8933  */
8934 int
8935 heap_get_num_objects (THREAD_ENTRY * thread_p, const HFID * hfid, int *npages, int *nobjs, int *avg_length)
8936 {
8937  VPID vpid; /* Page-volume identifier */
8938  LOG_DATA_ADDR addr_hdr; /* Address of logging data */
8939  RECDES hdr_recdes; /* Record descriptor to point to space statistics */
8940  HEAP_HDR_STATS *heap_hdr; /* Heap header */
8941  PGBUF_WATCHER hdr_pg_watcher;
8942 
8943  /*
8944  * Get the heap header in exclusive mode and call the synchronization to
8945  * update the statistics of the heap. The number of record/objects is
8946  * updated.
8947  */
8948 
8949  PGBUF_INIT_WATCHER (&hdr_pg_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);
8950 
8951  vpid.volid = hfid->vfid.volid;
8952  vpid.pageid = hfid->hpgid;
8953 
8954  addr_hdr.vfid = &hfid->vfid;
8955  addr_hdr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
8956 
8957  if (pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &hdr_pg_watcher) != NO_ERROR)
8958  {
8959  return ER_FAILED;
8960  }
8961 
8962  (void) pgbuf_check_page_ptype (thread_p, hdr_pg_watcher.pgptr, PAGE_HEAP);
8963 
8964  if (spage_get_record (thread_p, hdr_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
8965  {
8966  pgbuf_ordered_unfix (thread_p, &hdr_pg_watcher);
8967  return ER_FAILED;
8968  }
8969 
8970  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
8971  if (heap_stats_sync_bestspace (thread_p, hfid, heap_hdr, pgbuf_get_vpid_ptr (hdr_pg_watcher.pgptr), true, true) < 0)
8972  {
8973  pgbuf_ordered_unfix (thread_p, &hdr_pg_watcher);
8974  return ER_FAILED;
8975  }
8976  *npages = heap_hdr->estimates.num_pages;
8977  *nobjs = heap_hdr->estimates.num_recs;
8978  if (*nobjs > 0)
8979  {
8980  *avg_length = (int) ((heap_hdr->estimates.recs_sumlen / (float) *nobjs) + 0.9);
8981  }
8982  else
8983  {
8984  *avg_length = 0;
8985  }
8986 
8987  addr_hdr.pgptr = hdr_pg_watcher.pgptr;
8988  log_skip_logging (thread_p, &addr_hdr);
8989  pgbuf_ordered_set_dirty_and_free (thread_p, &hdr_pg_watcher);
8990 
8991  return *nobjs;
8992 }
8993 
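/*
 * Illustrative usage sketch (not part of the original source): getting an exact
 * object count, which pays the cost of synchronizing the best-space statistics.
 * Assumes thread_p and a valid HFID hfid; for a cheap approximation see
 * heap_estimate () below.
 *
 *   int npages, nobjs, avg_len;
 *
 *   if (heap_get_num_objects (thread_p, &hfid, &npages, &nobjs, &avg_len) < 0)
 *     {
 *       ... error: the header page could not be fixed or the stats sync failed ...
 *     }
 */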
8994 /*
8995  * heap_estimate () - Estimate the number of pages, objects, average length
8996  * return: number of pages estimated or -1 in case of an error
8997  * hfid(in): Object heap file identifier
8998  * npages(out): Estimated number of heap pages
8999  * nobjs(out): Estimated number of objects
9000  * avg_length(out): Estimated average object length
9001  *
9002  * Note: Estimate the number of pages, objects, and average length of objects.
9003  */
9004 int
9005 heap_estimate (THREAD_ENTRY * thread_p, const HFID * hfid, int *npages, int *nobjs, int *avg_length)
9006 {
9007  VPID vpid; /* Page-volume identifier */
9008  PAGE_PTR hdr_pgptr = NULL; /* Page pointer */
9009  RECDES hdr_recdes; /* Record descriptor to point to space statistics */
9010  HEAP_HDR_STATS *heap_hdr; /* Heap header */
9011 
9012  /*
9013  * Get the heap header in shared mode since it is an estimation of the
9014  * number of objects.
9015  */
9016 
9017  vpid.volid = hfid->vfid.volid;
9018  vpid.pageid = hfid->hpgid;
9019 
9020  hdr_pgptr = pgbuf_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH);
9021  if (hdr_pgptr == NULL)
9022  {
9023  /* something went wrong. Unable to fetch header page */
9024  return ER_FAILED;
9025  }
9026 
9027  (void) pgbuf_check_page_ptype (thread_p, hdr_pgptr, PAGE_HEAP);
9028 
9029  if (spage_get_record (thread_p, hdr_pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
9030  {
9031  pgbuf_unfix_and_init (thread_p, hdr_pgptr);
9032  return ER_FAILED;
9033  }
9034 
9035  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
9036  *npages = heap_hdr->estimates.num_pages;
9037  *nobjs = heap_hdr->estimates.num_recs;
9038  if (*nobjs > 0)
9039  {
9040  *avg_length = (int) ((heap_hdr->estimates.recs_sumlen / (float) *nobjs) + 0.9);
9041  }
9042  else
9043  {
9044  *avg_length = 0;
9045  }
9046 
9047  pgbuf_unfix_and_init (thread_p, hdr_pgptr);
9048 
9049  return *npages;
9050 }
9051 
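/*
 * Illustrative usage sketch (not part of the original source): reading the
 * header-based estimates, which only fixes the heap header page in shared mode.
 * Assumes thread_p and a valid HFID hfid.
 *
 *   int est_pages, est_objs, est_avg_len;
 *
 *   if (heap_estimate (thread_p, &hfid, &est_pages, &est_objs, &est_avg_len) == -1)
 *     {
 *       ... unable to read the heap header; fall back or report ...
 *     }
 */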
9052 /*
9053  * heap_estimate_num_objects () - Estimate the number of objects
9054  * return: number of records estimated or -1 in case of an error
9055  * hfid(in): Object heap file identifier
9056  *
9057  * Note: Estimate the number of objects stored on the given heap.
9058  */
9059 int
9060 heap_estimate_num_objects (THREAD_ENTRY * thread_p, const HFID * hfid)
9061 {
9062  int ignore_npages = -1;
9063  int ignore_avg_reclen = -1;
9064  int nobjs = -1;
9065 
9066  if (heap_estimate (thread_p, hfid, &ignore_npages, &nobjs, &ignore_avg_reclen) == -1)
9067  {
9068  return ER_FAILED;
9069  }
9070 
9071  return nobjs;
9072 }
9073 
9074 /*
9075  * heap_estimate_avg_length () - Estimate the average length of records
9076  * return: error code
9077  * hfid(in): Object heap file identifier
9078  * avg_reclen(out) : average length
9079  *
9080  * Note: Estimate the average length of the objects stored on the heap.
9081  * This function is mainly used when we are creating the OID of
9082  * an object whose length is not yet known, mainly for loaddb
9083  * while resolving forward references to other objects.
9084  */
9085 static int
9086 heap_estimate_avg_length (THREAD_ENTRY * thread_p, const HFID * hfid, int &avg_reclen)
9087 {
9088  int ignore_npages;
9089  int ignore_nobjs;
9090 
9091  if (heap_estimate (thread_p, hfid, &ignore_npages, &ignore_nobjs, &avg_reclen) == -1)
9092  {
9093  return ER_FAILED;
9094  }
9095 
9096  return NO_ERROR;
9097 }
9098 
9099 /*
9100  * heap_get_capacity () - Find space consumed by heap
9101  * return: NO_ERROR
9102  * hfid(in): Object heap file identifier
9103  * num_recs(in/out): Total Number of objects
9104  * num_recs_relocated(in/out):
9105  * num_recs_inovf(in/out):
9106  * num_pages(in/out): Total number of heap pages
9107  * avg_freespace(in/out): Average free space per page
9108  * avg_freespace_nolast(in/out): Average free space per page without taking
9109  * the last page into consideration
9110  * avg_reclength(in/out): Average object length
9111  * avg_overhead(in/out): Average overhead per page
9112  *
9113  * Note: Find the current storage facts/capacity for given heap.
9114  */
9115 static int
9116 heap_get_capacity (THREAD_ENTRY * thread_p, const HFID * hfid, INT64 * num_recs, INT64 * num_recs_relocated,
9117  INT64 * num_recs_inovf, INT64 * num_pages, int *avg_freespace, int *avg_freespace_nolast,
9118  int *avg_reclength, int *avg_overhead)
9119 {
9120  VPID vpid; /* Page-volume identifier */
9121  RECDES recdes; /* Header record descriptor */
9122  INT16 slotid; /* Slot of one object */
9123  OID *ovf_oid;
9124  int last_freespace;
9125  int ovf_len;
9126  int ovf_num_pages;
9127  int ovf_free_space;
9128  int ovf_overhead;
9129  int j;
9130  INT16 type = REC_UNKNOWN;
9131  int ret = NO_ERROR;
9132  INT64 sum_freespace = 0;
9133  INT64 sum_reclength = 0;
9134  INT64 sum_overhead = 0;
9135  PGBUF_WATCHER pg_watcher;
9136  PGBUF_WATCHER old_pg_watcher;
9137 
9138  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
9139  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
9140 
9141  *num_recs = 0;
9142  *num_pages = 0;
9143  *avg_freespace = 0;
9144  *avg_reclength = 0;
9145  *avg_overhead = 0;
9146  *num_recs_relocated = 0;
9147  *num_recs_inovf = 0;
9148  last_freespace = 0;
9149 
9150  vpid.volid = hfid->vfid.volid;
9151  vpid.pageid = hfid->hpgid;
9152 
9153  while (!VPID_ISNULL (&vpid))
9154  {
9155  pg_watcher.pgptr =
9156  heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, NULL, &pg_watcher);
9157  if (old_pg_watcher.pgptr != NULL)
9158  {
9159  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
9160  }
9161 
9162  if (pg_watcher.pgptr == NULL)
9163  {
9164  /* something went wrong, return error */
9165  goto exit_on_error;
9166  }
9167 
9168  slotid = -1;
9169  j = spage_number_of_records (pg_watcher.pgptr);
9170 
9171  last_freespace = spage_get_free_space (thread_p, pg_watcher.pgptr);
9172 
9173  *num_pages += 1;
9174  sum_freespace += last_freespace;
9175  sum_overhead += j * spage_slot_size ();
9176 
9177  while ((j--) > 0)
9178  {
9179  if (spage_next_record (pg_watcher.pgptr, &slotid, &recdes, PEEK) == S_SUCCESS)
9180  {
9181  if (slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
9182  {
9183  type = spage_get_record_type (pg_watcher.pgptr, slotid);
9184  switch (type)
9185  {
9186  case REC_RELOCATION:
9187  *num_recs_relocated += 1;
9188  sum_overhead += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);
9189  break;
9190  case REC_ASSIGN_ADDRESS:
9191  case REC_HOME:
9192  case REC_NEWHOME:
9193  /*
9194  * Note: for newhome (relocated), we are including the length
9195  * and number of records. In the relocation record (above)
9196  * we are just adding the overhead and number of
9197  * relocation records.
9198  * For assign address, we assume the given size.
9199  */
9200  *num_recs += 1;
9201  sum_reclength += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);
9202  break;
9203  case REC_BIGONE:
9204  *num_recs += 1;
9205  *num_recs_inovf += 1;
9206  sum_overhead += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);
9207 
9208  ovf_oid = (OID *) recdes.data;
9209  if (heap_ovf_get_capacity (thread_p, ovf_oid, &ovf_len, &ovf_num_pages, &ovf_overhead,
9210  &ovf_free_space) == NO_ERROR)
9211  {
9212  sum_reclength += ovf_len;
9213  *num_pages += ovf_num_pages;
9214  sum_freespace += ovf_free_space;
9215  sum_overhead += ovf_overhead;
9216  }
9217  break;
9218  case REC_MARKDELETED:
9219  /*
9220  * TODO Find out and document here why this is added to
9221  * the overhead. The record has been deleted so its
9222  * length should no longer have any meaning. Perhaps
9223  * the length of the slot should have been added instead?
9224  */
9225  sum_overhead += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);
9226  break;
9228  default:
9229  break;
9230  }
9231  }
9232  }
9233  }
9234  (void) heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &vpid);
9235  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
9236  }
9237 
9238  if (old_pg_watcher.pgptr != NULL)
9239  {
9240  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
9241  }
9242 
9243  assert (pg_watcher.pgptr == NULL);
9244 
9245  if (*num_pages > 0)
9246  {
9247  /*
9248  * Don't take the last page into consideration for free space,
9249  * since it would skew the average free space.
9250  */
9251  *avg_freespace_nolast = ((*num_pages > 1) ? (int) ((sum_freespace - last_freespace) / (*num_pages - 1)) : 0);
9252  *avg_freespace = (int) (sum_freespace / *num_pages);
9253  *avg_overhead = (int) (sum_overhead / *num_pages);
9254  }
9255 
9256  if (*num_recs != 0)
9257  {
9258  *avg_reclength = (int) (sum_reclength / *num_recs);
9259  }
9260 
9261  return ret;
9262 
9263 exit_on_error:
9264 
9265  if (old_pg_watcher.pgptr != NULL)
9266  {
9267  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
9268  }
9269  assert (pg_watcher.pgptr == NULL);
9270 
9271  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
9272 }
9273 
9274 /*
9275 * heap_get_class_oid () - Get class for object. This function doesn't follow
9276 * MVCC versions. The caller is responsible for using the right
9277 * version of the object.
9278 *
9279 * return : Scan code.
9280 * thread_p (in) : Thread entry.
9281 * oid (in) : Object OID.
9282 * class_oid (out) : Output class OID.
9283 */
9284 SCAN_CODE
9285 heap_get_class_oid (THREAD_ENTRY * thread_p, const OID * oid, OID * class_oid)
9286 {
9287  PGBUF_WATCHER page_watcher;
9288  int err;
9289 
9291 
9292  assert (oid != NULL && !OID_ISNULL (oid) && class_oid != NULL);
9293  OID_SET_NULL (class_oid);
9294 
9295  err = heap_prepare_object_page (thread_p, oid, &page_watcher, PGBUF_LATCH_READ);
9296  if (err != NO_ERROR)
9297  {
9298  /* for a non-existent object, return S_DOESNT_EXIST and let the caller handle the case */
9299  return err == ER_HEAP_UNKNOWN_OBJECT ? S_DOESNT_EXIST : S_ERROR;
9300  }
9301 
9302  /* Get class OID from HEAP_CHAIN. */
9303  if (heap_get_class_oid_from_page (thread_p, page_watcher.pgptr, class_oid) != NO_ERROR)
9304  {
9305  /* Unexpected. */
9306  assert_release (false);
9307  pgbuf_ordered_unfix (thread_p, &page_watcher);
9308  return S_ERROR;
9309  }
9310 
9311  pgbuf_ordered_unfix (thread_p, &page_watcher);
9312  return S_SUCCESS;
9313 }
9314 
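/*
 * Illustrative usage sketch (not part of the original source): resolving the
 * class of an instance and distinguishing a vanished object from a real error.
 * Assumes thread_p and inst_oid are available.
 *
 *   OID class_oid;
 *   SCAN_CODE code = heap_get_class_oid (thread_p, &inst_oid, &class_oid);
 *
 *   if (code == S_DOESNT_EXIST)
 *     {
 *       ... the object's page/slot no longer exists ...
 *     }
 *   else if (code != S_SUCCESS)
 *     {
 *       ... genuine error path ...
 *     }
 */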
9315 /*
9316  * heap_get_class_name () - Find classname when oid is a class
9317  * return: error_code
9318  *
9319  * class_oid(in): The Class Object identifier
9320  * class_name(out): Reference of the Class name pointer where name will reside;
9321  * The classname space must be released by the caller.
9322  *
9323  * Note: Find the name of the given class identifier. It asserts that the given OID is a class OID.
9324  *
9325  * Note: Classname pointer must be released by the caller using free_and_init
9326  */
9327 int
9328 heap_get_class_name (THREAD_ENTRY * thread_p, const OID * class_oid, char **class_name)
9329 {
9330  return heap_get_class_name_alloc_if_diff (thread_p, class_oid, NULL, class_name);
9331 }
9332 
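/*
 * Illustrative usage sketch (not part of the original source): fetching a class
 * name and releasing it as the contract above requires. Assumes thread_p and
 * class_oid are available.
 *
 *   char *classname = NULL;
 *
 *   if (heap_get_class_name (thread_p, &class_oid, &classname) == NO_ERROR && classname != NULL)
 *     {
 *       ... use classname ...
 *       free_and_init (classname);
 *     }
 */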
9333 /*
9334  * heap_get_class_name_alloc_if_diff () - Get the name of given class
9335  * name is malloc'ed when different from the given name
9336  * return: error_code if an error (other than ER_HEAP_NODATA_NEWADDRESS) occurs
9337  *
9338  * class_oid(in): The Class Object identifier
9339  * guess_classname(in): Guess name of class
9340  * classname_out(out): guess_classname when it is the real name. Don't need to free.
9341  * malloc classname when different from guess_classname.
9342  * Must be freed by the caller (free_and_init)
9343  * NULL in case of error
9344  *
9345  * Note: Find the name of the given class identifier. If the name is
9346  * the same as the guessed name, the guessed name is returned.
9347  * Otherwise, an allocated area with the name of the class is
9348  * returned.
9349  */
9350 int
9351 heap_get_class_name_alloc_if_diff (THREAD_ENTRY * thread_p, const OID * class_oid, char *guess_classname,
9352  char **classname_out)
9353 {
9354  char *classname = NULL;
9355  RECDES recdes;
9356  HEAP_SCANCACHE scan_cache;
9357  int error_code = NO_ERROR;
9358 
9359  (void) heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
9360 
9361  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) == S_SUCCESS)
9362  {
9363  classname = or_class_name (&recdes);
9364  if (guess_classname == NULL || strcmp (guess_classname, classname) != 0)
9365  {
9366  /*
9367  * The names are different; return a copy that must be freed.
9368  */
9369  *classname_out = strdup (classname);
9370  if (*classname_out == NULL)
9371  {
9372  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
9373  (strlen (classname) + 1) * sizeof (char));
9374  error_code = ER_FAILED;
9375  }
9376  }
9377  else
9378  {
9379  /*
9380  * The classnames are identical
9381  */
9382  *classname_out = guess_classname;
9383  }
9384  }
9385  else
9386  {
9387  ASSERT_ERROR_AND_SET (error_code);
9388  *classname_out = NULL;
9389  if (error_code == ER_HEAP_NODATA_NEWADDRESS)
9390  {
9391  /* clear ER_HEAP_NODATA_NEWADDRESS */
9392  er_clear ();
9393  error_code = NO_ERROR;
9394  }
9395  }
9396 
9397  heap_scancache_end (thread_p, &scan_cache);
9398 
9399  return error_code;
9400 }
9401 
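/*
 * Illustrative usage sketch (not part of the original source): validating a
 * guessed class name and freeing the result only when a new copy was allocated.
 * Assumes thread_p, class_oid and a char *guess are available.
 *
 *   char *real_name = NULL;
 *
 *   if (heap_get_class_name_alloc_if_diff (thread_p, &class_oid, guess, &real_name) == NO_ERROR
 *       && real_name != NULL && real_name != guess)
 *     {
 *       ... the names differed; real_name was allocated and must be freed ...
 *       free_and_init (real_name);
 *     }
 */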
9402 /*
9403  * heap_attrinfo_start () - Initialize an attribute information structure
9404  * return: NO_ERROR
9405  * class_oid(in): The class identifier of the instances whose attribute
9406  * values are going to be read.
9407  * requested_num_attrs(in): Number of requested attributes
9408  * If <= 0 is given, ALL attributes are assumed.
9409  * attrids(in): Array of requested attributes
9410  * attr_info(in/out): The attribute information structure
9411  *
9412  * Note: Initialize an attribute information structure, so that values
9413  * of instances can be retrieved based on the desired attributes.
9414  * If the requested number of attributes is less than zero,
9415  * all attributes will be assumed instead. In this case
9416  * the attrids array should be NULL.
9417  *
9418  * The attrinfo structure is a structure through which values of
9419  * instances can be read. For example, an object is retrieved,
9420  * then some of its attributes are converted to dbvalues and
9421  * placed in this structure.
9422  *
9423  * Note: The caller must call heap_attrinfo_end after it is done with
9424  * the attribute information.
9425  */
9426 int
9427 heap_attrinfo_start (THREAD_ENTRY * thread_p, const OID * class_oid, int requested_num_attrs, const ATTR_ID * attrids,
9428  HEAP_CACHE_ATTRINFO * attr_info)
9429 {
9430  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
9431  bool getall; /* Want all attribute values */
9432  int i;
9433  int ret = NO_ERROR;
9434 
9435  if (requested_num_attrs == 0)
9436  {
9437  /* initialize the attrinfo cache and return, there is nothing else to do */
9438  (void) memset (attr_info, '\0', sizeof (HEAP_CACHE_ATTRINFO));
9439 
9440  /* now set the num_values to -1 which indicates that this is an empty HEAP_CACHE_ATTRINFO and shouldn't be
9441  * operated on. */
9442  attr_info->num_values = -1;
9443  return NO_ERROR;
9444  }
9445 
9446  if (requested_num_attrs < 0)
9447  {
9448  getall = true;
9449  }
9450  else
9451  {
9452  getall = false;
9453  }
9454 
9455  /*
9456  * initialize attribute information
9457  *
9458  */
9459 
9460  attr_info->class_oid = *class_oid;
9461  attr_info->last_cacheindex = -1;
9462  attr_info->read_cacheindex = -1;
9463 
9464  attr_info->last_classrepr = NULL;
9465  attr_info->read_classrepr = NULL;
9466 
9467  OID_SET_NULL (&attr_info->inst_oid);
9468  attr_info->inst_chn = NULL_CHN;
9469  attr_info->values = NULL;
9470  attr_info->num_values = -1; /* initialize attr_info */
9471 
9472  /*
9473  * Find the most recent representation of the instances of the class, and
9474  * cache the structure that describe the attributes of this representation.
9475  * At the same time find the default values of attributes, the shared
9476  * attribute values and the class attribute values.
9477  */
9478 
9479  attr_info->last_classrepr =
9480  heap_classrepr_get (thread_p, &attr_info->class_oid, NULL, NULL_REPRID, &attr_info->last_cacheindex);
9481  if (attr_info->last_classrepr == NULL)
9482  {
9483  goto exit_on_error;
9484  }
9485 
9486  /*
9487  * If the requested number of attributes is < 0, get all attributes of the last
9488  * representation.
9489  */
9490 
9491  if (requested_num_attrs < 0)
9492  {
9493  requested_num_attrs = attr_info->last_classrepr->n_attributes;
9494  }
9495  else if (requested_num_attrs >
9496  (attr_info->last_classrepr->n_attributes + attr_info->last_classrepr->n_shared_attrs +
9497  attr_info->last_classrepr->n_class_attrs))
9498  {
9499  fprintf (stdout, " XXX There are not that many attributes. Num_attrs = %d, Num_requested_attrs = %d\n",
9500  attr_info->last_classrepr->n_attributes, requested_num_attrs);
9501  requested_num_attrs =
9502  attr_info->last_classrepr->n_attributes + attr_info->last_classrepr->n_shared_attrs +
9503  attr_info->last_classrepr->n_class_attrs;
9504  }
9505 
9506  if (requested_num_attrs > 0)
9507  {
9508  attr_info->values =
9509  (HEAP_ATTRVALUE *) db_private_alloc (thread_p, requested_num_attrs * sizeof (*(attr_info->values)));
9510  if (attr_info->values == NULL)
9511  {
9512  goto exit_on_error;
9513  }
9514  }
9515  else
9516  {
9517  attr_info->values = NULL;
9518  }
9519 
9520  attr_info->num_values = requested_num_attrs;
9521 
9522  /*
9523  * Set the attribute identifier of the desired attributes in the value
9524  * attribute information, and indicate that the current value is
9525  * uninitialized. That is, it has not been read or set yet.
9526  */
9527 
9528  for (i = 0; i < attr_info->num_values; i++)
9529  {
9530  value = &attr_info->values[i];
9531  if (getall == true)
9532  {
9533  value->attrid = -1;
9534  }
9535  else
9536  {
9537  value->attrid = *attrids++;
9538  }
9539  value->state = HEAP_UNINIT_ATTRVALUE;
9540  value->do_increment = 0;
9541  value->last_attrepr = NULL;
9542  value->read_attrepr = NULL;
9543  }
9544 
9545  /*
9546  * Recache the last-representation information for each individual attribute
9547  * value. Needed for WRITE and default values.
9548  */
9549 
9550  if (heap_attrinfo_recache_attrepr (attr_info, true) != NO_ERROR)
9551  {
9552  goto exit_on_error;
9553  }
9554 
9555  return ret;
9556 
9557 exit_on_error:
9558 
9559  heap_attrinfo_end (thread_p, attr_info);
9560 
9561  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
9562 }
9563 
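/*
 * Illustrative usage sketch (not part of the original source): the typical
 * attrinfo life cycle for reading two attributes of one instance. Assumes
 * thread_p, class_oid, inst_oid, a peeked RECDES recdes and an ATTR_ID
 * att_ids[2] array are available; the scan cache argument is omitted (NULL)
 * and error handling is abbreviated.
 *
 *   HEAP_CACHE_ATTRINFO attr_info;
 *   DB_VALUE *valp;
 *
 *   if (heap_attrinfo_start (thread_p, &class_oid, 2, att_ids, &attr_info) != NO_ERROR)
 *     {
 *       ... give up ...
 *     }
 *   if (heap_attrinfo_read_dbvalues (thread_p, &inst_oid, &recdes, NULL, &attr_info) == NO_ERROR)
 *     {
 *       valp = heap_attrinfo_access (att_ids[0], &attr_info);
 *       ... valp points into attr_info and is released by heap_attrinfo_end ...
 *     }
 *   heap_attrinfo_end (thread_p, &attr_info);
 */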
9564 #if 0 /* TODO: remove unused */
9565 /*
9566  * heap_moreattr_attrinfo () - Add another attribute to the attribute information
9567  * cache
9568  * return: NO_ERROR
9569  * attrid(in): The information of the attribute that will be needed
9570  * attr_info(in/out): The attribute information structure
9571  *
9572  * Note: The given attribute is included as part of the reading or
9573  * transformation process.
9574  */
9575 static int
9576 heap_moreattr_attrinfo (int attrid, HEAP_CACHE_ATTRINFO * attr_info)
9577 {
9578  HEAP_ATTRVALUE *new_values; /* The new value attribute array */
9579  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
9580  int i;
9581  int ret = NO_ERROR;
9582 
9583  /*
9584  * If we get an empty HEAP_CACHE_ATTRINFO, this is an error. We can
9585  * not add more attributes to an improperly initialized HEAP_CACHE_ATTRINFO
9586  * structure.
9587  */
9588  if (attr_info->num_values == -1)
9589  {
9590  return ER_FAILED;
9591  }
9592 
9593  /*
9594  * Make sure that the attribute is not already included
9595  */
9596  for (i = 0; i < attr_info->num_values; i++)
9597  {
9598  value = &attr_info->values[i];
9599  if (value != NULL && value->attrid == attrid)
9600  {
9601  return NO_ERROR;
9602  }
9603  }
9604 
9605  /*
9606  * Resize the value attribute array and set the attribute identifier as
9607  * part of the desired attribute list
9608  */
9609  i = (attr_info->num_values + 1) * sizeof (*(attr_info->values));
9610 
9611  new_values = (HEAP_ATTRVALUE *) db_private_realloc (NULL, attr_info->values, i);
9612  if (new_values == NULL)
9613  {
9614  goto exit_on_error;
9615  }
9616 
9617  attr_info->values = new_values;
9618 
9619  value = &attr_info->values[attr_info->num_values];
9620  value->attrid = attrid;
9621  value->state = HEAP_UNINIT_ATTRVALUE;
9622  value->last_attrepr = NULL;
9623  value->read_attrepr = NULL;
9624  attr_info->num_values++;
9625 
9626  /*
9627  * Recache attribute representation and get default value specifications
9628  * for new attribute. The default values are located on the last
9629  * representation
9630  */
9631 
9632  if (heap_attrinfo_recache_attrepr (attr_info, true) != NO_ERROR
9633  || db_value_domain_init (&value->dbvalue, value->read_attrepr->type, value->read_attrepr->domain->precision,
9634  value->read_attrepr->domain->scale) != NO_ERROR)
9635  {
9636  attr_info->num_values--;
9637  value->attrid = -1;
9638  goto exit_on_error;
9639  }
9640 
9641 end:
9642 
9643  return ret;
9644 
9645 exit_on_error:
9646 
9647  assert (ret != NO_ERROR);
9648  if (ret == NO_ERROR)
9649  {
9650  assert (er_errid () != NO_ERROR);
9651  ret = er_errid ();
9652  if (ret == NO_ERROR)
9653  {
9654  ret = ER_FAILED;
9655  }
9656  }
9657  goto end;
9658 }
9659 #endif
9660 
9661 /*
9662  * heap_attrinfo_recache_attrepr () - Recache attribute information for given attrinfo for
9663  * each attribute value
9664  * return: NO_ERROR
9665  * attr_info(in/out): The attribute information structure
9666  * islast_reset(in): Are we resetting information for last representation.
9667  *
9668  * Note: Recache the attribute information for given representation
9669  * identifier of the class in attr_info for each attribute value.
9670  * That is, set each attribute information to point to disk
9671  * related attribute information for given representation
9672  * identifier.
9673  * When we are resetting information for last representation,
9674  * attribute values are also initialized.
9675  */
9676 
9677 static int
9678 heap_attrinfo_recache_attrepr (HEAP_CACHE_ATTRINFO * attr_info, bool islast_reset)
9679 {
9680  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
9681  int num_found_attrs; /* Num of found attributes */
9682  int srch_num_attrs; /* Num of attributes that can be searched */
9683  int srch_num_shared; /* Num of shared attrs that can be searched */
9684  int srch_num_class; /* Num of class attrs that can be searched */
9685  OR_ATTRIBUTE *search_attrepr; /* Information for disk attribute */
9686  int i, curr_attr;
9687  bool isattr_found;
9688  int ret = NO_ERROR;
9689 
9690  /*
9691  * Initialize the value domain for dbvalues of all desired attributes
9692  */
9693  if (islast_reset == true)
9694  {
9695  srch_num_attrs = attr_info->last_classrepr->n_attributes;
9696  }
9697  else
9698  {
9699  srch_num_attrs = attr_info->read_classrepr->n_attributes;
9700  }
9701 
9702  /* shared and class attributes must always use the latest representation */
9703  srch_num_shared = attr_info->last_classrepr->n_shared_attrs;
9704  srch_num_class = attr_info->last_classrepr->n_class_attrs;
9705 
9706  for (num_found_attrs = 0, curr_attr = 0; curr_attr < attr_info->num_values; curr_attr++)
9707  {
9708  /*
9709  * Go over the list of attributes (instance, shared, and class attrs)
9710  * until the desired attribute is found
9711  */
9712  isattr_found = false;
9713  if (islast_reset == true)
9714  {
9715  search_attrepr = attr_info->last_classrepr->attributes;
9716  }
9717  else
9718  {
9719  search_attrepr = attr_info->read_classrepr->attributes;
9720  }
9721 
9722  value = &attr_info->values[curr_attr];
9723 
9724  if (value->attrid == -1)
9725  {
9726  /* Case that we want all attributes */
9727  value->attrid = search_attrepr[curr_attr].id;
9728  }
9729 
9730  for (i = 0; isattr_found == false && i < srch_num_attrs; i++, search_attrepr++)
9731  {
9732  /*
9733  * Is this a desired instance attribute?
9734  */
9735  if (value->attrid == search_attrepr->id)
9736  {
9737  /*
9738  * Found it.
9739  * Initialize the attribute value information
9740  */
9741  isattr_found = true;
9742  value->attr_type = HEAP_INSTANCE_ATTR;
9743  if (islast_reset == true)
9744  {
9745  value->last_attrepr = search_attrepr;
9746  /*
9747  * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
9748  */
9749  if (value->last_attrepr->type == DB_TYPE_OBJECT)
9750  {
9751  value->last_attrepr->type = DB_TYPE_OID;
9752  }
9753 
9754  if (value->state == HEAP_UNINIT_ATTRVALUE)
9755  {
9756  db_value_domain_init (&value->dbvalue, value->last_attrepr->type,
9757  value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
9758  }
9759  }
9760  else
9761  {
9762  value->read_attrepr = search_attrepr;
9763  /*
9764  * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
9765  */
9766  if (value->read_attrepr->type == DB_TYPE_OBJECT)
9767  {
9768  value->read_attrepr->type = DB_TYPE_OID;
9769  }
9770  }
9771 
9772  num_found_attrs++;
9773  }
9774  }
9775 
9776  /*
9777  * if the desired attribute was not found in the instance attributes,
9778  * look for it in the shared attributes. We always use the last_repr
9779  * for shared attributes.
9780  */
9781 
9782  for (i = 0, search_attrepr = attr_info->last_classrepr->shared_attrs;
9783  isattr_found == false && i < srch_num_shared; i++, search_attrepr++)
9784  {
9785  /*
9786  * Is this a desired shared attribute?
9787  */
9788  if (value->attrid == search_attrepr->id)
9789  {
9790  /*
9791  * Found it.
9792  * Initialize the attribute value information
9793  */
9794  isattr_found = true;
9795  value->attr_type = HEAP_SHARED_ATTR;
9796  value->last_attrepr = search_attrepr;
9797  /*
9798  * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
9799  */
9800  if (value->last_attrepr->type == DB_TYPE_OBJECT)
9801  {
9802  value->last_attrepr->type = DB_TYPE_OID;
9803  }
9804 
9805  if (value->state == HEAP_UNINIT_ATTRVALUE)
9806  {
9807  db_value_domain_init (&value->dbvalue, value->last_attrepr->type,
9808  value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
9809  }
9810  num_found_attrs++;
9811  }
9812  }
9813 
9814  /*
9815  * if the desired attribute was not found in the instance/shared attrs,
9816  * look for it in the class attributes. We always use the last_repr
9817  * for class attributes.
9818  */
9819 
9820  for (i = 0, search_attrepr = attr_info->last_classrepr->class_attrs; isattr_found == false && i < srch_num_class;
9821  i++, search_attrepr++)
9822  {
9823  /*
9824  * Is this a desired class attribute?
9825  */
9826 
9827  if (value->attrid == search_attrepr->id)
9828  {
9829  /*
9830  * Found it.
9831  * Initialize the attribute value information
9832  */
9833  isattr_found = true;
9834  value->attr_type = HEAP_CLASS_ATTR;
9835  if (islast_reset == true)
9836  {
9837  value->last_attrepr = search_attrepr;
9838  }
9839  else
9840  {
9841  value->read_attrepr = search_attrepr;
9842  }
9843  /*
9844  * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
9845  */
9846  if (value->last_attrepr->type == DB_TYPE_OBJECT)
9847  {
9848  value->last_attrepr->type = DB_TYPE_OID;
9849  }
9850 
9851  if (value->state == HEAP_UNINIT_ATTRVALUE)
9852  {
9853  db_value_domain_init (&value->dbvalue, value->last_attrepr->type,
9854  value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
9855  }
9856  num_found_attrs++;
9857  }
9858  }
9859  }
9860 
9861  if (num_found_attrs != attr_info->num_values && islast_reset == true)
9862  {
9863  ret = ER_HEAP_UNKNOWN_ATTRS;
9864  er_set (ER_FATAL_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, attr_info->num_values - num_found_attrs);
9865  goto exit_on_error;
9866  }
9867 
9868  return ret;
9869 
9870 exit_on_error:
9871 
9872  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
9873 }
9874 
9875 /*
9876  * heap_attrinfo_recache () - Recache attribute information for given attrinfo
9877  * return: NO_ERROR
9878  * reprid(in): Cache this class representation
9879  * attr_info(in/out): The attribute information structure
9880  *
9881  * Note: Recache the attribute information for given representation
9882  * identifier of the class in attr_info. That is, set each
9883  * attribute information to point to disk related attribute
9884  * information for given representation identifier.
9885  */
9886 static int
9887 heap_attrinfo_recache (THREAD_ENTRY * thread_p, REPR_ID reprid, HEAP_CACHE_ATTRINFO * attr_info)
9888 {
9889  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
9890  int i;
9891  int ret = NO_ERROR;
9892 
9893  /*
9894  * If we do not need to cache anything (the case of only clearing values and
9895  * the disk repr structure), return.
9896  */
9897 
9898  if (attr_info->read_classrepr != NULL)
9899  {
9900  if (attr_info->read_classrepr->id == reprid)
9901  {
9902  return NO_ERROR;
9903  }
9904 
9905  /*
9906  * Do we need to free the current cached disk representation ?
9907  */
9908  if (attr_info->read_classrepr != attr_info->last_classrepr)
9909  {
9910  heap_classrepr_free_and_init (attr_info->read_classrepr, &attr_info->read_cacheindex);
9911  }
9912  attr_info->read_classrepr = NULL;
9913  }
9914 
9915  if (reprid == NULL_REPRID)
9916  {
9917  return NO_ERROR;
9918  }
9919 
9920  if (reprid == attr_info->last_classrepr->id)
9921  {
9922  /*
9923  * Take a short cut
9924  */
9925  if (attr_info->values != NULL)
9926  {
9927  for (i = 0; i < attr_info->num_values; i++)
9928  {
9929  value = &attr_info->values[i];
9930  value->read_attrepr = value->last_attrepr;
9931  }
9932  }
9933  attr_info->read_classrepr = attr_info->last_classrepr;
9934  attr_info->read_cacheindex = -1; /* Don't need to free this one */
9935  return NO_ERROR;
9936  }
9937 
9938  /*
9939  * Cache the desired class representation information
9940  */
9941  if (attr_info->values != NULL)
9942  {
9943  for (i = 0; i < attr_info->num_values; i++)
9944  {
9945  value = &attr_info->values[i];
9946  value->read_attrepr = NULL;
9947  }
9948  }
9949  attr_info->read_classrepr =
9950  heap_classrepr_get (thread_p, &attr_info->class_oid, NULL, reprid, &attr_info->read_cacheindex);
9951  if (attr_info->read_classrepr == NULL)
9952  {
9953  goto exit_on_error;
9954  }
9955 
9956  if (heap_attrinfo_recache_attrepr (attr_info, false) != NO_ERROR)
9957  {
9958  heap_classrepr_free_and_init (attr_info->read_classrepr, &attr_info->read_cacheindex);
9959 
9960  goto exit_on_error;
9961  }
9962 
9963  return ret;
9964 
9965 exit_on_error:
9966 
9967  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
9968 }
9969 
9970 /*
9971  * heap_attrinfo_end () - Done with attribute information structure
9972  * return: void
9973  * attr_info(in/out): The attribute information structure
9974  *
9975  * Note: Release any memory allocated for attribute information related
9976  * reading of instances.
9977  */
9978 void
9979 heap_attrinfo_end (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info)
9980 {
9981  int ret = NO_ERROR;
9982 
9983  /* check to make sure the attr_info has been used */
9984  if (attr_info->num_values == -1)
9985  {
9986  return;
9987  }
9988 
9989  /*
9990  * Free any attribute and class representation information
9991  */
9992  ret = heap_attrinfo_clear_dbvalues (attr_info);
9993  ret = heap_attrinfo_recache (thread_p, NULL_REPRID, attr_info);
9994 
9995  if (attr_info->last_classrepr != NULL)
9996  {
9997  heap_classrepr_free_and_init (attr_info->last_classrepr, &attr_info->last_cacheindex);
9998  }
9999 
10000  if (attr_info->values)
10001  {
10002  db_private_free_and_init (thread_p, attr_info->values);
10003  }
10004  OID_SET_NULL (&attr_info->class_oid);
10005 
10006  /*
10007  * Bash this so that we ensure that heap_attrinfo_end is idempotent.
10008  */
10009  attr_info->num_values = -1;
10010 
10011 }
10012 
10013 /*
10014  * heap_attrinfo_clear_dbvalues () - Clear current dbvalues of attribute
10015  * information
10016  * return: NO_ERROR
10017  * attr_info(in/out): The attribute information structure
10018  *
10019  * Note: Clear any current dbvalues associated with attribute information.
10020  */
10021 int
10022 heap_attrinfo_clear_dbvalues (HEAP_CACHE_ATTRINFO * attr_info)
10023 {
10024  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10025  OR_ATTRIBUTE *attrepr; /* Which one current repr of default one */
10026  int i;
10027  int ret = NO_ERROR;
10028 
10029  /* check to make sure the attr_info has been used */
10030  if (attr_info->num_values == -1)
10031  {
10032  return NO_ERROR;
10033  }
10034 
10035  if (attr_info->values != NULL)
10036  {
10037  for (i = 0; i < attr_info->num_values; i++)
10038  {
10039  value = &attr_info->values[i];
10040  if (value->state != HEAP_UNINIT_ATTRVALUE)
10041  {
10042  /*
10043  * Was the value set up from a default value or from a representation
10044  * of the object
10045  */
10046  attrepr = ((value->read_attrepr != NULL) ? value->read_attrepr : value->last_attrepr);
10047  if (attrepr != NULL)
10048  {
10049  if (pr_clear_value (&value->dbvalue) != NO_ERROR)
10050  {
10051  ret = ER_FAILED;
10052  }
10053  value->state = HEAP_UNINIT_ATTRVALUE;
10054  }
10055  }
10056  }
10057  }
10058  OID_SET_NULL (&attr_info->inst_oid);
10059  attr_info->inst_chn = NULL_CHN;
10060 
10061  return ret;
10062 }
10063 
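/*
 * Illustrative usage sketch (not part of the original source): reusing one
 * started attrinfo across several instances of the same class and releasing
 * the cached dbvalues between reads. Assumes attr_info was started earlier and
 * thread_p, oids[], recdes, i and n are available.
 *
 *   for (i = 0; i < n; i++)
 *     {
 *       if (heap_attrinfo_read_dbvalues (thread_p, &oids[i], &recdes, NULL, &attr_info) == NO_ERROR)
 *         {
 *           ... consume the values for oids[i] ...
 *         }
 *       (void) heap_attrinfo_clear_dbvalues (&attr_info);
 *     }
 */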
10064 /*
10065  * heap_attrvalue_read () - Read attribute information of given attribute cache
10066  * and instance
10067  * return: NO_ERROR
10068  * recdes(in): Instance record descriptor
10069  * value(in): Disk value attribute information
10070  * attr_info(in/out): The attribute information structure
10071  *
10072  * Note: Read the dbvalue of the given value attribute information.
10073  */
10074 static int
10075 heap_attrvalue_read (RECDES * recdes, HEAP_ATTRVALUE * value, HEAP_CACHE_ATTRINFO * attr_info)
10076 {
10077  OR_BUF buf;
10078  PR_TYPE *pr_type; /* Primitive type array function structure */
10079  OR_ATTRIBUTE *volatile attrepr;
10080  char *disk_data = NULL;
10081  int disk_bound = false;
10082  volatile int disk_length = -1;
10083  int ret = NO_ERROR;
10084 
10085  /* Initialize disk value information */
10086  disk_data = NULL;
10087  disk_bound = false;
10088  disk_length = -1;
10089 
10090  /*
10091  * Does attribute exist in this disk representation?
10092  */
10093 
10094  if (recdes == NULL || recdes->data == NULL || value->read_attrepr == NULL || value->attr_type == HEAP_SHARED_ATTR
10095  || value->attr_type == HEAP_CLASS_ATTR)
10096  {
10097  /*
10098  * Either the attribute is a shared or class attr, or the attribute
10099  * does not exist in this disk representation, or we do not have
10100  * the disk object (recdes), get default value if any...
10101  */
10102  attrepr = value->last_attrepr;
10103  disk_length = value->last_attrepr->default_value.val_length;
10104  if (disk_length > 0)
10105  {
10106  disk_data = (char *) value->last_attrepr->default_value.value;
10107  disk_bound = true;
10108  }
10109  }
10110  else
10111  {
10112  attrepr = value->read_attrepr;
10113  /* Is it a fixed size attribute ? */
10114  if (value->read_attrepr->is_fixed != 0)
10115  {
10116  /*
10117  * A fixed attribute.
10118  */
10119  if (!OR_FIXED_ATT_IS_UNBOUND (recdes->data, attr_info->read_classrepr->n_variable,
10120  attr_info->read_classrepr->fixed_length, value->read_attrepr->position))
10121  {
10122  /*
10123  * The fixed attribute is bound. Access its information
10124  */
10125  disk_data =
10126  ((char *) recdes->data
10127  + OR_FIXED_ATTRIBUTES_OFFSET_BY_OBJ (recdes->data,
10128  attr_info->read_classrepr->n_variable)
10129  + value->read_attrepr->location);
10130  disk_length = tp_domain_disk_size (value->read_attrepr->domain);
10131  disk_bound = true;
10132  }
10133  }
10134  else
10135  {
10136  /*
10137  * A variable attribute
10138  */
10139  if (!OR_VAR_IS_NULL (recdes->data, value->read_attrepr->location))
10140  {
10141  /*
10142  * The variable attribute is bound.
10143  * Find its location through the variable offset attribute table.
10144  */
10145  disk_data = ((char *) recdes->data + OR_VAR_OFFSET (recdes->data, value->read_attrepr->location));
10146 
10147  disk_bound = true;
10148  switch (TP_DOMAIN_TYPE (attrepr->domain))
10149  {
10150  case DB_TYPE_BLOB:
10151  case DB_TYPE_CLOB:
10152  case DB_TYPE_SET: /* it may be just a little bit fast */
10153  case DB_TYPE_MULTISET:
10154  case DB_TYPE_SEQUENCE:
10155  OR_VAR_LENGTH (disk_length, recdes->data, value->read_attrepr->location,
10156  attr_info->read_classrepr->n_variable);
10157  break;
10158  default:
10159  disk_length = -1; /* the remaining types can be read without disk_length */
10160  }
10161  }
10162  }
10163  }
10164 
10165  /*
10166  * From now on, I should only use attrepr; it will point to either
10167  * a current value or a default one
10168  */
10169 
10170  /*
10171  * Clear/decache any old value
10172  */
10173  if (value->state != HEAP_UNINIT_ATTRVALUE)
10174  {
10175  (void) pr_clear_value (&value->dbvalue);
10176  }
10177 
10178  /*
10179  * Now make the dbvalue according to the disk data value
10180  */
10181 
10182  if (disk_data == NULL || disk_bound == false)
10183  {
10184  /* Unbound attribute, set it to null value */
10185  ret = db_value_domain_init (&value->dbvalue, attrepr->type, attrepr->domain->precision, attrepr->domain->scale);
10186  if (ret != NO_ERROR)
10187  {
10188  goto exit_on_error;
10189  }
10190  value->state = HEAP_READ_ATTRVALUE;
10191  }
10192  else
10193  {
10194  /*
10195  * Read the value according to disk information that was found
10196  */
10197  OR_BUF_INIT2 (buf, disk_data, disk_length);
10198  buf.error_abort = 1;
10199 
10200  switch (_setjmp (buf.env))
10201  {
10202  case 0:
10203  /* Do not copy the string--just use the pointer. The pr_ routines for strings and sets have different
10204  * semantics for length. A negative length value for strings means "don't copy the string, just use the
10205  * pointer". For sets, don't translate the set into memory representation at this time. It will only be
10206  * translated when needed. */
10207  pr_type = pr_type_from_id (attrepr->type);
10208  if (pr_type)
10209  {
10210  pr_type->data_readval (&buf, &value->dbvalue, attrepr->domain, disk_length, false, NULL, 0);
10211  }
10212  value->state = HEAP_READ_ATTRVALUE;
10213  break;
10214  default:
10215  /*
10216  * An error was found during the reading of the attribute value
10217  */
10218  (void) db_value_domain_init (&value->dbvalue, attrepr->type, attrepr->domain->precision,
10219  attrepr->domain->scale);
10220  value->state = HEAP_UNINIT_ATTRVALUE;
10221  ret = ER_FAILED;
10222  break;
10223  }
10224  }
10225 
10226  return ret;
10227 
10228 exit_on_error:
10229 
10230  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
10231 }
10232 
10233 /*
10234  * heap_midxkey_get_value () -
10235  * return:
10236  * recdes(in):
10237  * att(in):
10238  * value(out):
10239  * attr_info(in):
10240  */
10241 static int
10242 heap_midxkey_get_value (RECDES * recdes, OR_ATTRIBUTE * att, DB_VALUE * value, HEAP_CACHE_ATTRINFO * attr_info)
10243 {
10244  char *disk_data = NULL;
10245  bool found = true; /* Does attribute(att) exist in this disk representation? */
10246  int i;
10247 
10248  /* Initialize disk value information */
10249  disk_data = NULL;
10250  db_make_null (value);
10251 
10252  if (recdes != NULL && recdes->data != NULL && att != NULL)
10253  {
10254  if (or_rep_id (recdes) != attr_info->last_classrepr->id)
10255  {
10256  found = false;
10257  for (i = 0; i < attr_info->read_classrepr->n_attributes; i++)
10258  {
10259  if (attr_info->read_classrepr->attributes[i].id == att->id)
10260  {
10261  att = &attr_info->read_classrepr->attributes[i];
10262  found = true;
10263  break;
10264  }
10265  }
10266  }
10267 
10268  if (found == false)
10269  {
10270  /* It means that the representation has an attribute which was created after insertion of the record. In this
10271  * case, return the default value of the attribute if it exists. */
10272  if (att->default_value.val_length > 0)
10273  {
10274  disk_data = (char *) att->default_value.value;
10275  }
10276  }
10277  else
10278  {
10279  /* Is it a fixed size attribute ? */
10280  if (att->is_fixed != 0)
10281  { /* A fixed attribute. */
10282  if (!OR_FIXED_ATT_IS_UNBOUND (recdes->data, attr_info->read_classrepr->n_variable,
10283  attr_info->read_classrepr->fixed_length, att->position))
10284  {
10285  /* The fixed attribute is bound. Access its information */
10286  disk_data =
10287  ((char *) recdes->data +
10288  OR_FIXED_ATTRIBUTES_OFFSET_BY_OBJ (recdes->data,
10289  attr_info->read_classrepr->n_variable) + att->location);
10290  }
10291  }
10292  else
10293  { /* A variable attribute */
10294  if (!OR_VAR_IS_NULL (recdes->data, att->location))
10295  {
10296  /* The variable attribute is bound. Find its location through the variable offset attribute table. */
10297  disk_data = ((char *) recdes->data + OR_VAR_OFFSET (recdes->data, att->location));
10298  }
10299  }
10300  }
10301  }
10302  else
10303  {
10304  assert (0);
10305  return ER_FAILED;
10306  }
10307 
10308  if (disk_data != NULL)
10309  {
10310  OR_BUF buf;
10311 
10312  or_init (&buf, disk_data, -1);
10313  att->domain->type->data_readval (&buf, value, att->domain, -1, false, NULL, 0);
10314  }
10315 
10316  return NO_ERROR;
10317 }
10318 
10319 /*
10320  * heap_attrinfo_read_dbvalues () - Find db_values of desired attributes of given
10321  * instance
10322  * return: NO_ERROR
10323  * inst_oid(in): The instance oid
10324  * recdes(in): The instance Record descriptor
10325  * attr_info(in/out): The attribute information structure which describes the
10326  * desired attributes
10327  *
10328  * Note: Find DB_VALUES of desired attributes of given instance.
10329  * The attr_info structure must have already been initialized
10330  * with the desired attributes.
10331  *
10332  * If the inst_oid and the recdes are NULL, then we must be
10333  * reading only shared and/or class attributes which are found
10334  * in the last representation.
10335  */
10336 int
10337 heap_attrinfo_read_dbvalues (THREAD_ENTRY * thread_p, const OID * inst_oid, RECDES * recdes,
10338  HEAP_SCANCACHE * scan_cache, HEAP_CACHE_ATTRINFO * attr_info)
10339 {
10340  int i;
10341  REPR_ID reprid; /* The disk representation of the object */
10342  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10343  int ret = NO_ERROR;
10344 
10345  /* check to make sure the attr_info has been used */
10346  if (attr_info->num_values == -1)
10347  {
10348  return NO_ERROR;
10349  }
10350 
10351  /*
10352  * Make sure that we have the needed cached representation.
10353  */
10354 
10355  if (inst_oid != NULL && recdes != NULL && recdes->data != NULL)
10356  {
10357  reprid = or_rep_id (recdes);
10358 
10359  if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
10360  {
10361  /* Get the needed representation */
10362  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
10363  if (ret != NO_ERROR)
10364  {
10365  goto exit_on_error;
10366  }
10367  }
10368  }
10369 
10370  /*
10371  * Go over each attribute and read it
10372  */
10373 
10374  for (i = 0; i < attr_info->num_values; i++)
10375  {
10376  value = &attr_info->values[i];
10377  ret = heap_attrvalue_read (recdes, value, attr_info);
10378  if (ret != NO_ERROR)
10379  {
10380  goto exit_on_error;
10381  }
10382  }
10383 
10384  /*
10385  * Cache the information of the instance
10386  */
10387  if (inst_oid != NULL && recdes != NULL && recdes->data != NULL)
10388  {
10389  attr_info->inst_chn = or_chn (recdes);
10390  attr_info->inst_oid = *inst_oid;
10391  }
10392 
10393  return ret;
10394 
10395 exit_on_error:
10396 
10397  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
10398 }
10399 
10400 int
10402 {
10403  int i;
10404  REPR_ID reprid; /* The disk representation of the object */
10405  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10406  int ret = NO_ERROR;
10407 
10408  /* check to make sure the attr_info has been used */
10409  if (attr_info->num_values == -1)
10410  {
10411  return NO_ERROR;
10412  }
10413 
10414  /*
10415  * Make sure that we have the needed cached representation.
10416  */
10417 
10418  if (recdes != NULL)
10419  {
10420  reprid = or_rep_id (recdes);
10421 
10422  if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
10423  {
10424  /* Get the needed representation */
10425  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
10426  if (ret != NO_ERROR)
10427  {
10428  goto exit_on_error;
10429  }
10430  }
10431  }
10432 
10433  /*
10434  * Go over each attribute and read it
10435  */
10436 
10437  for (i = 0; i < attr_info->num_values; i++)
10438  {
10439  value = &attr_info->values[i];
10440  ret = heap_attrvalue_read (recdes, value, attr_info);
10441  if (ret != NO_ERROR)
10442  {
10443  goto exit_on_error;
10444  }
10445  }
10446 
10447  return ret;
10448 
10449 exit_on_error:
10450 
10451  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
10452 }
10453 
10454 /*
10455  * heap_attrinfo_delete_lob ()
10456  * return: NO_ERROR
10457  * thread_p(in):
10458  * recdes(in): The instance Record descriptor
10459  * attr_info(in): The attribute information structure which describes the
10460  * desired attributes
10461  *
10462  */
10463 int
10464 heap_attrinfo_delete_lob (THREAD_ENTRY * thread_p, RECDES * recdes, HEAP_CACHE_ATTRINFO * attr_info)
10465 {
10466  int i;
10467  HEAP_ATTRVALUE *value;
10468  int ret = NO_ERROR;
10469 
10470  assert (attr_info != NULL);
10471  assert (attr_info->num_values > 0);
10472 
10473  /*
10474  * Make sure that we have the needed cached representation.
10475  */
10476 
10477  if (recdes != NULL)
10478  {
10479  REPR_ID reprid;
10480  reprid = or_rep_id (recdes);
10481  if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
10482  {
10483  /* Get the needed representation */
10484  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
10485  if (ret != NO_ERROR)
10486  {
10487  goto exit_on_error;
10488  }
10489  }
10490  }
10491 
10492  /*
10493  * Go over each attribute and delete the data if it's lob type
10494  */
10495 
10496  for (i = 0; i < attr_info->num_values; i++)
10497  {
10498  value = &attr_info->values[i];
10499  if (value->last_attrepr->type == DB_TYPE_BLOB || value->last_attrepr->type == DB_TYPE_CLOB)
10500  {
10501  if (value->state == HEAP_UNINIT_ATTRVALUE && recdes != NULL)
10502  {
10503  ret = heap_attrvalue_read (recdes, value, attr_info);
10504  if (ret != NO_ERROR)
10505  {
10506  goto exit_on_error;
10507  }
10508  }
10509  if (!db_value_is_null (&value->dbvalue))
10510  {
10511  DB_ELO *elo;
10512  assert (db_value_type (&value->dbvalue) == DB_TYPE_BLOB
10513  || db_value_type (&value->dbvalue) == DB_TYPE_CLOB);
10514  elo = db_get_elo (&value->dbvalue);
10515  if (elo)
10516  {
10517  ret = db_elo_delete (elo);
10518  }
10519  value->state = HEAP_WRITTEN_ATTRVALUE;
10520  }
10521  }
10522  }
10523 
10524  return ret;
10525 
10526 exit_on_error:
10527 
10528  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
10529 }
10530 
10531 /*
10532  * heap_attrinfo_dump () - Dump value of attribute information
10533  * return:
10534  * attr_info(in): The attribute information structure
10535  * dump_schema(in):
10536  *
10537  * Note: Dump attribute value of given attribute information.
10538  */
10539 void
10540 heap_attrinfo_dump (THREAD_ENTRY * thread_p, FILE * fp, HEAP_CACHE_ATTRINFO * attr_info, bool dump_schema)
10541 {
10542  int i;
10543  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10544  int ret = NO_ERROR;
10545 
10546  /* check to make sure the attr_info has been used */
10547  if (attr_info->num_values == -1)
10548  {
10549  fprintf (fp, " Empty attrinfo\n");
10550  return;
10551  }
10552 
10553  /*
10554  * Dump attribute schema information
10555  */
10556 
10557  if (dump_schema == true)
10558  {
10559  ret = heap_classrepr_dump (thread_p, fp, &attr_info->class_oid, attr_info->read_classrepr);
10560  }
10561 
10562  for (i = 0; i < attr_info->num_values; i++)
10563  {
10564  value = &attr_info->values[i];
10565  fprintf (fp, " Attrid = %d, state = %d, type = %s\n", value->attrid, value->state,
10566  pr_type_name (value->read_attrepr->type));
10567  /*
10568  * Dump the value in memory format
10569  */
10570 
10571  fprintf (fp, " Memory_value_format:\n");
10572  fprintf (fp, " value = ");
10573  db_fprint_value (fp, &value->dbvalue);
10574  fprintf (fp, "\n\n");
10575  }
10576 
10577 }
10578 
10579 /*
10580  * heap_attrvalue_locate () - Locate disk attribute value information
10581  * return: attrvalue or NULL
10582  * attrid(in): The desired attribute identifier
10583  * attr_info(in/out): The attribute information structure which describes the
10584  * desired attributes
10585  *
10586  * Note: Locate the disk attribute value information of an attribute
10587  * information structure which has already been initialized.
10588  */
10589 HEAP_ATTRVALUE *
10590 heap_attrvalue_locate (ATTR_ID attrid, HEAP_CACHE_ATTRINFO * attr_info)
10591 {
10592  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10593  int i;
10594 
10595  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
10596  {
10597  if (attrid == value->attrid)
10598  {
10599  return value;
10600  }
10601  }
10602 
10603  return NULL;
10604 }
10605 
10606 /*
10607  * heap_locate_attribute () -
10608  * return:
10609  * attrid(in):
10610  * attr_info(in):
10611  */
10612 static OR_ATTRIBUTE *
10613 heap_locate_attribute (ATTR_ID attrid, HEAP_CACHE_ATTRINFO * attr_info)
10614 {
10615  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10616  int i;
10617 
10618  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
10619  {
10620  if (attrid == value->attrid)
10621  {
10622  /* Some altered attributes might have only the last representations of them. */
10623  return (value->read_attrepr != NULL) ? value->read_attrepr : value->last_attrepr;
10624  }
10625  }
10626 
10627  return NULL;
10628 }
10629 
10630 /*
10631  * heap_locate_last_attrepr () -
10632  * return:
10633  * attrid(in):
10634  * attr_info(in):
10635  */
10636 OR_ATTRIBUTE *
10637 heap_locate_last_attrepr (ATTR_ID attrid, HEAP_CACHE_ATTRINFO * attr_info)
10638 {
10639  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10640  int i;
10641 
10642  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
10643  {
10644  if (attrid == value->attrid)
10645  {
10646  return value->last_attrepr;
10647  }
10648  }
10649 
10650  return NULL;
10651 }
10652 
10653 /*
10654  * heap_attrinfo_access () - Access an attribute value which has been already read
10655  * return:
10656  * attrid(in): The desired attribute identifier
10657  * attr_info(in/out): The attribute information structure which describes the
10658  * desired attributes
10659  *
10660  * Note: Find DB_VALUE of desired attribute identifier.
10661  * The dbvalue attributes must have been read by now using the
10662  * function heap_attrinfo_read_dbvalues ()
10663  */
10664 DB_VALUE *
10665 heap_attrinfo_access (ATTR_ID attrid, HEAP_CACHE_ATTRINFO * attr_info)
10666 {
10667  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10668 
10669  /* check to make sure the attr_info has been used */
10670  if (attr_info->num_values == -1)
10671  {
10672  return NULL;
10673  }
10674 
10675  value = heap_attrvalue_locate (attrid, attr_info);
10676  if (value == NULL || value->state == HEAP_UNINIT_ATTRVALUE)
10677  {
10678  er_log_debug (ARG_FILE_LINE, "heap_attrinfo_access: Unknown attrid = %d", attrid);
10680  return NULL;
10681  }
10682 
10683  return &value->dbvalue;
10684 }
10685 
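/*
 * Illustrative usage sketch (not part of the original source): accessing a value
 * that was read earlier and guarding against unknown or unread attributes.
 * Assumes attr_info has already gone through heap_attrinfo_read_dbvalues () and
 * attrid is an attribute id of interest.
 *
 *   DB_VALUE *valp = heap_attrinfo_access (attrid, &attr_info);
 *
 *   if (valp == NULL || DB_IS_NULL (valp))
 *     {
 *       ... no usable value for this attribute ...
 *     }
 */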
10686 /*
10687  * heap_get_class_subclasses () - get OIDs of subclasses for a given class
10688  * return : error code or NO_ERROR
10689  * thread_p (in) :
10690  * class_oid (in) : OID of the parent class
10691  * count (out) : size of the subclasses array
10692  * subclasses (out) : array containing OIDs of subclasses
10693  *
10694  * Note: The subclasses array is maintained as an array of OID's,
10695  * the last element in the array will satisfy the OID_ISNULL() test.
10696  * The count output holds the number of elements allocated in the
10697  * array, which may be more than the number of slots that have non-NULL
10698  * OIDs. The function adds the subclass oids to the existing array.
10699  * If the array is not large enough, it is reallocated using realloc.
10700  */
10701 int
10702 heap_get_class_subclasses (THREAD_ENTRY * thread_p, const OID * class_oid, int *count, OID ** subclasses)
10703 {
10704  HEAP_SCANCACHE scan_cache;
10705  RECDES recdes;
10706  int error = NO_ERROR;
10707 
10708  error = heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
10709  if (error != NO_ERROR)
10710  {
10711  return error;
10712  }
10713 
10714  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
10715  {
10716  heap_scancache_end (thread_p, &scan_cache);
10717  return ER_FAILED;
10718  }
10719 
10720  error = orc_subclasses_from_record (&recdes, count, subclasses);
10721 
10722  heap_scancache_end (thread_p, &scan_cache);
10723 
10724  return error;
10725 }
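/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * returned array is NULL-OID terminated and allocated by the callee, so a
 * caller typically walks it with OID_ISNULL() and releases it with
 * free_and_init(), exactly as heap_get_class_partitions() does below.
 *
 *   OID *subclasses = NULL;
 *   int count = 0, i;
 *
 *   if (heap_get_class_subclasses (thread_p, &class_oid, &count, &subclasses) == NO_ERROR
 *       && subclasses != NULL)
 *     {
 *       for (i = 0; !OID_ISNULL (&subclasses[i]); i++)
 *         {
 *           // use subclasses[i]
 *         }
 *       free_and_init (subclasses);
 *     }
 */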
10726 
10727 /*
10728  * heap_get_class_tde_algorithm () - get TDE_ALGORITHM of a given class based on the class flags
10729  * return : error code or NO_ERROR
10730  * thread_p (in) :
10731  * class_oid (in) : OID of the class
10732  * tde_algo (out) : TDE_ALGORITHM_NONE, TDE_ALGORITHM_AES, TDE_ALGORITHM_ARIA
10733  *
10734  * NOTE: this function extracts tde encryption information from class record
10735  */
10736 int
10737 heap_get_class_tde_algorithm (THREAD_ENTRY * thread_p, const OID * class_oid, TDE_ALGORITHM * tde_algo)
10738 {
10739  HEAP_SCANCACHE scan_cache;
10740  RECDES recdes;
10741  int error = NO_ERROR;
10742 
10743  assert (class_oid != NULL);
10744  assert (tde_algo != NULL);
10745 
10746  /* boot parameter heap file */
10747  if (OID_ISNULL (class_oid))
10748  {
10749  *tde_algo = TDE_ALGORITHM_NONE;
10750  return error;
10751  }
10752 
10753  error = heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
10754  if (error != NO_ERROR)
10755  {
10756  return error;
10757  }
10758 
10759  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
10760  {
10761  heap_scancache_end (thread_p, &scan_cache);
10762  return ER_FAILED;
10763  }
10764 
10765  or_class_tde_algorithm (&recdes, tde_algo);
10766 
10767  heap_scancache_end (thread_p, &scan_cache);
10768 
10769  return error;
10770 }
10771 
10772 /*
10773  * heap_class_get_partition_info () - Get partition information for the class
10774  * identified by class_oid
10775  * return : error code or NO_ERROR
10776  * class_oid (in) : class_oid
10777  * partition_info (in/out) : partition information
10778  * class_hfid (in/out) : HFID of the partitioned class
10779  * repr_id (in/out) : class representation id
10780  * has_partition_info (out):
10781  *
10782  * Note: This function extracts the partition information from a class OID.
10783  */
10784 static int
10785 heap_class_get_partition_info (THREAD_ENTRY * thread_p, const OID * class_oid, OR_PARTITION * partition_info,
10786  HFID * class_hfid, REPR_ID * repr_id, int *has_partition_info)
10787 {
10788  int error = NO_ERROR;
10789  RECDES recdes;
10790  HEAP_SCANCACHE scan_cache;
10791 
10792  assert (class_oid != NULL);
10793 
10794  if (heap_scancache_quick_start_root_hfid (thread_p, &scan_cache) != NO_ERROR)
10795  {
10796  return ER_FAILED;
10797  }
10798 
10799  scan_cache.mvcc_snapshot = logtb_get_mvcc_snapshot (thread_p);
10800  if (scan_cache.mvcc_snapshot == NULL)
10801  {
10802  error = ER_FAILED;
10803  goto cleanup;
10804  }
10805 
10806  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
10807  {
10808  error = ER_FAILED;
10809  goto cleanup;
10810  }
10811 
10812  error = or_class_get_partition_info (&recdes, partition_info, repr_id, has_partition_info);
10813  if (error != NO_ERROR)
10814  {
10815  goto cleanup;
10816  }
10817 
10818  if (class_hfid != NULL)
10819  {
10820  or_class_hfid (&recdes, class_hfid);
10821  }
10822 
10823 cleanup:
10824  heap_scancache_end (thread_p, &scan_cache);
10825 
10826  return error;
10827 }
10828 
10829 /*
10830  * heap_get_partition_attributes () - get attribute ids for columns of
10831  * _db_partition class
10832  * return : error code or NO_ERROR
10833  * thread_p (in) :
10834  * cls_oid (in) : _db_partition class OID
10835  * type_id (in/out) : holder for the type attribute id
10836  * values_id (in/out) : holder for the values attribute id
10837  */
10838 static int
10839 heap_get_partition_attributes (THREAD_ENTRY * thread_p, const OID * cls_oid, ATTR_ID * type_id, ATTR_ID * values_id)
10840 {
10841  RECDES recdes;
10842  HEAP_SCANCACHE scan;
10843  HEAP_CACHE_ATTRINFO attr_info;
10844  int error = NO_ERROR;
10845  int i = 0;
10846  char *attr_name = NULL;
10847  bool is_scan_cache_started = false, is_attrinfo_started = false;
10848  char *string = NULL;
10849  int alloced_string = 0;
10850 
10851  if (type_id == NULL || values_id == NULL)
10852  {
10853  assert (false);
10854  error = ER_FAILED;
10855  goto cleanup;
10856  }
10857  *type_id = *values_id = NULL_ATTRID;
10858 
10859  if (heap_scancache_quick_start_root_hfid (thread_p, &scan) != NO_ERROR)
10860  {
10861  error = ER_FAILED;
10862  goto cleanup;
10863  }
10864  is_scan_cache_started = true;
10865 
10866  error = heap_attrinfo_start (thread_p, cls_oid, -1, NULL, &attr_info);
10867  if (error != NO_ERROR)
10868  {
10869  goto cleanup;
10870  }
10871  is_attrinfo_started = true;
10872 
10873  if (heap_get_class_record (thread_p, cls_oid, &recdes, &scan, PEEK) != S_SUCCESS)
10874  {
10875  error = ER_FAILED;
10876  goto cleanup;
10877  }
10878 
10879  for (i = 0; i < attr_info.num_values && (*type_id == NULL_ATTRID || *values_id == NULL_ATTRID); i++)
10880  {
10881  alloced_string = 0;
10882  string = NULL;
10883 
10884  error = or_get_attrname (&recdes, i, &string, &alloced_string);
10885  if (error != NO_ERROR)
10886  {
10887  ASSERT_ERROR ();
10888  goto cleanup;
10889  }
10890 
10891  attr_name = string;
10892  if (attr_name == NULL)
10893  {
10894  error = ER_FAILED;
10895  goto cleanup;
10896  }
10897  if (strcmp (attr_name, "ptype") == 0)
10898  {
10899  *type_id = i;
10900  }
10901 
10902  if (strcmp (attr_name, "pvalues") == 0)
10903  {
10904  *values_id = i;
10905  }
10906 
10907  if (string != NULL && alloced_string == 1)
10908  {
10909  db_private_free_and_init (thread_p, string);
10910  }
10911  }
10912 
10913  if (*type_id == NULL_ATTRID || *values_id == NULL_ATTRID)
10914  {
10916  error = ER_FAILED;
10917  }
10918 
10919 cleanup:
10920  if (is_attrinfo_started)
10921  {
10922  heap_attrinfo_end (thread_p, &attr_info);
10923  }
10924  if (is_scan_cache_started)
10925  {
10926  heap_scancache_end (thread_p, &scan);
10927  }
10928  return error;
10929 }
10930 
10931 /*
10932  * heap_get_partitions_from_subclasses () - Get partition information from a
10933  * list of subclasses
10934  * return : error code or NO_ERROR
10935  * thread_p (in) :
10936  * subclasses (in) : subclasses OIDs
10937  * parts_count (in/out) : number of "useful" elements in parts
10938  * parts (in/out) : partitions
10939  *
10940  * Note: Memory for the partition array must be allocated before calling this
10941  * function and must be enough to store all partitions. The value from
10942  * position 0 in the partitions array will contain information from the
10943  * master class
10944  */
10945 static int
10946 heap_get_partitions_from_subclasses (THREAD_ENTRY * thread_p, const OID * subclasses, int *parts_count,
10947  OR_PARTITION * parts)
10948 {
10949  int part_idx = 0, i;
10950  int error = NO_ERROR;
10951  HFID part_hfid;
10952  REPR_ID repr_id;
10953  int has_partition_info = 0;
10954 
10955  if (parts == NULL)
10956  {
10957  assert (false);
10958  error = ER_FAILED;
10959  goto cleanup;
10960  }
10961 
10962  /* the partition information for the master class will be set by the caller */
10963  part_idx = 1;
10964 
10965  /* loop through subclasses and load partition information if the subclass is a partition */
10966  for (i = 0; !OID_ISNULL (&subclasses[i]); i++)
10967  {
10968  /* Get partition information from this subclass. part_info will be the OID of the tuple from _db_partition
10969  * containing partition information */
10970  error =
10971  heap_class_get_partition_info (thread_p, &subclasses[i], &parts[part_idx], &part_hfid, &repr_id,
10972  &has_partition_info);
10973  if (error != NO_ERROR)
10974  {
10975  goto cleanup;
10976  }
10977 
10978  if (has_partition_info == 0)
10979  {
10980  /* this is not a partition, this is a simple subclass */
10981  continue;
10982  }
10983 
10984  COPY_OID (&(parts[part_idx].class_oid), &subclasses[i]);
10985  HFID_COPY (&(parts[part_idx].class_hfid), &part_hfid);
10986  parts[part_idx].rep_id = repr_id;
10987 
10988  part_idx++;
10989  }
10990  *parts_count = part_idx;
10991 
10992 cleanup:
10993  if (error != NO_ERROR)
10994  {
10995  /* free memory for the values of partitions */
10996  for (i = 1; i < part_idx; i++)
10997  {
10998  if (parts[i].values != NULL)
10999  {
11000  db_seq_free (parts[i].values);
11001  }
11002  }
11003  }
11004  return error;
11005 }
11006 
11007 /*
11008  * heap_get_class_partitions () - get partitions information for a class
11009  * return : error code or NO_ERROR
11010  * thread_p (in) :
11011  * class_oid (in) : class OID
11012  * parts (in/out) : partitions information
11013  * parts_count (in/out) : number of partitions
11014  */
11015 int
11016 heap_get_class_partitions (THREAD_ENTRY * thread_p, const OID * class_oid, OR_PARTITION ** parts, int *parts_count)
11017 {
11018  int subclasses_count = 0;
11019  OID *subclasses = NULL;
11020  OR_PARTITION part_info;
11021  int error = NO_ERROR;
11022  OR_PARTITION *partitions = NULL;
11023  REPR_ID class_repr_id = NULL_REPRID;
11024  HFID class_hfid;
11025  int has_partition_info = 0;
11026 
11027  *parts = NULL;
11028  *parts_count = 0;
11029  part_info.values = NULL;
11030 
11031  /* This class might have partitions and subclasses. In order to get partition information we have to: 1. Get the OIDs
11032  * for all subclasses 2. Get partition information for all OIDs 3. Build information only for those subclasses which
11033  * are partitions */
11034  error =
11035  heap_class_get_partition_info (thread_p, class_oid, &part_info, &class_hfid, &class_repr_id, &has_partition_info);
11036  if (error != NO_ERROR)
11037  {
11038  goto cleanup;
11039  }
11040 
11041  if (has_partition_info == 0)
11042  {
11043  /* this class does not have partitions */
11044  error = NO_ERROR;
11045  goto cleanup;
11046  }
11047 
11048  /* Get OIDs for subclasses of class_oid. Some of them will be partitions */
11049  error = heap_get_class_subclasses (thread_p, class_oid, &subclasses_count, &subclasses);
11050  if (error != NO_ERROR)
11051  {
11052  goto cleanup;
11053  }
11054  else if (subclasses_count == 0)
11055  {
11056  /* This means that class_oid actually points to a partition and not the master class. We return NO_ERROR here
11057  * since there's no partition information */
11058  error = NO_ERROR;
11059  goto cleanup;
11060  }
11061 
11062  /* Allocate memory for partitions. We allocate more memory than needed here because the call to
11063  * heap_get_class_subclasses from above actually returned a larger count than the useful information. Also, not all
11064  * subclasses are necessarily partitions. */
11065  partitions = (OR_PARTITION *) db_private_alloc (thread_p, (subclasses_count + 1) * sizeof (OR_PARTITION));
11066  if (partitions == NULL)
11067  {
11068  error = ER_OUT_OF_VIRTUAL_MEMORY;
11069  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, (subclasses_count + 1) * sizeof (OR_PARTITION));
11070  goto cleanup;
11071  }
11072 
11073  error = heap_get_partitions_from_subclasses (thread_p, subclasses, parts_count, partitions);
11074  if (error != NO_ERROR)
11075  {
11076  ASSERT_ERROR ();
11077  goto cleanup;
11078  }
11079 
11080  /* fill the information for the root (partitioned class) */
11081  COPY_OID (&partitions[0].class_oid, class_oid);
11082  HFID_COPY (&partitions[0].class_hfid, &class_hfid);
11083  partitions[0].partition_type = part_info.partition_type;
11084  partitions[0].rep_id = class_repr_id;
11085  partitions[0].values = NULL;
11086  if (part_info.values != NULL)
11087  {
11088  partitions[0].values = set_copy (part_info.values);
11089  if (partitions[0].values == NULL)
11090  {
11091  error = er_errid ();
11092  goto cleanup;
11093  }
11094  set_free (part_info.values);
11095  part_info.values = NULL;
11096  }
11097 
11098  *parts = partitions;
11099 
11100 cleanup:
11101  if (subclasses != NULL)
11102  {
11103  free_and_init (subclasses);
11104  }
11105  if (part_info.values != NULL)
11106  {
11107  set_free (part_info.values);
11108  }
11109  if (error != NO_ERROR && partitions != NULL)
11110  {
11111  db_private_free (thread_p, partitions);
11112  *parts = NULL;
11113  *parts_count = 0;
11114  }
11115  return error;
11116 }
11117 
11118 /*
11119  * heap_clear_partition_info () - free partitions info from heap_get_class_partitions
11120  * return : void
11121  * thread_p (in) :
11122  * parts (in) : partitions information
11123  * parts_count (in) : number of partitions
11124  */
11125 void
11126 heap_clear_partition_info (THREAD_ENTRY * thread_p, OR_PARTITION * parts, int parts_count)
11127 {
11128  if (parts != NULL)
11129  {
11130  int i;
11131 
11132  for (i = 0; i < parts_count; i++)
11133  {
11134  if (parts[i].values != NULL)
11135  {
11136  db_seq_free (parts[i].values);
11137  }
11138  }
11139 
11140  db_private_free (thread_p, parts);
11141  }
11142 }
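/*
 * Editor's note (illustrative sketch, not part of the original source):
 * heap_get_class_partitions() and heap_clear_partition_info() are meant to
 * be used as a pair; parts[0] always describes the partitioned (master)
 * class and the remaining entries describe the partitions themselves.
 *
 *   OR_PARTITION *parts = NULL;
 *   int parts_count = 0;
 *
 *   if (heap_get_class_partitions (thread_p, &class_oid, &parts, &parts_count) == NO_ERROR
 *       && parts != NULL)
 *     {
 *       // parts[0] is the master class, parts[1 .. parts_count - 1] are partitions
 *       heap_clear_partition_info (thread_p, parts, parts_count);
 *     }
 */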
11143 
11144 /*
11145  * heap_get_class_supers () - get OIDs of superclasses of a class
11146  * return : error code or NO_ERROR
11147  * thread_p (in) : thread entry
11148  * class_oid (in) : OID of the subclass
11149  * super_oids (in/out) : OIDs of the superclasses
11150  * count (in/out) : number of elements in super_oids
11151  */
11152 int
11153 heap_get_class_supers (THREAD_ENTRY * thread_p, const OID * class_oid, OID ** super_oids, int *count)
11154 {
11155  HEAP_SCANCACHE scan_cache;
11156  RECDES recdes;
11157  int error = NO_ERROR;
11158 
11159  error = heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
11160  if (error != NO_ERROR)
11161  {
11162  return error;
11163  }
11164 
11165  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
11166  {
11167  heap_scancache_end (thread_p, &scan_cache);
11168  return ER_FAILED;
11169  }
11170 
11171  error = orc_superclasses_from_record (&recdes, count, super_oids);
11172 
11173  heap_scancache_end (thread_p, &scan_cache);
11174 
11175  return error;
11176 }
11177 
11178 /*
11179  * heap_attrinfo_check () -
11180  * return: NO_ERROR
11181  * inst_oid(in): The instance oid
11182  * attr_info(in): The attribute information structure which describes the
11183  * desired attributes
11184  */
11185 static int
11186 heap_attrinfo_check (const OID * inst_oid, HEAP_CACHE_ATTRINFO * attr_info)
11187 {
11188  int ret = NO_ERROR;
11189 
11190  if (inst_oid != NULL)
11191  {
11192  /*
11193  * The OIDs must be equal
11194  */
11195  if (!OID_EQ (&attr_info->inst_oid, inst_oid))
11196  {
11197  if (!OID_ISNULL (&attr_info->inst_oid))
11198  {
11199  ret = ER_HEAP_WRONG_ATTRINFO;
11200  er_set (ER_FATAL_ERROR_SEVERITY, ARG_FILE_LINE, ret, 6, attr_info->inst_oid.volid,
11201  attr_info->inst_oid.pageid, attr_info->inst_oid.slotid, inst_oid->volid, inst_oid->pageid,
11202  inst_oid->slotid);
11203  goto exit_on_error;
11204  }
11205 
11206  attr_info->inst_oid = *inst_oid;
11207  }
11208  }
11209  else
11210  {
11211  if (!OID_ISNULL (&attr_info->inst_oid))
11212  {
11213  ret = ER_HEAP_WRONG_ATTRINFO;
11214  er_set (ER_FATAL_ERROR_SEVERITY, ARG_FILE_LINE, ret, 6, attr_info->inst_oid.volid, attr_info->inst_oid.pageid,
11215  attr_info->inst_oid.slotid, NULL_VOLID, NULL_PAGEID, NULL_SLOTID);
11216  goto exit_on_error;
11217  }
11218  }
11219 
11220  return ret;
11221 
11222 exit_on_error:
11223 
11224  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
11225 }
11226 
11227 /*
11228  * heap_attrinfo_set () - Set the value of given attribute
11229  * return: NO_ERROR
11230  * inst_oid(in): The instance oid
11231  * attrid(in): The identifier of the attribute to be set
11232  * attr_val(in): The memory value of the attribute
11233  * attr_info(in/out): The attribute information structure which describes the
11234  * desired attributes
11235  *
11236  * Note: Set DB_VALUE of desired attribute identifier.
11237  */
11238 int
11239 heap_attrinfo_set (const OID * inst_oid, ATTR_ID attrid, DB_VALUE * attr_val, HEAP_CACHE_ATTRINFO * attr_info)
11240 {
11241  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11242  PR_TYPE *pr_type; /* Primitive type array function structure */
11243  TP_DOMAIN_STATUS dom_status;
11244  int ret = NO_ERROR;
11245 
11246  /*
11247  * check to make sure the attr_info has been used, should never be empty.
11248  */
11249 
11250  if (attr_info->num_values == -1)
11251  {
11252  return ER_FAILED;
11253  }
11254 
11255  ret = heap_attrinfo_check (inst_oid, attr_info);
11256  if (ret != NO_ERROR)
11257  {
11258  goto exit_on_error;
11259  }
11260 
11261  value = heap_attrvalue_locate (attrid, attr_info);
11262  if (value == NULL)
11263  {
11264  goto exit_on_error;
11265  }
11266 
11267  pr_type = pr_type_from_id (value->last_attrepr->type);
11268  if (pr_type == NULL)
11269  {
11270  goto exit_on_error;
11271  }
11272 
11273  ret = pr_clear_value (&value->dbvalue);
11274  if (ret != NO_ERROR)
11275  {
11276  goto exit_on_error;
11277  }
11278 
11279  ret =
11280  db_value_domain_init (&value->dbvalue, value->last_attrepr->type, value->last_attrepr->domain->precision,
11281  value->last_attrepr->domain->scale);
11282  if (ret != NO_ERROR)
11283  {
11284  goto exit_on_error;
11285  }
11286 
11287  /*
11288  * As we use "writeval" to do the writing and that function gets
11289  * enough domain information, we can use non-exact domain matching
11290  * here to defer the coercion until it is written.
11291  */
11292  dom_status = tp_domain_check (value->last_attrepr->domain, attr_val, TP_EXACT_MATCH);
11293  if (dom_status == DOMAIN_COMPATIBLE)
11294  {
11295  /*
11296  * the domains match exactly, set the value and proceed. Copy
11297  * the source only if it's a set-valued thing (that's the purpose
11298  * of the third argument).
11299  */
11300  ret = pr_type->setval (&value->dbvalue, attr_val, TP_IS_SET_TYPE (pr_type->id));
11301  }
11302  else
11303  {
11304  /* the domains don't match, must attempt coercion */
11305  dom_status = tp_value_auto_cast (attr_val, &value->dbvalue, value->last_attrepr->domain);
11306  if (dom_status != DOMAIN_COMPATIBLE)
11307  {
11308  ret = tp_domain_status_er_set (dom_status, ARG_FILE_LINE, attr_val, value->last_attrepr->domain);
11309  assert (er_errid () != NO_ERROR);
11310 
11311  db_make_null (&value->dbvalue);
11312  }
11313  }
11314 
11315  if (ret != NO_ERROR)
11316  {
11317  goto exit_on_error;
11318  }
11319 
11320  value->state = HEAP_WRITTEN_ATTRVALUE;
11321 
11322  return ret;
11323 
11324 exit_on_error:
11325 
11326  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
11327 }
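/*
 * Editor's note (illustrative sketch, not part of the original source):
 * heap_attrinfo_set() copies or coerces the supplied DB_VALUE into the
 * cached attribute slot; a value whose domain does not match exactly is
 * passed through tp_value_auto_cast() before being stored, and the slot is
 * marked HEAP_WRITTEN_ATTRVALUE.
 *
 *   DB_VALUE val;
 *
 *   db_make_int (&val, 42);
 *   if (heap_attrinfo_set (&inst_oid, attrid, &val, &attr_info) != NO_ERROR)
 *     {
 *       // the value could not be stored or coerced to the attribute domain
 *     }
 *   pr_clear_value (&val);
 */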
11328 
11329 /*
11330  * heap_attrinfo_set_uninitialized () - Read uninitialized attributes
11331  * return: NO_ERROR
11332  * inst_oid(in): The instance oid
11333  * recdes(in): The instance record descriptor
11334  * attr_info(in/out): The attribute information structure which describes the
11335  * desired attributes
11336  *
11337  * Note: Read the db values of the uninitialized attributes from the
11338  * given recdes. This function is used when we are ready to
11339  * transform an object that has been updated/inserted in the server.
11340  * If the object has been updated, recdes must be the old object
11341  * (the one on disk), so we can set the rest of the uninitialized
11342  * attributes from the old object.
11343  * If the object is a new one, recdes should be NULL; since there
11344  * is no object on disk, the rest of the uninitialized
11345  * attributes are set from default values.
11346  */
11347 static int
11348 heap_attrinfo_set_uninitialized (THREAD_ENTRY * thread_p, OID * inst_oid, RECDES * recdes,
11349  HEAP_CACHE_ATTRINFO * attr_info)
11350 {
11351  int i;
11352  REPR_ID reprid; /* Representation of object */
11353  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11354  int ret = NO_ERROR;
11355 
11356  ret = heap_attrinfo_check (inst_oid, attr_info);
11357  if (ret != NO_ERROR)
11358  {
11359  goto exit_on_error;
11360  }
11361 
11362  /*
11363  * Make sure that we have the needed cached representation.
11364  */
11365 
11366  if (recdes != NULL)
11367  {
11368  reprid = or_rep_id (recdes);
11369  }
11370  else
11371  {
11372  reprid = attr_info->last_classrepr->id;
11373  }
11374 
11375  if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
11376  {
11377  /* Get the needed representation */
11378  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
11379  if (ret != NO_ERROR)
11380  {
11381  goto exit_on_error;
11382  }
11383  }
11384 
11385  /*
11386  * Go over the attribute values and set the ones that have not been
11387  * initialized
11388  */
11389  for (i = 0; i < attr_info->num_values; i++)
11390  {
11391  value = &attr_info->values[i];
11392  if (value->state == HEAP_UNINIT_ATTRVALUE)
11393  {
11394  ret = heap_attrvalue_read (recdes, value, attr_info);
11395  if (ret != NO_ERROR)
11396  {
11397  goto exit_on_error;
11398  }
11399  }
11400  else if (value->state == HEAP_WRITTEN_ATTRVALUE
11401  && (value->last_attrepr->type == DB_TYPE_BLOB || value->last_attrepr->type == DB_TYPE_CLOB))
11402  {
11403  DB_VALUE *save;
11404  save = db_value_copy (&value->dbvalue);
11405  pr_clear_value (&value->dbvalue);
11406 
11407  /* read and delete old value */
11408  ret = heap_attrvalue_read (recdes, value, attr_info);
11409  if (ret != NO_ERROR)
11410  {
11411  goto exit_on_error;
11412  }
11413  if (!db_value_is_null (&value->dbvalue))
11414  {
11415  DB_ELO *elo;
11416 
11417  assert (db_value_type (&value->dbvalue) == DB_TYPE_BLOB
11418  || db_value_type (&value->dbvalue) == DB_TYPE_CLOB);
11419  elo = db_get_elo (&value->dbvalue);
11420  if (elo)
11421  {
11422  ret = db_elo_delete (elo);
11423  }
11424  pr_clear_value (&value->dbvalue);
11425  ret = (ret >= 0 ? NO_ERROR : ret);
11426  if (ret != NO_ERROR)
11427  {
11428  goto exit_on_error;
11429  }
11430  }
11431  value->state = HEAP_WRITTEN_ATTRVALUE;
11432  pr_clone_value (save, &value->dbvalue);
11433  pr_free_ext_value (save);
11434  }
11435  }
11436 
11437  if (recdes != NULL)
11438  {
11439  attr_info->inst_chn = or_chn (recdes);
11440  }
11441  else
11442  {
11443  attr_info->inst_chn = -1;
11444  }
11445 
11446  return ret;
11447 
11448 exit_on_error:
11449 
11450  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
11451 }
11452 
11453 /*
11454  * heap_attrinfo_get_disksize () - Find the disk size needed to transform the object
11455  * represented by attr_info
11456  * return: size of the object
11457  * attr_info(in/out): The attribute information structure
11458  * is_mvcc_class(in): true, if MVCC class
11459  * offset_size_ptr(out): offset size
11460  *
11461  * Note: Find the disk size needed to transform the object represented
11462  * by the attribute information structure.
11463  */
11464 static int
11465 heap_attrinfo_get_disksize (HEAP_CACHE_ATTRINFO * attr_info, bool is_mvcc_class, int *offset_size_ptr)
11466 {
11467  int i, size;
11468  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11469 
11470  *offset_size_ptr = OR_BYTE_SIZE;
11471 
11472 re_check:
11473  size = 0;
11474  for (i = 0; i < attr_info->num_values; i++)
11475  {
11476  value = &attr_info->values[i];
11477 
11478  if (value->last_attrepr->is_fixed != 0)
11479  {
11480  size += tp_domain_disk_size (value->last_attrepr->domain);
11481  }
11482  else
11483  {
11484  size += pr_data_writeval_disk_size (&value->dbvalue);
11485  }
11486  }
11487 
11488  if (is_mvcc_class)
11489  {
11490  size += OR_MVCC_INSERT_HEADER_SIZE;
11491  }
11492  else
11493  {
11494  size += OR_NON_MVCC_HEADER_SIZE;
11495  }
11496 
11497  size += OR_VAR_TABLE_SIZE_INTERNAL (attr_info->last_classrepr->n_variable, *offset_size_ptr);
11498  size += OR_BOUND_BIT_BYTES (attr_info->last_classrepr->n_attributes - attr_info->last_classrepr->n_variable);
11499 
11500  if (*offset_size_ptr == OR_BYTE_SIZE && size > OR_MAX_BYTE)
11501  {
11502  *offset_size_ptr = OR_SHORT_SIZE; /* 2byte */
11503  goto re_check;
11504  }
11505  if (*offset_size_ptr == OR_SHORT_SIZE && size > OR_MAX_SHORT)
11506  {
11507  *offset_size_ptr = BIG_VAR_OFFSET_SIZE; /* 4byte */
11508  goto re_check;
11509  }
11510 
11511  return size;
11512 }
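/*
 * Editor's note (illustrative, not part of the original source): the
 * re_check loop above picks the smallest variable-offset width that can
 * address the whole record. For example, if the first pass assuming 1-byte
 * offsets (OR_BYTE_SIZE) computes a size larger than OR_MAX_BYTE, the size
 * is recomputed with 2-byte offsets (OR_SHORT_SIZE), and recomputed once
 * more with 4-byte offsets (BIG_VAR_OFFSET_SIZE) if it still exceeds
 * OR_MAX_SHORT; the recomputation is needed because wider offsets enlarge
 * the variable offset table, and therefore the total size, as well.
 */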
11513 
11514 /*
11515  * heap_attrinfo_transform_to_disk () - Transform an instance described by the
11516  * attribute information structure to disk format
11517  * return: SCAN_CODE
11518  * (Either of S_SUCCESS, S_DOESNT_FIT,
11519  * S_ERROR)
11520  * attr_info(in/out): The attribute information structure
11521  * old_recdes(in): where the object's disk format is deposited
11522  * new_recdes(in):
11523  *
11524  * Note: Transform the object represented by attr_info to disk format
11525  */
11526 SCAN_CODE
11527 heap_attrinfo_transform_to_disk (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, RECDES * old_recdes,
11528  record_descriptor * new_recdes)
11529 {
11530  return heap_attrinfo_transform_to_disk_internal (thread_p, attr_info, old_recdes, new_recdes, LOB_FLAG_INCLUDE_LOB);
11531 }
11532 
11533 /*
11534  * heap_attrinfo_transform_to_disk_except_lob () -
11535  * Transform an instance described by the attribute
11536  * information structure to disk format. Do not create lobs.
11537  * return: SCAN_CODE
11538  * (Either of S_SUCCESS, S_DOESNT_FIT,
11539  * S_ERROR)
11540  * attr_info(in/out): The attribute information structure
11541  * old_recdes(in): where the object's disk format is deposited
11542  * new_recdes(in):
11543  *
11544  * Note: Transform the object represented by attr_info to disk format
11545  */
11546 SCAN_CODE
11547 heap_attrinfo_transform_to_disk_except_lob (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info,
11548  RECDES * old_recdes, record_descriptor * new_recdes)
11549 {
11550  return heap_attrinfo_transform_to_disk_internal (thread_p, attr_info, old_recdes, new_recdes, LOB_FLAG_EXCLUDE_LOB);
11551 }
11552 
11553 /*
11554  * heap_attrinfo_transform_to_disk_internal () -
11555  * Transform an instance described by the attribute
11556  * information structure to disk format.
11557  * return: SCAN_CODE
11558  * (Either of S_SUCCESS, S_DOESNT_FIT,
11559  * S_ERROR)
11560  * attr_info(in/out): The attribute information structure
11561  * old_recdes(in): where the object's disk format is deposited
11562  * new_recdes(in):
11563  * lob_create_flag(in):
11564  *
11565  * Note: Transform the object represented by attr_info to disk format
11566  */
11567 static SCAN_CODE
11568 heap_attrinfo_transform_to_disk_internal (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, RECDES * old_recdes,
11569  record_descriptor * new_recdes, int lob_create_flag)
11570 {
11571  OR_BUF orep, *buf;
11572  char *ptr_bound, *ptr_varvals;
11573  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11574  DB_VALUE temp_dbvalue;
11575  PR_TYPE *pr_type; /* Primitive type array function structure */
11576  unsigned int repid_bits;
11577  SCAN_CODE status;
11578  int i;
11579  DB_VALUE *dbvalue = NULL;
11580  size_t expected_size;
11581  int tmp;
11582  volatile int offset_size;
11583  volatile int mvcc_wasted_space = 0;
11584  int header_size;
11585  bool is_mvcc_class;
11586  // *INDENT-OFF*
11587  std::set<int> incremented_attrids;
11588  // *INDENT-ON*
11589 
11590  assert (new_recdes != NULL);
11591 
11592  /* check to make sure the attr_info has been used, it should not be empty. */
11593  if (attr_info->num_values == -1)
11594  {
11595  return S_ERROR;
11596  }
11597 
11598  /*
11599  * Get any of the values that have not been set/read
11600  */
11601  if (heap_attrinfo_set_uninitialized (thread_p, &attr_info->inst_oid, old_recdes, attr_info) != NO_ERROR)
11602  {
11603  return S_ERROR;
11604  }
11605 
11606  /* Start transforming the dbvalues into disk values for the object */
11607  is_mvcc_class = !mvcc_is_mvcc_disabled_class (&(attr_info->class_oid));
11608 
11609  expected_size = heap_attrinfo_get_disksize (attr_info, is_mvcc_class, &tmp);
11610  offset_size = tmp;
11611 
11612  if (is_mvcc_class)
11613  {
11614  mvcc_wasted_space = (OR_MVCC_MAX_HEADER_SIZE - OR_MVCC_INSERT_HEADER_SIZE);
11615  if (old_recdes != NULL)
11616  {
11617  /* Update case, reserve space for previous version LSA. */
11618  expected_size += OR_MVCC_PREV_VERSION_LSA_SIZE;
11619  mvcc_wasted_space -= OR_MVCC_PREV_VERSION_LSA_SIZE;
11620  }
11621  }
11622 
11623  /* reserve enough space if need to add additional MVCC header info */
11624  expected_size += mvcc_wasted_space;
11625 
11626 resize_and_start:
11627 
11628  new_recdes->resize_buffer (expected_size);
11629  OR_BUF_INIT2 (orep, new_recdes->get_data_for_modify (), (int) expected_size);
11630  buf = &orep;
11631 
11632  switch (_setjmp (buf->env))
11633  {
11634  case 0:
11635  status = S_SUCCESS;
11636 
11637  /*
11638  * Store the representation of the class along with bound bit
11639  * flag information
11640  */
11641 
11642  repid_bits = attr_info->last_classrepr->id;
11643  /*
11644  * Do we have fixed value attributes ?
11645  */
11646  if ((attr_info->last_classrepr->n_attributes - attr_info->last_classrepr->n_variable) != 0)
11647  {
11648  repid_bits |= OR_BOUND_BIT_FLAG;
11649  }
11650 
11651  /* offset size */
11652  OR_SET_VAR_OFFSET_SIZE (repid_bits, offset_size);
11653 
11654  /*
11655  * We must increase the current value by one so that clients
11656  * can detect the change in object. That is, clients will need to
11657  * refetch the object.
11658  */
11659  attr_info->inst_chn++;
11660  if (is_mvcc_class)
11661  {
11662  if (old_recdes == NULL)
11663  {
11665  or_put_int (buf, repid_bits);
11666  or_put_int (buf, 0); /* CHN */
11667  or_put_bigint (buf, 0); /* MVCC insert id */
11668  header_size = OR_MVCC_INSERT_HEADER_SIZE;
11669  }
11670  else
11671  {
11672  LOG_LSA null_lsa = LSA_INITIALIZER;
11674  or_put_int (buf, repid_bits);
11675  or_put_int (buf, 0); /* CHN */
11676  or_put_bigint (buf, 0); /* MVCC insert id */
11677 
11678  assert ((buf->ptr + OR_MVCC_PREV_VERSION_LSA_SIZE) <= buf->endptr);
11679  or_put_data (buf, (char *) &null_lsa, OR_MVCC_PREV_VERSION_LSA_SIZE); /* prev version lsa */
11681  }
11682  }
11683  else
11684  {
11685  or_put_int (buf, repid_bits);
11686  or_put_int (buf, attr_info->inst_chn);
11687  header_size = OR_NON_MVCC_HEADER_SIZE;
11688  }
11689 
11690  /*
11691  * Calculate the pointer address to variable offset attribute table,
11692  * fixed attributes, and variable attributes
11693  */
11694 
11695  ptr_bound = OR_GET_BOUND_BITS (buf->buffer, attr_info->last_classrepr->n_variable,
11696  attr_info->last_classrepr->fixed_length);
11697 
11698  /*
11699  * Variable offset table is relative to the beginning of the buffer
11700  */
11701 
11702  ptr_varvals = (ptr_bound
11703  + OR_BOUND_BIT_BYTES (attr_info->last_classrepr->n_attributes
11704  - attr_info->last_classrepr->n_variable));
11705 
11706  /* Need to make sure that the bound array is not past the allocated buffer because OR_ENABLE_BOUND_BIT() will
11707  * just slam the bound bit without checking the length. */
11708 
11709  if (ptr_varvals + mvcc_wasted_space > buf->endptr)
11710  {
11711  // is it possible?
11712  expected_size += DB_PAGESIZE;
11713  goto resize_and_start;
11714  }
11715 
11716  for (i = 0; i < attr_info->num_values; i++)
11717  {
11718  value = &attr_info->values[i];
11719  dbvalue = &value->dbvalue;
11720  pr_type = value->last_attrepr->domain->type;
11721  if (pr_type == NULL)
11722  {
11723  return S_ERROR;
11724  }
11725 
11726  /*
11727  * Is this a fixed or variable attribute ?
11728  */
11729  if (value->last_attrepr->is_fixed != 0)
11730  {
11731  /*
11732  * Fixed attribute
11733  * Write the fixed attribute value; if it is unbound, it does not
11734  * matter what value is stored. We need to set the appropriate bit
11735  * in the bound bit array for fixed attributes.
11736  */
11737  buf->ptr = (buf->buffer
11738  + OR_FIXED_ATTRIBUTES_OFFSET_BY_OBJ (buf->buffer, attr_info->last_classrepr->n_variable)
11739  + value->last_attrepr->location);
11740 
11741  if (value->do_increment && (incremented_attrids.find (i) == incremented_attrids.end ()))
11742  {
11743  if (qdata_increment_dbval (dbvalue, dbvalue, value->do_increment) != NO_ERROR)
11744  {
11745  status = S_ERROR;
11746  break;
11747  }
11748  incremented_attrids.insert (i);
11749  }
11750 
11751  if (dbvalue == NULL || db_value_is_null (dbvalue) == true)
11752  {
11753  /*
11754  * This is an unbound value.
11755  * 1) Set any value in the fixed array value table, so we can
11756  * advance to next attribute.
11757  * 2) and set the bound bit as unbound
11758  */
11759  db_value_domain_init (&temp_dbvalue, value->last_attrepr->type,
11760  value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
11761  dbvalue = &temp_dbvalue;
11762  OR_CLEAR_BOUND_BIT (ptr_bound, value->last_attrepr->position);
11763 
11764  /*
11765  * pad the appropriate amount, writeval needs to be modified
11766  * to accept a domain so it can perform this padding.
11767  */
11768  or_pad (buf, tp_domain_disk_size (value->last_attrepr->domain));
11769 
11770  }
11771  else
11772  {
11773  /*
11774  * Write the value.
11775  */
11776  OR_ENABLE_BOUND_BIT (ptr_bound, value->last_attrepr->position);
11777  pr_type->data_writeval (buf, dbvalue);
11778  }
11779  }
11780  else
11781  {
11782  /*
11783  * Variable attribute
11784  * 1) Set the offset to this value in the variable offset table
11785  * 2) Set the value in the variable value portion of the disk
11786  * object (Only if the value is bound)
11787  */
11788 
11789  /*
11790  * Write the offset onto the variable offset table and remember
11791  * the current pointer to the variable offset table
11792  */
11793 
11794  if (value->do_increment != 0)
11795  {
11796  status = S_ERROR;
11797  break;
11798  }
11799 
11800  buf->ptr = (char *) (OR_VAR_ELEMENT_PTR (buf->buffer, value->last_attrepr->location));
11801  /* compute the variable offsets relative to the end of the header (beginning of variable table) */
11802  or_put_offset_internal (buf, CAST_BUFLEN (ptr_varvals - buf->buffer - header_size), offset_size);
11803 
11804  if (dbvalue != NULL && db_value_is_null (dbvalue) != true)
11805  {
11806  /*
11807  * Now write the value and remember the current pointer
11808  * to variable value array for the next element.
11809  */
11810  buf->ptr = ptr_varvals;
11811 
11812  if (lob_create_flag == LOB_FLAG_INCLUDE_LOB && value->state == HEAP_WRITTEN_ATTRVALUE
11813  && (pr_type->id == DB_TYPE_BLOB || pr_type->id == DB_TYPE_CLOB))
11814  {
11815  DB_ELO dest_elo, *elo_p;
11816  char *save_meta_data, *new_meta_data;
11817  int error;
11818 
11819  assert (db_value_type (dbvalue) == DB_TYPE_BLOB || db_value_type (dbvalue) == DB_TYPE_CLOB);
11820 
11821  elo_p = db_get_elo (dbvalue);
11822 
11823  if (elo_p == NULL)
11824  {
11825  continue;
11826  }
11827 
11828  if (heap_get_class_name (thread_p, &(attr_info->class_oid), &new_meta_data) != NO_ERROR
11829  || new_meta_data == NULL)
11830  {
11831  status = S_ERROR;
11832  break;
11833  }
11834  save_meta_data = elo_p->meta_data;
11835  elo_p->meta_data = new_meta_data;
11836  error = db_elo_copy (db_get_elo (dbvalue), &dest_elo);
11837 
11838  free_and_init (elo_p->meta_data);
11839  elo_p->meta_data = save_meta_data;
11840 
11841  /* The purpose of HEAP_WRITTEN_LOB_ATTRVALUE is to avoid re-entering this branch. In the first pass,
11842  * this branch is entered and the elo is copied. If a buffer overflow happens, we must avoid copying the
11843  * elo again; otherwise two copies would be generated. */
11844  value->state = HEAP_WRITTEN_LOB_ATTRVALUE;
11845 
11846  error = (error >= 0 ? NO_ERROR : error);
11847  if (error == NO_ERROR)
11848  {
11849  pr_clear_value (dbvalue);
11850  db_make_elo (dbvalue, pr_type->id, &dest_elo);
11851  dbvalue->need_clear = true;
11852  }
11853  else
11854  {
11855  status = S_ERROR;
11856  break;
11857  }
11858  }
11859 
11860  pr_type->data_writeval (buf, dbvalue);
11861  ptr_varvals = buf->ptr;
11862  }
11863  }
11864  }
11865 
11866  if (attr_info->last_classrepr->n_variable > 0)
11867  {
11868  /*
11869  * The last element of the variable offset table points to the end of
11870  * the object. The variable offset array starts with zero, so we can
11871  * just access n_variable...
11872  */
11873 
11874  /* Write the offset to the end of the variable attributes table */
11875  buf->ptr = ((char *) (OR_VAR_ELEMENT_PTR (buf->buffer, attr_info->last_classrepr->n_variable)));
11876  or_put_offset_internal (buf, CAST_BUFLEN (ptr_varvals - buf->buffer - header_size), offset_size);
11877  buf->ptr = PTR_ALIGN (buf->ptr, INT_ALIGNMENT);
11878  }
11879 
11880  /* Record the length of the object */
11881  new_recdes->set_record_length (ptr_varvals - buf->buffer);
11882 
11883  /* if not enough MVCC wasted space need to reallocate */
11884  if (ptr_varvals + mvcc_wasted_space <= buf->endptr)
11885  {
11886  break;
11887  }
11888 
11889  /*
11890  * if the longjmp status was anything other than ER_TF_BUFFER_OVERFLOW,
11891  * it represents an error condition and er_set will have been called
11892  */
11893  /* FALLTHRU */
11894  case ER_TF_BUFFER_OVERFLOW:
11895  expected_size += DB_PAGESIZE;
11896  goto resize_and_start;
11897 
11898  default:
11899  status = S_ERROR;
11900  break;
11901  }
11902 
11903  return status;
11904 }
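/*
 * Editor's note (illustrative, not part of the original source): the
 * function above relies on the OR_BUF overflow protocol: the or_put_*
 * writers longjmp back into the _setjmp (buf->env) switch with
 * ER_TF_BUFFER_OVERFLOW when the record does not fit, after which the
 * buffer is grown by DB_PAGESIZE and the transformation is retried from
 * resize_and_start. A minimal sketch of the same pattern, assuming 'data'
 * is a caller-managed buffer that can be reallocated to expected_size:
 *
 *   OR_BUF buf;
 *
 * retry:
 *   OR_BUF_INIT2 (buf, data, (int) expected_size);
 *   switch (_setjmp (buf.env))
 *     {
 *     case 0:
 *       // ... or_put_int / or_put_data calls that may overflow ...
 *       break;
 *     case ER_TF_BUFFER_OVERFLOW:
 *       expected_size += DB_PAGESIZE;
 *       // grow 'data' to expected_size, then retry the whole transform
 *       goto retry;
 *     default:
 *       // genuine error; er_set () has already been called by the writer
 *       break;
 *     }
 */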
11905 
11906 /*
11907  * heap_attrinfo_start_refoids () - Initialize an attribute information structure
11908  * with attributes that may reference other objects
11909  * return: NO_ERROR
11910  * class_oid(in): The class identifier of the instances whose
11911  * attribute values are going to be read.
11912  * attr_info(in/out): The attribute information structure
11913  *
11914  * Note: Initialize an attribute information structure with attributes
11915  * that may reference other objects (OIDs).
11916  *
11917  * Note: The caller must call heap_attrinfo_end when it is done with
11918  * the attribute information.
11919  */
11920 
11921 static int
11922 heap_attrinfo_start_refoids (THREAD_ENTRY * thread_p, OID * class_oid, HEAP_CACHE_ATTRINFO * attr_info)
11923 {
11924  ATTR_ID guess_attrids[HEAP_GUESS_NUM_ATTRS_REFOIDS];
11925  ATTR_ID *set_attrids;
11926  int num_found_attrs;
11927  OR_CLASSREP *classrepr;
11928  int classrepr_cacheindex = -1;
11929  OR_ATTRIBUTE *search_attrepr;
11930  int i;
11931  int ret = NO_ERROR;
11932 
11933  attr_info->num_values = -1;
11934 
11935  /*
11936  * Find the current representation of the class, then scan all its
11937  * attributes finding the ones that may reference objects
11938  */
11939 
11940  classrepr = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
11941  if (classrepr == NULL)
11942  {
11943  return ER_FAILED;
11944  }
11945 
11946  /*
11947  * Go over the list of attributes until the desired attributes (OIDs, sets)
11948  * are found
11949  */
11950 
11951  if (classrepr->n_attributes > HEAP_GUESS_NUM_ATTRS_REFOIDS)
11952  {
11953  set_attrids = (ATTR_ID *) malloc (classrepr->n_attributes * sizeof (ATTR_ID));
11954  if (set_attrids == NULL)
11955  {
11956  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
11957  classrepr->n_attributes * sizeof (ATTR_ID));
11958  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
11959  return ER_OUT_OF_VIRTUAL_MEMORY;
11960  }
11961  }
11962  else
11963  {
11964  set_attrids = guess_attrids;
11965  }
11966 
11967  for (i = 0, num_found_attrs = 0; i < classrepr->n_attributes; i++)
11968  {
11969  search_attrepr = &classrepr->attributes[i];
11970  if (tp_domain_references_objects (search_attrepr->domain) == true)
11971  {
11972  set_attrids[num_found_attrs++] = search_attrepr->id;
11973  }
11974  }
11975 
11976  ret = heap_attrinfo_start (thread_p, class_oid, num_found_attrs, set_attrids, attr_info);
11977 
11978  if (set_attrids != guess_attrids)
11979  {
11980  free_and_init (set_attrids);
11981  }
11982 
11983  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
11984 
11985  return ret;
11986 }
11987 
11988 /*
11989  * heap_attrinfo_start_with_index () -
11990  * return:
11991  * class_oid(in):
11992  * class_recdes(in):
11993  * attr_info(in):
11994  * idx_info(in):
11995  */
11996 int
11997 heap_attrinfo_start_with_index (THREAD_ENTRY * thread_p, OID * class_oid, RECDES * class_recdes,
11998  HEAP_CACHE_ATTRINFO * attr_info, HEAP_IDX_ELEMENTS_INFO * idx_info)
11999 {
12000  ATTR_ID guess_attrids[HEAP_GUESS_NUM_INDEXED_ATTRS];
12001  ATTR_ID *set_attrids;
12002  int num_found_attrs;
12003  OR_CLASSREP *classrepr = NULL;
12004  int classrepr_cacheindex = -1;
12005  OR_ATTRIBUTE *search_attrepr;
12006  int i, j;
12007  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
12008  int *num_btids;
12009  OR_INDEX *indexp;
12010 
12011  idx_info->has_single_col = 0;
12012  idx_info->has_multi_col = 0;
12013  idx_info->num_btids = 0;
12014 
12015  num_btids = &idx_info->num_btids;
12016 
12017  set_attrids = guess_attrids;
12018  attr_info->num_values = -1; /* initialize attr_info */
12019 
12020  classrepr = heap_classrepr_get (thread_p, class_oid, class_recdes, NULL_REPRID, &classrepr_cacheindex);
12021  if (classrepr == NULL)
12022  {
12023  return ER_FAILED;
12024  }
12025 
12026  if (classrepr->n_attributes > HEAP_GUESS_NUM_INDEXED_ATTRS)
12027  {
12028  set_attrids = (ATTR_ID *) malloc (classrepr->n_attributes * sizeof (ATTR_ID));
12029  if (set_attrids == NULL)
12030  {
12031  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
12032  classrepr->n_attributes * sizeof (ATTR_ID));
12033  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
12034  return ER_OUT_OF_VIRTUAL_MEMORY;
12035  }
12036  }
12037  else
12038  {
12039  set_attrids = guess_attrids;
12040  }
12041 
12042  /*
12043  * Read the number of BTID's in this class
12044  */
12045  *num_btids = classrepr->n_indexes;
12046 
12047  for (j = 0; j < *num_btids; j++)
12048  {
12049  indexp = &classrepr->indexes[j];
12050  if (indexp->n_atts == 1)
12051  {
12052  idx_info->has_single_col = 1;
12053  }
12054  else if (indexp->n_atts > 1)
12055  {
12056  idx_info->has_multi_col = 1;
12057  }
12058  /* check for already found both */
12059  if (idx_info->has_single_col && idx_info->has_multi_col)
12060  {
12061  break;
12062  }
12063  }
12064 
12065  /*
12066  * Go over the list of attrs until all indexed attributes (OIDs, sets)
12067  * are found
12068  */
12069  for (i = 0, num_found_attrs = 0, search_attrepr = classrepr->attributes; i < classrepr->n_attributes;
12070  i++, search_attrepr++)
12071  {
12072  if (search_attrepr->n_btids <= 0)
12073  {
12074  continue;
12075  }
12076 
12077  if (idx_info->has_single_col)
12078  {
12079  for (j = 0; j < *num_btids; j++)
12080  {
12081  indexp = &classrepr->indexes[j];
12082  if (indexp->n_atts == 1 && indexp->atts[0]->id == search_attrepr->id)
12083  {
12084  set_attrids[num_found_attrs++] = search_attrepr->id;
12085  break;
12086  }
12087  }
12088  }
12089  } /* for (i = 0 ...) */
12090 
12091  if (idx_info->has_multi_col == 0 && num_found_attrs == 0)
12092  {
12093  /* initialize the attrinfo cache and return, there is nothing else to do */
12094  /* (void) memset(attr_info, '\0', sizeof (HEAP_CACHE_ATTRINFO)); */
12095 
12096  /* now set the num_values to -1 which indicates that this is an empty HEAP_CACHE_ATTRINFO and shouldn't be
12097  * operated on. */
12098  attr_info->num_values = -1;
12099 
12100  /* free the class representation */
12101  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
12102  }
12103  else
12104  { /* num_found_attrs > 0 */
12105  /* initialize attribute information */
12106  attr_info->class_oid = *class_oid;
12107  attr_info->last_cacheindex = classrepr_cacheindex;
12108  attr_info->read_cacheindex = -1;
12109  attr_info->last_classrepr = classrepr;
12110  attr_info->read_classrepr = NULL;
12111  OID_SET_NULL (&attr_info->inst_oid);
12112  attr_info->inst_chn = NULL_CHN;
12113  attr_info->num_values = num_found_attrs;
12114 
12115  if (num_found_attrs <= 0)
12116  {
12117  attr_info->values = NULL;
12118  }
12119  else
12120  {
12121  attr_info->values =
12122  (HEAP_ATTRVALUE *) db_private_alloc (thread_p, (num_found_attrs * sizeof (HEAP_ATTRVALUE)));
12123  if (attr_info->values == NULL)
12124  {
12125  /* free the class representation */
12126  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
12127  attr_info->num_values = -1;
12128  goto error;
12129  }
12130  }
12131 
12132  /*
12133  * Set the attribute identifier of the desired attributes in the value
12134  * attribute information, and indicates that the current value is
12135  * uninitialized. That is, it has not been read or set yet.
12136  */
12137  for (i = 0; i < attr_info->num_values; i++)
12138  {
12139  value = &attr_info->values[i];
12140  value->attrid = set_attrids[i];
12141  value->state = HEAP_UNINIT_ATTRVALUE;
12142  value->last_attrepr = NULL;
12143  value->read_attrepr = NULL;
12144  }
12145 
12146  /*
12147  * Make last information to be recached for each individual attribute
12148  * value. Needed for WRITE and Default values
12149  */
12150  if (heap_attrinfo_recache_attrepr (attr_info, true) != NO_ERROR)
12151  {
12152  /* classrepr will be freed in heap_attrinfo_end */
12153  heap_attrinfo_end (thread_p, attr_info);
12154  goto error;
12155  }
12156  }
12157 
12158  if (set_attrids != guess_attrids)
12159  {
12160  free_and_init (set_attrids);
12161  }
12162 
12163  if (num_found_attrs == 0 && idx_info->has_multi_col)
12164  {
12165  return 1;
12166  }
12167  else
12168  {
12169  return num_found_attrs;
12170  }
12171 
12172  /* **** */
12173 error:
12174 
12175  if (set_attrids != guess_attrids)
12176  {
12177  free_and_init (set_attrids);
12178  }
12179 
12180  return ER_FAILED;
12181 }
12182 
12183 /*
12184  * heap_classrepr_find_index_id () - Find the indicated index ID from the class repr
12185  * return: ID of the desired index, or -1 if an error occurred.
12186  * classrepr(in): The class representation.
12187  * btid(in): The BTID of the interested index.
12188  *
12189  * Note: Locate the desired index by matching it with the passed BTID.
12190  * Return the ID of the index if found.
12191  */
12192 int
12193 heap_classrepr_find_index_id (OR_CLASSREP * classrepr, const BTID * btid)
12194 {
12195  int i;
12196  int id = -1;
12197 
12198  for (i = 0; i < classrepr->n_indexes; i++)
12199  {
12200  if (BTID_IS_EQUAL (&(classrepr->indexes[i].btid), btid))
12201  {
12202  id = i;
12203  break;
12204  }
12205  }
12206 
12207  return id;
12208 }
12209 
12210 /*
12211  * heap_attrinfo_start_with_btid () - Initialize an attribute information structure
12212  * return: ID for the index which corresponds to the passed BTID.
12213  * If an error occurred, a -1 is returned.
12214  * class_oid(in): The class identifier of the instances whose
12215  * attribute values are going to be read.
12216  * btid(in): The BTID of the interested index.
12217  * attr_info(in/out): The attribute information structure
12218  *
12219  * Note: Initialize an attribute information structure, so that values
12220  * of instances can be retrieved based on the desired attributes.
12221  *
12222  * There are currently three functions which can be used to
12223  * initialize the attribute information structure: heap_attrinfo_start(),
12224  * heap_attrinfo_start_with_index(), and this one. This function determines
12225  * which attributes belong to the passed BTID and populates the
12226  * information structure with those attributes.
12227  *
12228  * The attrinfo structure is a structure through which values of
12229  * instances can be read. For example, an object is retrieved,
12230  * then some of its attributes are converted to dbvalues and
12231  * placed in this structure.
12232  *
12233  * Note: The caller must call heap_attrinfo_end when it is done with
12234  * the attribute information.
12235  */
12236 int
12237 heap_attrinfo_start_with_btid (THREAD_ENTRY * thread_p, OID * class_oid, BTID * btid, HEAP_CACHE_ATTRINFO * attr_info)
12238 {
12239  ATTR_ID guess_attrids[HEAP_GUESS_NUM_INDEXED_ATTRS];
12240  ATTR_ID *set_attrids;
12241  OR_CLASSREP *classrepr = NULL;
12242  int i;
12243  int index_id = -1;
12244  int classrepr_cacheindex = -1;
12245  int num_found_attrs = 0;
12246 
12247  /*
12248  * We'll start by assuming that the number of attributes will fit into
12249  * the preallocated array.
12250  */
12251  set_attrids = guess_attrids;
12252 
12253  attr_info->num_values = -1; /* initialize attr_info */
12254 
12255  /*
12256  * Get the class representation so that we can access the indexes.
12257  */
12258  classrepr = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
12259  if (classrepr == NULL)
12260  {
12261  goto error;
12262  }
12263 
12264  /*
12265  * Get the index ID which corresponds to the BTID
12266  */
12267  index_id = heap_classrepr_find_index_id (classrepr, btid);
12268  if (index_id == -1)
12269  {
12270  goto error;
12271  }
12272 
12273  /*
12274  * Get the number of attributes associated with this index.
12275  * Allocate a new attribute ID array if we have more attributes
12276  * than will fit in the pre-allocated array.
12277  * Fill the array with the attribute ID's
12278  * Free the class representation.
12279  */
12280  num_found_attrs = classrepr->indexes[index_id].n_atts;
12281  if (num_found_attrs > HEAP_GUESS_NUM_INDEXED_ATTRS)
12282  {
12283  set_attrids = (ATTR_ID *) malloc (num_found_attrs * sizeof (ATTR_ID));
12284  if (set_attrids == NULL)
12285  {
12286  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, num_found_attrs * sizeof (ATTR_ID));
12287  goto error;
12288  }
12289  }
12290 
12291  for (i = 0; i < num_found_attrs; i++)
12292  {
12293  set_attrids[i] = classrepr->indexes[index_id].atts[i]->id;
12294  }
12295 
12296  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
12297 
12298  /*
12299  * Get the attribute information for the collected ID's
12300  */
12301  if (num_found_attrs > 0)
12302  {
12303  if (heap_attrinfo_start (thread_p, class_oid, num_found_attrs, set_attrids, attr_info) != NO_ERROR)
12304  {
12305  goto error;
12306  }
12307  }
12308 
12309  /*
12310  * Free the attribute ID array if it was dynamically allocated
12311  */
12312  if (set_attrids != guess_attrids)
12313  {
12314  free_and_init (set_attrids);
12315  }
12316 
12317  return index_id;
12318 
12319  /* **** */
12320 error:
12321 
12322  if (classrepr)
12323  {
12324  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
12325  }
12326 
12327  if (set_attrids != guess_attrids)
12328  {
12329  free_and_init (set_attrids);
12330  }
12331 
12332  return ER_FAILED;
12333 }
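/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a typical key-extraction path starts the attrinfo cache from the index
 * BTID, reads the instance values, builds the key, and finishes with
 * heap_attrinfo_end(); the returned index_id identifies the index inside
 * the cached class representation.
 *
 *   HEAP_CACHE_ATTRINFO attr_info;
 *   int index_id;
 *
 *   index_id = heap_attrinfo_start_with_btid (thread_p, &class_oid, &btid, &attr_info);
 *   if (index_id >= 0)
 *     {
 *       // read the instance values and build keys for index index_id ...
 *       heap_attrinfo_end (thread_p, &attr_info);
 *     }
 */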
12334 
12335 #if defined (ENABLE_UNUSED_FUNCTION)
12336 /*
12337  * heap_attrvalue_get_index () -
12338  * return:
12339  * value_index(in):
12340  * attrid(in):
12341  * n_btids(in):
12342  * btids(in):
12343  * idx_attrinfo(in):
12344  */
12345 DB_VALUE *
12346 heap_attrvalue_get_index (int value_index, ATTR_ID * attrid, int *n_btids, BTID ** btids,
12347  HEAP_CACHE_ATTRINFO * idx_attrinfo)
12348 {
12349  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
12350 
12351  /* check to make sure the idx_attrinfo has been used, it should never be empty. */
12352  if (idx_attrinfo->num_values == -1)
12353  {
12354  return NULL;
12355  }
12356 
12357  if (value_index > idx_attrinfo->num_values || value_index < 0)
12358  {
12359  *n_btids = 0;
12360  *btids = NULL;
12361  *attrid = NULL_ATTRID;
12362  return NULL;
12363  }
12364  else
12365  {
12366  value = &idx_attrinfo->values[value_index];
12367  *n_btids = value->last_attrepr->n_btids;
12368  *btids = value->last_attrepr->btids;
12369  *attrid = value->attrid;
12370  return &value->dbvalue;
12371  }
12372 
12373 }
12374 #endif
12375 
12376 /*
12377  * heap_midxkey_key_get () -
12378  * return:
12379  * recdes(in):
12380  * midxkey(in/out):
12381  * index(in):
12382  * attrinfo(in):
12383  * func_domain(in):
12384  * key_domain(out):
12385  */
12386 static DB_MIDXKEY *
12387 heap_midxkey_key_get (RECDES * recdes, DB_MIDXKEY * midxkey, OR_INDEX * index, HEAP_CACHE_ATTRINFO * attrinfo,
12388  DB_VALUE * func_res, TP_DOMAIN * func_domain, TP_DOMAIN ** key_domain)
12389 {
12390  char *nullmap_ptr;
12391  OR_ATTRIBUTE **atts;
12392  int num_atts, i, k;
12393  DB_VALUE value;
12394  OR_BUF buf;
12395  int error = NO_ERROR;
12396  TP_DOMAIN *set_domain = NULL;
12397  TP_DOMAIN *next_domain = NULL;
12398 
12399  assert (index != NULL);
12400 
12401  num_atts = index->n_atts;
12402  atts = index->atts;
12403  if (func_res)
12404  {
12405  num_atts = index->func_index_info->attr_index_start + 1;
12406  }
12407  assert (PTR_ALIGN (midxkey->buf, INT_ALIGNMENT) == midxkey->buf);
12408 
12409  or_init (&buf, midxkey->buf, -1);
12410 
12411  nullmap_ptr = midxkey->buf;
12412  or_advance (&buf, pr_midxkey_init_boundbits (nullmap_ptr, num_atts));
12413  k = 0;
12414  for (i = 0; i < num_atts && k < num_atts; i++)
12415  {
12416  if (index->func_index_info && (i == index->func_index_info->col_id))
12417  {
12418  assert (func_domain != NULL);
12419 
12420  if (!db_value_is_null (func_res))
12421  {
12422  func_domain->type->index_writeval (&buf, func_res);
12423  OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
12424  }
12425 
12426  if (key_domain != NULL)
12427  {
12428  if (k == 0)
12429  {
12430  assert (set_domain == NULL);
12431  set_domain = tp_domain_copy (func_domain, 0);
12432  if (set_domain == NULL)
12433  {
12434  assert (false);
12435  goto error;
12436  }
12437  next_domain = set_domain;
12438  }
12439  else
12440  {
12441  next_domain->next = tp_domain_copy (func_domain, 0);
12442  if (next_domain->next == NULL)
12443  {
12444  assert (false);
12445  goto error;
12446  }
12447  next_domain = next_domain->next;
12448  }
12449  }
12450 
12451  k++;
12452  }
12453  if (k == num_atts)
12454  {
12455  break;
12456  }
12457  error = heap_midxkey_get_value (recdes, atts[i], &value, attrinfo);
12458  if (error == NO_ERROR && !db_value_is_null (&value))
12459  {
12460  atts[i]->domain->type->index_writeval (&buf, &value);
12461  OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
12462  }
12463 
12464  if (DB_NEED_CLEAR (&value))
12465  {
12466  pr_clear_value (&value);
12467  }
12468  if (key_domain != NULL)
12469  {
12470  if (k == 0)
12471  {
12472  assert (set_domain == NULL);
12473  set_domain = tp_domain_copy (atts[i]->domain, 0);
12474  if (set_domain == NULL)
12475  {
12476  assert (false);
12477  goto error;
12478  }
12479  if (index->asc_desc[i] != 0)
12480  {
12481  set_domain->is_desc = 1;
12482  }
12483  next_domain = set_domain;
12484  }
12485  else
12486  {
12487  next_domain->next = tp_domain_copy (atts[i]->domain, 0);
12488  if (next_domain->next == NULL)
12489  {
12490  assert (false);
12491  goto error;
12492  }
12493  if (index->asc_desc[i] != 0)
12494  {
12495  next_domain->next->is_desc = 1;
12496  }
12497  next_domain = next_domain->next;
12498  }
12499  }
12500  k++;
12501  }
12502 
12503  midxkey->size = CAST_BUFLEN (buf.ptr - buf.buffer);
12504  midxkey->ncolumns = num_atts;
12505  midxkey->domain = NULL;
12506 
12507  if (key_domain != NULL)
12508  {
12509  *key_domain = tp_domain_construct (DB_TYPE_MIDXKEY, (DB_OBJECT *) 0, num_atts, 0, set_domain);
12510 
12511  if (*key_domain)
12512  {
12513  *key_domain = tp_domain_cache (*key_domain);
12514  }
12515  else
12516  {
12517  assert (false);
12518  goto error;
12519  }
12520  }
12521 
12522  return midxkey;
12523 
12524 error:
12525 
12526  if (set_domain)
12527  {
12528  TP_DOMAIN *td, *next;
12529 
12530  for (td = set_domain, next = NULL; td != NULL; td = next)
12531  {
12532  next = td->next;
12533  tp_domain_free (td);
12534  }
12535  }
12536 
12537  return NULL;
12538 }
12539 
12540 /*
12541  * heap_midxkey_key_generate () -
12542  * return:
12543  * recdes(in):
12544  * midxkey(in):
12545  * att_ids(in):
12546  * attrinfo(in):
12547  * func_res(out):
12548  * func_col_id(in):
12549  * func_attr_index_start(in):
12550  * midxkey_domain(in):
12551  */
12552 static DB_MIDXKEY *
12553 heap_midxkey_key_generate (THREAD_ENTRY * thread_p, RECDES * recdes, DB_MIDXKEY * midxkey, int *att_ids,
12554  HEAP_CACHE_ATTRINFO * attrinfo, DB_VALUE * func_res, int func_col_id,
12555  int func_attr_index_start, TP_DOMAIN * midxkey_domain)
12556 {
12557  char *nullmap_ptr;
12558  int num_vals, i, reprid, k;
12559  OR_ATTRIBUTE *att;
12560  DB_VALUE value;
12561  OR_BUF buf;
12562  int error = NO_ERROR;
12563 
12564  /*
12565  * Make sure that we have the needed cached representation.
12566  */
12567 
12568  if (recdes != NULL)
12569  {
12570  reprid = or_rep_id (recdes);
12571 
12572  if (attrinfo->read_classrepr == NULL || attrinfo->read_classrepr->id != reprid)
12573  {
12574  /* Get the needed representation */
12575  if (heap_attrinfo_recache (thread_p, reprid, attrinfo) != NO_ERROR)
12576  {
12577  return NULL;
12578  }
12579  }
12580  }
12581 
12582  assert (PTR_ALIGN (midxkey->buf, INT_ALIGNMENT) == midxkey->buf);
12583 
12584  or_init (&buf, midxkey->buf, -1);
12585 
12586  nullmap_ptr = midxkey->buf;
12587 
12588  /* On constructing index */
12589  num_vals = attrinfo->num_values;
12590  if (func_res)
12591  {
12592  num_vals = func_attr_index_start + 1;
12593  }
12594  or_advance (&buf, pr_midxkey_init_boundbits (nullmap_ptr, num_vals));
12595  k = 0;
12596  for (i = 0; i < num_vals && k < num_vals; i++)
12597  {
12598  if (i == func_col_id)
12599  {
12600  if (!db_value_is_null (func_res))
12601  {
12603  domain->type->index_writeval (&buf, func_res);
12604  OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
12605  }
12606  k++;
12607  }
12608  if (k == num_vals)
12609  {
12610  break;
12611  }
12612  att = heap_locate_attribute (att_ids[i], attrinfo);
12613 
12614  error = heap_midxkey_get_value (recdes, att, &value, attrinfo);
12615  if (error == NO_ERROR && !db_value_is_null (&value))
12616  {
12617  att->domain->type->index_writeval (&buf, &value);
12618  OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
12619  }
12620 
12621  if (DB_NEED_CLEAR (&value))
12622  {
12623  pr_clear_value (&value);
12624  }
12625 
12626  k++;
12627  }
12628 
12629  if (value.need_clear == true)
12630  {
12631  pr_clear_value (&value);
12632  }
12633  midxkey->size = CAST_BUFLEN (buf.ptr - buf.buffer);
12634  midxkey->ncolumns = num_vals;
12635  midxkey->domain = midxkey_domain;
12636  midxkey->min_max_val.position = -1;
12637  midxkey->min_max_val.type = MIN_COLUMN;
12638 
12639  return midxkey;
12640 }
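The key-generation routines above lay a multi-column key out as a bound-bit nullmap followed by the packed values of the bound (non-NULL) columns, so a NULL column consumes no value bytes. The standalone sketch below illustrates only that layout idea; the fixed int encoding and the pack_int_key()/set_bound_bit() helpers are simplified stand-ins, not the server's OR_BUF/index_writeval machinery.

#include <stdio.h>
#include <string.h>

/* Simplified mock of the nullmap-plus-values layout used for multi-column keys.
 * One bit per column marks whether the column is bound (non-NULL); only bound
 * columns contribute value bytes after the bitmap. */
#define MAX_COLS 8

static void
set_bound_bit (unsigned char *nullmap, int col)
{
  nullmap[col / 8] |= (unsigned char) (1 << (col % 8));
}

static int
pack_int_key (const int *vals, const int *is_null, int ncols, unsigned char *buf)
{
  int nullmap_size = (MAX_COLS + 7) / 8;	/* fixed-size bitmap header */
  unsigned char *ptr = buf + nullmap_size;
  int i;

  memset (buf, 0, nullmap_size);
  for (i = 0; i < ncols; i++)
    {
      if (!is_null[i])
	{
	  memcpy (ptr, &vals[i], sizeof (int));	/* stand-in for "index_writeval" */
	  ptr += sizeof (int);
	  set_bound_bit (buf, i);
	}
    }
  return (int) (ptr - buf);	/* total key size, like midxkey->size */
}

int
main (void)
{
  int vals[3] = { 10, 20, 30 };
  int is_null[3] = { 0, 1, 0 };	/* middle column is NULL */
  unsigned char buf[64];
  int size = pack_int_key (vals, is_null, 3, buf);

  printf ("packed key size = %d bytes (bitmap + 2 bound values)\n", size);
  return 0;
}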
12641 
12642 /*
12643  * heap_attrinfo_generate_key () - Generate a key from the attribute information.
12644  * return: Pointer to DB_VALUE containing the key.
12645  * n_atts(in): Size of attribute ID array.
12646  * att_ids(in): Array of attribute ID's
12647  * atts_prefix_length (in): array of attributes prefix index length
12648  * attr_info(in): Pointer to attribute information structure. This
12649  * structure contains the BTID's, the attributes and their
12650  * values.
12651  * recdes(in):
12652  * db_valuep(in): Pointer to a DB_VALUE. This db_valuep will be used to
12653  * contain the set key in the case of multi-column B-trees.
12654  * It is ignored for single-column B-trees.
12655  * buf(in): Buffer of midxkey value encoding
12656  * func_index_info(in): function index definition, if key is based on function index
12657  * midxkey_domain(in): domain of midxkey
12658  *
12659  * Note: Return a key for the specified attribute ID's
12660  *
12661  * If n_atts=1, the key will be the value of that attribute
12662  * and we will return a pointer to that DB_VALUE.
12663  *
12664  * If n_atts>1, the key will be a sequence of the attribute
12665  * values. The set will be constructed and contained within
12666  * the passed DB_VALUE. A pointer to this DB_VALUE is returned.
12667  *
12668  * It is important for the caller to deallocate this memory
12669  * by calling pr_clear_value().
12670  */
12671 DB_VALUE *
12672 heap_attrinfo_generate_key (THREAD_ENTRY * thread_p, int n_atts, int *att_ids, int *atts_prefix_length,
12673  HEAP_CACHE_ATTRINFO * attr_info, RECDES * recdes, DB_VALUE * db_valuep, char *buf,
12674  FUNCTION_INDEX_INFO * func_index_info, TP_DOMAIN * midxkey_domain)
12675 {
12676  DB_VALUE *ret_valp;
12677  DB_VALUE *fi_res = NULL;
12678  int fi_attr_index_start = -1;
12679  int fi_col_id = -1;
12680 
12681  assert (DB_IS_NULL (db_valuep));
12682 
12683  if (func_index_info)
12684  {
12685  fi_attr_index_start = func_index_info->attr_index_start;
12686  fi_col_id = func_index_info->col_id;
12687  if (heap_eval_function_index (thread_p, func_index_info, n_atts, att_ids, attr_info, recdes, -1, db_valuep,
12688  NULL, NULL) != NO_ERROR)
12689  {
12690  return NULL;
12691  }
12692  fi_res = db_valuep;
12693  }
12694 
12695  /*
12696  * Multi-column index. The key is a sequence of the attribute values.
12697  * Return a pointer to the attributes DB_VALUE.
12698  */
12699  if ((n_atts > 1 && func_index_info == NULL) || (func_index_info && (func_index_info->attr_index_start + 1) > 1))
12700  {
12701  DB_MIDXKEY midxkey;
12702  int midxkey_size = recdes->length;
12703 
12704  if (func_index_info != NULL)
12705  {
12706  /* this will allocate more than is needed to store the key, but there is no decent way to calculate the
12707  * correct size */
12708  midxkey_size += OR_VALUE_ALIGNED_SIZE (fi_res);
12709  }
12710 
12711  /* Allocate storage for the buf of midxkey */
12712  if (midxkey_size > DBVAL_BUFSIZE)
12713  {
12714  midxkey.buf = (char *) db_private_alloc (thread_p, midxkey_size);
12715  if (midxkey.buf == NULL)
12716  {
12717  return NULL;
12718  }
12719  }
12720  else
12721  {
12722  midxkey.buf = buf;
12723  }
12724 
12725  if (heap_midxkey_key_generate (thread_p, recdes, &midxkey, att_ids, attr_info, fi_res, fi_col_id,
12726  fi_attr_index_start, midxkey_domain) == NULL)
12727  {
12728  return NULL;
12729  }
12730 
12731  (void) pr_clear_value (db_valuep);
12732 
12733  db_make_midxkey (db_valuep, &midxkey);
12734 
12735  if (midxkey_size > DBVAL_BUFSIZE)
12736  {
12737  db_valuep->need_clear = true;
12738  }
12739 
12740  ret_valp = db_valuep;
12741  }
12742  else
12743  {
12744  /*
12745  * Single-column index. The key is simply the value of the attribute.
12746  * Return a pointer to the attributes DB_VALUE.
12747  */
12748  if (func_index_info)
12749  {
12750  ret_valp = db_valuep;
12751  return ret_valp;
12752  }
12753 
12754  ret_valp = heap_attrinfo_access (att_ids[0], attr_info);
12755  if (ret_valp != NULL && atts_prefix_length && n_atts == 1)
12756  {
12757  if (*atts_prefix_length != -1 && QSTR_IS_ANY_CHAR_OR_BIT (DB_VALUE_DOMAIN_TYPE (ret_valp)))
12758  {
12759  /* prefix index */
12760  pr_clone_value (ret_valp, db_valuep);
12761  db_string_truncate (db_valuep, *atts_prefix_length);
12762  ret_valp = db_valuep;
12763  }
12764  }
12765  }
12766 
12767  return ret_valp;
12768 }
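For a single-column prefix index, the key produced above is simply the attribute value truncated to the configured prefix length before it goes into the B-tree. A minimal sketch of that truncation step, using a plain character buffer instead of DB_VALUE and db_string_truncate() (make_prefix_key() is a hypothetical helper):

#include <stdio.h>
#include <string.h>

/* Truncate a character key to the prefix length, mirroring the
 * prefix-index truncation step above (plain char* stand-in). */
static void
make_prefix_key (const char *attr_value, int prefix_len, char *key_out, size_t out_size)
{
  size_t len = strlen (attr_value);

  if (prefix_len >= 0 && (size_t) prefix_len < len)
    {
      len = (size_t) prefix_len;
    }
  if (len >= out_size)
    {
      len = out_size - 1;
    }
  memcpy (key_out, attr_value, len);
  key_out[len] = '\0';
}

int
main (void)
{
  char key[16];

  make_prefix_key ("abcdefghij", 4, key, sizeof (key));
  printf ("prefix key = %s\n", key);	/* prints "abcd" */
  return 0;
}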
12769 
12770 /*
12771  * heap_attrvalue_get_key () - Get B-tree key from attribute value(s)
12772  * return: Pointer to DB_VALUE containing the key.
12773  * btid_index(in): Index into an array of BTID's from the OR_CLASSREP
12774  * structure contained in idx_attrinfo.
12775  * idx_attrinfo(in): Pointer to attribute information structure. This
12776  * structure contains the BTID's, the attributes and their
12777  * values.
12778  * recdes(in):
12779  * btid(out): Pointer to a BTID. The value of the current BTID
12780  * will be returned.
12781  * db_value(in): Pointer to a DB_VALUE. This db_value will be used to
12782  * contain the set key in the case of multi-column B-trees.
12783  * It is ignored for single-column B-trees.
12784  * buf(in):
12785  * func_preds(in): cached function index expressions
12786  * key_domain(out): domain of key
12787  *
12788  * Note: Return a B-tree key for the specified B-tree ID.
12789  *
12790  * If the specified B-tree ID is associated with a single
12791  * attribute the key will be the value of that attribute
12792  * and we will return a pointer to that DB_VALUE.
12793  *
12794  * If the BTID is associated with multiple attributes the
12795  * key will be a set containing the values of the attributes.
12796  * The set will be constructed and contained within the
12797  * passed DB_VALUE. A pointer to this DB_VALUE is returned.
12798  * It is important for the caller to deallocate this memory
12799  * by calling pr_clear_value().
12800  */
12801 DB_VALUE *
12802 heap_attrvalue_get_key (THREAD_ENTRY * thread_p, int btid_index, HEAP_CACHE_ATTRINFO * idx_attrinfo, RECDES * recdes,
12803  BTID * btid, DB_VALUE * db_value, char *buf, FUNC_PRED_UNPACK_INFO * func_indx_pred,
12804  TP_DOMAIN ** key_domain)
12805 {
12806  OR_INDEX *index;
12807  int n_atts, reprid;
12808  DB_VALUE *ret_val = NULL;
12809  DB_VALUE *fi_res = NULL;
12810  TP_DOMAIN *fi_domain = NULL;
12811 
12812  assert (DB_IS_NULL (db_value));
12813 
12814  /*
12815  * Check to make sure that idx_attrinfo has been used;
12816  * it should never be empty.
12817  */
12818  if ((idx_attrinfo->num_values == -1) || (btid_index >= idx_attrinfo->last_classrepr->n_indexes))
12819  {
12820  return NULL;
12821  }
12822 
12823  /*
12824  * Make sure that we have the needed cached representation.
12825  */
12826  if (recdes != NULL)
12827  {
12828  reprid = or_rep_id (recdes);
12829 
12830  if (idx_attrinfo->read_classrepr == NULL || idx_attrinfo->read_classrepr->id != reprid)
12831  {
12832  /* Get the needed representation */
12833  if (heap_attrinfo_recache (thread_p, reprid, idx_attrinfo) != NO_ERROR)
12834  {
12835  return NULL;
12836  }
12837  }
12838  }
12839 
12840  index = &(idx_attrinfo->last_classrepr->indexes[btid_index]);
12841  n_atts = index->n_atts;
12842  *btid = index->btid;
12843 
12844  /* is function index */
12845  if (index->func_index_info)
12846  {
12847  if (heap_eval_function_index (thread_p, NULL, -1, NULL, idx_attrinfo, recdes, btid_index, db_value,
12848  func_indx_pred, &fi_domain) != NO_ERROR)
12849  {
12850  return NULL;
12851  }
12852  fi_res = db_value;
12853  }
12854 
12855  /*
12856  * Multi-column index. Construct the key as a sequence of attribute
12857  * values. The sequence is contained in the passed DB_VALUE. A
12858  * pointer to this DB_VALUE is returned.
12859  */
12860  if ((n_atts > 1 && recdes != NULL && index->func_index_info == NULL)
12861  || (index->func_index_info && (index->func_index_info->attr_index_start + 1) > 1))
12862  {
12863  DB_MIDXKEY midxkey;
12864  int midxkey_size = recdes->length;
12865 
12866  if (index->func_index_info != NULL)
12867  {
12868  /* this will allocate more than is needed to store the key, but there is no decent way to calculate the
12869  * correct size */
12870  midxkey_size += OR_VALUE_ALIGNED_SIZE (fi_res);
12871  }
12872 
12873  /* Allocate storage for the buf of midxkey */
12874  if (midxkey_size > DBVAL_BUFSIZE)
12875  {
12876  midxkey.buf = (char *) db_private_alloc (thread_p, midxkey_size);
12877  if (midxkey.buf == NULL)
12878  {
12879  return NULL;
12880  }
12881  }
12882  else
12883  {
12884  midxkey.buf = buf;
12885  }
12886 
12887  midxkey.min_max_val.position = -1;
12888 
12889  if (heap_midxkey_key_get (recdes, &midxkey, index, idx_attrinfo, fi_res, fi_domain, key_domain) == NULL)
12890  {
12891  return NULL;
12892  }
12893 
12894  (void) pr_clear_value (db_value);
12895 
12896  db_make_midxkey (db_value, &midxkey);
12897 
12898  if (midxkey_size > DBVAL_BUFSIZE)
12899  {
12900  db_value->need_clear = true;
12901  }
12902 
12903  ret_val = db_value;
12904  }
12905  else
12906  {
12907  /*
12908  * Single-column index. The key is simply the value of the attribute.
12909  * Return a pointer to the attributes DB_VALUE.
12910  */
12911 
12912  /* Find the matching attribute identified by the attribute ID */
12913  if (fi_res)
12914  {
12915  ret_val = fi_res;
12916  if (key_domain != NULL)
12917  {
12918  assert (fi_domain != NULL);
12919  *key_domain = tp_domain_cache (fi_domain);
12920  }
12921  return ret_val;
12922  }
12923  ret_val = heap_attrinfo_access (index->atts[0]->id, idx_attrinfo);
12924 
12925  if (ret_val != NULL && index->attrs_prefix_length != NULL && index->attrs_prefix_length[0] != -1)
12926  {
12927  if (QSTR_IS_ANY_CHAR_OR_BIT (DB_VALUE_DOMAIN_TYPE (ret_val)))
12928  {
12929  pr_clone_value (ret_val, db_value);
12930  db_string_truncate (db_value, index->attrs_prefix_length[0]);
12931  ret_val = db_value;
12932  }
12933  }
12934 
12935  if (key_domain != NULL)
12936  {
12937  if (index->attrs_prefix_length != NULL && index->attrs_prefix_length[0] != -1)
12938  {
12939  TP_DOMAIN *attr_dom;
12940  TP_DOMAIN *prefix_dom;
12941  DB_TYPE attr_type;
12942 
12943  attr_type = TP_DOMAIN_TYPE (index->atts[0]->domain);
12944 
12945  assert (QSTR_IS_ANY_CHAR_OR_BIT (attr_type));
12946 
12947  attr_dom = index->atts[0]->domain;
12948 
12949  prefix_dom =
12950  tp_domain_find_charbit (attr_type, TP_DOMAIN_CODESET (attr_dom), TP_DOMAIN_COLLATION (attr_dom),
12951  TP_DOMAIN_COLLATION_FLAG (attr_dom), attr_dom->precision, attr_dom->is_desc);
12952 
12953  if (prefix_dom == NULL)
12954  {
12955  prefix_dom = tp_domain_construct (attr_type, NULL, index->attrs_prefix_length[0], 0, NULL);
12956  if (prefix_dom != NULL)
12957  {
12958  prefix_dom->codeset = TP_DOMAIN_CODESET (attr_dom);
12959  prefix_dom->collation_id = TP_DOMAIN_COLLATION (attr_dom);
12960  prefix_dom->collation_flag = TP_DOMAIN_COLLATION_FLAG (attr_dom);
12961  prefix_dom->is_desc = attr_dom->is_desc;
12962  }
12963  }
12964 
12965  if (prefix_dom == NULL)
12966  {
12967  return NULL;
12968  }
12969  else
12970  {
12971  *key_domain = tp_domain_cache (prefix_dom);
12972  }
12973  }
12974  else
12975  {
12976  *key_domain = tp_domain_cache (index->atts[0]->domain);
12977  }
12978  }
12979  }
12980 
12981  return ret_val;
12982 }
12983 
12984 /*
12985  * heap_indexinfo_get_btid () -
12986  * return:
12987  * btid_index(in):
12988  * attrinfo(in):
12989  */
12990 BTID *
12991 heap_indexinfo_get_btid (int btid_index, HEAP_CACHE_ATTRINFO * attrinfo)
12992 {
12993  if (btid_index != -1 && btid_index < attrinfo->last_classrepr->n_indexes)
12994  {
12995  return &(attrinfo->last_classrepr->indexes[btid_index].btid);
12996  }
12997  else
12998  {
12999  return NULL;
13000  }
13001 }
13002 
13003 /*
13004  * heap_indexinfo_get_num_attrs () -
13005  * return:
13006  * btid_index(in):
13007  * attrinfo(in):
13008  */
13009 int
13010 heap_indexinfo_get_num_attrs (int btid_index, HEAP_CACHE_ATTRINFO * attrinfo)
13011 {
13012  if (btid_index != -1 && btid_index < attrinfo->last_classrepr->n_indexes)
13013  {
13014  return attrinfo->last_classrepr->indexes[btid_index].n_atts;
13015  }
13016  else
13017  {
13018  return 0;
13019  }
13020 }
13021 
13022 /*
13023  * heap_indexinfo_get_attrids () -
13024  * return: NO_ERROR
13025  * btid_index(in):
13026  * attrinfo(in):
13027  * attrids(in):
13028  */
13029 int
13030 heap_indexinfo_get_attrids (int btid_index, HEAP_CACHE_ATTRINFO * attrinfo, ATTR_ID * attrids)
13031 {
13032  int i;
13033  int ret = NO_ERROR;
13034 
13035  if (btid_index != -1 && (btid_index < attrinfo->last_classrepr->n_indexes))
13036  {
13037  for (i = 0; i < attrinfo->last_classrepr->indexes[btid_index].n_atts; i++)
13038  {
13039  attrids[i] = attrinfo->last_classrepr->indexes[btid_index].atts[i]->id;
13040  }
13041  }
13042 
13043  return ret;
13044 }
13045 
13046 /*
13047  * heap_indexinfo_get_attrs_prefix_length () -
13048  * return: NO_ERROR
13049  * btid_index(in):
13050  * attrinfo(in):
13051  * keys_prefix_length(in/out):
13052  */
13053 int
13054 heap_indexinfo_get_attrs_prefix_length (int btid_index, HEAP_CACHE_ATTRINFO * attrinfo, int *attrs_prefix_length,
13055  int len_attrs_prefix_length)
13056 {
13057  int i, length = -1;
13058  int ret = NO_ERROR;
13059 
13060  if (attrs_prefix_length && len_attrs_prefix_length > 0)
13061  {
13062  for (i = 0; i < len_attrs_prefix_length; i++)
13063  {
13064  attrs_prefix_length[i] = -1;
13065  }
13066  }
13067 
13068  if (btid_index != -1 && (btid_index < attrinfo->last_classrepr->n_indexes))
13069  {
13070  if (attrinfo->last_classrepr->indexes[btid_index].attrs_prefix_length && attrs_prefix_length)
13071  {
13072  length = MIN (attrinfo->last_classrepr->indexes[btid_index].n_atts, len_attrs_prefix_length);
13073  for (i = 0; i < length; i++)
13074  {
13075  attrs_prefix_length[i] = attrinfo->last_classrepr->indexes[btid_index].attrs_prefix_length[i];
13076  }
13077  }
13078  }
13079 
13080  return ret;
13081 }
13082 
13083 /*
13084  * heap_get_index_with_name () - get BTID of index with name index_name
13085  * return : error code or NO_ERROR
13086  * thread_p (in) :
13087  * class_oid (in) : class OID
13088  * index_name (in): index name
13089  * btid (in/out) : btid
13090  */
13091 int
13092 heap_get_index_with_name (THREAD_ENTRY * thread_p, OID * class_oid, const char *index_name, BTID * btid)
13093 {
13094  OR_CLASSREP *classrep = NULL;
13095  int idx_in_cache, i;
13096  int error = NO_ERROR;
13097 
13098  BTID_SET_NULL (btid);
13099 
13100  /* get the class representation so that we can access the indexes */
13101  classrep = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &idx_in_cache);
13102  if (classrep == NULL)
13103  {
13104  return ER_FAILED;
13105  }
13106 
13107  for (i = 0; i < classrep->n_indexes; i++)
13108  {
13109  if (strcasecmp (classrep->indexes[i].btname, index_name) == 0)
13110  {
13111  BTID_COPY (btid, &classrep->indexes[i].btid);
13112  break;
13113  }
13114  }
13115  if (classrep != NULL)
13116  {
13117  heap_classrepr_free_and_init (classrep, &idx_in_cache);
13118  }
13119 
13120  return error;
13121 }
13122 
13123 /*
13124  * heap_get_indexinfo_of_btid () -
13125  * return: NO_ERROR
13126  * class_oid(in):
13127  * btid(in):
13128  * type(in):
13129  * num_attrs(in):
13130  * attr_ids(in):
13131  * btnamepp(in):
13132  */
13133 int
13134 heap_get_indexinfo_of_btid (THREAD_ENTRY * thread_p, const OID * class_oid, const BTID * btid, BTREE_TYPE * type,
13135  int *num_attrs, ATTR_ID ** attr_ids, int **attrs_prefix_length, char **btnamepp,
13136  int *func_index_col_id)
13137 {
13138  OR_CLASSREP *classrepp;
13139  OR_INDEX *indexp;
13140  int idx_in_cache, i, n = 0;
13141  int idx;
13142  int ret = NO_ERROR;
13143 
13144  /* initial value of output parameters */
13145  if (num_attrs)
13146  {
13147  *num_attrs = 0;
13148  }
13149 
13150  if (attr_ids)
13151  {
13152  *attr_ids = NULL;
13153  }
13154 
13155  if (btnamepp)
13156  {
13157  *btnamepp = NULL;
13158  }
13159 
13160  if (attrs_prefix_length)
13161  {
13162  *attrs_prefix_length = NULL;
13163  }
13164 
13165  if (func_index_col_id)
13166  {
13167  *func_index_col_id = -1;
13168  }
13169 
13170  /* get the class representation so that we can access the indexes */
13171  classrepp = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &idx_in_cache);
13172  if (classrepp == NULL)
13173  {
13174  goto exit_on_error;
13175  }
13176 
13177  /* get the idx of the index which corresponds to the BTID */
13178  idx = heap_classrepr_find_index_id (classrepp, btid);
13179  if (idx < 0)
13180  {
13181  goto exit_on_error;
13182  }
13183  indexp = &classrepp->indexes[idx];
13184 
13185  /* get the type of this index */
13186  if (type)
13187  {
13188  *type = indexp->type;
13189  }
13190 
13191  /* get the number of attributes associated with this index */
13192  if (num_attrs)
13193  {
13194  *num_attrs = n = indexp->n_atts;
13195  }
13196  /* allocate a new attribute ID array */
13197  if (attr_ids)
13198  {
13199  *attr_ids = (ATTR_ID *) db_private_alloc (thread_p, n * sizeof (ATTR_ID));
13200 
13201  if (*attr_ids == NULL)
13202  {
13203  goto exit_on_error;
13204  }
13205 
13206  /* fill the array with the attribute ID's */
13207  for (i = 0; i < n; i++)
13208  {
13209  (*attr_ids)[i] = indexp->atts[i]->id;
13210  }
13211  }
13212 
13213  if (btnamepp)
13214  {
13215  *btnamepp = strdup (indexp->btname);
13216  }
13217 
13218  if (attrs_prefix_length && indexp->type == BTREE_INDEX)
13219  {
13220  *attrs_prefix_length = (int *) db_private_alloc (thread_p, n * sizeof (int));
13221 
13222  if (*attrs_prefix_length == NULL)
13223  {
13224  goto exit_on_error;
13225  }
13226 
13227  for (i = 0; i < n; i++)
13228  {
13229  if (indexp->attrs_prefix_length != NULL)
13230  {
13231  (*attrs_prefix_length)[i] = indexp->attrs_prefix_length[i];
13232  }
13233  else
13234  {
13235  (*attrs_prefix_length)[i] = -1;
13236  }
13237  }
13238  }
13239 
13240  if (func_index_col_id && indexp->func_index_info)
13241  {
13242  *func_index_col_id = indexp->func_index_info->col_id;
13243  }
13244 
13245  /* free the class representation */
13246  heap_classrepr_free_and_init (classrepp, &idx_in_cache);
13247 
13248  return ret;
13249 
13250 exit_on_error:
13251 
13252  if (attr_ids && *attr_ids)
13253  {
13254  db_private_free_and_init (thread_p, *attr_ids);
13255  }
13256 
13257  if (btnamepp && *btnamepp)
13258  {
13259  free_and_init (*btnamepp);
13260  }
13261 
13262  if (attrs_prefix_length)
13263  {
13264  if (*attrs_prefix_length)
13265  {
13266  db_private_free_and_init (thread_p, *attrs_prefix_length);
13267  }
13268  *attrs_prefix_length = NULL;
13269  }
13270 
13271  if (classrepp)
13272  {
13273  heap_classrepr_free_and_init (classrepp, &idx_in_cache);
13274  }
13275 
13276  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
13277 }
13278 
13279 /*
13280  * heap_get_referenced_by () - Find objects referenced by given object
13281  * return: int (object count or -1)
13282  * class_oid(in):
13283  * obj_oid(in): The object identifier
13284  * recdes(in): Object disk representation
13285  * max_oid_cnt(in/out): Size of OID list in OIDs
13286  * oid_list(in): Set to the array of referenced OIDs
13287  * (This area may be reallocated; thus, it should have
13288  * been allocated with malloc)
13289  *
13290  * Note: This function finds object identifiers referenced by the
13291  * given instance. The OID references are stored in the given
13292  * OID list. If the oid_list is not large enough to hold the
13293  * number of instances, the area (i.e., oid_list) is expanded
13294  * using realloc. The number of OID references is returned by the
13295  * function.
13296  *
13297  * Note: The oid_list pointer should be freed by the caller.
13298  * Note: Nested-sets, that is, set-of-sets inside the object are not traced.
13299  * Note: This function does not remove duplicate oids from the list, the
13300  * caller is responsible for checking and removing them if needed.
13301  */
13302 int
13303 heap_get_referenced_by (THREAD_ENTRY * thread_p, OID * class_oid, const OID * obj_oid, RECDES * recdes,
13304  int *max_oid_cnt, OID ** oid_list)
13305 {
13306  HEAP_CACHE_ATTRINFO attr_info;
13307  DB_TYPE dbtype;
13308  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
13309  DB_VALUE db_value;
13310  DB_SET *set;
13311  OID *oid_ptr; /* iterator on oid_list */
13312  OID *attr_oid;
13313  int oid_cnt; /* number of OIDs fetched */
13314  int cnt; /* set element count */
13315  int new_max_oid;
13316  int i, j; /* loop counters */
13317 
13318  /*
13319  * We don't support class references in this function
13320  */
13321  if (oid_is_root (class_oid))
13322  {
13323  return 0;
13324  }
13325 
13326  if ((heap_attrinfo_start_refoids (thread_p, class_oid, &attr_info) != NO_ERROR)
13327  || heap_attrinfo_read_dbvalues (thread_p, obj_oid, recdes, NULL, &attr_info) != NO_ERROR)
13328  {
13329  goto error;
13330  }
13331 
13332  if (*oid_list == NULL)
13333  {
13334  *max_oid_cnt = 0;
13335  }
13336  else if (*max_oid_cnt <= 0)
13337  {
13338  /*
13339  * We better release oid_list since we do not know its size. This may
13340  * be a bug.
13341  */
13342  free_and_init (*oid_list);
13343  *max_oid_cnt = 0;
13344  }
13345 
13346  /*
13347  * Now start searching the attributes that may reference objects
13348  */
13349  oid_cnt = 0;
13350  oid_ptr = *oid_list;
13351 
13352  for (i = 0; i < attr_info.num_values; i++)
13353  {
13354  value = &attr_info.values[i];
13355  dbtype = db_value_type (&value->dbvalue);
13356  if (dbtype == DB_TYPE_OID && !db_value_is_null (&value->dbvalue)
13357  && (attr_oid = db_get_oid (&value->dbvalue)) != NULL && !OID_ISNULL (attr_oid))
13358  {
13359  /*
13360  * A simple attribute with reference an object (OID)
13361  */
13362  if (oid_cnt == *max_oid_cnt)
13363  {
13364  /*
13365  * We need to expand the area to deposit more OIDs.
13366  * Use 50% of the current size for expansion and at least 10 OIDs
13367  */
13368  if (*max_oid_cnt <= 0)
13369  {
13370  *max_oid_cnt = 0;
13371  new_max_oid = attr_info.num_values;
13372  }
13373  else
13374  {
13375  new_max_oid = (int) (*max_oid_cnt * 1.5) + 1;
13376  if (new_max_oid < attr_info.num_values)
13377  {
13378  new_max_oid = attr_info.num_values;
13379  }
13380  }
13381 
13382  if (new_max_oid < 10)
13383  {
13384  new_max_oid = 10;
13385  }
13386 
13387  oid_ptr = (OID *) realloc (*oid_list, new_max_oid * sizeof (OID));
13388  if (oid_ptr == NULL)
13389  {
13390  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, new_max_oid * sizeof (OID));
13391  goto error;
13392  }
13393 
13394  /*
13395  * Set the pointers and advance to current area pointer
13396  */
13397  *oid_list = oid_ptr;
13398  oid_ptr += *max_oid_cnt;
13399  *max_oid_cnt = new_max_oid;
13400  }
13401  *oid_ptr = *attr_oid;
13402  oid_ptr++;
13403  oid_cnt++;
13404  }
13405  else
13406  {
13407  if (TP_IS_SET_TYPE (dbtype))
13408  {
13409  /*
13410  * A set which may or may not reference objects (OIDs)
13411  * Go through each element of the set
13412  */
13413 
13414  set = db_get_set (&value->dbvalue);
13415  cnt = db_set_size (set);
13416 
13417  for (j = 0; j < cnt; j++)
13418  {
13419  if (db_set_get (set, j, &db_value) != NO_ERROR)
13420  {
13421  goto error;
13422  }
13423 
13424  dbtype = db_value_type (&db_value);
13425  if (dbtype == DB_TYPE_OID && !db_value_is_null (&db_value)
13426  && (attr_oid = db_get_oid (&db_value)) != NULL && !OID_ISNULL (attr_oid))
13427  {
13428  if (oid_cnt == *max_oid_cnt)
13429  {
13430  /*
13431  * We need to expand the area to deposit more OIDs.
13432  * Use 50% of the current size for expansion.
13433  */
13434  if (*max_oid_cnt <= 0)
13435  {
13436  *max_oid_cnt = 0;
13437  new_max_oid = attr_info.num_values;
13438  }
13439  else
13440  {
13441  new_max_oid = (int) (*max_oid_cnt * 1.5) + 1;
13442  if (new_max_oid < attr_info.num_values)
13443  {
13444  new_max_oid = attr_info.num_values;
13445  }
13446  }
13447  if (new_max_oid < 10)
13448  {
13449  new_max_oid = 10;
13450  }
13451 
13452  oid_ptr = (OID *) realloc (*oid_list, new_max_oid * sizeof (OID));
13453  if (oid_ptr == NULL)
13454  {
13455  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
13456  new_max_oid * sizeof (OID));
13457  goto error;
13458  }
13459 
13460  /*
13461  * Set the pointers and advance to current area pointer
13462  */
13463  *oid_list = oid_ptr;
13464  oid_ptr += *max_oid_cnt;
13465  *max_oid_cnt = new_max_oid;
13466  }
13467  *oid_ptr = *attr_oid;
13468  oid_ptr++;
13469  oid_cnt++;
13470  }
13471  }
13472  }
13473  }
13474  }
13475 
13476  /* free object area if no OIDs were encountered */
13477  if (oid_cnt == 0)
13478  /*
13479  * Unless we check whether *oid_list is NULL,
13480  * it may cause double-free of oid_list.
13481  */
13482  if (*oid_list != NULL)
13483  {
13484  free_and_init (*oid_list);
13485  }
13486 
13487  heap_attrinfo_end (thread_p, &attr_info);
13488 
13489  /* return number of OIDs fetched */
13490  return oid_cnt;
13491 
13492 error:
13493  /* XXXXXXX */
13494 
13495  free_and_init (*oid_list);
13496  *max_oid_cnt = 0;
13497  heap_attrinfo_end (thread_p, &attr_info);
13498 
13499  return ER_FAILED;
13500 }
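Both expansion paths above grow the OID array by roughly 50% of its current capacity, never below the attribute count and never below 10 slots, before retrying the append. The following self-contained sketch shows the same growth policy on a plain int array; grow_list() is an illustrative name, not a server function.

#include <stdio.h>
#include <stdlib.h>

/* Grow an array using the same policy as the OID list above:
 * new capacity = old * 1.5 + 1, at least `min_hint`, and at least 10. */
static int *
grow_list (int *list, int *capacity, int min_hint)
{
  int new_cap;
  int *new_list;

  if (*capacity <= 0)
    {
      new_cap = min_hint;
    }
  else
    {
      new_cap = (int) (*capacity * 1.5) + 1;
      if (new_cap < min_hint)
	{
	  new_cap = min_hint;
	}
    }
  if (new_cap < 10)
    {
      new_cap = 10;
    }

  new_list = (int *) realloc (list, new_cap * sizeof (int));
  if (new_list != NULL)
    {
      *capacity = new_cap;
    }
  return new_list;		/* NULL means out of memory; caller keeps the old list */
}

int
main (void)
{
  int *list = NULL;
  int capacity = 0;

  list = grow_list (list, &capacity, 4);
  printf ("capacity after first growth = %d\n", capacity);	/* 10 */
  list = grow_list (list, &capacity, 4);
  printf ("capacity after second growth = %d\n", capacity);	/* 16 */
  free (list);
  return 0;
}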
13501 
13502 /*
13503  * heap_prefetch () - Prefetch objects
13504  * return: NO_ERROR
13505  * fetch_area is set to point to fetching area
13506  * class_oid(in): Class identifier for the instance oid
13507  * oid(in): Object that must be fetched if its cached state is invalid
13508  * prefetch(in): Prefetch structure
13509  *
13510  */
13511 int
13512 heap_prefetch (THREAD_ENTRY * thread_p, OID * class_oid, const OID * oid, LC_COPYAREA_DESC * prefetch)
13513 {
13514  VPID vpid;
13515  PAGE_PTR pgptr = NULL;
13516  int round_length;
13517  INT16 right_slotid, left_slotid;
13518  HEAP_DIRECTION direction;
13519  SCAN_CODE scan;
13520  int ret = NO_ERROR;
13521 
13522  /*
13523  * Prefetch other instances (i.e., neighbors) stored on the same page
13524  * of the given object OID. Relocated instances and instances in overflow are
13525  * not prefetched, nor instances that do not belong to the given class.
13526  * Prefetching stops once an error, such as out of space, is encountered.
13527  */
13528 
13529  vpid.volid = oid->volid;
13530  vpid.pageid = oid->pageid;
13531 
13532  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
13533  if (pgptr == NULL)
13534  {
13535  assert (er_errid () != NO_ERROR);
13536  ret = er_errid ();
13537  if (ret == ER_PB_BAD_PAGEID)
13538  {
13539  ret = ER_HEAP_UNKNOWN_OBJECT;
13540  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 3, oid->volid, oid->pageid, oid->slotid);
13541  }
13542 
13543  /*
13544  * Problems getting the page.. forget about prefetching...
13545  */
13546  return ret;
13547  }
13548 
13549  right_slotid = oid->slotid;
13550  left_slotid = oid->slotid;
13551  direction = HEAP_DIRECTION_BOTH;
13552 
13553  while (direction != HEAP_DIRECTION_NONE)
13554  {
13555  /*
13556  * Don't include the desired object again, forwarded instances, nor
13557  * instances that belong to other classes
13558  */
13559 
13560  /* Check to the right */
13561  if (direction == HEAP_DIRECTION_RIGHT || direction == HEAP_DIRECTION_BOTH)
13562  {
13563  scan = spage_next_record (pgptr, &right_slotid, prefetch->recdes, COPY);
13564  if (scan == S_SUCCESS && spage_get_record_type (pgptr, right_slotid) == REC_HOME)
13565  {
13566  prefetch->mobjs->num_objs++;
13567  COPY_OID (&((*prefetch->obj)->class_oid), class_oid);
13568  (*prefetch->obj)->oid.volid = oid->volid;
13569  (*prefetch->obj)->oid.pageid = oid->pageid;
13570  (*prefetch->obj)->oid.slotid = right_slotid;
13571  (*prefetch->obj)->length = prefetch->recdes->length;
13572  (*prefetch->obj)->offset = *prefetch->offset;
13573  (*prefetch->obj)->operation = LC_FETCH;
13574  (*prefetch->obj) = LC_NEXT_ONEOBJ_PTR_IN_COPYAREA (*prefetch->obj);
13575  round_length = DB_ALIGN (prefetch->recdes->length, HEAP_MAX_ALIGN);
13576  *prefetch->offset += round_length;
13577  prefetch->recdes->data += round_length;
13578  prefetch->recdes->area_size -= (round_length + sizeof (*(*prefetch->obj)));
13579  }
13580  else if (scan != S_SUCCESS)
13581  {
13582  /* Stop prefetching objects from the right */
13583  direction = ((direction == HEAP_DIRECTION_BOTH) ? HEAP_DIRECTION_LEFT : HEAP_DIRECTION_NONE);
13584  }
13585  }
13586 
13587  /* Check to the left */
13588  if (direction == HEAP_DIRECTION_LEFT || direction == HEAP_DIRECTION_BOTH)
13589  {
13590  scan = spage_previous_record (pgptr, &left_slotid, prefetch->recdes, COPY);
13591  if (scan == S_SUCCESS && left_slotid != HEAP_HEADER_AND_CHAIN_SLOTID
13592  && spage_get_record_type (pgptr, left_slotid) == REC_HOME)
13593  {
13594  prefetch->mobjs->num_objs++;
13595  COPY_OID (&((*prefetch->obj)->class_oid), class_oid);
13596  (*prefetch->obj)->oid.volid = oid->volid;
13597  (*prefetch->obj)->oid.pageid = oid->pageid;
13598  (*prefetch->obj)->oid.slotid = left_slotid;
13599  (*prefetch->obj)->length = prefetch->recdes->length;
13600  (*prefetch->obj)->offset = *prefetch->offset;
13601  (*prefetch->obj)->operation = LC_FETCH;
13602  (*prefetch->obj) = LC_NEXT_ONEOBJ_PTR_IN_COPYAREA (*prefetch->obj);
13603  round_length = DB_ALIGN (prefetch->recdes->length, HEAP_MAX_ALIGN);
13604  *prefetch->offset += round_length;
13605  prefetch->recdes->data += round_length;
13606  prefetch->recdes->area_size -= (round_length + sizeof (*(*prefetch->obj)));
13607  }
13608  else if (scan != S_SUCCESS)
13609  {
13610  /* Stop prefetching objects from the left */
13611  direction = ((direction == HEAP_DIRECTION_BOTH) ? HEAP_DIRECTION_RIGHT : HEAP_DIRECTION_NONE);
13612  }
13613  }
13614  }
13615 
13616  pgbuf_unfix_and_init (thread_p, pgptr);
13617 
13618  return ret;
13619 }
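heap_prefetch() fans out from the requested slot in both directions and drops a direction as soon as a scan on that side fails, so the copy area fills with the nearest neighbors first. The control flow can be sketched with a small standalone program over integer slots; scan_right()/scan_left() are hypothetical stand-ins for spage_next_record()/spage_previous_record():

#include <stdio.h>
#include <stdbool.h>

enum direction { DIR_NONE, DIR_LEFT, DIR_RIGHT, DIR_BOTH };

/* Stand-ins for the slotted-page scans: succeed while the slot stays in range. */
static bool
scan_right (int nslots, int *slot)
{
  return ++(*slot) < nslots;
}

static bool
scan_left (int *slot)
{
  return --(*slot) > 0;		/* slot 0 plays the role of the header slot */
}

int
main (void)
{
  int nslots = 6;		/* pretend slots 1..5 hold records */
  int start = 3;
  int right = start, left = start;
  enum direction dir = DIR_BOTH;

  while (dir != DIR_NONE)
    {
      if (dir == DIR_RIGHT || dir == DIR_BOTH)
	{
	  if (scan_right (nslots, &right))
	    printf ("prefetch slot %d (right)\n", right);
	  else
	    dir = (dir == DIR_BOTH) ? DIR_LEFT : DIR_NONE;	/* stop the right side */
	}
      if (dir == DIR_LEFT || dir == DIR_BOTH)
	{
	  if (scan_left (&left))
	    printf ("prefetch slot %d (left)\n", left);
	  else
	    dir = (dir == DIR_BOTH) ? DIR_RIGHT : DIR_NONE;	/* stop the left side */
	}
    }
  return 0;
}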
13620 
13621 static DISK_ISVALID
13622 heap_check_all_pages_by_heapchain (THREAD_ENTRY * thread_p, HFID * hfid, HEAP_CHKALL_RELOCOIDS * chk_objs,
13623  INT32 * num_checked)
13624 {
13625  VPID vpid;
13626  VPID *vpidptr_ofpgptr;
13627  INT32 npages = 0;
13628  DISK_ISVALID valid_pg = DISK_VALID;
13629  bool spg_error = false;
13630  PGBUF_WATCHER pg_watcher;
13631  PGBUF_WATCHER old_pg_watcher;
13632 
13633  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
13634  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
13635 
13636  vpid.volid = hfid->vfid.volid;
13637  vpid.pageid = hfid->hpgid;
13638 
13639  while (!VPID_ISNULL (&vpid) && valid_pg == DISK_VALID)
13640  {
13641  npages++;
13642 
13643  valid_pg = file_check_vpid (thread_p, &hfid->vfid, &vpid);
13644  if (valid_pg != DISK_VALID)
13645  {
13646  break;
13647  }
13648 
13649  pg_watcher.pgptr =
13650  heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, NULL, &pg_watcher);
13651  if (old_pg_watcher.pgptr != NULL)
13652  {
13653  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
13654  }
13655  if (pg_watcher.pgptr == NULL)
13656  {
13657  /* something went wrong, return */
13658  valid_pg = DISK_ERROR;
13659  break;
13660  }
13661 #ifdef SPAGE_DEBUG
13662  if (spage_check (thread_p, pg_watcher.pgptr) != NO_ERROR)
13663  {
13664  /* if spage has an error, try to go on. but, this page is corrupted. */
13665  spg_error = true;
13666  }
13667 #endif
13668 
13669  if (heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &vpid) != NO_ERROR)
13670  {
13671  pgbuf_ordered_unfix (thread_p, &pg_watcher);
13672  /* something went wrong, return */
13673  valid_pg = DISK_ERROR;
13674  break;
13675  }
13676 
13677  vpidptr_ofpgptr = pgbuf_get_vpid_ptr (pg_watcher.pgptr);
13678  if (VPID_EQ (&vpid, vpidptr_ofpgptr))
13679  {
13681  hfid->vfid.fileid, hfid->hpgid);
13682  VPID_SET_NULL (&vpid);
13683  valid_pg = DISK_ERROR;
13684  }
13685 
13686  if (chk_objs != NULL)
13687  {
13688  valid_pg = heap_chkreloc_next (thread_p, chk_objs, pg_watcher.pgptr);
13689  }
13690 
13691  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
13692  }
13693 
13694  if (old_pg_watcher.pgptr != NULL)
13695  {
13696  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
13697  }
13698  assert (pg_watcher.pgptr == NULL);
13699 
13700  *num_checked = npages;
13701  return (spg_error == true) ? DISK_ERROR : valid_pg;
13702 }
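The chain walk above counts pages, validates every VPID against the file manager, and treats a next pointer that resolves back to the page just visited as a broken chain. A simplified, self-contained version of that walk over an in-memory array of fake page descriptors (the struct and check_chain() are illustrative only, not server types):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative page descriptor: each page knows the id of the next page
 * in the heap chain (-1 terminates the chain). */
struct fake_page
{
  int id;
  int next_id;
};

/* Walk the chain, counting pages and flagging a page whose next pointer
 * refers back to itself, as the heap-chain check above does. */
static bool
check_chain (const struct fake_page *pages, int npages_total, int head_id, int *num_checked)
{
  int id = head_id;

  *num_checked = 0;
  while (id != -1)
    {
      if (id < 0 || id >= npages_total)
	{
	  return false;		/* next pointer leads outside the file */
	}
      (*num_checked)++;
      if (pages[id].next_id == pages[id].id)
	{
	  return false;		/* self-referencing next pointer: broken chain */
	}
      id = pages[id].next_id;
    }
  return true;
}

int
main (void)
{
  struct fake_page pages[3] = { {0, 1}, {1, 2}, {2, -1} };
  int checked = 0;
  bool ok = check_chain (pages, 3, 0, &checked);

  printf ("chain ok = %d, pages checked = %d\n", ok, checked);
  return 0;
}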
13703 
13704 #if defined (SA_MODE)
13705 /*
13706  * heap_file_map_chkreloc () - FILE_MAP_PAGE_FUNC to check relocations.
13707  *
13708  * return : error code
13709  * thread_p (in) : thread entry
13710  * page (in) : heap page pointer
13711  * stop (in) : not used
13712  * args (in) : HEAP_CHKALL_RELOCOIDS *
13713  */
13714 static int
13715 heap_file_map_chkreloc (THREAD_ENTRY * thread_p, PAGE_PTR * page, bool * stop, void *args)
13716 {
13717  HEAP_CHKALL_RELOCOIDS *chk_objs = (HEAP_CHKALL_RELOCOIDS *) args;
13718 
13719  DISK_ISVALID valid = DISK_VALID;
13720  int error_code = NO_ERROR;
13721 
13722  valid = heap_chkreloc_next (thread_p, chk_objs, *page);
13723  if (valid == DISK_INVALID)
13724  {
13725  assert_release (false);
13726  return ER_FAILED;
13727  }
13728  else if (valid == DISK_ERROR)
13729  {
13730  ASSERT_ERROR_AND_SET (error_code);
13731  return error_code;
13732  }
13733  return NO_ERROR;
13734 }
13735 
13736 /*
13737  * heap_check_all_pages_by_file_table () - check relocations using file table
13738  *
13739  * return : DISK_INVALID for unexpected errors, DISK_ERROR for expected errors, DISK_VALID for successful check
13740  * thread_p (in) : thread entry
13741  * hfid (in) : heap file identifier
13742  * chk_objs (in) : check relocation context
13743  */
13744 static DISK_ISVALID
13745 heap_check_all_pages_by_file_table (THREAD_ENTRY * thread_p, HFID * hfid, HEAP_CHKALL_RELOCOIDS * chk_objs)
13746 {
13747  int error_code = NO_ERROR;
13748 
13749  error_code =
13750  file_map_pages (thread_p, &hfid->vfid, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH, heap_file_map_chkreloc,
13751  chk_objs);
13752  if (error_code == ER_FAILED)
13753  {
13754  assert_release (false);
13755  return DISK_INVALID;
13756  }
13757  else if (error_code != NO_ERROR)
13758  {
13759  ASSERT_ERROR ();
13760  return DISK_ERROR;
13761  }
13762  return DISK_VALID;
13763 }
13764 #endif /* SA_MODE */
13765 
13766 /*
13767  * heap_check_all_pages () - Validate all pages known by given heap vs file manager
13768  * return: DISK_INVALID, DISK_VALID, DISK_ERROR
13769  * hfid(in): Heap identifier
13770  *
13771  * Note: Verify that all pages known by the given heap are valid. That
13772  * is, that they are valid from the point of view of the file manager.
13773  */
13774 DISK_ISVALID
13775 heap_check_all_pages (THREAD_ENTRY * thread_p, HFID * hfid)
13776 {
13777  VPID vpid; /* Page-volume identifier */
13778  PAGE_PTR pgptr = NULL; /* Page pointer */
13779  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
13780  RECDES hdr_recdes; /* Header record descriptor */
13781  DISK_ISVALID valid_pg = DISK_VALID;
13782  DISK_ISVALID valid = DISK_VALID;
13783  INT32 npages = 0;
13784  int i;
13785  HEAP_CHKALL_RELOCOIDS chk;
13786  HEAP_CHKALL_RELOCOIDS *chk_objs = &chk;
13787 #if defined (SA_MODE)
13788  int file_numpages;
13789 #endif /* SA_MODE */
13790 
13791  valid_pg = heap_chkreloc_start (chk_objs);
13792  if (valid_pg != DISK_VALID)
13793  {
13794  chk_objs = NULL;
13795  }
13796  else
13797  {
13798  chk_objs->verify_not_vacuumed = true;
13799  }
13800 
13801  /* Scan every page of the heap to find out if they are valid */
13802  valid_pg = heap_check_all_pages_by_heapchain (thread_p, hfid, chk_objs, &npages);
13803 
13804 #if defined (SA_MODE)
13805  if (file_get_num_user_pages (thread_p, &hfid->vfid, &file_numpages) != NO_ERROR)
13806  {
13807  ASSERT_ERROR ();
13808  return valid_pg == DISK_VALID ? DISK_ERROR : valid_pg;
13809  }
13810  if (file_numpages != -1 && file_numpages != npages)
13811  {
13812  DISK_ISVALID tmp_valid_pg = DISK_VALID;
13813 
13814  assert (false);
13815  if (chk_objs != NULL)
13816  {
13817  chk_objs->verify = false;
13818  (void) heap_chkreloc_end (chk_objs);
13819 
13820  tmp_valid_pg = heap_chkreloc_start (chk_objs);
13821  }
13822 
13823  /*
13824  * Scan every page of the heap using allocset.
13825  * This is for getting more information of the corrupted pages.
13826  */
13827  tmp_valid_pg = heap_check_all_pages_by_file_table (thread_p, hfid, chk_objs);
13828 
13829  if (chk_objs != NULL)
13830  {
13831  if (tmp_valid_pg == DISK_VALID)
13832  {
13833  tmp_valid_pg = heap_chkreloc_end (chk_objs);
13834  }
13835  else
13836  {
13837  chk_objs->verify = false;
13838  (void) heap_chkreloc_end (chk_objs);
13839  }
13840  }
13841 
13842  if (npages != file_numpages)
13843  {
13845  hfid->hpgid, npages, file_numpages);
13846  valid_pg = DISK_INVALID;
13847  }
13848  if (valid_pg == DISK_VALID && tmp_valid_pg != DISK_VALID)
13849  {
13850  valid_pg = tmp_valid_pg;
13851  }
13852  }
13853  else
13854 #endif /* SA_MODE */
13855  {
13856  if (chk_objs != NULL)
13857  {
13858  valid_pg = heap_chkreloc_end (chk_objs);
13859  }
13860  }
13861 
13862  if (valid_pg == DISK_VALID)
13863  {
13864  /*
13865  * Check the statistics entries in the header
13866  */
13867 
13868  /* Fetch the header page of the heap file */
13869  vpid.volid = hfid->vfid.volid;
13870  vpid.pageid = hfid->hpgid;
13871 
13872  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
13873  if (pgptr == NULL)
13874  {
13875  return DISK_ERROR;
13876  }
13877 
13878  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
13879 
13880  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
13881  {
13882  /* Unable to peek heap header record */
13883  pgbuf_unfix_and_init (thread_p, pgptr);
13884 
13885  return DISK_ERROR;
13886  }
13887 
13888  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
13889  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS && valid_pg != DISK_ERROR; i++)
13890  {
13891  if (!VPID_ISNULL (&heap_hdr->estimates.best[i].vpid))
13892  {
13893  valid = file_check_vpid (thread_p, &hfid->vfid, &heap_hdr->estimates.best[i].vpid);
13894  if (valid != DISK_VALID)
13895  {
13896  valid_pg = valid;
13897  break;
13898  }
13899  }
13900  }
13901 
13902 #if defined(SA_MODE)
13904  {
13905  HEAP_STATS_ENTRY *ent;
13906  void *last;
13907  int rc;
13908 
13909  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
13910 
13911  last = NULL;
13912  while ((ent = (HEAP_STATS_ENTRY *) mht_get2 (heap_Bestspace->hfid_ht, hfid, &last)) != NULL)
13913  {
13914  assert_release (!VPID_ISNULL (&ent->best.vpid));
13915  if (!VPID_ISNULL (&ent->best.vpid))
13916  {
13917  valid_pg = file_check_vpid (thread_p, &hfid->vfid, &ent->best.vpid);
13918  if (valid_pg != DISK_VALID)
13919  {
13920  break;
13921  }
13922  }
13923  assert_release (ent->best.freespace > 0);
13924  }
13925 
13926  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
13927 
13928  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
13929  }
13930 #endif
13931 
13932  pgbuf_unfix_and_init (thread_p, pgptr);
13933 
13934  /* Need to check for the overflow pages.... */
13935  }
13936 
13937  return valid_pg;
13938 }
13939 
13940 DISK_ISVALID
13941 heap_check_heap_file (THREAD_ENTRY * thread_p, HFID * hfid)
13942 {
13943  FILE_TYPE file_type;
13944  VPID vpid;
13945  DISK_ISVALID rv = DISK_VALID;
13946 #if !defined (NDEBUG)
13947  FILE_DESCRIPTORS fdes;
13948 #endif /* !NDEBUG */
13949 
13950  if (file_get_type (thread_p, &hfid->vfid, &file_type) != NO_ERROR)
13951  {
13952  return DISK_ERROR;
13953  }
13954  if (file_type == FILE_UNKNOWN_TYPE || (file_type != FILE_HEAP && file_type != FILE_HEAP_REUSE_SLOTS))
13955  {
13956  assert_release (false);
13957  return DISK_INVALID;
13958  }
13959 
13960  if (heap_get_header_page (thread_p, hfid, &vpid) == NO_ERROR)
13961  {
13962  hfid->hpgid = vpid.pageid;
13963 
13964 #if !defined (NDEBUG)
13965  if (file_descriptor_get (thread_p, &hfid->vfid, &fdes) == NO_ERROR && !OID_ISNULL (&fdes.heap.class_oid))
13966  {
13968  }
13969 #endif /* NDEBUG */
13970  rv = heap_check_all_pages (thread_p, hfid);
13971  if (rv == DISK_INVALID)
13972  {
13973  assert_release (false);
13974  }
13975  else if (rv == DISK_ERROR)
13976  {
13977  ASSERT_ERROR ();
13978  }
13979  return rv;
13980  }
13981  else
13982  {
13983  ASSERT_ERROR ();
13984  return DISK_ERROR;
13985  }
13986 }
13987 
13988 /*
13989  * heap_check_all_heaps () - Validate all pages of all known heap files
13990  * return: DISK_INVALID, DISK_VALID, DISK_ERROR
13991  *
13992  * Note: Verify that all pages of all heap files are valid. That is,
13993  * that they are valid from the point of view of the file manager.
13994  */
13995 DISK_ISVALID
13996 heap_check_all_heaps (THREAD_ENTRY * thread_p)
13997 {
13998  int error_code = NO_ERROR;
13999  HFID hfid;
14000  DISK_ISVALID allvalid = DISK_VALID;
14001  DISK_ISVALID valid = DISK_VALID;
14002  VFID vfid = VFID_INITIALIZER;
14003  OID class_oid = OID_INITIALIZER;
14004 
14005  while (true)
14006  {
14007  /* Go to each file, check only the heap files */
14008  error_code = file_tracker_interruptable_iterate (thread_p, FILE_HEAP, &vfid, &class_oid);
14009  if (error_code != NO_ERROR)
14010  {
14011  ASSERT_ERROR ();
14012  goto exit_on_error;
14013  }
14014  if (VFID_ISNULL (&vfid))
14015  {
14016  /* no more heap files */
14017  break;
14018  }
14019 
14020  hfid.vfid = vfid;
14021  valid = heap_check_heap_file (thread_p, &hfid);
14022  if (valid == DISK_ERROR)
14023  {
14024  goto exit_on_error;
14025  }
14026  if (valid != DISK_VALID)
14027  {
14028  allvalid = valid;
14029  }
14030  }
14031  assert (OID_ISNULL (&class_oid));
14032 
14033  return allvalid;
14034 
14035 exit_on_error:
14036  if (!OID_ISNULL (&class_oid))
14037  {
14038  lock_unlock_object (thread_p, &class_oid, oid_Root_class_oid, SCH_S_LOCK, true);
14039  }
14040 
14041  return ((allvalid == DISK_VALID) ? DISK_ERROR : allvalid);
14042 }
14043 
14044 /*
14045  * heap_dump_hdr () - Dump heap file header
14046  * return: NO_ERROR
14047  * heap_hdr(in): Header structure
14048  */
14049 static int
14050 heap_dump_hdr (FILE * fp, HEAP_HDR_STATS * heap_hdr)
14051 {
14052  int i, j;
14053  int avg_length;
14054  int ret = NO_ERROR;
14055 
14056  avg_length = ((heap_hdr->estimates.num_recs > 0)
14057  ? (int) ((heap_hdr->estimates.recs_sumlen / (float) heap_hdr->estimates.num_recs) + 0.9) : 0);
14058 
14059  fprintf (fp, "CLASS_OID = %2d|%4d|%2d, ", heap_hdr->class_oid.volid, heap_hdr->class_oid.pageid,
14060  heap_hdr->class_oid.slotid);
14061  fprintf (fp, "OVF_VFID = %4d|%4d, NEXT_VPID = %4d|%4d\n", heap_hdr->ovf_vfid.volid, heap_hdr->ovf_vfid.fileid,
14062  heap_hdr->next_vpid.volid, heap_hdr->next_vpid.pageid);
14063  fprintf (fp, "unfill_space = %4d\n", heap_hdr->unfill_space);
14064  fprintf (fp, "Estimated: num_pages = %d, num_recs = %d, avg reclength = %d\n", heap_hdr->estimates.num_pages,
14065  heap_hdr->estimates.num_recs, avg_length);
14066  fprintf (fp, "Estimated: num high best = %d, num others(not in array) high best = %d\n",
14067  heap_hdr->estimates.num_high_best, heap_hdr->estimates.num_other_high_best);
14068  fprintf (fp, "Hint of best set of vpids with head = %d\n", heap_hdr->estimates.head);
14069 
14070  for (j = 0, i = 0; i < HEAP_NUM_BEST_SPACESTATS; j++, i++)
14071  {
14072  if (j != 0 && j % 5 == 0)
14073  {
14074  fprintf (fp, "\n");
14075  }
14076  fprintf (fp, "%4d|%4d %4d,", heap_hdr->estimates.best[i].vpid.volid, heap_hdr->estimates.best[i].vpid.pageid,
14077  heap_hdr->estimates.best[i].freespace);
14078  }
14079  fprintf (fp, "\n");
14080 
14081  fprintf (fp, "Second best: num hints = %d, head of hints = %d, tail (next to insert) of hints = %d, num subs = %d\n",
14082  heap_hdr->estimates.num_second_best, heap_hdr->estimates.head_second_best,
14083  heap_hdr->estimates.tail_second_best, heap_hdr->estimates.num_substitutions);
14084  for (j = 0, i = 0; i < HEAP_NUM_BEST_SPACESTATS; j++, i++)
14085  {
14086  if (j != 0 && j % 5 == 0)
14087  {
14088  fprintf (fp, "\n");
14089  }
14090  fprintf (fp, "%4d|%4d,", heap_hdr->estimates.second_best[i].volid, heap_hdr->estimates.second_best[i].pageid);
14091  }
14092  fprintf (fp, "\n");
14093 
14094  fprintf (fp, "Last vpid = %4d|%4d\n", heap_hdr->estimates.last_vpid.volid, heap_hdr->estimates.last_vpid.pageid);
14095 
14096  fprintf (fp, "Next full search vpid = %4d|%4d\n", heap_hdr->estimates.full_search_vpid.volid,
14097  heap_hdr->estimates.full_search_vpid.pageid);
14098 
14099  return ret;
14100 }
14101 
14102 /*
14103  * heap_dump () - Dump heap file
14104  * return:
14105  * hfid(in): Heap file identifier
14106  * dump_records(in): If true, objects are printed in ascii format, otherwise, the
14107  * objects are not printed.
14108  *
14109  * Note: Dump a heap file. The objects are printed only when the value
14110  * of dump_records is true. This function is used for DEBUGGING PURPOSES.
14111  */
14112 void
14113 heap_dump (THREAD_ENTRY * thread_p, FILE * fp, HFID * hfid, bool dump_records)
14114 {
14115  VPID vpid; /* Page-volume identifier */
14116  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
14117  RECDES hdr_recdes; /* Header record descriptor */
14118  VFID ovf_vfid;
14119  OID oid;
14120  HEAP_SCANCACHE scan_cache;
14121  HEAP_CACHE_ATTRINFO attr_info;
14122  RECDES peek_recdes;
14123  FILE_DESCRIPTORS fdes;
14124  int ret = NO_ERROR;
14125  PGBUF_WATCHER pg_watcher;
14126  PGBUF_WATCHER old_pg_watcher;
14127 
14128  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
14129  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
14130 
14131  fprintf (fp, "\n\n*** DUMPING HEAP FILE: ");
14132  fprintf (fp, "volid = %d, Fileid = %d, Header-pageid = %d ***\n", hfid->vfid.volid, hfid->vfid.fileid, hfid->hpgid);
14133  (void) file_descriptor_dump (thread_p, &hfid->vfid, fp);
14134 
14135  /* Fetch the header page of the heap file */
14136 
14137  vpid.volid = hfid->vfid.volid;
14138  vpid.pageid = hfid->hpgid;
14139  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
14140  if (pg_watcher.pgptr == NULL)
14141  {
14142  /* Unable to fetch heap header page */
14143  return;
14144  }
14145 
14146  /* Peek the header record to dump the statistics */
14147 
14148  if (spage_get_record (thread_p, pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
14149  {
14150  /* Unable to peek heap header record */
14151  pgbuf_ordered_unfix (thread_p, &pg_watcher);
14152  return;
14153  }
14154 
14155  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
14156  ret = heap_dump_hdr (fp, heap_hdr);
14157  if (ret != NO_ERROR)
14158  {
14159  pgbuf_ordered_unfix (thread_p, &pg_watcher);
14160  return;
14161  }
14162 
14163  VFID_COPY (&ovf_vfid, &heap_hdr->ovf_vfid);
14164  pgbuf_ordered_unfix (thread_p, &pg_watcher);
14165 
14166  /* now scan every page and dump it */
14167  vpid.volid = hfid->vfid.volid;
14168  vpid.pageid = hfid->hpgid;
14169  while (!VPID_ISNULL (&vpid))
14170  {
14171  pg_watcher.pgptr =
14172  heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, NULL, &pg_watcher);
14173  if (old_pg_watcher.pgptr != NULL)
14174  {
14175  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
14176  }
14177  if (pg_watcher.pgptr == NULL)
14178  {
14179  /* something went wrong, return */
14180  return;
14181  }
14182  spage_dump (thread_p, fp, pg_watcher.pgptr, dump_records);
14183  (void) heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &vpid);
14184  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
14185  }
14186 
14187  if (old_pg_watcher.pgptr != NULL)
14188  {
14189  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
14190  }
14191  assert (pg_watcher.pgptr == NULL);
14192 
14193  /* Dump file table configuration */
14194  if (file_dump (thread_p, &hfid->vfid, fp) != NO_ERROR)
14195  {
14196  ASSERT_ERROR ();
14197  return;
14198  }
14199 
14200  if (!VFID_ISNULL (&ovf_vfid))
14201  {
14202  /* There is an overflow file for this heap file */
14203  fprintf (fp, "\nOVERFLOW FILE INFORMATION FOR HEAP FILE\n\n");
14204  if (file_dump (thread_p, &ovf_vfid, fp) != NO_ERROR)
14205  {
14206  ASSERT_ERROR ();
14207  return;
14208  }
14209  }
14210 
14211  /*
14212  * Dump schema definition
14213  */
14214 
14215  if (file_descriptor_get (thread_p, &hfid->vfid, &fdes) != NO_ERROR)
14216  {
14217  ASSERT_ERROR ();
14218  return;
14219  }
14220 
14221  if (!OID_ISNULL (&fdes.heap.class_oid))
14222  {
14223  if (heap_attrinfo_start (thread_p, &fdes.heap.class_oid, -1, NULL, &attr_info) != NO_ERROR)
14224  {
14225  return;
14226  }
14227 
14228  ret = heap_classrepr_dump (thread_p, fp, &fdes.heap.class_oid, attr_info.last_classrepr);
14229  if (ret != NO_ERROR)
14230  {
14231  heap_attrinfo_end (thread_p, &attr_info);
14232  return;
14233  }
14234 
14235  /* Dump individual Objects */
14236  if (dump_records == true)
14237  {
14238  if (heap_scancache_start (thread_p, &scan_cache, hfid, NULL, true, false, NULL) != NO_ERROR)
14239  {
14240  /* something went wrong, return */
14241  heap_attrinfo_end (thread_p, &attr_info);
14242  return;
14243  }
14244 
14245  OID_SET_NULL (&oid);
14246  oid.volid = hfid->vfid.volid;
14247 
14248  while (heap_next (thread_p, hfid, NULL, &oid, &peek_recdes, &scan_cache, PEEK) == S_SUCCESS)
14249  {
14250  fprintf (fp, "Object-OID = %2d|%4d|%2d,\n Length on disk = %d,\n", oid.volid, oid.pageid, oid.slotid,
14251  peek_recdes.length);
14252 
14253  if (heap_attrinfo_read_dbvalues (thread_p, &oid, &peek_recdes, NULL, &attr_info) != NO_ERROR)
14254  {
14255  fprintf (fp, " Error ... continue\n");
14256  continue;
14257  }
14258  heap_attrinfo_dump (thread_p, fp, &attr_info, false);
14259  }
14260  heap_scancache_end (thread_p, &scan_cache);
14261  }
14262  heap_attrinfo_end (thread_p, &attr_info);
14263  }
14264  else
14265  {
14266  /* boot_Db_parm.hfid */
14267  }
14268 
14269  fprintf (fp, "\n\n*** END OF DUMP FOR HEAP FILE ***\n\n");
14270 }
14271 
14272 /*
14273  * heap_dump_capacity () - dump heap file capacity
14274  *
14275  * return : error code
14276  * thread_p (in) : thread entry
14277  * fp (in) : output file
14278  * hfid (in) : heap file identifier
14279  */
14280 int
14281 heap_dump_capacity (THREAD_ENTRY * thread_p, FILE * fp, const HFID * hfid)
14282 {
14283  INT64 num_recs = 0;
14284  INT64 num_recs_relocated = 0;
14285  INT64 num_recs_inovf = 0;
14286  INT64 num_pages = 0;
14287  int avg_freespace = 0;
14288  int avg_freespace_nolast = 0;
14289  int avg_reclength = 0;
14290  int avg_overhead = 0;
14291  HEAP_CACHE_ATTRINFO attr_info;
14292  FILE_DESCRIPTORS fdes;
14293 
14294  int error_code = NO_ERROR;
14295 
14296  fprintf (fp, "IO_PAGESIZE = %d, DB_PAGESIZE = %d, Recv_overhead = %d\n", IO_PAGESIZE, DB_PAGESIZE,
14297  IO_PAGESIZE - DB_PAGESIZE);
14298 
14299  /* Go to each file, check only the heap files */
14300  error_code =
14301  heap_get_capacity (thread_p, hfid, &num_recs, &num_recs_relocated, &num_recs_inovf, &num_pages, &avg_freespace,
14302  &avg_freespace_nolast, &avg_reclength, &avg_overhead);
14303  if (error_code != NO_ERROR)
14304  {
14305  ASSERT_ERROR ();
14306  return error_code;
14307  }
14308  fprintf (fp, "HFID:%d|%d|%d, Num_recs = %" PRId64 ", Num_reloc_recs = %" PRId64 ",\n Num_recs_inovf = %" PRId64
14309  ", Avg_reclength = %d,\n Num_pages = %" PRId64 ", Avg_free_space_per_page = %d,\n"
14310  " Avg_free_space_per_page_without_lastpage = %d\n Avg_overhead_per_page = %d\n",
14311  (int) hfid->vfid.volid, hfid->vfid.fileid, hfid->hpgid, num_recs, num_recs_relocated, num_recs_inovf,
14312  avg_reclength, num_pages, avg_freespace, avg_freespace_nolast, avg_overhead);
14313 
14314  /* Dump schema definition */
14315  error_code = file_descriptor_get (thread_p, &hfid->vfid, &fdes);
14316  if (error_code != NO_ERROR)
14317  {
14318  ASSERT_ERROR ();
14319  return error_code;
14320  }
14321 
14322  if (!OID_ISNULL (&fdes.heap.class_oid))
14323  {
14324  error_code = heap_attrinfo_start (thread_p, &fdes.heap.class_oid, -1, NULL, &attr_info);
14325  if (error_code != NO_ERROR)
14326  {
14327  ASSERT_ERROR ();
14328  return error_code;
14329  }
14330  (void) heap_classrepr_dump (thread_p, fp, &fdes.heap.class_oid, attr_info.last_classrepr);
14331  heap_attrinfo_end (thread_p, &attr_info);
14332  }
14333  else
14334  {
14335  /* boot_Db_parm.hfid */
14336  }
14337 
14338  fprintf (fp, "\n");
14339  return NO_ERROR;
14340 }
14341 
14342 /*
14343  * Check consistency of heap from the point of view of relocation
14344  */
14345 
14346 /*
14347  * heap_chkreloc_start () - Start validating consistency of relocated objects in
14348  * heap
14349  * return: DISK_VALID, DISK_INVALID, DISK_ERROR
14350  * chk(in): Structure for checking relocation objects
14351  *
14352  */
14353 static DISK_ISVALID
14354 heap_chkreloc_start (HEAP_CHKALL_RELOCOIDS * chk)
14355 {
14356  chk->ht = mht_create ("Validate Relocation entries hash table", HEAP_CHK_ADD_UNFOUND_RELOCOIDS, oid_hash,
14357  oid_compare_equals);
14358  if (chk->ht == NULL)
14359  {
14360  chk->ht = NULL;
14361  chk->unfound_reloc_oids = NULL;
14362  chk->max_unfound_reloc = -1;
14363  chk->num_unfound_reloc = -1;
14364  return DISK_ERROR;
14365  }
14366 
14367  chk->unfound_reloc_oids = (OID *) malloc (sizeof (*chk->unfound_reloc_oids) * HEAP_CHK_ADD_UNFOUND_RELOCOIDS);
14368  if (chk->unfound_reloc_oids == NULL)
14369  {
14370  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
14371  sizeof (*chk->unfound_reloc_oids) * HEAP_CHK_ADD_UNFOUND_RELOCOIDS);
14372 
14373  if (chk->ht != NULL)
14374  {
14375  mht_destroy (chk->ht);
14376  }
14377 
14378  chk->ht = NULL;
14379  chk->unfound_reloc_oids = NULL;
14380  chk->max_unfound_reloc = -1;
14381  chk->num_unfound_reloc = -1;
14382  return DISK_ERROR;
14383  }
14384 
14385  chk->max_unfound_reloc = HEAP_CHK_ADD_UNFOUND_RELOCOIDS;
14386  chk->num_unfound_reloc = 0;
14387  chk->verify = true;
14388  chk->verify_not_vacuumed = false;
14389  chk->not_vacuumed_res = DISK_VALID;
14390 
14391  return DISK_VALID;
14392 }
14393 
14394 /*
14395  * heap_chkreloc_end () - Finish validating consistency of relocated objects
14396  * in heap
14397  * return: DISK_VALID, DISK_INVALID, DISK_ERROR
14398  * chk(in): Structure for checking relocation objects
14399  *
14400  * Note: Scanning the unfound_reloc_oid list, remove those entries that
14401  * are also found in hash table (remove them from unfound_reloc
14402  * list and from hash table). At the end of the scan, if there
14403  * are any entries in either hash table or unfound_reloc_oid, the
14404  * heap is inconsistent/corrupted.
14405  */
14406 static DISK_ISVALID
14407 heap_chkreloc_end (HEAP_CHKALL_RELOCOIDS * chk)
14408 {
14409  HEAP_CHK_RELOCOID *forward;
14410  DISK_ISVALID valid_reloc = DISK_VALID;
14411  int i;
14412 
14413  if (chk->not_vacuumed_res != DISK_VALID)
14414  {
14415  valid_reloc = chk->not_vacuumed_res;
14417  }
14418 
14419  /*
14420  * Check for any postponed unfound relocated OIDs that have not been
14421  * checked or found. If they are not in the hash table, it would be an
14422  * error. That is, we would have a relocated (content) object without an
14423  * object pointing to it. (relocation/home).
14424  */
14425  if (chk->verify == true)
14426  {
14427  for (i = 0; i < chk->num_unfound_reloc; i++)
14428  {
14429  forward = (HEAP_CHK_RELOCOID *) mht_get (chk->ht, &chk->unfound_reloc_oids[i]);
14430  if (forward != NULL)
14431  {
14432  /*
14433  * The entry was found.
14434  * Remove the entry and the memory space
14435  */
14436  /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry
14437  * before it is removed. We may want to take advantage of that here to free the memory associated with
14438  * the entry */
14439  if (mht_rem (chk->ht, &chk->unfound_reloc_oids[i], NULL, NULL) != NO_ERROR)
14440  {
14441  valid_reloc = DISK_ERROR;
14442  }
14443  else
14444  {
14445  free_and_init (forward);
14446  }
14447  }
14448  else
14449  {
14450  er_log_debug (ARG_FILE_LINE, "Unable to find relocation/home object for relocated_oid=%d|%d|%d\n",
14451  (int) chk->unfound_reloc_oids[i].volid, chk->unfound_reloc_oids[i].pageid,
14452  (int) chk->unfound_reloc_oids[i].slotid);
14453 #if defined (SA_MODE)
14454  assert_release (false);
14455  valid_reloc = DISK_INVALID;
14456 #endif /* SA_MODE */
14457  }
14458  }
14459  }
14460 
14461  /*
14462  * If there are entries in the hash table, there is a problem. That is,
14463  * the relocated (content) objects were not found. That is, the home object
14464  * points to a dangling content object, or what it points to is not a
14465  * relocated (newhome) object.
14466  */
14467 
14468  if (mht_count (chk->ht) > 0)
14469  {
14470  (void) mht_map (chk->ht, heap_chkreloc_print_notfound, chk);
14471 #if defined (SA_MODE)
14472  valid_reloc = DISK_INVALID;
14473 #endif /* SA_MODE */
14474  }
14475 
14476  mht_destroy (chk->ht);
14477  free_and_init (chk->unfound_reloc_oids);
14478 
14479  return valid_reloc;
14480 }
14481 
14482 /*
14483  * heap_chkreloc_print_notfound () - Print entry that does not have a relocated entry
14484  * return: NO_ERROR
14485  * ignore_reloc_oid(in): Key (relocated entry to real entry) of hash table
14486  * ent(in): The entry associated with key (real oid)
14487  * xchk(in): Structure for checking relocation objects
14488  *
14489  * Note: Print information about the unfound relocated record for this
14490  * home record with a relocation address. The heap is inconsistent.
14491  */
14492 static int
14493 heap_chkreloc_print_notfound (const void *ignore_reloc_oid, void *ent, void *xchk)
14494 {
14495  HEAP_CHK_RELOCOID *forward = (HEAP_CHK_RELOCOID *) ent;
14496  HEAP_CHKRELOC *chk = (HEAP_CHKRELOC *) xchk;
14497 
14498  if (chk->verify == true)
14499  {
14500  er_log_debug (ARG_FILE_LINE,
14501  "Unable to find relocated record with oid=%d|%d|%d for home object with oid=%d|%d|%d\n",
14502  (int) forward->reloc_oid.volid, forward->reloc_oid.pageid, (int) forward->reloc_oid.slotid,
14503  (int) forward->real_oid.volid, forward->real_oid.pageid, (int) forward->real_oid.slotid);
14504 #if defined (SA_MODE)
14506 #endif /* SA_MODE */
14507  }
14508  /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry before it is
14509  * removed. We may want to take advantage of that here to free the memory associated with the entry */
14510  (void) mht_rem (chk->ht, &forward->reloc_oid, NULL, NULL);
14511  free_and_init (forward);
14512 
14513  return NO_ERROR;
14514 }
14515 
14516 /*
14517  * heap_chkreloc_next () - Verify consistency of relocation records on a heap page
14518  * return: DISK_VALID, DISK_INVALID, DISK_ERROR
14519  * thread_p(in) : thread context
14520  * chk(in): Structure for checking relocation objects
14521  * pgptr(in): Page pointer
14522  *
14523  * Note: While scanning objects of given page:
14524  * 1: if a relocation record is found, we check if that record
14525  * has already been seen (i.e., if it is in the unfound_reloc
14526  * list),
14527  * if it has been seen, we remove the entry from the
14528  * unfound_reloc_oids list.
14529  * if it has not been seen, we add an entry to hash table
14530  * from reloc_oid to real_oid
14531  * Note: for optimization reasons, we may not scan the
14532  * unfound_reloc list if it is too long; in this case the entry is
14533  * added to the hash table.
14534  * 2: if a newhome (relocated) record is found, we check if the
14535  * real record has already been seen (i.e., check hash table),
14536  * if it has been seen, we remove the entry from hash table
14537  * otherwise, we add an entry into the unfound_reloc list
14538  */
14539 
14540 #define HEAP_CHKRELOC_UNFOUND_SHORT 5
14541 
14542 static DISK_ISVALID
14543 heap_chkreloc_next (THREAD_ENTRY * thread_p, HEAP_CHKRELOC * chk, PAGE_PTR pgptr)
14544 {
14545  HEAP_CHK_RELOCOID *forward;
14546  INT16 type = REC_UNKNOWN;
14547  RECDES recdes;
14548  OID oid, class_oid;
14549  OID *peek_oid;
14550  void *ptr;
14551  bool found;
14552  int i;
14553 
14554  if (chk->verify != true)
14555  {
14556  return DISK_VALID;
14557  }
14558 
14559  if (chk->verify_not_vacuumed && heap_get_class_oid_from_page (thread_p, pgptr, &class_oid) != NO_ERROR)
14560  {
14562  return DISK_ERROR;
14563  }
14564 
14565  oid.volid = pgbuf_get_volume_id (pgptr);
14566  oid.pageid = pgbuf_get_page_id (pgptr);
14567  oid.slotid = 0; /* i.e., will get slot 1 */
14568 
14569  while (spage_next_record (pgptr, &oid.slotid, &recdes, PEEK) == S_SUCCESS)
14570  {
14571  if (oid.slotid == HEAP_HEADER_AND_CHAIN_SLOTID)
14572  {
14573  continue;
14574  }
14575  type = spage_get_record_type (pgptr, oid.slotid);
14576 
14577  switch (type)
14578  {
14579  case REC_RELOCATION:
14580  /*
14581  * The record stored on the page is a relocation record,
14582  * get the new home for the record
14583  *
14584  * If there are already entries waiting to be checked and the list is
14585  * not that big, check them. Otherwise, wait until the end for the
14586  * check, since searching the list may be expensive.
14587  */
14588  peek_oid = (OID *) recdes.data;
14589  found = false;
14590  if (chk->num_unfound_reloc < HEAP_CHKRELOC_UNFOUND_SHORT)
14591  {
14592  /*
14593  * Go ahead and check since the list is very short.
14594  */
14595  for (i = 0; i < chk->num_unfound_reloc; i++)
14596  {
14597  if (OID_EQ (&chk->unfound_reloc_oids[i], peek_oid))
14598  {
14599  /*
14600  * Remove it from the unfound list
14601  */
14602  if ((i + 1) != chk->num_unfound_reloc)
14603  {
14604  chk->unfound_reloc_oids[i] = chk->unfound_reloc_oids[chk->num_unfound_reloc - 1];
14605  }
14606  chk->num_unfound_reloc--;
14607  found = true;
14608  break;
14609  }
14610  }
14611  }
14612  if (found == false)
14613  {
14614  /*
14615  * Add it to hash table
14616  */
14617  forward = (HEAP_CHK_RELOCOID *) malloc (sizeof (HEAP_CHK_RELOCOID));
14618  if (forward == NULL)
14619  {
14620  /*
14621  * Out of memory
14622  */
14624 
14625  return DISK_ERROR;
14626  }
14627  forward->real_oid = oid;
14628  forward->reloc_oid = *peek_oid;
14629  if (mht_put (chk->ht, &forward->reloc_oid, forward) == NULL)
14630  {
14631  /*
14632  * Failure in mht_put
14633  */
14634  return DISK_ERROR;
14635  }
14636  }
14637  break;
14638 
14639  case REC_BIGONE:
14640  if (chk->verify_not_vacuumed)
14641  {
14642  MVCC_REC_HEADER rec_header;
14643  PAGE_PTR overflow_page;
14644  DISK_ISVALID tmp_valid;
14645  VPID overflow_vpid;
14646  OID *overflow_oid;
14647 
14648  /* get overflow page id */
14649  overflow_oid = (OID *) recdes.data;
14650  overflow_vpid.volid = overflow_oid->volid;
14651  overflow_vpid.pageid = overflow_oid->pageid;
14652  if (VPID_ISNULL (&overflow_vpid))
14653  {
14655  return DISK_ERROR;
14656  }
14657 
14658  /* fix page and get record */
14659  overflow_page =
14660  pgbuf_fix (thread_p, &overflow_vpid, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH);
14661  if (overflow_page == NULL)
14662  {
14664  return DISK_ERROR;
14665  }
14666  if (heap_get_mvcc_rec_header_from_overflow (overflow_page, &rec_header, &recdes) != NO_ERROR)
14667  {
14668  pgbuf_unfix_and_init (thread_p, overflow_page);
14670  return DISK_ERROR;
14671  }
14672  pgbuf_unfix_and_init (thread_p, overflow_page);
14673 
14674  /* check header */
14675  tmp_valid = vacuum_check_not_vacuumed_rec_header (thread_p, &oid, &class_oid, &rec_header, -1);
14676  switch (tmp_valid)
14677  {
14678  case DISK_VALID:
14679  break;
14680  case DISK_INVALID:
14682  break;
14683  case DISK_ERROR:
14684  default:
14686  return DISK_ERROR;
14687  break;
14688  }
14689  }
14690  break;
14691 
14692  case REC_HOME:
14693  if (chk->verify_not_vacuumed)
14694  {
14695  DISK_ISVALID tmp_valid = vacuum_check_not_vacuumed_recdes (thread_p, &oid, &class_oid,
14696  &recdes, -1);
14697  switch (tmp_valid)
14698  {
14699  case DISK_VALID:
14700  break;
14701  case DISK_INVALID:
14703  break;
14704  case DISK_ERROR:
14705  default:
14707  return DISK_ERROR;
14708  break;
14709  }
14710  }
14711  break;
14712 
14713  case REC_NEWHOME:
14714  if (chk->verify_not_vacuumed)
14715  {
14716  DISK_ISVALID tmp_valid = vacuum_check_not_vacuumed_recdes (thread_p, &oid, &class_oid,
14717  &recdes, -1);
14718  switch (tmp_valid)
14719  {
14720  case DISK_VALID:
14721  break;
14722  case DISK_INVALID:
14724  break;
14725  case DISK_ERROR:
14726  default:
14728  return DISK_ERROR;
14729  break;
14730  }
14731  }
14732 
14733  /*
14734  * Remove the object from hash table or insert the object in unfound
14735  * reloc check list.
14736  */
14737  forward = (HEAP_CHK_RELOCOID *) mht_get (chk->ht, &oid);
14738  if (forward != NULL)
14739  {
14740  /*
14741  * The entry was found.
14742  * Remove the entry and the memory space
14743  */
14744  /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry
14745  * before it is removed. We may want to take advantage of that here to free the memory associated with
14746  * the entry */
14747  (void) mht_rem (chk->ht, &forward->reloc_oid, NULL, NULL);
14748  free_and_init (forward);
14749  }
14750  else
14751  {
14752  /*
14753  * The entry is not in hash table.
14754  * Add entry into unfound_reloc list
14755  */
14756  if (chk->max_unfound_reloc <= chk->num_unfound_reloc)
14757  {
14758  /*
14759  * Need to realloc the area. Add 100 OIDs to it
14760  */
14761  i = sizeof (*chk->unfound_reloc_oids) * (chk->max_unfound_reloc + HEAP_CHK_ADD_UNFOUND_RELOCOIDS);
14762 
14763  ptr = realloc (chk->unfound_reloc_oids, i);
14764  if (ptr == NULL)
14765  {
14767  return DISK_ERROR;
14768  }
14769  else
14770  {
14771  chk->unfound_reloc_oids = (OID *) ptr;
14772  chk->max_unfound_reloc += HEAP_CHK_ADD_UNFOUND_RELOCOIDS;
14773  }
14774  }
14775  i = chk->num_unfound_reloc++;
14776  chk->unfound_reloc_oids[i] = oid;
14777  }
14778  break;
14779 
14780  case REC_MARKDELETED:
14781  case REC_DELETED_WILL_REUSE:
14782  default:
14783  break;
14784  }
14785  }
14786 
14787  return DISK_VALID;
14788 }
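/*
 * Illustrative sketch, not part of the original heap_file.c: the relocation
 * check above reconciles two collections -- forward pointers recorded when a
 * REC_RELOCATION slot is seen, and "unfound" OIDs recorded when a REC_NEWHOME
 * slot is seen before its forward pointer.  The standalone toy code below
 * models the same reconciliation with plain arrays instead of the mht_* hash
 * table; every name here is hypothetical.
 */
#include <stdbool.h>
#include <stdlib.h>

typedef struct
{
  short volid;
  int pageid;
  short slotid;
} TOY_OID;

static bool
toy_oid_eq (const TOY_OID * a, const TOY_OID * b)
{
  return a->volid == b->volid && a->pageid == b->pageid && a->slotid == b->slotid;
}

/* Returns the number of dangling entries: forward pointers without a newhome
 * record plus newhome records that nothing points to.  Zero mirrors the
 * DISK_VALID outcome of the real check; -1 means out of memory. */
static int
toy_chkreloc_reconcile (const TOY_OID * forwards, int nfwd, const TOY_OID * newhomes, int nnew)
{
  bool *matched = (bool *) calloc (nnew > 0 ? nnew : 1, sizeof (bool));
  int dangling = 0;

  if (matched == NULL)
    {
      return -1;
    }
  for (int i = 0; i < nfwd; i++)
    {
      bool found = false;

      for (int j = 0; j < nnew; j++)
        {
          if (!matched[j] && toy_oid_eq (&forwards[i], &newhomes[j]))
            {
              matched[j] = true;
              found = true;
              break;
            }
        }
      if (!found)
        {
          dangling++;           /* home record points to a missing newhome */
        }
    }
  for (int j = 0; j < nnew; j++)
    {
      if (!matched[j])
        {
          dangling++;           /* newhome record without a home pointing to it */
        }
    }
  free (matched);
  return dangling;
}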
14789 
14790 /*
14791  * Chn guesses for class objects at clients
14792  */
14793 
14794 /*
14795  * Note: Currently, we do not try to guess chn of instances at clients.
14796  * We are just doing it for classes.
14797  *
14798  * We do not know if the object is cached on the client side at all, we
14799  * are just guessing that it is still cached if it was sent to it. This is
14800  * almost 100% true since classes are avoided during garbage collection.
14801  *
14802  * The caller does not know the chn when the client is fetching instances of the
14803  * class without knowing the class_oid. That does not imply that the
14804  * class object is not cached in the workspace. The client just did not
14805  * know the class_oid of the given fetched object. The server finds it and
14806  * has to decide whether or not to send the class object. If the server does
14807  * not send the class object and the client does not have it, the client will
14808  * request the class object (another server call).
14809  */
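/*
 * Illustrative sketch, not part of the original file: the chnguess cache
 * keeps, per class OID, the last chn sent to clients plus one bit per client
 * transaction index.  The minimal standalone model below uses a byte array as
 * the bit map, similar in spirit to the HEAP_BIT_SET/HEAP_BIT_GET macros;
 * names and sizes are hypothetical, and tran_index must stay below
 * TOY_MAX_CLIENTS.
 */
#include <stdbool.h>
#include <string.h>

#define TOY_MAX_CLIENTS 64

typedef struct
{
  int chn;                                  /* last coherency number sent */
  unsigned char bits[TOY_MAX_CLIENTS / 8];  /* one bit per tran index */
} TOY_CHNGUESS;

static void
toy_chnguess_put (TOY_CHNGUESS * e, int tran_index, int chn)
{
  if (e->chn != chn)
    {
      /* a different chn invalidates every client's guess */
      memset (e->bits, 0, sizeof (e->bits));
      e->chn = chn;
    }
  e->bits[tran_index / 8] |= (unsigned char) (1 << (tran_index % 8));
}

static int
toy_chnguess_get (const TOY_CHNGUESS * e, int tran_index)
{
  bool sent = (e->bits[tran_index / 8] >> (tran_index % 8)) & 1;

  return sent ? e->chn : -1;                /* -1 plays the role of NULL_CHN */
}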
14810 
14811 /*
14812  * heap_chnguess_initialize () - Initialize structure of chn guesses at clients
14813  * return: NO_ERROR
14814  *
14815  * Note: Initialize structures used to cache information of CHN guess
14816  * at client workspaces.
14817  * Note: We currently maintain this information only for classes.
14818  */
14819 static int
14820 heap_chnguess_initialize (void)
14821 {
14822  HEAP_CHNGUESS_ENTRY *entry;
14823  int i;
14824  int ret = NO_ERROR;
14825 
14826  if (heap_Guesschn != NULL)
14827  {
14828  ret = heap_chnguess_finalize ();
14829  if (ret != NO_ERROR)
14830  {
14831  goto exit_on_error;
14832  }
14833  }
14834 
14835  heap_Guesschn_area.schema_change = false;
14836  heap_Guesschn_area.clock_hand = -1;
14837  heap_Guesschn_area.num_entries = HEAP_CLASSREPR_MAXCACHE;
14838 
14839  /*
14840  * Start with at least the fudge factor of clients. Make sure that every
14841  * bit is used.
14842  */
14843  heap_Guesschn_area.num_clients = logtb_get_number_of_total_tran_indices ();
14844  if (heap_Guesschn_area.num_clients < HEAP_CHNGUESS_FUDGE_MININDICES)
14845  {
14846  heap_Guesschn_area.num_clients = HEAP_CHNGUESS_FUDGE_MININDICES;
14847  }
14848 
14849  /* Make sure every single bit is used */
14850  heap_Guesschn_area.nbytes = HEAP_NBITS_TO_NBYTES (heap_Guesschn_area.num_clients);
14851  heap_Guesschn_area.num_clients = HEAP_NBYTES_TO_NBITS (heap_Guesschn_area.nbytes);
14852 
14853  /* Build the hash table from OID to CHN */
14854  heap_Guesschn_area.ht =
14855  mht_create ("Memory hash OID to chn at clients", HEAP_CLASSREPR_MAXCACHE, oid_hash, oid_compare_equals);
14856  if (heap_Guesschn_area.ht == NULL)
14857  {
14858  goto exit_on_error;
14859  }
14860 
14861  heap_Guesschn_area.entries =
14862  (HEAP_CHNGUESS_ENTRY *) malloc (sizeof (HEAP_CHNGUESS_ENTRY) * heap_Guesschn_area.num_entries);
14863  if (heap_Guesschn_area.entries == NULL)
14864  {
14865  ret = ER_OUT_OF_VIRTUAL_MEMORY;
14866  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, sizeof (HEAP_CHNGUESS_ENTRY) * heap_Guesschn_area.num_entries);
14867  mht_destroy (heap_Guesschn_area.ht);
14868  goto exit_on_error;
14869  }
14870 
14871  heap_Guesschn_area.bitindex = (unsigned char *) malloc (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries);
14872  if (heap_Guesschn_area.bitindex == NULL)
14873  {
14874  ret = ER_OUT_OF_VIRTUAL_MEMORY;
14875  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1,
14876  (size_t) (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries));
14877  mht_destroy (heap_Guesschn_area.ht);
14878  free_and_init (heap_Guesschn_area.entries);
14879  goto exit_on_error;
14880  }
14881 
14882  /*
14883  * Initialize every entry as not recently freed
14884  */
14885  for (i = 0; i < heap_Guesschn_area.num_entries; i++)
14886  {
14887  entry = &heap_Guesschn_area.entries[i];
14888  entry->idx = i;
14889  entry->chn = NULL_CHN;
14890  entry->recently_accessed = false;
14891  OID_SET_NULL (&entry->oid);
14892  entry->bits = &heap_Guesschn_area.bitindex[i * heap_Guesschn_area.nbytes];
14893  HEAP_NBYTES_CLEARED (entry->bits, heap_Guesschn_area.nbytes);
14894  }
14895  heap_Guesschn = &heap_Guesschn_area;
14896 
14897  return ret;
14898 
14899 exit_on_error:
14900 
14901  return (ret == NO_ERROR) ? ER_FAILED : ret;
14902 }
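/*
 * Illustrative sketch, not part of the original file: the initialization
 * above rounds the client count up to a whole number of bytes so that no
 * allocated bit is wasted.  Assuming HEAP_NBITS_TO_NBYTES and
 * HEAP_NBYTES_TO_NBITS are the usual bit/byte conversions, the rounding
 * behaves like the standalone helper below (names are hypothetical).
 */
static void
toy_round_clients (int requested_clients, int *nbytes_out, int *nclients_out)
{
  int nbytes = (requested_clients + 7) / 8;     /* ceiling to whole bytes */

  *nbytes_out = nbytes;
  *nclients_out = nbytes * 8;                   /* every bit in those bytes is usable */
}
/* Example: 100 requested clients -> 13 bytes -> 104 usable client slots. */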
14903 
14904 /*
14905  * heap_chnguess_realloc () - More clients than currently maintained
14906  * return: NO_ERROR
14907  *
14908  * Note: Expand the chn_guess structures to support at least the number
14909  * currently connected clients.
14910  */
14911 static int
14912 heap_chnguess_realloc (void)
14913 {
14914  int i;
14915  unsigned char *save_bitindex;
14916  int save_nbytes;
14917  HEAP_CHNGUESS_ENTRY *entry;
14918  int ret = NO_ERROR;
14919 
14920  if (heap_Guesschn == NULL)
14921  {
14922  return heap_chnguess_initialize ();
14923  }
14924 
14925  /*
14926  * Save the current information, so we can copy it at a later point
14927  */
14928  save_bitindex = heap_Guesschn_area.bitindex;
14929  save_nbytes = heap_Guesschn_area.nbytes;
14930 
14931  /*
14932  * Find the number of clients that need to be supported. Avoid small
14933  * increases since it is undesirable to realloc again. Increase by at least
14934  * the fudge factor.
14935  */
14936 
14937  heap_Guesschn->num_clients += HEAP_CHNGUESS_FUDGE_MININDICES;
14938  i = logtb_get_number_of_total_tran_indices ();
14939 
14940  if (heap_Guesschn->num_clients < i)
14941  {
14942  heap_Guesschn->num_clients = i + HEAP_CHNGUESS_FUDGE_MININDICES;
14943  }
14944 
14945  /* Make sure every single bit is used */
14946  heap_Guesschn_area.nbytes = HEAP_NBITS_TO_NBYTES (heap_Guesschn_area.num_clients);
14947  heap_Guesschn_area.num_clients = HEAP_NBYTES_TO_NBITS (heap_Guesschn_area.nbytes);
14948 
14949  heap_Guesschn_area.bitindex = (unsigned char *) malloc (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries);
14950  if (heap_Guesschn_area.bitindex == NULL)
14951  {
14954  (size_t) (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries));
14955  heap_Guesschn_area.bitindex = save_bitindex;
14956  heap_Guesschn_area.nbytes = save_nbytes;
14957  heap_Guesschn_area.num_clients = HEAP_NBYTES_TO_NBITS (save_nbytes);
14958  goto exit_on_error;
14959  }
14960 
14961  /*
14962  * Now reset the bits for each entry
14963  */
14964 
14965  for (i = 0; i < heap_Guesschn_area.num_entries; i++)
14966  {
14967  entry = &heap_Guesschn_area.entries[i];
14968  entry->bits = &heap_Guesschn_area.bitindex[i * heap_Guesschn_area.nbytes];
14969  /*
14970  * Copy the bits
14971  */
14972  memcpy (entry->bits, &save_bitindex[i * save_nbytes], save_nbytes);
14973  HEAP_NBYTES_CLEARED (&entry->bits[save_nbytes], heap_Guesschn_area.nbytes - save_nbytes);
14974  }
14975  /*
14976  * Now throw previous storage
14977  */
14978  free_and_init (save_bitindex);
14979 
14980  return ret;
14981 
14982 exit_on_error:
14983 
14984  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
14985 }
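/*
 * Illustrative sketch, not part of the original file: when more clients must
 * be tracked, each entry's bitmap grows but the bits already set must
 * survive.  The standalone helper below shows the same copy-then-clear-the-
 * tail pattern used above (memcpy of the old bytes, zero the newly added
 * bytes); names are hypothetical.
 */
#include <stdlib.h>
#include <string.h>

static unsigned char *
toy_grow_bitmap (const unsigned char *old_bits, int old_nbytes, int new_nbytes)
{
  unsigned char *new_bits;

  if (new_nbytes < old_nbytes)
    {
      return NULL;              /* only growth is supported in this sketch */
    }
  new_bits = (unsigned char *) malloc (new_nbytes);
  if (new_bits == NULL)
    {
      return NULL;              /* caller keeps using the old bitmap */
    }
  memcpy (new_bits, old_bits, old_nbytes);                      /* keep the old bits */
  memset (new_bits + old_nbytes, 0, new_nbytes - old_nbytes);   /* clear the new tail */
  return new_bits;
}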
14986 
14987 /*
14988  * heap_chnguess_finalize () - Finish chnguess information
14989  * return: NO_ERROR
14990  *
14991  * Note: Destroy hash table and memory for entries.
14992  */
14993 static int
14994 heap_chnguess_finalize (void)
14995 {
14996  int ret = NO_ERROR;
14997 
14998  if (heap_Guesschn == NULL)
14999  {
15000  return NO_ERROR; /* nop */
15001  }
15002 
15003  mht_destroy (heap_Guesschn->ht);
15004  free_and_init (heap_Guesschn->entries);
15005  free_and_init (heap_Guesschn->bitindex);
15006  heap_Guesschn->ht = NULL;
15007  heap_Guesschn->schema_change = false;
15008  heap_Guesschn->clock_hand = 0;
15009  heap_Guesschn->num_entries = 0;
15010  heap_Guesschn->num_clients = 0;
15011  heap_Guesschn->nbytes = 0;
15012 
15013  heap_Guesschn = NULL;
15014 
15015  return ret;
15016 }
15017 
15018 /*
15019  * heap_stats_bestspace_initialize () - Initialize structure of best space
15020  * return: NO_ERROR
15021  */
15022 static int
15023 heap_stats_bestspace_initialize (void)
15024 {
15025  int ret = NO_ERROR;
15026 
15027  if (heap_Bestspace != NULL)
15028  {
15029  ret = heap_stats_bestspace_finalize ();
15030  if (ret != NO_ERROR)
15031  {
15032  goto exit_on_error;
15033  }
15034  }
15035 
15036  heap_Bestspace = &heap_Bestspace_cache_area;
15037 
15038  pthread_mutex_init (&heap_Bestspace->bestspace_mutex, NULL);
15039 
15040  heap_Bestspace->num_stats_entries = 0;
15041 
15042  heap_Bestspace->hfid_ht =
15043  mht_create ("Memory hash HFID to {bestspace}", HEAP_STATS_ENTRY_MHT_EST_SIZE, heap_hash_hfid, heap_compare_hfid);
15044  if (heap_Bestspace->hfid_ht == NULL)
15045  {
15046  goto exit_on_error;
15047  }
15048 
15049  heap_Bestspace->vpid_ht =
15050  mht_create ("Memory hash VPID to {bestspace}", HEAP_STATS_ENTRY_MHT_EST_SIZE, heap_hash_vpid, heap_compare_vpid);
15051  if (heap_Bestspace->vpid_ht == NULL)
15052  {
15053  goto exit_on_error;
15054  }
15055 
15056  heap_Bestspace->num_alloc = 0;
15057  heap_Bestspace->num_free = 0;
15058  heap_Bestspace->free_list_count = 0;
15059  heap_Bestspace->free_list = NULL;
15060 
15061  return ret;
15062 
15063 exit_on_error:
15064 
15065  return (ret == NO_ERROR) ? ER_FAILED : ret;
15066 }
15067 
15068 /*
15069  * heap_stats_bestspace_finalize () - Finish best space information
15070  * return: NO_ERROR
15071  *
15072  * Note: Destroy hash table and memory for entries.
15073  */
15074 static int
15075 heap_stats_bestspace_finalize (void)
15076 {
15077  HEAP_STATS_ENTRY *ent;
15078  int ret = NO_ERROR;
15079 
15080  if (heap_Bestspace == NULL)
15081  {
15082  return NO_ERROR;
15083  }
15084 
15085  if (heap_Bestspace->vpid_ht != NULL)
15086  {
15087  (void) mht_map_no_key (NULL, heap_Bestspace->vpid_ht, heap_stats_entry_free, NULL);
15088  while (heap_Bestspace->free_list_count > 0)
15089  {
15090  ent = heap_Bestspace->free_list;
15091  assert_release (ent != NULL);
15092 
15093  heap_Bestspace->free_list = ent->next;
15094  ent->next = NULL;
15095 
15096  free (ent);
15097 
15098  heap_Bestspace->free_list_count--;
15099  }
15100  assert_release (heap_Bestspace->free_list == NULL);
15101  }
15102 
15103  if (heap_Bestspace->vpid_ht != NULL)
15104  {
15105  mht_destroy (heap_Bestspace->vpid_ht);
15106  heap_Bestspace->vpid_ht = NULL;
15107  }
15108 
15109  if (heap_Bestspace->hfid_ht != NULL)
15110  {
15111  mht_destroy (heap_Bestspace->hfid_ht);
15112  heap_Bestspace->hfid_ht = NULL;
15113  }
15114 
15115  pthread_mutex_destroy (&heap_Bestspace->bestspace_mutex);
15116 
15117  heap_Bestspace = NULL;
15118 
15119  return ret;
15120 }
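/*
 * Illustrative sketch, not part of the original file: the finalization above
 * releases cached entries through the VPID hash and then drains the free
 * list one node at a time while decrementing its counter.  The minimal
 * standalone drain loop below follows the same pattern; the node type is
 * hypothetical.
 */
#include <stdlib.h>

typedef struct toy_node TOY_NODE;
struct toy_node
{
  TOY_NODE *next;
};

static int
toy_drain_free_list (TOY_NODE ** head, int count)
{
  while (count > 0 && *head != NULL)
    {
      TOY_NODE *ent = *head;

      *head = ent->next;        /* unlink before freeing */
      ent->next = NULL;
      free (ent);
      count--;
    }
  return count;                 /* 0 when the list and the counter agree */
}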
15121 
15122 /*
15123  * heap_chnguess_decache () - Decache a specific entry or all entries
15124  * return: NO_ERROR
15125  * oid(in): oid: class oid or NULL
15126  * IF NULL implies all classes
15127  *
15128  * Note: Remove from the hash the entry associated with given oid. If
15129  * oid is NULL, all entries in hash are removed.
15130  * This function is called when a class is updated or during
15131  * rollback when a class was changed
15132  */
15133 static int
15134 heap_chnguess_decache (const OID * oid)
15135 {
15136  HEAP_CHNGUESS_ENTRY *entry;
15137  int ret = NO_ERROR;
15138 
15139  if (heap_Guesschn == NULL)
15140  {
15141  return NO_ERROR; /* nop */
15142  }
15143 
15144  if (oid == NULL)
15145  {
15146  (void) mht_map (heap_Guesschn->ht, heap_chnguess_remove_entry, NULL);
15147  }
15148  else
15149  {
15150  entry = (HEAP_CHNGUESS_ENTRY *) mht_get (heap_Guesschn->ht, oid);
15151  if (entry != NULL)
15152  {
15153  (void) heap_chnguess_remove_entry (oid, entry, NULL);
15154  }
15155  }
15156 
15157  if (heap_Guesschn->schema_change == true && oid == NULL)
15158  {
15159  heap_Guesschn->schema_change = false;
15160  }
15161 
15162  return ret;
15163 }
15164 
15165 /*
15166  * heap_chnguess_remove_entry () - Remove an entry from chnguess hash table
15167  * return: NO_ERROR
15168  * oid_key(in): Key (oid) of chnguess table
15169  * ent(in): The entry of hash table
15170  * xignore(in): Extra arguments (currently ignored)
15171  *
15172  * Note: Remove from the hash the given entry. The entry is marked as
15173  * for immediate reuse.
15174  */
15175 static int
15176 heap_chnguess_remove_entry (const void *oid_key, void *ent, void *xignore)
15177 {
15178  HEAP_CHNGUESS_ENTRY *entry = (HEAP_CHNGUESS_ENTRY *) ent;
15179 
15180  /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry before it is
15181  * removed. We may want to take advantage of that here to free the memory associated with the entry */
15182  (void) mht_rem (heap_Guesschn->ht, oid_key, NULL, NULL);
15183  OID_SET_NULL (&entry->oid);
15184  entry->chn = NULL_CHN;
15185  entry->recently_accessed = false;
15186  heap_Guesschn_area.clock_hand = entry->idx;
15187 
15188  return NO_ERROR;
15189 }
15190 
15191 #if defined (CUBRID_DEBUG)
15192 /*
15193  * heap_chnguess_dump () - Dump current chnguess hash table
15194  * return:
15195  *
15196  * Note: Dump all valid chnguess entries.
15197  */
15198 void
15199 heap_chnguess_dump (FILE * fp)
15200 {
15201  int max_tranindex, tran_index, i;
15202  HEAP_CHNGUESS_ENTRY *entry;
15203 
15204  if (heap_Guesschn != NULL)
15205  {
15206  fprintf (fp, "*** Dump of CLASS_OID to CHNGUESS at clients *** \n");
15207  fprintf (fp, "Schema_change = %d, clock_hand = %d,\n", heap_Guesschn->schema_change, heap_Guesschn->clock_hand);
15208  fprintf (fp, "Nentries = %d, Nactive_entries = %u, maxnum of clients = %d, nbytes = %d\n",
15209  heap_Guesschn->num_entries, mht_count (heap_Guesschn->ht), heap_Guesschn->num_clients,
15210  heap_Guesschn->nbytes);
15211  fprintf (fp, "Hash Table = %p, Entries = %p, Bitindex = %p\n", heap_Guesschn->ht, heap_Guesschn->entries,
15212  heap_Guesschn->bitindex);
15213 
15214  max_tranindex = logtb_get_number_of_total_tran_indices ();
15215  for (i = 0; i < heap_Guesschn->num_entries; i++)
15216  {
15217  entry = &heap_Guesschn_area.entries[i];
15218 
15219  if (!OID_ISNULL (&entry->oid))
15220  {
15221  fprintf (fp, " \nEntry_id %d", entry->idx);
15222  fprintf (fp, "OID = %2d|%4d|%2d, chn = %d, recently_free = %d,", entry->oid.volid, entry->oid.pageid,
15223  entry->oid.slotid, entry->chn, entry->recently_accessed);
15224 
15225  /* Dump one bit at a time */
15226  for (tran_index = 0; tran_index < max_tranindex; tran_index++)
15227  {
15228  if (tran_index % 40 == 0)
15229  {
15230  fprintf (fp, "\n ");
15231  }
15232  else if (tran_index % 10 == 0)
15233  {
15234  fprintf (fp, " ");
15235  }
15236  fprintf (fp, "%d", HEAP_BIT_GET (entry->bits, tran_index) ? 1 : 0);
15237  }
15238  fprintf (fp, "\n");
15239  }
15240  }
15241  }
15242 }
15243 #endif /* CUBRID_DEBUG */
15244 
15245 /*
15246  * heap_chnguess_get () - Guess chn of given oid for given tran index (at client)
15247  * return:
15248  * oid(in): OID from where to guess chn at client workspace
15249  * tran_index(in): The client transaction index
15250  *
15251  * Note: Find/guess the chn of the given OID object at the workspace of
15252  * given client transaction index
15253  */
15254 int
15255 heap_chnguess_get (THREAD_ENTRY * thread_p, const OID * oid, int tran_index)
15256 {
15257  int chn = NULL_CHN;
15258  HEAP_CHNGUESS_ENTRY *entry;
15259 
15260  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
15261  {
15262  return NULL_CHN;
15263  }
15264 
15265  if (heap_Guesschn != NULL)
15266  {
15267  if (heap_Guesschn->num_clients <= tran_index)
15268  {
15269  if (heap_chnguess_realloc () != NO_ERROR)
15270  {
15271  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15272  return NULL_CHN;
15273  }
15274  }
15275 
15276  /*
15277  * Do we have this entry in hash table, if we do then check corresponding
15278  * bit for given client transaction index.
15279  */
15280 
15281  entry = (HEAP_CHNGUESS_ENTRY *) mht_get (heap_Guesschn->ht, oid);
15282  if (entry != NULL && HEAP_BIT_GET (entry->bits, tran_index))
15283  {
15284  chn = entry->chn;
15285  }
15286  }
15287 
15288  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15289 
15290  return chn;
15291 }
15292 
15293 /*
15294  * heap_chnguess_put () - Oid object is in the process of being sent to the client
15295  * return: chn or NULL_CHN if not cached
15296  * oid(in): object oid
15297  * tran_index(in): The client transaction index
15298  * chn(in): cache coherency number.
15299  *
15300  * Note: Cache the information that object oid with chn has been sent
15301  * to client with trans_index.
15302  * If the function fails, it returns NULL_CHN. This failure is
15303  * more like a warning since the chnguess is just a caching structure.
15304  */
15305 int
15306 heap_chnguess_put (THREAD_ENTRY * thread_p, const OID * oid, int tran_index, int chn)
15307 {
15308  int i;
15309  bool can_continue;
15310  HEAP_CHNGUESS_ENTRY *entry;
15311 
15312  if (heap_Guesschn == NULL)
15313  {
15314  return NULL_CHN;
15315  }
15316 
15317  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
15318  {
15319  return NULL_CHN;
15320  }
15321 
15322  if (heap_Guesschn->num_clients <= tran_index)
15323  {
15324  if (heap_chnguess_realloc () != NO_ERROR)
15325  {
15326  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15327  return NULL_CHN;
15328  }
15329  }
15330 
15331  /*
15332  * Is the entry already in the chnguess hash table ?
15333  */
15334  entry = (HEAP_CHNGUESS_ENTRY *) mht_get (heap_Guesschn->ht, oid);
15335  if (entry != NULL)
15336  {
15337  /*
15338  * If the cache coherence number is different reset all client entries
15339  */
15340  if (entry->chn != chn)
15341  {
15342  HEAP_NBYTES_CLEARED (entry->bits, heap_Guesschn_area.nbytes);
15343  entry->chn = chn;
15344  }
15345  }
15346  else
15347  {
15348  /*
15349  * Replace one of the entries that has not been used for a while.
15350  * Follow clock replacement algorithm.
15351  */
15352  can_continue = true;
15353  while (entry == NULL && can_continue == true)
15354  {
15355  can_continue = false;
15356  for (i = 0; i < heap_Guesschn->num_entries; i++)
15357  {
15358  /*
15359  * Increase the clock to next entry
15360  */
15361  heap_Guesschn->clock_hand++;
15362  if (heap_Guesschn->clock_hand >= heap_Guesschn->num_entries)
15363  {
15364  heap_Guesschn->clock_hand = 0;
15365  }
15366 
15367  entry = &heap_Guesschn->entries[heap_Guesschn->clock_hand];
15368  if (entry->recently_accessed == true)
15369  {
15370  /*
15371  * Set recently_accessed to false, so the entry can be replaced on the
15372  * next pass if it is not referenced again
15373  */
15374  entry->recently_accessed = false;
15375  entry = NULL;
15376  can_continue = true;
15377  }
15378  else
15379  {
15380  entry->oid = *oid;
15381  entry->chn = chn;
15382  HEAP_NBYTES_CLEARED (entry->bits, heap_Guesschn_area.nbytes);
15383  break;
15384  }
15385  }
15386  }
15387  }
15388 
15389  /*
15390  * Now set the desired client transaction index bit
15391  */
15392  if (entry != NULL)
15393  {
15394  HEAP_BIT_SET (entry->bits, tran_index);
15395  entry->recently_accessed = true;
15396  }
15397  else
15398  {
15399  chn = NULL_CHN;
15400  }
15401 
15402  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15403 
15404  return chn;
15405 }
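/*
 * Illustrative sketch, not part of the original file: the replacement loop
 * above is a classic clock / second-chance policy.  The standalone version
 * below walks a small array with a clock hand, gives recently accessed
 * entries one more chance, and reuses the first entry that was not touched
 * since the last sweep; all names are hypothetical.
 */
#include <stdbool.h>

#define TOY_NENTRIES 8

typedef struct
{
  bool recently_accessed;
} TOY_ENTRY;

/* Returns the index of the victim entry, or -1 if every entry got a second
 * chance on this sweep (the caller may simply sweep again). */
static int
toy_clock_pick_victim (TOY_ENTRY * entries, int *clock_hand)
{
  for (int i = 0; i < TOY_NENTRIES; i++)
    {
      *clock_hand = (*clock_hand + 1) % TOY_NENTRIES;
      if (entries[*clock_hand].recently_accessed)
        {
          entries[*clock_hand].recently_accessed = false;   /* second chance */
        }
      else
        {
          return *clock_hand;                               /* victim found */
        }
    }
  return -1;
}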
15406 
15407 /*
15408  * heap_chnguess_clear () - Clear any cached information for given client
15409  * used when the client is shut down
15410  * return:
15411  * tran_index(in): The client transaction index
15412  *
15413  * Note: Clear the transaction index bit for all chnguess entries.
15414  */
15415 void
15416 heap_chnguess_clear (THREAD_ENTRY * thread_p, int tran_index)
15417 {
15418  int i;
15419  HEAP_CHNGUESS_ENTRY *entry;
15420 
15421  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
15422  {
15423  return;
15424  }
15425 
15426  if (heap_Guesschn != NULL)
15427  {
15428  for (i = 0; i < heap_Guesschn->num_entries; i++)
15429  {
15430  entry = &heap_Guesschn_area.entries[i];
15431  if (!OID_ISNULL (&entry->oid))
15432  {
15433  HEAP_BIT_CLEAR (entry->bits, (unsigned int) tran_index);
15434  }
15435  }
15436  }
15437 
15438  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15439 
15440 }
15441 
15442 /*
15443  * Recovery functions
15444  */
15445 
15446 /*
15447  * heap_rv_redo_newpage () - Redo the statistics or a new page allocation for
15448  * a heap file
15449  * return: int
15450  * rcv(in): Recovery structure
15451  */
15452 int
15453 heap_rv_redo_newpage (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15454 {
15455  RECDES recdes;
15456  INT16 slotid;
15457  int sp_success;
15458 
15459  (void) pgbuf_set_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
15460 
15461  /* Initialize header page */
15463 
15464  /* Now insert first record (either statistics or chain record) */
15465  recdes.area_size = recdes.length = rcv->length;
15466  recdes.type = REC_HOME;
15467  recdes.data = (char *) rcv->data;
15468  sp_success = spage_insert (thread_p, rcv->pgptr, &recdes, &slotid);
15469  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15470 
15471  if (sp_success != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
15472  {
15473  if (sp_success != SP_SUCCESS)
15474  {
15476  }
15477  /* something went wrong. Unable to redo initialization of new heap page */
15478  assert (er_errid () != NO_ERROR);
15479  return er_errid ();
15480  }
15481 
15482  return NO_ERROR;
15483 }
15484 
15485 /*
15486  * heap_rv_undoredo_pagehdr () - Recover the header of a heap page
15487  * (either statistics/chain)
15488  * return: int
15489  * rcv(in): Recovery structure
15490  *
15491  * Note: Recover the update of the header of a heap page. The header
15492  * can be the heap header or a chain header.
15493  */
15494 int
15495 heap_rv_undoredo_pagehdr (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15496 {
15497  RECDES recdes;
15498  int sp_success;
15499 
15500  (void) pgbuf_check_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
15501 
15502  recdes.area_size = recdes.length = rcv->length;
15503  recdes.type = REC_HOME;
15504  recdes.data = (char *) rcv->data;
15505 
15506  sp_success = spage_update (thread_p, rcv->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes);
15507  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15508 
15509  if (sp_success != SP_SUCCESS)
15510  {
15511  /* something went wrong. Unable to redo update statistics for chain */
15512  if (sp_success != SP_ERROR)
15513  {
15515  }
15516  assert (er_errid () != NO_ERROR);
15517  return er_errid ();
15518  }
15519  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15520 
15521  return NO_ERROR;
15522 }
15523 
15524 /*
15525  * heap_rv_dump_statistics () - Dump statistics recovery information
15526  * return: int
15527  * ignore_length(in): Length of Recovery Data
15528  * data(in): The data being logged
15529  *
15530  * Note: Dump statistics recovery information
15531  */
15532 void
15533 heap_rv_dump_statistics (FILE * fp, int ignore_length, void *data)
15534 {
15535  int ret = NO_ERROR;
15536 
15537  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
15538 
15539  heap_hdr = (HEAP_HDR_STATS *) data;
15540  ret = heap_dump_hdr (fp, heap_hdr);
15541 }
15542 
15543 /*
15544  * heap_rv_dump_chain () - Dump chain recovery information
15545  * return: int
15546  * ignore_length(in): Length of Recovery Data
15547  * data(in): The data being logged
15548  */
15549 void
15550 heap_rv_dump_chain (FILE * fp, int ignore_length, void *data)
15551 {
15552  HEAP_CHAIN *chain;
15553 
15554  chain = (HEAP_CHAIN *) data;
15555  fprintf (fp, "CLASS_OID = %2d|%4d|%2d, PREV_VPID = %2d|%4d, NEXT_VPID = %2d|%4d, MAX_MVCCID=%llu, flags=%d.\n",
15556  chain->class_oid.volid, chain->class_oid.pageid, chain->class_oid.slotid, chain->prev_vpid.volid,
15557  chain->prev_vpid.pageid, chain->next_vpid.volid, chain->next_vpid.pageid,
15558  (unsigned long long int) chain->max_mvccid, (int) chain->flags);
15559 }
15560 
15561 /*
15562  * heap_rv_redo_insert () - Redo the insertion of an object
15563  * return: int
15564  * rcv(in): Recovery structure
15565  *
15566  * Note: Redo the insertion of an object at a specific location (OID).
15567  */
15568 int
15569 heap_rv_redo_insert (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15570 {
15571  INT16 slotid;
15572  RECDES recdes;
15573  int sp_success;
15574 
15575  slotid = rcv->offset;
15576  recdes.type = *(INT16 *) (rcv->data);
15577  recdes.data = (char *) (rcv->data) + sizeof (recdes.type);
15578  recdes.area_size = recdes.length = rcv->length - sizeof (recdes.type);
15579 
15580  if (recdes.type == REC_ASSIGN_ADDRESS)
15581  {
15582  /*
15583  * The data here isn't really the data to be inserted (because there
15584  * wasn't any); instead it's the number of bytes that were reserved
15585  * for future insertion. Change recdes.length to reflect the number
15586  * of bytes to reserve, but there's no need for a valid recdes.data:
15587  * spage_insert_for_recovery knows to ignore it in this case.
15588  */
15589  recdes.area_size = recdes.length = *(INT16 *) recdes.data;
15590  recdes.data = NULL;
15591  }
15592 
15593  sp_success = spage_insert_for_recovery (thread_p, rcv->pgptr, slotid, &recdes);
15594  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15595 
15596  if (sp_success != SP_SUCCESS)
15597  {
15598  /* Unable to redo insertion */
15599  if (sp_success != SP_ERROR)
15600  {
15602  }
15603  assert (er_errid () != NO_ERROR);
15604  return er_errid ();
15605  }
15606 
15607  return NO_ERROR;
15608 }
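/*
 * Illustrative sketch, not part of the original file: the redo data consumed
 * above is laid out as a 16-bit record type followed by the record payload;
 * for REC_ASSIGN_ADDRESS the "payload" is only a 16-bit reserved length.  The
 * toy decoder below shows that layout with plain memcpy; the constant and
 * struct names are hypothetical stand-ins.
 */
#include <stdint.h>
#include <string.h>

#define TOY_REC_ASSIGN_ADDRESS 2   /* stand-in value, not the real enum */

typedef struct
{
  int16_t type;
  const char *data;   /* NULL for the assign-address case */
  int length;
} TOY_RECDES;

static void
toy_decode_redo_insert (const char *log_data, int log_length, TOY_RECDES * out)
{
  memcpy (&out->type, log_data, sizeof (int16_t));
  out->data = log_data + sizeof (int16_t);
  out->length = log_length - (int) sizeof (int16_t);

  if (out->type == TOY_REC_ASSIGN_ADDRESS)
    {
      int16_t reserved_length;

      memcpy (&reserved_length, out->data, sizeof (int16_t));
      out->length = reserved_length;   /* only a size was logged, not data */
      out->data = NULL;
    }
}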
15609 
15610 /*
15611  * heap_mvcc_log_insert () - Log MVCC insert heap operation.
15612  *
15613  * return : Void.
15614  * thread_p (in) : Thread entry.
15615  * p_recdes (in) : Newly inserted record.
15616  * p_addr (in) : Log address data.
15617  */
15618 static void
15619 heap_mvcc_log_insert (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr)
15620 {
15621 #define HEAP_LOG_MVCC_INSERT_MAX_REDO_CRUMBS 4
15622 
15623  int n_redo_crumbs = 0, data_copy_offset = 0, chn_offset;
15624  LOG_CRUMB redo_crumbs[HEAP_LOG_MVCC_INSERT_MAX_REDO_CRUMBS];
15625  INT32 mvcc_flags;
15626  HEAP_PAGE_VACUUM_STATUS vacuum_status;
15627 
15628  assert (p_recdes != NULL);
15629  assert (p_addr != NULL);
15630 
15631  vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
15632 
15633  /* Update chain. */
15635  if (vacuum_status != heap_page_get_vacuum_status (thread_p, p_addr->pgptr))
15636  {
15637  /* Mark status change for recovery. */
15638  p_addr->offset |= HEAP_RV_FLAG_VACUUM_STATUS_CHANGE;
15639  }
15640 
15641  /* Build redo crumbs */
15642  /* Add record type */
15643  redo_crumbs[n_redo_crumbs].length = sizeof (p_recdes->type);
15644  redo_crumbs[n_redo_crumbs++].data = &p_recdes->type;
15645 
15646  if (p_recdes->type != REC_BIGONE)
15647  {
15648  mvcc_flags = (INT32) OR_GET_MVCC_FLAG (p_recdes->data);
15649  chn_offset = OR_CHN_OFFSET;
15650 
15651  /* Add representation ID and flags field */
15652  redo_crumbs[n_redo_crumbs].length = OR_INT_SIZE;
15653  redo_crumbs[n_redo_crumbs++].data = p_recdes->data;
15654 
15655  /* Add CHN */
15656  redo_crumbs[n_redo_crumbs].length = OR_INT_SIZE;
15657  redo_crumbs[n_redo_crumbs++].data = p_recdes->data + chn_offset;
15658 
15659  /* Set data copy offset after the record header */
15660  data_copy_offset = OR_HEADER_SIZE (p_recdes->data);
15661  }
15662 
15663  /* Add record data - the record header is skipped if the record is not a big one */
15664  redo_crumbs[n_redo_crumbs].length = p_recdes->length - data_copy_offset;
15665  redo_crumbs[n_redo_crumbs++].data = p_recdes->data + data_copy_offset;
15666 
15667  /* Safe guard */
15668  assert (n_redo_crumbs <= HEAP_LOG_MVCC_INSERT_MAX_REDO_CRUMBS);
15669 
15670  /* Append redo crumbs; undo crumbs not necessary as the spage_delete physical operation uses the offset field of the
15671  * address */
15672  if (thread_p->no_logging)
15673  {
15674  log_append_undo_crumbs (thread_p, RVHF_MVCC_INSERT, p_addr, 0, NULL);
15675  }
15676  else
15677  {
15678  log_append_undoredo_crumbs (thread_p, RVHF_MVCC_INSERT, p_addr, 0, n_redo_crumbs, NULL, redo_crumbs);
15679  }
15680 }
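/*
 * Illustrative sketch, not part of the original file: a "crumb" is just a
 * (length, pointer) pair, so a log record can be described as a small gather
 * list instead of being copied into one buffer first.  The toy helper below
 * flattens such a list, which is conceptually what the log appender does with
 * the redo crumbs built above; names are hypothetical.
 */
#include <string.h>

typedef struct
{
  int length;
  const void *data;
} TOY_CRUMB;

/* Returns the number of bytes written, or -1 if the buffer is too small. */
static int
toy_flatten_crumbs (const TOY_CRUMB * crumbs, int n_crumbs, char *buf, int buf_size)
{
  int offset = 0;

  for (int i = 0; i < n_crumbs; i++)
    {
      if (offset + crumbs[i].length > buf_size)
        {
          return -1;
        }
      memcpy (buf + offset, crumbs[i].data, crumbs[i].length);
      offset += crumbs[i].length;
    }
  return offset;
}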
15681 
15682 /*
15683  * heap_rv_mvcc_redo_insert () - Redo the MVCC insertion of an object
15684  * return: int
15685  * rcv(in): Recovery structure
15686  *
15687  * Note: MVCC redo the insertion of an object at a specific location (OID).
15688  */
15689 int
15690 heap_rv_mvcc_redo_insert (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15691 {
15692  INT16 slotid;
15693  RECDES recdes;
15694  int chn, sp_success;
15695  MVCC_REC_HEADER mvcc_rec_header;
15696  INT16 record_type;
15697  bool vacuum_status_change = false;
15698 
15699  assert (rcv->pgptr != NULL);
15700  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));
15701 
15702  slotid = rcv->offset;
15703  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
15704  {
15705  vacuum_status_change = true;
15706  }
15707  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
15708  assert (slotid > 0);
15709 
15710  record_type = *(INT16 *) rcv->data;
15711  if (record_type == REC_BIGONE)
15712  {
15713  /* no data header */
15714  HEAP_SET_RECORD (&recdes, rcv->length - sizeof (record_type), rcv->length - sizeof (record_type), REC_BIGONE,
15715  rcv->data + sizeof (record_type));
15716  }
15717  else
15718  {
15719  char data_buffer[IO_DEFAULT_PAGE_SIZE + OR_MVCC_MAX_HEADER_SIZE + MAX_ALIGNMENT];
15720  int repid_and_flags, offset, mvcc_flag, offset_size;
15721 
15722  offset = sizeof (record_type);
15723 
15724  repid_and_flags = OR_GET_INT (rcv->data + offset);
15725  offset += OR_INT_SIZE;
15726 
15727  chn = OR_GET_INT (rcv->data + offset);
15728  offset += OR_INT_SIZE;
15729 
15730  mvcc_flag = (char) ((repid_and_flags >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK);
15731 
15732  assert (!(mvcc_flag & OR_MVCC_FLAG_VALID_DELID));
15733 
15734  if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_1BYTE)
15735  {
15736  offset_size = OR_BYTE_SIZE;
15737  }
15738  else if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_2BYTE)
15739  {
15740  offset_size = OR_SHORT_SIZE;
15741  }
15742  else
15743  {
15744  offset_size = OR_INT_SIZE;
15745  }
15746 
15747  MVCC_SET_REPID (&mvcc_rec_header, repid_and_flags & OR_MVCC_REPID_MASK);
15748  MVCC_SET_FLAG (&mvcc_rec_header, mvcc_flag);
15749  MVCC_SET_INSID (&mvcc_rec_header, rcv->mvcc_id);
15750  MVCC_SET_CHN (&mvcc_rec_header, chn);
15751 
15752  HEAP_SET_RECORD (&recdes, IO_DEFAULT_PAGE_SIZE + OR_MVCC_MAX_HEADER_SIZE, 0, record_type,
15753  PTR_ALIGN (data_buffer, MAX_ALIGNMENT));
15754  or_mvcc_add_header (&recdes, &mvcc_rec_header, repid_and_flags & OR_BOUND_BIT_FLAG, offset_size);
15755 
15756  memcpy (recdes.data + recdes.length, rcv->data + offset, rcv->length - offset);
15757  recdes.length += (rcv->length - offset);
15758  }
15759 
15760  sp_success = spage_insert_for_recovery (thread_p, rcv->pgptr, slotid, &recdes);
15761 
15762  if (sp_success != SP_SUCCESS)
15763  {
15764  /* Unable to redo insertion */
15765  assert_release (false);
15766  return ER_FAILED;
15767  }
15768 
15769  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
15770  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15771 
15772  return NO_ERROR;
15773 }
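/*
 * Illustrative sketch, not part of the original file: several recovery
 * functions above receive the slot id with an extra flag packed into it
 * (HEAP_RV_FLAG_VACUUM_STATUS_CHANGE).  The toy decoder below shows the
 * mask-and-clear idiom; the flag value used here is a hypothetical stand-in,
 * the real one is defined elsewhere in the heap module.
 */
#include <stdbool.h>

#define TOY_FLAG_VACUUM_STATUS_CHANGE 0x8000U   /* hypothetical bit position */

static int
toy_decode_slotid (unsigned int offset, bool * vacuum_status_change)
{
  *vacuum_status_change = (offset & TOY_FLAG_VACUUM_STATUS_CHANGE) != 0;
  return (int) (offset & ~TOY_FLAG_VACUUM_STATUS_CHANGE);   /* plain slot id */
}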
15774 
15775 /*
15776  * heap_rv_undo_insert () - Undo the insertion of an object.
15777  * return: int
15778  * rcv(in): Recovery structure
15779  *
15780  * Note: Delete an object for recovery purposes. The OID of the object
15781  * is reused since the object was never committed.
15782  */
15783 int
15784 heap_rv_undo_insert (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15785 {
15786  INT16 slotid;
15787 
15788  slotid = rcv->offset;
15789  /* Clear HEAP_RV_FLAG_VACUUM_STATUS_CHANGE */
15790  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
15791  (void) spage_delete_for_recovery (thread_p, rcv->pgptr, slotid);
15792  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15793 
15794  return NO_ERROR;
15795 }
15796 
15797 /*
15798  * heap_rv_redo_delete () - Redo the deletion of an object
15799  * return: int
15800  * rcv(in): Recovery structure
15801  *
15802  * Note: Redo the deletion of an object.
15803  * The OID of the object is not reused since we don't know if the object was a
15804  * newly created object.
15805  */
15806 int
15807 heap_rv_redo_delete (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15808 {
15809  INT16 slotid;
15810 
15811  slotid = rcv->offset;
15812  (void) spage_delete (thread_p, rcv->pgptr, slotid);
15813  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15814 
15815  return NO_ERROR;
15816 }
15817 
15818 /*
15819  * heap_mvcc_log_delete () - Log normal MVCC heap delete operation (just
15820  * append delete MVCCID and next version OID).
15821  *
15822  * return : Void.
15823  * thread_p (in) : Thread entry.
15824  * p_addr (in) : Log address data.
15825  * rcvindex(in) : Index to recovery function
15826  */
15827 static void
15828 heap_mvcc_log_delete (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr, LOG_RCVINDEX rcvindex)
15829 {
15830  char redo_data_buffer[OR_MVCCID_SIZE + MAX_ALIGNMENT];
15831  char *redo_data_p = PTR_ALIGN (redo_data_buffer, MAX_ALIGNMENT);
15832  char *ptr;
15833  int redo_data_size = 0;
15834  HEAP_PAGE_VACUUM_STATUS vacuum_status;
15835 
15836  assert (p_addr != NULL);
15838  || rcvindex == RVHF_MVCC_DELETE_OVERFLOW);
15839 
15840  if (LOG_IS_MVCC_HEAP_OPERATION (rcvindex))
15841  {
15842  vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
15843 
15845  if (heap_page_get_vacuum_status (thread_p, p_addr->pgptr) != vacuum_status)
15846  {
15847  /* Mark vacuum status change for recovery. */
15848  p_addr->offset |= HEAP_RV_FLAG_VACUUM_STATUS_CHANGE;
15849  }
15850  }
15851 
15852  /* Prepare redo data. */
15853  ptr = redo_data_p;
15854 
15855  if (rcvindex != RVHF_MVCC_DELETE_REC_HOME)
15856  {
15857  /* MVCCID must be packed also, since it is not saved in log record structure. */
15858  ptr = or_pack_mvccid (ptr, logtb_get_current_mvccid (thread_p));
15859  redo_data_size += OR_MVCCID_SIZE;
15860  }
15861 
15862  assert ((ptr - redo_data_buffer) <= (int) sizeof (redo_data_buffer));
15863 
15864  /* Log append undo/redo crumbs */
15865  if (thread_p->no_logging)
15866  {
15867  log_append_undo_data (thread_p, rcvindex, p_addr, 0, NULL);
15868  }
15869  else
15870  {
15871  log_append_undoredo_data (thread_p, rcvindex, p_addr, 0, redo_data_size, NULL, redo_data_p);
15872  }
15873 }
15874 
15875 /*
15876  * heap_rv_mvcc_undo_delete () - Undo the MVCC deletion of an object
15877  * return: int
15878  * rcv(in): Recovery structure
15879  */
15880 int
15881 heap_rv_mvcc_undo_delete (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15882 {
15883  INT16 slotid;
15884  MVCC_REC_HEADER mvcc_rec_header;
15885  char data_buffer[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
15886  RECDES rebuild_record;
15887 
15888  slotid = rcv->offset;
15889  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
15890  assert (slotid > 0);
15891 
15892  rebuild_record.data = PTR_ALIGN (data_buffer, MAX_ALIGNMENT);
15893  rebuild_record.area_size = DB_PAGESIZE;
15894  if (spage_get_record (thread_p, rcv->pgptr, slotid, &rebuild_record, COPY) != S_SUCCESS)
15895  {
15896  assert_release (false);
15897  return ER_FAILED;
15898  }
15899  assert (rebuild_record.type == REC_HOME || rebuild_record.type == REC_NEWHOME);
15900 
15901  if (or_mvcc_get_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
15902  {
15903  assert_release (false);
15904  return ER_FAILED;
15905  }
15906  assert (MVCC_IS_FLAG_SET (&mvcc_rec_header, OR_MVCC_FLAG_VALID_DELID));
15907  MVCC_CLEAR_FLAG_BITS (&mvcc_rec_header, OR_MVCC_FLAG_VALID_DELID);
15908 
15909  if (or_mvcc_set_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
15910  {
15911  assert_release (false);
15912  return ER_FAILED;
15913  }
15914 
15915  if (spage_update (thread_p, rcv->pgptr, slotid, &rebuild_record) != SP_SUCCESS)
15916  {
15917  assert_release (false);
15918  return ER_FAILED;
15919  }
15920 
15921  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15922  return NO_ERROR;
15923 }
15924 
15925 /*
15926  * heap_rv_mvcc_undo_delete_overflow () - Undo MVCC delete of an overflow
15927  * record.
15928  *
15929  * return : Error code.
15930  * thread_p (in) : Thread entry.
15931  * rcv (in) : Recovery data.
15932  */
15933 int
15934 heap_rv_mvcc_undo_delete_overflow (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
15935 {
15936  MVCC_REC_HEADER mvcc_header;
15937 
15938  if (heap_get_mvcc_rec_header_from_overflow (rcv->pgptr, &mvcc_header, NULL) != NO_ERROR)
15939  {
15940  assert_release (false);
15941  return ER_FAILED;
15942  }
15943 
15944  /* All flags should be set. Overflow header should be set to maximum size */
15947 
15948  MVCC_SET_DELID (&mvcc_header, MVCCID_NULL);
15949 
15950  /* Change header. */
15951  if (heap_set_mvcc_rec_header_on_overflow (rcv->pgptr, &mvcc_header) != NO_ERROR)
15952  {
15953  assert_release (false);
15954  return ER_FAILED;
15955  }
15956 
15957  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15958  return NO_ERROR;
15959 }
15960 
15961 /*
15962  * heap_rv_mvcc_redo_delete_internal () - Internal function to be used by
15963  * heap_rv_mvcc_redo_delete_home and
15964  * heap_rv_mvcc_redo_delete_newhome.
15965  *
15966  * return : Error code.
15967  * thread_p (in) : Thread entry.
15968  * page (in) : Heap page.
15969  * slotid (in) : Recovered record slotid.
15970  * mvccid (in) : Delete MVCCID.
15971  */
15972 static int
15973 heap_rv_mvcc_redo_delete_internal (THREAD_ENTRY * thread_p, PAGE_PTR page, PGSLOTID slotid, MVCCID mvccid)
15974 {
15975  RECDES rebuild_record;
15976  char data_buffer[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
15977  MVCC_REC_HEADER mvcc_rec_header;
15978 
15979  assert (page != NULL);
15980  assert (MVCCID_IS_NORMAL (mvccid));
15981 
15982  rebuild_record.data = PTR_ALIGN (data_buffer, MAX_ALIGNMENT);
15983  rebuild_record.area_size = DB_PAGESIZE;
15984 
15985  /* Get record. */
15986  if (spage_get_record (thread_p, page, slotid, &rebuild_record, COPY) != S_SUCCESS)
15987  {
15988  assert_release (false);
15989  return ER_FAILED;
15990  }
15991 
15992  /* Get MVCC header. */
15993  if (or_mvcc_get_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
15994  {
15995  assert_release (false);
15996  return ER_FAILED;
15997  }
15998 
15999  /* Set delete MVCCID. */
16000  MVCC_SET_FLAG_BITS (&mvcc_rec_header, OR_MVCC_FLAG_VALID_DELID);
16001  MVCC_SET_DELID (&mvcc_rec_header, mvccid);
16002 
16003  /* Change header. */
16004  if (or_mvcc_set_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
16005  {
16006  assert_release (false);
16007  return ER_FAILED;
16008  }
16009 
16010  /* Update record in page. */
16011  if (spage_update (thread_p, page, slotid, &rebuild_record) != SP_SUCCESS)
16012  {
16013  assert_release (false);
16014  return ER_FAILED;
16015  }
16016 
16017  /* Success. */
16018  return NO_ERROR;
16019 }
16020 
16021 /*
16022  * heap_rv_mvcc_redo_delete_home () - Redo MVCC delete of REC_HOME record.
16023  *
16024  * return : Error code
16025  * thread_p (in) : Thread entry.
16026  * rcv (in) : Recovery data.
16027  */
16028 int
16029 heap_rv_mvcc_redo_delete_home (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16030 {
16031  int error_code = NO_ERROR;
16032  int offset = 0;
16033  PGSLOTID slotid;
16034  bool vacuum_status_change = false;
16035 
16036  assert (rcv->pgptr != NULL);
16037  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));
16038 
16039  slotid = rcv->offset;
16040  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
16041  {
16042  vacuum_status_change = true;
16043  }
16044  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
16045  assert (slotid > 0);
16046 
16047  assert (offset == rcv->length);
16048 
16049  error_code = heap_rv_mvcc_redo_delete_internal (thread_p, rcv->pgptr, slotid, rcv->mvcc_id);
16050  if (error_code != NO_ERROR)
16051  {
16052  ASSERT_ERROR ();
16053  return error_code;
16054  }
16055  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
16056 
16057  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16058  return NO_ERROR;
16059 }
16060 
16061 /*
16062  * heap_rv_mvcc_redo_delete_overflow () - Redo MVCC delete of overflow record.
16063  *
16064  * return : Error code
16065  * thread_p (in) : Thread entry.
16066  * rcv (in) : Recovery data.
16067  */
16068 int
16069 heap_rv_mvcc_redo_delete_overflow (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16070 {
16071  int offset = 0;
16072  MVCCID mvccid;
16073  MVCC_REC_HEADER mvcc_header;
16074 
16075  assert (rcv->pgptr != NULL);
16076 
16077  OR_GET_MVCCID (rcv->data + offset, &mvccid);
16078  offset += OR_MVCCID_SIZE;
16079 
16080  assert (offset == rcv->length);
16081 
16082  if (heap_get_mvcc_rec_header_from_overflow (rcv->pgptr, &mvcc_header, NULL) != NO_ERROR)
16083  {
16084  assert_release (false);
16085  return ER_FAILED;
16086  }
16088 
16090  MVCC_SET_DELID (&mvcc_header, mvccid);
16091 
16092  /* Update MVCC header. */
16093  if (heap_set_mvcc_rec_header_on_overflow (rcv->pgptr, &mvcc_header) != NO_ERROR)
16094  {
16095  assert_release (false);
16096  return ER_FAILED;
16097  }
16098 
16099  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16100  return NO_ERROR;
16101 }
16102 
16103 /*
16104  * heap_rv_mvcc_redo_delete_newhome () - Redo MVCC delete of REC_NEWHOME
16105  * record.
16106  *
16107  * return : Error code
16108  * thread_p (in) : Thread entry.
16109  * rcv (in) : Recovery data.
16110  */
16111 int
16112 heap_rv_mvcc_redo_delete_newhome (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16113 {
16114  int error_code = NO_ERROR;
16115  int offset = 0;
16116  MVCCID mvccid;
16117 
16118  assert (rcv->pgptr != NULL);
16119 
16120  OR_GET_MVCCID (rcv->data + offset, &mvccid);
16121  offset += OR_MVCCID_SIZE;
16122 
16123  assert (offset == rcv->length);
16124 
16125  error_code = heap_rv_mvcc_redo_delete_internal (thread_p, rcv->pgptr, rcv->offset, mvccid);
16126  if (error_code != NO_ERROR)
16127  {
16128  ASSERT_ERROR ();
16129  return error_code;
16130  }
16131 
16132  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16133  return NO_ERROR;
16134 }
16135 
16136 /*
16137  * heap_rv_redo_mark_reusable_slot () - Marks a deleted slot as reusable; used
16138  * as a postponed log operation and a
16139  * REDO function
16140  * return: int
16141  * rcv(in): Recovery structure
16142  *
16143  * Note: Mark (during postponed operation execution)/Redo (during recovery)
16144  * the marking of a deleted slot as reusable.
16145  */
16146 int
16147 heap_rv_redo_mark_reusable_slot (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16148 {
16149  INT16 slotid;
16150 
16151  slotid = rcv->offset;
16152  (void) spage_mark_deleted_slot_as_reusable (thread_p, rcv->pgptr, slotid);
16153  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16154 
16155  return NO_ERROR;
16156 }
16157 
16158 /*
16159  * heap_rv_undo_delete () - Undo the deletion of an object
16160  * return: int
16161  * rcv(in): Recovery structure
16162  */
16163 int
16164 heap_rv_undo_delete (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16165 {
16166  INT16 slotid;
16167  INT16 recdes_type;
16168  int error_code;
16169 
16170  error_code = heap_rv_redo_insert (thread_p, rcv);
16171  if (error_code != NO_ERROR)
16172  {
16173  return error_code;
16174  }
16175 
16176  /* vacuum atomicity */
16177  recdes_type = *(INT16 *) (rcv->data);
16178  if (recdes_type == REC_NEWHOME)
16179  {
16180  slotid = rcv->offset;
16181  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
16182  error_code = vacuum_rv_check_at_undo (thread_p, rcv->pgptr, slotid, recdes_type);
16183  if (error_code != NO_ERROR)
16184  {
16185  assert_release (false);
16186  return ER_FAILED;
16187  }
16188  }
16189 
16190  return NO_ERROR;
16191 }
16192 
16193 /*
16194  * heap_rv_undo_update () - Undo the update of an object
16195  * return: int
16196  * rcv(in): Recovery structure
16197  */
16198 int
16199 heap_rv_undo_update (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16200 {
16201  INT16 recdes_type;
16202  int error_code;
16203 
16204  error_code = heap_rv_undoredo_update (thread_p, rcv);
16205  if (error_code != NO_ERROR)
16206  {
16207  ASSERT_ERROR ();
16208  return error_code;
16209  }
16210 
16211  /* vacuum atomicity */
16212  recdes_type = *(INT16 *) (rcv->data);
16213  if (recdes_type == REC_HOME || recdes_type == REC_NEWHOME)
16214  {
16215  INT16 slotid;
16216 
16217  slotid = rcv->offset;
16218  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
16219  error_code = vacuum_rv_check_at_undo (thread_p, rcv->pgptr, slotid, recdes_type);
16220  if (error_code != NO_ERROR)
16221  {
16222  assert_release (false);
16223  return error_code;
16224  }
16225  }
16226 
16227  return NO_ERROR;
16228 }
16229 
16230 /*
16231  * heap_rv_redo_update () - Redo the update of an object
16232  * return: int
16233  * rcv(in): Recovery structure
16234  */
16235 int
16236 heap_rv_redo_update (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16237 {
16238  return heap_rv_undoredo_update (thread_p, rcv);
16239 }
16240 
16241 /*
16242  * heap_rv_undoredo_update () - Recover an update either for undo or redo
16243  * return: int
16244  * rcv(in): Recovery structure
16245  */
16246 int
16247 heap_rv_undoredo_update (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16248 {
16249  INT16 slotid;
16250  RECDES recdes;
16251  int sp_success;
16252 
16253  slotid = rcv->offset;
16254  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
16255  assert (slotid > 0);
16256 
16257  recdes.type = *(INT16 *) (rcv->data);
16258  recdes.data = (char *) (rcv->data) + sizeof (recdes.type);
16259  recdes.area_size = recdes.length = rcv->length - sizeof (recdes.type);
16260  if (recdes.area_size <= 0)
16261  {
16262  sp_success = SP_SUCCESS;
16263  }
16264  else
16265  {
16266  if (heap_update_physical (thread_p, rcv->pgptr, slotid, &recdes) != NO_ERROR)
16267  {
16268  assert_release (false);
16269  return ER_FAILED;
16270  }
16271  }
16272 
16273  return NO_ERROR;
16274 }
16275 
16276 /*
16277  * heap_rv_redo_reuse_page () - Redo the deletion of all objects in page for
16278  * reuse purposes
16279  * return: int
16280  * rcv(in): Recovery structure
16281  */
16282 int
16283 heap_rv_redo_reuse_page (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16284 {
16285  VPID vpid;
16286  RECDES recdes;
16287  HEAP_CHAIN *chain; /* Chain to next and prev page */
16288  int sp_success;
16289  const bool is_header_page = ((rcv->offset != 0) ? true : false);
16290 
16291  (void) pgbuf_check_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
16292 
16293  vpid.volid = pgbuf_get_volume_id (rcv->pgptr);
16294  vpid.pageid = pgbuf_get_page_id (rcv->pgptr);
16295 
16296  /* We ignore the return value. It should be true (objects were deleted) except for the scenario when the redo actions
16297  * are applied twice. */
16298  (void) heap_delete_all_page_records (thread_p, &vpid, rcv->pgptr);
16299 
16300  /* At here, do not consider the header of heap. Later redo the update of the header of heap at RVHF_STATS log. */
16301  if (!is_header_page)
16302  {
16303  sp_success = spage_get_record (thread_p, rcv->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK);
16304  if (sp_success != SP_SUCCESS)
16305  {
16306  /* something went wrong. Unable to redo update class_oid */
16307  if (sp_success != SP_ERROR)
16308  {
16310  }
16311  assert (er_errid () != NO_ERROR);
16312  return er_errid ();
16313  }
16314 
16315  chain = (HEAP_CHAIN *) recdes.data;
16316  COPY_OID (&(chain->class_oid), (OID *) (rcv->data));
16317  chain->max_mvccid = MVCCID_NULL;
16318  chain->flags = 0;
16320  }
16321 
16322  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16323 
16324  return NO_ERROR;
16325 }
16326 
16327 /*
16328  * heap_rv_redo_reuse_page_reuse_oid () - Redo the deletion of all objects in
16329  * a reusable oid heap page for reuse
16330  * purposes
16331  * return: int
16332  * rcv(in): Recovery structure
16333  */
16334 int
16335 heap_rv_redo_reuse_page_reuse_oid (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
16336 {
16337  RECDES recdes;
16338  HEAP_CHAIN *chain; /* Chain to next and prev page */
16339  int sp_success;
16340  const bool is_header_page = ((rcv->offset != 0) ? true : false);
16341 
16342  (void) heap_reinitialize_page (thread_p, rcv->pgptr, is_header_page);
16343 
16344  (void) pgbuf_set_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
16345 
16346  /* At here, do not consider the header of heap. Later redo the update of the header of heap at RVHF_STATS log. */
16347  if (!is_header_page)
16348  {
16349  sp_success = spage_get_record (thread_p, rcv->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK);
16350  if (sp_success != SP_SUCCESS)
16351  {
16352  /* something went wrong. Unable to redo update class_oid */
16353  if (sp_success != SP_ERROR)
16354  {
16356  }
16357  assert (er_errid () != NO_ERROR);
16358  return er_errid ();
16359  }
16360 
16361  chain = (HEAP_CHAIN *) recdes.data;
16362  COPY_OID (&(chain->class_oid), (OID *) (rcv->data));
16363  chain->max_mvccid = MVCCID_NULL;
16364  chain->flags = 0;
16366  }
16367 
16368  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16369 
16370  return NO_ERROR;
16371 }
16372 
16373 /*
16374  * heap_rv_dump_reuse_page () - Dump reuse page
16375  * return: int
16376  * ignore_length(in): Length of Recovery Data
16377  * ignore_data(in): The data being logged
16378  *
16379  * Note: Dump information about reuse of page.
16380  */
16381 void
16382 heap_rv_dump_reuse_page (FILE * fp, int ignore_length, void *ignore_data)
16383 {
16384  fprintf (fp, "Delete all objects in page for reuse purposes of page\n");
16385 }
16386 
16387 /*
16388  * xheap_get_class_num_objects_pages () -
16389  * return: NO_ERROR
16390  * hfid(in):
16391  * approximation(in):
16392  * nobjs(out):
16393  * npages(out):
16394  */
16395 int
16396 xheap_get_class_num_objects_pages (THREAD_ENTRY * thread_p, const HFID * hfid, int approximation, int *nobjs,
16397  int *npages)
16398 {
16399  int length, num;
16400  int ret;
16401 
16402  assert (!HFID_IS_NULL (hfid));
16403 
16404  if (approximation)
16405  {
16406  num = heap_estimate (thread_p, hfid, npages, nobjs, &length);
16407  }
16408  else
16409  {
16410  num = heap_get_num_objects (thread_p, hfid, npages, nobjs, &length);
16411  }
16412 
16413  if (num < 0)
16414  {
16415  return (((ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret);
16416  }
16417 
16418  return NO_ERROR;
16419 }
16420 
16421 /*
16422  * xheap_has_instance () -
16423  * return:
16424  * hfid(in):
16425  * class_oid(in):
16426  * has_visible_instance(in): true if we need to check for a visible record
16427  */
16428 int
16429 xheap_has_instance (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, int has_visible_instance)
16430 {
16431  OID oid;
16432  HEAP_SCANCACHE scan_cache;
16433  RECDES recdes;
16434  SCAN_CODE r;
16435  MVCC_SNAPSHOT *mvcc_snapshot = NULL;
16436 
16437  OID_SET_NULL (&oid);
16438 
16439  if (has_visible_instance)
16440  {
16441  mvcc_snapshot = logtb_get_mvcc_snapshot (thread_p);
16442  if (mvcc_snapshot == NULL)
16443  {
16444  return ER_FAILED;
16445  }
16446  }
16447  if (heap_scancache_start (thread_p, &scan_cache, hfid, class_oid, true, false, mvcc_snapshot) != NO_ERROR)
16448  {
16449  return ER_FAILED;
16450  }
16451 
16452  recdes.data = NULL;
16453  r = heap_first (thread_p, hfid, class_oid, &oid, &recdes, &scan_cache, true);
16454  heap_scancache_end (thread_p, &scan_cache);
16455 
16456  if (r == S_ERROR)
16457  {
16458  return ER_FAILED;
16459  }
16460  else if (r == S_DOESNT_EXIST || r == S_END)
16461  {
16462  return 0;
16463  }
16464  else
16465  {
16466  return 1;
16467  }
16468 }
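/*
 * Illustrative sketch (not part of the original source): interpreting the return
 * value of xheap_has_instance.  A negative value is an error, 0 means no
 * (visible) instance exists and 1 means at least one was found.
 * demo_class_is_empty is a hypothetical wrapper.
 */
static int
demo_class_is_empty (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, bool * is_empty)
{
  int r = xheap_has_instance (thread_p, hfid, class_oid, 1 /* only MVCC-visible rows */ );

  if (r < 0)
    {
      return ER_FAILED;
    }

  *is_empty = (r == 0);
  return NO_ERROR;
}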
16469 
16470 /*
16471  * heap_get_class_repr_id () -
16472  * return:
16473  * class_oid(in):
16474  */
16475 REPR_ID
16476 heap_get_class_repr_id (THREAD_ENTRY * thread_p, OID * class_oid)
16477 {
16478  OR_CLASSREP *rep = NULL;
16479  REPR_ID id;
16480  int idx_incache = -1;
16481 
16482  if (!class_oid || !idx_incache)
16483  {
16484  return 0;
16485  }
16486 
16487  rep = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &idx_incache);
16488  if (rep == NULL)
16489  {
16490  return 0;
16491  }
16492 
16493  id = rep->id;
16494  heap_classrepr_free_and_init (rep, &idx_incache);
16495 
16496  return id;
16497 }
16498 
16499 /*
16500  * heap_set_autoincrement_value () -
16501  * return: NO_ERROR, or ER_code
16502  * attr_info(in):
16503  * scan_cache(in):
16504  * is_set(out): 1 if at least one autoincrement value has been set
16505  */
16506 int
16507 heap_set_autoincrement_value (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, HEAP_SCANCACHE * scan_cache,
16508  int *is_set)
16509 {
16510  int i, idx_in_cache;
16511  char *classname = NULL;
16512  char *attr_name = NULL;
16513  RECDES recdes; /* Used to obtain attribute name */
16514  char serial_name[AUTO_INCREMENT_SERIAL_NAME_MAX_LENGTH];
16515  HEAP_ATTRVALUE *value;
16516  DB_VALUE dbvalue_numeric, *dbvalue, key_val;
16517  OR_ATTRIBUTE *att;
16518  OID serial_class_oid;
16519  LC_FIND_CLASSNAME status;
16520  OR_CLASSREP *classrep;
16521  BTID serial_btid;
16522  DB_DATA_STATUS data_stat;
16523  HEAP_SCANCACHE local_scan_cache;
16524  bool use_local_scan_cache = false;
16525  int ret = NO_ERROR;
16526  int alloced_string = 0;
16527  char *string = NULL;
16528 
16529  if (!attr_info || !scan_cache)
16530  {
16531  return ER_FAILED;
16532  }
16533 
16534  *is_set = 0;
16535 
16536  recdes.data = NULL;
16537  recdes.area_size = 0;
16538 
16539  for (i = 0; i < attr_info->num_values; i++)
16540  {
16541  value = &attr_info->values[i];
16542  dbvalue = &value->dbvalue;
16543  att = &attr_info->last_classrepr->attributes[i];
16544 
16545  if (att->is_autoincrement && (value->state == HEAP_UNINIT_ATTRVALUE))
16546  {
16547  OID serial_obj_oid = att->auto_increment.serial_obj.load ().oid;
16548  if (OID_ISNULL (&serial_obj_oid))
16549  {
16550  memset (serial_name, '\0', sizeof (serial_name));
16551  recdes.data = NULL;
16552  recdes.area_size = 0;
16553 
16554  if (scan_cache->cache_last_fix_page == false)
16555  {
16556  scan_cache = &local_scan_cache;
16557  (void) heap_scancache_quick_start_root_hfid (thread_p, scan_cache);
16558  use_local_scan_cache = true;
16559  }
16560 
16561  if (heap_get_class_record (thread_p, &(attr_info->class_oid), &recdes, scan_cache, PEEK) != S_SUCCESS)
16562  {
16563  ret = ER_FAILED;
16564  goto exit_on_error;
16565  }
16566 
16567  if (heap_get_class_name (thread_p, &(att->classoid), &classname) != NO_ERROR || classname == NULL)
16568  {
16569  ASSERT_ERROR_AND_SET (ret);
16570  goto exit_on_error;
16571  }
16572 
16573  string = NULL;
16574  alloced_string = 0;
16575 
16576  ret = or_get_attrname (&recdes, att->id, &string, &alloced_string);
16577  if (ret != NO_ERROR)
16578  {
16579  ASSERT_ERROR ();
16580  goto exit_on_error;
16581  }
16582 
16583  attr_name = string;
16584  if (attr_name == NULL)
16585  {
16586  ret = ER_FAILED;
16587  goto exit_on_error;
16588  }
16589 
16590  SET_AUTO_INCREMENT_SERIAL_NAME (serial_name, classname, attr_name);
16591 
16592  if (string != NULL && alloced_string == 1)
16593  {
16594  db_private_free_and_init (thread_p, string);
16595  }
16596 
16597  free_and_init (classname);
16598 
16599  if (db_make_varchar (&key_val, DB_MAX_IDENTIFIER_LENGTH, serial_name, (int) strlen (serial_name),
16601  {
16602  ret = ER_FAILED;
16603  goto exit_on_error;
16604  }
16605 
16606  status = xlocator_find_class_oid (thread_p, CT_SERIAL_NAME, &serial_class_oid, NULL_LOCK);
16607  if (status == LC_CLASSNAME_ERROR || status == LC_CLASSNAME_DELETED)
16608  {
16609  ret = ER_FAILED;
16610  goto exit_on_error;
16611  }
16612 
16613  classrep = heap_classrepr_get (thread_p, &serial_class_oid, NULL, NULL_REPRID, &idx_in_cache);
16614  if (classrep == NULL)
16615  {
16616  ret = ER_FAILED;
16617  goto exit_on_error;
16618  }
16619 
16620  if (classrep->indexes)
16621  {
16622  BTREE_SEARCH search_result;
16623  OID serial_oid;
16624 
16625  BTID_COPY (&serial_btid, &(classrep->indexes[0].btid));
16626  search_result =
16627  xbtree_find_unique (thread_p, &serial_btid, S_SELECT, &key_val, &serial_class_oid, &serial_oid,
16628  false);
16629  heap_classrepr_free_and_init (classrep, &idx_in_cache);
16630  if (search_result != BTREE_KEY_FOUND)
16631  {
16632  ret = ER_FAILED;
16633  goto exit_on_error;
16634  }
16635 
16636  assert (!OID_ISNULL (&serial_oid));
16637  or_aligned_oid null_aligned_oid = { oid_Null_oid };
16638  or_aligned_oid serial_aligned_oid = { serial_oid };
16639  att->auto_increment.serial_obj.compare_exchange_strong (null_aligned_oid, serial_aligned_oid);
16640  }
16641  else
16642  {
16643  heap_classrepr_free_and_init (classrep, &idx_in_cache);
16644  ret = ER_FAILED;
16645  goto exit_on_error;
16646  }
16647  }
16648 
16649  if ((att->type == DB_TYPE_SHORT) || (att->type == DB_TYPE_INTEGER) || (att->type == DB_TYPE_BIGINT))
16650  {
16651  OID serial_obj_oid = att->auto_increment.serial_obj.load ().oid;
16652  if (xserial_get_next_value (thread_p, &dbvalue_numeric, &serial_obj_oid, 0, /* no cache */
16653  1, /* generate one value */
16654  GENERATE_AUTO_INCREMENT, false) != NO_ERROR)
16655  {
16656  ret = ER_FAILED;
16657  goto exit_on_error;
16658  }
16659 
16660  if (numeric_db_value_coerce_from_num (&dbvalue_numeric, dbvalue, &data_stat) != NO_ERROR)
16661  {
16662  ret = ER_FAILED;
16663  goto exit_on_error;
16664  }
16665  }
16666  else if (att->type == DB_TYPE_NUMERIC)
16667  {
16668  OID serial_obj_oid = att->auto_increment.serial_obj.load ().oid;
16669  if (xserial_get_next_value (thread_p, dbvalue, &serial_obj_oid, 0, /* no cache */
16670  1, /* generate one value */
16671  GENERATE_AUTO_INCREMENT, false) != NO_ERROR)
16672  {
16673  ret = ER_FAILED;
16674  goto exit_on_error;
16675  }
16676  }
16677 
16678  *is_set = 1;
16679  value->state = HEAP_READ_ATTRVALUE;
16680  }
16681  }
16682 
16683  if (use_local_scan_cache)
16684  {
16685  heap_scancache_end (thread_p, scan_cache);
16686  }
16687 
16688  return ret;
16689 
16690 exit_on_error:
16691  if (classname != NULL)
16692  {
16693  free_and_init (classname);
16694  }
16695 
16696  if (use_local_scan_cache)
16697  {
16698  heap_scancache_end (thread_p, scan_cache);
16699  }
16700  return ret;
16701 }
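/*
 * Illustrative sketch (not part of the original source): a caller preparing an
 * insert would typically invoke heap_set_autoincrement_value after the attribute
 * cache has been loaded and before the disk record is built, so that any still
 * uninitialized AUTO_INCREMENT attributes receive their next serial value.
 * demo_fill_autoincrement is a hypothetical wrapper.
 */
static int
demo_fill_autoincrement (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, HEAP_SCANCACHE * scan_cache)
{
  int is_set = 0;

  if (heap_set_autoincrement_value (thread_p, attr_info, scan_cache, &is_set) != NO_ERROR)
    {
      return ER_FAILED;
    }

  if (is_set)
    {
      /* at least one attribute was filled from its serial; the generated values
       * now sit in attr_info->values[] with state HEAP_READ_ATTRVALUE */
    }

  return NO_ERROR;
}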
16702 
16703 /*
16704  * heap_attrinfo_set_uninitialized_global () -
16705  * return: NO_ERROR
16706  * inst_oid(in):
16707  * recdes(in):
16708  * attr_info(in):
16709  */
16710 int
16711 heap_attrinfo_set_uninitialized_global (THREAD_ENTRY * thread_p, OID * inst_oid, RECDES * recdes,
16712  HEAP_CACHE_ATTRINFO * attr_info)
16713 {
16714  if (attr_info == NULL)
16715  {
16716  return ER_FAILED;
16717  }
16718 
16719  return heap_attrinfo_set_uninitialized (thread_p, inst_oid, recdes, attr_info);
16720 }
16721 
16722 /*
16723  * heap_get_class_info () - get HFID and file type for class.
16724  *
16725  * return : error code
16726  * thread_p (in) : thread entry
16727  * class_oid (in) : class OID
16728  * hfid_out (out) : output heap file identifier
16729  * ftype_out (out) : output heap file type
16730  * classname_out (out): output classname
16731  */
16732 int
16733 heap_get_class_info (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid_out,
16734  FILE_TYPE * ftype_out, char **classname_out)
16735 {
16736  int error_code = NO_ERROR;
16737 
16738  error_code = heap_hfid_cache_get (thread_p, class_oid, hfid_out, ftype_out, classname_out);
16739  if (error_code != NO_ERROR)
16740  {
16741  ASSERT_ERROR_AND_SET (error_code);
16742  return error_code;
16743  }
16744 
16745  return error_code;
16746 }
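/*
 * Illustrative sketch (not part of the original source): fetching only the HFID
 * of a class, the same way heap_compact_pages below uses heap_get_class_info;
 * the ftype_out and classname_out arguments may be NULL when they are not
 * needed.  demo_get_class_hfid is a hypothetical wrapper.
 */
static int
demo_get_class_hfid (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid_out)
{
  int error_code = heap_get_class_info (thread_p, class_oid, hfid_out, NULL, NULL);

  if (error_code != NO_ERROR)
    {
      return error_code;
    }

  if (HFID_IS_NULL (hfid_out))
    {
      /* the class is known but has no heap file yet */
      return ER_FAILED;
    }

  return NO_ERROR;
}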
16747 
16748 /*
16749  * heap_compact_pages () - compact all pages from hfid of specified class OID
16750  * return: error_code
16751  * class_oid(in): the class OID whose heap pages will be compacted
16752  */
16753 int
16754 heap_compact_pages (THREAD_ENTRY * thread_p, OID * class_oid)
16755 {
16756  int ret = NO_ERROR;
16757  VPID vpid;
16758  VPID next_vpid;
16759  LOG_DATA_ADDR addr;
16760  HFID hfid;
16761  PGBUF_WATCHER pg_watcher;
16762  PGBUF_WATCHER old_pg_watcher;
16763 
16764  if (class_oid == NULL)
16765  {
16767  }
16768 
16769  if (lock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, LK_UNCOND_LOCK) != LK_GRANTED)
16770  {
16771  return ER_FAILED;
16772  }
16773 
16774  ret = heap_get_class_info (thread_p, class_oid, &hfid, NULL, NULL);
16775  if (ret != NO_ERROR || HFID_IS_NULL (&hfid))
16776  {
16777  lock_unlock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, true);
16778  return ret;
16779  }
16780 
16781  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, &hfid);
16782  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, &hfid);
16783 
16784  addr.vfid = &hfid.vfid;
16785  addr.pgptr = NULL;
16786  addr.offset = 0;
16787 
16788  vpid.volid = hfid.vfid.volid;
16789  vpid.pageid = hfid.hpgid;
16790 
16791  if (pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_READ, &pg_watcher) != NO_ERROR)
16792  {
16793  lock_unlock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, true);
16794  ret = ER_FAILED;
16795  goto exit_on_error;
16796  }
16797 
16798  (void) pgbuf_check_page_ptype (thread_p, pg_watcher.pgptr, PAGE_HEAP);
16799 
16800  lock_unlock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, true);
16801 
16802  /* skip header page */
16803  ret = heap_vpid_next (thread_p, &hfid, pg_watcher.pgptr, &next_vpid);
16804  if (ret != NO_ERROR)
16805  {
16806  goto exit_on_error;
16807  }
16808  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
16809 
16810  while (!VPID_ISNULL (&next_vpid))
16811  {
16812  vpid = next_vpid;
16813  pg_watcher.pgptr =
16814  heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE_PREVENT_DEALLOC, X_LOCK, NULL, &pg_watcher);
16815  if (old_pg_watcher.pgptr != NULL)
16816  {
16817  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
16818  }
16819  if (pg_watcher.pgptr == NULL)
16820  {
16821  ret = ER_FAILED;
16822  goto exit_on_error;
16823  }
16824 
16825  ret = heap_vpid_next (thread_p, &hfid, pg_watcher.pgptr, &next_vpid);
16826  if (ret != NO_ERROR)
16827  {
16828  pgbuf_ordered_unfix (thread_p, &pg_watcher);
16829  goto exit_on_error;
16830  }
16831 
16832  if (spage_compact (thread_p, pg_watcher.pgptr) != NO_ERROR)
16833  {
16834  pgbuf_ordered_unfix (thread_p, &pg_watcher);
16835  ret = ER_FAILED;
16836  goto exit_on_error;
16837  }
16838 
16839  addr.pgptr = pg_watcher.pgptr;
16840  log_skip_logging (thread_p, &addr);
16841  pgbuf_set_dirty (thread_p, pg_watcher.pgptr, DONT_FREE);
16842  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
16843  }
16844 
16845  if (old_pg_watcher.pgptr != NULL)
16846  {
16847  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
16848  }
16849  assert (pg_watcher.pgptr == NULL);
16850 
16851  return ret;
16852 
16853 exit_on_error:
16854 
16855  if (pg_watcher.pgptr != NULL)
16856  {
16857  pgbuf_ordered_unfix (thread_p, &pg_watcher);
16858  }
16859  if (old_pg_watcher.pgptr != NULL)
16860  {
16861  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
16862  }
16863 
16864  return ret;
16865 }
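/*
 * Illustrative sketch (not part of the original source): heap_compact_pages only
 * needs the class OID; it resolves the HFID and takes/releases the IS_LOCK on
 * the class by itself, so a maintenance task can simply call it per class.
 * demo_compact_class is a hypothetical wrapper.
 */
static int
demo_compact_class (THREAD_ENTRY * thread_p, OID * class_oid)
{
  int ret = heap_compact_pages (thread_p, class_oid);

  if (ret != NO_ERROR)
    {
      /* pages compacted before the failure keep their compacted layout; the
       * in-page reorganization is intentionally not logged */
    }

  return ret;
}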
16866 
16867 /*
16868  * heap_classrepr_dump_all () - dump all representations belongs to a class
16869  * return: none
16870  * fp(in): file pointer to print out
16871  * class_oid(in): class oid to be dumped
16872  */
16873 void
16874 heap_classrepr_dump_all (THREAD_ENTRY * thread_p, FILE * fp, OID * class_oid)
16875 {
16876  RECDES peek_recdes;
16877  HEAP_SCANCACHE scan_cache;
16878  OR_CLASSREP **rep_all;
16879  int count, i;
16880  char *classname;
16881  bool need_free_classname = false;
16882 
16883  if (heap_get_class_name (thread_p, class_oid, &classname) != NO_ERROR || classname == NULL)
16884  {
16885  classname = (char *) "unknown";
16886  er_clear ();
16887  }
16888  else
16889  {
16890  need_free_classname = true;
16891  }
16892 
16893  heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
16894 
16895  if (heap_get_class_record (thread_p, class_oid, &peek_recdes, &scan_cache, PEEK) == S_SUCCESS)
16896  {
16897  rep_all = or_get_all_representation (&peek_recdes, true, &count);
16898  fprintf (fp, "*** Dumping representations of class %s\n Classname = %s, Class-OID = %d|%d|%d, #Repr = %d\n",
16899  classname, classname, (int) class_oid->volid, class_oid->pageid, (int) class_oid->slotid, count);
16900 
16901  for (i = 0; i < count; i++)
16902  {
16903  assert (rep_all[i] != NULL);
16904  heap_classrepr_dump (thread_p, fp, class_oid, rep_all[i]);
16905  or_free_classrep (rep_all[i]);
16906  }
16907 
16908  fprintf (fp, "\n*** End of dump.\n");
16909  free_and_init (rep_all);
16910  }
16911 
16912  heap_scancache_end (thread_p, &scan_cache);
16913 
16914  if (need_free_classname)
16915  {
16916  free_and_init (classname);
16917  }
16918 }
16919 
16920 /*
16921  * heap_get_btid_from_index_name () - gets the BTID of an index using its name
16922  * and OID of class
16923  *
16924  * return: NO_ERROR, or error code
16925  * thread_p(in) : thread context
16926  * p_class_oid(in): OID of class
16927  * index_name(in) : name of index
16928  * p_found_btid(out): the BTREE ID of index
16929  *
16930  * Note : the 'p_found_btid' argument must be a pointer to a BTID value;
16931  * the found BTID is copied into it with 'BTID_COPY'.
16932  * Null arguments are not allowed.
16933  * If the index name is not found, 'p_found_btid' is set to a NULL
16934  * BTID and no error is set.
16935  *
16936  */
16937 int
16938 heap_get_btid_from_index_name (THREAD_ENTRY * thread_p, const OID * p_class_oid, const char *index_name,
16939  BTID * p_found_btid)
16940 {
16941  int error = NO_ERROR;
16942  int classrepr_cacheindex = -1;
16943  int idx_cnt;
16944  OR_CLASSREP *classrepr = NULL;
16945  OR_INDEX *curr_index = NULL;
16946 
16947  assert (p_found_btid != NULL);
16948  assert (p_class_oid != NULL);
16949  assert (index_name != NULL);
16950 
16951  BTID_SET_NULL (p_found_btid);
16952 
16953  /* get the BTID associated from the index name : the only structure containing this info is OR_CLASSREP */
16954 
16955  /* get class representation */
16956  classrepr = heap_classrepr_get (thread_p, (OID *) p_class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
16957 
16958  if (classrepr == NULL)
16959  {
16960  error = er_errid ();
16961  if (error == NO_ERROR)
16962  {
16963  assert (error != NO_ERROR);
16964  error = ER_FAILED;
16965  }
16966  goto exit;
16967  }
16968 
16969  /* iterate through indexes looking for index name */
16970  for (idx_cnt = 0, curr_index = classrepr->indexes; idx_cnt < classrepr->n_indexes; idx_cnt++, curr_index++)
16971  {
16972  if (curr_index == NULL)
16973  {
16974  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_UNEXPECTED, 1, "Bad index information in class representation.");
16975  error = ER_UNEXPECTED;
16976  goto exit_cleanup;
16977  }
16978 
16979  if (intl_identifier_casecmp (curr_index->btname, index_name) == 0)
16980  {
16981  BTID_COPY (p_found_btid, &(curr_index->btid));
16982  break;
16983  }
16984  }
16985 
16986 exit_cleanup:
16987  if (classrepr)
16988  {
16989  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
16990  }
16991 
16992 exit:
16993  return error;
16994 }
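/*
 * Illustrative sketch (not part of the original source): per the note above, a
 * missing index name is not reported as an error, so the caller has to test the
 * returned BTID explicitly.  demo_find_index_btid is a hypothetical wrapper.
 */
static int
demo_find_index_btid (THREAD_ENTRY * thread_p, const OID * class_oid, const char *index_name, BTID * btid)
{
  if (heap_get_btid_from_index_name (thread_p, class_oid, index_name, btid) != NO_ERROR)
    {
      return ER_FAILED;		/* real error, e.g. class representation unavailable */
    }

  if (BTID_IS_NULL (btid))
    {
      /* no error was set: the class simply has no index with this name */
      return ER_FAILED;
    }

  return NO_ERROR;
}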
16995 
16996 /*
16997  * heap_object_upgrade_domain - upgrades a single attribute in an instance from
16998  * the domain of current representation to the
16999  * domain of the last representation.
17000  *
17001  * return: error code, NO_ERROR if no error occurred
17002  * thread_p(in) : thread context
17003  * upd_scancache(in): scan context
17004  * attr_info(in): attribute info structure
17005  * oid(in): the oid of the object to process
17006  * att_id(in): attribute id within the class (same as in schema)
17007  *
17008  * Note : this function is used in ALTER CHANGE (with type change syntax)
17009  */
17010 int
17011 heap_object_upgrade_domain (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * upd_scancache, HEAP_CACHE_ATTRINFO * attr_info,
17012   OID * oid, const ATTR_ID att_id)
17013 {
17014  int i = 0, error = NO_ERROR;
17015  HEAP_ATTRVALUE *value = NULL;
17016  int force_count = 0, updated_n_attrs_id = 0;
17017  ATTR_ID atts_id[1] = { 0 };
17018  DB_VALUE orig_value;
17019  TP_DOMAIN_STATUS status;
17020 
17021  db_make_null (&orig_value);
17022 
17023  if (upd_scancache == NULL || attr_info == NULL || oid == NULL)
17024  {
17025  error = ER_UNEXPECTED;
17026  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, "Unexpected NULL arguments.");
17027  goto exit;
17028  }
17029 
17030  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
17031  {
17032  TP_DOMAIN *dest_dom = value->last_attrepr->domain;
17033  bool log_warning = false;
17034  int warning_code = NO_ERROR;
17035  DB_TYPE dest_type;
17036  DB_TYPE src_type = DB_VALUE_DOMAIN_TYPE (&(value->dbvalue));
17037  int curr_prec = 0;
17038  int dest_prec = 0;
17039 
17040  dest_type = TP_DOMAIN_TYPE (dest_dom);
17041 
17042  if (att_id != value->attrid)
17043  {
17044  continue;
17045  }
17046 
17047  if (QSTR_IS_BIT (src_type))
17048  {
17049  curr_prec = db_get_string_length (&(value->dbvalue));
17050  }
17051  else if (QSTR_IS_ANY_CHAR (src_type))
17052  {
17053  if (TP_DOMAIN_CODESET (dest_dom) == INTL_CODESET_RAW_BYTES)
17054  {
17055  curr_prec = db_get_string_size (&(value->dbvalue));
17056  }
17057  else if (!DB_IS_NULL (&(value->dbvalue)))
17058  {
17059  curr_prec = db_get_string_length (&(value->dbvalue));
17060  }
17061  else
17062  {
17063  curr_prec = dest_dom->precision;
17064  }
17065  }
17066 
17067  dest_prec = dest_dom->precision;
17068 
17069  if (QSTR_IS_ANY_CHAR_OR_BIT (src_type) && QSTR_IS_ANY_CHAR_OR_BIT (dest_type))
17070  {
17071  /* check phase of ALTER TABLE .. CHANGE should not allow changing the domains from one flavour to another : */
17072  assert ((QSTR_IS_ANY_CHAR (src_type) && QSTR_IS_ANY_CHAR (dest_type))
17073  || (!QSTR_IS_ANY_CHAR (src_type) && !QSTR_IS_ANY_CHAR (dest_type)));
17074 
17075  assert ((QSTR_IS_BIT (src_type) && QSTR_IS_BIT (dest_type))
17076  || (!QSTR_IS_BIT (src_type) && !QSTR_IS_BIT (dest_type)));
17077 
17078  /* check string truncation */
17079  if (dest_prec < curr_prec)
17080  {
17083  {
17086  goto exit;
17087  }
17088  else
17089  {
17090  /* allow truncation in cast, just warning */
17091  log_warning = true;
17092  warning_code = ER_QPROC_SIZE_STRING_TRUNCATED;
17093  }
17094  }
17095  }
17096 
17097  error = pr_clone_value (&(value->dbvalue), &orig_value);
17098  if (error != NO_ERROR)
17099  {
17100  goto exit;
17101  }
17102 
17103  if (TP_IS_CHAR_TYPE (TP_DOMAIN_TYPE (dest_dom))
17104  && !(TP_IS_CHAR_TYPE (src_type) || src_type == DB_TYPE_ENUMERATION)
17107  {
17108  /* If destination is char/varchar, we need to first cast the value to a string with no precision, then to
17109  * destination type with the desired precision. */
17110  TP_DOMAIN *string_dom;
17111  if (TP_DOMAIN_TYPE (dest_dom) == DB_TYPE_NCHAR || TP_DOMAIN_TYPE (dest_dom) == DB_TYPE_VARNCHAR)
17112  {
17114  }
17115  else
17116  {
17118  }
17119  if ((status = tp_value_cast (&(value->dbvalue), &(value->dbvalue), string_dom, false)) != DOMAIN_COMPATIBLE)
17120  {
17121  error = tp_domain_status_er_set (status, ARG_FILE_LINE, &(value->dbvalue), string_dom);
17122  }
17123  }
17124 
17125  if (error == NO_ERROR)
17126  {
17127  if ((status = tp_value_cast (&(value->dbvalue), &(value->dbvalue), dest_dom, false)) != DOMAIN_COMPATIBLE)
17128  {
17129  error = tp_domain_status_er_set (status, ARG_FILE_LINE, &(value->dbvalue), dest_dom);
17130  }
17131  }
17132  if (error != NO_ERROR)
17133  {
17134  bool set_default_value = false;
17135  bool set_min_value = false;
17136  bool set_max_value = false;
17137 
17139  || (TP_IS_CHAR_TYPE (TP_DOMAIN_TYPE (dest_dom))
17141  {
17144  goto exit;
17145  }
17146 
17147  if (error == ER_IT_DATA_OVERFLOW)
17148  {
17149  int is_positive = -1; /* -1:UNKNOWN, 0:negative, 1:positive */
17150 
17151  /* determine the sign of the original value: */
17152  switch (src_type)
17153  {
17154  case DB_TYPE_INTEGER:
17155  is_positive = ((db_get_int (&value->dbvalue) >= 0) ? 1 : 0);
17156  break;
17157  case DB_TYPE_SMALLINT:
17158  is_positive = ((db_get_short (&value->dbvalue) >= 0) ? 1 : 0);
17159  break;
17160  case DB_TYPE_BIGINT:
17161  is_positive = ((db_get_bigint (&value->dbvalue) >= 0) ? 1 : 0);
17162  break;
17163  case DB_TYPE_FLOAT:
17164  is_positive = ((db_get_float (&value->dbvalue) >= 0) ? 1 : 0);
17165  break;
17166  case DB_TYPE_DOUBLE:
17167  is_positive = ((db_get_double (&value->dbvalue) >= 0) ? 1 : 0);
17168  break;
17169  case DB_TYPE_NUMERIC:
17170  is_positive = numeric_db_value_is_positive (&value->dbvalue);
17171  break;
17172  case DB_TYPE_MONETARY:
17173  is_positive = ((db_get_monetary (&value->dbvalue)->amount >= 0) ? 1 : 0);
17174  break;
17175 
17176  case DB_TYPE_CHAR:
17177  case DB_TYPE_VARCHAR:
17178  case DB_TYPE_NCHAR:
17179  case DB_TYPE_VARNCHAR:
17180  {
17181  const char *str = db_get_string (&(value->dbvalue));
17182  const char *str_end = str + db_get_string_length (&(value->dbvalue));
17183  const char *p = NULL;
17184 
17185  /* get the sign in the source string; look directly into the buffer string, no copy */
17186  p = str;
17187  while (char_isspace (*p) && p < str_end)
17188  {
17189  p++;
17190  }
17191 
17192  is_positive = ((p < str_end && (*p) == '-') ? 0 : 1);
17193  break;
17194  }
17195 
17196  default:
17197  is_positive = -1;
17198  break;
17199  }
17200 
17201  if (is_positive == 1)
17202  {
17203  set_max_value = true;
17204  }
17205  else if (is_positive == 0)
17206  {
17207  set_min_value = true;
17208  }
17209  else
17210  {
17211  set_default_value = true;
17212  }
17213  }
17214  else
17215  {
17216  set_default_value = true;
17217  }
17218  /* clear the error */
17219  er_clear ();
17220 
17221  log_warning = true;
17222 
17223  /* the cast value will be overwritten, so it must be cleared here */
17224  pr_clear_value (&(value->dbvalue));
17225 
17226  if (set_max_value)
17227  {
17228  /* set max value of destination domain */
17229  error =
17230  db_value_domain_max (&(value->dbvalue), dest_type, dest_prec, dest_dom->scale, dest_dom->codeset,
17231  dest_dom->collation_id, &dest_dom->enumeration);
17232  if (error != NO_ERROR)
17233  {
17234  /* this should not happen */
17235  goto exit;
17236  }
17237 
17238  warning_code = ER_ALTER_CHANGE_CAST_FAILED_SET_MAX;
17239  }
17240  else if (set_min_value)
17241  {
17242  /* set min value of destination domain */
17243  error =
17244  db_value_domain_min (&(value->dbvalue), dest_type, dest_prec, dest_dom->scale, dest_dom->codeset,
17245  dest_dom->collation_id, &dest_dom->enumeration);
17246  if (error != NO_ERROR)
17247  {
17248  /* this should not happen */
17249  goto exit;
17250  }
17251  warning_code = ER_ALTER_CHANGE_CAST_FAILED_SET_MIN;
17252  }
17253  else
17254  {
17255  assert (set_default_value == true);
17256 
17257  /* set default value of destination domain */
17258  error =
17259  db_value_domain_default (&(value->dbvalue), dest_type, dest_prec, dest_dom->scale, dest_dom->codeset,
17260  dest_dom->collation_id, &dest_dom->enumeration);
17261  if (error != NO_ERROR)
17262  {
17263  /* this should not happen */
17264  goto exit;
17265  }
17267  }
17268  }
17269 
17270  if (!DB_IS_NULL (&orig_value))
17271  {
17272  assert (!DB_IS_NULL (&(value->dbvalue)));
17273  }
17274 
17275  if (log_warning)
17276  {
17277  assert (warning_code != NO_ERROR);
17278 
17279  /* Since we don't want to bother callers with the following warning, which is only for logging, it is
17280  * popped as soon as it is set. */
17281  er_stack_push ();
17282 
17283  if (warning_code == ER_QPROC_SIZE_STRING_TRUNCATED)
17284  {
17285  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, warning_code, 1, "ALTER TABLE .. CHANGE");
17286  }
17287  else
17288  {
17289  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, warning_code, 0);
17290  }
17291 
17292  /* forget the warning */
17293  er_stack_pop ();
17294  }
17295 
17296  value->state = HEAP_WRITTEN_ATTRVALUE;
17297  atts_id[updated_n_attrs_id] = value->attrid;
17298  updated_n_attrs_id++;
17299 
17300  break;
17301  }
17302 
17303  /* exactly one attribute should be changed */
17304  assert (updated_n_attrs_id == 1);
17305 
17306  if (updated_n_attrs_id != 1 || attr_info->read_classrepr == NULL || attr_info->last_classrepr == NULL
17307  || attr_info->read_classrepr->id >= attr_info->last_classrepr->id)
17308  {
17309  error = ER_UNEXPECTED;
17310  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, "Incorrect attribute information.");
17311  goto exit;
17312  }
17313 
17314  /* the class has XCH_M_LOCK */
17315  error =
17316  locator_attribute_info_force (thread_p, &upd_scancache->node.hfid, oid, attr_info, atts_id, updated_n_attrs_id,
17317  LC_FLUSH_UPDATE, SINGLE_ROW_UPDATE, upd_scancache, &force_count, false,
17320  if (error != NO_ERROR)
17321  {
17323  {
17324  error = NO_ERROR;
17325  }
17326 
17327  goto exit;
17328  }
17329 
17330 exit:
17331  pr_clear_value (&orig_value);
17332  return error;
17333 }
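/*
 * Illustrative sketch (not part of the original source): a standalone
 * restatement of the overflow handling above.  When the cast into the new
 * domain fails with ER_IT_DATA_OVERFLOW, the sign of the original value decides
 * whether the attribute is clamped to the domain maximum, to the domain
 * minimum, or reset to the domain default.  All names below are hypothetical.
 */
typedef enum
{
  DEMO_UPGRADE_SET_MAX,
  DEMO_UPGRADE_SET_MIN,
  DEMO_UPGRADE_SET_DEFAULT
} DEMO_UPGRADE_OVERFLOW_ACTION;

static DEMO_UPGRADE_OVERFLOW_ACTION
demo_upgrade_overflow_action (int is_positive /* -1:UNKNOWN, 0:negative, 1:positive */ )
{
  if (is_positive == 1)
    {
      return DEMO_UPGRADE_SET_MAX;	/* e.g. INT 100000 -> SMALLINT 32767 */
    }
  else if (is_positive == 0)
    {
      return DEMO_UPGRADE_SET_MIN;	/* e.g. INT -100000 -> SMALLINT -32768 */
    }
  else
    {
      return DEMO_UPGRADE_SET_DEFAULT;	/* sign unknown: fall back to the default */
    }
}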
17334 
17335 /*
17336  * heap_eval_function_index - evaluate the result of the expression used in
17337  * a function index.
17338  *
17339  * thread_p(in) : thread context
17340  * func_index_info(in): function index information
17341  * n_atts(in): number of attributes involved
17342  * att_ids(in): attribute identifiers
17343  * attr_info(in): attribute info structure
17344  * recdes(in): record descriptor
17345  * btid_index(in): id of the function index used
17346  * func_pred_cache(in): cached function index expressions
17347  * result(out): result of the function expression
17348  * fi_domain(out): domain of function index (from regu_var)
17349  * return: error code
17350  */
17351 static int
17352 heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_index_info, int n_atts, int *att_ids,
17353  HEAP_CACHE_ATTRINFO * attr_info, RECDES * recdes, int btid_index, DB_VALUE * result,
17354  FUNC_PRED_UNPACK_INFO * func_pred_cache, TP_DOMAIN ** fi_domain)
17355 {
17356  int error = NO_ERROR;
17357  OR_INDEX *index = NULL;
17358  char *expr_stream = NULL;
17359  int expr_stream_size = 0;
17360  FUNC_PRED *func_pred = NULL;
17361  XASL_UNPACK_INFO *unpack_info = NULL;
17362  DB_VALUE *res = NULL;
17363  int i, nr_atts;
17364  ATTR_ID *atts = NULL;
17365  bool atts_free = false, attrinfo_clear = false, attrinfo_end = false;
17366  HEAP_CACHE_ATTRINFO *cache_attr_info = NULL;
17367 
17368  if (func_index_info == NULL && btid_index > -1 && n_atts == -1)
17369  {
17370  index = &(attr_info->last_classrepr->indexes[btid_index]);
17371  if (func_pred_cache)
17372  {
17373  func_pred = func_pred_cache->func_pred;
17374  cache_attr_info = func_pred->cache_attrinfo;
17375  nr_atts = index->n_atts;
17376  }
17377  else
17378  {
17379  expr_stream = index->func_index_info->expr_stream;
17380  expr_stream_size = index->func_index_info->expr_stream_size;
17381  nr_atts = index->n_atts;
17382  atts = (ATTR_ID *) malloc (nr_atts * sizeof (ATTR_ID));
17383  if (atts == NULL)
17384  {
17386  error = ER_FAILED;
17387  goto end;
17388  }
17389  atts_free = true;
17390  for (i = 0; i < nr_atts; i++)
17391  {
17392  atts[i] = index->atts[i]->id;
17393  }
17394  cache_attr_info = attr_info;
17395  }
17396  }
17397  else
17398  {
17399  /* load index case */
17400  expr_stream = func_index_info->expr_stream;
17401  expr_stream_size = func_index_info->expr_stream_size;
17402  nr_atts = n_atts;
17403  atts = att_ids;
17404  cache_attr_info = func_index_info->expr->cache_attrinfo;
17405  func_pred = func_index_info->expr;
17406  }
17407 
17408  if (func_index_info == NULL)
17409  {
17410  /* insert case, read the values */
17411  if (func_pred == NULL)
17412  {
17413  if (stx_map_stream_to_func_pred (thread_p, &func_pred, expr_stream, expr_stream_size, &unpack_info))
17414  {
17415  error = ER_FAILED;
17416  goto end;
17417  }
17418  cache_attr_info = func_pred->cache_attrinfo;
17419 
17420  if (heap_attrinfo_start (thread_p, &attr_info->class_oid, nr_atts, atts, cache_attr_info) != NO_ERROR)
17421  {
17422  error = ER_FAILED;
17423  goto end;
17424  }
17425  attrinfo_end = true;
17426  }
17427 
17428  if (heap_attrinfo_read_dbvalues (thread_p, &attr_info->inst_oid, recdes, NULL, cache_attr_info) != NO_ERROR)
17429  {
17430  error = ER_FAILED;
17431  goto end;
17432  }
17433  attrinfo_clear = true;
17434  }
17435 
17436  error = fetch_peek_dbval (thread_p, func_pred->func_regu, NULL, &cache_attr_info->class_oid,
17437  &cache_attr_info->inst_oid, NULL, &res);
17438  if (error == NO_ERROR)
17439  {
17440  if (DB_IS_NULL (res) && func_pred->func_regu->domain != NULL)
17441  {
17442  /* Set expected domain in case of null values, just to be sure. The callers expects the domain to be set. */
17443  db_value_domain_init (res, TP_DOMAIN_TYPE (func_pred->func_regu->domain),
17444  func_pred->func_regu->domain->precision, func_pred->func_regu->domain->scale);
17445  }
17446  pr_clone_value (res, result);
17447  }
17448 
17449  if (fi_domain != NULL)
17450  {
17451  *fi_domain = tp_domain_cache (func_pred->func_regu->domain);
17452  }
17453 
17454  if (res != NULL && res->need_clear == true)
17455  {
17456  pr_clear_value (res);
17457  }
17458 
17459 end:
17460  if (attrinfo_clear && cache_attr_info)
17461  {
17462  heap_attrinfo_clear_dbvalues (cache_attr_info);
17463  }
17464  if (attrinfo_end && cache_attr_info)
17465  {
17466  heap_attrinfo_end (thread_p, cache_attr_info);
17467  }
17468  if (atts_free && atts)
17469  {
17470  free_and_init (atts);
17471  }
17472  if (unpack_info)
17473  {
17474  (void) qexec_clear_func_pred (thread_p, func_pred);
17475  free_xasl_unpack_info (thread_p, unpack_info);
17476  }
17477 
17478  return error;
17479 }
17480 
17481 /*
17482  * heap_init_func_pred_unpack_info () - if function indexes are found,
17483  * each function expression is unpacked and cached
17484  * in order to be used during bulk inserts
17485  * (insert ... select).
17486  * return: NO_ERROR, or ER_FAILED
17487  * thread_p(in): thread entry
17488  * attr_info(in): heap_cache_attrinfo
17489  * class_oid(in): the class oid
17490  * func_indx_preds(out):
17491  */
17492 int
17493 heap_init_func_pred_unpack_info (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, const OID * class_oid,
17494  FUNC_PRED_UNPACK_INFO ** func_indx_preds)
17495 {
17496  OR_FUNCTION_INDEX *fi_info = NULL;
17497  int n_indexes;
17498  int i, j;
17499  int *att_ids = NULL;
17500  int error_status = NO_ERROR;
17501  OR_INDEX *idx;
17502  FUNC_PRED_UNPACK_INFO *fi_preds = NULL;
17503  int *attr_info_started = NULL;
17504  size_t size;
17505 
17506  if (attr_info == NULL || class_oid == NULL || func_indx_preds == NULL)
17507  {
17508  return ER_FAILED;
17509  }
17510 
17511  *func_indx_preds = NULL;
17512 
17513  n_indexes = attr_info->last_classrepr->n_indexes;
17514  for (i = 0; i < n_indexes; i++)
17515  {
17516  idx = &(attr_info->last_classrepr->indexes[i]);
17517  fi_info = idx->func_index_info;
17518  if (fi_info)
17519  {
17520  if (fi_preds == NULL)
17521  {
17522  size = n_indexes * sizeof (FUNC_PRED_UNPACK_INFO);
17523  fi_preds = (FUNC_PRED_UNPACK_INFO *) db_private_alloc (thread_p, size);
17524  if (!fi_preds)
17525  {
17527  error_status = ER_FAILED;
17528  goto error;
17529  }
17530  for (j = 0; j < n_indexes; j++)
17531  {
17532  fi_preds[j].func_pred = NULL;
17533  fi_preds[j].unpack_info = NULL;
17534  }
17535 
17536  size = n_indexes * sizeof (int);
17537  attr_info_started = (int *) db_private_alloc (thread_p, size);
17538  if (attr_info_started == NULL)
17539  {
17541  error_status = ER_FAILED;
17542  goto error;
17543  }
17544  for (j = 0; j < n_indexes; j++)
17545  {
17546  attr_info_started[j] = 0;
17547  }
17548  }
17549 
17550  if (stx_map_stream_to_func_pred (thread_p, &fi_preds[i].func_pred, fi_info->expr_stream,
17551  fi_info->expr_stream_size, &fi_preds[i].unpack_info))
17552  {
17553  error_status = ER_FAILED;
17554  goto error;
17555  }
17556 
17557  size = idx->n_atts * sizeof (ATTR_ID);
17558  att_ids = (ATTR_ID *) db_private_alloc (thread_p, size);
17559  if (!att_ids)
17560  {
17562  error_status = ER_FAILED;
17563  goto error;
17564  }
17565 
17566  for (j = 0; j < idx->n_atts; j++)
17567  {
17568  att_ids[j] = idx->atts[j]->id;
17569  }
17570 
17571  if (heap_attrinfo_start (thread_p, class_oid, idx->n_atts, att_ids,
17572  fi_preds[i].func_pred->cache_attrinfo) != NO_ERROR)
17573  {
17574  error_status = ER_FAILED;
17575  goto error;
17576  }
17577 
17578  attr_info_started[i] = 1;
17579 
17580  if (att_ids)
17581  {
17582  db_private_free_and_init (thread_p, att_ids);
17583  }
17584  }
17585  }
17586 
17587  if (attr_info_started != NULL)
17588  {
17589  db_private_free_and_init (thread_p, attr_info_started);
17590  }
17591 
17592  *func_indx_preds = fi_preds;
17593 
17594  return NO_ERROR;
17595 
17596 error:
17597  if (att_ids)
17598  {
17599  db_private_free_and_init (thread_p, att_ids);
17600  }
17601  heap_free_func_pred_unpack_info (thread_p, n_indexes, fi_preds, attr_info_started);
17602  if (attr_info_started != NULL)
17603  {
17604  db_private_free_and_init (thread_p, attr_info_started);
17605  }
17606 
17607  return error_status;
17608 }
17609 
17610 /*
17611  * heap_free_func_pred_unpack_info () -
17612  * return:
17613  * thread_p(in): thread entry
17614  * n_indexes(in): number of indexes
17615  * func_indx_preds(in):
17616  * attr_info_started(in): array of int (1 if the corresponding cache_attrinfo
17617  * must be cleaned, 0 otherwise);
17618  * if NULL, all cache_attrinfo entries are cleaned
17619  */
17620 void
17621 heap_free_func_pred_unpack_info (THREAD_ENTRY * thread_p, int n_indexes, FUNC_PRED_UNPACK_INFO * func_indx_preds,
17622  int *attr_info_started)
17623 {
17624  int i;
17625 
17626  if (func_indx_preds == NULL)
17627  {
17628  return;
17629  }
17630 
17631  for (i = 0; i < n_indexes; i++)
17632  {
17633  if (func_indx_preds[i].func_pred)
17634  {
17635  if (attr_info_started == NULL || attr_info_started[i])
17636  {
17637  assert (func_indx_preds[i].func_pred->cache_attrinfo);
17638  (void) heap_attrinfo_end (thread_p, func_indx_preds[i].func_pred->cache_attrinfo);
17639  }
17640  (void) qexec_clear_func_pred (thread_p, func_indx_preds[i].func_pred);
17641  }
17642 
17643  if (func_indx_preds[i].unpack_info)
17644  {
17645  free_xasl_unpack_info (thread_p, func_indx_preds[i].unpack_info);
17646  }
17647  }
17648  db_private_free_and_init (thread_p, func_indx_preds);
17649 }
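/*
 * Illustrative sketch (not part of the original source): the intended pairing of
 * heap_init_func_pred_unpack_info and heap_free_func_pred_unpack_info around a
 * bulk-insert loop.  Passing NULL as attr_info_started on cleanup clears every
 * cached cache_attrinfo, matching the state the init function leaves them in.
 * demo_with_func_index_cache is a hypothetical wrapper.
 */
static int
demo_with_func_index_cache (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, const OID * class_oid)
{
  FUNC_PRED_UNPACK_INFO *func_preds = NULL;
  int n_indexes = attr_info->last_classrepr->n_indexes;

  if (heap_init_func_pred_unpack_info (thread_p, attr_info, class_oid, &func_preds) != NO_ERROR)
    {
      return ER_FAILED;
    }

  /* ... insert rows here, handing func_preds to the code that evaluates function
   * index keys so the cached, already unpacked expressions are reused ... */

  if (func_preds != NULL)
    {
      /* func_preds stays NULL when the class has no function index */
      heap_free_func_pred_unpack_info (thread_p, n_indexes, func_preds, NULL);
    }

  return NO_ERROR;
}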
17650 
17651 /*
17652  * heap_header_capacity_start_scan () - start scan function for 'show heap ...'
17653  * return: NO_ERROR, or ER_code
17654  * thread_p(in): thread entry
17655  * show_type(in):
17656  * arg_values(in):
17657  * arg_cnt(in):
17658  * ptr(in/out): 'show heap' context
17659  */
17660 int
17661 heap_header_capacity_start_scan (THREAD_ENTRY * thread_p, int show_type, DB_VALUE ** arg_values, int arg_cnt,
17662  void **ptr)
17663 {
17664  int error = NO_ERROR;
17665  const char *class_name = NULL;
17666  DB_CLASS_PARTITION_TYPE partition_type = DB_NOT_PARTITIONED_CLASS;
17667  OID class_oid;
17668  LC_FIND_CLASSNAME status;
17669  HEAP_SHOW_SCAN_CTX *ctx = NULL;
17670  OR_PARTITION *parts = NULL;
17671  int i = 0;
17672  int parts_count = 0;
17673  bool is_all = false;
17674 
17675  assert (arg_cnt == 2);
17676  assert (DB_VALUE_TYPE (arg_values[0]) == DB_TYPE_CHAR);
17677  assert (DB_VALUE_TYPE (arg_values[1]) == DB_TYPE_INTEGER);
17678 
17679  *ptr = NULL;
17680 
17681  class_name = db_get_string (arg_values[0]);
17682 
17683  partition_type = (DB_CLASS_PARTITION_TYPE) db_get_int (arg_values[1]);
17684 
17685  ctx = (HEAP_SHOW_SCAN_CTX *) db_private_alloc (thread_p, sizeof (HEAP_SHOW_SCAN_CTX));
17686  if (ctx == NULL)
17687  {
17688  ASSERT_ERROR ();
17689  error = er_errid ();
17690  goto cleanup;
17691  }
17692  memset (ctx, 0, sizeof (HEAP_SHOW_SCAN_CTX));
17693 
17694  status = xlocator_find_class_oid (thread_p, class_name, &class_oid, S_LOCK);
17695  if (status == LC_CLASSNAME_ERROR || status == LC_CLASSNAME_DELETED)
17696  {
17697  error = ER_LC_UNKNOWN_CLASSNAME;
17698  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, class_name);
17699  goto cleanup;
17700  }
17701 
17702  is_all = (show_type == SHOWSTMT_ALL_HEAP_HEADER || show_type == SHOWSTMT_ALL_HEAP_CAPACITY);
17703 
17704  if (is_all && partition_type == DB_PARTITIONED_CLASS)
17705  {
17706  error = heap_get_class_partitions (thread_p, &class_oid, &parts, &parts_count);
17707  if (error != NO_ERROR)
17708  {
17709  goto cleanup;
17710  }
17711 
17712  ctx->hfids = (HFID *) db_private_alloc (thread_p, parts_count * sizeof (HFID));
17713  if (ctx->hfids == NULL)
17714  {
17715  ASSERT_ERROR ();
17716  error = er_errid ();
17717  goto cleanup;
17718  }
17719 
17720  for (i = 0; i < parts_count; i++)
17721  {
17722  HFID_COPY (&ctx->hfids[i], &parts[i].class_hfid);
17723  }
17724 
17725  ctx->hfids_count = parts_count;
17726  }
17727  else
17728  {
17729  ctx->hfids = (HFID *) db_private_alloc (thread_p, sizeof (HFID));
17730  if (ctx->hfids == NULL)
17731  {
17732  ASSERT_ERROR ();
17733  error = er_errid ();
17734  goto cleanup;
17735  }
17736 
17737  error = heap_get_class_info (thread_p, &class_oid, &ctx->hfids[0], NULL, NULL);
17738  if (error != NO_ERROR)
17739  {
17740  goto cleanup;
17741  }
17742 
17743  ctx->hfids_count = 1;
17744  }
17745 
17746  *ptr = ctx;
17747  ctx = NULL;
17748 
17749 cleanup:
17750 
17751  if (parts != NULL)
17752  {
17753  heap_clear_partition_info (thread_p, parts, parts_count);
17754  }
17755 
17756  if (ctx != NULL)
17757  {
17758  if (ctx->hfids != NULL)
17759  {
17760  db_private_free (thread_p, ctx->hfids);
17761  }
17762 
17763  db_private_free_and_init (thread_p, ctx);
17764  }
17765 
17766  return error;
17767 }
17768 
17769 /*
17770  * heap_header_next_scan () - next scan function for
17771  * 'show (all) heap header'
17772  * return: NO_ERROR, or ER_code
17773  * thread_p(in):
17774  * cursor(in):
17775  * out_values(in/out):
17776  * out_cnt(in):
17777  * ptr(in): 'show heap' context
17778  */
17779 SCAN_CODE
17780 heap_header_next_scan (THREAD_ENTRY * thread_p, int cursor, DB_VALUE ** out_values, int out_cnt, void *ptr)
17781 {
17782  int error = NO_ERROR;
17783  HEAP_SHOW_SCAN_CTX *ctx = NULL;
17784  VPID vpid;
17785  HEAP_HDR_STATS *heap_hdr = NULL;
17786  RECDES hdr_recdes;
17787  int i = 0;
17788  int idx = 0;
17789  PAGE_PTR pgptr = NULL;
17790  HFID *hfid_p;
17791  char *class_name = NULL;
17792  int avg_length = 0;
17793  char buf[512] = { 0 };
17794  char temp[64] = { 0 };
17795  char *buf_p, *end;
17796 
17797  ctx = (HEAP_SHOW_SCAN_CTX *) ptr;
17798 
17799  if (cursor >= ctx->hfids_count)
17800  {
17801  return S_END;
17802  }
17803 
17804  hfid_p = &ctx->hfids[cursor];
17805 
17806  vpid.volid = hfid_p->vfid.volid;
17807  vpid.pageid = hfid_p->hpgid;
17808 
17809  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
17810  if (pgptr == NULL)
17811  {
17812  ASSERT_ERROR ();
17813  error = er_errid ();
17814  goto cleanup;
17815  }
17816 
17817  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
17818  {
17819  error = ER_SP_INVALID_HEADER;
17821  goto cleanup;
17822  }
17823 
17824  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
17825 
17826  if (heap_get_class_name (thread_p, &(heap_hdr->class_oid), &class_name) != NO_ERROR || class_name == NULL)
17827  {
17828  ASSERT_ERROR_AND_SET (error);
17829  goto cleanup;
17830  }
17831 
17832  idx = 0;
17833 
17834  /* Class_name */
17835  error = db_make_string_copy (out_values[idx], class_name);
17836  idx++;
17837  if (error != NO_ERROR)
17838  {
17839  goto cleanup;
17840  }
17841 
17842  /* Class_oid */
17843  oid_to_string (buf, sizeof (buf), &heap_hdr->class_oid);
17844  error = db_make_string_copy (out_values[idx], buf);
17845  idx++;
17846  if (error != NO_ERROR)
17847  {
17848  goto cleanup;
17849  }
17850 
17851  /* HFID */
17852  db_make_int (out_values[idx], hfid_p->vfid.volid);
17853  idx++;
17854 
17855  db_make_int (out_values[idx], hfid_p->vfid.fileid);
17856  idx++;
17857 
17858  db_make_int (out_values[idx], hfid_p->hpgid);
17859  idx++;
17860 
17861  /* Overflow_vfid */
17862  vfid_to_string (buf, sizeof (buf), &heap_hdr->ovf_vfid);
17863  error = db_make_string_copy (out_values[idx], buf);
17864  idx++;
17865  if (error != NO_ERROR)
17866  {
17867  goto cleanup;
17868  }
17869 
17870  /* Next_vpid */
17871  vpid_to_string (buf, sizeof (buf), &heap_hdr->next_vpid);
17872  error = db_make_string_copy (out_values[idx], buf);
17873  idx++;
17874  if (error != NO_ERROR)
17875  {
17876  goto cleanup;
17877  }
17878 
17879  /* Unfill space */
17880  db_make_int (out_values[idx], heap_hdr->unfill_space);
17881  idx++;
17882 
17883  /* Estimated */
17884  db_make_bigint (out_values[idx], heap_hdr->estimates.num_pages);
17885  idx++;
17886 
17887  db_make_bigint (out_values[idx], heap_hdr->estimates.num_recs);
17888  idx++;
17889 
17890  avg_length = ((heap_hdr->estimates.num_recs > 0)
17891  ? (int) ((heap_hdr->estimates.recs_sumlen / (float) heap_hdr->estimates.num_recs) + 0.9) : 0);
17892  db_make_int (out_values[idx], avg_length);
17893  idx++;
17894 
17895  db_make_int (out_values[idx], heap_hdr->estimates.num_high_best);
17896  idx++;
17897 
17898  db_make_int (out_values[idx], heap_hdr->estimates.num_other_high_best);
17899  idx++;
17900 
17901  db_make_int (out_values[idx], heap_hdr->estimates.head);
17902  idx++;
17903 
17904  /* Estimates_best_list */
17905  buf_p = buf;
17906  end = buf + sizeof (buf);
17907  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
17908  {
17909  if (i > 0)
17910  {
17911  if (fill_string_to_buffer (&buf_p, end, ", ") == -1)
17912  {
17913  break;
17914  }
17915  }
17916 
17917  heap_bestspace_to_string (temp, sizeof (temp), heap_hdr->estimates.best + i);
17918  if (fill_string_to_buffer (&buf_p, end, temp) == -1)
17919  {
17920  break;
17921  }
17922  }
17923 
17924  error = db_make_string_copy (out_values[idx], buf);
17925  idx++;
17926  if (error != NO_ERROR)
17927  {
17928  goto cleanup;
17929  }
17930 
17931  db_make_int (out_values[idx], heap_hdr->estimates.num_second_best);
17932  idx++;
17933 
17934  db_make_int (out_values[idx], heap_hdr->estimates.head_second_best);
17935  idx++;
17936 
17937  db_make_int (out_values[idx], heap_hdr->estimates.tail_second_best);
17938  idx++;
17939 
17940  db_make_int (out_values[idx], heap_hdr->estimates.num_substitutions);
17941  idx++;
17942 
17943  /* Estimates_second_best */
17944  buf_p = buf;
17945  end = buf + sizeof (buf);
17946  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
17947  {
17948  if (i > 0)
17949  {
17950  if (fill_string_to_buffer (&buf_p, end, ", ") == -1)
17951  {
17952  break;
17953  }
17954  }
17955 
17956  vpid_to_string (temp, sizeof (temp), heap_hdr->estimates.second_best + i);
17957  if (fill_string_to_buffer (&buf_p, end, temp) == -1)
17958  {
17959  break;
17960  }
17961  }
17962 
17963  error = db_make_string_copy (out_values[idx], buf);
17964  idx++;
17965  if (error != NO_ERROR)
17966  {
17967  goto cleanup;
17968  }
17969 
17970  vpid_to_string (buf, sizeof (buf), &heap_hdr->estimates.last_vpid);
17971  error = db_make_string_copy (out_values[idx], buf);
17972  idx++;
17973  if (error != NO_ERROR)
17974  {
17975  goto cleanup;
17976  }
17977 
17978  vpid_to_string (buf, sizeof (buf), &heap_hdr->estimates.full_search_vpid);
17979  error = db_make_string_copy (out_values[idx], buf);
17980  idx++;
17981  if (error != NO_ERROR)
17982  {
17983  goto cleanup;
17984  }
17985 
17986  assert (idx == out_cnt);
17987 
17988 cleanup:
17989 
17990  if (pgptr != NULL)
17991  {
17992  pgbuf_unfix_and_init (thread_p, pgptr);
17993  }
17994 
17995  if (class_name != NULL)
17996  {
17997  free_and_init (class_name);
17998  }
17999 
18000  return (error == NO_ERROR) ? S_SUCCESS : S_ERROR;
18001 }
18002 
18003 /*
18004  * heap_capacity_next_scan () - next scan function for
18005  * 'show (all) heap capacity'
18006  * return: NO_ERROR, or ER_code
18007  * thread_p(in):
18008  * cursor(in):
18009  * out_values(in/out):
18010  * out_cnt(in):
18011  * ptr(in): 'show heap' context
18012  */
18013 SCAN_CODE
18014 heap_capacity_next_scan (THREAD_ENTRY * thread_p, int cursor, DB_VALUE ** out_values, int out_cnt, void *ptr)
18015 {
18016  int error = NO_ERROR;
18017  HEAP_SHOW_SCAN_CTX *ctx = NULL;
18018  HFID *hfid_p = NULL;
18019  HEAP_CACHE_ATTRINFO attr_info;
18020  OR_CLASSREP *repr = NULL;
18021  char *classname = NULL;
18022  char class_oid_str[64] = { 0 };
18023  bool is_heap_attrinfo_started = false;
18024  INT64 num_recs = 0;
18025  INT64 num_relocated_recs = 0;
18026  INT64 num_overflowed_recs = 0;
18027  INT64 num_pages = 0;
18028  int avg_rec_len = 0;
18029  int avg_free_space_per_page = 0;
18030  int avg_free_space_without_last_page = 0;
18031  int avg_overhead_per_page = 0;
18032  int val = 0;
18033  int idx = 0;
18034  FILE_DESCRIPTORS fdes;
18035 
18036  ctx = (HEAP_SHOW_SCAN_CTX *) ptr;
18037 
18038  if (cursor >= ctx->hfids_count)
18039  {
18040  return S_END;
18041  }
18042 
18043  hfid_p = &ctx->hfids[cursor];
18044 
18045  error =
18046  heap_get_capacity (thread_p, hfid_p, &num_recs, &num_relocated_recs, &num_overflowed_recs, &num_pages,
18047  &avg_free_space_per_page, &avg_free_space_without_last_page, &avg_rec_len,
18048  &avg_overhead_per_page);
18049  if (error != NO_ERROR)
18050  {
18051  goto cleanup;
18052  }
18053 
18054  error = file_descriptor_get (thread_p, &hfid_p->vfid, &fdes);
18055  if (error != NO_ERROR)
18056  {
18057  ASSERT_ERROR ();
18058  goto cleanup;
18059  }
18060 
18061  error = heap_attrinfo_start (thread_p, &fdes.heap.class_oid, -1, NULL, &attr_info);
18062  if (error != NO_ERROR)
18063  {
18064  goto cleanup;
18065  }
18066 
18067  is_heap_attrinfo_started = true;
18068 
18069  repr = attr_info.last_classrepr;
18070  if (repr == NULL)
18071  {
18072  error = ER_HEAP_UNKNOWN_OBJECT;
18074  fdes.heap.class_oid.slotid);
18075  goto cleanup;
18076  }
18077 
18078  if (heap_get_class_name (thread_p, &fdes.heap.class_oid, &classname) != NO_ERROR || classname == NULL)
18079  {
18080  ASSERT_ERROR_AND_SET (error);
18081  goto cleanup;
18082  }
18083 
18084  idx = 0;
18085 
18086  error = db_make_string_copy (out_values[idx], classname);
18087  idx++;
18088  if (error != NO_ERROR)
18089  {
18090  goto cleanup;
18091  }
18092 
18093  oid_to_string (class_oid_str, sizeof (class_oid_str), &fdes.heap.class_oid);
18094  error = db_make_string_copy (out_values[idx], class_oid_str);
18095  idx++;
18096  if (error != NO_ERROR)
18097  {
18098  goto cleanup;
18099  }
18100 
18101  db_make_int (out_values[idx], hfid_p->vfid.volid);
18102  idx++;
18103 
18104  db_make_int (out_values[idx], hfid_p->vfid.fileid);
18105  idx++;
18106 
18107  db_make_int (out_values[idx], hfid_p->hpgid);
18108  idx++;
18109 
18110  db_make_bigint (out_values[idx], num_recs);
18111  idx++;
18112 
18113  db_make_bigint (out_values[idx], num_relocated_recs);
18114  idx++;
18115 
18116  db_make_bigint (out_values[idx], num_overflowed_recs);
18117  idx++;
18118 
18119  db_make_bigint (out_values[idx], num_pages);
18120  idx++;
18121 
18122  db_make_int (out_values[idx], avg_rec_len);
18123  idx++;
18124 
18125  db_make_int (out_values[idx], avg_free_space_per_page);
18126  idx++;
18127 
18128  db_make_int (out_values[idx], avg_free_space_without_last_page);
18129  idx++;
18130 
18131  db_make_int (out_values[idx], avg_overhead_per_page);
18132  idx++;
18133 
18134  db_make_int (out_values[idx], repr->id);
18135  idx++;
18136 
18137  db_make_int (out_values[idx], repr->n_attributes);
18138  idx++;
18139 
18140  val = repr->n_attributes - repr->n_variable - repr->n_shared_attrs - repr->n_class_attrs;
18141  db_make_int (out_values[idx], val);
18142  idx++;
18143 
18144  db_make_int (out_values[idx], repr->n_variable);
18145  idx++;
18146 
18147  db_make_int (out_values[idx], repr->n_shared_attrs);
18148  idx++;
18149 
18150  db_make_int (out_values[idx], repr->n_class_attrs);
18151  idx++;
18152 
18153  db_make_int (out_values[idx], repr->fixed_length);
18154  idx++;
18155 
18156  assert (idx == out_cnt);
18157 
18158 cleanup:
18159 
18160  if (classname != NULL)
18161  {
18162  free_and_init (classname);
18163  }
18164 
18165  if (is_heap_attrinfo_started)
18166  {
18167  heap_attrinfo_end (thread_p, &attr_info);
18168  }
18169 
18170  return (error == NO_ERROR) ? S_SUCCESS : S_ERROR;
18171 }
18172 
18173 /*
18174  * heap_header_capacity_end_scan() - end scan function of
18175  * 'show (all) heap ...'
18176  * return: NO_ERROR, or ER_code
18177  * thread_p(in):
18178  * ptr(in/out): 'show heap' context
18179  */
18180 int
18181 heap_header_capacity_end_scan (THREAD_ENTRY * thread_p, void **ptr)
18182 {
18183  HEAP_SHOW_SCAN_CTX *ctx;
18184 
18185  ctx = (HEAP_SHOW_SCAN_CTX *) (*ptr);
18186 
18187  if (ctx == NULL)
18188  {
18189  return NO_ERROR;
18190  }
18191 
18192  if (ctx->hfids != NULL)
18193  {
18194  db_private_free (thread_p, ctx->hfids);
18195  }
18196 
18197  db_private_free (thread_p, ctx);
18198  *ptr = NULL;
18199 
18200  return NO_ERROR;
18201 }
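/*
 * Illustrative sketch (not part of the original source): the start/next/end trio
 * above is meant to be driven like this by the SHOWSTMT framework, which also
 * prepares arg_values (class name, partition type) and the out_values row
 * buffer.  demo_show_heap_header is a hypothetical driver.
 */
static int
demo_show_heap_header (THREAD_ENTRY * thread_p, DB_VALUE ** arg_values, int arg_cnt,
		       DB_VALUE ** out_values, int out_cnt)
{
  void *ctx = NULL;
  int cursor;
  SCAN_CODE code;

  if (heap_header_capacity_start_scan (thread_p, SHOWSTMT_ALL_HEAP_HEADER, arg_values, arg_cnt, &ctx) != NO_ERROR)
    {
      return ER_FAILED;
    }

  for (cursor = 0;; cursor++)
    {
      code = heap_header_next_scan (thread_p, cursor, out_values, out_cnt, ctx);
      if (code == S_END)
	{
	  break;
	}
      if (code != S_SUCCESS)
	{
	  (void) heap_header_capacity_end_scan (thread_p, &ctx);
	  return ER_FAILED;
	}
      /* one row describing the next heap header is now in out_values[0..out_cnt-1] */
    }

  return heap_header_capacity_end_scan (thread_p, &ctx);
}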
18202 
18203 static char *
18204 heap_bestspace_to_string (char *buf, int buf_size, const HEAP_BESTSPACE * hb)
18205 {
18206  snprintf (buf, buf_size, "((%d|%d), %d)", hb->vpid.volid, hb->vpid.pageid, hb->freespace);
18207  buf[buf_size - 1] = '\0';
18208 
18209  return buf;
18210 }
18211 
18212 /*
18213  * fill_string_to_buffer () - fill string into buffer
18214  *
18215  *     -----------------------------
18216  *     |           buffer          |
18217  *     -----------------------------
18218  *     ^                           ^
18219  *     |                           |
18220  *     start                       end
18221  *
18222  * return: the count of characters (not including '\0') that have been
18223  * filled into the buffer; -1 means error.
18224  * start(in/out): After filling, start moves to the '\0' position.
18225  * end(in): The first unavailable position.
18226  * str(in):
18227  */
18228 static int
18229 fill_string_to_buffer (char **start, char *end, const char *str)
18230 {
18231  int len = (int) strlen (str);
18232 
18233  if (*start + len >= end)
18234  {
18235  return -1;
18236  }
18237 
18238  memcpy (*start, str, len);
18239  *start += len;
18240  **start = '\0';
18241 
18242  return len;
18243 }
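/*
 * Illustrative sketch (not part of the original source): fill_string_to_buffer
 * refuses a string that would not leave room for the terminating '\0', so the
 * heap header scan above can simply stop appending when -1 is returned.
 * demo_fill_string_to_buffer is a hypothetical example.
 */
static void
demo_fill_string_to_buffer (void)
{
  char buf[16];
  char *p = buf;
  char *end = buf + sizeof (buf);

  if (fill_string_to_buffer (&p, end, "(1|50)") != -1 && fill_string_to_buffer (&p, end, ", (1|51)") != -1)
    {
      /* buf now holds "(1|50), (1|51)" and p points to its terminating '\0' */
    }
}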
18244 
18245 /*
18246  * heap_get_page_info () - Obtain page information.
18247  *
18248  * return : SCAN_CODE.
18249  * thread_p (in) : Thread entry.
18250  * cls_oid (in) : Class object identifier.
18251  * hfid (in) : Heap file identifier.
18252  * vpid (in) : Page identifier.
18253  * pgptr (in) : Pointer to the cached page.
18254  * page_info (in) : Pointers to DB_VALUES where page information is stored.
18255  */
18256 static SCAN_CODE
18257 heap_get_page_info (THREAD_ENTRY * thread_p, const OID * cls_oid, const HFID * hfid, const VPID * vpid,
18258  const PAGE_PTR pgptr, DB_VALUE ** page_info)
18259 {
18260  RECDES recdes;
18261 
18262  if (page_info == NULL)
18263  {
18264  /* no need to get page info */
18265  return S_SUCCESS;
18266  }
18267 
18268  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
18269  {
18270  /* Error obtaining header slot */
18271  return S_ERROR;
18272  }
18273 
18274  db_make_oid (page_info[HEAP_PAGE_INFO_CLASS_OID], cls_oid);
18275 
18276  if (hfid->hpgid == vpid->pageid && hfid->vfid.volid == vpid->volid)
18277  {
18278  HEAP_HDR_STATS *hdr_stats = (HEAP_HDR_STATS *) recdes.data;
18280  db_make_int (page_info[HEAP_PAGE_INFO_NEXT_PAGE], hdr_stats->next_vpid.pageid);
18281  }
18282  else
18283  {
18284  HEAP_CHAIN *chain = (HEAP_CHAIN *) recdes.data;
18287  }
18288 
18289  /* Obtain information from spage header */
18290  return spage_get_page_header_info (pgptr, page_info);
18291 }
18292 
18293 /*
18294  * heap_page_next () - Advance to next page in chain and obtain information.
18295  *
18296  * return : SCAN_CODE.
18297  * thread_p (in) : Thread entry.
18298  * class_oid (in) : Class object identifier.
18299  * hfid (in) : Heap file identifier.
18300  * next_vpid (in) : Next page identifier.
18301  * cache_pageinfo (in) : Pointers to DB_VALUEs where page information is
18302  * stored.
18303  */
18304 SCAN_CODE
18305 heap_page_next (THREAD_ENTRY * thread_p, const OID * class_oid, const HFID * hfid, VPID * next_vpid,
18306  DB_VALUE ** cache_pageinfo)
18307 {
18308  PGBUF_WATCHER pg_watcher;
18309  PGBUF_WATCHER old_pg_watcher;
18310  SCAN_CODE scan = S_SUCCESS;
18311 
18312  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
18313  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
18314 
18315  /* get next page */
18316  if (VPID_ISNULL (next_vpid))
18317  {
18318  /* set to first page */
18319  next_vpid->pageid = hfid->hpgid;
18320  next_vpid->volid = hfid->vfid.volid;
18321  }
18322  else
18323  {
18324  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, next_vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
18325  if (pg_watcher.pgptr == NULL)
18326  {
18327  return S_ERROR;
18328  }
18329  /* get next page */
18330  heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, next_vpid);
18331  if (VPID_ISNULL (next_vpid))
18332  {
18333  /* no more pages to scan */
18334  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18335  return S_END;
18336  }
18337  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
18338  }
18339 
18340  /* get page pointer to next page */
18341  pg_watcher.pgptr =
18342  heap_scan_pb_lock_and_fetch (thread_p, next_vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, NULL, &pg_watcher);
18343  if (old_pg_watcher.pgptr != NULL)
18344  {
18345  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
18346  }
18347  if (pg_watcher.pgptr == NULL)
18348  {
18349  return S_ERROR;
18350  }
18351 
18352  /* read page information and return scan code */
18353  scan = heap_get_page_info (thread_p, class_oid, hfid, next_vpid, pg_watcher.pgptr, cache_pageinfo);
18354 
18355  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18356  return scan;
18357 }
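/*
 * Illustrative sketch (not part of the original source): walking the whole page
 * chain with heap_page_next.  Starting from a NULL VPID makes the first call
 * position on the heap header page; passing NULL for the page info array simply
 * advances through the chain.  demo_count_heap_pages is hypothetical.
 */
static int
demo_count_heap_pages (THREAD_ENTRY * thread_p, const OID * class_oid, const HFID * hfid)
{
  VPID vpid;
  int count = 0;
  SCAN_CODE code;

  VPID_SET_NULL (&vpid);
  while ((code = heap_page_next (thread_p, class_oid, hfid, &vpid, NULL)) == S_SUCCESS)
    {
      count++;
    }

  return (code == S_END) ? count : ER_FAILED;
}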
18358 
18359 /*
18360  * heap_page_prev () - Advance to previous page in chain and obtain
18361  * information.
18362  *
18363  * return : SCAN_CODE.
18364  * thread_p (in) : Thread entry.
18365  * class_oid (in) : Class object identifier.
18366  * hfid (in) : Heap file identifier.
18367  * prev_vpid (in) : Previous page identifier.
18368  * cache_pageinfo (in) : Pointers to DB_VALUEs where page information is
18369  * stored.
18370  */
18371 SCAN_CODE
18372 heap_page_prev (THREAD_ENTRY * thread_p, const OID * class_oid, const HFID * hfid, VPID * prev_vpid,
18373  DB_VALUE ** cache_pageinfo)
18374 {
18375  PGBUF_WATCHER pg_watcher;
18376  PGBUF_WATCHER old_pg_watcher;
18377  SCAN_CODE scan = S_SUCCESS;
18378 
18379  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
18380  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
18381 
18382  /* get previous page */
18383  if (VPID_ISNULL (prev_vpid))
18384  {
18385  /* set to last page */
18386  if (heap_get_last_vpid (thread_p, hfid, prev_vpid) != NO_ERROR)
18387  {
18388  ASSERT_ERROR ();
18389  return S_ERROR;
18390  }
18391  }
18392  else
18393  {
18394  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, prev_vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
18395  if (pg_watcher.pgptr == NULL)
18396  {
18397  return S_ERROR;
18398  }
18399  /* get previous page */
18400  heap_vpid_prev (thread_p, hfid, pg_watcher.pgptr, prev_vpid);
18401  if (VPID_ISNULL (prev_vpid))
18402  {
18403  /* no more pages to scan */
18404  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18405  return S_END;
18406  }
18407  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
18408  }
18409 
18410  pg_watcher.pgptr =
18411  heap_scan_pb_lock_and_fetch (thread_p, prev_vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, NULL, &pg_watcher);
18412  if (old_pg_watcher.pgptr != NULL)
18413  {
18414  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
18415  }
18416  if (pg_watcher.pgptr == NULL)
18417  {
18418  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18419  return S_ERROR;
18420  }
18421 
18422  /* read page information and return scan code */
18423  scan = heap_get_page_info (thread_p, class_oid, hfid, prev_vpid, pg_watcher.pgptr, cache_pageinfo);
18424 
18425  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18426  return scan;
18427 }
18428 
18429 /*
18430  * heap_get_record_info () - Heap function to obtain record information and
18431  * record data.
18432  *
18433  * return : SCAN CODE (S_SUCCESS or S_ERROR).
18434  * thread_p (in) : Thread entry.
18435  * oid (in) : Object identifier.
18436  * recdes (out) : Record descriptor (to save record data).
18437  * forward_recdes (in) : Record descriptor used by REC_RELOCATION & REC_BIGONE
18438  * records.
18439  * page_watcher (in/out) : Watcher of the page this object belongs to.
18440  * scan_cache (in) : Heap scan cache.
18441  * ispeeking (in) : PEEK/COPY.
18442  * record_info (out) : Stores record information.
18443  */
18444 static SCAN_CODE
18445 heap_get_record_info (THREAD_ENTRY * thread_p, const OID oid, RECDES * recdes, RECDES forward_recdes,
18446  PGBUF_WATCHER * page_watcher, HEAP_SCANCACHE * scan_cache, bool ispeeking,
18447  DB_VALUE ** record_info)
18448 {
18449  SPAGE_SLOT *slot_p = NULL;
18450  SCAN_CODE scan = S_SUCCESS;
18451  OID forward_oid;
18452  MVCC_REC_HEADER mvcc_header;
18453 
18454  assert (page_watcher != NULL);
18455  assert (record_info != NULL);
18456  assert (recdes != NULL);
18457 
18458  /* careful adding values in the right order */
18459  db_make_int (record_info[HEAP_RECORD_INFO_T_VOLUMEID], oid.volid);
18460  db_make_int (record_info[HEAP_RECORD_INFO_T_PAGEID], oid.pageid);
18461  db_make_int (record_info[HEAP_RECORD_INFO_T_SLOTID], oid.slotid);
18462 
18463  /* get slot info */
18464  slot_p = spage_get_slot (page_watcher->pgptr, oid.slotid);
18465  if (slot_p == NULL)
18466  {
18467  assert (0);
18468  }
18469  db_make_int (record_info[HEAP_RECORD_INFO_T_OFFSET], slot_p->offset_to_record);
18470  db_make_int (record_info[HEAP_RECORD_INFO_T_LENGTH], slot_p->record_length);
18471  db_make_int (record_info[HEAP_RECORD_INFO_T_REC_TYPE], slot_p->record_type);
18472 
18473  /* get record info */
18474  switch (slot_p->record_type)
18475  {
18476  case REC_NEWHOME:
18477  case REC_HOME:
18478  if (scan_cache != NULL && ispeeking == COPY && recdes->data == NULL)
18479  {
18480  scan_cache->assign_recdes_to_area (*recdes);
18481  /* The default allocated space is enough to save the instance. */
18482  }
18483  if (scan_cache != NULL && scan_cache->cache_last_fix_page == true)
18484  {
18485  scan = spage_get_record (thread_p, page_watcher->pgptr, oid.slotid, recdes, ispeeking);
18486  pgbuf_replace_watcher (thread_p, page_watcher, &scan_cache->page_watcher);
18487  }
18488  else
18489  {
18490  scan = spage_get_record (thread_p, page_watcher->pgptr, oid.slotid, recdes, COPY);
18491  pgbuf_ordered_unfix (thread_p, page_watcher);
18492  }
18493  db_make_int (record_info[HEAP_RECORD_INFO_T_REPRID], or_rep_id (recdes));
18494  db_make_int (record_info[HEAP_RECORD_INFO_T_CHN], or_chn (recdes));
18495  or_mvcc_get_header (recdes, &mvcc_header);
18496  db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_INSID], MVCC_GET_INSID (&mvcc_header));
18497  if (MVCC_IS_HEADER_DELID_VALID (&mvcc_header))
18498  {
18499  db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_DELID], MVCC_GET_DELID (&mvcc_header));
18500  }
18501  else
18502  {
18503  db_make_null (record_info[HEAP_RECORD_INFO_T_MVCC_DELID]);
18504  }
18505  db_make_int (record_info[HEAP_RECORD_INFO_T_CHN], OR_GET_MVCC_CHN (&mvcc_header));
18506  db_make_int (record_info[HEAP_RECORD_INFO_T_MVCC_FLAGS], MVCC_GET_FLAG (&mvcc_header));
18508  {
18510  }
18511  else
18512  {
18514  }
18515  break;
18516 
18517  case REC_BIGONE:
18518  /* Get the address of the content of the multiple page object */
18519  COPY_OID (&forward_oid, (OID *) forward_recdes.data);
18520  pgbuf_ordered_unfix (thread_p, page_watcher);
18521 
18522  /* Now get the content of the multiple page object. */
18523  /* Try to reuse the previously allocated area */
18524  if (scan_cache != NULL && (ispeeking == PEEK || recdes->data == NULL))
18525  {
18526  /* It is guaranteed that scan_cache is not NULL. */
18527  scan_cache->assign_recdes_to_area (*recdes);
18528 
18529  while ((scan = heap_ovf_get (thread_p, &forward_oid, recdes, NULL_CHN, NULL)) == S_DOESNT_FIT)
18530  {
18531  /* The object did not fit into such an area, reallocate a new area */
18532  assert (recdes->length < 0);
18533  scan_cache->assign_recdes_to_area (*recdes, (size_t) (-recdes->length));
18534  }
18535  if (scan != S_SUCCESS)
18536  {
18537  recdes->data = NULL;
18538  }
18539  }
18540  else
18541  {
18542  scan = heap_ovf_get (thread_p, &forward_oid, recdes, NULL_CHN, NULL);
18543  }
18544  if (scan != S_SUCCESS)
18545  {
18546  return S_ERROR;
18547  }
18548  db_make_int (record_info[HEAP_RECORD_INFO_T_REPRID], or_rep_id (recdes));
18549  db_make_int (record_info[HEAP_RECORD_INFO_T_CHN], or_chn (recdes));
18550 
18551  or_mvcc_get_header (recdes, &mvcc_header);
18552  db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_INSID], MVCC_GET_INSID (&mvcc_header));
18553  if (MVCC_IS_HEADER_DELID_VALID (&mvcc_header))
18554  {
18555  db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_DELID], MVCC_GET_DELID (&mvcc_header));
18556  }
18557  else
18558  {
18559  db_make_null (record_info[HEAP_RECORD_INFO_T_MVCC_DELID]);
18560  }
18561  db_make_int (record_info[HEAP_RECORD_INFO_T_CHN], OR_GET_MVCC_CHN (&mvcc_header));
18562  db_make_int (record_info[HEAP_RECORD_INFO_T_MVCC_FLAGS], MVCC_GET_FLAG (&mvcc_header));
18564  {
18566  }
18567  else
18568  {
18570  }
18571  break;
18572  case REC_RELOCATION:
18573  case REC_MARKDELETED:
18574  case REC_DELETED_WILL_REUSE:
18575  case REC_ASSIGN_ADDRESS:
18576  case REC_UNKNOWN:
18577  default:
18578  db_make_null (record_info[HEAP_RECORD_INFO_T_REPRID]);
18579  db_make_null (record_info[HEAP_RECORD_INFO_T_CHN]);
18580  db_make_null (record_info[HEAP_RECORD_INFO_T_MVCC_INSID]);
18581  db_make_null (record_info[HEAP_RECORD_INFO_T_MVCC_DELID]);
18582  db_make_null (record_info[HEAP_RECORD_INFO_T_MVCC_FLAGS]);
18583 
18585 
18586  recdes->area_size = -1;
18587  recdes->data = NULL;
18588  if (scan_cache != NULL && scan_cache->cache_last_fix_page)
18589  {
18590  assert (PGBUF_IS_CLEAN_WATCHER (&(scan_cache->page_watcher)));
18591  if (page_watcher->pgptr != NULL)
18592  {
18593  pgbuf_replace_watcher (thread_p, page_watcher, &scan_cache->page_watcher);
18594  }
18595  }
18596  else if (page_watcher->pgptr != NULL)
18597  {
18598  pgbuf_ordered_unfix (thread_p, page_watcher);
18599  }
18600  break;
18601  }
18602 
18603  return scan;
18604 }
18605 
18606 /*
18607  * heap_next () - Retrieve or peek next object
18608  * return: SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT, S_END, S_ERROR)
18609  * hfid(in):
18610  * class_oid(in):
18611  * next_oid(in/out): Object identifier of current record.
18612  * Will be set to next available record or NULL_OID when
18613  * there is not one.
18614  * recdes(in/out): Pointer to a record descriptor. Will be modified to
18615  * describe the new record.
18616  * scan_cache(in/out): Scan cache or NULL
18617  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
18618  * COPY when the object is copied
18619  *
18620  */
18621 SCAN_CODE
18622 heap_next (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18623  HEAP_SCANCACHE * scan_cache, int ispeeking)
18624 {
18625  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, false, NULL);
18626 }
18627 
18628 /*
18629  * heap_next_record_info () - Retrieve or peek next object.
18630  *
18631  * return : SCAN_CODE.
18632  * thread_p (in) : Thread entry.
18633  * hfid (in) : Heap file identifier.
18634  * class_oid (in) : Class Object identifier.
18635  * next_oid (in/out) : Current object identifier. Will store the next
18636  * scanned object identifier.
18637  * recdes (in) : Record descriptor.
18638  * scan_cache (in) : Scan cache.
18639  * ispeeking (in) : PEEK/COPY.
18640  * cache_recordinfo (in/out) : DB_VALUE pointer array that caches record
18641  * information values.
18642  *
18643  * NOTE: This function is similar to heap next. The difference is that all
18644  * slots are scanned in their order in the heap file and along with
18645  * record data also information about that record is obtained.
18646  */
18647 SCAN_CODE
18648 heap_next_record_info (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18649  HEAP_SCANCACHE * scan_cache, int ispeeking, DB_VALUE ** cache_recordinfo)
18650 {
18651  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, false,
18652  cache_recordinfo);
18653 }
18654 
18655 /*
18656  * heap_prev () - Retrieve or peek previous object
18657  * return: SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT, S_END, S_ERROR)
18658  * hfid(in):
18659  * class_oid(in):
18660  * next_oid(in/out): Object identifier of current record.
18661  * Will be set to the previous available record or NULL_OID when
18662  * there is not one.
18663  * recdes(in/out): Pointer to a record descriptor. Will be modified to
18664  * describe the new record.
18665  * scan_cache(in/out): Scan cache or NULL
18666  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
18667  * COPY when the object is copied
18668  *
18669  */
18670 SCAN_CODE
18671 heap_prev (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18672  HEAP_SCANCACHE * scan_cache, int ispeeking)
18673 {
18674  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, true, NULL);
18675 }
18676 
18677 /*
18678  * heap_prev_record_info () - Retrieve or peek previous object.
18679  *
18680  * return : SCAN_CODE.
18681  * thread_p (in) : Thread entry.
18682  * hfid (in) : Heap file identifier.
18683  * class_oid (in) : Class Object identifier.
18684  * prev_oid (in/out) : Current object identifier. Will store the
18685  * previous scanned object identifier.
18686  * recdes (in) : Record descriptor.
18687  * scan_cache (in) : Scan cache.
18688  * ispeeking (in) : PEEK/COPY.
18689  * cache_recordinfo (in/out) : DB_VALUE pointer array that caches record
18690  * information values
18691  *
18692  * NOTE: This function is similar to heap_prev. The difference is that all
18693  * slots are scanned in their order in the heap file and along with
18694  * record data also information about that record is obtained.
18695  */
18696 SCAN_CODE
18697 heap_prev_record_info (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18698  HEAP_SCANCACHE * scan_cache, int ispeeking, DB_VALUE ** cache_recordinfo)
18699 {
18700  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, true,
18701  cache_recordinfo);
18702 }
18703 
18704 /*
18705  * heap_get_mvcc_rec_header_from_overflow () - Get record header from overflow
18706  * page.
18707  *
18708  * return : error code (NO_ERROR or error code from or_mvcc_get_header).
18709  * PAGE_PTR ovf_page (in) : overflow page pointer
18710  * MVCC_REC_HEADER * mvcc_header (in/out) : MVCC record header
18711  * peek_recdes (in/out): if not NULL, receives a peek of the first overflow page data
18712  */
18713 int
18714 heap_get_mvcc_rec_header_from_overflow (PAGE_PTR ovf_page, MVCC_REC_HEADER * mvcc_header, RECDES * peek_recdes)
18715 {
18716  RECDES ovf_recdes;
18717 
18718  assert (ovf_page != NULL);
18719  assert (mvcc_header != NULL);
18720 
18721  if (peek_recdes == NULL)
18722  {
18723  peek_recdes = &ovf_recdes;
18724  }
18725  peek_recdes->data = overflow_get_first_page_data (ovf_page);
18726  peek_recdes->length = OR_MVCC_MAX_HEADER_SIZE;
18727 
18728  return or_mvcc_get_header (peek_recdes, mvcc_header);
18729 }
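/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): assuming the caller
 * already holds the first overflow page fixed, this shows how the MVCC header of a REC_BIGONE
 * record can be peeked in place and its delete id tested. The helper name is hypothetical.
 */
#if 0
static bool
example_overflow_record_has_valid_delid (PAGE_PTR first_ovf_page)
{
  MVCC_REC_HEADER mvcc_header;

  /* peek the header stored at the beginning of the first overflow page data */
  if (heap_get_mvcc_rec_header_from_overflow (first_ovf_page, &mvcc_header, NULL) != NO_ERROR)
    {
      return false;		/* header could not be read; treated as "not deleted" in this sketch */
    }
  return MVCC_IS_HEADER_DELID_VALID (&mvcc_header) ? true : false;
}
#endif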
18730 
18731 /*
18732  * heap_set_mvcc_rec_header_on_overflow () - Updates MVCC record header on
18733  * overflow page data.
18734  *
18735  * return : error code.
18736  * ovf_page (in) : First overflow page.
18737  * mvcc_header (in) : MVCC Record header.
18738  */
18739 int
18740 heap_set_mvcc_rec_header_on_overflow (PAGE_PTR ovf_page, MVCC_REC_HEADER * mvcc_header)
18741 {
18742  RECDES ovf_recdes;
18743 
18744  assert (ovf_page != NULL);
18745  assert (mvcc_header != NULL);
18746 
18747  ovf_recdes.data = overflow_get_first_page_data (ovf_page);
18748  ovf_recdes.area_size = ovf_recdes.length = OR_HEADER_SIZE (ovf_recdes.data);
18749  /* Safe guard */
18750  assert (ovf_recdes.length == OR_MVCC_MAX_HEADER_SIZE);
18751 
18752  /* Make sure the header has maximum size for overflow records */
18753  if (!MVCC_IS_FLAG_SET (mvcc_header, OR_MVCC_FLAG_VALID_INSID))
18754  {
18755  /* Add MVCCID_ALL_VISIBLE for insert MVCCID */
18756  MVCC_SET_FLAG_BITS (mvcc_header, OR_MVCC_FLAG_VALID_INSID);
18757  MVCC_SET_INSID (mvcc_header, MVCCID_ALL_VISIBLE);
18758  }
18759 
18760  if (!MVCC_IS_FLAG_SET (mvcc_header, OR_MVCC_FLAG_VALID_DELID))
18761  {
18762  /* Add MVCCID_NULL for delete MVCCID */
18763  MVCC_SET_FLAG_BITS (mvcc_header, OR_MVCC_FLAG_VALID_DELID);
18764  MVCC_SET_DELID (mvcc_header, MVCCID_NULL);
18765  }
18766 
18767  /* Safe guard */
18769  return or_mvcc_set_header (&ovf_recdes, mvcc_header);
18770 }
18771 
18772 /*
18773  * heap_get_bigone_content () - get content of a big record
18774  *
18775  * return : scan code.
18776  * thread_p (in) :
18777  * scan_cache (in) : Scan cache
18778  * ispeeking(in) : 0 if the content will be copied.
18779  * forward_oid(in) : content oid.
18780  * recdes(in/out) : record descriptor that will contain its content
18781  */
18782 SCAN_CODE
18783 heap_get_bigone_content (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool ispeeking, OID * forward_oid,
18784  RECDES * recdes)
18785 {
18786  SCAN_CODE scan = S_SUCCESS;
18787 
18788  /* Try to reuse the previously allocated area. No need to check the snapshot since it was already checked. */
18789  if (scan_cache != NULL
18790  && (ispeeking == PEEK || recdes->data == NULL || scan_cache->is_recdes_assigned_to_area (*recdes)))
18791  {
18792  scan_cache->assign_recdes_to_area (*recdes);
18793 
18794  while ((scan = heap_ovf_get (thread_p, forward_oid, recdes, NULL_CHN, NULL)) == S_DOESNT_FIT)
18795  {
18796  /*
18797  * The object did not fit into such an area, reallocate a new area
18798  */
18799  assert (recdes->length < 0);
18800  scan_cache->assign_recdes_to_area (*recdes, (size_t) (-recdes->length));
18801  }
18802  if (scan != S_SUCCESS)
18803  {
18804  recdes->data = NULL;
18805  }
18806  }
18807  else
18808  {
18809  scan = heap_ovf_get (thread_p, forward_oid, recdes, NULL_CHN, NULL);
18810  }
18811 
18812  return scan;
18813 }
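/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): assuming a started
 * scan cache and a forward OID already extracted from a REC_BIGONE home record, this shows the
 * COPY path, where the scan cache area is (re)assigned and grown until the overflow content fits.
 * The helper name is hypothetical.
 */
#if 0
static SCAN_CODE
example_copy_bigone (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, OID * forward_oid)
{
  RECDES recdes;

  recdes.data = NULL;		/* force heap_get_bigone_content to assign the scan cache area */
  recdes.area_size = 0;

  /* the function above retries internally while the area is too small (S_DOESNT_FIT) */
  return heap_get_bigone_content (thread_p, scan_cache, COPY, forward_oid, &recdes);
}
#endif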
18814 
18815 /*
18816  * heap_get_class_oid_from_page () - Gets heap page owner class OID.
18817  *
18818  * return : Error code.
18819  * thread_p (in) : Thread entry.
18820  * page_p (in) : Heap page.
18821  * class_oid (out) : Class identifier.
18822  */
18823 int
18824 heap_get_class_oid_from_page (THREAD_ENTRY * thread_p, PAGE_PTR page_p, OID * class_oid)
18825 {
18826  RECDES chain_recdes;
18827  HEAP_CHAIN *chain;
18828 
18829  if (spage_get_record (thread_p, page_p, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
18830  {
18831  assert (0);
18833  return ER_FAILED;
18834  }
18835 
18836  chain = (HEAP_CHAIN *) chain_recdes.data;
18837  COPY_OID (class_oid, &(chain->class_oid));
18838 
18839  /*
18840  * kludge, root class is identified with a NULL class OID but we must
18841  * substitute the actual OID here - think about this
18842  */
18843  if (OID_ISNULL (class_oid))
18844  {
18845  /* root class class oid, substitute with global */
18846  COPY_OID (class_oid, oid_Root_class_oid);
18847  }
18848  return NO_ERROR;
18849 }
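/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): it shows how the
 * owning class of an already fixed heap page could be obtained and logged; the page pointer is
 * assumed to come from a prior page buffer fix of a PAGE_HEAP page, and the helper name is
 * hypothetical.
 */
#if 0
static int
example_log_owning_class (THREAD_ENTRY * thread_p, PAGE_PTR heap_page)
{
  OID class_oid;

  OID_SET_NULL (&class_oid);
  if (heap_get_class_oid_from_page (thread_p, heap_page, &class_oid) != NO_ERROR)
    {
      return ER_FAILED;
    }
  er_log_debug (ARG_FILE_LINE, "heap page owned by class (%d|%d|%d)\n",
		class_oid.volid, class_oid.pageid, class_oid.slotid);
  return NO_ERROR;
}
#endif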
18850 
18851 /*
18852  * heap_mvcc_log_home_change_on_delete () - Log the change of record in home page when MVCC delete does not
18853  * change a REC_HOME to REC_HOME.
18854  *
18855  * return : Void.
18856  * thread_p (in) : Thread entry.
18857  * old_recdes (in) : NULL or a REC_RELOCATION record.
18858  * new_recdes (in) : Record including delete info (MVCCID and next version).
18859  * p_addr (in) : Log data address.
18860  */
18861 static void
18862 heap_mvcc_log_home_change_on_delete (THREAD_ENTRY * thread_p, RECDES * old_recdes, RECDES * new_recdes,
18863  LOG_DATA_ADDR * p_addr)
18864 {
18865  HEAP_PAGE_VACUUM_STATUS vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
18866 
18867  /* REC_RELOCATION type record was brought back to home page or REC_HOME has been converted to
18868  * REC_RELOCATION/REC_BIGONE. */
18869 
18870  /* Update heap chain for vacuum. */
18872  if (heap_page_get_vacuum_status (thread_p, p_addr->pgptr) != vacuum_status)
18873  {
18874  /* Mark vacuum status change for recovery. */
18876  }
18877 
18878  if (thread_p->no_logging)
18879  {
18880  log_append_undo_recdes (thread_p, RVHF_MVCC_DELETE_MODIFY_HOME, p_addr, old_recdes);
18881  }
18882  else
18883  {
18884  log_append_undoredo_recdes (thread_p, RVHF_MVCC_DELETE_MODIFY_HOME, p_addr, old_recdes, new_recdes);
18885  }
18886 }
18887 
18888 /*
18889  * heap_mvcc_log_home_no_change () - Update page chain for vacuum and notify vacuum even when home page is not changed.
18890  * Used by update/delete of REC_RELOCATION and REC_BIGONE.
18891  *
18892  * return : Void.
18893  * thread_p (in) : Thread entry.
18894  * p_addr (in) : Data address for logging.
18895  */
18896 static void
18897 heap_mvcc_log_home_no_change (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr)
18898 {
18899  HEAP_PAGE_VACUUM_STATUS vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
18900 
18901  /* Update heap chain for vacuum. */
18903  if (vacuum_status != heap_page_get_vacuum_status (thread_p, p_addr->pgptr))
18904  {
18905  /* Mark vacuum status change for recovery. */
18907  }
18908 
18909  log_append_undoredo_data (thread_p, RVHF_MVCC_NO_MODIFY_HOME, p_addr, 0, 0, NULL, NULL);
18910 }
18911 
18912 /*
18913  * heap_rv_redo_update_and_update_chain () - Redo update record as part of MVCC delete operation.
18914  * return: int
18915  * rcv(in): Recovery structure
18916  */
18917 int
18918 heap_rv_redo_update_and_update_chain (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
18919 {
18920  int error_code = NO_ERROR;
18921  bool vacuum_status_change = false;
18922  PGSLOTID slotid;
18923 
18924  assert (rcv->pgptr != NULL);
18925  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));
18926 
18927  slotid = rcv->offset;
18928  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
18929  {
18930  vacuum_status_change = true;
18931  }
18932  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
18933  assert (slotid > 0);
18934 
18935  error_code = heap_rv_redo_update (thread_p, rcv);
18936  if (error_code != NO_ERROR)
18937  {
18938  ASSERT_ERROR ();
18939  return error_code;
18940  }
18941 
18942  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
18943  /* Page was already marked as dirty */
18944  return NO_ERROR;
18945 }
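/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): it restates the bit
 * packing decoded above, where the recovery offset carries both the slot id and, in its flag bit,
 * whether the page vacuum status changed. The helper name is hypothetical.
 */
#if 0
static void
example_unpack_recovery_offset (INT16 rv_offset, PGSLOTID * slotid_out, bool * vacuum_status_change_out)
{
  /* the flag bit rides on top of the slot id, exactly as in heap_rv_redo_update_and_update_chain () */
  *vacuum_status_change_out = (rv_offset & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE) != 0;
  *slotid_out = (PGSLOTID) (rv_offset & ~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
}
#endif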
18946 
18947 /*
18948  * heap_attrinfo_check_unique_index () - check whether a unique index exists on
18949  * the specified attributes
18950  * return: true, if there is a unique index containing the specified attributes
18951  * thread_p(in): thread entry
18952  * attr_info(in): attribute info
18953  * att_id(in): attribute ids
18954  * n_att_id(in): number of attributes
18955  */
18956 bool
18957 heap_attrinfo_check_unique_index (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, ATTR_ID * att_id,
18958  int n_att_id)
18959 {
18960  OR_INDEX *index;
18961  int num_btids, i, j, k;
18962 
18963  if (attr_info == NULL || att_id == NULL)
18964  {
18965  return false;
18966  }
18967 
18968  num_btids = attr_info->last_classrepr->n_indexes;
18969  for (i = 0; i < num_btids; i++)
18970  {
18971  index = &(attr_info->last_classrepr->indexes[i]);
18972  if (btree_is_unique_type (index->type))
18973  {
18974  for (j = 0; j < n_att_id; j++)
18975  {
18976  for (k = 0; k < index->n_atts; k++)
18977  {
18978  if (att_id[j] == (ATTR_ID) (index->atts[k]->id))
18979  { /* the index key_type has updated attr */
18980  return true;
18981  }
18982  }
18983  }
18984  }
18985  }
18986 
18987  return false;
18988 }
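/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): assuming an attribute
 * cache already initialized for the class being updated, it shows how a caller might test whether
 * the updated attributes participate in any unique index before choosing a locking strategy. The
 * helper name and the attribute ids are hypothetical.
 */
#if 0
static bool
example_update_touches_unique_index (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info)
{
  /* hypothetical attribute ids of the two columns being updated */
  ATTR_ID updated_atts[2] = { 1, 3 };

  /* true when at least one updated attribute belongs to a unique index */
  return heap_attrinfo_check_unique_index (thread_p, attr_info, updated_atts, 2);
}
#endif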
18989 
18990 #if defined(ENABLE_UNUSED_FUNCTION)
18991 /*
18992  * heap_try_fetch_header_page () -
18993  * try to fetch header page, having home page already fetched
18994  *
18995  * return: error code
18996  * thread_p(in): thread entry
18997  * home_pgptr_p(out):
18998  * home_vpid_p(in):
18999  * oid_p(in):
19000  * hdr_pgptr_p(out):
19001  * hdr_vpid_p(in):
19002  * scan_cache(in):
19003  * again_count_p(in/out):
19004  * again_max(in):
19005  */
19006 /* TODO - fix er_clear */
19007 STATIC_INLINE int
19008 heap_try_fetch_header_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p, const VPID * home_vpid_p,
19009  const OID * oid_p, PAGE_PTR * hdr_pgptr_p, const VPID * hdr_vpid_p,
19010  HEAP_SCANCACHE * scan_cache, int *again_count_p, int again_max)
19011 {
19012  int error_code = NO_ERROR;
19013 
19014  *hdr_pgptr_p = pgbuf_fix (thread_p, hdr_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
19015  if (*hdr_pgptr_p != NULL)
19016  {
19017  return NO_ERROR;
19018  }
19019 
19020  pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
19021  *hdr_pgptr_p = heap_scan_pb_lock_and_fetch (thread_p, hdr_vpid_p, OLD_PAGE, X_LOCK, scan_cache, NULL);
19022  if (*hdr_pgptr_p == NULL)
19023  {
19024  error_code = er_errid ();
19025  if (error_code == ER_PB_BAD_PAGEID)
19026  {
19027  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, hdr_vpid_p->volid, hdr_vpid_p->pageid,
19028  0);
19029  error_code = ER_HEAP_UNKNOWN_OBJECT;
19030  }
19031  }
19032  else
19033  {
19034  *home_pgptr_p = pgbuf_fix (thread_p, home_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
19035  if (*home_pgptr_p == NULL)
19036  {
19037  pgbuf_unfix_and_init (thread_p, *hdr_pgptr_p);
19038  if ((*again_count_p)++ >= again_max)
19039  {
19040  error_code = er_errid ();
19041  if (error_code == ER_PB_BAD_PAGEID)
19042  {
19043  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid_p->volid, oid_p->pageid,
19044  oid_p->slotid);
19045  error_code = ER_HEAP_UNKNOWN_OBJECT;
19046  }
19047  else if (error_code == NO_ERROR)
19048  {
19049  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, home_vpid_p->volid,
19050  home_vpid_p->pageid);
19051  error_code = ER_PAGE_LATCH_ABORTED;
19052  }
19053  }
19054  }
19055  }
19056 
19057  return error_code;
19058 }
19059 
19060 /*
19061  * heap_try_fetch_forward_page () -
19062  * try to fetch forward page, having home page already fetched
19063  *
19064  * return: error code
19065  * thread_p(in): thread entry
19066  * home_pgptr_p(out):
19067  * home_vpid_p(in):
19068  * oid_p(in):
19069  * fwd_pgptr_p(out):
19070  * fwd_vpid_p(in):
19071  * fwd_oid_p(in):
19072  * scan_cache(in):
19073  * again_count_p(in/out):
19074  * again_max(in):
19075  */
19076 STATIC_INLINE int
19077 heap_try_fetch_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p, const VPID * home_vpid_p,
19078  const OID * oid_p, PAGE_PTR * fwd_pgptr_p, const VPID * fwd_vpid_p, const OID * fwd_oid_p,
19079  HEAP_SCANCACHE * scan_cache, int *again_count_p, int again_max)
19080 {
19081  int error_code = NO_ERROR;
19082 
19083  *fwd_pgptr_p = pgbuf_fix (thread_p, fwd_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
19084  if (*fwd_pgptr_p != NULL)
19085  {
19086  return NO_ERROR;
19087  }
19088 
19089  pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
19090  *fwd_pgptr_p = heap_scan_pb_lock_and_fetch (thread_p, fwd_vpid_p, OLD_PAGE, X_LOCK, scan_cache, NULL);
19091  if (*fwd_pgptr_p == NULL)
19092  {
19093  error_code = er_errid ();
19094  if (error_code == ER_PB_BAD_PAGEID)
19095  {
19096  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, fwd_oid_p->volid, fwd_oid_p->pageid,
19097  fwd_oid_p->slotid);
19098  error_code = ER_HEAP_UNKNOWN_OBJECT;
19099  }
19100  }
19101  else
19102  {
19103  *home_pgptr_p = pgbuf_fix (thread_p, home_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
19104  if (*home_pgptr_p == NULL)
19105  {
19106  pgbuf_unfix_and_init (thread_p, *fwd_pgptr_p);
19107  if ((*again_count_p)++ >= again_max)
19108  {
19109  error_code = er_errid ();
19110  if (error_code == ER_PB_BAD_PAGEID)
19111  {
19112  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid_p->volid, oid_p->pageid,
19113  oid_p->slotid);
19114  error_code = ER_HEAP_UNKNOWN_OBJECT;
19115  }
19116  else if (error_code == NO_ERROR)
19117  {
19118  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, home_vpid_p->volid,
19119  home_vpid_p->pageid);
19120  error_code = ER_PAGE_LATCH_ABORTED;
19121  }
19122  }
19123  }
19124  }
19125 
19126  return error_code;
19127 }
19128 
19129 /*
19130  * heap_try_fetch_header_with_forward_page () -
19131  * try to fetch header and forward page, having home page already fetched
19132  *
19133  * return: error code
19134  * thread_p(in): thread entry
19135  * home_pgptr_p(out):
19136  * home_vpid_p(in):
19137  * oid_p(in):
19138  * hdr_pgptr_p(out):
19139  * hdr_vpid_p(in):
19140  * fwd_pgptr_p(out):
19141  * fwd_vpid_p(in):
19142  * fwd_oid_p(in):
19143  * scan_cache(in):
19144  * again_count_p(in/out):
19145  * again_max(in):
19146  */
19147 STATIC_INLINE int
19148 heap_try_fetch_header_with_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p, const VPID * home_vpid_p,
19149  const OID * oid_p, PAGE_PTR * hdr_pgptr_p, const VPID * hdr_vpid_p,
19150  PAGE_PTR * fwd_pgptr_p, const VPID * fwd_vpid_p, const OID * fwd_oid_p,
19151  HEAP_SCANCACHE * scan_cache, int *again_count_p, int again_max)
19152 {
19153  int error_code = NO_ERROR;
19154 
19155  *hdr_pgptr_p = pgbuf_fix (thread_p, hdr_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
19156  if (*hdr_pgptr_p != NULL)
19157  {
19158  return NO_ERROR;
19159  }
19160 
19161  pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
19162  pgbuf_unfix_and_init (thread_p, *fwd_pgptr_p);
19163  *hdr_pgptr_p = heap_scan_pb_lock_and_fetch (thread_p, hdr_vpid_p, OLD_PAGE, X_LOCK, scan_cache, NULL);
19164  if (*hdr_pgptr_p == NULL)
19165  {
19166  error_code = er_errid ();
19167  if (error_code == ER_PB_BAD_PAGEID)
19168  {
19169  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, hdr_vpid_p->volid, hdr_vpid_p->pageid,
19170  0);
19171  error_code = ER_HEAP_UNKNOWN_OBJECT;
19172  }
19173  }
19174  else
19175  {
19176  *home_pgptr_p = pgbuf_fix (thread_p, home_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
19177  if (*home_pgptr_p == NULL)
19178  {
19179  pgbuf_unfix_and_init (thread_p, *hdr_pgptr_p);
19180  if ((*again_count_p)++ >= again_max)
19181  {
19182  error_code = er_errid ();
19183  if (error_code == ER_PB_BAD_PAGEID)
19184  {
19185  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid_p->volid, oid_p->pageid,
19186  oid_p->slotid);
19187  error_code = ER_HEAP_UNKNOWN_OBJECT;
19188  }
19189  else if (error_code == NO_ERROR)
19190  {
19191  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, home_vpid_p->volid,
19192  home_vpid_p->pageid);
19193  error_code = ER_PAGE_LATCH_ABORTED;
19194  }
19195  }
19196  }
19197  else
19198  {
19199  *fwd_pgptr_p = pgbuf_fix (thread_p, fwd_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
19200  if (*fwd_pgptr_p == NULL)
19201  {
19202  pgbuf_unfix_and_init (thread_p, *hdr_pgptr_p);
19203  pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
19204  if ((*again_count_p)++ >= again_max)
19205  {
19206  error_code = er_errid ();
19207  if (error_code == ER_PB_BAD_PAGEID)
19208  {
19209  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, fwd_oid_p->volid,
19210  fwd_oid_p->pageid, fwd_oid_p->slotid);
19211  error_code = ER_HEAP_UNKNOWN_OBJECT;
19212  }
19213  else if (er_errid () == NO_ERROR)
19214  {
19215  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, fwd_vpid_p->volid,
19216  fwd_vpid_p->pageid);
19217  }
19218  }
19219  }
19220  }
19221  }
19222 
19223  return error_code;
19224 }
19225 #endif /* ENABLE_UNUSED_FUNCTION */
19226 
19227 /*
19228  * heap_get_header_page () -
19229  * return: error code
19230  * hfid(in): Heap file identifier
19231  * header_vpid(out):
19232  *
19233  * Note: get the page identifier of the first allocated page of the given file.
19234  */
19235 int
19236 heap_get_header_page (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * header_vpid)
19237 {
19238  assert (!VFID_ISNULL (&hfid->vfid));
19239 
19240  return file_get_sticky_first_page (thread_p, &hfid->vfid, header_vpid);
19241 }
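/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): it shows how the
 * sticky first (header) page of a heap file could be located and read-latched; error handling is
 * reduced to a minimum and the helper name is hypothetical.
 */
#if 0
static int
example_fix_heap_header (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR * header_page_out)
{
  VPID header_vpid;

  if (heap_get_header_page (thread_p, hfid, &header_vpid) != NO_ERROR)
    {
      return ER_FAILED;
    }
  *header_page_out = pgbuf_fix (thread_p, &header_vpid, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH);
  if (*header_page_out == NULL)
    {
      return ER_FAILED;
    }
  (void) pgbuf_check_page_ptype (thread_p, *header_page_out, PAGE_HEAP);
  return NO_ERROR;
}
#endif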
19242 
19243 /*
19244  * heap_scancache_quick_start_root_hfid () - Start caching information for a
19245  * heap scan on root hfid
19246  * return: NO_ERROR
19247  * thread_p(in):
19248  * scan_cache(in/out): Scan cache
19249  *
19250  * Note: this is similar to heap_scancache_quick_start, except it sets the
19251  * HFID of root in the scan_cache (otherwise remains NULL).
19252  * This should be used to avoid inconsistency when using ordered fix.
19253  */
19254 int
19255 heap_scancache_quick_start_root_hfid (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
19256 {
19257  HFID root_hfid;
19258 
19259  (void) boot_find_root_heap (&root_hfid);
19260  (void) heap_scancache_quick_start_internal (scan_cache, &root_hfid);
19261  scan_cache->page_latch = S_LOCK;
19262 
19263  return NO_ERROR;
19264 }
19265 
19266 /*
19267  * heap_scancache_quick_start_with_class_oid () - Start caching information for
19268  * a heap scan on a class.
19269  *
19270  * return: NO_ERROR
19271  * thread_p(in):
19272  * scan_cache(in/out): Scan cache
19273  * class_oid(in): class
19274  *
19275  * Note: this is similar to heap_scancache_quick_start, except it sets the
19276  * HFID of class in the scan_cache (otherwise remains NULL).
19277  * This should be used to avoid inconsistency when using ordered fix.
19278  * This has a page latch overhead on top of heap_scancache_quick_start.
19279  *
19280  */
19281 int
19282 heap_scancache_quick_start_with_class_oid (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, OID * class_oid)
19283 {
19284  HFID class_hfid;
19285 
19286  heap_get_class_info (thread_p, class_oid, &class_hfid, NULL, NULL);
19287  (void) heap_scancache_quick_start_with_class_hfid (thread_p, scan_cache, &class_hfid);
19288  scan_cache->page_latch = S_LOCK;
19289 
19290  return NO_ERROR;
19291 }
19292 
19293 /*
19294  * heap_scancache_quick_start_with_class_hfid () - Start caching information for
19295  * a heap scan on a class.
19296  *
19297  * return: NO_ERROR
19298  * thread_p(in):
19299  * scan_cache(in/out): Scan cache
19300  * hfid(in): heap file identifier of the class
19301  *
19302  * Note: this is similar to heap_scancache_quick_start, except it sets the
19303  * HFID of class in the scan_cache (otherwise remains NULL).
19304  * This should be used to avoid inconsistency when using ordered fix.
19305  *
19306  */
19307 int
19308 heap_scancache_quick_start_with_class_hfid (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, HFID * hfid)
19309 {
19310  (void) heap_scancache_quick_start_internal (scan_cache, hfid);
19311  scan_cache->page_latch = S_LOCK;
19312 
19313  return NO_ERROR;
19314 }
19315 
19316 /*
19317  * heap_scancache_quick_start_modify_with_class_oid () -
19318  * Start caching information for a heap scan on class.
19319  *
19320  * return: NO_ERROR
19321  * thread_p(in):
19322  * scan_cache(in/out): Scan cache
19323  * class_oid(in): class
19324  *
19325  * Note: this is similar to heap_scancache_quick_start_modify, except it sets
19326  * the HFID of class in the scan_cache (otherwise remains NULL).
19327  * This should be used to avoid inconsistency when using ordered fix.
19328  * This has a page latch overhead on top of heap_scancache_quick_start.
19329  */
19330 int
19331 heap_scancache_quick_start_modify_with_class_oid (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, OID * class_oid)
19332 {
19333  HFID class_hfid;
19334 
19335  heap_get_class_info (thread_p, class_oid, &class_hfid, NULL, NULL);
19336  (void) heap_scancache_quick_start_internal (scan_cache, &class_hfid);
19337  scan_cache->page_latch = X_LOCK;
19338 
19339  return NO_ERROR;
19340 }
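/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): it shows the intended
 * pairing of the quick-start helpers with heap_scancache_end. The quick start only primes the HFID
 * and latch mode, so the cache must still be ended by the caller. The helper name is hypothetical.
 */
#if 0
static int
example_quick_scancache_round_trip (THREAD_ENTRY * thread_p, OID * class_oid)
{
  HEAP_SCANCACHE scan_cache;

  if (heap_scancache_quick_start_with_class_oid (thread_p, &scan_cache, class_oid) != NO_ERROR)
    {
      return ER_FAILED;
    }
  /* ... fetch records of class_oid through the scan cache ... */
  return heap_scancache_end (thread_p, &scan_cache);
}
#endif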
19341 
19342 /*
19343  * heap_link_watchers () - link page watchers of a child operation to its
19344  * parent
19345  * child(in): child operation context
19346  * parent(in): parent operation context
19347  *
19348  * NOTE: Sometimes, parts of a heap operation are executed in a parent heap
19349  * operation, skipping the fixing of pages and location of records.
19350  * Since page watchers are identified by address, we must use a single
19351  * location for them, and reference it everywhere.
19352  */
19353 static void
19354 heap_link_watchers (HEAP_OPERATION_CONTEXT * child, HEAP_OPERATION_CONTEXT * parent)
19355 {
19356  assert (child != NULL);
19357  assert (parent != NULL);
19358 
19359  child->header_page_watcher_p = &parent->header_page_watcher;
19360  child->forward_page_watcher_p = &parent->forward_page_watcher;
19361  child->overflow_page_watcher_p = &parent->overflow_page_watcher;
19362  child->home_page_watcher_p = &parent->home_page_watcher;
19363 }
19364 
19365 /*
19366  * heap_unfix_watchers () - unfix context pages
19367  * thread_p(in): thread entry
19368  * context(in): operation context
19369  *
19370  * NOTE: This function only unfixes physical watchers. Calling this in a child
19371  * operation that was linked to the parent with heap_link_watchers will
19372  * have no effect on the fixed pages.
19373  */
19374 static void
19375 heap_unfix_watchers (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
19376 {
19377  assert (context != NULL);
19378 
19379  /* unfix pages */
19380  if (context->home_page_watcher.pgptr != NULL)
19381  {
19382  pgbuf_ordered_unfix (thread_p, &context->home_page_watcher);
19383  }
19384  if (context->overflow_page_watcher.pgptr != NULL)
19385  {
19386  pgbuf_ordered_unfix (thread_p, &context->overflow_page_watcher);
19387  }
19388  if (context->header_page_watcher.pgptr != NULL)
19389  {
19390  pgbuf_ordered_unfix (thread_p, &context->header_page_watcher);
19391  }
19392  if (context->forward_page_watcher.pgptr != NULL)
19393  {
19394  pgbuf_ordered_unfix (thread_p, &context->forward_page_watcher);
19395  }
19396 }
19397 
19398 /*
19399  * heap_clear_operation_context () - clear a heap operation context
19400  * context(in): the context
19401  * hfid_p(in): heap file identifier
19402  */
19403 static void
19404 heap_clear_operation_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p)
19405 {
19406  assert (context != NULL);
19407  assert (hfid_p != NULL);
19408 
19409  /* keep hfid */
19410  HFID_COPY (&context->hfid, hfid_p);
19411 
19412  /* initialize watchers to HFID */
19417 
19418  /* by default link physical watchers to usage watchers on same context */
19419  heap_link_watchers (context, context);
19420 
19421  /* nullify everything else */
19422  context->type = HEAP_OPERATION_NONE;
19424  OID_SET_NULL (&context->oid);
19425  OID_SET_NULL (&context->class_oid);
19426  context->recdes_p = NULL;
19427  context->scan_cache_p = NULL;
19428 
19429  context->map_recdes.data = NULL;
19430  context->map_recdes.length = 0;
19431  context->map_recdes.area_size = 0;
19432  context->map_recdes.type = REC_UNKNOWN;
19433 
19434  OID_SET_NULL (&context->ovf_oid);
19435 
19436  context->home_recdes.data = NULL;
19437  context->home_recdes.length = 0;
19438  context->home_recdes.area_size = 0;
19439  context->home_recdes.type = REC_UNKNOWN;
19440 
19441  context->record_type = REC_UNKNOWN;
19442  context->file_type = FILE_UNKNOWN_TYPE;
19443  OID_SET_NULL (&context->res_oid);
19444  context->is_logical_old = false;
19445  context->is_redistribute_insert_with_delid = false;
19446  context->is_bulk_op = false;
19447 
19448  context->time_track = NULL;
19449 }
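/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): it shows the intended
 * lifecycle of a HEAP_OPERATION_CONTEXT as set up by the helpers above: clear the context (which
 * links its physical watchers to itself), optionally relink a child context to a parent so fixed
 * pages are shared, and unfix whatever pages this context still owns at the end. The helper name
 * is hypothetical.
 */
#if 0
static void
example_context_lifecycle (THREAD_ENTRY * thread_p, HFID * hfid_p, HEAP_OPERATION_CONTEXT * parent_context)
{
  HEAP_OPERATION_CONTEXT child_context;

  heap_clear_operation_context (&child_context, hfid_p);
  if (parent_context != NULL)
    {
      /* share the parent's fixed pages instead of fixing them again */
      heap_link_watchers (&child_context, parent_context);
    }

  /* ... perform the child operation here ... */

  /* only pages physically owned by this context are unfixed */
  heap_unfix_watchers (thread_p, &child_context);
}
#endif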
19450 
19451 /*
19452  * heap_mark_class_as_modified () - add to transaction's modified class list
19453  * and cache/decache coherency number
19454  * thread_p(in): thread entry
19455  * oid_p(in): class OID
19456  * chn(in): coherency number (required iff decache == false)
19457  * decache(in): (false => cache, true => decache)
19458  */
19459 static int
19460 heap_mark_class_as_modified (THREAD_ENTRY * thread_p, OID * oid_p, int chn, bool decache)
19461 {
19462  char *classname = NULL;
19463 
19464  assert (oid_p != NULL);
19465 
19466  if (heap_Guesschn == NULL || HFID_IS_NULL (&(heap_Classrepr->rootclass_hfid)))
19467  {
19468  /* nothing to do */
19469  return NO_ERROR;
19470  }
19471 
19472  if (heap_get_class_name (thread_p, oid_p, &classname) != NO_ERROR || classname == NULL)
19473  {
19474  ASSERT_ERROR ();
19475  return ER_FAILED;
19476  }
19477  if (log_add_to_modified_class_list (thread_p, classname, oid_p) != NO_ERROR)
19478  {
19479  free_and_init (classname);
19480  return ER_FAILED;
19481  }
19482 
19483  free_and_init (classname);
19484 
19485  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
19486  {
19487  return ER_FAILED;
19488  }
19489  heap_Guesschn->schema_change = true;
19490 
19491  if (decache)
19492  {
19493  (void) heap_chnguess_decache (oid_p);
19494  }
19495  else
19496  {
19497  (void) heap_chnguess_put (thread_p, oid_p, LOG_FIND_THREAD_TRAN_INDEX (thread_p), chn);
19498  }
19499 
19500  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
19501 
19502  /* all ok */
19503  return NO_ERROR;
19504 }
19505 
19506 /*
19507  * heap_get_file_type () - get the file type from a heap operation context
19508  * thread_p(in): thread entry
19509  * context(in): operation context
19510  * returns: file type
19511  */
19512 static FILE_TYPE
19513 heap_get_file_type (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
19514 {
19515  FILE_TYPE file_type;
19516  if (context->scan_cache_p != NULL)
19517  {
19518  assert (HFID_EQ (&context->hfid, &context->scan_cache_p->node.hfid));
19520 
19521  return context->scan_cache_p->file_type;
19522  }
19523  else
19524  {
19525  if (heap_get_class_info (thread_p, &context->class_oid, NULL, &file_type, NULL) != NO_ERROR)
19526  {
19527  ASSERT_ERROR ();
19528  return FILE_UNKNOWN_TYPE;
19529  }
19530  assert (file_type == FILE_HEAP || file_type == FILE_HEAP_REUSE_SLOTS);
19531  return file_type;
19532  }
19533 }
19534 
19535 /*
19536  * heap_is_valid_oid () - check if provided OID is valid
19537  * oid_p(in): object identifier
19538  * returns: error code or NO_ERROR
19539  */
19540 static int
19541 heap_is_valid_oid (THREAD_ENTRY * thread_p, OID * oid_p)
19542 {
19543  DISK_ISVALID oid_valid = HEAP_ISVALID_OID (thread_p, oid_p);
19544 
19545  if (oid_valid != DISK_VALID)
19546  {
19547  if (oid_valid != DISK_ERROR)
19548  {
19549  assert (false);
19550  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid_p->volid, oid_p->pageid,
19551  oid_p->slotid);
19552  }
19553  return ER_FAILED;
19554  }
19555  else
19556  {
19557  return NO_ERROR;
19558  }
19559 }
19560 
19561 /*
19562  * heap_fix_header_page () - fix header page for a heap operation context
19563  * thread_p(in): thread entry
19564  * context(in): operation context
19565  * returns: error code or NO_ERROR
19566  */
19567 static int
19568 heap_fix_header_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
19569 {
19570  VPID header_vpid;
19571  int rc;
19572 
19573  assert (context != NULL);
19574  assert (context->header_page_watcher_p != NULL);
19575 
19576  if (context->header_page_watcher_p->pgptr != NULL)
19577  {
19578  /* already fixed */
19579  return NO_ERROR;
19580  }
19581 
19582  /* fix header page */
19583  header_vpid.volid = context->hfid.vfid.volid;
19584  header_vpid.pageid = context->hfid.hpgid;
19585 
19586  /* fix page */
19587  rc = pgbuf_ordered_fix (thread_p, &header_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, context->header_page_watcher_p);
19588  if (rc != NO_ERROR)
19589  {
19590  if (rc == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
19591  {
19592  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, header_vpid.volid, header_vpid.pageid);
19593  rc = ER_PAGE_LATCH_ABORTED;
19594  }
19595  return rc;
19596  }
19597 
19598  /* check page type */
19599  (void) pgbuf_check_page_ptype (thread_p, context->header_page_watcher_p->pgptr, PAGE_HEAP);
19600 
19601  /* all ok */
19602  return NO_ERROR;
19603 }
19604 
19605 /*
19606  * heap_fix_forward_page () - fix forward page for a heap operation context
19607  * thread_p(in): thread entry
19608  * context(in): operation context
19609  * forward_oid_hint(in): location of forward object (if known)
19610  * returns: error code or NO_ERROR
19611  *
19612  * NOTE: If forward_oid_hint is provided, this function will fix its page. If
19613  * not, the function will treat the context's home_recdes as a forwarding
19614  * record descriptor and read the identifier from it.
19615  */
19616 static int
19617 heap_fix_forward_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, OID * forward_oid_hint)
19618 {
19619  VPID forward_vpid;
19620  OID forward_oid;
19621  int rc;
19622 
19623  assert (context != NULL);
19624  assert (context->forward_page_watcher_p != NULL);
19625 
19626  if (context->forward_page_watcher_p->pgptr != NULL)
19627  {
19628  /* already fixed */
19629  return NO_ERROR;
19630  }
19631 
19632  if (forward_oid_hint == NULL)
19633  {
19634  assert (context->home_recdes.data != NULL);
19635 
19636  /* cast home record as forward oid if no hint is provided */
19637  forward_oid = *((OID *) context->home_recdes.data);
19638  }
19639  else
19640  {
19641  /* oid is provided, use it */
19642  COPY_OID (&forward_oid, forward_oid_hint);
19643  }
19644 
19645  /* prepare VPID */
19646  forward_vpid.pageid = forward_oid.pageid;
19647  forward_vpid.volid = forward_oid.volid;
19648 
19649  /* fix forward page */
19651  rc = pgbuf_ordered_fix (thread_p, &forward_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, context->forward_page_watcher_p);
19652  if (rc != NO_ERROR)
19653  {
19654  if (rc == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
19655  {
19656  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, forward_vpid.volid, forward_vpid.pageid);
19657  }
19658  return ER_FAILED;
19659  }
19660  (void) pgbuf_check_page_ptype (thread_p, context->forward_page_watcher_p->pgptr, PAGE_HEAP);
19661 
19662 #if defined(CUBRID_DEBUG)
19663  if (spage_get_record_type (context->forward_page_watcher_p->pgptr, forward_oid.slotid) != REC_NEWHOME)
19664  {
19666  forward_oid.slotid);
19667  return ER_FAILED;
19668  }
19669 #endif
19670 
19671  /* all ok */
19672  return NO_ERROR;
19673 }
19674 
19675 /*
19676  * heap_build_forwarding_recdes () - build a record descriptor for pointing to
19677  * a forward object
19678  * recdes_p(in): record descriptor to build into
19679  * rec_type(in): type of record
19680  * forward_oid(in): the oid where the forwarding record will point
19681  */
19682 static void
19683 heap_build_forwarding_recdes (RECDES * recdes_p, INT16 rec_type, OID * forward_oid)
19684 {
19685  assert (recdes_p != NULL);
19686  assert (forward_oid != NULL);
19687 
19688  recdes_p->type = rec_type;
19689  recdes_p->data = (char *) forward_oid;
19690 
19691  recdes_p->length = sizeof (OID);
19692  recdes_p->area_size = sizeof (OID);
19693 }
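/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): it shows how a
 * REC_RELOCATION stub pointing at a REC_NEWHOME copy could be built. Note the design choice above:
 * the record descriptor only borrows the forward OID's memory, so the OID must outlive the
 * descriptor. The helper name is hypothetical.
 */
#if 0
static void
example_build_relocation_stub (RECDES * home_stub, OID * newhome_oid)
{
  /* home_stub->data will point directly at *newhome_oid (sizeof (OID) bytes) */
  heap_build_forwarding_recdes (home_stub, REC_RELOCATION, newhome_oid);
}
#endif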
19694 
19695 /*
19696  * heap_insert_adjust_recdes_header () - adjust record header for insert
19697  * operation
19698  * thread_p(in): thread entry
19699  * insert_context(in/out): insert context
19700  * is_mvcc_class(in): true, if MVCC class
19701  * returns: error code or NO_ERROR
19702  *
19703  * NOTE: For MVCC class, it will add an insert_id to the header. For non-MVCC class, it will clear all flags.
19704  * The function will alter the provided record descriptor data area.
19705  */
19706 static int
19707 heap_insert_adjust_recdes_header (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * insert_context, bool is_mvcc_class)
19708 {
19709  MVCC_REC_HEADER mvcc_rec_header;
19710  int record_size;
19711  int repid_and_flag_bits = 0, mvcc_flags = 0;
19712  char *new_ins_mvccid_pos_p, *start_p, *existing_data_p;
19713  MVCCID mvcc_id;
19714  bool use_optimization = false;
19715 
19716  assert (insert_context != NULL);
19717  assert (insert_context->type == HEAP_OPERATION_INSERT);
19718  assert (insert_context->recdes_p != NULL);
19719 
19720  record_size = insert_context->recdes_p->length;
19721 
19722  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (insert_context->recdes_p->data);
19723  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
19724 
19725 #if defined (SERVER_MODE)
19726  /* In case of partitions, it is possible to have OR_MVCC_FLAG_VALID_PREV_VERSION flag. */
19727  use_optimization = (is_mvcc_class && (insert_context->update_in_place == UPDATE_INPLACE_NONE)
19728  && (!(mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION))
19729  && !heap_is_big_length (record_size + OR_MVCCID_SIZE) && !insert_context->is_bulk_op);
19730 #endif
19731 
19732  if (use_optimization)
19733  {
19734  /*
19735  * Most common case. Since this is UPDATE_INPLACE_NONE, the header does not have a DELID.
19736  * Optimize the header adjustment.
19737  */
19738  assert (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID));
19739  mvcc_id = logtb_get_current_mvccid (thread_p);
19740 
19741  start_p = insert_context->recdes_p->data;
19742  /* Skip bytes up to insid_offset */
19743  new_ins_mvccid_pos_p = start_p + OR_MVCC_INSERT_ID_OFFSET;
19744 
19745  if (!(mvcc_flags & OR_MVCC_FLAG_VALID_INSID))
19746  {
19747  /* Sets MVCC INSID flag, overwrite first four bytes. */
19748  repid_and_flag_bits |= (OR_MVCC_FLAG_VALID_INSID << OR_MVCC_FLAG_SHIFT_BITS);
19749  OR_PUT_INT (start_p, repid_and_flag_bits);
19750 
19751  /* Move the record data before inserting INSID */
19752  assert (insert_context->recdes_p->area_size >= insert_context->recdes_p->length + OR_MVCCID_SIZE);
19753  existing_data_p = new_ins_mvccid_pos_p;
19754  memmove (new_ins_mvccid_pos_p + OR_MVCCID_SIZE, existing_data_p,
19755  insert_context->recdes_p->length - OR_MVCC_INSERT_ID_OFFSET);
19756  insert_context->recdes_p->length += OR_MVCCID_SIZE;
19757  }
19758 
19759  /* Sets the MVCC INSID */
19760  OR_PUT_BIGINT (new_ins_mvccid_pos_p, &mvcc_id);
19761 
19762  return NO_ERROR;
19763  }
19764 
19765  /* read MVCC header from record */
19766  if (or_mvcc_get_header (insert_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
19767  {
19768  return ER_FAILED;
19769  }
19770 
19771  if (insert_context->update_in_place != UPDATE_INPLACE_OLD_MVCCID)
19772  {
19773 #if defined (SERVER_MODE)
19774  if (is_mvcc_class && !insert_context->is_bulk_op)
19775  {
19776  /* get MVCC id */
19777  mvcc_id = logtb_get_current_mvccid (thread_p);
19778 
19779  /* set MVCC INSID if necessary */
19780  if (!MVCC_IS_FLAG_SET (&mvcc_rec_header, OR_MVCC_FLAG_VALID_INSID))
19781  {
19782  MVCC_SET_FLAG (&mvcc_rec_header, OR_MVCC_FLAG_VALID_INSID);
19783  record_size += OR_MVCCID_SIZE;
19784  }
19785  MVCC_SET_INSID (&mvcc_rec_header, mvcc_id);
19786  }
19787  else
19788 #endif /* SERVER_MODE */
19789  {
19790  int curr_header_size, new_header_size;
19791 
19792  /* strip MVCC information */
19793  curr_header_size = mvcc_header_size_lookup[mvcc_rec_header.mvcc_flag];
19794  MVCC_CLEAR_ALL_FLAG_BITS (&mvcc_rec_header);
19795  new_header_size = mvcc_header_size_lookup[mvcc_rec_header.mvcc_flag];
19796 
19797  /* compute new record size */
19798  record_size -= (curr_header_size - new_header_size);
19799  }
19800  }
19801  else if (MVCC_IS_HEADER_DELID_VALID (&mvcc_rec_header))
19802  {
19803  insert_context->is_redistribute_insert_with_delid = true;
19804  }
19805 
19806  MVCC_CLEAR_FLAG_BITS (&mvcc_rec_header, OR_MVCC_FLAG_VALID_PREV_VERSION);
19807 
19808  if (is_mvcc_class && heap_is_big_length (record_size))
19809  {
19810  /* for multipage records, set MVCC header size to maximum size */
19811  HEAP_MVCC_SET_HEADER_MAXIMUM_SIZE (&mvcc_rec_header);
19812  }
19813 
19814  /* write the header back to the record */
19815  if (or_mvcc_set_header (insert_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
19816  {
19817  return ER_FAILED;
19818  }
19819 
19820  /* all ok */
19821  return NO_ERROR;
19822 }
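/*
 * Illustrative sketch (not part of the original heap_file.c, kept disabled): it restates the bit
 * layout manipulated in the optimized path above. The first four bytes of a record hold the
 * representation id plus the MVCC flags shifted by OR_MVCC_FLAG_SHIFT_BITS, so marking the insert
 * MVCCID as present is a single OR on that word. The helper name is hypothetical.
 */
#if 0
static void
example_mark_insid_flag (char *record_data)
{
  int repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (record_data);

  /* turn on the "insert MVCCID present" flag bit, keeping the repid and the other flags */
  repid_and_flag_bits |= (OR_MVCC_FLAG_VALID_INSID << OR_MVCC_FLAG_SHIFT_BITS);
  OR_PUT_INT (record_data, repid_and_flag_bits);
}
#endif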
19823 
19824 /*
19825  * heap_update_adjust_recdes_header () - adjust record header for update
19826  * operation
19827  * thread_p(in): thread entry
19828  * update_context(in/out): update context
19829  * is_mvcc_class(in): specifies whether is MVCC class
19830  * returns: error code or NO_ERROR
19831  *
19832  * NOTE: For MVCC operation, it will add an insert_id and prev version to the header. The prev_version_lsa will be
19833  * filled at the end of the update, in heap_update_set_prev_version().
19834  * For non-MVCC operations, it will clear all flags.
19835  * The function will alter the provided record descriptor data area.
19836  */
19837 static int
19838 heap_update_adjust_recdes_header (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * update_context, bool is_mvcc_class)
19839 {
19840  MVCC_REC_HEADER mvcc_rec_header;
19841  int record_size;
19842  int repid_and_flag_bits = 0, mvcc_flags = 0, update_mvcc_flags;
19843  char *start_p, *new_ins_mvccid_pos_p, *existing_data_p, *new_data_p;
19844  MVCCID mvcc_id;
19845  bool use_optimization = false;
19846  LOG_LSA null_lsa = LSA_INITIALIZER;
19847  bool is_mvcc_op = false;
19848 
19849  assert (update_context != NULL);
19850  assert (update_context->type == HEAP_OPERATION_UPDATE);
19851  assert (update_context->recdes_p != NULL);
19852 
19853  record_size = update_context->recdes_p->length;
19854 
19855  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (update_context->recdes_p->data);
19856  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
19857  update_mvcc_flags = OR_MVCC_FLAG_VALID_INSID | OR_MVCC_FLAG_VALID_PREV_VERSION;
19858 
19859  is_mvcc_op = HEAP_UPDATE_IS_MVCC_OP (is_mvcc_class, update_context->update_in_place);
19860 #if defined (SERVER_MODE)
19861  use_optimization = (is_mvcc_op && !heap_is_big_length (record_size + OR_MVCCID_SIZE + OR_MVCC_PREV_VERSION_LSA_SIZE));
19862 #endif
19863 
19864  if (use_optimization)
19865  {
19866  /*
19867  * Most common case. Since this is UPDATE_INPLACE_NONE, the header does not have a DELID.
19868  * Optimize the header adjustment.
19869  */
19870  assert (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID));
19871  mvcc_id = logtb_get_current_mvccid (thread_p);
19872  start_p = update_context->recdes_p->data;
19873 
19874  /* Skip bytes up to insid_offset */
19875  new_ins_mvccid_pos_p = start_p + OR_MVCC_INSERT_ID_OFFSET;
19876 
19877  /* Check whether we need to set flags and to reserve space. */
19878  if ((mvcc_flags & update_mvcc_flags) != update_mvcc_flags)
19879  {
19880  /* Need to set flags and reserve space for MVCCID and/or PREV LSA */
19881  existing_data_p = new_ins_mvccid_pos_p;
19882 
19883  /* Computes added bytes and new flags */
19884  if (mvcc_flags & OR_MVCC_FLAG_VALID_INSID)
19885  {
19886  existing_data_p += OR_MVCCID_SIZE;
19887  }
19888 
19889  if (mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION)
19890  {
19891  existing_data_p += OR_MVCC_PREV_VERSION_LSA_SIZE;
19892  }
19893 
19894  /* Sets the new flags, overwrite first four bytes. */
19895  repid_and_flag_bits |= (update_mvcc_flags << OR_MVCC_FLAG_SHIFT_BITS);
19896  OR_PUT_INT (start_p, repid_and_flag_bits);
19897 
19898  /* Move the record data before inserting INSID and LOG_LSA */
19899  new_data_p = new_ins_mvccid_pos_p + OR_MVCCID_SIZE + OR_MVCC_PREV_VERSION_LSA_SIZE;
19900  assert (existing_data_p < new_data_p);
19901  assert (update_context->recdes_p->area_size >= update_context->recdes_p->length
19902  + CAST_BUFLEN (new_data_p - existing_data_p));
19903  memmove (new_data_p, existing_data_p,
19904  update_context->recdes_p->length - CAST_BUFLEN (existing_data_p - start_p));
19905  update_context->recdes_p->length += (CAST_BUFLEN (new_data_p - existing_data_p));
19906  }
19907 
19908  /* Sets the MVCC INSID */
19909  OR_PUT_BIGINT (new_ins_mvccid_pos_p, &mvcc_id);
19910 
19911  /*
19912  * Adds NULL LSA after INSID. The prev_version_lsa will be filled at the end of the update,
19913  * in heap_update_set_prev_version().
19914  */
19915  memcpy (new_ins_mvccid_pos_p + OR_MVCCID_SIZE, &null_lsa, OR_MVCC_PREV_VERSION_LSA_SIZE);
19916  return NO_ERROR;
19917  }
19918 
19919  /* read MVCC header from record */
19920  if (or_mvcc_get_header (update_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
19921  {
19922  return ER_FAILED;
19923  }
19924 
19925  if (update_context->update_in_place != UPDATE_INPLACE_OLD_MVCCID)
19926  {
19927 #if defined (SERVER_MODE)
19928  if (is_mvcc_class)
19929  {
19930  /* get MVCC id */
19931  MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);
19932 
19933  /* set MVCC INSID if necessary */
19934  if (!MVCC_IS_FLAG_SET (&mvcc_rec_header, OR_MVCC_FLAG_VALID_INSID))
19935  {
19936  MVCC_SET_FLAG (&mvcc_rec_header, OR_MVCC_FLAG_VALID_INSID);
19937  record_size += OR_MVCCID_SIZE;
19938  }
19939  MVCC_SET_INSID (&mvcc_rec_header, mvcc_id);
19940  }
19941  else
19942 #endif /* SERVER_MODE */
19943  {
19944  int curr_header_size, new_header_size;
19945 
19946  /* strip MVCC information */
19947  curr_header_size = mvcc_header_size_lookup[mvcc_rec_header.mvcc_flag];
19948  MVCC_CLEAR_ALL_FLAG_BITS (&mvcc_rec_header);
19949  new_header_size = mvcc_header_size_lookup[mvcc_rec_header.mvcc_flag];
19950 
19951  /* compute new record size */
19952  record_size -= (curr_header_size - new_header_size);
19953  }
19954  }
19955 
19956 #if defined (SERVER_MODE)
19957  if (is_mvcc_op)
19958  {
19959  if (!MVCC_IS_FLAG_SET (&mvcc_rec_header, OR_MVCC_FLAG_VALID_PREV_VERSION))
19960  {
19961  MVCC_SET_FLAG_BITS (&mvcc_rec_header, OR_MVCC_FLAG_VALID_PREV_VERSION);
19962  record_size += OR_MVCC_PREV_VERSION_LSA_SIZE;
19963  }
19964 
19965  /* The prev_version_lsa will be filled at the end of the update, in heap_update_set_prev_version() */
19966  LSA_SET_NULL (&mvcc_rec_header.prev_version_lsa);
19967  }
19968  else
19969 #endif /* SERVER_MODE */
19970  {
19971  MVCC_CLEAR_FLAG_BITS (&mvcc_rec_header, OR_MVCC_FLAG_VALID_PREV_VERSION);
19972  }
19973 
19974  if (is_mvcc_class && heap_is_big_length (record_size))
19975  {
19976  /* for multipage records, set MVCC header size to maximum size */
19977  HEAP_MVCC_SET_HEADER_MAXIMUM_SIZE (&mvcc_rec_header);
19978  }
19979 
19980  /* write the header back to the record */
19981  if (or_mvcc_set_header (update_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
19982  {
19983  return ER_FAILED;
19984  }
19985 
19986  /* all ok */
19987  return NO_ERROR;
19988 }
19989 
19990 /*
19991  * heap_insert_handle_multipage_record () - handle a multipage object for insert
19992  * thread_p(in): thread entry
19993  * context(in): operation context
19994  *
19995  * NOTE: In case of multipage records, this function will perform the overflow
19996  * insertion and provide a forwarding record descriptor in map_recdes.
19997  * recdes_p will point to the map_recdes structure for insertion in home
19998  * page.
19999  */
20000 static int
20001 heap_insert_handle_multipage_record (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
20002 {
20003  assert (context != NULL);
20004  assert (context->type == HEAP_OPERATION_INSERT || context->type == HEAP_OPERATION_UPDATE);
20005  assert (context->recdes_p != NULL);
20006 
20007  /* check for big record */
20008  if (!heap_is_big_length (context->recdes_p->length))
20009  {
20010  return NO_ERROR;
20011  }
20012 
20013  /* insert overflow record */
20014  if (heap_ovf_insert (thread_p, &context->hfid, &context->ovf_oid, context->recdes_p) == NULL)
20015  {
20016  return ER_FAILED;
20017  }
20018 
20019  /* Add a map record to point to the record in overflow */
20020  /* NOTE: MVCC information is held in overflow record */
20021  heap_build_forwarding_recdes (&context->map_recdes, REC_BIGONE, &context->ovf_oid);
20022 
20023  /* use map_recdes for page insertion */
20024  context->recdes_p = &context->map_recdes;
20025 
20026  /* all ok */
20027  return NO_ERROR;
20028 }
20029 
20030 /*
20031  * heap_get_insert_location_with_lock () - get a page (and possibly a slot)
20032  * for insert and lock the OID
20033  * thread_p(in): thread entry
20034  * context(in): operation context
20035  * home_hint_p(in): if not null, will try to find and lock a slot in hinted page
20036  * returns: error code or NO_ERROR
20037  *
20038  * NOTE: For all operations, this function will find a suitable page, put it
20039  * in context->home_page_watcher, find a suitable slot, lock it and
20040  * put the exact insert location in context->res_oid.
20041  * NOTE: If a home hint is present, the function will search for a free and
20042  * lockable slot ONLY in the hinted page. If no hint is present, it will
20043  * find the page on its own.
20044  */
20045 static int
20046 heap_get_insert_location_with_lock (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context,
20047  PGBUF_WATCHER * home_hint_p)
20048 {
20049  int slot_count, slot_id, lk_result;
20050  LOCK lock;
20051  int error_code = NO_ERROR;
20052 
20053  /* check input */
20054  assert (context != NULL);
20055  assert (context->type == HEAP_OPERATION_INSERT);
20056  assert (context->recdes_p != NULL);
20057 
20058  if (home_hint_p == NULL)
20059  {
20060  /* find and fix page for insert */
20061  if (heap_stats_find_best_page (thread_p, &context->hfid, context->recdes_p->length,
20062  (context->recdes_p->type != REC_NEWHOME), context->recdes_p->length,
20063  context->scan_cache_p, context->home_page_watcher_p) == NULL)
20064  {
20065  ASSERT_ERROR_AND_SET (error_code);
20066  return error_code;
20067  }
20068  }
20069  else
20070  {
20071  assert (home_hint_p->pgptr != NULL);
20072 
20073  /* check page for space and use hinted page as insert page */
20074  if (spage_max_space_for_new_record (thread_p, home_hint_p->pgptr) < context->recdes_p->length)
20075  {
20076  return ER_SP_NOSPACE_IN_PAGE;
20077  }
20078 
20079  context->home_page_watcher_p = home_hint_p;
20080  }
20081  assert (context->home_page_watcher_p->pgptr != NULL);
20082 
20083  /* partially populate output OID */
20084  context->res_oid.volid = pgbuf_get_volume_id (context->home_page_watcher_p->pgptr);
20085  context->res_oid.pageid = pgbuf_get_page_id (context->home_page_watcher_p->pgptr);
20086 
20087  /*
20088  * Find a slot that is lockable and lock it
20089  */
20090  /* determine lock type */
20091  if (OID_IS_ROOTOID (&context->class_oid))
20092  {
20093  /* class creation */
20094  lock = SCH_M_LOCK;
20095  }
20096  else
20097  {
20098  /* instance */
20099  if (context->is_bulk_op)
20100  {
20101  lock = NULL_LOCK;
20102  }
20103  else
20104  {
20105  lock = X_LOCK;
20106  }
20107  }
20108 
20109  /* retrieve number of slots in page */
20110  slot_count = spage_number_of_slots (context->home_page_watcher_p->pgptr);
20111 
20112  /* find REC_DELETED_WILL_REUSE slot or add new slot */
20113  /* slot_id == slot_count means add new slot */
20114  for (slot_id = 0; slot_id <= slot_count; slot_id++)
20115  {
20116  slot_id = spage_find_free_slot (context->home_page_watcher_p->pgptr, NULL, slot_id);
20117  if (slot_id == SP_ERROR)
20118  {
20119  break; /* this will not happen */
20120  }
20121 
20122  context->res_oid.slotid = slot_id;
20123 
20124  if (lock == NULL_LOCK)
20125  {
20126  /* immediately return without locking it */
20127  return NO_ERROR;
20128  }
20129 
20130  /* lock the object to be inserted conditionally */
20131  lk_result = lock_object (thread_p, &context->res_oid, &context->class_oid, lock, LK_COND_LOCK);
20132  if (lk_result == LK_GRANTED)
20133  {
20134  /* successfully locked! */
20135  return NO_ERROR;
20136  }
20137  else if (lk_result != LK_NOTGRANTED_DUE_TIMEOUT)
20138  {
20139 #if !defined(NDEBUG)
20140  if (lk_result == LK_NOTGRANTED_DUE_ABORTED)
20141  {
20142  LOG_TDES *tdes = LOG_FIND_CURRENT_TDES (thread_p);
20144  }
20145  else
20146  {
20147  assert (false); /* unknown locking error */
20148  }
20149 #endif
20150  break; /* go to error case */
20151  }
20152  }
20153 
20154  /* either lock error or no slot was found in page (which should not happen) */
20155  OID_SET_NULL (&context->res_oid);
20156  if (context->home_page_watcher_p != home_hint_p)
20157  {
20158  pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
20159  }
20160  else
20161  {
20162  context->home_page_watcher_p = NULL;
20163  }
20164  assert (false);
20165  return ER_FAILED;
20166 }
20167 
20168 /*
20169  * heap_find_location_and_insert_rec_newhome () - find location in a heap page
20170  * and then insert context->record
20171  * thread_p(in): thread entry
20172  * context(in): operation context
20173  * returns: error code or NO_ERROR
20174  *
20175  * NOTE: This function will find a suitable page, put it in
20176  * context->home_page_watcher, insert context->recdes_p into that page
20177  * and put recdes location into context->res_oid.
20178  * Currently, this function is called only for REC_NEWHOME records, when
20179  * lock acquisition is not required.
20180  * The caller must log the inserted data.
20181  */
20182 static int
20184 {
20185  int sp_success;
20186  int error_code = NO_ERROR;
20187 
20188  /* check input */
20189  assert (context != NULL);
20190  assert (context->type == HEAP_OPERATION_INSERT);
20191  assert (context->recdes_p != NULL);
20192  assert (context->recdes_p->type == REC_NEWHOME);
20193 
20194 #if defined(CUBRID_DEBUG)
20195  if (heap_is_big_length (context->recdes_p->length))
20196  {
20198  "heap_insert_internal: This function does not accept"
20199  " objects longer than %d. An object of %d was given\n", heap_Maxslotted_reclength, recdes->length);
20200  return ER_FAILED;
20201  }
20202 #endif
20203 
20204  if (heap_stats_find_best_page (thread_p, &context->hfid, context->recdes_p->length, false, context->recdes_p->length,
20205  context->scan_cache_p, context->home_page_watcher_p) == NULL)
20206  {
20207  ASSERT_ERROR_AND_SET (error_code);
20208  return error_code;
20209  }
20210 
20211 #if !defined(NDEBUG)
20212  if (context->scan_cache_p != NULL)
20213  {
20214  OID heap_class_oid;
20215 
20216  assert (heap_get_class_oid_from_page (thread_p, context->home_page_watcher_p->pgptr, &heap_class_oid) ==
20217  NO_ERROR);
20218 
20219  assert (OID_EQ (&heap_class_oid, &context->scan_cache_p->node.class_oid));
20220  }
20221 #endif
20222 
20223  assert (context->home_page_watcher_p->pgptr != NULL);
20224  (void) pgbuf_check_page_ptype (thread_p, context->home_page_watcher_p->pgptr, PAGE_HEAP);
20225 
20226  sp_success =
20227  spage_insert (thread_p, context->home_page_watcher_p->pgptr, context->recdes_p, &context->res_oid.slotid);
20228  if (sp_success == SP_SUCCESS)
20229  {
20231  context->res_oid.pageid = pgbuf_get_page_id (context->home_page_watcher_p->pgptr);
20232 
20233  return NO_ERROR;
20234  }
20235  else
20236  {
20237  assert (false);
20238  if (sp_success != SP_ERROR)
20239  {
20241  }
20242  OID_SET_NULL (&context->res_oid);
20243  pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
20244  return ER_FAILED;
20245  }
20246 }
20247 
20248 /*
20249  * heap_insert_newhome () - will find an insert location for a REC_NEWHOME
20250  * record and will insert it there
20251  * thread_p(in): thread entry
20252  * parent_context(in): the context of the parent operation
20253  * recdes_p(in): record descriptor of newhome record
20254  * out_oid_p(in): pointer to an OID object to be populated with the result
20255  * OID of the insert
20256  * newhome_pg_watcher(out): if not null, should keep the page watcher of newhome
20257  - necessary to set prev version afterwards
20258  * returns: error code or NO_ERROR
20259  *
20260  * NOTE: This function works ONLY in an MVCC operation. It will create a new
20261  * context for the insert operation.
20262  */
20263 static int
20264 heap_insert_newhome (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * parent_context, RECDES * recdes_p,
20265  OID * out_oid_p, PGBUF_WATCHER * newhome_pg_watcher)
20266 {
20267  HEAP_OPERATION_CONTEXT ins_context;
20268  int error_code = NO_ERROR;
20269 
20270  /* check input */
20271  assert (recdes_p != NULL);
20272  assert (parent_context != NULL);
20273  assert (parent_context->type == HEAP_OPERATION_DELETE || parent_context->type == HEAP_OPERATION_UPDATE);
20274 
20275  /* build insert context */
20276  heap_create_insert_context (&ins_context, &parent_context->hfid, &parent_context->class_oid, recdes_p, NULL);
20277 
20278  /* physical insertion */
20279  error_code = heap_find_location_and_insert_rec_newhome (thread_p, &ins_context);
20280  if (error_code != NO_ERROR)
20281  {
20282  ASSERT_ERROR ();
20283  return error_code;
20284  }
20285 
20286  HEAP_PERF_TRACK_EXECUTE (thread_p, parent_context);
20287 
20288  /* log operation */
20289 
20290  /* This is a relocation of existing record, be it deleted or updated. Vacuum is not supposed to be notified since he
20291  * never check REC_NEWHOME type records. An MVCC type logging is not required here, a simple RVHF_INSERT will do. */
20292  heap_log_insert_physical (thread_p, ins_context.home_page_watcher_p->pgptr, &ins_context.hfid.vfid,
20293  &ins_context.res_oid, ins_context.recdes_p, false, false);
20294 
20295  HEAP_PERF_TRACK_LOGGING (thread_p, parent_context);
20296 
20297  /* advertise insert location */
20298  if (out_oid_p != NULL)
20299  {
20300  COPY_OID (out_oid_p, &ins_context.res_oid);
20301  }
20302 
20303  /* mark insert page as dirty */
20304  pgbuf_set_dirty (thread_p, ins_context.home_page_watcher_p->pgptr, DONT_FREE);
20305 
20306  if (newhome_pg_watcher != NULL)
20307  {
20308  /* keep the page watcher, necessary for heap_update_set_prev_version() */
20309  pgbuf_replace_watcher (thread_p, ins_context.home_page_watcher_p, newhome_pg_watcher);
20310  }
20311 
20312  /* unfix all pages of insert context */
20313  heap_unfix_watchers (thread_p, &ins_context);
20314  /* all ok */
20315  return NO_ERROR;
20316 }
20317 
20318 /*
20319  * heap_insert_physical () - physical insert into heap page
20320  * thread_p(in): thread entry
20321  * context(in): operation context
20322  * is_mvcc_op(in): MVCC or non-MVCC operation
20323  *
20324  * NOTE: This function should receive a fixed page and a location in res_oid,
20325  * where the context->recdes_p will go in.
20326  */
20327 static int
20329 {
20330  /* check input */
20331  assert (context != NULL);
20332  assert (context->type == HEAP_OPERATION_INSERT);
20333  assert (context->recdes_p != NULL);
20334  assert (context->home_page_watcher_p->pgptr != NULL);
20335 
20336  /* assume we have the exact location for insert as well as a fixed page */
20337  assert (context->res_oid.volid != NULL_VOLID);
20338  assert (context->res_oid.pageid != NULL_PAGEID);
20339  assert (context->res_oid.slotid != NULL_SLOTID);
20340 
20341 #if defined(CUBRID_DEBUG)
20342  /* function should have received map record if input record was multipage */
20343  if (heap_is_big_length (context->recdes_p->length))
20344  {
20346  "heap_insert_internal: This function does not accept"
20347  " objects longer than %d. An object of %d was given\n", heap_Maxslotted_reclength, recdes->length);
20348  return ER_FAILED;
20349  }
20350 
20351  /* check we're inserting in a page of desired class */
20352  if (!OID_ISNULL (&context->class_oid))
20353  {
20354  OID heap_class_oid;
20355  int rc;
20356 
20357  rc = heap_get_class_oid_from_page (thread_p, context->home_page_watcher_p->pgptr, &heap_class_oid);
20358  assert (rc == NO_ERROR);
20359  assert (OID_EQ (&heap_class_oid, &context->class_oid));
20360  }
20361 #endif
20362 
20363  /* physical insertion */
20364  if (spage_insert_at (thread_p, context->home_page_watcher_p->pgptr, context->res_oid.slotid, context->recdes_p) !=
20365  SP_SUCCESS)
20366  {
20368  OID_SET_NULL (&context->res_oid);
20369  return ER_FAILED;
20370  }
20371 
20372  /* all ok */
20373  return NO_ERROR;
20374 }
20375 
20376 /*
20377  * heap_log_insert_physical () - add logging information for physical insertion
20378  * thread_p(in): thread entry
20379  * page_p(in): page where insert was performed
20380  * vfid_p(in): virtual file id
20381  * oid_p(in): newly inserted object id
20382  * recdes_p(in): record descriptor of inserted record
20383  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
20384  * is_redistribute_op(in): whether the insertion is due to partition
20385  * redistribute operation and has a valid delid
20386  */
20387 static void
20388 heap_log_insert_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p, RECDES * recdes_p,
20389  bool is_mvcc_op, bool is_redistribute_op)
20390 {
20391  LOG_DATA_ADDR log_addr;
20392 
20393  /* populate address field */
20394  log_addr.vfid = vfid_p;
20395  log_addr.offset = oid_p->slotid;
20396  log_addr.pgptr = page_p;
20397 
20398  if (is_mvcc_op)
20399  {
20400  if (is_redistribute_op)
20401  {
20402  /* this is actually a deleted record, inserted due to a PARTITION reorganize operation. Log this operation
20403  * separately */
20404  heap_mvcc_log_redistribute (thread_p, recdes_p, &log_addr);
20405  }
20406  else
20407  {
20408  /* MVCC logging */
20409  heap_mvcc_log_insert (thread_p, recdes_p, &log_addr);
20410  }
20411  }
20412  else
20413  {
20414  INT16 bytes_reserved;
20415  RECDES temp_recdes;
20416 
20417  if (recdes_p->type == REC_ASSIGN_ADDRESS)
20418  {
20419  /* special case for REC_ASSIGN */
20420  temp_recdes.type = recdes_p->type;
20421  temp_recdes.area_size = sizeof (bytes_reserved);
20422  temp_recdes.length = sizeof (bytes_reserved);
20423  bytes_reserved = (INT16) recdes_p->length;
20424  temp_recdes.data = (char *) &bytes_reserved;
20425  log_append_undoredo_recdes (thread_p, RVHF_INSERT, &log_addr, NULL, &temp_recdes);
20426  }
20427  else if (recdes_p->type == REC_NEWHOME)
20428  {
20429  /* replication for REC_NEWHOME is performed by following the link (OID) from REC_RELOCATION */
20430  log_append_undoredo_recdes (thread_p, RVHF_INSERT_NEWHOME, &log_addr, NULL, recdes_p);
20431  }
20432  else
20433  {
20434  log_append_undoredo_recdes (thread_p, RVHF_INSERT, &log_addr, NULL, recdes_p);
20435  }
20436  }
20437 }
20438 
20439 /*
20440  * heap_delete_adjust_header () - adjust MVCC record header for delete operation
20441  *
20442  * header_p(in): MVCC record header
20443  * mvcc_id(in): MVCC identifier
20444  * need_mvcc_header_max_size(in): true, if need maximum size for MVCC header
20445  *
20446  * NOTE: Only applicable for MVCC operations.
20447  */
20448 static void
20449 heap_delete_adjust_header (MVCC_REC_HEADER * header_p, MVCCID mvcc_id, bool need_mvcc_header_max_size)
20450 {
20451  assert (header_p != NULL);
20452 
20454  MVCC_SET_DELID (header_p, mvcc_id);
20455 
20456  if (need_mvcc_header_max_size)
20457  {
20458  /* set maximum MVCC header size */
20460  }
20461 }
20462 
20463 /*
20464  * heap_get_delete_location () - find the desired object and fix the page
20465  * thread_p(in): thread entry
20466  * context(in): delete operation context
20467  * return: error code or NO_ERROR
20468  */
20469 static int
20471 {
20472  VPID vpid;
20473 
20474  /* check input */
20475  assert (context != NULL);
20476  assert (!OID_ISNULL (&context->oid));
20477  assert (!HFID_IS_NULL (&context->hfid));
20478 
20479  /* get vpid from object */
20480  vpid.pageid = context->oid.pageid;
20481  vpid.volid = context->oid.volid;
20482 
20483  /* first try to retrieve cached fixed page from scancache */
20484  if (context->scan_cache_p != NULL && context->scan_cache_p->page_watcher.pgptr != NULL
20485  && context->scan_cache_p->cache_last_fix_page == true)
20486  {
20487  VPID *vpid_incache_p = pgbuf_get_vpid_ptr (context->scan_cache_p->page_watcher.pgptr);
20488 
20489  if (VPID_EQ (&vpid, vpid_incache_p))
20490  {
20491  /* we can get it from the scancache */
20492  pgbuf_replace_watcher (thread_p, &context->scan_cache_p->page_watcher, context->home_page_watcher_p);
20493  }
20494  else
20495  {
20496  /* last scancache fixed page is not desired page */
20497  pgbuf_ordered_unfix (thread_p, &context->scan_cache_p->page_watcher);
20498  }
20499  assert (context->scan_cache_p->page_watcher.pgptr == NULL);
20500  }
20501 
20502  /* if scancache page was not suitable, fix desired page */
20503  if (context->home_page_watcher_p->pgptr == NULL)
20504  {
20505  (void) heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, context->scan_cache_p,
20506  context->home_page_watcher_p);
20507  if (context->home_page_watcher_p->pgptr == NULL)
20508  {
20509  int rc;
20510 
20511  if (er_errid () == ER_PB_BAD_PAGEID)
20512  {
20514  context->oid.pageid, context->oid.slotid);
20515  }
20516 
20517  /* something went wrong, return */
20518  ASSERT_ERROR_AND_SET (rc);
20519  return rc;
20520  }
20521  }
20522 
20523 #if !defined(NDEBUG)
20524  if (context->scan_cache_p != NULL)
20525  {
20526  OID heap_class_oid;
20527 
20528  assert (heap_get_class_oid_from_page (thread_p, context->home_page_watcher_p->pgptr, &heap_class_oid) ==
20529  NO_ERROR);
20530  assert ((OID_EQ (&heap_class_oid, &context->scan_cache_p->node.class_oid))
20531  || (OID_ISNULL (&context->scan_cache_p->node.class_oid)
20533  context->oid.slotid) == REC_ASSIGN_ADDRESS));
20534  }
20535 #endif
20536 
20537  /* all ok */
20538  return NO_ERROR;
20539 }
20540 
20541 /*
20542  * heap_delete_bigone () - delete a REC_BIGONE record
20543  * thread_p(in): thread entry
20544  * context(in): operation context
20545  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
20546  */
20547 static int
20548 heap_delete_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
20549 {
20550  OID overflow_oid;
20551  int rc;
20552 
20553  /* check input */
20554  assert (context != NULL);
20555  assert (context->type == HEAP_OPERATION_DELETE);
20556  assert (context->home_recdes.data != NULL);
20557  assert (context->home_page_watcher_p != NULL);
20558  assert (context->home_page_watcher_p->pgptr != NULL);
20559  assert (context->overflow_page_watcher_p != NULL);
20560  assert (context->overflow_page_watcher_p->pgptr == NULL);
20561 
20562  /* MVCC info is in overflow page, we only keep and OID in home */
20563  overflow_oid = *((OID *) context->home_recdes.data);
20564 
20565  /* reset overflow watcher rank */
20567 
20568  if (is_mvcc_op)
20569  {
20570  MVCC_REC_HEADER overflow_header;
20571  VPID overflow_vpid;
20572  LOG_DATA_ADDR log_addr;
20573  MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);
20574 
20575  /* fix overflow page */
20576  overflow_vpid.pageid = overflow_oid.pageid;
20577  overflow_vpid.volid = overflow_oid.volid;
20579  rc = pgbuf_ordered_fix (thread_p, &overflow_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, context->overflow_page_watcher_p);
20580  if (rc != NO_ERROR)
20581  {
20582  if (rc == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
20583  {
20585  overflow_vpid.pageid);
20586  }
20587  return rc;
20588  }
20589 
20590  /* check overflow page type */
20591  (void) pgbuf_check_page_ptype (thread_p, context->overflow_page_watcher_p->pgptr, PAGE_OVERFLOW);
20592 
20593  /* fetch header from overflow */
20594  if (heap_get_mvcc_rec_header_from_overflow (context->overflow_page_watcher_p->pgptr, &overflow_header, NULL) !=
20595  NO_ERROR)
20596  {
20597  return ER_FAILED;
20598  }
20600 
20601  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
20602 
20603  /* log operation */
20604  log_addr.pgptr = context->overflow_page_watcher_p->pgptr;
20605  log_addr.vfid = &context->hfid.vfid;
20606  log_addr.offset = overflow_oid.slotid;
20607  heap_mvcc_log_delete (thread_p, &log_addr, RVHF_MVCC_DELETE_OVERFLOW);
20608 
20609  HEAP_PERF_TRACK_LOGGING (thread_p, context);
20610 
20611  /* adjust header; we don't care to make header max size since it's already done */
20612  heap_delete_adjust_header (&overflow_header, mvcc_id, false);
20613 
20614  /* write header to overflow */
20615  rc = heap_set_mvcc_rec_header_on_overflow (context->overflow_page_watcher_p->pgptr, &overflow_header);
20616  if (rc != NO_ERROR)
20617  {
20618  return rc;
20619  }
20620 
20621  /* set page as dirty */
20622  pgbuf_set_dirty (thread_p, context->overflow_page_watcher_p->pgptr, DONT_FREE);
20623 
20624  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
20625 
20626  /* Home record is not changed, but page max MVCCID and vacuum status have to change. Also vacuum needs to be
20627  * vacuum with the location of home record (REC_RELOCATION). */
20628  log_addr.vfid = &context->hfid.vfid;
20629  log_addr.pgptr = context->home_page_watcher_p->pgptr;
20630  log_addr.offset = context->oid.slotid;
20631  heap_mvcc_log_home_no_change (thread_p, &log_addr);
20632 
20633  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
20634 
20635  HEAP_PERF_TRACK_LOGGING (thread_p, context);
20636 
20638  }
20639  else
20640  {
20641  bool is_reusable = heap_is_reusable_oid (context->file_type);
20642 
20643  /* fix header page */
20644  rc = heap_fix_header_page (thread_p, context);
20645  if (rc != NO_ERROR)
20646  {
20647  return rc;
20648  }
20649 
20650  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
20651 
20652  if (context->home_page_watcher_p->page_was_unfixed)
20653  {
20654  /*
20655  * Need to get the record again, since record may have changed
20656  * by other transactions (INSID removed by VACUUM, page compact).
20657  * The object was already locked, so the record size may be the
20658  * same or smaller (INSID removed by VACUUM).
20659  */
20660  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
20661  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
20662  &context->home_recdes, is_peeking) != S_SUCCESS)
20663  {
20664  return ER_FAILED;
20665  }
20666  }
20667 
20668  /* log operation */
20669  heap_log_delete_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
20670  &context->home_recdes, is_reusable, NULL);
20671 
20672  HEAP_PERF_TRACK_LOGGING (thread_p, context);
20673 
20674  /* physical deletion of home record */
20675  rc = heap_delete_physical (thread_p, &context->hfid, context->home_page_watcher_p->pgptr, &context->oid);
20676  if (rc != NO_ERROR)
20677  {
20678  return rc;
20679  }
20680 
20681  /* physical deletion of overflow record */
20682  if (heap_ovf_delete (thread_p, &context->hfid, &overflow_oid, NULL) == NULL)
20683  {
20684  return ER_FAILED;
20685  }
20686 
20687  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
20688 
20690  }
20691 
20692  /* all ok */
20693  return NO_ERROR;
20694 }
20695 
20696 /*
20697  * heap_delete_relocation () - delete a REC_RELOCATION record
20698  * thread_p(in): thread entry
20699  * context(in): operation context
20700  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
20701  * returns: error code or NO_ERROR
20702  */
20703 static int
20704 heap_delete_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
20705 {
20706  RECDES forward_recdes;
20707  OID forward_oid;
20708  int rc;
20709 
20710  /* check input */
20711  assert (context != NULL);
20712  assert (context->type == HEAP_OPERATION_DELETE);
20713  assert (context->record_type == REC_RELOCATION);
20714  assert (context->home_page_watcher_p != NULL);
20715  assert (context->home_page_watcher_p->pgptr != NULL);
20716  assert (context->forward_page_watcher_p != NULL);
20717 
20718  /* get forward oid */
20719  forward_oid = *((OID *) context->home_recdes.data);
20720 
20721  /* fix forward page */
20722  if (heap_fix_forward_page (thread_p, context, &forward_oid) != NO_ERROR)
20723  {
20724  return ER_FAILED;
20725  }
20726 
20727  /* get forward record */
20728  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, &forward_recdes, PEEK) !=
20729  S_SUCCESS)
20730  {
20731  return ER_FAILED;
20732  }
20733 
20734  HEAP_PERF_TRACK_PREPARE (thread_p, context);
20735 
20736  if (is_mvcc_op)
20737  {
20738  RECDES new_forward_recdes, new_home_recdes;
20739  MVCC_REC_HEADER forward_rec_header;
20740  MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);
20742  OID new_forward_oid;
20743  int adjusted_size;
20744  bool fits_in_home, fits_in_forward;
20745  bool update_old_home = false;
20746  bool update_old_forward = false;
20747  bool remove_old_forward = false;
20748  bool is_adjusted_size_big = false;
20749  int delid_offset, repid_and_flag_bits, mvcc_flags;
20750  char *build_recdes_data;
20751  bool use_optimization;
20752 
20753  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (forward_recdes.data);
20754  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
20755  adjusted_size = forward_recdes.length;
20756 
20757  /*
20758  * Uses the optimization in most common cases, for now : if DELID not set and adjusted size is not big size.
20759  * Decide whether the deleted record has big size from beginning. After fixing header page, it may be possible
20760  * that the deleted record to not have big size. Since is a very rare case, don't care to optimize this case.
20761  */
20762  use_optimization = true;
20763  if (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID))
20764  {
20765  adjusted_size += OR_MVCCID_SIZE;
20766  is_adjusted_size_big = heap_is_big_length (adjusted_size);
20767  if (is_adjusted_size_big)
20768  {
20769  /* Rare case, do not optimize it now. */
20770  use_optimization = false;
20771  }
20772  }
20773  else
20774  {
20775  /* Rare case, do not optimize it now. */
20776  is_adjusted_size_big = false;
20777  use_optimization = false;
20778  }
20779 
20780 #if !defined(NDEBUG)
20781  if (is_adjusted_size_big)
20782  {
20783  /* not exactly necessary, but we'll be able to compare sizes */
20784  adjusted_size = forward_recdes.length - mvcc_header_size_lookup[mvcc_flags] + OR_MVCC_MAX_HEADER_SIZE;
20785  }
20786 #endif
20787 
20788  /* fix header if necessary */
20789  fits_in_home =
20790  spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, adjusted_size);
20791  fits_in_forward =
20792  spage_is_updatable (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, adjusted_size);
20793  if (is_adjusted_size_big || (!fits_in_forward && !fits_in_home))
20794  {
20795  /* fix header page */
20796  rc = heap_fix_header_page (thread_p, context);
20797  if (rc != NO_ERROR)
20798  {
20799  return ER_FAILED;
20800  }
20801 
20803  {
20804  /* re-peek forward record descriptor; forward page may have been unfixed by previous pgbuf_ordered_fix()
20805  * call */
20806  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
20807  &forward_recdes, PEEK) != S_SUCCESS)
20808  {
20809  return ER_FAILED;
20810  }
20811 
20812  /* Recomputes the header size, do not recomputes is_adjusted_size_big. */
20813  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (forward_recdes.data);
20814  if (mvcc_flags != ((repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK))
20815  {
20816  /* Rare case - disable optimization, in case that the flags was modified meanwhile. */
20817  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
20818  use_optimization = false;
20819 
20820 #if !defined(NDEBUG)
20821  if (is_adjusted_size_big)
20822  {
20823  /* not exactly necessary, but we'll be able to compare sizes */
20824  adjusted_size = forward_recdes.length - mvcc_header_size_lookup[mvcc_flags]
20826  }
20827 #endif
20828  }
20829  }
20830  }
20831 
20832  /* Build the new record. */
20834  REC_UNKNOWN, PTR_ALIGN (buffer, MAX_ALIGNMENT));
20835  if (use_optimization)
20836  {
20837  char *start_p;
20838 
20839  delid_offset = OR_MVCC_DELETE_ID_OFFSET (mvcc_flags);
20840  build_recdes_data = start_p = new_forward_recdes.data;
20841 
20842  /* Copy up to MVCC DELID first. */
20843  memcpy (build_recdes_data, forward_recdes.data, delid_offset);
20844  build_recdes_data += delid_offset;
20845 
20846  /* Sets MVCC DELID flag, overwrite first four bytes. */
20847  repid_and_flag_bits |= (OR_MVCC_FLAG_VALID_DELID << OR_MVCC_FLAG_SHIFT_BITS);
20848  OR_PUT_INT (start_p, repid_and_flag_bits);
20849 
20850  /* Sets the MVCC DELID. */
20851  OR_PUT_BIGINT (build_recdes_data, &mvcc_id);
20852  build_recdes_data += OR_MVCCID_SIZE;
20853 
20854  /* Copy remaining data. */
20855 #if !defined(NDEBUG)
20856  if (mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION)
20857  {
20858  /* Check that we need to copy from offset of LOG LSA up to the end of the buffer. */
20859  assert (delid_offset == OR_MVCC_PREV_VERSION_LSA_OFFSET (mvcc_flags));
20860  }
20861  else
20862  {
20863  /* Check that we need to copy from end of MVCC header up to the end of the buffer. */
20864  assert (delid_offset == mvcc_header_size_lookup[mvcc_flags]);
20865  }
20866 #endif
20867 
20868  memcpy (build_recdes_data, forward_recdes.data + delid_offset, forward_recdes.length - delid_offset);
20869  new_forward_recdes.length = adjusted_size;
20870  }
20871  else
20872  {
20873  int forward_rec_header_size;
20874  /*
20875  * Rare case - don't care to optimize it for now. Get the MVCC header, build adjusted record
20876  * header - slow operation.
20877  */
20878  if (or_mvcc_get_header (&forward_recdes, &forward_rec_header) != NO_ERROR)
20879  {
20880  return ER_FAILED;
20881  }
20882  assert (forward_rec_header.mvcc_flag == mvcc_flags);
20883  heap_delete_adjust_header (&forward_rec_header, mvcc_id, is_adjusted_size_big);
20884  or_mvcc_add_header (&new_forward_recdes, &forward_rec_header, OR_GET_BOUND_BIT_FLAG (forward_recdes.data),
20885  OR_GET_OFFSET_SIZE (forward_recdes.data));
20886 
20887  forward_rec_header_size = mvcc_header_size_lookup[mvcc_flags];
20888  memcpy (new_forward_recdes.data + new_forward_recdes.length, forward_recdes.data + forward_rec_header_size,
20889  forward_recdes.length - forward_rec_header_size);
20890  new_forward_recdes.length += forward_recdes.length - forward_rec_header_size;
20891  assert (new_forward_recdes.length == adjusted_size);
20892  }
20893 
20894  /* determine what operations on home/forward pages are necessary and execute extra operations for each case */
20895  if (is_adjusted_size_big)
20896  {
20897  /* insert new overflow record */
20898  if (heap_ovf_insert (thread_p, &context->hfid, &new_forward_oid, &new_forward_recdes) == NULL)
20899  {
20900  return ER_FAILED;
20901  }
20902 
20903  /* home record descriptor will be an overflow OID and will be placed in original home page */
20904  heap_build_forwarding_recdes (&new_home_recdes, REC_BIGONE, &new_forward_oid);
20905 
20906  /* remove old forward record */
20907  remove_old_forward = true;
20908  update_old_home = true;
20909 
20911  }
20912  else if (fits_in_home)
20913  {
20914  /* updated forward record fits in home page */
20915  new_home_recdes = new_forward_recdes;
20916  new_home_recdes.type = REC_HOME;
20917 
20918  /* clear forward rebuild_record (just to be safe) */
20919  new_forward_recdes.area_size = 0;
20920  new_forward_recdes.length = 0;
20921  new_forward_recdes.type = REC_UNKNOWN;
20922  new_forward_recdes.data = NULL;
20923 
20924  /* remove old forward record */
20925  remove_old_forward = true;
20926  update_old_home = true;
20927 
20929  }
20930  else if (fits_in_forward)
20931  {
20932  /* updated forward record fits in old forward page */
20933  new_forward_recdes.type = REC_NEWHOME;
20934 
20935  /* home record will not be touched */
20936  update_old_forward = true;
20937 
20939  }
20940  else
20941  {
20942  /* doesn't fit in either home or forward page */
20943  /* insert a new forward record */
20944  new_forward_recdes.type = REC_NEWHOME;
20945  rc = heap_insert_newhome (thread_p, context, &new_forward_recdes, &new_forward_oid, NULL);
20946  if (rc != NO_ERROR)
20947  {
20948  return rc;
20949  }
20950 
20951  /* new home record will be a REC_RELOCATION and will be placed in the original home page */
20952  heap_build_forwarding_recdes (&new_home_recdes, REC_RELOCATION, &new_forward_oid);
20953 
20954  /* remove old forward record */
20955  remove_old_forward = true;
20956  update_old_home = true;
20957 
20959  }
20960 
20961  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
20962 
20963  /*
20964  * Update old home record (if necessary)
20965  */
20966  if (update_old_home)
20967  {
20968  LOG_DATA_ADDR home_addr;
20969 
20970  if (context->home_page_watcher_p->page_was_unfixed)
20971  {
20972  /*
20973  * Need to get the record again, since record may have changed
20974  * by other transactions (INSID removed by VACUUM, page compact).
20975  * The object was already locked, so the record size may be the
20976  * same or smaller (INSID removed by VACUUM).
20977  */
20978  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
20979  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
20980  &context->home_recdes, is_peeking) != S_SUCCESS)
20981  {
20982  return ER_FAILED;
20983  }
20984  }
20985 
20986  /* log operation */
20987  home_addr.vfid = &context->hfid.vfid;
20988  home_addr.pgptr = context->home_page_watcher_p->pgptr;
20989  home_addr.offset = context->oid.slotid;
20990 
20991  heap_mvcc_log_home_change_on_delete (thread_p, &context->home_recdes, &new_home_recdes, &home_addr);
20992 
20993  HEAP_PERF_TRACK_LOGGING (thread_p, context);
20994 
20995  /* update home record */
20996  rc = heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
20997  &new_home_recdes);
20998  if (rc != NO_ERROR)
20999  {
21000  return rc;
21001  }
21002 
21003  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21004  }
21005  else
21006  {
21007  /* Home record is not changed, but page max MVCCID and vacuum status have to change. Also vacuum needs to be
21008  * vacuum with the location of home record (REC_BIGONE). */
21009  LOG_DATA_ADDR home_addr;
21010 
21011  /* log operation */
21012  home_addr.vfid = &context->hfid.vfid;
21013  home_addr.pgptr = context->home_page_watcher_p->pgptr;
21014  home_addr.offset = context->oid.slotid;
21015  heap_mvcc_log_home_no_change (thread_p, &home_addr);
21016  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
21017 
21018  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21019  }
21020 
21021  /*
21022  * Update old forward record (if necessary)
21023  */
21024  if (update_old_forward)
21025  {
21026  LOG_DATA_ADDR forward_addr;
21027 
21028  /* log operation */
21029  forward_addr.vfid = &context->hfid.vfid;
21030  forward_addr.pgptr = context->forward_page_watcher_p->pgptr;
21031  forward_addr.offset = forward_oid.slotid;
21032  heap_mvcc_log_delete (thread_p, &forward_addr, RVHF_MVCC_DELETE_REC_NEWHOME);
21033 
21034  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21035 
21036  /* physical update of forward record */
21037  rc =
21038  heap_update_physical (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
21039  &new_forward_recdes);
21040  if (rc != NO_ERROR)
21041  {
21042  return rc;
21043  }
21044 
21045  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21046  }
21047 
21048  /*
21049  * Delete old forward record (if necessary)
21050  */
21051  if (remove_old_forward)
21052  {
21053  LOG_DATA_ADDR forward_addr;
21054 
21055  /* re-peek forward record descriptor; forward page may have been unfixed by previous pgbuf_ordered_fix() call
21056  */
21058  {
21059  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
21060  &forward_recdes, PEEK) != S_SUCCESS)
21061  {
21062  return ER_FAILED;
21063  }
21064  }
21065 
21066  /* operation logging */
21067  forward_addr.vfid = &context->hfid.vfid;
21068  forward_addr.pgptr = context->forward_page_watcher_p->pgptr;
21069  forward_addr.offset = forward_oid.slotid;
21070 
21071  log_append_undoredo_recdes (thread_p, RVHF_DELETE, &forward_addr, &forward_recdes, NULL);
21072  if (heap_is_reusable_oid (context->file_type))
21073  {
21074  log_append_postpone (thread_p, RVHF_MARK_REUSABLE_SLOT, &forward_addr, 0, NULL);
21075  }
21076 
21077  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21078 
21079  /* physical removal of forward record */
21080  rc = heap_delete_physical (thread_p, &context->hfid, context->forward_page_watcher_p->pgptr, &forward_oid);
21081  if (rc != NO_ERROR)
21082  {
21083  return rc;
21084  }
21085 
21086  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21087  }
21088  }
21089  else
21090  {
21091  bool is_reusable = heap_is_reusable_oid (context->file_type);
21092 
21093  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21094 
21095  if (context->home_page_watcher_p->page_was_unfixed)
21096  {
21097  /*
21098  * Need to get the record again, since record may have changed
21099  * by other transactions (INSID removed by VACUUM, page compact).
21100  * The object was already locked, so the record size may be the
21101  * same or smaller (INSID removed by VACUUM).
21102  */
21103  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
21104  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
21105  &context->home_recdes, is_peeking) != S_SUCCESS)
21106  {
21107  return ER_FAILED;
21108  }
21109  }
21110  /*
21111  * Delete home record
21112  */
21113 
21114  heap_log_delete_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
21115  &context->home_recdes, is_reusable, NULL);
21116 
21117  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21118 
21119  /* physical deletion of home record */
21120  rc = heap_delete_physical (thread_p, &context->hfid, context->home_page_watcher_p->pgptr, &context->oid);
21121  if (rc != NO_ERROR)
21122  {
21123  return rc;
21124  }
21125 
21126  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21127 
21129  {
21130  /* re-peek forward record descriptor; forward page may have been unfixed by previous pgbuf_ordered_fix() call
21131  */
21132  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
21133  &forward_recdes, PEEK) != S_SUCCESS)
21134  {
21135  return ER_FAILED;
21136  }
21137  }
21138  /*
21139  * Delete forward record
21140  */
21141  /*
21142  * It should be safe to mark the new home slot as reusable regardless
21143  * of the heap type (reusable OID or not) as the relocated record
21144  * should not be referenced anywhere in the database.
21145  */
21146  heap_log_delete_physical (thread_p, context->forward_page_watcher_p->pgptr, &context->hfid.vfid, &forward_oid,
21147  &forward_recdes, true, NULL);
21148 
21149  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21150 
21151  /* physical deletion of forward record */
21152  rc = heap_delete_physical (thread_p, &context->hfid, context->forward_page_watcher_p->pgptr, &forward_oid);
21153  if (rc != NO_ERROR)
21154  {
21155  return rc;
21156  }
21157 
21158  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21159 
21161  }
21162 
21163  /* all ok */
21164  return NO_ERROR;
21165 }
21166 
21167 /*
21168  * heap_delete_home () - delete a REC_HOME (or REC_ASSIGN_ADDRESS) record
21169  * thread_p(in): thread entry
21170  * context(in): operation context
21171  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
21172  * returns: error code or NO_ERROR
21173  */
21174 static int
21175 heap_delete_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
21176 {
21177  int error_code = NO_ERROR;
21178 
21179  /* check input */
21180  assert (context != NULL);
21181  assert (context->record_type == REC_HOME || context->record_type == REC_ASSIGN_ADDRESS);
21182  assert (context->type == HEAP_OPERATION_DELETE);
21183  assert (context->home_page_watcher_p != NULL);
21184  assert (context->home_page_watcher_p->pgptr != NULL);
21185 
21186  if (context->home_page_watcher_p->page_was_unfixed)
21187  {
21188  /*
21189  * Need to get the record again, since record may have changed
21190  * by other transactions (INSID removed by VACUUM, page compact).
21191  * The object was already locked, so the record size may be the
21192  * same or smaller (INSID removed by VACUUM).
21193  */
21194  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
21195  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
21196  &context->home_recdes, is_peeking) != S_SUCCESS)
21197  {
21198  assert (false);
21199  return ER_FAILED;
21200  }
21201  }
21202 
21203  /* operation */
21204  if (is_mvcc_op)
21205  {
21206  MVCC_REC_HEADER record_header;
21207  RECDES built_recdes;
21208  RECDES forwarding_recdes;
21209  RECDES *home_page_updated_recdes;
21210  OID forward_oid;
21211  MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);
21213  int adjusted_size;
21214  bool is_adjusted_size_big = false;
21215  int delid_offset, repid_and_flag_bits, mvcc_flags;
21216  char *build_recdes_data;
21217  bool use_optimization;
21218 
21219  /* Build the new record descriptor. */
21220  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (context->home_recdes.data);
21221  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
21222  adjusted_size = context->home_recdes.length;
21223 
21224  /* Uses the optimization in most common cases, for now : if DELID not set and adjusted size is not big size. */
21225  use_optimization = true;
21226  if (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID))
21227  {
21228  adjusted_size += OR_MVCCID_SIZE;
21229  is_adjusted_size_big = heap_is_big_length (adjusted_size);
21230  if (is_adjusted_size_big)
21231  {
21232  /* Rare case, do not optimize it now. */
21233  use_optimization = false;
21234  }
21235  }
21236  else
21237  {
21238  /* Rare case, do not optimize it now. */
21239  is_adjusted_size_big = false;
21240  use_optimization = false;
21241  }
21242 
21243 #if !defined(NDEBUG)
21244  if (is_adjusted_size_big)
21245  {
21246  /* not exactly necessary, but we'll be able to compare sizes */
21247  adjusted_size = context->home_recdes.length - mvcc_header_size_lookup[mvcc_flags] + OR_MVCC_MAX_HEADER_SIZE;
21248  }
21249 #endif
21250 
21251  /* Build the new record. */
21253  PTR_ALIGN (data_buffer, MAX_ALIGNMENT));
21254  if (use_optimization)
21255  {
21256  char *start_p;
21257 
21258  delid_offset = OR_MVCC_DELETE_ID_OFFSET (mvcc_flags);
21259 
21260  build_recdes_data = start_p = built_recdes.data;
21261 
21262  /* Copy up to MVCC DELID first. */
21263  memcpy (build_recdes_data, context->home_recdes.data, delid_offset);
21264  build_recdes_data += delid_offset;
21265 
21266  /* Sets MVCC DELID flag, overwrite first four bytes. */
21267  repid_and_flag_bits |= (OR_MVCC_FLAG_VALID_DELID << OR_MVCC_FLAG_SHIFT_BITS);
21268  OR_PUT_INT (start_p, repid_and_flag_bits);
21269 
21270  /* Sets the MVCC DELID. */
21271  OR_PUT_BIGINT (build_recdes_data, &mvcc_id);
21272  build_recdes_data += OR_MVCC_DELETE_ID_SIZE;
21273 
21274  /* Copy remaining data. */
21275 #if !defined(NDEBUG)
21276  if (mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION)
21277  {
21278  /* Check that we need to copy from offset of LOG LSA up to the end of the buffer. */
21279  assert (delid_offset == OR_MVCC_PREV_VERSION_LSA_OFFSET (mvcc_flags));
21280  }
21281  else
21282  {
21283  /* Check that we need to copy from end of MVCC header up to the end of the buffer. */
21284  assert (delid_offset == mvcc_header_size_lookup[mvcc_flags]);
21285  }
21286 #endif
21287 
21288  memcpy (build_recdes_data, context->home_recdes.data + delid_offset,
21289  context->home_recdes.length - delid_offset);
21290  built_recdes.length = adjusted_size;
21291  }
21292  else
21293  {
21294  int header_size;
21295  /*
21296  * Rare case - don't care to optimize it for now. Get the MVCC header, build adjusted record
21297  * header - slow operation.
21298  */
21299  error_code = or_mvcc_get_header (&context->home_recdes, &record_header);
21300  if (error_code != NO_ERROR)
21301  {
21302  ASSERT_ERROR ();
21303  return error_code;
21304  }
21305  assert (record_header.mvcc_flag == mvcc_flags);
21306 
21307  heap_delete_adjust_header (&record_header, mvcc_id, is_adjusted_size_big);
21308  or_mvcc_add_header (&built_recdes, &record_header, OR_GET_BOUND_BIT_FLAG (context->home_recdes.data),
21309  OR_GET_OFFSET_SIZE (context->home_recdes.data));
21310  header_size = mvcc_header_size_lookup[mvcc_flags];
21311  memcpy (built_recdes.data + built_recdes.length, context->home_recdes.data + header_size,
21312  context->home_recdes.length - header_size);
21313  built_recdes.length += (context->home_recdes.length - header_size);
21314  assert (built_recdes.length == adjusted_size);
21315  }
21316 
21317  /* determine type */
21318  if (is_adjusted_size_big)
21319  {
21320  built_recdes.type = REC_BIGONE;
21321  }
21322  else if (!spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
21323  built_recdes.length))
21324  {
21325  built_recdes.type = REC_NEWHOME;
21326  }
21327  else
21328  {
21329  built_recdes.type = REC_HOME;
21330  }
21331 
21332  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21333 
21334  /* check whether relocation is necessary */
21335  if (built_recdes.type == REC_BIGONE || built_recdes.type == REC_NEWHOME)
21336  {
21337  /*
21338  * Relocation necessary
21339  */
21340  LOG_DATA_ADDR rec_address;
21341 
21342  /* insertion of built record */
21343  if (built_recdes.type == REC_BIGONE)
21344  {
21345  /* new record is overflow record - REC_BIGONE case */
21346  forwarding_recdes.type = REC_BIGONE;
21347  if (heap_ovf_insert (thread_p, &context->hfid, &forward_oid, &built_recdes) == NULL)
21348  {
21349  ASSERT_ERROR_AND_SET (error_code);
21350  return error_code;
21351  }
21352 
21354  }
21355  else
21356  {
21357  /* new record is relocated - REC_NEWHOME case */
21358  forwarding_recdes.type = REC_RELOCATION;
21359 
21360  /* insert NEWHOME record */
21361  error_code = heap_insert_newhome (thread_p, context, &built_recdes, &forward_oid, NULL);
21362  if (error_code != NO_ERROR)
21363  {
21364  ASSERT_ERROR ();
21365  return error_code;
21366  }
21367 
21369  }
21370 
21371  /* build forwarding rebuild_record */
21372  heap_build_forwarding_recdes (&forwarding_recdes, forwarding_recdes.type, &forward_oid);
21373 
21374  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21375 
21376  if (context->home_page_watcher_p->page_was_unfixed)
21377  {
21378  /*
21379  * Need to get the record again, since record may have changed
21380  * by other transactions (INSID removed by VACUUM, page compact).
21381  * The object was already locked, so the record size may be the
21382  * same or smaller (INSID removed by VACUUM).
21383  */
21384  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
21385  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
21386  &context->home_recdes, is_peeking) != S_SUCCESS)
21387  {
21388  assert (false);
21389  return ER_FAILED;
21390  }
21391  }
21392 
21393  /* log relocation */
21394  rec_address.pgptr = context->home_page_watcher_p->pgptr;
21395  rec_address.vfid = &context->hfid.vfid;
21396  rec_address.offset = context->oid.slotid;
21397  heap_mvcc_log_home_change_on_delete (thread_p, &context->home_recdes, &forwarding_recdes, &rec_address);
21398 
21399  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21400 
21401  /* we'll update the home page with the forwarding record */
21402  home_page_updated_recdes = &forwarding_recdes;
21403  }
21404  else
21405  {
21406  LOG_DATA_ADDR rec_address;
21407 
21408  /*
21409  * No relocation, can be updated in place
21410  */
21411 
21412  rec_address.pgptr = context->home_page_watcher_p->pgptr;
21413  rec_address.vfid = &context->hfid.vfid;
21414  rec_address.offset = context->oid.slotid;
21415  heap_mvcc_log_delete (thread_p, &rec_address, RVHF_MVCC_DELETE_REC_HOME);
21416 
21417  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21418 
21419  /* we'll update the home page with the built record, since it fits in home page */
21420  home_page_updated_recdes = &built_recdes;
21421 
21423  }
21424 
21425  /* update home page and check operation result */
21426  error_code =
21427  heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
21428  home_page_updated_recdes);
21429  if (error_code != NO_ERROR)
21430  {
21431  ASSERT_ERROR ();
21432  return error_code;
21433  }
21434 
21435  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21436  }
21437  else
21438  {
21439  bool is_reusable = heap_is_reusable_oid (context->file_type);
21440 
21441  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21442 
21443  /* log operation */
21444  heap_log_delete_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
21445  &context->home_recdes, is_reusable, NULL);
21446 
21447  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21448 
21449  /* physical deletion */
21450  error_code = heap_delete_physical (thread_p, &context->hfid, context->home_page_watcher_p->pgptr, &context->oid);
21451 
21452  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21453 
21455 
21456  assert (error_code == NO_ERROR || er_errid () != NO_ERROR);
21457  return error_code;
21458  }
21459 
21460  /* all ok */
21461  return NO_ERROR;
21462 }
21463 
21464 /*
21465  * heap_delete_physical () - physical deletion of a record
21466  * thread_p(in): thread entry
21467  * hfid_p(in): heap file identifier where record is located
21468  * page_p(in): page where record is stored
21469  * oid_p(in): object identifier of record
21470  */
21471 static int
21472 heap_delete_physical (THREAD_ENTRY * thread_p, HFID * hfid_p, PAGE_PTR page_p, OID * oid_p)
21473 {
21474  int free_space;
21475 
21476  /* check input */
21477  assert (hfid_p != NULL);
21478  assert (page_p != NULL);
21479  assert (oid_p != NULL);
21480  assert (oid_p->slotid != NULL_SLOTID);
21481 
21482  /* save old freespace */
21483  free_space = spage_get_free_space_without_saving (thread_p, page_p, NULL);
21484 
21485  /* physical deletion */
21486  if (spage_delete (thread_p, page_p, oid_p->slotid) == NULL_SLOTID)
21487  {
21488  return ER_FAILED;
21489  }
21490 
21491  /* update statistics */
21492  heap_stats_update (thread_p, page_p, hfid_p, free_space);
21493 
21494  /* mark page as dirty */
21495  pgbuf_set_dirty (thread_p, page_p, DONT_FREE);
21496 
21497  /* all ok */
21498  return NO_ERROR;
21499 }
21500 
21501 /*
21502  * heap_log_delete_physical () - log physical deletion
21503  * thread_p(in): thread entry
21504  * page_p(in): page pointer
21505  * vfid_p(in): virtual file identifier
21506  * oid_p(in): object identifier of deleted record
21507  * recdes_p(in): record descriptor of deleted record
21508  * mark_reusable(in): if true, will mark the slot as reusable
21509  * undo_lsa(out): lsa to the undo record; needed to set previous version lsa of record at update
21510  */
21511 static void
21512 heap_log_delete_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p, RECDES * recdes_p,
21513  bool mark_reusable, LOG_LSA * undo_lsa)
21514 {
21515  LOG_DATA_ADDR log_addr;
21516 
21517  /* check input */
21518  assert (page_p != NULL);
21519  assert (vfid_p != NULL);
21520  assert (oid_p != NULL);
21521  assert (recdes_p != NULL);
21522 
21523  /* populate address */
21524  log_addr.offset = oid_p->slotid;
21525  log_addr.pgptr = page_p;
21526  log_addr.vfid = vfid_p;
21527 
21528  if (recdes_p->type == REC_ASSIGN_ADDRESS)
21529  {
21530  /* special case for REC_ASSIGN */
21531  RECDES temp_recdes;
21532  INT16 bytes_reserved;
21533 
21534  temp_recdes.type = recdes_p->type;
21535  temp_recdes.area_size = sizeof (bytes_reserved);
21536  temp_recdes.length = sizeof (bytes_reserved);
21537  bytes_reserved = (INT16) recdes_p->length;
21538  temp_recdes.data = (char *) &bytes_reserved;
21539 
21540  log_append_undoredo_recdes (thread_p, RVHF_DELETE, &log_addr, &temp_recdes, NULL);
21541  }
21542  else
21543  {
21544  /* log record descriptor */
21545  log_append_undoredo_recdes (thread_p, RVHF_DELETE, &log_addr, recdes_p, NULL);
21546  }
21547 
21548  if (undo_lsa)
21549  {
21550  /* get, set undo lsa before log_append_postpone() will make it inaccessible */
21551  LSA_COPY (undo_lsa, logtb_find_current_tran_lsa (thread_p));
21552  }
21553 
21554  /* log postponed operation */
21555  if (mark_reusable)
21556  {
21557  log_append_postpone (thread_p, RVHF_MARK_REUSABLE_SLOT, &log_addr, 0, NULL);
21558  }
21559 }
21560 
21561 /*
21562  * heap_update_bigone () - update a REC_BIGONE record
21563  * thread_p(in): thread entry
21564  * context(in): operation context
21565  * is_mvcc_op(in): type of operation (MVCC/non-MVCC)
21566  */
21567 static int
21568 heap_update_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
21569 {
21570  int error_code = NO_ERROR;
21571  bool is_old_home_updated;
21572  RECDES new_home_recdes;
21573  VFID ovf_vfid;
21574 
21575  assert (context != NULL);
21576  assert (context->type == HEAP_OPERATION_UPDATE);
21577  assert (context->recdes_p != NULL);
21578  assert (context->home_page_watcher_p != NULL);
21579  assert (context->home_page_watcher_p->pgptr != NULL);
21580  assert (context->overflow_page_watcher_p != NULL);
21581 
21582  /* read OID of overflow record */
21583  context->ovf_oid = *((OID *) context->home_recdes.data);
21584 
21585  /* fix header page */
21586  error_code = heap_fix_header_page (thread_p, context);
21587  if (error_code != NO_ERROR)
21588  {
21589  ASSERT_ERROR ();
21590  goto exit;
21591  }
21592 
21593  HEAP_PERF_TRACK_PREPARE (thread_p, context);
21594 
21595  if (is_mvcc_op)
21596  {
21597  /* log old overflow record and set prev version lsa */
21598 
21599  /* This undo log record have two roles: 1) to keep the old record version; 2) to reach the record at undo
21600  * in order to check if it should have its insert id and prev version vacuumed; */
21601  RECDES ovf_recdes = RECDES_INITIALIZER;
21602  VPID ovf_vpid;
21603  PAGE_PTR first_pgptr;
21604 
21605  if (heap_get_bigone_content (thread_p, context->scan_cache_p, COPY, &context->ovf_oid, &ovf_recdes) != S_SUCCESS)
21606  {
21607  error_code = ER_FAILED;
21608  goto exit;
21609  }
21610 
21611  VPID_GET_FROM_OID (&ovf_vpid, &context->ovf_oid);
21612  first_pgptr = pgbuf_fix (thread_p, &ovf_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
21613  if (first_pgptr == NULL)
21614  {
21615  error_code = ER_FAILED;
21616  goto exit;
21617  }
21618 
21619  if (heap_ovf_find_vfid (thread_p, &context->hfid, &ovf_vfid, false, PGBUF_UNCONDITIONAL_LATCH) == NULL)
21620  {
21621  error_code = ER_FAILED;
21622  goto exit;
21623  }
21624 
21625  /* actual logging */
21626  log_append_undo_recdes2 (thread_p, RVHF_MVCC_UPDATE_OVERFLOW, &ovf_vfid, first_pgptr, -1, &ovf_recdes);
21627  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21628 
21629  pgbuf_set_dirty (thread_p, first_pgptr, FREE);
21630 
21631  /* set prev version lsa */
21633  }
21634 
21635  /* Proceed with the update. the new record is prepared and for mvcc it should have the prev version lsa set */
21636  if (heap_is_big_length (context->recdes_p->length))
21637  {
21638  /* overflow -> overflow update */
21639  is_old_home_updated = false;
21640 
21641  if (heap_ovf_update (thread_p, &context->hfid, &context->ovf_oid, context->recdes_p) == NULL)
21642  {
21643  ASSERT_ERROR_AND_SET (error_code);
21644  goto exit;
21645  }
21646  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21647 
21648  if (is_mvcc_op)
21649  {
21650  /* log home no change; vacuum needs it to reach the updated overflow record */
21651  LOG_DATA_ADDR log_addr (&context->hfid.vfid, context->home_page_watcher_p->pgptr, context->oid.slotid);
21652 
21653  heap_mvcc_log_home_no_change (thread_p, &log_addr);
21654 
21655  /* dirty home page because of logging */
21656  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
21657  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21658  }
21659  }
21660  else if (spage_update (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, context->recdes_p) ==
21661  SP_SUCCESS)
21662  {
21663  /* overflow -> rec home update (new record fits in home page) */
21664  is_old_home_updated = true;
21665 
21666  /* update it's type in the page */
21667  context->record_type = context->recdes_p->type = REC_HOME;
21668  spage_update_record_type (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
21669  context->recdes_p->type);
21670 
21671  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21672 
21673  new_home_recdes = *context->recdes_p;
21674 
21675  /* dirty home page */
21676  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
21677  }
21678  else
21679  {
21680  /* overflow -> rec relocation update (home record will point to the new_home record) */
21681  OID newhome_oid;
21682 
21683  /* insert new home */
21684  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21685  context->recdes_p->type = REC_NEWHOME;
21686  error_code = heap_insert_newhome (thread_p, context, context->recdes_p, &newhome_oid, NULL);
21687  if (error_code != NO_ERROR)
21688  {
21689  ASSERT_ERROR ();
21690  goto exit;
21691  }
21692 
21693  /* prepare record descriptor */
21694  heap_build_forwarding_recdes (&new_home_recdes, REC_RELOCATION, &newhome_oid);
21695 
21696  /* update home */
21697  error_code = heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
21698  &new_home_recdes);
21699  if (error_code != NO_ERROR)
21700  {
21701  ASSERT_ERROR ();
21702  goto exit;
21703  }
21704  is_old_home_updated = true;
21705 
21706  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21707  }
21708 
21709  if (is_old_home_updated)
21710  {
21711  /* log home update operation and remove old overflow record */
21712  heap_log_update_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid,
21713  &context->oid, &context->home_recdes, &new_home_recdes,
21714  (is_mvcc_op ? RVHF_UPDATE_NOTIFY_VACUUM : RVHF_UPDATE));
21715  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21716 
21717  /* the old overflow record is no longer needed, it was linked only by old home */
21718  if (heap_ovf_delete (thread_p, &context->hfid, &context->ovf_oid, NULL) == NULL)
21719  {
21720  ASSERT_ERROR_AND_SET (error_code);
21721  goto exit;
21722  }
21723  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21724  }
21725 
21726  /* location did not change */
21727  COPY_OID (&context->res_oid, &context->oid);
21728 
21730 
21731  /* Fall through to exit. */
21732 
21733 exit:
21734  return error_code;
21735 }
21736 
21737 /*
21738  * heap_update_relocation () - update a REC_RELOCATION/REC_NEWHOME combo
21739  * thread_p(in): thread entry
21740  * context(in): operation context
21741  * is_mvcc_op(in): type of operation (MVCC/non-MVCC)
21742  */
21743 static int
21744 heap_update_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
21745 {
21746  RECDES forward_recdes;
21747  char forward_recdes_buffer[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
21748  OID forward_oid;
21749  int rc;
21750  RECDES new_home_recdes;
21751  OID new_forward_oid;
21752  bool fits_in_home, fits_in_forward;
21753  bool update_old_home = false;
21754  bool update_old_forward = false;
21755  bool remove_old_forward = false;
21756  LOG_LSA prev_version_lsa = LSA_INITIALIZER;
21757  PGBUF_WATCHER newhome_pg_watcher; /* fwd pg watcher required for heap_update_set_prev_version() */
21758  PGBUF_WATCHER *newhome_pg_watcher_p = NULL;
21759 
21760  assert (context != NULL);
21761  assert (context->recdes_p != NULL);
21762  assert (context->type == HEAP_OPERATION_UPDATE);
21763  assert (context->home_page_watcher_p != NULL);
21764  assert (context->home_page_watcher_p->pgptr != NULL);
21765  assert (context->forward_page_watcher_p != NULL);
21766 
21767  /* get forward oid */
21768  forward_oid = *((OID *) context->home_recdes.data);
21769 
21770  /* fix forward page */
21771  rc = heap_fix_forward_page (thread_p, context, &forward_oid);
21772  if (rc != NO_ERROR)
21773  {
21774  ASSERT_ERROR ();
21775  goto exit;
21776  }
21777 
21778  /* fix header if necessary */
21779  fits_in_home =
21780  spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, context->recdes_p->length);
21781  fits_in_forward =
21782  spage_is_updatable (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
21783  context->recdes_p->length);
21784  if (heap_is_big_length (context->recdes_p->length) || (!fits_in_forward && !fits_in_home))
21785  {
21786  /* fix header page */
21787  rc = heap_fix_header_page (thread_p, context);
21788  if (rc != NO_ERROR)
21789  {
21790  ASSERT_ERROR ();
21791  goto exit;
21792  }
21793  }
21794 
21795  /* get forward record */
21796  forward_recdes.area_size = DB_PAGESIZE;
21797  forward_recdes.data = PTR_ALIGN (forward_recdes_buffer, MAX_ALIGNMENT);
21798  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, &forward_recdes, COPY) !=
21799  S_SUCCESS)
21800  {
21801  assert (false);
21802  ASSERT_ERROR_AND_SET (rc);
21803  goto exit;
21804  }
21805 
21806  HEAP_PERF_TRACK_PREPARE (thread_p, context);
21807 
21808  /* determine what operations on home/forward pages are necessary and execute extra operations for each case */
21809  if (heap_is_big_length (context->recdes_p->length))
21810  {
21811  /* insert new overflow record */
21812  if (heap_ovf_insert (thread_p, &context->hfid, &new_forward_oid, context->recdes_p) == NULL)
21813  {
21814  ASSERT_ERROR_AND_SET (rc);
21815  goto exit;
21816  }
21817 
21818  /* home record descriptor will be an overflow OID and will be placed in original home page */
21819  heap_build_forwarding_recdes (&new_home_recdes, REC_BIGONE, &new_forward_oid);
21820 
21821  /* remove old forward record */
21822  remove_old_forward = true;
21823  update_old_home = true;
21824 
21826  }
21827  else if (!fits_in_forward && !fits_in_home)
21828  {
21829  /* insert a new forward record */
21830 
21831  if (is_mvcc_op)
21832  {
21833  /* necessary later to set prev version, which is required only for mvcc objects */
21834  newhome_pg_watcher_p = &newhome_pg_watcher;
21836  }
21837 
21838  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21839  context->recdes_p->type = REC_NEWHOME;
21840  rc = heap_insert_newhome (thread_p, context, context->recdes_p, &new_forward_oid, newhome_pg_watcher_p);
21841  if (rc != NO_ERROR)
21842  {
21843  ASSERT_ERROR ();
21844  goto exit;
21845  }
21846 
21847  /* new home record will be a REC_RELOCATION and will be placed in the original home page */
21848  heap_build_forwarding_recdes (&new_home_recdes, REC_RELOCATION, &new_forward_oid);
21849 
21850  /* remove old forward record */
21851  remove_old_forward = true;
21852  update_old_home = true;
21853 
21855  }
21856  else if (fits_in_home)
21857  {
21858  /* updated forward record fits in home page */
21859  context->recdes_p->type = REC_HOME;
21860  new_home_recdes = *context->recdes_p;
21861 
21862  /* remove old forward record */
21863  remove_old_forward = true;
21864  update_old_home = true;
21865 
21867  }
21868  else if (fits_in_forward)
21869  {
21870  /* updated forward record fits in old forward page */
21871  context->recdes_p->type = REC_NEWHOME;
21872 
21873  /* home record will not be touched */
21874  update_old_forward = true;
21875 
21877  }
21878  else
21879  {
21880  /* impossible case */
21881  assert (false);
21882  rc = ER_FAILED;
21883  goto exit;
21884  }
21885 
21886  /* The old rec_newhome must be removed or updated */
21887  assert (remove_old_forward != update_old_forward);
21888  /* Remove rec_newhome only in case of old_home update */
21889  assert (remove_old_forward == update_old_home);
21890 
21891  /*
21892  * Update old home record (if necessary)
21893  */
21894  if (update_old_home)
21895  {
21896  /* log operation */
21897  heap_log_update_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
21898  &context->home_recdes, &new_home_recdes,
21899  (is_mvcc_op ? RVHF_UPDATE_NOTIFY_VACUUM : RVHF_UPDATE));
21900  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21901 
21902  /* update home record */
21903  rc = heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &new_home_recdes);
21904  if (rc != NO_ERROR)
21905  {
21906  ASSERT_ERROR ();
21907  goto exit;
21908  }
21909  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21910  }
21911 
21912  /*
21913  * Delete old forward record (if necessary)
21914  */
21915  if (remove_old_forward)
21916  {
21917  assert (context->forward_page_watcher_p != NULL && context->forward_page_watcher_p->pgptr != NULL);
21918 	  if ((new_home_recdes.type == REC_RELOCATION || new_home_recdes.type == REC_BIGONE)
21919 	      && context->forward_page_watcher_p->page_was_unfixed)
21920 	    {
21921  /*
21922  * Need to get the record again, since the record may have changed by other concurrent
21923  * transactions (INSID removed by VACUUM).
21924  */
21925  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, &forward_recdes,
21926  COPY) != S_SUCCESS)
21927  {
21928  assert (false);
21929  ASSERT_ERROR_AND_SET (rc);
21930  goto exit;
21931  }
21932  HEAP_PERF_TRACK_PREPARE (thread_p, context);
21933  }
21934 
21935  /* log operation */
21936  heap_log_delete_physical (thread_p, context->forward_page_watcher_p->pgptr, &context->hfid.vfid, &forward_oid,
21937  &forward_recdes, true, &prev_version_lsa);
21938  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21939 
21940  /* physical removal of forward record */
21941  rc = heap_delete_physical (thread_p, &context->hfid, context->forward_page_watcher_p->pgptr, &forward_oid);
21942  if (rc != NO_ERROR)
21943  {
21944  ASSERT_ERROR ();
21945  goto exit;
21946  }
21947  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21948  }
21949 
21950  /*
21951  * Update old forward record (if necessary)
21952  */
21953  if (update_old_forward)
21954  {
21955  /* log operation */
21956  heap_log_update_physical (thread_p, context->forward_page_watcher_p->pgptr, &context->hfid.vfid, &forward_oid,
21957  &forward_recdes, context->recdes_p, RVHF_UPDATE);
21958  LSA_COPY (&prev_version_lsa, logtb_find_current_tran_lsa (thread_p));
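 	  /* remember the LSA of the undo record just logged; it is passed to heap_update_set_prev_version () below as the link to the previous version */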
21959 
21960  if (is_mvcc_op)
21961  {
21962  LOG_DATA_ADDR p_addr;
21963 
21964  p_addr.pgptr = context->home_page_watcher_p->pgptr;
21965  p_addr.vfid = &context->hfid.vfid;
21966  p_addr.offset = context->oid.slotid;
21967 
21968  /* home remains untouched, log no_change on home to notify vacuum */
21969  heap_mvcc_log_home_no_change (thread_p, &p_addr);
21970 
21971  /* Even though home record is not modified, vacuum status of the page might be changed. */
21972  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
21973  }
21974 
21975  HEAP_PERF_TRACK_LOGGING (thread_p, context);
21976 
21977  /* physical update of forward record */
21978  rc = heap_update_physical (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
21979  context->recdes_p);
21980  if (rc != NO_ERROR)
21981  {
21982  ASSERT_ERROR ();
21983  goto exit;
21984  }
21985 
21986  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
21987  }
21988 
21989  if (is_mvcc_op)
21990  {
21991 	  /* the updated record needs the prev version lsa pointing to the undo log record where the old record can be found */
21992  rc = heap_update_set_prev_version (thread_p, &context->oid, context->home_page_watcher_p,
21993  newhome_pg_watcher_p ? newhome_pg_watcher_p : context->forward_page_watcher_p,
21994  &prev_version_lsa);
21995 
21996  if (rc != NO_ERROR)
21997  {
21998  ASSERT_ERROR ();
21999  goto exit;
22000  }
22001  }
22002 
22003  /* location did not change */
22004  COPY_OID (&context->res_oid, &context->oid);
22005 
22006 exit:
22007 
22008  if (newhome_pg_watcher_p != NULL && newhome_pg_watcher_p->pgptr != NULL)
22009  {
22010  /* newhome_pg_watcher is used only locally; must be unfixed */
22011  pgbuf_ordered_unfix (thread_p, newhome_pg_watcher_p);
22012  }
22013 
22014  return rc;
22015 }
22016 
22017 /*
22018  * heap_update_home () - update a REC_HOME record
22019  * thread_p(in): thread entry
22020  * context(in): operation context
22021  * is_mvcc_op(in): type of operation (MVCC/non-MVCC)
22022  */
22023 static int
22024 heap_update_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
22025 {
22026  int error_code = NO_ERROR;
22027  RECDES forwarding_recdes;
22028  RECDES *home_page_updated_recdes_p = NULL;
22029  OID forward_oid;
22030  LOG_RCVINDEX undo_rcvindex;
22031  LOG_LSA prev_version_lsa;
22032  PGBUF_WATCHER newhome_pg_watcher; /* fwd pg watcher required for heap_update_set_prev_version() */
22033  PGBUF_WATCHER *newhome_pg_watcher_p = NULL;
22034 
22035  assert (context != NULL);
22036  assert (context->recdes_p != NULL);
22037  assert (context->type == HEAP_OPERATION_UPDATE);
22038  assert (context->home_page_watcher_p != NULL);
22039  assert (context->home_page_watcher_p->pgptr != NULL);
22040  assert (context->forward_page_watcher_p != NULL);
22041 
22042   if (is_mvcc_op && context->home_recdes.type == REC_ASSIGN_ADDRESS)
22043     {
22044  /* updating a REC_ASSIGN_ADDRESS should be done as a non-mvcc operation */
22045  assert (false);
22046 #if defined(CUBRID_DEBUG)
22047       er_log_debug (ARG_FILE_LINE,
22048 		    "heap_update_home: ** SYSTEM_ERROR **"
22049 		    " mvcc update was attempted on REC_ASSIGN_ADDRESS home record");
22050 #endif
22051  error_code = ER_FAILED;
22052  goto exit;
22053  }
22054 
22055 #if defined (SERVER_MODE)
22056  if (is_mvcc_op)
22057  {
22058  undo_rcvindex = RVHF_UPDATE_NOTIFY_VACUUM;
22059  }
22060  else if (context->home_recdes.type == REC_ASSIGN_ADDRESS && !mvcc_is_mvcc_disabled_class (&context->class_oid))
22061  {
22062  /* Quick fix: Assign address is update in-place. Vacuum must be notified. */
22063  undo_rcvindex = RVHF_UPDATE_NOTIFY_VACUUM;
22064  }
22065  else
22066 #endif /* SERVER_MODE */
22067  {
22068  undo_rcvindex = RVHF_UPDATE;
22069  }
22070 
22071  if (heap_is_big_length (context->recdes_p->length))
22072  {
22073  /* fix header page */
22074  error_code = heap_fix_header_page (thread_p, context);
22075  if (error_code != NO_ERROR)
22076  {
22077  ASSERT_ERROR ();
22078  goto exit;
22079  }
22080 
22081  /* insert new overflow record */
22082  HEAP_PERF_TRACK_PREPARE (thread_p, context);
22083  if (heap_ovf_insert (thread_p, &context->hfid, &forward_oid, context->recdes_p) == NULL)
22084  {
22085  ASSERT_ERROR_AND_SET (error_code);
22086  goto exit;
22087  }
22088 
22089  /* forwarding record is REC_BIGONE */
22090  heap_build_forwarding_recdes (&forwarding_recdes, REC_BIGONE, &forward_oid);
22091 
22092  /* we'll be updating home with forwarding record */
22093  home_page_updated_recdes_p = &forwarding_recdes;
22094 
22096  }
22097  else if (!spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
22098  context->recdes_p->length))
22099  {
22100  /* insert new home */
22101 
22102  if (is_mvcc_op)
22103  {
22104  /* necessary later to set prev version, which is required only for mvcc objects */
22105  newhome_pg_watcher_p = &newhome_pg_watcher;
22107  }
22108 
22109  /* fix header page */
22110  error_code = heap_fix_header_page (thread_p, context);
22111  if (error_code != NO_ERROR)
22112  {
22113  ASSERT_ERROR ();
22114  goto exit;
22115  }
22116 
22117  /* insert new home record */
22118  HEAP_PERF_TRACK_PREPARE (thread_p, context);
22119  context->recdes_p->type = REC_NEWHOME;
22120  error_code = heap_insert_newhome (thread_p, context, context->recdes_p, &forward_oid, newhome_pg_watcher_p);
22121  if (error_code != NO_ERROR)
22122  {
22123  ASSERT_ERROR ();
22124  goto exit;
22125  }
22126 
22127  /* forwarding record is REC_RELOCATION */
22128  heap_build_forwarding_recdes (&forwarding_recdes, REC_RELOCATION, &forward_oid);
22129 
22130  /* we'll be updating home with forwarding record */
22131  home_page_updated_recdes_p = &forwarding_recdes;
22132 
22134  }
22135  else
22136  {
22137  context->recdes_p->type = REC_HOME;
22138 
22139  /* updated record fits in home page */
22140  home_page_updated_recdes_p = context->recdes_p;
22141 
22143  }
22144 
22145  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
22146 
22147   if ((home_page_updated_recdes_p->type == REC_RELOCATION || home_page_updated_recdes_p->type == REC_BIGONE)
22148       && context->home_page_watcher_p->page_was_unfixed)
22149     {
22150  /*
22151  * Need to get the record again, since record may have changed
22152  * by other transactions (INSID removed by VACUUM, page compact).
22153  * The object was already locked, so the record size may be the
22154  * same or smaller (INSID removed by VACUUM).
22155  */
22156  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
22157  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &context->home_recdes,
22158  is_peeking) != S_SUCCESS)
22159  {
22160  ASSERT_ERROR_AND_SET (error_code);
22161  goto exit;
22162  }
22163  HEAP_PERF_TRACK_PREPARE (thread_p, context);
22164  }
22165 
22166  /* log home update */
22167  heap_log_update_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
22168  &context->home_recdes, home_page_updated_recdes_p, undo_rcvindex);
22169  LSA_COPY (&prev_version_lsa, logtb_find_current_tran_lsa (thread_p));
22170 
22171  HEAP_PERF_TRACK_LOGGING (thread_p, context);
22172 
22173  /* physical update of home record */
22174  error_code =
22175  heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
22176  home_page_updated_recdes_p);
22177  if (error_code != NO_ERROR)
22178  {
22179  assert (false);
22180  ASSERT_ERROR ();
22181  goto exit;
22182  }
22183 
22184  if (is_mvcc_op)
22185  {
22186       /* the updated record needs the prev version lsa pointing to the undo log record where the old record can be found */
22187  error_code = heap_update_set_prev_version (thread_p, &context->oid, context->home_page_watcher_p,
22188  newhome_pg_watcher_p, &prev_version_lsa);
22189  if (error_code != NO_ERROR)
22190  {
22191  ASSERT_ERROR ();
22192  goto exit;
22193  }
22194  }
22195 
22196  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
22197 
22198  /* location did not change */
22199  COPY_OID (&context->res_oid, &context->oid);
22200 
22201  /* Fall through to exit. */
22202 
22203 exit:
22204 
22205  if (newhome_pg_watcher_p != NULL && newhome_pg_watcher_p->pgptr != NULL)
22206  {
22207  /* newhome_pg_watcher is used only locally; must be unfixed */
22208  pgbuf_ordered_unfix (thread_p, newhome_pg_watcher_p);
22209  }
22210 
22211  return error_code;
22212 }
22213 
22214 /*
22215  * heap_update_physical () - physically update a record
22216  * thread_p(in): thread entry
22217  * page_p(in): page where record is stored
22218  * slot_id(in): slot where record is stored within page
22219  * recdes_p(in): record descriptor of updated record
22220  * returns: error code or NO_ERROR
22221  */
22222 static int
22223 heap_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, short slot_id, RECDES * recdes_p)
22224 {
22225  int scancode;
22226  INT16 old_record_type;
22227 
22228  /* check input */
22229  assert (page_p != NULL);
22230  assert (recdes_p != NULL);
22231  assert (slot_id != NULL_SLOTID);
22232 
22233  /* retrieve current record type */
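   /* the old type is remembered so the slot's record type can be refreshed after the update if it changed */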
22234  old_record_type = spage_get_record_type (page_p, slot_id);
22235 
22236  /* update home page and check operation result */
22237  scancode = spage_update (thread_p, page_p, slot_id, recdes_p);
22238  if (scancode != SP_SUCCESS)
22239  {
22240  /*
22241  * This is likely a system error since we have already checked
22242  * for space.
22243  */
22244  assert (false);
22245  if (scancode != SP_ERROR)
22246  {
22248  }
22249 
22250 #if defined(CUBRID_DEBUG)
22251       er_log_debug (ARG_FILE_LINE,
22252 		    "heap_update_physical: ** SYSTEM_ERROR ** update operation failed even when we have already"
22253 		    " checked for space");
22254 #endif
22255 
22256  return ER_FAILED;
22257  }
22258 
22259  /* Reflect record type change */
22260  if (old_record_type != recdes_p->type)
22261  {
22262  spage_update_record_type (thread_p, page_p, slot_id, recdes_p->type);
22263  }
22264 
22265  /* mark as dirty */
22266  pgbuf_set_dirty (thread_p, page_p, DONT_FREE);
22267 
22268  /* all ok */
22269  return NO_ERROR;
22270 }
22271 
22272 /*
22273  * heap_log_update_physical () - log a physical update
22274  * thread_p(in): thread entry
22275  * page_p(in): updated page
22276  * vfid_p(in): virtual file id
22277  * oid_p(in): object id
22278  * old_recdes_p(in): old record
22279  * new_recdes_p(in): new record
22280  * rcvindex(in): Index to recovery function
22281  */
22282 static void
22283 heap_log_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p, RECDES * old_recdes_p,
22284  RECDES * new_recdes_p, LOG_RCVINDEX rcvindex)
22285 {
22286  LOG_DATA_ADDR address;
22287 
22288  /* build address */
22289  address.offset = oid_p->slotid;
22290  address.pgptr = page_p;
22291  address.vfid = vfid_p;
22292 
22293  /* actual logging */
22294  if (LOG_IS_MVCC_HEAP_OPERATION (rcvindex))
22295  {
22296  HEAP_PAGE_VACUUM_STATUS vacuum_status = heap_page_get_vacuum_status (thread_p, page_p);
22297  heap_page_update_chain_after_mvcc_op (thread_p, page_p, logtb_get_current_mvccid (thread_p));
22298  if (heap_page_get_vacuum_status (thread_p, page_p) != vacuum_status)
22299  {
22300  /* Mark vacuum status change for recovery. */
22302  }
22303  }
22304 
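   /* even with no_logging, MVCC heap operations keep the undo image (vacuum relies on undo records); only the redo image is skipped */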
22305  if (thread_p->no_logging && LOG_IS_MVCC_HEAP_OPERATION (rcvindex))
22306  {
22307  log_append_undo_recdes (thread_p, rcvindex, &address, old_recdes_p);
22308  }
22309  else
22310  {
22311  log_append_undoredo_recdes (thread_p, rcvindex, &address, old_recdes_p, new_recdes_p);
22312  }
22313 }
22314 
22315 /*
22316  * heap_create_insert_context () - create an insertion context
22317  * context(in): context to set up
22318  * hfid_p(in): heap file identifier
22319  * class_oid_p(in): class OID
22320  * recdes_p(in): record descriptor to insert
22321  * scancache_p(in): scan cache to use (optional)
22322  */
22323 void
22324 heap_create_insert_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p, OID * class_oid_p, RECDES * recdes_p,
22325  HEAP_SCANCACHE * scancache_p)
22326 {
22327  assert (context != NULL);
22328  assert (hfid_p != NULL);
22329  assert (recdes_p != NULL);
22330 
22331  heap_clear_operation_context (context, hfid_p);
22332  if (class_oid_p != NULL)
22333  {
22334  COPY_OID (&context->class_oid, class_oid_p);
22335  }
22336  context->recdes_p = recdes_p;
22337  context->scan_cache_p = scancache_p;
22338  context->type = HEAP_OPERATION_INSERT;
22339  context->use_bulk_logging = false;
22340 }
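 /* Typical caller sequence (informal sketch; see heap_insert_logical () below):
  *
  *   HEAP_OPERATION_CONTEXT ctx;
  *   heap_create_insert_context (&ctx, &hfid, &class_oid, &recdes, scan_cache);
  *   error = heap_insert_logical (thread_p, &ctx, NULL);   // NULL: no home page hint
  *   // on success, ctx.res_oid holds the OID assigned to the inserted record
  */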
22341 
22342 /*
22343  * heap_create_delete_context () - create a deletion context
22344  * context(in): context to set up
22345  * hfid_p(in): heap file identifier
22346  * oid(in): identifier of object to delete
22347  * class_oid_p(in): class OID
22348  * scancache_p(in): scan cache to use (optional)
22349  */
22350 void
22351 heap_create_delete_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p, OID * oid_p, OID * class_oid_p,
22352  HEAP_SCANCACHE * scancache_p)
22353 {
22354  assert (context != NULL);
22355  assert (hfid_p != NULL);
22356  assert (oid_p != NULL);
22357  assert (class_oid_p != NULL);
22358 
22359  heap_clear_operation_context (context, hfid_p);
22360  COPY_OID (&context->oid, oid_p);
22361  COPY_OID (&context->class_oid, class_oid_p);
22362  context->scan_cache_p = scancache_p;
22363  context->type = HEAP_OPERATION_DELETE;
22364  context->use_bulk_logging = false;
22365 }
22366 
22367 /*
22368  * heap_create_update_context () - create an update operation context
22369  * context(in): context to set up
22370  * hfid_p(in): heap file identifier
22371  * oid(in): identifier of object to update
22372  * class_oid_p(in): class OID
22373  * recdes_p(in): updated record to write
22374  * scancache_p(in): scan cache to use (optional)
22375  * in_place(in): specifies the "in place" style of the update operation
22376  */
22377 void
22378 heap_create_update_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p, OID * oid_p, OID * class_oid_p,
22379  RECDES * recdes_p, HEAP_SCANCACHE * scancache_p, UPDATE_INPLACE_STYLE in_place)
22380 {
22381  assert (context != NULL);
22382  assert (hfid_p != NULL);
22383  assert (oid_p != NULL);
22384  assert (class_oid_p != NULL);
22385  assert (recdes_p != NULL);
22386 
22387  heap_clear_operation_context (context, hfid_p);
22388  COPY_OID (&context->oid, oid_p);
22389  COPY_OID (&context->class_oid, class_oid_p);
22390  context->recdes_p = recdes_p;
22391  context->scan_cache_p = scancache_p;
22392  context->type = HEAP_OPERATION_UPDATE;
22393  context->update_in_place = in_place;
22394  context->use_bulk_logging = false;
22395 }
22396 
22397 /*
22398  * heap_insert_logical () - Insert an object onto heap
22399  * context(in/out): operation context
22400  * return: error code or NO_ERROR
22401  *
22402  * Note: Insert an object onto the given file heap. The object is
22403  * inserted using the following algorithm:
22404  * 1: If the object cannot be inserted in a single page, it is
22405  * inserted in overflow as a multipage object. An overflow
22406  * relocation record is created in the heap as an address map
22407  * to the actual content of the object (the overflow address).
22408  * 2: If the object can be inserted in the last allocated page
22409  * without overpassing the reserved space on the page, the
22410  * object is placed on this page.
22411  * 3: If the object can be inserted in the hinted page without
22412  * overpassing the reserved space on the page, the object is
22413  * placed on this page.
22414  * 4: The object is inserted in a newly allocated page. Don't
22415  * worry about reserved space here.
22416  *
22417  * NOTE-1: The class object was already IX-locked during compile time
22418  * under normal situations.
22419  * However, with prepare-execute-commit-execute-... scenario,
22420  * the class object is not properly IX-locked since the previous
22421  * commit released the entire acquired locks including IX-lock.
22422  * So we have to make sure the class object is IX-locked at this
22423  * moment.
22424  */
22425 int
22426 heap_insert_logical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, PGBUF_WATCHER * home_hint_p)
22427 {
22428  bool is_mvcc_op;
22429  int rc = NO_ERROR;
22430  PERF_UTIME_TRACKER time_track;
22431  bool is_mvcc_class;
22432 
22433  /* check required input */
22434  assert (context != NULL);
22435  assert (context->type == HEAP_OPERATION_INSERT);
22436  assert (context->recdes_p != NULL);
22437  assert (!HFID_IS_NULL (&context->hfid));
22438 
22439  context->time_track = &time_track;
22440  HEAP_PERF_START (thread_p, context);
22441 
22442  /* check scancache */
22443  if (heap_scancache_check_with_hfid (thread_p, &context->hfid, &context->class_oid, &context->scan_cache_p) !=
22444  NO_ERROR)
22445  {
22446  return ER_FAILED;
22447  }
22448 
22449  is_mvcc_class = !mvcc_is_mvcc_disabled_class (&context->class_oid);
22450  /*
22451  * Determine type of operation
22452  */
22453 #if defined (SERVER_MODE)
22454  if (is_mvcc_class && context->recdes_p->type != REC_ASSIGN_ADDRESS && !context->is_bulk_op)
22455  {
22456  is_mvcc_op = true;
22457  }
22458  else
22459  {
22460  is_mvcc_op = false;
22461  }
22462 #else /* SERVER_MODE */
22463  is_mvcc_op = false;
22464 #endif /* SERVER_MODE */
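   /* i.e. MVCC insert applies only to MVCC-enabled classes, to real records (not REC_ASSIGN_ADDRESS) and outside bulk operations */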
22465 
22466  /*
22467  * Record header adjustments
22468  */
22469  if (!OID_ISNULL (&context->class_oid) && !OID_IS_ROOTOID (&context->class_oid)
22470  && context->recdes_p->type != REC_ASSIGN_ADDRESS)
22471  {
22472  if (heap_insert_adjust_recdes_header (thread_p, context, is_mvcc_class) != NO_ERROR)
22473  {
22474  return ER_FAILED;
22475  }
22476  }
22477 
22478 #if defined(ENABLE_SYSTEMTAP)
22479  CUBRID_OBJ_INSERT_START (&context->class_oid);
22480 #endif /* ENABLE_SYSTEMTAP */
22481 
22482  /*
22483  * Handle multipage object
22484  */
22485  if (heap_insert_handle_multipage_record (thread_p, context) != NO_ERROR)
22486  {
22487  rc = ER_FAILED;
22488  goto error;
22489  }
22490 
22491  if (context->is_bulk_op)
22492  {
22493  // In case of bulk insert we need to skip the IX lock on class and make sure that we have BU_LOCK acquired.
22495  }
22496  else
22497  {
22498  /*
22499  * Locking
22500  */
22501  /* make sure we have IX_LOCK on class see [NOTE-1] */
22502  if (lock_object (thread_p, &context->class_oid, oid_Root_class_oid, IX_LOCK, LK_UNCOND_LOCK) != LK_GRANTED)
22503  {
22504  return ER_FAILED;
22505  }
22506  }
22507 
22508  /* get insert location (includes locking) */
22509  if (heap_get_insert_location_with_lock (thread_p, context, home_hint_p) != NO_ERROR)
22510  {
22511  return ER_FAILED;
22512  }
22513 
22514  HEAP_PERF_TRACK_PREPARE (thread_p, context);
22515 
22516  /*
22517  * Physical insertion
22518  */
22519  if (heap_insert_physical (thread_p, context) != NO_ERROR)
22520  {
22521  rc = ER_FAILED;
22522  goto error;
22523  }
22524 
22525  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
22526 
22527  /*
22528  * Operation logging
22529  */
22530  if (!context->use_bulk_logging)
22531  {
22532  heap_log_insert_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->res_oid,
22533  context->recdes_p, is_mvcc_op, context->is_redistribute_insert_with_delid);
22534  }
22535 
22536  HEAP_PERF_TRACK_LOGGING (thread_p, context);
22537 
22538  /* mark insert page as dirty */
22539  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
22540 
22541  /*
22542  * Page unfix or caching
22543  */
22544  if (context->scan_cache_p != NULL && context->scan_cache_p->cache_last_fix_page == true
22545  && (context->home_page_watcher_p == &context->home_page_watcher || context->home_page_watcher_p == home_hint_p))
22546  {
22547  /* cache */
22548  assert (context->home_page_watcher_p->pgptr != NULL);
22549  pgbuf_replace_watcher (thread_p, context->home_page_watcher_p, &context->scan_cache_p->page_watcher);
22550  }
22551  else
22552  {
22553  /* unfix */
22554  pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
22555  }
22556 
22557  /* unfix other pages */
22558  heap_unfix_watchers (thread_p, context);
22559 
22560  /*
22561  * Class creation case
22562  */
22563  if (context->recdes_p->type != REC_ASSIGN_ADDRESS && HFID_EQ ((&context->hfid), &(heap_Classrepr->rootclass_hfid)))
22564  {
22565  if (heap_mark_class_as_modified (thread_p, &context->res_oid, or_chn (context->recdes_p), false) != NO_ERROR)
22566  {
22567  rc = ER_FAILED;
22568  goto error;
22569  }
22570  }
22571 
22572  if (context->recdes_p->type == REC_HOME)
22573  {
22575  }
22576  else if (context->recdes_p->type == REC_BIGONE)
22577  {
22579  }
22580  else
22581  {
22583  }
22584 
22585 error:
22586 
22587 #if defined(ENABLE_SYSTEMTAP)
22588  CUBRID_OBJ_INSERT_END (&context->class_oid, (rc < 0));
22589 #endif /* ENABLE_SYSTEMTAP */
22590 
22591  /* all ok */
22592  return rc;
22593 }
22594 
22595 /*
22596  * heap_delete_logical () - Delete an object from heap file
22597  * thread_p(in): thread entry
22598  * context(in): operation context
22599  * return: error code or NO_ERROR
22600  *
22601  * Note: Delete the object associated with the given OID from the given
22602  * heap file. If the object has been relocated or stored in
22603  * overflow, both the relocation and the relocated record are deleted.
22604  */
22605 int
22606 heap_delete_logical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
22607 {
22608  bool is_mvcc_op;
22609  int rc = NO_ERROR;
22610  PERF_UTIME_TRACKER time_track;
22611 
22612  /*
22613  * Check input
22614  */
22615  assert (context != NULL);
22616  assert (context->type == HEAP_OPERATION_DELETE);
22617  assert (!HFID_IS_NULL (&context->hfid));
22618  assert (!OID_ISNULL (&context->oid));
22619 
22620  context->time_track = &time_track;
22621  HEAP_PERF_START (thread_p, context);
22622 
22623  /* check input OID validity */
22624  if (heap_is_valid_oid (thread_p, &context->oid) != NO_ERROR)
22625  {
22626  return ER_FAILED;
22627  }
22628 
22629  /* check scancache */
22630  if (heap_scancache_check_with_hfid (thread_p, &context->hfid, &context->class_oid, &context->scan_cache_p) !=
22631  NO_ERROR)
22632  {
22633  return ER_FAILED;
22634  }
22635 
22636  /* check file type */
22637  context->file_type = heap_get_file_type (thread_p, context);
22638  if (context->file_type != FILE_HEAP && context->file_type != FILE_HEAP_REUSE_SLOTS)
22639  {
22640  if (context->file_type == FILE_UNKNOWN_TYPE)
22641  {
22642  ASSERT_ERROR_AND_SET (rc);
22643  if (rc == ER_INTERRUPTED)
22644  {
22645  return rc;
22646  }
22647  }
22649  return ER_FAILED;
22650  }
22651 
22652  /*
22653  * Class deletion case
22654  */
22655  if (HFID_EQ (&context->hfid, &(heap_Classrepr->rootclass_hfid)))
22656  {
22657  if (heap_mark_class_as_modified (thread_p, &context->oid, NULL_CHN, true) != NO_ERROR)
22658  {
22659  return ER_FAILED;
22660  }
22661  }
22662 
22663  /*
22664  * Determine type of operation
22665  */
22666 #if defined (SERVER_MODE)
22667  if (mvcc_is_mvcc_disabled_class (&context->class_oid))
22668  {
22669  is_mvcc_op = false;
22670  }
22671  else
22672  {
22673  is_mvcc_op = true;
22674  }
22675 #else /* SERVER_MODE */
22676  is_mvcc_op = false;
22677 #endif /* SERVER_MODE */
22678 
22679 #if defined(ENABLE_SYSTEMTAP)
22680  CUBRID_OBJ_DELETE_START (&context->class_oid);
22681 #endif /* ENABLE_SYSTEMTAP */
22682 
22683  /*
22684  * Fetch object's page and check record type
22685  */
22686  if (heap_get_record_location (thread_p, context) != NO_ERROR)
22687  {
22688  rc = ER_FAILED;
22689  goto error;
22690  }
22691 
22692  context->record_type = spage_get_record_type (context->home_page_watcher_p->pgptr, context->oid.slotid);
22693  if (context->record_type == REC_UNKNOWN)
22694  {
22696  context->oid.slotid);
22697  rc = ER_FAILED;
22698  goto error;
22699  }
22700 
22701  /* fetch record to be deleted */
22702  context->home_recdes.area_size = DB_PAGESIZE;
22704  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &context->home_recdes, COPY)
22705  != S_SUCCESS)
22706  {
22707  rc = ER_FAILED;
22708  goto error;
22709  }
22710 
22711  HEAP_PERF_TRACK_PREPARE (thread_p, context);
22712 
22713  /*
22714  * Physical deletion and logging
22715  */
22716  switch (context->record_type)
22717  {
22718  case REC_BIGONE:
22719  rc = heap_delete_bigone (thread_p, context, is_mvcc_op);
22720  break;
22721 
22722  case REC_RELOCATION:
22723  rc = heap_delete_relocation (thread_p, context, is_mvcc_op);
22724  break;
22725 
22726  case REC_HOME:
22727  case REC_ASSIGN_ADDRESS:
22728  rc = heap_delete_home (thread_p, context, is_mvcc_op);
22729  break;
22730 
22731  default:
22733  context->oid.slotid);
22734  rc = ER_FAILED;
22735  goto error;
22736  }
22737 
22738 error:
22739 
22740  /* unfix or keep home page */
22741  if (context->scan_cache_p != NULL && context->home_page_watcher_p == &context->home_page_watcher
22742  && context->scan_cache_p->cache_last_fix_page == true)
22743  {
22744  pgbuf_replace_watcher (thread_p, context->home_page_watcher_p, &context->scan_cache_p->page_watcher);
22745  }
22746  else
22747  {
22748  if (context->home_page_watcher_p->pgptr != NULL)
22749  {
22750  pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
22751  }
22752  }
22753 
22754  /* unfix pages */
22755  heap_unfix_watchers (thread_p, context);
22756 
22757 #if defined(ENABLE_SYSTEMTAP)
22758  CUBRID_OBJ_DELETE_END (&context->class_oid, (rc != NO_ERROR));
22759 #endif /* ENABLE_SYSTEMTAP */
22760 
22761  return rc;
22762 }
22763 
22764 /*
22765  * heap_update_logical () - update a record in a heap file
22766  * thread_p(in): thread entry
22767  * context(in): operation context
22768  * return: error code or NO_ERROR
22769  */
22770 extern int
22771 heap_update_logical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
22772 {
22773  bool is_mvcc_op;
22774  int rc = NO_ERROR;
22775  PERF_UTIME_TRACKER time_track;
22776  bool is_mvcc_class;
22777 
22778  /*
22779  * Check input
22780  */
22781  assert (context != NULL);
22782  assert (context->type == HEAP_OPERATION_UPDATE);
22783  assert (!OID_ISNULL (&context->oid));
22784  assert (!OID_ISNULL (&context->class_oid));
22785 
22786  context->time_track = &time_track;
22787  HEAP_PERF_START (thread_p, context);
22788 
22789  /* check scancache */
22790  rc = heap_scancache_check_with_hfid (thread_p, &context->hfid, &context->class_oid, &context->scan_cache_p);
22791  if (rc != NO_ERROR)
22792  {
22793  ASSERT_ERROR ();
22794  return rc;
22795  }
22796 
22797  /* check file type */
22798  context->file_type = heap_get_file_type (thread_p, context);
22799  if (context->file_type != FILE_HEAP && context->file_type != FILE_HEAP_REUSE_SLOTS)
22800  {
22801  if (context->file_type == FILE_UNKNOWN_TYPE)
22802  {
22803  ASSERT_ERROR_AND_SET (rc);
22804  if (rc == ER_INTERRUPTED)
22805  {
22806  return rc;
22807  }
22808  }
22810  return ER_GENERIC_ERROR;
22811  }
22812 
22813  /* get heap file identifier from scancache if none was provided */
22814  if (HFID_IS_NULL (&context->hfid))
22815  {
22816  if (context->scan_cache_p != NULL)
22817  {
22818  HFID_COPY (&context->hfid, &context->scan_cache_p->node.hfid);
22819  }
22820  else
22821  {
22822  er_log_debug (ARG_FILE_LINE, "heap_update: Bad interface a heap is needed");
22824  assert (false);
22825  return ER_HEAP_UNKNOWN_HEAP;
22826  }
22827  }
22828 
22829  /* check provided object identifier */
22830  rc = heap_is_valid_oid (thread_p, &context->oid);
22831  if (rc != NO_ERROR)
22832  {
22833  ASSERT_ERROR ();
22834  return rc;
22835  }
22836 
22837  /* by default, consider it old */
22838  context->is_logical_old = true;
22839 
22840  is_mvcc_class = !mvcc_is_mvcc_disabled_class (&context->class_oid);
22841  /*
22842  * Determine type of operation
22843  */
22844  is_mvcc_op = HEAP_UPDATE_IS_MVCC_OP (is_mvcc_class, context->update_in_place);
22845 #if defined (SERVER_MODE)
22846  assert ((!is_mvcc_op && HEAP_IS_UPDATE_INPLACE (context->update_in_place))
22847  || (is_mvcc_op && !HEAP_IS_UPDATE_INPLACE (context->update_in_place)));
22848  /* the update in place concept should be changed in terms of mvcc */
22849 #endif /* SERVER_MODE */
22850 
22851 #if defined(ENABLE_SYSTEMTAP)
22852  CUBRID_OBJ_UPDATE_START (&context->class_oid);
22853 #endif /* ENABLE_SYSTEMTAP */
22854 
22855  /*
22856  * Get location
22857  */
22858  rc = heap_get_record_location (thread_p, context);
22859  if (rc != NO_ERROR)
22860  {
22861  ASSERT_ERROR ();
22862  goto exit;
22863  }
22864 
22865  /* decache guessed representation */
22866  HEAP_MAYNEED_DECACHE_GUESSED_LASTREPRS (&context->oid, &context->hfid);
22867 
22868  /*
22869  * Fetch record
22870  */
22871  context->record_type = spage_get_record_type (context->home_page_watcher_p->pgptr, context->oid.slotid);
22872  if (context->record_type == REC_UNKNOWN)
22873  {
22875  context->oid.slotid);
22877  goto exit;
22878  }
22879 
22880  context->home_recdes.area_size = DB_PAGESIZE;
22882  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &context->home_recdes, COPY)
22883  != S_SUCCESS)
22884  {
22885  rc = ER_FAILED;
22886  goto exit;
22887  }
22888 
22889  /*
22890  * Adjust new record header
22891  */
22892  if (!OID_ISNULL (&context->class_oid) && !OID_IS_ROOTOID (&context->class_oid))
22893  {
22894  rc = heap_update_adjust_recdes_header (thread_p, context, is_mvcc_class);
22895  if (rc != NO_ERROR)
22896  {
22897  ASSERT_ERROR ();
22898  goto exit;
22899  }
22900  }
22901 
22902  HEAP_PERF_TRACK_PREPARE (thread_p, context);
22903 
22904  /*
22905  * Update record
22906  */
22907  switch (context->record_type)
22908  {
22909  case REC_RELOCATION:
22910  rc = heap_update_relocation (thread_p, context, is_mvcc_op);
22911  break;
22912 
22913  case REC_BIGONE:
22914  rc = heap_update_bigone (thread_p, context, is_mvcc_op);
22915  break;
22916 
22917  case REC_ASSIGN_ADDRESS:
22918  /* it's not an old record, it was inserted in this transaction */
22919  context->is_logical_old = false;
22920  /* FALLTHRU */
22921  case REC_HOME:
22922  rc = heap_update_home (thread_p, context, is_mvcc_op);
22923  break;
22924 
22925  default:
22927  context->oid.slotid);
22929  goto exit;
22930  }
22931 
22932  /* check return code of operation */
22933  if (rc != NO_ERROR)
22934  {
22935  ASSERT_ERROR ();
22936  goto exit;
22937  }
22938 
22939  /*
22940  * Class update case
22941  */
22942  if (HFID_EQ ((&context->hfid), &(heap_Classrepr->rootclass_hfid)))
22943  {
22944  rc = heap_mark_class_as_modified (thread_p, &context->oid, or_chn (context->recdes_p), false);
22945  if (rc != NO_ERROR)
22946  {
22947  ASSERT_ERROR ();
22948  goto exit;
22949  }
22950  }
22951 
22952 exit:
22953 
22954  /* unfix or cache home page */
22955  if (context->home_page_watcher_p->pgptr != NULL && context->home_page_watcher_p == &context->home_page_watcher)
22956  {
22957  if (context->scan_cache_p != NULL && context->scan_cache_p->cache_last_fix_page)
22958  {
22959  pgbuf_replace_watcher (thread_p, context->home_page_watcher_p, &context->scan_cache_p->page_watcher);
22960  }
22961  else
22962  {
22963  pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
22964  }
22965  }
22966 
22967  /* unfix pages */
22968  heap_unfix_watchers (thread_p, context);
22969 
22970 #if defined(ENABLE_SYSTEMTAP)
22971  CUBRID_OBJ_UPDATE_END (&context->class_oid, (rc != NO_ERROR));
22972 #endif /* ENABLE_SYSTEMTAP */
22973 
22974  return rc;
22975 }
22976 
22977 /*
22978  * heap_get_class_info_from_record () - get HFID from class record for the
22979  * given OID.
22980  * return: error_code
22981  * class_oid(in): class oid
22982  * hfid(out): the resulting hfid
22983  * classname(out): heap-allocated copy of the class name (optional)
22984  * NOTE!! : classname must be freed by the caller.
22985  */
22986 static int
22987 heap_get_class_info_from_record (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid, char **classname)
22988 {
22989  int error_code = NO_ERROR;
22990  RECDES recdes;
22991  HEAP_SCANCACHE scan_cache;
22992 
22993  if (class_oid == NULL || hfid == NULL)
22994  {
22995  return ER_FAILED;
22996  }
22997 
22998  (void) heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
22999 
23000  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
23001  {
23002  heap_scancache_end (thread_p, &scan_cache);
23003  return ER_FAILED;
23004  }
23005 
23006  or_class_hfid (&recdes, hfid);
23007 
23008  if (classname != NULL)
23009  {
23010  *classname = strdup (or_class_name (&recdes));
23011  }
23012 
23013  error_code = heap_scancache_end (thread_p, &scan_cache);
23014  if (error_code != NO_ERROR)
23015  {
23016  return error_code;
23017  }
23018 
23019  return error_code;
23020 }
23021 
23022 /*
23023  * heap_hfid_table_entry_alloc() - allocate a new structure for
23024  * the class OID->HFID hash
23025  * returns: new pointer or NULL on error
23026  */
23027 static void *
23028 heap_hfid_table_entry_alloc (void)
23029 {
23030  HEAP_HFID_TABLE_ENTRY *new_entry = (HEAP_HFID_TABLE_ENTRY *) malloc (sizeof (HEAP_HFID_TABLE_ENTRY));
23031 
23032  if (new_entry == NULL)
23033  {
23034  return NULL;
23035  }
23036 
23037  new_entry->classname = NULL;
23038 
23039  return (void *) new_entry;
23040 }
23041 
23042 /*
23043  * heap_hfid_table_entry_free () - free a hfid_table entry
23044  * returns: error code or NO_ERROR
23045  * entry(in): entry to free (HEAP_HFID_TABLE_ENTRY)
23046  */
23047 static int
23048 heap_hfid_table_entry_free (void *entry)
23049 {
23050  if (entry != NULL)
23051  {
23052  HEAP_HFID_TABLE_ENTRY *entry_p = (HEAP_HFID_TABLE_ENTRY *) entry;
23053 
23054  // Clear the classname.
23055  if (entry_p->classname != NULL)
23056  {
23057  free (entry_p->classname);
23058  entry_p->classname = NULL;
23059  }
23060 
23061  free (entry);
23062  return NO_ERROR;
23063  }
23064  else
23065  {
23066  return ER_FAILED;
23067  }
23068 }
23069 
23070 /*
23071  * heap_hfid_table_entry_init () - initialize a hfid_table entry
23072  * returns: error code or NO_ERROR
23073  * entry(in): hfid_table entry
23074  */
23075 static int
23076 heap_hfid_table_entry_init (void *entry)
23077 {
23078  HEAP_HFID_TABLE_ENTRY *entry_p = (HEAP_HFID_TABLE_ENTRY *) entry;
23079 
23080  if (entry_p == NULL)
23081  {
23082  return ER_FAILED;
23083  }
23084 
23085  /* initialize fields */
23086  OID_SET_NULL (&entry_p->class_oid);
23087  entry_p->hfid.vfid.fileid = NULL_FILEID;
23088  entry_p->hfid.vfid.volid = NULL_VOLID;
23089  entry_p->hfid.hpgid = NULL_PAGEID;
23090  entry_p->ftype = FILE_UNKNOWN_TYPE;
23091  entry_p->classname = NULL;
23092 
23093  return NO_ERROR;
23094 }
23095 
23096 static int
23098 {
23099  HEAP_HFID_TABLE_ENTRY *entry_p = (HEAP_HFID_TABLE_ENTRY *) entry;
23100  if (entry_p->classname != NULL)
23101  {
23102  free (entry_p->classname);
23103  entry_p->classname = NULL;
23104  }
23105  return NO_ERROR;
23106 }
23107 
23108 /*
23109  * heap_hfid_table_entry_key_copy () - copy a hfid_table key
23110  * returns: error code or NO_ERROR
23111  * src(in): source
23112  * dest(in): destination
23113  */
23114 static int
23115 heap_hfid_table_entry_key_copy (void *src, void *dest)
23116 {
23117  if (src == NULL || dest == NULL)
23118  {
23119  return ER_FAILED;
23120  }
23121 
23122  COPY_OID ((OID *) dest, (OID *) src);
23123 
23124  /* all ok */
23125  return NO_ERROR;
23126 }
23127 
23128 /*
23129  * heap_hfid_table_entry_key_hash () - hashing function for the class OID->HFID
23130  * hash table
23131  * return: int
23132  * key(in): Session key
23133  * hash_table_size(in): Memory Hash Table Size
23134  *
23135  * Note: Generate a hash number for the given key for the given hash table
23136  * size.
23137  */
23138 static unsigned int
23139 heap_hfid_table_entry_key_hash (void *key, int hash_table_size)
23140 {
23141  return ((unsigned int) OID_PSEUDO_KEY ((OID *) key)) % hash_table_size;
23142 }
23143 
23144 /*
23145  * heap_hfid_table_entry_key_compare () - Compare two global unique
23146  * statistics keys (OIDs)
23147  * return: int (0 if the two keys are equal, 1 otherwise)
23148  * k1 (in) : First OID key
23149  * k2 (in) : Second OID key
23150  */
23151 static int
23152 heap_hfid_table_entry_key_compare (void *k1, void *k2)
23153 {
23154  OID *key1, *key2;
23155 
23156  key1 = (OID *) k1;
23157  key2 = (OID *) k2;
23158 
23159  if (k1 == NULL || k2 == NULL)
23160  {
23161  /* should not happen */
23162  assert (false);
23163  return 0;
23164  }
23165 
23166  if (OID_EQ (key1, key2))
23167  {
23168  /* equal */
23169  return 0;
23170  }
23171  else
23172  {
23173  /* not equal */
23174  return 1;
23175  }
23176 }
23177 
23178 /*
23179  * heap_initialize_hfid_table () - Creates and initializes global structure
23180  * for global class OID->HFID hash table
23181  * return: error code
23182  * thread_p (in) :
23183  */
23184 int
23185 heap_initialize_hfid_table (void)
23186 {
23187  int ret = NO_ERROR;
23188  LF_ENTRY_DESCRIPTOR *edesc = NULL;
23189 
23190  if (heap_Hfid_table != NULL)
23191  {
23192  return NO_ERROR;
23193  }
23194 
23195  edesc = &heap_Hfid_table_area.hfid_hash_descriptor;
23196 
23197  edesc->of_local_next = offsetof (HEAP_HFID_TABLE_ENTRY, stack);
23198  edesc->of_next = offsetof (HEAP_HFID_TABLE_ENTRY, next);
23199  edesc->of_del_tran_id = offsetof (HEAP_HFID_TABLE_ENTRY, del_id);
23200  edesc->of_key = offsetof (HEAP_HFID_TABLE_ENTRY, class_oid);
23201  edesc->of_mutex = 0;
23210  edesc->f_duplicate = NULL;
23211 
23212  /* initialize freelist */
23213  ret = lf_freelist_init (&heap_Hfid_table_area.hfid_hash_freelist, 1, 100, edesc, &hfid_table_Ts);
23214  if (ret != NO_ERROR)
23215  {
23216  return ret;
23217  }
23218 
23219  /* initialize hash table */
23220  ret =
23221  lf_hash_init (&heap_Hfid_table_area.hfid_hash, &heap_Hfid_table_area.hfid_hash_freelist, HEAP_HFID_HASH_SIZE,
23222  edesc);
23223  if (ret != NO_ERROR)
23224  {
23225  lf_hash_destroy (&heap_Hfid_table_area.hfid_hash);
23226  return ret;
23227  }
23228 
23230 
23231  heap_Hfid_table = &heap_Hfid_table_area;
23232 
23233  return ret;
23234 }
23235 
23236 /*
23237  * heap_finalize_hfid_table () - Finalize class OID->HFID hash table
23238  * return: error code
23239  * thread_p (in) :
23240  */
23241 void
23242 heap_finalize_hfid_table (void)
23243 {
23244  if (heap_Hfid_table != NULL)
23245  {
23246  /* destroy hash and freelist */
23247  lf_hash_destroy (&heap_Hfid_table->hfid_hash);
23248  lf_freelist_destroy (&heap_Hfid_table->hfid_hash_freelist);
23249 
23250  heap_Hfid_table = NULL;
23251  }
23252 }
23253 
23254 /*
23255  * heap_delete_hfid_from_cache () - deletes the entry associated with
23256  * the given class OID from the hfid table
23257  * return: error code
23258  * thread_p (in) :
23259  * class_oid (in) : the class OID for which the entry will be deleted
23260  */
23261 int
23262 heap_delete_hfid_from_cache (THREAD_ENTRY * thread_p, OID * class_oid)
23263 {
23265  int error = NO_ERROR;
23266  int success = 0;
23267 
23268  error = lf_hash_delete (t_entry, &heap_Hfid_table->hfid_hash, class_oid, &success);
23269  heap_hfid_table_log (thread_p, class_oid, "heap_delete_hfid_from_cache success=%d", success);
23270 
23271  return error;
23272 }
23273 
23274 /*
23275  * heap_vacuum_all_objects () - Vacuum all objects in heap.
23276  *
23277  * return : Error code.
23278  * thread_p (in) : Thread entry.
23279  * upd_scancache(in) : Update scan cache
23280  * threshold_mvccid(in) : Threshold MVCCID
23281  */
23282 int
23283 heap_vacuum_all_objects (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * upd_scancache, MVCCID threshold_mvccid)
23284 {
23285  PGBUF_WATCHER pg_watcher;
23286  PGBUF_WATCHER old_pg_watcher;
23287  VPID next_vpid, vpid;
23288  VACUUM_WORKER worker;
23289  int max_num_slots, i;
23290  OID temp_oid;
23291  bool reusable;
23292  int error_code = NO_ERROR;
23293 
23294  assert (upd_scancache != NULL);
23295  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, &upd_scancache->node.hfid);
23296  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, &upd_scancache->node.hfid);
23297  memset (&worker, 0, sizeof (worker));
23298  max_num_slots = IO_MAX_PAGE_SIZE / sizeof (SPAGE_SLOT);
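   /* the stack-local worker mimics a vacuum worker; heap_objects is sized for the maximum number of slots a single page can hold, so one pass per page is enough */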
23299  worker.heap_objects = (VACUUM_HEAP_OBJECT *) malloc (max_num_slots * sizeof (VACUUM_HEAP_OBJECT));
23300  if (worker.heap_objects == NULL)
23301  {
23303  max_num_slots * sizeof (VACUUM_HEAP_OBJECT));
23304  error_code = ER_OUT_OF_VIRTUAL_MEMORY;
23305  goto exit;
23306  }
23307  worker.heap_objects_capacity = max_num_slots;
23308  worker.n_heap_objects = 0;
23309 
23310  next_vpid.volid = upd_scancache->node.hfid.vfid.volid;
23311  next_vpid.pageid = upd_scancache->node.hfid.hpgid;
23312  for (i = 0; i < max_num_slots; i++)
23313  {
23314  VFID_COPY (&worker.heap_objects[i].vfid, &upd_scancache->node.hfid.vfid);
23315  }
23316 
23317  reusable = heap_is_reusable_oid (upd_scancache->file_type);
23318  while (!VPID_ISNULL (&next_vpid))
23319  {
23320  vpid = next_vpid;
23321  error_code = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &pg_watcher);
23322  if (error_code != NO_ERROR)
23323  {
23324  goto exit;
23325  }
23326 
23327  (void) pgbuf_check_page_ptype (thread_p, pg_watcher.pgptr, PAGE_HEAP);
23328 
23329  if (old_pg_watcher.pgptr != NULL)
23330  {
23331  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
23332  }
23333 
23334  error_code = heap_vpid_next (thread_p, &upd_scancache->node.hfid, pg_watcher.pgptr, &next_vpid);
23335  if (error_code != NO_ERROR)
23336  {
23337  assert (false);
23338  goto exit;
23339  }
23340 
23341  temp_oid.volid = vpid.volid;
23342  temp_oid.pageid = vpid.pageid;
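       /* the first slot holds the page header/chain record, so object slots start at 1 */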
23343  worker.n_heap_objects = spage_number_of_slots (pg_watcher.pgptr) - 1;
23344  if (worker.n_heap_objects > 0
23345  && heap_page_get_vacuum_status (thread_p, pg_watcher.pgptr) != HEAP_PAGE_VACUUM_NONE)
23346  {
23347  for (i = 1; i <= worker.n_heap_objects; i++)
23348  {
23349  temp_oid.slotid = i;
23350  COPY_OID (&worker.heap_objects[i - 1].oid, &temp_oid);
23351  }
23352 
23353  error_code =
23354  vacuum_heap_page (thread_p, worker.heap_objects, worker.n_heap_objects, threshold_mvccid,
23355  &upd_scancache->node.hfid, &reusable, false);
23356  if (error_code != NO_ERROR)
23357  {
23358  goto exit;
23359  }
23360  }
23361 
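       /* keep the current page fixed (as old_pg_watcher) until the next page in the chain has been fixed */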
23362  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
23363  }
23364 
23365 exit:
23366  if (pg_watcher.pgptr != NULL)
23367  {
23368  pgbuf_ordered_unfix (thread_p, &pg_watcher);
23369  }
23370  if (old_pg_watcher.pgptr != NULL)
23371  {
23372  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
23373  }
23374 
23375  if (worker.heap_objects != NULL)
23376  {
23377  free_and_init (worker.heap_objects);
23378  }
23379  return error_code;
23380 }
23381 
23382 /*
23383  * heap_cache_class_info () - Cache HFID for class object.
23384  *
23385  * return : Error code.
23386  * thread_p (in) : Thread entry.
23387  * class_oid (in) : Class OID.
23388  * hfid (in) : Heap file ID.
23389  * ftype (in) : FILE_HEAP or FILE_HEAP_REUSE_SLOTS.
23390  */
23391 int
23392 heap_cache_class_info (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid, FILE_TYPE ftype,
23393  const char *classname_in)
23394 {
23395  int error_code = NO_ERROR;
23397  HEAP_HFID_TABLE_ENTRY *entry = NULL;
23398  HFID hfid_local = HFID_INITIALIZER;
23399  char *classname_local = NULL;
23400  int inserted = 0;
23401 
23402  assert (hfid != NULL && !HFID_IS_NULL (hfid));
23403  assert (ftype == FILE_HEAP || ftype == FILE_HEAP_REUSE_SLOTS);
23404 
23405  if (class_oid == NULL || OID_ISNULL (class_oid))
23406  {
23407  /* We can't cache it. */
23408  return NO_ERROR;
23409  }
23410 
23411  error_code =
23412  lf_hash_find_or_insert (t_entry, &heap_Hfid_table->hfid_hash, (void *) class_oid, (void **) &entry, &inserted);
23413  if (error_code != NO_ERROR)
23414  {
23415  assert (false);
23416  return error_code;
23417  }
23418  // NOTE: no collisions are expected when heap_cache_class_info is called
23419 
23420  assert (entry != NULL);
23421  assert (entry->hfid.hpgid == NULL_PAGEID);
23422 
23423  HFID_COPY (&entry->hfid, hfid);
23424  if (classname_in != NULL)
23425  {
23426  classname_local = strdup (classname_in);
23427  }
23428  else
23429  {
23430  error_code = heap_get_class_info_from_record (thread_p, class_oid, &hfid_local, &classname_local);
23431  if (error_code != NO_ERROR)
23432  {
23433  ASSERT_ERROR ();
23434  lf_tran_end_with_mb (t_entry);
23435 
23436  // remove from hash
23437  int success = 0;
23438  if (lf_hash_delete (t_entry, &heap_Hfid_table->hfid_hash, (void *) class_oid, &success) != NO_ERROR)
23439  {
23440  assert (false);
23441  }
23442  assert (success);
23443 
23444  heap_hfid_table_log (thread_p, class_oid, "heap_cache_class_info failed error=%d", error_code);
23445 
23446  if (classname_local != NULL)
23447  {
23448  free (classname_local);
23449  }
23450 
23451  return error_code;
23452  }
23453  }
23454 
23455  entry->ftype = ftype;
23456 
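   /* publish the class name last; if another thread already set it, keep the existing pointer and free our local copy */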
23457  char *dummy_null = NULL;
23458  if (!entry->classname.compare_exchange_strong (dummy_null, classname_local))
23459  {
23460  free (classname_local);
23461  }
23462 
23463  lf_tran_end_with_mb (t_entry);
23464 
23465  heap_hfid_table_log (thread_p, class_oid, "heap_cache_class_info hfid=%d|%d|%d, ftype=%s, classname = %s",
23466  HFID_AS_ARGS (hfid), file_type_to_string (ftype), classname_local);
23467 
23468  /* Successfully cached. */
23469  return NO_ERROR;
23470 }
23471 
23472 /*
23473  * heap_hfid_cache_get () - returns the HFID of the
23474  * class with the given class OID
23475  * return: error code
23476  * thread_p (in) :
23477  * class OID (in) : the class OID for which the entry will be returned
23478  * hfid_out (out):
23479  *
23480  * Note: if the entry is not found, one will be inserted and the HFID is
23481  * retrieved from the class record.
23482  */
23483 static int
23484 heap_hfid_cache_get (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid_out, FILE_TYPE * ftype_out,
23485  char **classname_out)
23486 {
23487  int error_code = NO_ERROR;
23489  HEAP_HFID_TABLE_ENTRY *entry = NULL;
23490  char *classname_local = NULL;
23491  int inserted = 0;
23492 
23493  assert (class_oid != NULL && !OID_ISNULL (class_oid));
23494 
23495  error_code =
23496  lf_hash_find_or_insert (t_entry, &heap_Hfid_table->hfid_hash, (void *) class_oid, (void **) &entry, &inserted);
23497  if (error_code != NO_ERROR)
23498  {
23499  ASSERT_ERROR ();
23500  return error_code;
23501  }
23502  assert (entry != NULL);
23503 
23504  /* Here we check only the classname because this is the last field to be populated by other possible concurrent
23505  * inserters. This means that if this field is already set by someone else, then the entry data is already
23506  * mature so we don't need to add data again.
23507  */
23508  if (entry->classname == NULL)
23509  {
23510  HFID hfid_local = HFID_INITIALIZER;
23511 
23512  /* root HFID should already be added. */
23513  if (OID_IS_ROOTOID (class_oid))
23514  {
23515  assert_release (false);
23516  boot_find_root_heap (&entry->hfid);
23517  entry->ftype = FILE_HEAP;
23518  lf_tran_end_with_mb (t_entry);
23519  return NO_ERROR;
23520  }
23521 
23522  /* this is either a newly inserted entry or one with incomplete information that is currently being filled by
23523  * another transaction. We need to retrieve the HFID from the class record. We do not care that we are
23524  * overwriting the information, since it must always be the same (the HFID never changes for the same class OID). */
23525  error_code = heap_get_class_info_from_record (thread_p, class_oid, &hfid_local, &classname_local);
23526  if (error_code != NO_ERROR)
23527  {
23528  ASSERT_ERROR ();
23529  lf_tran_end_with_mb (t_entry);
23530 
23531  // remove entry
23532  lf_hash_delete (t_entry, &heap_Hfid_table->hfid_hash, (void *) class_oid, NULL);
23533 
23534  heap_hfid_table_log (thread_p, class_oid, "heap_hfid_cache_get failed error = %d", error_code);
23535  return error_code;
23536  }
23537  entry->hfid = hfid_local;
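       /* hfid and classname are published here; ftype may still be FILE_UNKNOWN_TYPE and is resolved below */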
23538 
23539  char *dummy_null = NULL;
23540 
23541  if (!entry->classname.compare_exchange_strong (dummy_null, classname_local))
23542  {
23543  // somebody else has set it
23544  free (classname_local);
23545  }
23546  }
23547 
23548  assert (entry->hfid.hpgid != NULL_PAGEID && entry->hfid.vfid.fileid != NULL_FILEID
23549  && entry->hfid.vfid.volid != NULL_VOLID && entry->classname != NULL);
23550 
23551  if (entry->ftype == FILE_UNKNOWN_TYPE)
23552  {
23553  FILE_TYPE ftype_local;
23554  error_code = file_get_type (thread_p, &entry->hfid.vfid, &ftype_local);
23555  if (error_code != NO_ERROR)
23556  {
23557  ASSERT_ERROR ();
23558  lf_tran_end_with_mb (t_entry);
23559 
23560  // remove entry
23561  lf_hash_delete (t_entry, &heap_Hfid_table->hfid_hash, (void *) class_oid, NULL);
23562 
23563  heap_hfid_table_log (thread_p, class_oid, "heap_hfid_cache_get failed error = %d", error_code);
23564  return error_code;
23565  }
23566  entry->ftype = ftype_local;
23567  }
23568  assert (entry->ftype == FILE_HEAP || entry->ftype == FILE_HEAP_REUSE_SLOTS);
23569 
23570  if (hfid_out != NULL)
23571  {
23572  *hfid_out = entry->hfid;
23573  }
23574  if (ftype_out != NULL)
23575  {
23576  *ftype_out = entry->ftype;
23577  }
23578  if (classname_out != NULL)
23579  {
23580  *classname_out = entry->classname;
23581  }
23582 
23583  lf_tran_end_with_mb (t_entry);
23584 
23585  heap_hfid_table_log (thread_p, class_oid, "heap_hfid_cache_get hfid=%d|%d|%d, ftype = %s, classname = %s",
23586  HFID_AS_ARGS (&entry->hfid), file_type_to_string (entry->ftype), entry->classname.load ());
23587  return error_code;
23588 }
23589 
23590 /*
23591  * heap_page_update_chain_after_mvcc_op () - Update max MVCCID and vacuum
23592  * status in heap page chain after
23593  * an MVCC op is executed.
23594  *
23595  * return : Void.
23596  * thread_p (in) : Thread entry.
23597  * heap_page (in) : Heap page.
23598  * mvccid (in) : MVCC op MVCCID.
23599  */
23600 static void
23601 heap_page_update_chain_after_mvcc_op (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid)
23602 {
23603  HEAP_CHAIN *chain;
23604  RECDES chain_recdes;
23605  HEAP_PAGE_VACUUM_STATUS vacuum_status;
23606 
23607  assert (heap_page != NULL);
23608  assert (MVCCID_IS_NORMAL (mvccid));
23609 
23610   /* Two actions are being done here:
23611    * 1. Update vacuum status: HEAP_PAGE_VACUUM_NONE + 1 mvcc op => HEAP_PAGE_VACUUM_ONCE;
23612    *    HEAP_PAGE_VACUUM_ONCE + 1 mvcc op => HEAP_PAGE_VACUUM_UNKNOWN (because the future becomes unpredictable);
23613    *    HEAP_PAGE_VACUUM_UNKNOWN + 1 mvcc op => HEAP_PAGE_VACUUM_ONCE if we can tell the page was vacuumed, otherwise it stays HEAP_PAGE_VACUUM_UNKNOWN.
23614    * 2. Update max MVCCID if the new MVCCID is bigger. */
23615 
23616  /* Get heap chain. */
23617  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
23618  {
23619  assert_release (false);
23620  return;
23621  }
23622  if (chain_recdes.length != sizeof (HEAP_CHAIN))
23623  {
23624  /* Heap header page. Do nothing. */
23625  assert (chain_recdes.length == sizeof (HEAP_HDR_STATS));
23626  return;
23627  }
23628  chain = (HEAP_CHAIN *) chain_recdes.data;
23629 
23630  /* Update vacuum status. */
23631  vacuum_status = HEAP_PAGE_GET_VACUUM_STATUS (chain);
23632  switch (vacuum_status)
23633  {
23634  case HEAP_PAGE_VACUUM_NONE:
23635  /* Change status to one vacuum. */
23636  assert (MVCC_ID_PRECEDES (chain->max_mvccid, mvccid));
23639  "Changed vacuum status for page %d|%d, lsa=%lld|%d from no vacuum to vacuum once.",
23640  PGBUF_PAGE_STATE_ARGS (heap_page));
23641  break;
23642 
23643  case HEAP_PAGE_VACUUM_ONCE:
23644  /* Change status to unknown number of vacuums. */
23647  "Changed vacuum status for page %d|%d, lsa=%lld|%d from vacuum once to unknown.",
23648  PGBUF_PAGE_STATE_ARGS (heap_page));
23649  break;
23650 
23651     case HEAP_PAGE_VACUUM_UNKNOWN:
23652       /* Was page completely vacuumed? We can tell if current max_mvccid precedes vacuum data's oldest mvccid. */
23654  {
23655  /* Now page must be vacuumed once, due to new MVCC op. */
23658  "Changed vacuum status for page %d|%d, lsa=%lld|%d from unknown to vacuum once.",
23659  PGBUF_PAGE_STATE_ARGS (heap_page));
23660  }
23661  else
23662  {
23663  /* Status remains the same. Number of vacuums needed still cannot be predicted. */
23664  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Vacuum status for page %d|%d, %lld|%d remains unknown.",
23665  PGBUF_PAGE_STATE_ARGS (heap_page));
23666  }
23667  break;
23668  default:
23669  assert_release (false);
23670  break;
23671  }
23672 
23673  /* Update max_mvccid. */
23674  if (MVCC_ID_PRECEDES (chain->max_mvccid, mvccid))
23675  {
23676  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Update max MVCCID for page %d|%d from %llu to %llu.",
23677  PGBUF_PAGE_VPID_AS_ARGS (heap_page), (unsigned long long int) chain->max_mvccid,
23678  (unsigned long long int) mvccid);
23679  chain->max_mvccid = mvccid;
23680  }
23681 }
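/* Editor's note: the following standalone sketch (not CUBRID code) mirrors the
 * vacuum-status transitions implemented by the switch above; the enum and
 * function names are illustrative stand-ins, and "was_vacuumed" stands in for
 * the check whether the page's max MVCCID was already vacuumed. */
#include <stdio.h>

enum demo_vacuum_status { DEMO_VAC_NONE, DEMO_VAC_ONCE, DEMO_VAC_UNKNOWN };

/* One MVCC operation was executed on the page: compute the next status. */
static enum demo_vacuum_status
demo_next_status_after_mvcc_op (enum demo_vacuum_status cur, int was_vacuumed)
{
  switch (cur)
    {
    case DEMO_VAC_NONE:
      return DEMO_VAC_ONCE;       /* first op that will need a vacuum pass */
    case DEMO_VAC_ONCE:
      return DEMO_VAC_UNKNOWN;    /* future vacuum count becomes unpredictable */
    case DEMO_VAC_UNKNOWN:
      return was_vacuumed ? DEMO_VAC_ONCE : DEMO_VAC_UNKNOWN;
    }
  return cur;
}

int
main (void)
{
  printf ("%d\n", demo_next_status_after_mvcc_op (DEMO_VAC_NONE, 0));     /* 1 = once */
  printf ("%d\n", demo_next_status_after_mvcc_op (DEMO_VAC_UNKNOWN, 1));  /* 1 = once */
  return 0;
}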
23682 
23683 /*
23684  * heap_page_rv_chain_update () - Applies vacuum status change and max MVCCID
23685  * update in heap page chain for recovery.
23686  *
23687  * return : Void.
23688  * thread_p (in) : Thread entry.
23689  * heap_page (in) : Heap page.
23690  */
23691 static void
23692 heap_page_rv_chain_update (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid, bool vacuum_status_change)
23693 {
23694  HEAP_CHAIN *chain;
23695  RECDES chain_recdes;
23696  HEAP_PAGE_VACUUM_STATUS vacuum_status;
23697 
23698  assert (heap_page != NULL);
23699 
23700  /* Possible transitions (see heap_page_update_chain_after_mvcc_op): - HEAP_PAGE_VACUUM_NONE => HEAP_PAGE_VACUUM_ONCE.
23701  * - HEAP_PAGE_VACUUM_ONCE => HEAP_PAGE_VACUUM_UNKNOWN. - HEAP_PAGE_VACUUM_UNKNOWN => HEAP_PAGE_VACUUM_ONCE. */
23702 
23703  /* Get heap chain. */
23704  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
23705  {
23706  assert_release (false);
23707  return;
23708  }
23709  if (chain_recdes.length != sizeof (HEAP_CHAIN))
23710  {
23711  /* Header page. Don't change chain. */
23712  return;
23713  }
23714  chain = (HEAP_CHAIN *) chain_recdes.data;
23715 
23716  if (vacuum_status_change)
23717  {
23718  /* Change status. */
23719  vacuum_status = HEAP_PAGE_GET_VACUUM_STATUS (chain);
23720  switch (vacuum_status)
23721  {
23722  case HEAP_PAGE_VACUUM_NONE:
23723  case HEAP_PAGE_VACUUM_UNKNOWN:
23724  HEAP_PAGE_SET_VACUUM_STATUS (chain, HEAP_PAGE_VACUUM_ONCE);
23725 
23726  vacuum_er_log (VACUUM_ER_LOG_HEAP | VACUUM_ER_LOG_RECOVERY,
23727  "Change heap page %d|%d, lsa=%lld|%d, status from %s to once.",
23728  PGBUF_PAGE_STATE_ARGS (heap_page),
23729  vacuum_status == HEAP_PAGE_VACUUM_NONE ? "none" : "unknown");
23730  break;
23731  case HEAP_PAGE_VACUUM_ONCE:
23732  HEAP_PAGE_SET_VACUUM_STATUS (chain, HEAP_PAGE_VACUUM_UNKNOWN);
23733 
23734  vacuum_er_log (VACUUM_ER_LOG_HEAP | VACUUM_ER_LOG_RECOVERY,
23735  "Change heap page %d|%d, lsa=%lld|%d, status from once to unknown.",
23736  PGBUF_PAGE_STATE_ARGS (heap_page));
23737  break;
23738  }
23739  }
23740  if (MVCC_ID_PRECEDES (chain->max_mvccid, mvccid))
23741  {
23742  chain->max_mvccid = mvccid;
23743  }
23744 }
23745 
23746 /*
23747  * heap_page_set_vacuum_status_none () - Change vacuum status from one vacuum
23748  * required to none.
23749  *
23750  * return : Void.
23751  * thread_p (in) : Thread entry.
23752  * heap_page (in) : Heap page.
23753  */
23754 void
23755 heap_page_set_vacuum_status_none (THREAD_ENTRY * thread_p, PAGE_PTR heap_page)
23756 {
23757  HEAP_CHAIN *chain;
23758  RECDES chain_recdes;
23759 
23760  assert (heap_page != NULL);
23761 
23762  /* Updating vacuum status: - HEAP_PAGE_VACUUM_NONE => Vacuum is not expected. Fail. - HEAP_PAGE_VACUUM_ONCE + 1
23763  * vacuum => HEAP_PAGE_VACUUM_NONE. - HEAP_PAGE_VACUUM_UNKNOWN + 1 vacuum => HEAP_PAGE_VACUUM_UNKNOWN. Number of
23764  * vacuums expected is unknown and remains that way. */
23765 
23766  /* Get heap chain. */
23767  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
23768  {
23769  assert_release (false);
23770  return;
23771  }
23772  if (chain_recdes.length != sizeof (HEAP_CHAIN))
23773  {
23774  /* Heap header page. */
23775  /* Should never be here. */
23776  assert_release (false);
23777  return;
23778  }
23779  chain = (HEAP_CHAIN *) chain_recdes.data;
23780 
23781  assert (HEAP_PAGE_GET_VACUUM_STATUS (chain) == HEAP_PAGE_VACUUM_ONCE);
23782 
23783  /* Update vacuum status. */
23784  HEAP_PAGE_SET_VACUUM_STATUS (chain, HEAP_PAGE_VACUUM_NONE);
23785 
23786  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Changed vacuum status for page %d|%d from vacuum once to no vacuum.",
23787  PGBUF_PAGE_VPID_AS_ARGS (heap_page));
23788 }
23789 
23790 /*
23791  * heap_page_get_max_mvccid () - Get max MVCCID of heap page.
23792  *
23793  * return : Max MVCCID.
23794  * thread_p (in) : Thread entry.
23795  * heap_page (in) : Heap page.
23796  */
23797 MVCCID
23798 heap_page_get_max_mvccid (THREAD_ENTRY * thread_p, PAGE_PTR heap_page)
23799 {
23800  HEAP_CHAIN *chain;
23801  RECDES chain_recdes;
23802 
23803  assert (heap_page != NULL);
23804 
23805  /* Get heap chain. */
23806  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
23807  {
23808  assert_release (false);
23809  return MVCCID_NULL;
23810  }
23811  if (chain_recdes.length != sizeof (HEAP_CHAIN))
23812  {
23813  /* Heap header page. */
23814  assert (chain_recdes.length == sizeof (HEAP_HDR_STATS));
23815  return MVCCID_NULL;
23816  }
23817  chain = (HEAP_CHAIN *) chain_recdes.data;
23818 
23819  return chain->max_mvccid;
23820 }
23821 
23822 /*
23823  * heap_page_get_vacuum_status () - Get heap page vacuum status.
23824  *
23825  * return : Vacuum status.
23826  * thread_p (in) : Thread entry.
23827  * heap_page (in) : Heap page.
23828  */
23829 HEAP_PAGE_VACUUM_STATUS
23830 heap_page_get_vacuum_status (THREAD_ENTRY * thread_p, PAGE_PTR heap_page)
23831 {
23832  HEAP_CHAIN *chain;
23833  RECDES chain_recdes;
23834 
23835  assert (heap_page != NULL);
23836 
23837  /* Get heap chain. */
23838  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
23839  {
23840  assert_release (false);
23841  return HEAP_PAGE_VACUUM_UNKNOWN;
23842  }
23843  if (chain_recdes.length != sizeof (HEAP_CHAIN))
23844  {
23845  /* Heap header page. */
23846  assert (chain_recdes.length == sizeof (HEAP_HDR_STATS));
23847  return HEAP_PAGE_VACUUM_UNKNOWN;
23848  }
23849  chain = (HEAP_CHAIN *) chain_recdes.data;
23850 
23851  return HEAP_PAGE_GET_VACUUM_STATUS (chain);
23852 }
23853 
23854 /*
23855  * heap_rv_nop () - Heap recovery no op function.
23856  *
23857  * return : NO_ERROR.
23858  * thread_p (in) : Thread entry.
23859  * rcv (in) : Recovery data.
23860  */
23861 int
23862 heap_rv_nop (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
23863 {
23864  assert (rcv->pgptr != NULL);
23865  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
23866 
23867  return NO_ERROR;
23868 }
23869 
23870 /*
23871  * heap_rv_update_chain_after_mvcc_op () - Redo update of page chain after
23872  * an MVCC operation (used for
23873  * operations that are not changing
23874  * the page data, only the chain).
23875  *
23876  * return : NO_ERROR
23877  * thread_p (in) : Thread entry.
23878  * rcv (in) : Recovery data.
23879  */
23880 int
23881 heap_rv_update_chain_after_mvcc_op (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
23882 {
23883  bool vacuum_status_change = false;
23884 
23885  assert (rcv->pgptr != NULL);
23886  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));
23887 
23888  vacuum_status_change = (rcv->offset & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE) != 0;
23889  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
23890  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
23891  return NO_ERROR;
23892 }
23893 
23894 /*
23895  * heap_rv_remove_flags_from_offset () - Remove flags from recovery offset.
23896  *
23897  * return : Offset without flags.
23898  * offset (in) : Offset with flags.
23899  */
23900 INT16
23901 heap_rv_remove_flags_from_offset (INT16 offset)
23902 {
23903  return offset & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
23904 }
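/* Editor's note: standalone sketch (not CUBRID code) of the flag-in-offset
 * trick used by the recovery functions above: a status-change bit rides in the
 * high bit of the logged offset/slotid and is stripped before the slot id is
 * used. The DEMO_* names and the 0x8000 value are assumptions for illustration;
 * the real flag is HEAP_RV_FLAG_VACUUM_STATUS_CHANGE on an INT16 offset. */
#include <assert.h>

#define DEMO_FLAG_VACUUM_STATUS_CHANGE 0x8000

static int
demo_pack_offset (int slotid, int status_changed)
{
  return status_changed ? (slotid | DEMO_FLAG_VACUUM_STATUS_CHANGE) : slotid;
}

static int
demo_strip_flags (int offset)
{
  return offset & ~DEMO_FLAG_VACUUM_STATUS_CHANGE;   /* same idea as heap_rv_remove_flags_from_offset */
}

int
main (void)
{
  int packed = demo_pack_offset (42, 1);
  assert ((packed & DEMO_FLAG_VACUUM_STATUS_CHANGE) != 0);   /* recovery sees the status change */
  assert (demo_strip_flags (packed) == 42);                  /* and still recovers the slot id */
  return 0;
}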
23905 
23906 /*
23907  * heap_should_try_update_stat () - checks whether a heap statistics update
23908  * is indicated
23909  *
23910  *
23911  * return : true if statistics should be updated, false otherwise
23912  * current_freespace (in) : Current free space of the page.
23913  * prev_freespace (in) : Previous free space of the page.
23914  */
23915 bool
23916 heap_should_try_update_stat (const int current_freespace, const int prev_freespace)
23917 {
23918  if (current_freespace > prev_freespace && current_freespace > HEAP_DROP_FREE_SPACE
23919  && prev_freespace < HEAP_DROP_FREE_SPACE)
23920  {
23921  return true;
23922  }
23923  return false;
23924 }
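/* Editor's example (standalone, not CUBRID code): the check above only fires
 * when free space grows across the HEAP_DROP_FREE_SPACE threshold (30% of
 * DB_PAGESIZE, defined near the top of this file). A 16KB page is assumed
 * below just to make the numbers concrete. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGESIZE 16384
#define DEMO_DROP_FREE_SPACE ((int) (DEMO_PAGESIZE * 0.3))   /* 4915 bytes */

static bool
demo_should_try_update_stat (int current_freespace, int prev_freespace)
{
  return current_freespace > prev_freespace
    && current_freespace > DEMO_DROP_FREE_SPACE && prev_freespace < DEMO_DROP_FREE_SPACE;
}

int
main (void)
{
  printf ("%d\n", demo_should_try_update_stat (6000, 3000));   /* 1: crossed the threshold */
  printf ("%d\n", demo_should_try_update_stat (7000, 6000));   /* 0: was already above it */
  printf ("%d\n", demo_should_try_update_stat (3000, 2000));   /* 0: still below it */
  return 0;
}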
23925 
23926 /*
23927  * heap_scancache_add_partition_node () - add a new partition information to
23928  * the scan_cache's partition list.
23929  * Also sets the current node of the
23930  * scancache to this newly inserted node.
23931  *
23932  * return : error code
23933  * thread_p (in) :
23934  * scan_cache (in) :
23935  * partition_oid (in) :
23936  */
23937 static int
23938 heap_scancache_add_partition_node (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, OID * partition_oid)
23939 {
23940  HFID hfid;
23941  HEAP_SCANCACHE_NODE_LIST *new_ = NULL;
23942 
23943  assert (scan_cache != NULL);
23944 
23945  if (heap_get_class_info (thread_p, partition_oid, &hfid, NULL, NULL) != NO_ERROR)
23946  {
23947  return ER_FAILED;
23948  }
23949 
23950  new_ = (HEAP_SCANCACHE_NODE_LIST *) db_private_alloc (thread_p, sizeof (HEAP_SCANCACHE_NODE_LIST));
23951  if (new_ == NULL)
23952  {
23953  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, sizeof (HEAP_SCANCACHE_NODE_LIST));
23954  return ER_OUT_OF_VIRTUAL_MEMORY;
23955  }
23956 
23957  COPY_OID (&new_->node.class_oid, partition_oid);
23958  HFID_COPY (&new_->node.hfid, &hfid);
23959  if (scan_cache->partition_list == NULL)
23960  {
23961  new_->next = NULL;
23962  scan_cache->partition_list = new_;
23963  }
23964  else
23965  {
23966  new_->next = scan_cache->partition_list;
23967  scan_cache->partition_list = new_;
23968  }
23969 
23970  /* set the new node as the current node */
23971  HEAP_SCANCACHE_SET_NODE (scan_cache, partition_oid, &hfid);
23972 
23973  return NO_ERROR;
23974 }
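/* Editor's note: the partition list above is a plain singly linked list where
 * new nodes are pushed at the head (both branches of the if/else perform the
 * same prepend) and the scan cache then points at the new node as "current".
 * Standalone stand-in below; the demo types are not CUBRID's. */
#include <stdlib.h>

struct demo_node_list
{
  int class_id;                     /* stands in for the partition OID/HFID pair */
  struct demo_node_list *next;
};

/* Push a node at the head of the list; returns the new head or NULL on OOM. */
static struct demo_node_list *
demo_push_partition (struct demo_node_list *head, int class_id)
{
  struct demo_node_list *new_ = (struct demo_node_list *) malloc (sizeof (*new_));
  if (new_ == NULL)
    {
      return NULL;
    }
  new_->class_id = class_id;
  new_->next = head;                /* works for empty and non-empty lists alike */
  return new_;
}

int
main (void)
{
  struct demo_node_list *head = NULL;
  head = demo_push_partition (head, 1);
  head = demo_push_partition (head, 2);
  return (head != NULL && head->class_id == 2 && head->next->class_id == 1) ? 0 : 1;
}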
23975 
23976 /*
23977  * heap_mvcc_log_redistribute () - Log partition redistribute data
23978  *
23979  * return : Void.
23980  * thread_p (in) : Thread entry.
23981  * p_recdes (in) : Newly inserted record.
23982  * p_addr (in) : Log address data.
23983  */
23984 static void
23985 heap_mvcc_log_redistribute (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr)
23986 {
23987 #define HEAP_LOG_MVCC_REDISTRIBUTE_MAX_REDO_CRUMBS 4
23988 
23989  int n_redo_crumbs = 0, data_copy_offset = 0;
23990  LOG_CRUMB redo_crumbs[HEAP_LOG_MVCC_REDISTRIBUTE_MAX_REDO_CRUMBS];
23991  MVCCID delid;
23992  MVCC_REC_HEADER mvcc_rec_header;
23993  HEAP_PAGE_VACUUM_STATUS vacuum_status;
23994 
23995  assert (p_recdes != NULL);
23996  assert (p_addr != NULL);
23997 
23998  vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
23999 
24000  /* Update chain. */
24001  heap_page_update_chain_after_mvcc_op (thread_p, p_addr->pgptr, logtb_get_current_mvccid (thread_p));
24002  if (vacuum_status != heap_page_get_vacuum_status (thread_p, p_addr->pgptr))
24003  {
24004  /* Mark status change for recovery. */
24005  p_addr->offset |= HEAP_RV_FLAG_VACUUM_STATUS_CHANGE;
24006  }
24007 
24008  /* Build redo crumbs */
24009  /* Add record type */
24010  redo_crumbs[n_redo_crumbs].length = sizeof (p_recdes->type);
24011  redo_crumbs[n_redo_crumbs++].data = &p_recdes->type;
24012 
24013  if (p_recdes->type != REC_BIGONE)
24014  {
24015  or_mvcc_get_header (p_recdes, &mvcc_rec_header);
24016  assert (MVCC_IS_FLAG_SET (&mvcc_rec_header, OR_MVCC_FLAG_VALID_DELID));
24017  delid = MVCC_GET_DELID (&mvcc_rec_header);
24018  /* Add representation ID and flags field */
24019  redo_crumbs[n_redo_crumbs].length = OR_INT_SIZE;
24020  redo_crumbs[n_redo_crumbs++].data = p_recdes->data;
24021 
24022  redo_crumbs[n_redo_crumbs].length = OR_MVCCID_SIZE;
24023  redo_crumbs[n_redo_crumbs++].data = &delid;
24024 
24025  /* Set data copy offset after the record header */
24026  data_copy_offset = OR_HEADER_SIZE (p_recdes->data);
24027  }
24028 
24029  /* Add record data - the record header is skipped (via data_copy_offset) unless the record is a big one */
24030  redo_crumbs[n_redo_crumbs].length = p_recdes->length - data_copy_offset;
24031  redo_crumbs[n_redo_crumbs++].data = p_recdes->data + data_copy_offset;
24032 
24033  /* Safe guard */
24034  assert (n_redo_crumbs <= HEAP_LOG_MVCC_REDISTRIBUTE_MAX_REDO_CRUMBS);
24035 
24036  /* Append redo crumbs; undo crumbs not necessary as the spage_delete physical operation uses the offset field of the
24037  * address */
24038  log_append_undoredo_crumbs (thread_p, RVHF_MVCC_REDISTRIBUTE, p_addr, 0, n_redo_crumbs, NULL, redo_crumbs);
24039 }
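/* Editor's note: a log "crumb" is simply a (length, data) pair and the redo
 * image is the concatenation of all crumbs, which is how the function above
 * logs the record type, header fields and body without first copying them into
 * one contiguous buffer. Standalone sketch with illustrative names below. */
#include <string.h>
#include <assert.h>

struct demo_crumb
{
  int length;
  const void *data;
};

/* Concatenate crumbs into buf (caller guarantees capacity); returns total size. */
static int
demo_gather_crumbs (char *buf, const struct demo_crumb *crumbs, int n_crumbs)
{
  int total = 0;
  for (int i = 0; i < n_crumbs; i++)
    {
      memcpy (buf + total, crumbs[i].data, (size_t) crumbs[i].length);
      total += crumbs[i].length;
    }
  return total;
}

int
main (void)
{
  short rec_type = 3;
  const char body[] = "payload";
  struct demo_crumb crumbs[2] = {
    { (int) sizeof (rec_type), &rec_type },
    { (int) sizeof (body), body },
  };
  char buf[64];
  assert (demo_gather_crumbs (buf, crumbs, 2) == (int) (sizeof (rec_type) + sizeof (body)));
  return 0;
}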
24040 
24041 /*
24042  * heap_rv_mvcc_redo_redistribute () - Redo the MVCC redistribute partition data
24043  * return: int
24044  * rcv(in): Recovery structure
24045  *
24046  */
24047 int
24048 heap_rv_mvcc_redo_redistribute (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
24049 {
24050  INT16 slotid;
24051  RECDES recdes;
24052  int sp_success;
24053  MVCCID delid;
24054  MVCC_REC_HEADER mvcc_rec_header;
24055  INT16 record_type;
24056  bool vacuum_status_change = false;
24057 
24058  assert (rcv->pgptr != NULL);
24059 
24060  slotid = rcv->offset;
24061  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
24062  {
24063  vacuum_status_change = true;
24064  }
24065  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
24066  assert (slotid > 0);
24067 
24068  record_type = *(INT16 *) rcv->data;
24069  if (record_type == REC_BIGONE)
24070  {
24071  /* no data header */
24072  HEAP_SET_RECORD (&recdes, rcv->length - sizeof (record_type), rcv->length - sizeof (record_type), REC_BIGONE,
24073  rcv->data + sizeof (record_type));
24074  }
24075  else
24076  {
24077  char data_buffer[IO_DEFAULT_PAGE_SIZE + OR_MVCC_MAX_HEADER_SIZE + MAX_ALIGNMENT];
24078  int repid_and_flags, offset, mvcc_flag, offset_size;
24079 
24080  offset = sizeof (record_type);
24081 
24082  repid_and_flags = OR_GET_INT (rcv->data + offset);
24083  offset += OR_INT_SIZE;
24084 
24085  OR_GET_MVCCID (rcv->data + offset, &delid);
24086  offset += OR_MVCCID_SIZE;
24087 
24088  mvcc_flag = (char) ((repid_and_flags >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK);
24089 
24090  if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_1BYTE)
24091  {
24092  offset_size = OR_BYTE_SIZE;
24093  }
24094  else if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_2BYTE)
24095  {
24096  offset_size = OR_SHORT_SIZE;
24097  }
24098  else
24099  {
24100  offset_size = OR_INT_SIZE;
24101  }
24102 
24103  MVCC_SET_REPID (&mvcc_rec_header, repid_and_flags & OR_MVCC_REPID_MASK);
24104  MVCC_SET_FLAG (&mvcc_rec_header, mvcc_flag);
24105  MVCC_SET_INSID (&mvcc_rec_header, rcv->mvcc_id);
24106  MVCC_SET_DELID (&mvcc_rec_header, delid);
24107 
24108  HEAP_SET_RECORD (&recdes, IO_DEFAULT_PAGE_SIZE + OR_MVCC_MAX_HEADER_SIZE, 0, record_type,
24109  PTR_ALIGN (data_buffer, MAX_ALIGNMENT));
24110  or_mvcc_add_header (&recdes, &mvcc_rec_header, repid_and_flags & OR_BOUND_BIT_FLAG, offset_size);
24111 
24112  memcpy (recdes.data + recdes.length, rcv->data + offset, rcv->length - offset);
24113  recdes.length += (rcv->length - offset);
24114  }
24115 
24116  sp_success = spage_insert_for_recovery (thread_p, rcv->pgptr, slotid, &recdes);
24117 
24118  if (sp_success != SP_SUCCESS)
24119  {
24120  /* Unable to redo insertion */
24121  assert_release (false);
24122  return ER_FAILED;
24123  }
24124 
24125  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
24126  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
24127 
24128  return NO_ERROR;
24129 }
24130 
24131 /*
24132  * heap_get_visible_version_from_log () - Iterate through old versions of object until a visible object is found
24133  *
24134  * return: SCAN_CODE. Possible values:
24135  * - S_SUCCESS: for successful case when record was obtained.
24136  * - S_DOESNT_EXIST: NULL LSA was provided, otherwise a visible version should exist
24137  * - S_DOESNT_FIT: the record doesn't fit in allocated area
24138  * - S_ERROR: In case of error
24139  * thread_p (in): Thread entry.
24140  * recdes (out): Record descriptor.
24141  * previous_version_lsa (in): Log address of previous version.
24142  * scan_cache(in): Heap scan cache.
24143  */
24144 static SCAN_CODE
24145 heap_get_visible_version_from_log (THREAD_ENTRY * thread_p, RECDES * recdes, LOG_LSA * previous_version_lsa,
24146  HEAP_SCANCACHE * scan_cache, int has_chn)
24147 {
24148  LOG_LSA process_lsa;
24149  SCAN_CODE scan_code = S_SUCCESS;
24150  char log_pgbuf[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
24151  LOG_PAGE *log_page_p = NULL;
24152  MVCC_REC_HEADER mvcc_header;
24153  RECDES local_recdes;
24154  MVCC_SATISFIES_SNAPSHOT_RESULT snapshot_res;
24155  LOG_LSA oldest_prior_lsa;
24156 
24157  assert (scan_cache != NULL);
24158  assert (scan_cache->mvcc_snapshot != NULL);
24159 
24160  if (recdes == NULL)
24161  {
24162  recdes = &local_recdes;
24163  recdes->data = NULL;
24164  }
24165 
24166  /* make sure prev_version_lsa is flushed from prior lsa list - wake up log flush thread if it's not flushed */
24167  oldest_prior_lsa = *log_get_append_lsa (); /* TODO: fix atomicity issue on x86 */
24168  if (LSA_LT (&oldest_prior_lsa, previous_version_lsa))
24169  {
24170  LOG_CS_ENTER (thread_p);
24171  logpb_flush_pages_direct (thread_p);
24172  LOG_CS_EXIT (thread_p);
24173 
24174  oldest_prior_lsa = *log_get_append_lsa ();
24175  assert (!LSA_LT (&oldest_prior_lsa, previous_version_lsa));
24176  }
24177 
24178  if (recdes->data == NULL)
24179  {
24180  scan_cache->assign_recdes_to_area (*recdes);
24181  }
24182 
24183  /* check visibility of old versions from log following prev_version_lsa links */
24184  for (LSA_COPY (&process_lsa, previous_version_lsa); !LSA_ISNULL (&process_lsa);)
24185  {
24186  /* Fetch the page where prev_version_lsa is located */
24187  log_page_p = (LOG_PAGE *) PTR_ALIGN (log_pgbuf, MAX_ALIGNMENT);
24188  log_page_p->hdr.logical_pageid = NULL_PAGEID;
24189  log_page_p->hdr.offset = NULL_OFFSET;
24190  if (logpb_fetch_page (thread_p, &process_lsa, LOG_CS_SAFE_READER, log_page_p) != NO_ERROR)
24191  {
24192  assert (false);
24193  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "heap_get_visible_version_from_log");
24194  return S_ERROR;
24195  }
24196 
24197  scan_code = log_get_undo_record (thread_p, log_page_p, process_lsa, recdes);
24198  if (scan_code != S_SUCCESS)
24199  {
24200  if (scan_code == S_DOESNT_FIT && scan_cache->is_recdes_assigned_to_area (*recdes))
24201  {
24202  /* expand record area and try again */
24203  assert (recdes->length < 0);
24204  scan_cache->assign_recdes_to_area (*recdes, (size_t) (-recdes->length));
24205  /* final try to get the undo record */
24206  continue;
24207  }
24208  else
24209  {
24210  return scan_code;
24211  }
24212  }
24213 
24214  if (or_mvcc_get_header (recdes, &mvcc_header) != NO_ERROR)
24215  {
24216  assert (false);
24218  return S_ERROR;
24219  }
24220  snapshot_res = scan_cache->mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, scan_cache->mvcc_snapshot);
24221  if (snapshot_res == SNAPSHOT_SATISFIED)
24222  {
24223  /* Visible. Get record if CHN was changed. */
24224  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, has_chn))
24225  {
24226  return S_SUCCESS_CHN_UPTODATE;
24227  }
24228  return S_SUCCESS;
24229  }
24230  else if (snapshot_res == TOO_OLD_FOR_SNAPSHOT)
24231  {
24232  assert (false);
24234  return S_ERROR;
24235  }
24236  else
24237  {
24238  /* TOO_NEW_FOR_SNAPSHOT */
24239  assert (snapshot_res == TOO_NEW_FOR_SNAPSHOT);
24240  /* continue with previous version */
24241  LSA_COPY (&process_lsa, &MVCC_GET_PREV_VERSION_LSA (&mvcc_header));
24242  continue;
24243  }
24244  }
24245 
24246  /* No visible version found. */
24247  return S_DOESNT_EXIST;
24248 }
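/* Editor's note: the loop above walks prev-version LSA links backwards through
 * the undo log until it reaches a version the snapshot can see. The standalone
 * model below (not CUBRID code) keeps only insert MVCCIDs and treats "visible"
 * as inserted before the snapshot's lowest active MVCCID. */
#include <stdio.h>

struct demo_version
{
  unsigned long long insert_id;       /* MVCCID that created this version */
  const struct demo_version *prev;    /* older version, or NULL */
};

/* Return the newest version visible to the snapshot, or NULL when none is
 * visible (the S_DOESNT_EXIST outcome above). */
static const struct demo_version *
demo_find_visible (const struct demo_version *newest, unsigned long long snapshot_lowest_active)
{
  const struct demo_version *v;
  for (v = newest; v != NULL; v = v->prev)
    {
      if (v->insert_id < snapshot_lowest_active)
        {
          return v;                   /* SNAPSHOT_SATISFIED */
        }
      /* TOO_NEW_FOR_SNAPSHOT: keep following the chain */
    }
  return NULL;
}

int
main (void)
{
  struct demo_version v1 = { 100, NULL };
  struct demo_version v2 = { 205, &v1 };
  printf ("%llu\n", demo_find_visible (&v2, 200)->insert_id);   /* prints 100 */
  return 0;
}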
24249 
24250 /*
24251  * heap_get_visible_version () - get visible version, mvcc style when snapshot provided, otherwise directly from heap
24252  *
24253  * return: SCAN_CODE. Possible values:
24254  * - S_SUCCESS: for successful case when record was obtained.
24255  * - S_DOESNT_EXIST:
24256  * - S_DOESNT_FIT: the record doesn't fit in allocated area
24257  * - S_ERROR: In case of error
24258  * - S_SNAPSHOT_NOT_SATISFIED
24259  * - S_SUCCESS_CHN_UPTODATE: CHN is up to date and it's not necessary to get record again
24260  * thread_p (in): Thread entry.
24261  * oid (in): Object to be obtained.
24262  * class_oid (in):
24263  * recdes (out): Record descriptor. NULL if not needed
24264  * scan_cache(in): Heap scan cache.
24265  * ispeeking(in): Peek record or copy.
24266  * old_chn (in): Cache coherency number for existing record data. It is
24267  * used by clients to avoid resending record data when
24268  * it was not updated.
24269  * Note: this function should not be used for heap scan;
24270  */
24271 SCAN_CODE
24272 heap_get_visible_version (THREAD_ENTRY * thread_p, const OID * oid, OID * class_oid, RECDES * recdes,
24273  HEAP_SCANCACHE * scan_cache, int ispeeking, int old_chn)
24274 {
24275  SCAN_CODE scan = S_SUCCESS;
24276  HEAP_GET_CONTEXT context;
24277 
24278  heap_init_get_context (thread_p, &context, oid, class_oid, recdes, scan_cache, ispeeking, old_chn);
24279 
24280  scan = heap_get_visible_version_internal (thread_p, &context, false);
24281 
24282  heap_clean_get_context (thread_p, &context);
24283 
24284  return scan;
24285 }
24286 
24287 /*
24288 * heap_scan_get_visible_version () - get visible version, mvcc style when snapshot provided, otherwise directly from heap
24289 *
24290 * return: SCAN_CODE. Possible values:
24291 * - S_SUCCESS: for successful case when record was obtained.
24292 * - S_DOESNT_EXIST:
24293 * - S_DOESNT_FIT: the record doesn't fit in allocated area
24294 * - S_ERROR: In case of error
24295 * - S_SNAPSHOT_NOT_SATISFIED
24296 * - S_SUCCESS_CHN_UPTODATE: CHN is up to date and it's not necessary to get record again
24297 * thread_p (in): Thread entry.
24298 * oid (in): Object to be obtained.
24299 * class_oid (in):
24300 * recdes (out): Record descriptor. NULL if not needed
24301 * scan_cache(in): Heap scan cache.
24302 * ispeeking(in): Peek record or copy.
24303 * old_chn (in): Cache coherency number for existing record data. It is
24304 * used by clients to avoid resending record data when
24305 * it was not updated.
24306 * Note: this function should be used for heap scan;
24307 */
24308 SCAN_CODE
24309 heap_scan_get_visible_version (THREAD_ENTRY * thread_p, const OID * oid, OID * class_oid, RECDES * recdes,
24310  HEAP_SCANCACHE * scan_cache, int ispeeking, int old_chn)
24311 {
24312  SCAN_CODE scan = S_SUCCESS;
24313  HEAP_GET_CONTEXT context;
24314 
24315  heap_init_get_context (thread_p, &context, oid, class_oid, recdes, scan_cache, ispeeking, old_chn);
24316 
24317  scan = heap_get_visible_version_internal (thread_p, &context, true);
24318 
24319  heap_clean_get_context (thread_p, &context);
24320 
24321  return scan;
24322 }
24323 
24324 /*
24325  * heap_get_visible_version_internal () - Retrieve the visible version of an object according to snapshot
24326  *
24327  * return SCAN_CODE.
24328  * thread_p (in): Thread entry.
24329  * context (in): Heap get context.
24330  * is_heap_scan (in): required for heap_prepare_get_context
24331  */
24332 SCAN_CODE
24333 heap_get_visible_version_internal (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context, bool is_heap_scan)
24334 {
24335  SCAN_CODE scan;
24336 
24339  OID class_oid_local = OID_INITIALIZER;
24340 
24341  assert (context->scan_cache != NULL);
24342 
24343  if (context->class_oid_p == NULL)
24344  {
24345  /* we need class_oid to check if the class is mvcc enabled */
24346  context->class_oid_p = &class_oid_local;
24347  }
24348 
24349  if (context->scan_cache && context->ispeeking == COPY && context->recdes_p != NULL)
24350  {
24351  /* Allocate an area to hold the object. Assume that the object will fit in two pages, for lack of better estimates. */
24352  if (heap_scan_cache_allocate_area (thread_p, context->scan_cache, DB_PAGESIZE * 2) != NO_ERROR)
24353  {
24354  return S_ERROR;
24355  }
24356  }
24357 
24358  scan = heap_prepare_get_context (thread_p, context, is_heap_scan, LOG_WARNING_IF_DELETED);
24359  if (scan != S_SUCCESS)
24360  {
24361  goto exit;
24362  }
24363  assert (context->record_type == REC_HOME || context->record_type == REC_BIGONE
24364  || context->record_type == REC_RELOCATION);
24365  assert (context->record_type == REC_HOME
24366  || (!OID_ISNULL (&context->forward_oid) && context->fwd_page_watcher.pgptr != NULL));
24367 
24368  if (context->scan_cache != NULL && context->scan_cache->mvcc_snapshot != NULL
24369  && context->scan_cache->mvcc_snapshot->snapshot_fnc != NULL
24370  && !mvcc_is_mvcc_disabled_class (context->class_oid_p))
24371  {
24372  mvcc_snapshot = context->scan_cache->mvcc_snapshot;
24373  }
24374 
24375  if (mvcc_snapshot != NULL || context->old_chn != NULL_CHN)
24376  {
24377  /* mvcc header is needed for visibility check or chn check */
24378  scan = heap_get_mvcc_header (thread_p, context, &mvcc_header);
24379  if (scan != S_SUCCESS)
24380  {
24381  goto exit;
24382  }
24383  }
24384 
24385  if (mvcc_snapshot != NULL)
24386  {
24387  MVCC_SATISFIES_SNAPSHOT_RESULT snapshot_res;
24388 
24389  snapshot_res = mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, mvcc_snapshot);
24390  if (snapshot_res == TOO_NEW_FOR_SNAPSHOT)
24391  {
24392  /* current version is not visible, check previous versions from log and skip record get from heap */
24393  scan =
24394  heap_get_visible_version_from_log (thread_p, context->recdes_p, &MVCC_GET_PREV_VERSION_LSA (&mvcc_header),
24395  context->scan_cache, context->old_chn);
24396  goto exit;
24397  }
24398  else if (snapshot_res == TOO_OLD_FOR_SNAPSHOT)
24399  {
24400  scan = S_SNAPSHOT_NOT_SATISFIED;
24401  goto exit;
24402  }
24403  /* else...fall through to heap get */
24404  }
24405 
24406  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, context->old_chn))
24407  {
24408  /* Object version didn't change and CHN is up-to-date. Don't get record data and return
24409  * S_SUCCESS_CHN_UPTODATE instead. */
24410  scan = S_SUCCESS_CHN_UPTODATE;
24411  goto exit;
24412  }
24413 
24414  if (context->recdes_p != NULL)
24415  {
24416  scan = heap_get_record_data_when_all_ready (thread_p, context);
24417  }
24418 
24419  /* Fall through to exit. */
24420 
24421 exit:
24422  return scan;
24423 }
24424 
24425 /*
24426  * heap_update_set_prev_version () - Set prev version lsa to record according to its type.
24427  *
24428  * return : error code or NO_ERROR
24429  * thread_p (in) : Thread entry.
24430  * oid (in) : Object identifier of the updated record
24431  * home_pg_watcher (in): Home page watcher; must be already fixed.
24432  * fwd_pg_watcher (in) : Forward page watcher
24433  * prev_version_lsa(in): LSA address of undo log record of the old record
24434  *
24435  * Note: This function works only with heap_update_home/relocation/bigone functions. It is designed to set the
24436  * prev_version_lsa to updated records by overwriting this information directly into heap file. The header of the
24437  * record should be prepared for this in heap_insert_adjust_recdes_header().
24438  * The records are obtained using PEEK, and modified directly, without using spage_update afterwards!
24439  * Note: It is expected to have the home page fixed and also the forward page in case of relocation.
24440  */
24441 static int
24442 heap_update_set_prev_version (THREAD_ENTRY * thread_p, const OID * oid, PGBUF_WATCHER * home_pg_watcher,
24443  PGBUF_WATCHER * fwd_pg_watcher, LOG_LSA * prev_version_lsa)
24444 {
24445  int error_code = NO_ERROR;
24446  RECDES recdes, forward_recdes;
24447  VPID fwd_vpid;
24448  OID forward_oid;
24449  PGBUF_WATCHER overflow_pg_watcher;
24450 
24451  assert (oid != NULL && !OID_ISNULL (oid) && prev_version_lsa != NULL && !LSA_ISNULL (prev_version_lsa));
24452  assert (prev_version_lsa->pageid >= 0 && prev_version_lsa->offset >= 0);
24453 
24454  /* the home page should be already fixed */
24455  assert (home_pg_watcher != NULL && home_pg_watcher->pgptr != NULL);
24456  if (spage_get_record (thread_p, home_pg_watcher->pgptr, oid->slotid, &recdes, PEEK) != S_SUCCESS)
24457  {
24458  ASSERT_ERROR_AND_SET (error_code);
24459  goto end;
24460  }
24461 
24462  if (recdes.type == REC_HOME)
24463  {
24464  error_code = or_mvcc_set_log_lsa_to_record (&recdes, prev_version_lsa);
24465  if (error_code != NO_ERROR)
24466  {
24467  assert (false);
24468  goto end;
24469  }
24470 
24471  pgbuf_set_dirty (thread_p, home_pg_watcher->pgptr, DONT_FREE);
24472  }
24473  else if (recdes.type == REC_RELOCATION)
24474  {
24475  forward_oid = *((OID *) recdes.data);
24476  VPID_GET_FROM_OID (&fwd_vpid, &forward_oid);
24477 
24478  /* the forward page should be already fixed */
24479  assert (fwd_pg_watcher != NULL && fwd_pg_watcher->pgptr != NULL);
24480  assert (VPID_EQ (&fwd_vpid, pgbuf_get_vpid_ptr (fwd_pg_watcher->pgptr)));
24481 
24482  if (spage_get_record (thread_p, fwd_pg_watcher->pgptr, forward_oid.slotid, &forward_recdes, PEEK) != S_SUCCESS)
24483  {
24484  ASSERT_ERROR_AND_SET (error_code);
24485  goto end;
24486  }
24487 
24488  error_code = or_mvcc_set_log_lsa_to_record (&forward_recdes, prev_version_lsa);
24489  if (error_code != NO_ERROR)
24490  {
24491  assert (false);
24492  goto end;
24493  }
24494 
24495  pgbuf_set_dirty (thread_p, fwd_pg_watcher->pgptr, DONT_FREE);
24496  }
24497  else if (recdes.type == REC_BIGONE)
24498  {
24499  forward_oid = *((OID *) recdes.data);
24500 
24501  VPID_GET_FROM_OID (&fwd_vpid, &forward_oid);
24502  PGBUF_INIT_WATCHER (&overflow_pg_watcher, PGBUF_ORDERED_HEAP_OVERFLOW, PGBUF_ORDERED_NULL_HFID);
24503  PGBUF_WATCHER_COPY_GROUP (&overflow_pg_watcher, home_pg_watcher);
24504  if (pgbuf_ordered_fix (thread_p, &fwd_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &overflow_pg_watcher) != NO_ERROR)
24505  {
24506  ASSERT_ERROR_AND_SET (error_code);
24507  goto end;
24508  }
24509 
24510  forward_recdes.data = overflow_get_first_page_data (overflow_pg_watcher.pgptr);
24511  forward_recdes.length = OR_HEADER_SIZE (forward_recdes.data);
24512 
24513  error_code = or_mvcc_set_log_lsa_to_record (&forward_recdes, prev_version_lsa);
24514 
24515  /* unfix overflow page; it is used only locally */
24516  pgbuf_set_dirty (thread_p, overflow_pg_watcher.pgptr, DONT_FREE);
24517  pgbuf_ordered_unfix (thread_p, &overflow_pg_watcher);
24518 
24519  if (error_code != NO_ERROR)
24520  {
24521  assert (false);
24522  goto end;
24523  }
24524  }
24525  else
24526  {
24527  /* Unexpected record type. */
24528  assert (false);
24529  error_code = ER_FAILED;
24530  }
24531 
24532 end:
24533  return error_code;
24534 }
24535 
24536 /*
24537  * heap_get_last_version () - Generic function for retrieving last version of heap objects (not considering visibility)
24538  *
24539  * return : Scan code.
24540  * thread_p (in) : Thread entry.
24541  * context (in) : Heap get context
24542  *
24543  * NOTE: Caller must handle the cleanup of context
24544  */
24545 SCAN_CODE
24546 heap_get_last_version (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context)
24547 {
24548  SCAN_CODE scan = S_SUCCESS;
24549  MVCC_REC_HEADER mvcc_header = MVCC_REC_HEADER_INITIALIZER;
24550 
24551  assert (context->scan_cache != NULL);
24552  assert (context->recdes_p != NULL);
24553 
24554  if (context->scan_cache && context->ispeeking == COPY)
24555  {
24556  /* Allocate an area to hold the object. Assume that the object will fit in two pages, for lack of better estimates. */
24557  if (heap_scan_cache_allocate_area (thread_p, context->scan_cache, DB_PAGESIZE * 2) != NO_ERROR)
24558  {
24559  return S_ERROR;
24560  }
24561  }
24562 
24563  scan = heap_prepare_get_context (thread_p, context, false, LOG_WARNING_IF_DELETED);
24564  if (scan != S_SUCCESS)
24565  {
24566  goto exit;
24567  }
24568  assert (context->record_type == REC_HOME || context->record_type == REC_BIGONE
24569  || context->record_type == REC_RELOCATION);
24570  assert (context->record_type == REC_HOME
24571  || (!OID_ISNULL (&context->forward_oid) && context->fwd_page_watcher.pgptr != NULL));
24572 
24573  scan = heap_get_mvcc_header (thread_p, context, &mvcc_header);
24574  if (scan != S_SUCCESS)
24575  {
24576  goto exit;
24577  }
24578 
24579  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, context->old_chn))
24580  {
24581  /* Object version didn't change and CHN is up-to-date. Don't get record data and return
24582  * S_SUCCESS_CHN_UPTODATE instead. */
24583  scan = S_SUCCESS_CHN_UPTODATE;
24584  goto exit;
24585  }
24586 
24587  if (context->recdes_p != NULL)
24588  {
24589  scan = heap_get_record_data_when_all_ready (thread_p, context);
24590  }
24591 
24592  /* Fall through to exit. */
24593 
24594 exit:
24595 
24596  return scan;
24597 }
24598 
24599 /*
24600  * heap_prepare_object_page () - Check if provided page matches the page of provided OID or fix the right one.
24601  *
24602  * return : Error code.
24603  * thread_p (in) : Thread entry.
24604  * oid (in) : Object identifier.
24605  * page_watcher_p(out) : Page watcher used for page fix.
24606  * latch_mode (in) : Latch mode.
24607  */
24608 int
24609 heap_prepare_object_page (THREAD_ENTRY * thread_p, const OID * oid, PGBUF_WATCHER * page_watcher_p,
24610  PGBUF_LATCH_MODE latch_mode)
24611 {
24612  VPID object_vpid;
24613  int ret = NO_ERROR;
24614 
24615  assert (oid != NULL && !OID_ISNULL (oid));
24616 
24617  VPID_GET_FROM_OID (&object_vpid, oid);
24618 
24619  if (page_watcher_p->pgptr != NULL && !VPID_EQ (pgbuf_get_vpid_ptr (page_watcher_p->pgptr), &object_vpid))
24620  {
24621  /* unfix provided page if it does not correspond to the VPID */
24622  pgbuf_ordered_unfix (thread_p, page_watcher_p);
24623  }
24624 
24625  if (page_watcher_p->pgptr == NULL)
24626  {
24627  /* fix required page */
24628  ret = pgbuf_ordered_fix (thread_p, &object_vpid, OLD_PAGE, latch_mode, page_watcher_p);
24629  if (ret != NO_ERROR)
24630  {
24631  if (ret == ER_PB_BAD_PAGEID)
24632  {
24633  /* maybe this error could be removed */
24634  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, oid->volid, oid->pageid,
24635  oid->slotid);
24636  ret = ER_HEAP_UNKNOWN_OBJECT;
24637  }
24638 
24639  if (ret == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
24640  {
24641  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, object_vpid.volid, object_vpid.pageid);
24642  ret = ER_PAGE_LATCH_ABORTED;
24643  }
24644  }
24645  }
24646 
24647  return ret;
24648 }
24649 
24650 /*
24651  * heap_clean_get_context () - Unfix page watchers of get context and save home page to scan_cache if possible
24652  *
24653  * thread_p (in) : Thread_identifier.
24654  * context (in) : Heap get context.
24655  */
24656 void
24657 heap_clean_get_context (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context)
24658 {
24659  assert (context != NULL);
24660 
24661  if (context->scan_cache != NULL && context->scan_cache->cache_last_fix_page
24662  && context->home_page_watcher.pgptr != NULL)
24663  {
24664  /* Save home page (or NULL if it had to be unfixed) to scan_cache. */
24665  pgbuf_replace_watcher (thread_p, &context->home_page_watcher, &context->scan_cache->page_watcher);
24666  assert (context->home_page_watcher.pgptr == NULL);
24667  }
24668 
24669  if (context->home_page_watcher.pgptr)
24670  {
24671  /* Unfix home page. */
24672  pgbuf_ordered_unfix (thread_p, &context->home_page_watcher);
24673  }
24674 
24675  if (context->fwd_page_watcher.pgptr != NULL)
24676  {
24677  /* Unfix forward page. */
24678  pgbuf_ordered_unfix (thread_p, &context->fwd_page_watcher);
24679  }
24680 
24681  assert (context->home_page_watcher.pgptr == NULL && context->fwd_page_watcher.pgptr == NULL);
24682 }
24683 
24684 /*
24685  * heap_init_get_context () - Initialize all heap get context fields with generic information
24686  *
24687  * thread_p (in) : Thread_identifier.
24688  * context (out) : Heap get context.
24689  * oid (in) : Object identifier.
24690  * class_oid (in) : Class oid.
24691  * recdes (in) : Record descriptor.
24692  * scan_cache (in) : Scan cache.
24693  * is_peeking (in) : PEEK or COPY.
24694  * old_chn (in) : Cache coherency number.
24695 */
24696 void
24697 heap_init_get_context (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context, const OID * oid, OID * class_oid,
24698  RECDES * recdes, HEAP_SCANCACHE * scan_cache, int ispeeking, int old_chn)
24699 {
24700  context->oid_p = oid;
24701  context->class_oid_p = class_oid;
24702  OID_SET_NULL (&context->forward_oid);
24703  context->recdes_p = recdes;
24704 
24705  if (scan_cache != NULL && !HFID_IS_NULL (&scan_cache->node.hfid))
24706  {
24707  PGBUF_INIT_WATCHER (&context->home_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, &scan_cache->node.hfid);
24708  PGBUF_INIT_WATCHER (&context->fwd_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, &scan_cache->node.hfid);
24709  }
24710  else
24711  {
24712  PGBUF_INIT_WATCHER (&context->home_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, PGBUF_ORDERED_NULL_HFID);
24713  PGBUF_INIT_WATCHER (&context->fwd_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, PGBUF_ORDERED_NULL_HFID);
24714  }
24715 
24716  if (scan_cache != NULL && scan_cache->cache_last_fix_page && scan_cache->page_watcher.pgptr != NULL)
24717  {
24718  /* switch to local page watcher */
24719  pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &context->home_page_watcher);
24720  }
24721 
24722  context->scan_cache = scan_cache;
24723  context->ispeeking = ispeeking;
24724  context->old_chn = old_chn;
24725  if (scan_cache != NULL && scan_cache->page_latch == X_LOCK)
24726  {
24727  context->latch_mode = PGBUF_LATCH_WRITE;
24728  }
24729  else
24730  {
24731  context->latch_mode = PGBUF_LATCH_READ;
24732  }
24733 }
24734 
24735 /*
24736  * heap_scan_cache_allocate_area () - Allocate scan_cache area
24737  *
24738  * return: error code
24739  * thread_p (in) : Thread entry.
24740  * scan_cache_p (in) : Scan cache.
24741  * size (in) : Required size of recdes data.
24742  */
24743 int
24744 heap_scan_cache_allocate_area (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache_p, int size)
24745 {
24746  assert (scan_cache_p != NULL && size > 0);
24747  scan_cache_p->reserve_area ((size_t) size);
24748  return NO_ERROR;
24749 }
24750 
24751 /*
24752  * heap_scan_cache_allocate_recdes_data () - Allocate recdes data and set it to recdes
24753  *
24754  * return: error code
24755  * thread_p (in) : Thread entry.
24756  * scan_cache_p (in) : Scan cache.
24757  * recdes_p (in) : Record descriptor.
24758  * size (in) : Required size of recdes data.
24759  */
24760 static int
24761 heap_scan_cache_allocate_recdes_data (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache_p, RECDES * recdes_p,
24762  int size)
24763 {
24764  assert (scan_cache_p != NULL && recdes_p != NULL && size >= 0);
24765  scan_cache_p->assign_recdes_to_area (*recdes_p, (size_t) size);
24766  return NO_ERROR;
24767 }
24768 
24769 /*
24770  * heap_get_class_record () - Retrieves class objects only
24771  *
24772  * return SCAN_CODE: S_SUCCESS or error
24773  * thread_p (in) : Thread entry.
24774  * class_oid (in) : Class object identifier.
24775  * recdes_p (out) : Record descriptor.
24776  * scan_cache (in) : Scan cache.
24777  * ispeeking (in) : PEEK or COPY
24778  */
24779 SCAN_CODE
24780 heap_get_class_record (THREAD_ENTRY * thread_p, const OID * class_oid, RECDES * recdes_p, HEAP_SCANCACHE * scan_cache,
24781  int ispeeking)
24782 {
24783  HEAP_GET_CONTEXT context;
24784  OID root_oid = *oid_Root_class_oid;
24785  SCAN_CODE scan;
24786 
24787 #if !defined(NDEBUG)
24788  /* for debugging set root_oid NULL and check afterwards if it really is root oid */
24789  OID_SET_NULL (&root_oid);
24790 #endif /* !NDEBUG */
24791  heap_init_get_context (thread_p, &context, class_oid, &root_oid, recdes_p, scan_cache, ispeeking, NULL_CHN);
24792 
24793  scan = heap_get_last_version (thread_p, &context);
24794 
24795  heap_clean_get_context (thread_p, &context);
24796 
24797 #if !defined(NDEBUG)
24798  assert (OID_ISNULL (&root_oid) || OID_IS_ROOTOID (&root_oid));
24799 #endif /* !NDEBUG */
24800 
24801  return scan;
24802 }
24803 
24804 /*
24805  * heap_rv_undo_ovf_update - Assure undo record corresponds with vacuum status
24806  *
24807  * return : int
24808  * thread_p (in): Thread entry.
24809  * rcv (in) : Recovery structure.
24810  */
24811 int
24812 heap_rv_undo_ovf_update (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
24813 {
24814  int error_code;
24815 
24816  error_code = vacuum_rv_check_at_undo (thread_p, rcv->pgptr, NULL_SLOTID, REC_BIGONE);
24817 
24818  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
24819 
24820  return error_code;
24821 }
24822 
24823 /*
24824  * heap_get_best_space_num_stats_entries - Returns the number of best space statistics entries
24825  * return : the number of entries in the heap best space cache
24826  *
24827  */
24828 int
24829 heap_get_best_space_num_stats_entries (void)
24830 {
24831  return heap_Bestspace->num_stats_entries;
24832 }
24833 
24834 /*
24835  * heap_get_hfid_from_vfid () - Get hfid for file. Caller must be sure this file belongs to a heap.
24836  *
24837  * return : error code
24838  * thread_p (in) : thread entry
24839  * vfid (in) : file identifier
24840  * hfid (out) : heap identifier
24841  */
24842 int
24843 heap_get_hfid_from_vfid (THREAD_ENTRY * thread_p, const VFID * vfid, HFID * hfid)
24844 {
24845  VPID vpid_header;
24846  int error_code = NO_ERROR;
24847 
24848  hfid->vfid = *vfid;
24849  error_code = heap_get_header_page (thread_p, hfid, &vpid_header);
24850  if (error_code != NO_ERROR)
24851  {
24852  ASSERT_ERROR ();
24853  VFID_SET_NULL (&hfid->vfid);
24854  return error_code;
24855  }
24856  assert (hfid->vfid.volid == vpid_header.volid);
24857  hfid->hpgid = vpid_header.pageid;
24858  return NO_ERROR;
24859 }
24860 
24861 /*
24862  * heap_is_page_header () - return true if page is a heap header page. must be heap page though!
24863  *
24864  * return : true if file header page, false otherwise.
24865  * thread_p (in) : thread entry
24866  * page (in) : heap page
24867  */
24868 bool
24869 heap_is_page_header (THREAD_ENTRY * thread_p, PAGE_PTR page)
24870 {
24871  SPAGE_HEADER *spage_header;
24872  SPAGE_SLOT *slotp;
24873 
24874  /* todo: why not set a different page ptype. */
24875 
24876  assert (page != NULL && pgbuf_get_page_ptype (thread_p, page) == PAGE_HEAP);
24877 
24878  spage_header = (SPAGE_HEADER *) page;
24879  if (spage_header->num_records <= 0)
24880  {
24881  return false;
24882  }
24883  slotp = spage_get_slot (page, HEAP_HEADER_AND_CHAIN_SLOTID);
24884  if (slotp == NULL)
24885  {
24886  return false;
24887  }
24888  if (slotp->record_length == sizeof (HEAP_HDR_STATS))
24889  {
24890  return true;
24891  }
24892  return false;
24893 }
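/* Editor's note (standalone sketch, not CUBRID code): slot 0 of every heap page
 * holds either a HEAP_CHAIN record or, on the header page only, a HEAP_HDR_STATS
 * record, so the length of that record is what tells the two apart - the same
 * test used by the chain-reading functions earlier in this file. The struct
 * layouts below are invented purely for the example. */
#include <stdbool.h>
#include <stdio.h>

struct demo_chain { int prev_page, next_page; };                    /* stand-in for HEAP_CHAIN */
struct demo_hdr_stats { struct demo_chain c; int estimates[8]; };   /* stand-in for HEAP_HDR_STATS */

static bool
demo_is_header_record (size_t slot0_record_length)
{
  return slot0_record_length == sizeof (struct demo_hdr_stats);
}

int
main (void)
{
  printf ("%d\n", demo_is_header_record (sizeof (struct demo_hdr_stats)));  /* 1: header page */
  printf ("%d\n", demo_is_header_record (sizeof (struct demo_chain)));      /* 0: ordinary page */
  return 0;
}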
24894 
24895 //
24896 // C++ code
24897 //
24898 // *INDENT-OFF*
24899 static void
24900 heap_scancache_block_allocate (cubmem::block &b, size_t size)
24901 {
24902  const size_t DEFAULT_MINSIZE = (size_t) DB_PAGESIZE * 2;
24903 
24904  if (size <= DEFAULT_MINSIZE)
24905  {
24906  size = DEFAULT_MINSIZE;
24907  }
24908  else
24909  {
24910  size = DB_ALIGN (size, (size_t) DB_PAGESIZE);
24911  }
24912 
24913  if (b.ptr != NULL && b.dim >= size)
24914  {
24915  // no need to change
24916  return;
24917  }
24918 
24919  if (b.ptr == NULL)
24920  {
24921  b.ptr = (char *) db_private_alloc (NULL, size);
24922  assert (b.ptr != NULL);
24923  }
24924  else
24925  {
24926  b.ptr = (char *) db_private_realloc (NULL, b.ptr, size);
24927  assert (b.ptr != NULL);
24928  }
24929  b.dim = size;
24930 }
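/* Editor's note: the allocator above grows the scan cache buffer to at least
 * two pages, rounds larger requests up to a whole page multiple, and never
 * shrinks an existing buffer. The standalone sketch below (not CUBRID code)
 * shows only that sizing policy; a 16KB page is assumed for the numbers. */
#include <stddef.h>
#include <assert.h>

#define DEMO_PAGESIZE ((size_t) 16384)

static size_t
demo_block_size_for (size_t current_dim, size_t requested)
{
  const size_t minsize = DEMO_PAGESIZE * 2;
  size_t size = (requested <= minsize)
    ? minsize
    : ((requested + DEMO_PAGESIZE - 1) / DEMO_PAGESIZE) * DEMO_PAGESIZE;   /* page aligned */
  return (size > current_dim) ? size : current_dim;                        /* never shrink */
}

int
main (void)
{
  assert (demo_block_size_for (0, 100) == 32768);        /* small request -> two pages */
  assert (demo_block_size_for (32768, 40000) == 49152);  /* rounded up to three pages */
  assert (demo_block_size_for (49152, 100) == 49152);    /* existing larger buffer is kept */
  return 0;
}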
24931 
24932 static void
24933 heap_scancache_block_deallocate (cubmem::block &b)
24934 {
24935  db_private_free (NULL, b.ptr);
24936  b.dim = 0;
24937 }
24938 
24939 //
24940 // heap_scancache
24941 //
24942 void
24943 heap_scancache::start_area ()
24944 {
24945  m_area = NULL; // start as null; it will be allocated when it is first needed
24946 }
24947 
24948 void
24949 heap_scancache::alloc_area ()
24950 {
24951  if (m_area == NULL)
24952  {
24953  m_area = new cubmem::single_block_allocator (HEAP_SCANCACHE_BLOCK_ALLOCATOR);
24954  }
24955 }
24956 
24957 void
24958 heap_scancache::end_area ()
24959 {
24960  delete m_area;
24961  m_area = NULL;
24962 }
24963 
24964 void
24965 heap_scancache::reserve_area (size_t size /* = 0 */)
24966 {
24967  alloc_area ();
24968  m_area->reserve (size);
24969 }
24970 
24971 void
24972 heap_scancache::assign_recdes_to_area (RECDES & recdes, size_t size /* = 0 */)
24973 {
24974  reserve_area (size);
24975 
24976  recdes.data = m_area->get_ptr ();
24977  recdes.area_size = (int) m_area->get_size ();
24978 }
24979 
24980 bool
24981 heap_scancache::is_recdes_assigned_to_area (const RECDES &recdes) const
24982 {
24983  return m_area != NULL && recdes.data == m_area->get_ptr ();
24984 }
24985 
24986 const cubmem::block_allocator &
24987 heap_scancache::get_area_block_allocator ()
24988 {
24989  alloc_area ();
24990  return m_area->get_block_allocator ();
24991 }
24992 
24993 int
24994 heap_alloc_new_page (THREAD_ENTRY * thread_p, HFID * hfid, OID class_oid, PGBUF_WATCHER * home_hint_p,
24995  VPID * new_page_vpid)
24996 {
24997  int error_code = NO_ERROR;
24998  HEAP_CHAIN new_page_chain;
24999  PAGE_PTR page_ptr;
25000 
25001  assert (hfid != NULL && home_hint_p != NULL && new_page_vpid != NULL);
25002 
25003  PGBUF_INIT_WATCHER (home_hint_p, PGBUF_ORDERED_HEAP_NORMAL, hfid);
25004  // Init the heap page chain
25005  new_page_chain.class_oid = class_oid;
25006  VPID_SET_NULL (&new_page_chain.prev_vpid);
25007  VPID_SET_NULL (&new_page_chain.next_vpid);
25008  new_page_chain.max_mvccid = MVCCID_NULL;
25009  new_page_chain.flags = 0;
25010  HEAP_PAGE_SET_VACUUM_STATUS (&new_page_chain, HEAP_PAGE_VACUUM_NONE);
25011 
25012  VPID_SET_NULL (new_page_vpid);
25013 
25014  // Alloc a new page.
25015  error_code = file_alloc (thread_p, &hfid->vfid, heap_vpid_init_new, &new_page_chain, new_page_vpid, &page_ptr);
25016  if (error_code != NO_ERROR)
25017  {
25018  ASSERT_ERROR ();
25019  return error_code;
25020  }
25021 
25022  // Need to get the watcher to the new page.
25023  pgbuf_attach_watcher (thread_p, page_ptr, PGBUF_LATCH_WRITE, hfid, home_hint_p);
25024 
25025  // Make sure we have fixed the page.
25026  assert (pgbuf_is_page_fixed_by_thread (thread_p, new_page_vpid));
25027 
25028  return error_code;
25029 }
25030 
25031 int
25032 heap_nonheader_page_capacity ()
25033 {
25034  return spage_max_record_size () - sizeof (HEAP_CHAIN);
25035 }
25036 
25037 /*
25038  * heap_rv_postpone_append_pages_to_heap () - Append a list of pages to the given heap
25039  * return : Error_code
25040  * thread_p(in) : Thread_context
25041  * hfid(in) : Heap file to which we append the pages
25042  * class_oid(in) : The class identifier.
25043  * heap_pages_array(in) : Array containing VPIDs to append to the heap.
25044  *
25045  * Note: This functions also logs any operations in the pages.
25046  *
25047  */
25048 int
25049 heap_rv_postpone_append_pages_to_heap (THREAD_ENTRY * thread_p, LOG_RCV * recv)
25050 {
25051  int error_code = NO_ERROR;
25052  PGBUF_WATCHER page_watcher;
25053  PGBUF_WATCHER heap_header_watcher;
25054  PGBUF_WATCHER heap_last_page_watcher;
25055  VPID null_vpid;
25056  VPID heap_hdr_vpid;
25057  VPID heap_last_page_vpid;
25058  HEAP_HDR_STATS *heap_hdr = NULL;
25059  bool skip_last_page_links = false;
25060  VPID heap_header_next_vpid;
25061  size_t offset = 0;
25062  size_t array_size = 0;
25063  std::vector <VPID> heap_pages_array;
25064  OID class_oid;
25065  HFID hfid;
25066 
25067  /* recovery data: HFID, OID, array_size (int), array_of_VPID(array_size) */
25068  HFID_SET_NULL (&hfid);
25069  OID_SET_NULL (&class_oid);
25070 
25071  OR_GET_HFID ((recv->data + offset), &hfid);
25072  offset += DB_ALIGN (OR_HFID_SIZE, PTR_ALIGNMENT);
25073 
25074  OR_GET_OID ((recv->data + offset), &class_oid);
25075  offset += OR_OID_SIZE;
25076 
25077  int unpack_int = OR_GET_INT ((recv->data + offset));
25078  assert (unpack_int >= 0);
25079  array_size = (size_t) unpack_int;
25080  offset += OR_INT_SIZE;
25081 
25082  for (size_t i = 0; i < array_size; i++)
25083  {
25084  VPID vpid;
25085 
25086  VPID_SET_NULL (&vpid);
25087 
25088  OR_GET_VPID ((recv->data + offset), &vpid);
25089  offset += DISK_VPID_ALIGNED_SIZE;
25090 
25091  heap_pages_array.push_back (vpid);
25092  }
25093 
25094  assert (recv->length >= 0 && offset == (size_t) recv->length);
25095  assert (array_size == heap_pages_array.size ());
25096 
25097  VPID_SET_NULL (&null_vpid);
25098  VPID_SET_NULL (&heap_hdr_vpid);
25099  VPID_SET_NULL (&heap_last_page_vpid);
25100 
25101  PGBUF_INIT_WATCHER (&page_watcher, PGBUF_ORDERED_HEAP_NORMAL, &hfid);
25102  PGBUF_INIT_WATCHER (&heap_header_watcher, PGBUF_ORDERED_HEAP_HDR, &hfid);
25103  PGBUF_INIT_WATCHER (&heap_last_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, &hfid);
25104 
25105  // Early out
25106  if (array_size == 0)
25107  {
25108  // Nothing to append.
25109  return error_code;
25110  }
25111 
25112  // Safe-guards
25113  assert (!HFID_IS_NULL (&hfid));
25114 
25115  // Check every page is allocated
25116  for (size_t i = 0; i < array_size; i++)
25117  {
25118  if (pgbuf_is_valid_page (thread_p, &heap_pages_array[i], false, NULL, NULL) != DISK_VALID)
25119  {
25120  assert (false);
25121  return ER_FAILED;
25122  }
25123  }
25124 
25125  // Start a system operation since we write in multiple pages.
25126  log_sysop_start_atomic (thread_p);
25127 
25128  /**********************************************************/
25129  /* Start by creating a heap chain from the pages. */
25130  /**********************************************************/
25131 
25132  for (size_t i = 0; i < array_size; i++)
25133  {
25134  VPID next_vpid, prev_vpid;
25135 
25136  VPID_COPY (&prev_vpid, ((i == 0) ? (&null_vpid) : (&heap_pages_array[i - 1])));
25137  VPID_COPY (&next_vpid, ((i == array_size - 1) ? (&null_vpid) : (&heap_pages_array[i + 1])));
25138 
25139  error_code = heap_add_chain_links (thread_p, &hfid, &heap_pages_array[i], &next_vpid, &prev_vpid,
25140  &page_watcher, false, false);
25141  if (error_code != NO_ERROR)
25142  {
25143  // This should never happen.
25144  assert (false);
25145  goto cleanup;
25146  }
25147  }
25148 
25149  /**********************************************************/
25150  /* Now add the chain to the heap itself. */
25151  /**********************************************************/
25152 
25153  // First get the heap header page.
25154  error_code = heap_get_header_page (thread_p, &hfid, &heap_hdr_vpid);
25155  if (error_code != NO_ERROR)
25156  {
25157  ASSERT_ERROR ();
25158  goto cleanup;
25159  }
25160 
25161  // Now get a watcher for the heap header page.
25162  error_code = heap_get_page_with_watcher (thread_p, &heap_hdr_vpid, &heap_header_watcher);
25163  if (error_code != NO_ERROR)
25164  {
25165  ASSERT_ERROR ();
25166  goto cleanup;
25167  }
25168 
25169  // Get the heap header.
25170  heap_hdr = heap_get_header_stats_ptr (thread_p, heap_header_watcher.pgptr);
25171  if (heap_hdr == NULL)
25172  {
25173  assert (false);
25174  error_code = ER_FAILED;
25175  goto cleanup;
25176  }
25177 
25178  // Get the next VPID of the heap header.
25179  heap_header_next_vpid = heap_hdr->next_vpid;
25180 
25181  // Get the last page of the heap.
25182  error_code = heap_get_last_page (thread_p, &hfid, heap_hdr, NULL, &heap_last_page_vpid, &heap_last_page_watcher);
25183  if (error_code != NO_ERROR)
25184  {
25185  ASSERT_ERROR ();
25186  goto cleanup;
25187  }
25188 
25189  /**********************************************************/
25190  /* We distinguish 2 cases here:
25191  * 1. Heap is empty
25192  * -> This results in forming the chain with the new pages and append it to the heap header.
25193  * -> More precisely, we skip creating the links with the last page since this is the header page.
25194  * 2. Heap is not empty.
25195  * -> This results in forming the chain with the new pages and append it to the last page of the heap.
25196  */
25197  /**********************************************************/
25198  if (VPID_EQ (&heap_hdr_vpid, &heap_last_page_vpid))
25199  {
25200  assert (VPID_ISNULL (&heap_header_next_vpid));
25201 
25202  skip_last_page_links = true;
25203  // First page of the new chain becomes the new next page of the heap header.
25204  heap_header_next_vpid = heap_pages_array[0];
25205  }
25206 
25207  // Add new links to the first page of the chain.
25208  error_code = heap_add_chain_links (thread_p, &hfid, &heap_pages_array[0], NULL, &heap_last_page_vpid,
25209  &page_watcher, false, false);
25210  if (error_code != NO_ERROR)
25211  {
25212  ASSERT_ERROR ();
25213  goto cleanup;
25214  }
25215 
25216  // Add new links to the last page of the heap.
25217  if (!skip_last_page_links)
25218  {
25219  error_code = heap_add_chain_links (thread_p, &hfid, &heap_last_page_vpid, &heap_pages_array[0], NULL,
25220  &heap_last_page_watcher, true, true);
25221  if (error_code != NO_ERROR)
25222  {
25223  ASSERT_ERROR ();
25224  goto cleanup;
25225  }
25226  }
25227 
25228  // Now update the last page of the heap header.
25229  error_code = heap_update_and_log_header (thread_p, &hfid, heap_header_watcher, heap_hdr, heap_header_next_vpid,
25230  heap_pages_array[array_size - 1], array_size);
25231  if (error_code != NO_ERROR)
25232  {
25233  ASSERT_ERROR ();
25234  goto cleanup;
25235  }
25236 
25237 cleanup:
25238  // Check if we have errors to abort the sysop.
25239  if (error_code != NO_ERROR)
25240  {
25241  // Safeguard
25242  ASSERT_ERROR ();
25243  log_sysop_abort (thread_p);
25244  }
25245  else
25246  {
25247  // Commit the sysop
25248  log_sysop_commit (thread_p);
25249  }
25250 
25251  if (page_watcher.pgptr)
25252  {
25253  pgbuf_ordered_unfix_and_init (thread_p, page_watcher.pgptr, &page_watcher);
25254  }
25255 
25256  if (heap_last_page_watcher.pgptr)
25257  {
25258  pgbuf_ordered_unfix_and_init (thread_p, heap_last_page_watcher.pgptr, &heap_last_page_watcher);
25259  }
25260 
25261  if (heap_header_watcher.pgptr)
25262  {
25263  pgbuf_ordered_unfix_and_init (thread_p, heap_header_watcher.pgptr, &heap_header_watcher);
25264  }
25265 
25266  return error_code;
25267 }
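/* Editor's note: appending pages to the heap boils down to splicing a new
 * doubly linked run after the current tail, which is exactly the prev/next
 * updates performed above through heap_add_chain_links(). The in-memory list
 * below is a standalone stand-in (not CUBRID code) for the on-disk VPID chain. */
#include <stddef.h>
#include <assert.h>

struct demo_page { struct demo_page *prev, *next; };

/* Link pages[0..count-1] into a run and append it after 'tail'; returns the new tail. */
static struct demo_page *
demo_append_run (struct demo_page *tail, struct demo_page *pages, int count)
{
  for (int i = 0; i < count; i++)
    {
      pages[i].prev = (i == 0) ? tail : &pages[i - 1];
      pages[i].next = (i == count - 1) ? NULL : &pages[i + 1];
    }
  if (tail != NULL)
    {
      tail->next = &pages[0];
    }
  return &pages[count - 1];
}

int
main (void)
{
  struct demo_page head = { NULL, NULL };
  struct demo_page extra[2];
  struct demo_page *tail = demo_append_run (&head, extra, 2);
  assert (head.next == &extra[0] && extra[1].prev == &extra[0] && tail == &extra[1]);
  return 0;
}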
25268 
25269 void
25270 heap_rv_dump_append_pages_to_heap (FILE * fp, int length, void *data)
25271 {
25272  // *INDENT-OFF*
25273  string_buffer strbuf;
25274  // *INDENT-ON*
25275 
25276  const char *ptr = (const char *) data;
25277 
25278  HFID hfid;
25279  OID class_oid;
25280 
25281  OR_GET_HFID (ptr, &hfid);
25282  ptr += OR_HFID_SIZE;
25283 
25284  OR_GET_OID (ptr, &class_oid);
25285  ptr += OR_OID_SIZE;
25286 
25287  strbuf ("CLASS = %d|%d|%d / HFID = %d, %d|%d\n", OID_AS_ARGS (&class_oid), HFID_AS_ARGS (&hfid));
25288 
25289  int count = OR_GET_INT (ptr);
25290  ptr += OR_INT_SIZE;
25291 
25292  for (int i = 0; i < count; i++)
25293  {
25294  // print VPIDs, 8 on each line
25295 
25296  VPID vpid;
25297  OR_GET_VPID (ptr, &vpid);
25298  ptr += OR_VPID_SIZE;
25299  strbuf ("%d|%d ", VPID_AS_ARGS (&vpid));
25300  if (i % 8 == 7)
25301  {
25302  strbuf ("\n");
25303  }
25304  }
25305  strbuf ("\n");
25306 
25307  fprintf (fp, "%s", strbuf.get_buffer ());
25308 }
25309 
25310 static int
25311 heap_get_page_with_watcher (THREAD_ENTRY * thread_p, const VPID *page_vpid, PGBUF_WATCHER * pg_watcher)
25312 {
25313  int error_code = NO_ERROR;
25314 
25315  // Safeguards.
25316  assert (pg_watcher != NULL);
25317  assert (page_vpid != NULL);
25318 
25319  pg_watcher->pgptr = heap_scan_pb_lock_and_fetch (thread_p, page_vpid, OLD_PAGE, X_LOCK, NULL, pg_watcher);
25320  if (pg_watcher->pgptr == NULL)
25321  {
25322  ASSERT_ERROR_AND_SET (error_code);
25323  return error_code;
25324  }
25325 
25326  return error_code;
25327 }
25328 
25329 static int
25330 heap_add_chain_links (THREAD_ENTRY * thread_p, const HFID * hfid, const VPID * vpid, const VPID * next_link,
25331  const VPID * prev_link, PGBUF_WATCHER * page_watcher, bool keep_page_fixed,
25332  bool is_page_watcher_inited)
25333 {
25334  LOG_DATA_ADDR addr;
25335  int error_code = NO_ERROR;
25336 
25337  // Init watcher if needed.
25338  if (!is_page_watcher_inited)
25339  {
25340  PGBUF_INIT_WATCHER (page_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
25341 
25342  // Get a watcher for this page.
25343  error_code = heap_get_page_with_watcher (thread_p, vpid, page_watcher);
25344  if (error_code != NO_ERROR)
25345  {
25346  ASSERT_ERROR ();
25347  return error_code;
25348  }
25349  }
25350 
25351  // Make sure we fixed the page.
25352  assert (pgbuf_is_page_fixed_by_thread (thread_p, vpid));
25353 
25354  // Prepare the chain.
25355  HEAP_CHAIN *chain, chain_prev;
25356 
25357  // Get the chain from the current page.
25358  chain = heap_get_chain_ptr (thread_p, page_watcher->pgptr);
25359  if (chain == NULL)
25360  {
25361  // This should never happen
25362  assert (false);
25363  error_code = ER_FAILED;
25364  return error_code;
25365  }
25366 
25367  // Save the old chain for logging.
25368  chain_prev = *chain;
25369 
25370  // Add the prev vpid to chain
25371  if (prev_link != NULL)
25372  {
25373  VPID_COPY (&chain->prev_vpid, prev_link);
25374  }
25375 
25376  // Add the next vpid to chain
25377  if (next_link != NULL)
25378  {
25379  VPID_COPY (&chain->next_vpid, next_link);
25380  }
25381 
25382  // Prepare logging
25383  addr.vfid = &hfid->vfid;
25384  addr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
25385  addr.pgptr = page_watcher->pgptr;
25386 
25387  // Log the changes.
25388  log_append_undoredo_data (thread_p, RVHF_CHAIN, &addr, sizeof (HEAP_CHAIN), sizeof (HEAP_CHAIN), &chain_prev,
25389  chain);
25390 
25391  // Now set the page dirty.
25392  pgbuf_set_dirty (thread_p, addr.pgptr, DONT_FREE);
25393 
25394  if (!keep_page_fixed)
25395  {
25396  // Unfix the current page.
25397  pgbuf_ordered_unfix_and_init (thread_p, page_watcher->pgptr, page_watcher);
25398 
25399  // And clean the watcher
25400  PGBUF_CLEAR_WATCHER (page_watcher);
25401  }
25402 
25403  return NO_ERROR;
25404 }
25405 
25406 static int
25407 heap_update_and_log_header (THREAD_ENTRY * thread_p, const HFID * hfid, const PGBUF_WATCHER heap_header_watcher,
25408  HEAP_HDR_STATS * heap_hdr, const VPID new_next_vpid, const VPID new_last_vpid,
25409  const int new_num_pages)
25410 {
25411  HEAP_HDR_STATS heap_hdr_prev;
25412  LOG_DATA_ADDR addr;
25413 
25414  assert (!PGBUF_IS_CLEAN_WATCHER (&heap_header_watcher));
25415  assert (heap_hdr != NULL);
25416 
25417  // Save for logging.
25418  heap_hdr_prev = *heap_hdr;
25419 
25420  // Now add the info to the header.
25421  heap_hdr->estimates.last_vpid = new_last_vpid;
25422  heap_hdr->estimates.num_pages += new_num_pages;
25423  heap_hdr->next_vpid = new_next_vpid;
25424 
25425  // Log this change.
25426  addr.pgptr = heap_header_watcher.pgptr;
25427  addr.vfid = &hfid->vfid;
25428  addr.offset = HEAP_HEADER_AND_CHAIN_SLOTID;
25429 
25430  log_append_undoredo_data (thread_p, RVHF_STATS, &addr, sizeof (HEAP_HDR_STATS), sizeof (HEAP_HDR_STATS),
25431  &heap_hdr_prev, heap_hdr);
25432 
25433  // Set the page as dirty.
25434  pgbuf_set_dirty (thread_p, heap_header_watcher.pgptr, DONT_FREE);
25435 
25436  return NO_ERROR;
25437 }
25438 
25439 void
25440 heap_log_postpone_heap_append_pages (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid,
25441  const std::vector<VPID> &heap_pages_array)
25442 {
25443  if (heap_pages_array.empty ())
25444  {
25445  return;
25446  }
25447 
25448  // This append needs to be run on postpone after the commit.
25449  // First create the log data required.
25450  size_t array_size = heap_pages_array.size ();
25451  int log_data_size = (DB_ALIGN (OR_HFID_SIZE, PTR_ALIGNMENT) + OR_OID_SIZE + sizeof (int)
25452  + array_size * DISK_VPID_ALIGNED_SIZE);
25453  char *log_data = (char *) db_private_alloc (NULL, log_data_size + MAX_ALIGNMENT);
25454  LOG_DATA_ADDR log_addr (&hfid->vfid, NULL, 0);
25455  char *ptr = log_data;
25456 
25457  // Now populate the log data needed.
25458 
25459  // HFID
25460  OR_PUT_HFID (ptr, hfid);
25461  ptr += OR_HFID_SIZE;
25462  ptr = PTR_ALIGN (ptr, PTR_ALIGNMENT);
25463 
25464  // class_oid
25465  OR_PUT_OID (ptr, class_oid);
25466  ptr += OR_OID_SIZE;
25467  ptr = PTR_ALIGN (ptr, PTR_ALIGNMENT);
25468 
25469  // array_size
25470  OR_PUT_INT (ptr, (int) array_size);
25471  ptr += OR_INT_SIZE;
25472 
25473  // The array of VPID.
25474  for (size_t i = 0; i < array_size; i++)
25475  {
25476  OR_PUT_VPID_ALIGNED (ptr, &heap_pages_array[i]);
25477  ptr += DISK_VPID_ALIGNED_SIZE;
25478  }
25479 
25480  assert ((ptr - log_data) == log_data_size);
25481 
25482  log_append_postpone (thread_p, RVHF_APPEND_PAGES_TO_HEAP, &log_addr, log_data_size, log_data);
25483 
25484  if (log_data)
25485  {
25486  db_private_free_and_init (NULL, log_data);
25487  }
25488 }
25489 
25490 // *INDENT-ON*
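/* Editor's note: the postpone record built by heap_log_postpone_heap_append_pages()
 * and decoded by heap_rv_postpone_append_pages_to_heap() is a flat buffer laid out
 * as [HFID][class OID][count][count x VPID]. The toy pack/unpack pair below (not
 * CUBRID code; simplified fixed-size ids, no alignment padding) shows the symmetric
 * layout idea. */
#include <string.h>
#include <assert.h>

struct demo_vpid { int volid, pageid; };

static size_t
demo_pack (char *buf, int hfid, int class_oid, const struct demo_vpid *pages, int count)
{
  size_t off = 0;
  memcpy (buf + off, &hfid, sizeof hfid);           off += sizeof hfid;
  memcpy (buf + off, &class_oid, sizeof class_oid); off += sizeof class_oid;
  memcpy (buf + off, &count, sizeof count);         off += sizeof count;
  memcpy (buf + off, pages, sizeof (struct demo_vpid) * (size_t) count);
  return off + sizeof (struct demo_vpid) * (size_t) count;
}

static int
demo_unpack_count (const char *buf)
{
  int count;
  memcpy (&count, buf + 2 * sizeof (int), sizeof count);   /* skip the two id fields */
  return count;
}

int
main (void)
{
  struct demo_vpid pages[2] = { { 0, 10 }, { 0, 11 } };
  char buf[128];
  size_t len = demo_pack (buf, 7, 42, pages, 2);
  assert (len == 3 * sizeof (int) + 2 * sizeof (struct demo_vpid));
  assert (demo_unpack_count (buf) == 2);
  return 0;
}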
Definition: page_buffer.h:394
int xheap_get_class_num_objects_pages(THREAD_ENTRY *thread_p, const HFID *hfid, int approximation, int *nobjs, int *npages)
Definition: heap_file.c:16396
cubthread::entry * thread_get_thread_entry_info(void)
#define NO_ERROR
Definition: error_code.h:46
int heap_header_capacity_start_scan(THREAD_ENTRY *thread_p, int show_type, DB_VALUE **arg_values, int arg_cnt, void **ptr)
Definition: heap_file.c:17661
static SCAN_CODE heap_next_internal(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, bool ispeeking, bool reversed_direction, DB_VALUE **cache_recordinfo)
Definition: heap_file.c:7775
int mht_map_no_key(THREAD_ENTRY *thread_p, const MHT_TABLE *ht, int(*map_func)(THREAD_ENTRY *thread_p, void *data, void *args), void *func_args)
Definition: memory_hash.c:2231
static DISK_ISVALID heap_chkreloc_end(HEAP_CHKALL_RELOCOIDS *chk)
Definition: heap_file.c:14407
int area_size
char * get_data_for_modify(void)
void log_append_redo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:1979
int or_rep_id(RECDES *record)
#define __attribute__(X)
Definition: porting.h:36
#define MVCC_IS_HEADER_DELID_VALID(rec_header_p)
Definition: mvcc.h:87
DB_COLLECTION * db_get_set(const DB_VALUE *value)
void er_stack_push(void)
int heap_get_class_oid_from_page(THREAD_ENTRY *thread_p, PAGE_PTR page_p, OID *class_oid)
Definition: heap_file.c:18824
#define MVCC_GET_INSID(header)
Definition: mvcc.h:51
HEAP_CLASSREPR_HASH * hash_table
Definition: heap_file.c:370
#define LANG_SYS_COLLATION
int pr_data_writeval_disk_size(DB_VALUE *value)
MVCC_SNAPSHOT * logtb_get_mvcc_snapshot(THREAD_ENTRY *thread_p)
#define MVCC_IS_CHN_UPTODATE(rec_header_p, chn)
Definition: mvcc.h:133
static int heap_classrepr_dump(THREAD_ENTRY *thread_p, FILE *fp, const OID *class_oid, const OR_CLASSREP *repr)
Definition: heap_file.c:2672
INT32 mvcc_flag
Definition: mvcc.h:40
#define pgbuf_ordered_fix(thread_p, req_vpid, fetch_mode, requestmode, req_watcher)
Definition: page_buffer.h:261
#define VACUUM_ER_LOG_RECOVERY
Definition: vacuum.h:55
#define LF_EM_NOT_USING_MUTEX
Definition: lock_free.h:59
void log_append_undoredo_data2(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, const VFID *vfid, PAGE_PTR pgptr, PGLENGTH offset, int undo_length, int redo_length, const void *undo_data, const void *redo_data)
Definition: log_manager.c:1861
static void heap_scancache_block_allocate(cubmem::block &b, size_t size)
Definition: heap_file.c:24900
TP_DOMAIN_STATUS tp_domain_check(const TP_DOMAIN *domain, const DB_VALUE *value, TP_MATCH exact_match)
int xheap_has_instance(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, int has_visible_instance)
Definition: heap_file.c:16429
void add_empty(const BTID &index)
#define IO_PAGESIZE
SCAN_CODE heap_last(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:8125
static void heap_stats_put_second_best(HEAP_HDR_STATS *heap_hdr, VPID *vpid)
Definition: heap_file.c:3110
int db_value_domain_min(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale, const int codeset, const int collation_id, const DB_ENUMERATION *enumeration)
Definition: db_macro.c:413
int vacuum_heap_page(THREAD_ENTRY *thread_p, VACUUM_HEAP_OBJECT *heap_objects, int n_heap_objects, MVCCID threshold_mvccid, HFID *hfid, bool *reusable, bool was_interrupted)
Definition: vacuum.c:1546
HEAP_SCANCACHE * scan_cache_p
Definition: heap_file.h:287
#define HEAP_RV_FLAG_VACUUM_STATUS_CHANGE
Definition: heap_file.c:517
static int heap_dump_hdr(FILE *fp, HEAP_HDR_STATS *heap_hdr)
Definition: heap_file.c:14050
void log_append_undoredo_crumbs(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int num_undo_crumbs, int num_redo_crumbs, const LOG_CRUMB *undo_crumbs, const LOG_CRUMB *redo_crumbs)
Definition: log_manager.c:2030
OR_FUNCTION_INDEX * func_index_info
#define ASSERT_ERROR()
SCAN_CODE
#define OID_GT(oidp1, oidp2)
Definition: oid.h:97
void or_class_tde_algorithm(RECDES *record, TDE_ALGORITHM *tde_algo)
SCAN_CODE heap_scanrange_to_prior(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range, OID *last_oid)
Definition: heap_file.c:8375
static int heap_ovf_get_capacity(THREAD_ENTRY *thread_p, const OID *ovf_oid, int *ovf_len, int *ovf_num_pages, int *ovf_overhead, int *ovf_free_space)
Definition: heap_file.c:6648
VACUUM_HEAP_OBJECT * heap_objects
Definition: vacuum.h:114
STATIC_INLINE HEAP_CHAIN * heap_get_chain_ptr(THREAD_ENTRY *thread_p, PAGE_PTR page_heap) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:4129
#define HEAP_MAYNEED_DECACHE_GUESSED_LASTREPRS(class_oid, hfid)
Definition: heap_file.c:405
static int heap_ovf_get_length(THREAD_ENTRY *thread_p, const OID *ovf_oid)
Definition: heap_file.c:6579
int file_descriptor_update(THREAD_ENTRY *thread_p, const VFID *vfid, void *des_new)
#define heap_classrepr_log_er(msg,...)
Definition: heap_file.c:1358
unsigned int of_local_next
Definition: lock_free.h:66
HEAP_BESTSPACE best[HEAP_NUM_BEST_SPACESTATS]
Definition: heap_file.c:220
void lf_hash_destroy(LF_HASH_TABLE *table)
Definition: lock_free.c:1933
void logpb_fatal_error(THREAD_ENTRY *thread_p, bool logexit, const char *file_name, const int lineno, const char *fmt,...)
int heap_rv_redo_reuse_page(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16283
int data_writeval(struct or_buf *buf, const DB_VALUE *value) const
static int heap_attrinfo_check(const OID *inst_oid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:11186
static int heap_get_record_location(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:20470
#define HFID_INITIALIZER
OID * oid_User_class_oid
Definition: oid.c:78
int file_dealloc(THREAD_ENTRY *thread_p, const VFID *vfid, const VPID *vpid, FILE_TYPE file_type_hint)
#define QSTR_IS_ANY_CHAR(s)
Definition: string_opfunc.h:46
static int heap_update_and_log_header(THREAD_ENTRY *thread_p, const HFID *hfid, const PGBUF_WATCHER heap_header_watcher, HEAP_HDR_STATS *heap_hdr, const VPID new_next_vpid, const VPID new_last_vpid, const int new_num_pages)
Definition: heap_file.c:25407
unsigned char codeset
Definition: object_domain.h:91
bool is_redistribute_insert_with_delid
Definition: heap_file.h:314
int heap_scancache_start_modify(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid, const OID *class_oid, int op_type, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:6867
bool cache_last_fix_page
Definition: heap_file.h:148
bool schema_change
Definition: heap_file.c:461
#define LOG_DATA_ADDR_INITIALIZER
Definition: log_append.hpp:63
static OR_CLASSREP * heap_classrepr_get_from_record(THREAD_ENTRY *thread_p, REPR_ID *last_reprid, const OID *class_oid, RECDES *class_recdes, REPR_ID reprid)
Definition: heap_file.c:2246
int heap_scancache_quick_start_modify_with_class_oid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, OID *class_oid)
Definition: heap_file.c:19331
LF_ENTRY_INITIALIZE_FUNC f_init
Definition: lock_free.h:90
static void heap_page_rv_chain_update(THREAD_ENTRY *thread_p, PAGE_PTR heap_page, MVCCID mvccid, bool vacuum_status_change)
Definition: heap_file.c:23692
void LSA_COPY(log_lsa *plsa1, const log_lsa *plsa2)
Definition: log_lsa.hpp:139
SCAN_CODE heap_get_visible_version(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, int old_chn)
Definition: heap_file.c:24272
int heap_attrinfo_delete_lob(THREAD_ENTRY *thread_p, RECDES *recdes, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10464
static int heap_attrinfo_recache_attrepr(HEAP_CACHE_ATTRINFO *attr_info, bool islast_reset)
Definition: heap_file.c:9678
int heap_classrepr_restart_cache(void)
Definition: heap_file.c:1849
#define TP_IS_SET_TYPE(typenum)
#define ER_TF_BUFFER_OVERFLOW
Definition: error_code.h:388
void set_free(DB_COLLECTION *set)
Definition: set_object.c:2560
int spage_insert(THREAD_ENTRY *thread_p, PAGE_PTR page_p, RECDES *record_descriptor_p, PGSLOTID *out_slot_id_p)
static void heap_log_delete_physical(THREAD_ENTRY *thread_p, PAGE_PTR page_p, VFID *vfid_p, OID *oid_p, RECDES *recdes_p, bool mark_reusable, LOG_LSA *undo_lsa)
Definition: heap_file.c:21512
#define VPID_COPY(dest_ptr, src_ptr)
Definition: dbtype_def.h:909
PAGEID pgbuf_get_page_id(PAGE_PTR pgptr)
Definition: page_buffer.c:4657
int db_make_bigint(DB_VALUE *value, const DB_BIGINT num)
int db_get_int(const DB_VALUE *value)
int heap_prepare_object_page(THREAD_ENTRY *thread_p, const OID *oid, PGBUF_WATCHER *page_watcher_p, PGBUF_LATCH_MODE latch_mode)
Definition: heap_file.c:24609
int heap_manager_finalize(void)
Definition: heap_file.c:5111
void heap_finalize_hfid_table(void)
Definition: heap_file.c:23242
SCAN_CODE heap_prepare_get_context(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, bool is_heap_scan, NON_EXISTENT_HANDLING non_ex_handling_type)
Definition: heap_file.c:7385
#define ER_HEAP_NODATA_NEWADDRESS
Definition: error_code.h:107
FILE_TYPE file_type
Definition: heap_file.h:152
int logtb_get_number_of_total_tran_indices(void)
DB_TYPE
Definition: dbtype_def.h:670
LOG_RCVINDEX
Definition: recovery.h:36
#define heap_hfid_table_log(thp, oidp, msg,...)
Definition: heap_file.c:510
LOG_LSA * log_get_append_lsa(void)
Definition: log_manager.c:559
int logpb_fetch_page(THREAD_ENTRY *thread_p, const LOG_LSA *req_lsa, LOG_CS_ACCESS_MODE access_mode, LOG_PAGE *log_pgptr)
LOG_LSA * logtb_find_current_tran_lsa(THREAD_ENTRY *thread_p)
static API_MUTEX mutex
Definition: api_util.c:72
void log_sysop_start_atomic(THREAD_ENTRY *thread_p)
Definition: log_manager.c:3644
DB_C_DOUBLE db_get_double(const DB_VALUE *value)
#define ER_FAILED
Definition: error_code.h:47
#define OR_MVCC_DELETE_ID_SIZE
PGBUF_WATCHER overflow_page_watcher
Definition: heap_file.h:301
int db_make_varchar(DB_VALUE *value, const int max_char_length, DB_CONST_C_CHAR str, const int char_str_byte_size, const int codeset, const int collation_id)
void spage_initialize(THREAD_ENTRY *thread_p, PAGE_PTR page_p, INT16 slot_type, unsigned short alignment, bool is_saving)
bool mvcc_is_mvcc_disabled_class(const OID *class_oid)
Definition: mvcc.c:616
pthread_mutex_t hash_mutex
Definition: heap_file.c:342
static SCAN_CODE heap_get_visible_version_from_log(THREAD_ENTRY *thread_p, RECDES *recdes, LOG_LSA *previous_version_lsa, HEAP_SCANCACHE *scan_cache, int has_chn)
Definition: heap_file.c:24145
int file_get_type(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_TYPE *ftype_out)
#define csect_enter(a, b, c)
Definition: cnv.c:138
#define ALWAYS_INLINE
TRAN_ABORT_REASON tran_abort_reason
Definition: log_impl.h:526
#define DEFAULT_REPR_INCREMENT
Definition: heap_file.c:303
DB_COLLECTION * set_copy(DB_COLLECTION *set)
Definition: set_object.c:2473
int file_tracker_reuse_heap(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid_out)
#define OR_MVCC_REPID_MASK
DISK_ISVALID file_check_vpid(THREAD_ENTRY *thread_p, const VFID *vfid, const VPID *vpid_lookup)
BTREE_TYPE
SCAN_CODE spage_previous_record_dont_skip_empty(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
int mht_rem(MHT_TABLE *ht, const void *key, int(*rem_func)(const void *key, void *data, void *args), void *func_args)
Definition: memory_hash.c:1952
LF_ENTRY_UNINITIALIZE_FUNC f_uninit
Definition: lock_free.h:93
FILE_TYPE
Definition: file_manager.h:38
LOG_HDRPAGE hdr
Definition: log_storage.hpp:84
#define HEAP_BIT_SET(byte_ptr, bit_num)
Definition: heap_file.c:435
static int heap_scancache_check_with_hfid(THREAD_ENTRY *thread_p, HFID *hfid, OID *class_oid, HEAP_SCANCACHE **scan_cache)
Definition: heap_file.c:6673
REPR_ID heap_get_class_repr_id(THREAD_ENTRY *thread_p, OID *class_oid)
Definition: heap_file.c:16476
void set_record_length(std::size_t length)
static HEAP_CLASSREPR_CACHE heap_Classrepr_cache
Definition: heap_file.c:381
TP_DOMAIN_STATUS tp_value_auto_cast(const DB_VALUE *src, DB_VALUE *dest, const TP_DOMAIN *desired_domain)
bool recently_accessed
Definition: heap_file.c:447
int heap_rv_undo_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16199
#define HEAP_MAX_FIRSTSLOTID_LENGTH
MVCCID max_mvccid
Definition: heap_file.c:276
#define OR_HFID_SIZE
#define SAFEGUARD_RVSPACE
Definition: slotted_page.h:53
#define ER_CT_UNKNOWN_REPRID
Definition: error_code.h:493
void logpb_force_flush_pages(THREAD_ENTRY *thread_p)
HEAP_STATS_ENTRY * free_list
Definition: heap_file.c:477
void heap_rv_dump_chain(FILE *fp, int ignore_length, void *data)
Definition: heap_file.c:15550
static int heap_attrinfo_start_refoids(THREAD_ENTRY *thread_p, OID *class_oid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:11922
multi_index_unique_stats * m_index_stats
Definition: heap_file.h:151
const void * mht_put(MHT_TABLE *ht, const void *key, void *data)
Definition: memory_hash.c:1778
static int heap_get_page_with_watcher(THREAD_ENTRY *thread_p, const VPID *page_vpid, PGBUF_WATCHER *pg_watcher)
Definition: heap_file.c:25311
static int heap_classrepr_finalize_cache(void)
Definition: heap_file.c:1498
PGBUF_WATCHER home_page_watcher
Definition: heap_file.h:378
#define ASSERT_ERROR_AND_SET(error_code)
static int heap_update_home(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, bool is_mvcc_op)
Definition: heap_file.c:22024
struct heap_chain HEAP_CHAIN
Definition: heap_file.c:269
SPAGE_SLOT * spage_get_slot(PAGE_PTR page_p, PGSLOTID slot_id)
#define LSA_INITIALIZER
Definition: log_lsa.hpp:76
Definition: lock_free.h:63
#define OR_MVCC_FLAG_VALID_INSID
LF_ENTRY_FREE_FUNC f_free
Definition: lock_free.h:87
#define assert_release(e)
Definition: error_manager.h:96
HEAP_CLASSREPR_ENTRY * area
Definition: heap_file.c:368
void pgbuf_set_dirty(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, bool free_page)
Definition: page_buffer.c:4280
Definition: lock_free.h:120
char * overflow_get_first_page_data(char *page_ptr)
void LOG_CS_ENTER(THREAD_ENTRY *thread_p)
SCAN_CODE overflow_get(THREAD_ENTRY *thread_p, const VPID *ovf_vpid, RECDES *recdes, MVCC_SNAPSHOT *mvcc_snapshot)
int idx
Definition: heap_file.c:445
const char * classname
Definition: heap_file.h:128
void spage_update_record_type(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, INT16 record_type)
int or_mvcc_get_header(RECDES *record, MVCC_REC_HEADER *mvcc_header)
Definition: heap_file.c:443
int pr_midxkey_init_boundbits(char *bufptr, int n_atts)
PGBUF_WATCHER * overflow_page_watcher_p
Definition: heap_file.h:307
void heap_page_set_vacuum_status_none(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23755
int lock_object(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, int cond_flag)
#define ER_CSS_PTHREAD_MUTEX_LOCK
Definition: error_code.h:999
#define MVCCID_NULL
void log_sysop_start(THREAD_ENTRY *thread_p)
Definition: log_manager.c:3578
#define OR_GET_BOUND_BIT_FLAG(ptr)
INT16 heap_rv_remove_flags_from_offset(INT16 offset)
Definition: heap_file.c:23901
int num_substitutions
Definition: heap_file.c:209
int heap_scancache_quick_start_root_hfid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:19255
int heap_dump_capacity(THREAD_ENTRY *thread_p, FILE *fp, const HFID *hfid)
Definition: heap_file.c:14281
#define OR_MVCC_FLAG_VALID_DELID
#define OR_MVCCID_SIZE
static int heap_chkreloc_print_notfound(const void *ignore_reloc_oid, void *ent, void *xchk)
Definition: heap_file.c:14493
static void heap_mvcc_log_delete(THREAD_ENTRY *thread_p, LOG_DATA_ADDR *p_addr, LOG_RCVINDEX rcvindex)
Definition: heap_file.c:15828
#define LOG_IS_MVCC_HEAP_OPERATION(rcvindex)
Definition: mvcc.h:239
int reserve2_for_future
Definition: heap_file.c:228
#define OR_MVCC_PREV_VERSION_LSA_OFFSET(mvcc_flags)
FILE_TYPE ftype
Definition: heap_file.h:206
static int heap_scancache_start_internal(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid, const OID *class_oid, int cache_last_fix_page, bool is_queryscan, int is_indexscan, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:6719
bool oid_is_system_class(const OID *class_oid)
Definition: oid.c:400
static int heap_insert_handle_multipage_record(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:20001
DISK_ISVALID heap_check_all_pages(THREAD_ENTRY *thread_p, HFID *hfid)
Definition: heap_file.c:13775
static int heap_update_set_prev_version(THREAD_ENTRY *thread_p, const OID *oid, PGBUF_WATCHER *home_pg_watcher, PGBUF_WATCHER *fwd_pg_watcher, LOG_LSA *prev_version_lsa)
Definition: heap_file.c:24442
static int heap_get_insert_location_with_lock(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, PGBUF_WATCHER *home_hint_p)
Definition: heap_file.c:20046
int index_writeval(struct or_buf *buf, const DB_VALUE *value) const
#define OID_SET_NULL(oidp)
Definition: oid.h:85
static void heap_delete_adjust_header(MVCC_REC_HEADER *header_p, MVCCID mvcc_id, bool need_mvcc_header_max_size)
Definition: heap_file.c:20449
#define OID_LT(oidp1, oidp2)
Definition: oid.h:113
#define OR_GET_BOUND_BITS(obj, nvars, fsize)
#define NULL_SLOTID
#define OR_PUT_VPID_ALIGNED(ptr, vpid)
static const OID * heap_ovf_update(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *ovf_oid, RECDES *recdes)
Definition: heap_file.c:6486
#define OR_VAR_ELEMENT_PTR(obj, index)
#define OR_MVCC_FLAG_SHIFT_BITS
HEAP_CLASSREPR_ENTRY * free_top
Definition: heap_file.c:360
void thread_suspend_wakeup_and_unlock_entry(cubthread::entry *thread_p, thread_resume_suspend_status suspended_reason)
char * data
void heap_rv_dump_statistics(FILE *fp, int ignore_length, void *data)
Definition: heap_file.c:15533
int heap_get_best_space_num_stats_entries(void)
Definition: heap_file.c:24829
int db_elo_copy(DB_ELO *src, DB_ELO *dest)
Definition: db_elo.c:102
MVCCID mvcc_id
Definition: recovery.h:198
struct spage_slot SPAGE_SLOT
Definition: slotted_page.h:84
TP_DOMAIN * tp_domain_copy(const TP_DOMAIN *domain, bool check_cache)
int lock_scan(THREAD_ENTRY *thread_p, const OID *class_oid, int cond_flag, LOCK class_lock)
PGNSLOTS num_records
Definition: slotted_page.h:64
int32_t pageid
Definition: dbtype_def.h:879
void or_free_classrep(OR_CLASSREP *rep)
#define lf_tran_end_with_mb(entry)
Definition: lock_free.h:198
INT32 root_pageid
static int heap_classrepr_initialize_cache(void)
Definition: heap_file.c:1371
TP_DOMAIN * tp_domain_find_charbit(DB_TYPE type, int codeset, int collation_id, unsigned char collation_flag, int precision, bool is_desc)
int or_mvcc_add_header(RECDES *record, MVCC_REC_HEADER *mvcc_rec_header, int bound_bit, int variable_offset_size)
void pgbuf_ordered_set_dirty_and_free(THREAD_ENTRY *thread_p, PGBUF_WATCHER *pg_watcher)
INT32 hpgid
#define MVCC_CLEAR_FLAG_BITS(rec_header_p, flag)
Definition: mvcc.h:101
#define BTID_IS_EQUAL(b1, b2)
PAGE_PTR pgbuf_flush_with_wal(THREAD_ENTRY *thread_p, PAGE_PTR pgptr)
Definition: page_buffer.c:2956
int xheap_destroy_newly_created(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *class_oid)
Definition: heap_file.c:5818
int er_errid(void)
int max_reprid
Definition: heap_file.c:327
int heap_rv_mark_deleted_on_undo(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:5859
SCAN_CODE heap_get_last_version(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context)
Definition: heap_file.c:24546
int file_get_sticky_first_page(THREAD_ENTRY *thread_p, const VFID *vfid, VPID *vpid_out)
#define MVCC_SET_DELID(header, mvcc_id)
Definition: mvcc.h:60
LF_FREELIST hfid_hash_freelist
Definition: heap_file.h:190
#define SP_SUCCESS
Definition: slotted_page.h:50
#define VPID_INITIALIZER
Definition: dbtype_def.h:894
#define OR_BOUND_BIT_BYTES(count)
#define PGBUF_IS_CLEAN_WATCHER(w)
Definition: page_buffer.h:153
bool spage_reclaim(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
int heap_scancache_end_when_scan_will_resume(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7210
static int heap_get_capacity(THREAD_ENTRY *thread_p, const HFID *hfid, INT64 *num_recs, INT64 *num_recs_relocated, INT64 *num_recs_inovf, INT64 *num_pages, int *avg_freespace, int *avg_freespace_nolast, int *avg_reclength, int *avg_overhead)
Definition: heap_file.c:9116
PAGE_TYPE
#define OR_SHORT_SIZE
static SCAN_CODE heap_get_bigone_content(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, bool ispeeking, OID *forward_oid, RECDES *recdes)
Definition: heap_file.c:18783
#define PTR_ALIGN(addr, boundary)
Definition: memory_alloc.h:77
static int heap_rv_mvcc_redo_delete_internal(THREAD_ENTRY *thread_p, PAGE_PTR page, PGSLOTID slotid, MVCCID mvccid)
Definition: heap_file.c:15973
#define OR_OFFSET_SIZE_2BYTE
#define OID_AS_ARGS(oidp)
Definition: oid.h:39
#define HEAP_SCANCACHE_SET_NODE(scan_cache, class_oid_p, hfid_p)
Definition: heap_file.h:83
struct func_pred_unpack_info FUNC_PRED_UNPACK_INFO
Definition: heap_file.h:226
static const cubmem::block_allocator HEAP_SCANCACHE_BLOCK_ALLOCATOR
Definition: heap_file.c:888
MVCCID heap_page_get_max_mvccid(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23798
void or_class_hfid(RECDES *record, HFID *hfid)
bool LSA_LT(const log_lsa *plsa1, const log_lsa *plsa2)
Definition: log_lsa.hpp:174
enum tp_domain_status TP_DOMAIN_STATUS
static PAGE_PTR heap_stats_find_best_page(THREAD_ENTRY *thread_p, const HFID *hfid, int needed_space, bool isnew_rec, int newrec_size, HEAP_SCANCACHE *space_cache, PGBUF_WATCHER *pg_watcher)
Definition: heap_file.c:3487
#define er_log_debug(...)
void spage_set_need_update_best_hint(THREAD_ENTRY *thread_p, PAGE_PTR page_p, bool need_update)
Definition: slotted_page.c:962
PGSLOTID spage_delete_for_recovery(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
int idx
Definition: heap_file.c:312
#define HEAP_CLASSREPR_MAXCACHE
Definition: heap_file.c:96
int pgbuf_ordered_fix_debug(THREAD_ENTRY *thread_p, const VPID *req_vpid, PAGE_FETCH_MODE fetch_mode, const PGBUF_LATCH_MODE request_mode, PGBUF_WATCHER *req_watcher, const char *caller_file, int caller_line)
int force_decache
Definition: heap_file.c:316
#define VPID_AS_ARGS(vpidp)
Definition: dbtype_def.h:896
static void heap_log_insert_physical(THREAD_ENTRY *thread_p, PAGE_PTR page_p, VFID *vfid_p, OID *oid_p, RECDES *recdes_p, bool is_mvcc_op, bool is_redistribute_op)
Definition: heap_file.c:20388
bool heap_does_exist(THREAD_ENTRY *thread_p, OID *class_oid, const OID *oid)
Definition: heap_file.c:8714
void * mht_get2(const MHT_TABLE *ht, const void *key, void **last)
Definition: memory_hash.c:1496
int heap_scancache_end(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7195
HEAP_CLASSREPR_LOCK * lock_next
Definition: heap_file.c:335
#define ER_HEAP_FOUND_NOT_VACUUMED
Definition: error_code.h:1518
#define OR_VAR_OFFSET(obj, index)
HEAP_CHNGUESS_ENTRY * entries
Definition: heap_file.c:458
int file_dump(THREAD_ENTRY *thread_p, const VFID *vfid, FILE *fp)
OID oid
Definition: heap_file.c:448
VPID next_vpid
Definition: heap_file.c:275
static int heap_get_class_info_from_record(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid, char **classname_out)
Definition: heap_file.c:22987
int db_make_elo(DB_VALUE *value, DB_TYPE type, const DB_ELO *elo)
static HEAP_CLASSREPR_CACHE * heap_Classrepr
Definition: heap_file.c:492
#define OR_GET_MVCCID
bool spage_is_slot_exist(PAGE_PTR page_p, PGSLOTID slot_id)
#define MAX_ALIGNMENT
Definition: memory_alloc.h:70
#define ER_ALTER_CHANGE_CAST_FAILED_SET_DEFAULT
Definition: error_code.h:1322
REPR_ID last_reprid
Definition: heap_file.c:328
int heap_attrinfo_start_with_index(THREAD_ENTRY *thread_p, OID *class_oid, RECDES *class_recdes, HEAP_CACHE_ATTRINFO *attr_info, HEAP_IDX_ELEMENTS_INFO *idx_info)
Definition: heap_file.c:11997
#define PGLENGTH_MAX
#define COPY_OID(dest_oid_ptr, src_oid_ptr)
Definition: oid.h:63
#define heap_scan_pb_lock_and_fetch(...)
Definition: heap_file.c:588
#define pthread_mutex_trylock(a)
Definition: heap_file.c:81
#define OR_MVCC_MAX_HEADER_SIZE
SCAN_CODE spage_get_record(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, RECDES *record_descriptor_p, int is_peeking)
HEAP_SCANCACHE_NODE_LIST * partition_list
Definition: heap_file.h:155
#define PGBUF_CLEAR_WATCHER(w)
Definition: page_buffer.h:115
int heap_objects_capacity
Definition: vacuum.h:115
char * or_pack_mvccid(char *ptr, const MVCCID mvccid)
int mvcc_header_size_lookup[8]
static HEAP_STATS_BESTSPACE_CACHE * heap_Bestspace
Definition: heap_file.c:502
#define DBVAL_BUFSIZE
Definition: btree.h:449
bool spage_is_updatable(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, int record_descriptor_length)
DB_DOMAIN_INFO domain
Definition: dbtype_def.h:1082
DB_ELO * db_get_elo(const DB_VALUE *value)
PGBUF_WATCHER fwd_page_watcher
Definition: heap_file.h:379
int xlogtb_reset_wait_msecs(THREAD_ENTRY *thread_p, int wait_msecs)
#define QSTR_IS_BIT(s)
Definition: string_opfunc.h:44
#define vacuum_er_log_warning(er_log_level, msg,...)
Definition: vacuum.h:73
static int heap_is_valid_oid(THREAD_ENTRY *thread_p, OID *oid)
Definition: heap_file.c:19541
static int heap_insert_adjust_recdes_header(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, bool is_mvcc_class)
Definition: heap_file.c:19707
int db_elo_delete(DB_ELO *elo)
Definition: db_elo.c:115
SCAN_CODE heap_get_mvcc_header(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, MVCC_REC_HEADER *mvcc_header)
Definition: heap_file.c:7620
#define ER_HEAP_MISMATCH_NPAGES
Definition: error_code.h:710
int file_create_with_npages(THREAD_ENTRY *thread_p, FILE_TYPE file_type, int npages, FILE_DESCRIPTORS *des, VFID *vfid)
#define LF_HASH_TABLE_INITIALIZER
Definition: lock_free.h:317
VPID second_best[HEAP_NUM_BEST_SPACESTATS]
Definition: heap_file.c:219
#define VFID_ISNULL(vfid_ptr)
Definition: file_manager.h:72
SCAN_CODE heap_next_record_info(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, DB_VALUE **cache_recordinfo)
Definition: heap_file.c:18648
void THREAD_ENTRY
int overflow_update(THREAD_ENTRY *thread_p, const VFID *ovf_vfid, const VPID *ovf_vpid, RECDES *recdes, FILE_TYPE file_type)
#define NULL_PAGEID
HFID hfid
Definition: heap_file.c:234
#define MVCCID_ALL_VISIBLE
#define pgbuf_unfix_and_init(thread_p, pgptr)
Definition: page_buffer.h:63
HEAP_SCANCACHE_NODE node
Definition: heap_file.h:144
#define OR_VAR_IS_NULL(obj, index)
#define MVCC_GET_PREV_VERSION_LSA(header)
Definition: mvcc.h:152
#define MVCC_SET_INSID(header, mvcc_id)
Definition: mvcc.h:54
int reserve1_for_future
Definition: heap_file.c:227
#define ER_HEAP_UNABLE_TO_CREATE_HEAP
Definition: error_code.h:101
or_auto_increment auto_increment
int spage_get_record_length(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
static int heap_vpid_init_new(THREAD_ENTRY *thread_p, PAGE_PTR page, void *args)
Definition: heap_file.c:4172
#define CT_SERIAL_NAME
Definition: transform.h:135
char * vpid_to_string(char *buf, int buf_size, VPID *vpid)
int heap_chnguess_put(THREAD_ENTRY *thread_p, const OID *oid, int tran_index, int chn)
Definition: heap_file.c:15306
void vacuum_log_add_dropped_file(THREAD_ENTRY *thread_p, const VFID *vfid, const OID *class_oid, bool pospone_or_undo)
Definition: vacuum.c:6024
static void heap_unfix_watchers(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:19375
#define HEAP_NUM_BEST_SPACESTATS
Definition: heap_file.c:182
int ATTR_ID
Definition: heap_file.h:198
int pr_free_ext_value(DB_VALUE *value)
int spage_update(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, const RECDES *record_descriptor_p)
static int heap_stats_bestspace_initialize(void)
Definition: heap_file.c:15023
void heap_log_postpone_heap_append_pages(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *class_oid, const std::vector< VPID > &heap_pages_array)
Definition: heap_file.c:25440
LOCK
int heap_attrinfo_set_uninitialized_global(THREAD_ENTRY *thread_p, OID *inst_oid, RECDES *recdes, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:16711
LF_HASH_TABLE hfid_hash
Definition: heap_file.h:188
MIN_MAX_COLUMN_TYPE type
Definition: dbtype_def.h:857
unsigned int of_del_tran_id
Definition: lock_free.h:72
PGBUF_WATCHER home_page_watcher
Definition: heap_file.h:300
#define FREE(PTR)
Definition: cas_common.h:56
BTREE_SEARCH xbtree_find_unique(THREAD_ENTRY *thread_p, BTID *btid, SCAN_OPERATION_TYPE scan_op_type, DB_VALUE *key, OID *class_oid, OID *oid, bool is_all_class_srch)
Definition: btree.c:23990
#define OID_PSEUDO_KEY(oidp)
Definition: oid.h:130
int heap_cache_class_info(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid, FILE_TYPE ftype, const char *classname_in)
Definition: heap_file.c:23392
int heap_scancache_start(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid, const OID *class_oid, int cache_last_fix_page, int is_indexscan, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:6833
struct heap_hdr_stats::@162 estimates
#define HEAP_NBYTES_CLEARED(byte_ptr, byte_cnt)
Definition: heap_file.c:425
DB_MONETARY * db_get_monetary(const DB_VALUE *value)
#define ER_UNEXPECTED
Definition: error_code.h:1254
HEAP_CLASSREPR_ENTRY * hash_next
Definition: heap_file.c:319
void copy_to(mvcc_snapshot &other) const
Definition: mvcc.c:665
void heap_classrepr_logging_template(const char *filename, const int line, ErF &&er_f, const char *msg, Args &&...args)
Definition: heap_file.c:1350
void mht_destroy(MHT_TABLE *ht)
Definition: memory_hash.c:1140
bool pr_is_set_type(DB_TYPE type)
int file_tracker_interruptable_iterate(THREAD_ENTRY *thread_p, FILE_TYPE desired_ftype, VFID *vfid, OID *class_oid)
int file_descriptor_get(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_DESCRIPTORS *desc_out)
static int heap_vpid_alloc(THREAD_ENTRY *thread_p, const HFID *hfid, PAGE_PTR hdr_pgptr, HEAP_HDR_STATS *heap_hdr, HEAP_SCANCACHE *scan_cache, PGBUF_WATCHER *new_pg_watcher)
Definition: heap_file.c:4229
static void heap_page_update_chain_after_mvcc_op(THREAD_ENTRY *thread_p, PAGE_PTR heap_page, MVCCID mvccid)
Definition: heap_file.c:23601
static int heap_chnguess_initialize(void)
Definition: heap_file.c:14820
#define HEAP_CHK_ADD_UNFOUND_RELOCOIDS
Definition: heap_file.c:280
int boot_find_root_heap(HFID *root_hfid_p)
Definition: boot_sr.c:325
PAGE_TYPE pgbuf_get_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr)
Definition: page_buffer.c:4675
static int heap_Slotted_overhead
Definition: heap_file.c:489
#define HEAP_GUESS_NUM_INDEXED_ATTRS
Definition: heap_file.c:94
int heap_get_class_tde_algorithm(THREAD_ENTRY *thread_p, const OID *class_oid, TDE_ALGORITHM *tde_algo)
Definition: heap_file.c:10737
int file_alloc_sticky_first_page(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_INIT_PAGE_FUNC f_init, void *f_init_args, VPID *vpid_out, PAGE_PTR *page_out)
int heap_attrinfo_start(THREAD_ENTRY *thread_p, const OID *class_oid, int requested_num_attrs, const ATTR_ID *attrids, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:9427
int spage_max_record_size(void)
Definition: slotted_page.c:848
static int heap_scan_cache_allocate_recdes_data(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache_p, RECDES *recdes_p, int size)
Definition: heap_file.c:24761
int heap_scancache_quick_start_modify(HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7056
PR_TYPE * pr_type_from_id(DB_TYPE id)
int spage_check(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
void spage_dump(THREAD_ENTRY *thread_p, FILE *fp, PAGE_PTR page_p, int is_record_printed)
#define RECDES_INITIALIZER
int lf_hash_init(LF_HASH_TABLE *table, LF_FREELIST *freelist, unsigned int hash_size, LF_ENTRY_DESCRIPTOR *edesc)
Definition: lock_free.c:1873
#define MVCCID_IS_NORMAL(id)
#define HEAP_BESTSPACE_SYNC_THRESHOLD
Definition: heap_file.c:86
TP_DOMAIN * tp_domain_resolve_default(DB_TYPE type)
void er_set(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
int spage_max_space_for_new_record(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
Definition: slotted_page.c:984
static int heap_scancache_quick_end(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7110
int or_mvcc_set_log_lsa_to_record(RECDES *record, LOG_LSA *lsa)
Definition: db_set.h:35
#define HEAP_STATS_ENTRY_FREELIST_SIZE
Definition: heap_file.c:99
void alloc_area()
Definition: heap_file.c:24949
int db_set_get(DB_SET *set, int index, DB_VALUE *value)
Definition: db_set.c:508
PGBUF_WATCHER * forward_page_watcher_p
Definition: heap_file.h:309
#define OR_MVCC_FLAG_VALID_PREV_VERSION
char * fileio_get_volume_label(VOLID vol_id, bool is_peek)
Definition: file_io.c:6182
#define ER_QPROC_INVALID_PARAMETER
Definition: error_code.h:963
#define HEAP_HEADER_AND_CHAIN_SLOTID
Definition: heap_file.h:62
int heap_attrinfo_read_dbvalues_without_oid(THREAD_ENTRY *thread_p, RECDES *recdes, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10401
static DISK_ISVALID heap_chkreloc_next(THREAD_ENTRY *thread_p, HEAP_CHKALL_RELOCOIDS *chk, PAGE_PTR pgptr)
Definition: heap_file.c:14543
PAGE_FETCH_MODE
Definition: page_buffer.h:160
PAGE_PTR pgptr
Definition: recovery.h:199
NON_EXISTENT_HANDLING
bool pgbuf_has_prevent_dealloc(PAGE_PTR pgptr)
static void heap_clear_operation_context(HEAP_OPERATION_CONTEXT *context, HFID *hfid_p)
Definition: heap_file.c:19404
static int heap_insert_physical(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:20328
#define assert(x)
void log_skip_logging(THREAD_ENTRY *thread_p, LOG_DATA_ADDR *addr)
Definition: log_manager.c:3244
struct or_partition OR_PARTITION
int heap_rv_redo_insert(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15569
HEAP_PAGE_VACUUM_STATUS heap_page_get_vacuum_status(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23830
SCAN_CODE heap_page_prev(THREAD_ENTRY *thread_p, const OID *class_oid, const HFID *hfid, VPID *prev_vpid, DB_VALUE **cache_pageinfo)
Definition: heap_file.c:18372
REGU_VARIABLE * func_regu
Definition: xasl.h:277
int heap_rv_mvcc_redo_delete_home(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16029
#define DB_NEED_CLEAR(v)
Definition: dbtype.h:83
#define ER_LK_PAGE_TIMEOUT
Definition: error_code.h:134
LF_ENTRY_KEY_COPY_FUNC f_key_copy
Definition: lock_free.h:96
int heap_get_mvcc_rec_header_from_overflow(PAGE_PTR ovf_page, MVCC_REC_HEADER *mvcc_header, RECDES *peek_recdes)
Definition: heap_file.c:18714
#define pthread_mutex_destroy(a)
Definition: heap_file.c:79
static int heap_stats_del_bestspace_by_hfid(THREAD_ENTRY *thread_p, const HFID *hfid)
Definition: heap_file.c:1097
int n_heap_objects
Definition: vacuum.h:116
TDE_ALGORITHM
Definition: tde.h:71
static HEAP_HFID_TABLE heap_Hfid_table_area
Definition: heap_file.c:504
#define ER_LC_UNKNOWN_CLASSNAME
Definition: error_code.h:121
#define OR_MVCC_FLAG_MASK
int spage_check_slot_owner(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
int32_t fileid
Definition: dbtype_def.h:886
#define HEAP_STATS_ENTRY_MHT_EST_SIZE
Definition: heap_file.c:98
bool pgbuf_check_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, PAGE_TYPE ptype)
#define OR_ENABLE_BOUND_BIT(bitptr, element)
#define ER_SP_NOSPACE_IN_PAGE
Definition: error_code.h:97
#define OR_MVCC_INSERT_ID_OFFSET
#define AUTO_INCREMENT_SERIAL_NAME_MAX_LENGTH
Definition: transform.h:180
int file_get_num_user_pages(THREAD_ENTRY *thread_p, const VFID *vfid, int *n_user_pages_out)
void heap_clean_get_context(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context)
Definition: heap_file.c:24657
static int heap_create_internal(THREAD_ENTRY *thread_p, HFID *hfid, const OID *class_oid, const bool reuse_oid)
Definition: heap_file.c:5161
#define PGBUF_WATCHER_RESET_RANK(w, rank)
Definition: page_buffer.h:109
DISK_ISVALID vacuum_check_not_vacuumed_recdes(THREAD_ENTRY *thread_p, OID *oid, OID *class_oid, RECDES *recdes, int btree_node_type)
Definition: vacuum.c:7255
SCAN_CODE heap_scan_get_visible_version(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, int old_chn)
Definition: heap_file.c:24309
int prm_get_integer_value(PARAM_ID prm_id)
int heap_rv_mvcc_redo_redistribute(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:24048
#define ER_GENERIC_ERROR
Definition: error_code.h:49
int heap_scanrange_start(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range, const HFID *hfid, const OID *class_oid, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:8203
#define STATIC_INLINE
DISK_ISVALID heap_check_all_heaps(THREAD_ENTRY *thread_p)
Definition: heap_file.c:13996
OR_DEFAULT_VALUE default_value
#define OID_IS_ROOTOID(oidp)
Definition: oid.h:82
PGBUF_LATCH_MODE
Definition: page_buffer.h:176
#define heap_bestspace_log(...)
Definition: heap_file.c:581
static int heap_update_physical(THREAD_ENTRY *thread_p, PAGE_PTR page_p, short slot_id, RECDES *recdes_p)
Definition: heap_file.c:22223
static int heap_attrinfo_get_disksize(HEAP_CACHE_ATTRINFO *attr_info, bool is_mvcc_class, int *offset_size_ptr)
Definition: heap_file.c:11465
static int heap_estimate_avg_length(THREAD_ENTRY *thread_p, const HFID *hfid, int &avg_reclen)
Definition: heap_file.c:9086
unsigned int record_length
Definition: slotted_page.h:88
int tp_domain_status_er_set(TP_DOMAIN_STATUS status, const char *file_name, const int line_no, const DB_VALUE *src, const TP_DOMAIN *domain)
static int heap_hfid_table_entry_init(void *unique_stat)
Definition: heap_file.c:23076
std::atomic< or_aligned_oid > serial_obj
static unsigned int heap_hfid_table_entry_key_hash(void *key, int hash_table_size)
Definition: heap_file.c:23139
LF_ENTRY_KEY_COMPARE_FUNC f_key_cmp
Definition: lock_free.h:99
void heap_classrepr_dump_all(THREAD_ENTRY *thread_p, FILE *fp, OID *class_oid)
Definition: heap_file.c:16874
static void heap_link_watchers(HEAP_OPERATION_CONTEXT *child, HEAP_OPERATION_CONTEXT *parent)
Definition: heap_file.c:19354
DB_IDENTIFIER OID
Definition: dbtype_def.h:967
BTID * heap_indexinfo_get_btid(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo)
Definition: heap_file.c:12991
unsigned int of_next
Definition: lock_free.h:69
LC_FIND_CLASSNAME xlocator_find_class_oid(THREAD_ENTRY *thread_p, const char *classname, OID *class_oid, LOCK lock)
Definition: locator_sr.c:1033
OR_ATTRIBUTE * heap_locate_last_attrepr(ATTR_ID attrid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10637
void heap_rv_dump_reuse_page(FILE *fp, int ignore_length, void *ignore_data)
Definition: heap_file.c:16382
static int heap_fix_header_page(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:19568
int file_apply_tde_algorithm(THREAD_ENTRY *thread_p, const VFID *vfid, const TDE_ALGORITHM tde_algo)
#define ER_IT_DATA_OVERFLOW
Definition: error_code.h:505
#define ER_OUT_OF_VIRTUAL_MEMORY
Definition: error_code.h:50
void LOG_CS_EXIT(THREAD_ENTRY *thread_p)
void log_append_undo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:1917
static int heap_reinitialize_page(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, const bool is_header_page)
Definition: heap_file.c:5444
int or_chn(RECDES *record)
DB_TYPE db_value_type(const DB_VALUE *value)
int orc_superclasses_from_record(RECDES *record, int *array_size, OID **array_ptr)
#define LF_ENTRY_DESCRIPTOR_INITIALIZER
Definition: lock_free.h:109
int heap_get_class_name_alloc_if_diff(THREAD_ENTRY *thread_p, const OID *class_oid, char *guess_classname, char **classname_out)
Definition: heap_file.c:9351
VPID full_search_vpid
Definition: heap_file.c:218
#define OR_VALUE_ALIGNED_SIZE(value)
PGBUF_LATCH_CONDITION
Definition: page_buffer.h:185
#define PTR_ALIGNMENT
Definition: memory_alloc.h:68
#define SINGLE_ROW_UPDATE
Definition: btree.h:54
int heap_get_hfid_from_vfid(THREAD_ENTRY *thread_p, const VFID *vfid, HFID *hfid)
Definition: heap_file.c:24843
unsigned is_desc
lf_tran_entry * thread_get_tran_entry(cubthread::entry *thread_p, int entry_idx)
#define ER_HF_MAX_BESTSPACE_ENTRIES
Definition: error_code.h:1368
int reserve0_for_future
Definition: heap_file.c:226
#define OR_GET_OID(ptr, oid)
SCAN_CODE heap_attrinfo_transform_to_disk(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, RECDES *old_recdes, record_descriptor *new_recdes)
Definition: heap_file.c:11527
#define OR_FIXED_ATTRIBUTES_OFFSET_BY_OBJ(obj, nvars)
Definition: heap_file.c:90
#define pgbuf_replace_watcher(thread_p, old_watcher, new_watcher)
Definition: page_buffer.h:337
int fetch_peek_dbval(THREAD_ENTRY *thread_p, REGU_VARIABLE *regu_var, val_descr *vd, OID *class_oid, OID *obj_oid, QFILE_TUPLE tpl, DB_VALUE **peek_dbval)
Definition: fetch.c:3773
#define ER_HEAP_WRONG_ATTRINFO
Definition: error_code.h:751
int or_put_int(OR_BUF *buf, int num)
int heap_update_logical(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:22771
BTREE_SEARCH
pthread_mutex_t bestspace_mutex
Definition: heap_file.c:478
int intl_identifier_casecmp(const char *str1, const char *str2)
int num_other_high_best
Definition: heap_file.c:204
int numeric_db_value_coerce_from_num(DB_VALUE *src, DB_VALUE *dest, DB_DATA_STATUS *data_status)
#define DB_VALUE_DOMAIN_TYPE(value)
Definition: dbtype.h:70
OR_CLASSREP ** repr
Definition: heap_file.c:326
void spage_collect_statistics(PAGE_PTR page_p, int *npages, int *nrecords, int *rec_length)
bool oid_is_root(const OID *oid)
Definition: oid.c:135
#define VACUUM_ER_LOG_HEAP
Definition: vacuum.h:50
unsigned int record_type
Definition: slotted_page.h:89
#define DB_MAX_IDENTIFIER_LENGTH
Definition: dbtype_def.h:495
int or_mvcc_set_header(RECDES *record, MVCC_REC_HEADER *mvcc_rec_header)
void heap_create_update_context(HEAP_OPERATION_CONTEXT *context, HFID *hfid_p, OID *oid_p, OID *class_oid_p, RECDES *recdes_p, HEAP_SCANCACHE *scancache_p, UPDATE_INPLACE_STYLE in_place)
Definition: heap_file.c:22378
static OR_ATTRIBUTE * heap_locate_attribute(ATTR_ID attrid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10613
int heap_manager_initialize(void)
Definition: heap_file.c:5070
THREAD_ENTRY * next_wait_thrd
Definition: heap_file.c:336
LOG_PAGEID logical_pageid
Definition: log_storage.hpp:65
static FILE_TYPE heap_get_file_type(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:19513
void heap_create_insert_context(HEAP_OPERATION_CONTEXT *context, HFID *hfid_p, OID *class_oid_p, RECDES *recdes_p, HEAP_SCANCACHE *scancache_p)
Definition: heap_file.c:22324
bool heap_remove_page_on_vacuum(THREAD_ENTRY *thread_p, PAGE_PTR *page_ptr, HFID *hfid)
Definition: heap_file.c:4643
int spage_compact(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
static int heap_chnguess_realloc(void)
Definition: heap_file.c:14912
#define TP_DOMAIN_COLLATION(dom)
int heap_rv_redo_update_and_update_chain(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:18918
#define OR_HEADER_SIZE(ptr)
int or_put_bigint(OR_BUF *buf, DB_BIGINT num)
static int heap_chnguess_finalize(void)
Definition: heap_file.c:14994
void lock_unlock_object(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, bool force)
static enum scanner_mode mode
xasl_unpack_info * unpack_info
Definition: heap_file.h:230
int head_second_best
Definition: heap_file.c:213
int heap_rv_redo_newpage(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15453
#define VPID_EQ(vpid_ptr1, vpid_ptr2)
Definition: dbtype_def.h:915
#define VFID_INITIALIZER
Definition: dbtype_def.h:890
HEAP_ATTRVALUE * heap_attrvalue_locate(ATTR_ID attrid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10590
int heap_get_num_objects(THREAD_ENTRY *thread_p, const HFID *hfid, int *npages, int *nobjs, int *avg_length)
Definition: heap_file.c:8935
int spage_mark_deleted_slot_as_reusable(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
void reserve_area(size_t size=0)
Definition: heap_file.c:24965
#define HFID_SET_NULL(hfid)
short volid
Definition: dbtype_def.h:880
HEAP_CLASSREPR_ENTRY * LRU_bottom
Definition: heap_file.c:353
#define ER_ALTER_CHANGE_CAST_FAILED_SET_MAX
Definition: error_code.h:1324
int stx_map_stream_to_func_pred(THREAD_ENTRY *thread_p, func_pred **xasl, char *xasl_stream, int xasl_stream_size, XASL_UNPACK_INFO **xasl_unpack_info_ptr)
#define ER_QPROC_SIZE_STRING_TRUNCATED
Definition: error_code.h:1299
static int heap_ovf_flush(THREAD_ENTRY *thread_p, const OID *ovf_oid)
Definition: heap_file.c:6558
#define OID_EQ(oidp1, oidp2)
Definition: oid.h:92
static const int heap_Find_best_page_limit
Definition: heap_file.c:490
#define OR_MVCC_PREV_VERSION_LSA_SIZE
static int heap_get_partitions_from_subclasses(THREAD_ENTRY *thread_p, const OID *subclasses, int *parts_count, OR_PARTITION *partitions)
Definition: heap_file.c:10946
DB_VALUE * db_value_copy(DB_VALUE *value)
Definition: db_macro.c:1537
#define heap_classrepr_free_and_init(class_repr, idxp)
Definition: heap_file.h:91
SCAN_CODE spage_get_page_header_info(PAGE_PTR page_p, DB_VALUE **page_header_info)
TP_DOMAIN_STATUS tp_value_cast(const DB_VALUE *src, DB_VALUE *dest, const TP_DOMAIN *desired_domain, bool implicit_coercion)
int heap_attrinfo_read_dbvalues(THREAD_ENTRY *thread_p, const OID *inst_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10337
VFID vfid
int locator_attribute_info_force(THREAD_ENTRY *thread_p, const HFID *hfid, OID *oid, HEAP_CACHE_ATTRINFO *attr_info, ATTR_ID *att_id, int n_att_id, LC_COPYAREA_OPERATION operation, int op_type, HEAP_SCANCACHE *scan_cache, int *force_count, bool not_check_fk, REPL_INFO_TYPE repl_info, int pruning_type, PRUNING_CONTEXT *pcontext, FUNC_PRED_UNPACK_INFO *func_preds, MVCC_REEV_DATA *mvcc_reev_data, UPDATE_INPLACE_STYLE force_update_inplace, RECDES *rec_descriptor, bool need_locking)
Definition: locator_sr.c:7311
int heap_alloc_new_page(THREAD_ENTRY *thread_p, HFID *hfid, OID class_oid, PGBUF_WATCHER *home_hint_p, VPID *new_page_vpid)
Definition: heap_file.c:24994
#define TP_DOMAIN_TYPE(dom)
int heap_insert_logical(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, PGBUF_WATCHER *home_hint_p)
Definition: heap_file.c:22426
static void heap_mvcc_log_home_no_change(THREAD_ENTRY *thread_p, LOG_DATA_ADDR *p_addr)
Definition: heap_file.c:18897
std::int64_t pageid
Definition: log_lsa.hpp:36
int overflow_get_capacity(THREAD_ENTRY *thread_p, const VPID *ovf_vpid, int *ovf_size, int *ovf_num_pages, int *ovf_overhead, int *ovf_free_space)
int tail_second_best
Definition: heap_file.c:214
int length
Definition: recovery.h:202
MVCC_SATISFIES_SNAPSHOT_RESULT mvcc_is_not_deleted_for_snapshot(THREAD_ENTRY *thread_p, MVCC_REC_HEADER *rec_header, MVCC_SNAPSHOT *snapshot)
Definition: mvcc.c:268
#define OR_MVCC_INSERT_HEADER_SIZE
static void cleanup(int signo)
Definition: broker.c:717
int heap_estimate(THREAD_ENTRY *thread_p, const HFID *hfid, int *npages, int *nobjs, int *avg_length)
Definition: heap_file.c:9005
#define ER_HEAP_UNKNOWN_HEAP
Definition: error_code.h:690
void * mht_get(MHT_TABLE *ht, const void *key)
Definition: memory_hash.c:1419
#define HEAP_HFID_HASH_SIZE
Definition: heap_file.h:194
int or_put_offset_internal(OR_BUF *buf, int num, int offset_size)
#define NULL
Definition: freelistheap.h:34
#define heap_classrepr_log_stack(msg,...)
Definition: heap_file.c:1361
int heap_rv_postpone_append_pages_to_heap(THREAD_ENTRY *thread_p, LOG_RCV *recv)
Definition: heap_file.c:25049
int file_alloc(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_INIT_PAGE_FUNC f_init, void *f_init_args, VPID *vpid_out, PAGE_PTR *page_out)
int db_string_truncate(DB_VALUE *value, const int precision)
Definition: db_macro.c:962
RECDES * recdes_p
Definition: heap_file.h:374
static int heap_delete_relocation(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, bool is_mvcc_op)
Definition: heap_file.c:20704
static int heap_midxkey_get_value(RECDES *recdes, OR_ATTRIBUTE *att, DB_VALUE *value, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10242
UINT64 MVCCID
void log_append_undo_crumbs(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int num_crumbs, const LOG_CRUMB *crumbs)
Definition: log_manager.c:2170
int heap_attrinfo_clear_dbvalues(HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10022
int lf_hash_find_or_insert(LF_TRAN_ENTRY *tran, LF_HASH_TABLE *table, void *key, void **entry, int *inserted)
Definition: lock_free.c:2101
PGNSLOTS spage_number_of_records(PAGE_PTR page_p)
Definition: slotted_page.c:860
#define MVCC_IS_FLAG_SET(rec_header_p, flags)
Definition: mvcc.h:84
#define HEAP_LOG_MVCC_INSERT_MAX_REDO_CRUMBS
struct pr_type * type
Definition: object_domain.h:76
HEAP_CLASSREPR_LOCK * lock_table
Definition: heap_file.c:371
#define OR_GET_VPID(ptr, vpid)
static int heap_attrinfo_recache(THREAD_ENTRY *thread_p, REPR_ID reprid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:9887
int heap_indexinfo_get_attrs_prefix_length(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo, int *attrs_prefix_length, int len_attrs_prefix_length)
Definition: heap_file.c:13054
static int heap_classrepr_entry_free(HEAP_CLASSREPR_ENTRY *cache_entry)
Definition: heap_file.c:2221
const char * pr_type_name(DB_TYPE id)
#define HEAP_GUESS_NUM_ATTRS_REFOIDS
Definition: heap_file.c:93
if(extra_options)
Definition: dynamic_load.c:958
static int heap_delete_physical(THREAD_ENTRY *thread_p, HFID *hfid_p, PAGE_PTR page_p, OID *oid_p)
Definition: heap_file.c:21472
OR_CLASSREP ** or_get_all_representation(RECDES *record, bool do_indexes, int *count)
HEAP_SCANCACHE scan_cache
Definition: heap_file.h:182
bool log_is_in_crash_recovery(void)
Definition: log_manager.c:476
const VFID * vfid
Definition: log_append.hpp:56
int heap_scancache_quick_start_with_class_oid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, OID *class_oid)
Definition: heap_file.c:19282
VFID vfid
DB_VALUE * heap_attrinfo_generate_key(THREAD_ENTRY *thread_p, int n_atts, int *att_ids, int *atts_prefix_length, HEAP_CACHE_ATTRINFO *attr_info, RECDES *recdes, DB_VALUE *db_valuep, char *buf, FUNCTION_INDEX_INFO *func_index_info, TP_DOMAIN *midxkey_domain)
Definition: heap_file.c:12672
int or_class_get_partition_info(RECDES *record, OR_PARTITION *partition_info, REPR_ID *repr_id, int *has_partition_info)
#define vacuum_er_log(er_log_level, msg,...)
Definition: vacuum.h:65
#define HEAP_DEBUG_ISVALID_SCANRANGE(scan_range)
Definition: heap_file.c:110
#define HEAP_STATS_PREV_BEST_INDEX(i)
Definition: heap_file.c:187
static int success()
bool LSA_ISNULL(const log_lsa *lsa_ptr)
Definition: log_lsa.hpp:153
LF_TRAN_SYSTEM hfid_table_Ts
Definition: lock_free.c:53
#define LC_NEXT_ONEOBJ_PTR_IN_COPYAREA(oneobj_ptr)
Definition: locator.h:48
#define OR_MVCC_DELETE_ID_OFFSET(mvcc_flags)
static int heap_stats_update_internal(THREAD_ENTRY *thread_p, const HFID *hfid, VPID *lotspace_vpid, int free_space)
Definition: heap_file.c:2990
void log_append_undoredo_recdes(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, const RECDES *undo_recdes, const RECDES *redo_recdes)
Definition: log_manager.c:2434
#define BEST_PAGE_SEARCH_MAX_COUNT
int heap_vpid_next(THREAD_ENTRY *thread_p, const HFID *hfid, PAGE_PTR pgptr, VPID *next_vpid)
Definition: heap_file.c:4983
#define BTID_SET_NULL(btid)
LC_FIND_CLASSNAME
unsigned int of_mutex
Definition: lock_free.h:78
#define OR_OFFSET_SIZE_1BYTE
int file_create_heap(THREAD_ENTRY *thread_p, bool reuse_oid, const OID *class_oid, VFID *vfid)
int db_value_domain_max(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale, const int codeset, const int collation_id, const DB_ENUMERATION *enumeration)
Definition: db_macro.c:581
PAGE_PTR pgptr
Definition: log_append.hpp:57
TP_DOMAIN * tp_domain_cache(TP_DOMAIN *transient)
LF_ENTRY_DUPLICATE_KEY_HANDLER f_duplicate
Definition: lock_free.h:106
int db_seq_free(DB_SEQ *seq)
Definition: db_set.c:323
#define err(fd,...)
Definition: porting.h:431
int heap_estimate_num_objects(THREAD_ENTRY *thread_p, const HFID *hfid)
Definition: heap_file.c:9060
#define ER_PB_BAD_PAGEID
Definition: error_code.h:67
#define db_private_free_and_init(thrd, ptr)
Definition: memory_alloc.h:141
int xserial_get_next_value(THREAD_ENTRY *thread_p, DB_VALUE *result_num, const OID *oid_p, int cached_num, int num_alloc, int is_auto_increment, bool force_set_last_insert_id)
Definition: serial.c:282
int num_second_best
Definition: heap_file.c:211
static void heap_log_update_physical(THREAD_ENTRY *thread_p, PAGE_PTR page_p, VFID *vfid_p, OID *oid_p, RECDES *old_recdes_p, RECDES *new_recdes_p, LOG_RCVINDEX rcvindex)
Definition: heap_file.c:22283
void log_append_undoredo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int undo_length, int redo_length, const void *undo_data, const void *redo_data)
Definition: log_manager.c:1837
HEAP_STATS_ENTRY * next
Definition: heap_file.c:236
int lf_freelist_init(LF_FREELIST *freelist, int initial_blocks, int block_size, LF_ENTRY_DESCRIPTOR *edesc, LF_TRAN_SYSTEM *tran_system)
Definition: lock_free.c:666
#define pgbuf_fix(thread_p, vpid, fetch_mode, requestmode, condition)
Definition: page_buffer.h:255
void thread_lock_entry(cubthread::entry *thread_p)
#define HEAP_UPDATE_IS_MVCC_OP(is_mvcc_class, update_style)
Definition: heap_file.c:153
int numeric_db_value_is_positive(const DB_VALUE *dbvalue)
MHT_TABLE * mht_create(const char *name, int est_size, unsigned int(*hash_func)(const void *key, unsigned int ht_size), int(*cmp_func)(const void *key1, const void *key2))
Definition: memory_hash.c:894
#define MVCC_ID_PRECEDES(id1, id2)
Definition: mvcc.h:137
#define HEAP_PAGE_GET_VACUUM_STATUS(chain)
Definition: heap_file.c:262
const char * get_buffer() const
PGLENGTH offset
Definition: log_storage.hpp:66
void log_append_undo_recdes(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, const RECDES *recdes)
Definition: log_manager.c:2532
#define HEAP_LOG_MVCC_REDISTRIBUTE_MAX_REDO_CRUMBS
DISK_ISVALID disk_is_page_sector_reserved(THREAD_ENTRY *thread_p, VOLID volid, PAGEID pageid)
static int heap_get_partition_attributes(THREAD_ENTRY *thread_p, const OID *cls_oid, ATTR_ID *type_id, ATTR_ID *values_id)
Definition: heap_file.c:10839
#define csect_exit(a, b)
Definition: cnv.c:139
#define db_private_free(thrd, ptr)
Definition: memory_alloc.h:229
void or_init(OR_BUF *buf, char *data, int length)
#define OR_VPID_SIZE
static int heap_compare_vpid(const void *key_vpid1, const void *key_vpid2)
Definition: heap_file.c:922
int heap_rv_update_chain_after_mvcc_op(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:23881
unsigned int oid_hash(const void *key_oid, unsigned int htsize)
Definition: oid.c:294
#define MVCC_REC_HEADER_INITIALIZER
Definition: mvcc.h:47
STATIC_INLINE int heap_copy_chain(THREAD_ENTRY *thread_p, PAGE_PTR page_heap, HEAP_CHAIN *chain) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:4149
static int heap_attrinfo_set_uninitialized(THREAD_ENTRY *thread_p, OID *inst_oid, RECDES *recdes, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:11348
#define db_private_alloc(thrd, size)
Definition: memory_alloc.h:227
SCAN_CODE spage_next_record_dont_skip_empty(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
PGBUF_WATCHER page_watcher
Definition: heap_file.h:149
#define NULL_FILEID
static OID * heap_ovf_insert(THREAD_ENTRY *thread_p, const HFID *hfid, OID *ovf_oid, RECDES *recdes)
Definition: heap_file.c:6458
const OID oid_Null_oid
Definition: oid.c:68
PGSLOTID spage_find_free_slot(PAGE_PTR page_p, SPAGE_SLOT **out_slot_p, PGSLOTID start_slot)
const OID * heap_ovf_delete(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *ovf_oid, VFID *ovf_vfid_p)
Definition: heap_file.c:6521
INT32 flags
Definition: heap_file.c:277
unsigned char * bitindex
Definition: heap_file.c:459
#define NULL_OFFSET
int using_mutex
Definition: lock_free.h:81
#define BIG_VAR_OFFSET_SIZE
need_clear_type need_clear
Definition: dbtype_def.h:1084
bool logtb_set_check_interrupt(THREAD_ENTRY *thread_p, bool flag)
#define CEIL_PTVDIV(dividend, divisor)
Definition: memory_alloc.h:50
static int heap_scancache_reset_modify(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid, const OID *class_oid)
Definition: heap_file.c:6962
HEAP_CLASSREPR_ENTRY * hash_next
Definition: heap_file.c:344
int heap_rv_redo_delete(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15807
int db_set_size(DB_SET *set)
Definition: db_set.c:557
OR_CLASSREP * heap_classrepr_get(THREAD_ENTRY *thread_p, const OID *class_oid, RECDES *class_recdes, REPR_ID reprid, int *idx_incache)
Definition: heap_file.c:2299
void er_set_with_oserror(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
void heap_attrinfo_dump(THREAD_ENTRY *thread_p, FILE *fp, HEAP_CACHE_ATTRINFO *attr_info, bool dump_schema)
Definition: heap_file.c:10540
LC_COPYAREA_ONEOBJ ** obj
Definition: locator.h:255
VFID * heap_ovf_find_vfid(THREAD_ENTRY *thread_p, const HFID *hfid, VFID *ovf_vfid, bool docreate, PGBUF_LATCH_CONDITION latch_cond)
Definition: heap_file.c:6353
HEAP_BESTSPACE best
Definition: heap_file.c:235
int count(int &result, const cub_regex_object &reg, const std::string &src, const int position, const INTL_CODESET codeset)
#define IO_DEFAULT_PAGE_SIZE
MHT_TABLE * ht
Definition: heap_file.c:457
int pr_clear_value(DB_VALUE *value)
static HEAP_CHNGUESS * heap_Guesschn
Definition: heap_file.c:497
void heap_scanrange_end(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range)
Definition: heap_file.c:8238
SCAN_CODE heap_scanrange_to_following(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range, OID *start_oid)
Definition: heap_file.c:8265
DB_BIGINT db_get_bigint(const DB_VALUE *value)
int heap_scan_cache_allocate_area(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache_p, int size)
Definition: heap_file.c:24744
pthread_mutex_t mutex
Definition: heap_file.c:311
char home_recdes_buffer[IO_MAX_PAGE_SIZE+MAX_ALIGNMENT]
Definition: heap_file.h:295
#define HFID_AS_ARGS(hfid)
RECDES * recdes
Definition: locator.h:257
void pgbuf_get_vpid(PAGE_PTR pgptr, VPID *vpid)
Definition: page_buffer.c:4579
offset_type offset
Definition: log_append.hpp:58
#define HEAP_NBYTES_TO_NBITS(byte_cnt)
Definition: heap_file.c:424
int db_value_domain_default(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale, const int codeset, const int collation_id, DB_ENUMERATION *enumeration)
Definition: db_macro.c:756
HEAP_SCANCACHE_NODE node
Definition: heap_file.h:134
static unsigned int heap_hash_vpid(const void *key_vpid, unsigned int htsize)
Definition: heap_file.c:908
#define VFID_COPY(vfid_ptr1, vfid_ptr2)
Definition: file_manager.h:69
pthread_mutex_t free_mutex
Definition: heap_file.c:359
int heap_rv_redo_mark_reusable_slot(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16147
const char * file_type_to_string(FILE_TYPE fstruct_type)
#define LF_FREELIST_INITIALIZER
Definition: lock_free.h:248
void log_sysop_abort(THREAD_ENTRY *thread_p)
Definition: log_manager.c:4017
#define NULL_REPRID
FILE_HEAP_DES heap
Definition: file_manager.h:132
#define MVCC_SET_FLAG_BITS(rec_header_p, flag)
Definition: mvcc.h:95
int heap_vacuum_all_objects(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *upd_scancache, MVCCID threshold_mvccid)
Definition: heap_file.c:23283
void er_stack_pop(void)
void heap_chnguess_clear(THREAD_ENTRY *thread_p, int tran_index)
Definition: heap_file.c:15416
SCAN_CODE heap_first(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:8097
#define ER_ALTER_CHANGE_CAST_FAILED_SET_MIN
Definition: error_code.h:1323
struct db_domain_info::general_info general_info
static bool heap_delete_all_page_records(THREAD_ENTRY *thread_p, const VPID *vpid, PAGE_PTR pgptr)
Definition: heap_file.c:5407
HEAP_CLASSREPR_ENTRY * next
Definition: heap_file.c:321
void heap_create_delete_context(HEAP_OPERATION_CONTEXT *context, HFID *hfid_p, OID *oid_p, OID *class_oid_p, HEAP_SCANCACHE *scancache_p)
Definition: heap_file.c:22351
int oid_compare_equals(const void *key_oid1, const void *key_oid2)
Definition: oid.c:310
LOG_LSA reference_lsa
Definition: recovery.h:204
#define HEAP_NBITS_TO_NBYTES(bit_cnt)
Definition: heap_file.c:422
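The two conversion macros above (HEAP_NBYTES_TO_NBITS / HEAP_NBITS_TO_NBYTES) translate between the byte size of a bitmap and the number of bits it can index, as used by the chnguess bit arrays. A minimal sketch of the arithmetic, assuming CEIL_PTVDIV rounds the division up and CHAR_BIT bits per byte; the SKETCH_* names are illustrative, not the real definitions (those live at heap_file.c:422 and :424):

#include <limits.h>		/* CHAR_BIT */

/* Illustrative only: ceiling-divide a bit count into bytes, and expand a
 * byte count back into the number of addressable bits. */
#define SKETCH_NBITS_TO_NBYTES(bit_cnt)   CEIL_PTVDIV ((bit_cnt), CHAR_BIT)
#define SKETCH_NBYTES_TO_NBITS(byte_cnt)  ((byte_cnt) * CHAR_BIT)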
void start_area()
Definition: heap_file.c:24943
#define OR_CLEAR_BOUND_BIT(bitptr, element)
static const HFID * heap_reuse(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *class_oid, const bool reuse_oid)
Definition: heap_file.c:5512
#define OR_CHN_OFFSET
OR_CLASSREP * or_get_classrep(RECDES *record, int repid)
MVCCID logtb_get_current_mvccid(THREAD_ENTRY *thread_p)
#define CAST_BUFLEN
Definition: porting.h:471
static int heap_eval_function_index(THREAD_ENTRY *thread_p, FUNCTION_INDEX_INFO *func_index_info, int n_atts, int *att_ids, HEAP_CACHE_ATTRINFO *attr_info, RECDES *recdes, int btid_index, DB_VALUE *result, FUNC_PRED_UNPACK_INFO *func_pred, TP_DOMAIN **fi_domain)
Definition: heap_file.c:17352
int ncolumns
Definition: dbtype_def.h:864
#define OR_VAR_LENGTH(length, obj, index, n_variables)
DB_ENUMERATION enumeration
Definition: object_domain.h:84
static int rv
Definition: heap_file.c:83
#define TP_IS_CHAR_TYPE(typeid)
PGNSLOTS spage_number_of_slots(PAGE_PTR page_p)
Definition: slotted_page.c:879
TP_DOMAIN_COLL_ACTION collation_flag
Definition: object_domain.h:94
void free_xasl_unpack_info(THREAD_ENTRY *thread_p, REFPTR(XASL_UNPACK_INFO, xasl_unpack_info))
int heap_get_index_with_name(THREAD_ENTRY *thread_p, OID *class_oid, const char *index_name, BTID *btid)
Definition: heap_file.c:13092
static DB_MIDXKEY * heap_midxkey_key_generate(THREAD_ENTRY *thread_p, RECDES *recdes, DB_MIDXKEY *midxkey, int *att_ids, HEAP_CACHE_ATTRINFO *attrinfo, DB_VALUE *func_res, int func_col_id, int func_attr_index_start, TP_DOMAIN *midxkey_domain)
Definition: heap_file.c:12553
int heap_rv_redo_reuse_page_reuse_oid(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16335
int file_descriptor_dump(THREAD_ENTRY *thread_p, const VFID *vfid, FILE *fp)
int heap_classrepr_decache(THREAD_ENTRY *thread_p, const OID *class_oid)
Definition: heap_file.c:1818
static void error(const char *msg)
Definition: gencat.c:331
LF_ENTRY_HASH_FUNC f_hash
Definition: lock_free.h:102
int file_init_page_type(THREAD_ENTRY *thread_p, PAGE_PTR page, void *args)
void log_append_postpone(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:2698
#define VPID_ISNULL(vpid_ptr)
Definition: dbtype_def.h:925
#define HEAP_MVCC_SET_HEADER_MAXIMUM_SIZE(mvcc_rec_header_p)
Definition: heap_file.c:128
const char * data
Definition: recovery.h:203
#define ER_SP_INVALID_HEADER
Definition: error_code.h:1443
int heap_chnguess_get(THREAD_ENTRY *thread_p, const OID *oid, int tran_index)
Definition: heap_file.c:15255
int heap_classrepr_find_index_id(OR_CLASSREP *classrepr, const BTID *btid)
Definition: heap_file.c:12193
void log_append_undo_recdes2(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, const VFID *vfid, PAGE_PTR pgptr, PGLENGTH offset, const RECDES *recdes)
Definition: log_manager.c:2538
static int rc
Definition: serial.c:50
int heap_prefetch(THREAD_ENTRY *thread_p, OID *class_oid, const OID *oid, LC_COPYAREA_DESC *prefetch)
Definition: heap_file.c:13512
int xheap_destroy(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *class_oid)
Definition: heap_file.c:5786
STATIC_INLINE void perfmon_inc_stat(THREAD_ENTRY *thread_p, PERF_STAT_ID psid) __attribute__((ALWAYS_INLINE))
#define ER_INTERRUPTED
Definition: error_code.h:51
int heap_get_btid_from_index_name(THREAD_ENTRY *thread_p, const OID *p_class_oid, const char *index_name, BTID *p_found_btid)
Definition: heap_file.c:16938
void file_postpone_destroy(THREAD_ENTRY *thread_p, const VFID *vfid)
#define pthread_mutex_unlock(a)
Definition: heap_file.c:82
TP_DOMAIN * tp_domain_construct(DB_TYPE domain_type, DB_OBJECT *class_obj, int precision, int scale, TP_DOMAIN *setdomain)
int heap_set_mvcc_rec_header_on_overflow(PAGE_PTR ovf_page, MVCC_REC_HEADER *mvcc_header)
Definition: heap_file.c:18740
PGBUF_WATCHER forward_page_watcher
Definition: heap_file.h:303
LOG_TDES * LOG_FIND_CURRENT_TDES(THREAD_ENTRY *thread_p=NULL)
Definition: log_impl.h:1115
PGSLOTID spage_delete(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
SCAN_CODE heap_get_class_oid(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid)
Definition: heap_file.c:9285
int db_make_midxkey(DB_VALUE *value, DB_MIDXKEY *midxkey)
static int heap_add_chain_links(THREAD_ENTRY *thread_p, const HFID *hfid, const VPID *vpid, const VPID *next_link, const VPID *prev_link, PGBUF_WATCHER *page_watcher, bool keep_page_fixed, bool is_page_watcher_inited)
Definition: heap_file.c:25330
struct func_pred * func_pred
Definition: heap_file.h:229
#define LOG_FIND_THREAD_TRAN_INDEX(thrd)
Definition: perf_monitor.h:158
int heap_classrepr_free(OR_CLASSREP *classrep, int *idx_incache)
Definition: heap_file.c:1893
void overflow_flush(THREAD_ENTRY *thread_p, const VPID *ovf_vpid)
#define MVCC_GET_FLAG(header)
Definition: mvcc.h:75
#define HFID_IS_NULL(hfid)
#define ER_PB_UNEXPECTED_PAGE_REFIX
Definition: error_code.h:1525
bool db_value_is_null(const DB_VALUE *value)
char * or_class_name(RECDES *record)
static DISK_ISVALID heap_chkreloc_start(HEAP_CHKALL_RELOCOIDS *chk)
Definition: heap_file.c:14354
static int heap_compare_hfid(const void *key_hfid1, const void *key_hfid2)
Definition: heap_file.c:951
static int heap_hfid_table_entry_key_copy(void *src, void *dest)
Definition: heap_file.c:23115
#define ARG_FILE_LINE
Definition: error_manager.h:44
#define OR_FIXED_ATT_IS_UNBOUND(obj, nvars, fsize, position)
STATIC_INLINE HEAP_HDR_STATS * heap_get_header_stats_ptr(THREAD_ENTRY *thread_p, PAGE_PTR page_header) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:4088
bool is_recdes_assigned_to_area(const RECDES &recdes) const
Definition: heap_file.c:24981
static int heap_attrvalue_read(RECDES *recdes, HEAP_ATTRVALUE *value, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10075
#define OR_BYTE_SIZE
std::atomic< char * > classname
Definition: heap_file.h:208
VOLID pgbuf_get_volume_id(PAGE_PTR pgptr)
Definition: page_buffer.c:4707
#define HEAP_PERF_START(thread_p, context)
Definition: heap_file.c:519
LF_ENTRY_ALLOC_FUNC f_alloc
Definition: lock_free.h:84
#define HEAP_DROP_FREE_SPACE
Definition: heap_file.c:102
int pr_clone_value(const DB_VALUE *src, DB_VALUE *dest)
static bool heap_is_reusable_oid(const FILE_TYPE file_type)
Definition: heap_file.c:1323
int heap_rv_mvcc_undo_delete(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15881
static const bool COPY
int heap_rv_redo_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16236
int heap_rv_mvcc_redo_delete_overflow(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16069
OID * db_get_oid(const DB_VALUE *value)
int log_add_to_modified_class_list(THREAD_ENTRY *thread_p, const char *classname, const OID *class_oid)
Definition: log_manager.c:4757
#define ER_HEAP_UNKNOWN_ATTRS
Definition: error_code.h:752
void heap_init_get_context(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, const OID *oid, OID *class_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, int old_chn)
Definition: heap_file.c:24697
#define ER_ALTER_CHANGE_TRUNC_OVERFLOW_NOT_ALLOWED
Definition: error_code.h:1321
int logpb_prior_lsa_append_all_list(THREAD_ENTRY *thread_p)
OID class_oid
Definition: heap_file.c:273
float prm_get_float_value(PARAM_ID prm_id)
unsigned char * bits
Definition: heap_file.c:449
int zone
Definition: heap_file.c:315
static int heap_chnguess_remove_entry(const void *oid_key, void *ent, void *xignore)
Definition: heap_file.c:15176
const void * mht_put_new(MHT_TABLE *ht, const void *key, void *data)
Definition: memory_hash.c:1723
int heap_delete_logical(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:22606
static int heap_stats_del_bestspace_by_vpid(THREAD_ENTRY *thread_p, VPID *vpid)
Definition: heap_file.c:1137
#define OR_PUT_HFID(ptr, hfid)
LOG_LSA prev_version_lsa
Definition: mvcc.h:45
#define VACUUM_LOG_ADD_DROPPED_FILE_POSTPONE
Definition: vacuum.h:78
unsigned int offset_to_record
Definition: slotted_page.h:87
#define BTREE_IS_MULTI_ROW_OP(op)
Definition: btree.h:60
static int heap_mark_class_as_modified(THREAD_ENTRY *thread_p, OID *oid_p, int chn, bool decache)
Definition: heap_file.c:19460
#define OR_GET_INT(ptr)
MVCC_SNAPSHOT * mvcc_snapshot
Definition: heap_file.h:154
unsigned int mht_count(const MHT_TABLE *ht)
Definition: memory_hash.c:2260
int er_errid_if_has_error(void)
INT16 PGSLOTID
DB_DATA_STATUS
#define pthread_mutex_init(a, b)
Definition: heap_file.c:78
static int heap_stats_entry_free(THREAD_ENTRY *thread_p, void *data, void *args)
Definition: heap_file.c:966
PGBUF_WATCHER * home_page_watcher_p
Definition: heap_file.h:306
static int heap_stats_get_min_freespace(HEAP_HDR_STATS *heap_hdr)
Definition: heap_file.c:2887
#define MVCC_SET_REPID(header, rep_id)
Definition: mvcc.h:66
int xheap_create(THREAD_ENTRY *thread_p, HFID *hfid, const OID *class_oid, bool reuse_oid)
Definition: heap_file.c:5772
static int heap_update_relocation(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, bool is_mvcc_op)
Definition: heap_file.c:21744
bool vacuum_is_mvccid_vacuumed(MVCCID id)
Definition: vacuum.c:7361
void db_fprint_value(FILE *fp, const db_value *value)
static int heap_get_last_page(THREAD_ENTRY *thread_p, const HFID *hfid, HEAP_HDR_STATS *heap_hdr, HEAP_SCANCACHE *scan_cache, VPID *last_vpid, PGBUF_WATCHER *pg_watcher)
Definition: heap_file.c:4004
#define free_and_init(ptr)
Definition: memory_alloc.h:147
int heap_scancache_quick_start_with_class_hfid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid)
Definition: heap_file.c:19308
bool heap_attrinfo_check_unique_index(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, ATTR_ID *att_id, int n_att_id)
Definition: heap_file.c:18957
#define DB_ALIGN(offset, align)
Definition: memory_alloc.h:84
static int heap_Maxslotted_reclength
Definition: heap_file.c:488
#define strlen(s1)
Definition: intl_support.c:43
#define BTID_COPY(btid_ptr1, btid_ptr2)
SCAN_CODE spage_previous_record(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
#define HEAP_BIT_CLEAR(byte_ptr, bit_num)
Definition: heap_file.c:438
int or_get_attrname(RECDES *record, int attrid, char **string, int *alloced_string)
int heap_initialize_hfid_table(void)
Definition: heap_file.c:23185
#define OR_GET_MVCC_FLAG(ptr)
static DB_MIDXKEY * heap_midxkey_key_get(RECDES *recdes, DB_MIDXKEY *midxkey, OR_INDEX *index, HEAP_CACHE_ATTRINFO *attrinfo, DB_VALUE *func_res, TP_DOMAIN *func_domain, TP_DOMAIN **key_domain)
Definition: heap_file.c:12387
#define HEAP_CHKRELOC_UNFOUND_SHORT
Definition: heap_file.c:14540
void heap_dump(THREAD_ENTRY *thread_p, FILE *fp, HFID *hfid, bool dump_records)
Definition: heap_file.c:14113
#define MVCC_SET_CHN(header, chn_)
Definition: mvcc.h:72
static int heap_class_get_partition_info(THREAD_ENTRY *thread_p, const OID *class_oid, OR_PARTITION *partition_info, HFID *class_hfid, REPR_ID *repr_id, int *has_partition_info)
Definition: heap_file.c:10785
void LSA_SET_NULL(log_lsa *lsa_ptr)
Definition: log_lsa.hpp:146
int fcnt
Definition: heap_file.c:313
HEAP_FINDSPACE
Definition: heap_file.c:159
static int heap_classrepr_entry_reset(HEAP_CLASSREPR_ENTRY *cache_entry)
Definition: heap_file.c:1584
#define OR_VAR_TABLE_SIZE_INTERNAL(vars, offset_size)
bool pgbuf_is_page_fixed_by_thread(THREAD_ENTRY *thread_p, const VPID *vpid_p)
int heap_get_class_partitions(THREAD_ENTRY *thread_p, const OID *class_oid, OR_PARTITION **parts, int *parts_count)
Definition: heap_file.c:11016
DISK_ISVALID heap_check_heap_file(THREAD_ENTRY *thread_p, HFID *hfid)
Definition: heap_file.c:13941
int heap_attrinfo_start_with_btid(THREAD_ENTRY *thread_p, OID *class_oid, BTID *btid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:12237
static int heap_scancache_quick_start_internal(HEAP_SCANCACHE *scan_cache, const HFID *hfid)
Definition: heap_file.c:7072
#define ER_FILE_NOT_ENOUGH_PAGES_IN_DATABASE
Definition: error_code.h:88
static int heap_update_adjust_recdes_header(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *update_context, bool is_mvcc_class)
Definition: heap_file.c:19838
#define OR_GET_MVCC_CHN(ptr)
DB_DOMAIN * domain
Definition: dbtype_def.h:865
PGBUF_WATCHER * header_page_watcher_p
Definition: heap_file.h:308
static int heap_update_bigone(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, bool is_mvcc_op)
Definition: heap_file.c:21568
int db_make_string_copy(DB_VALUE *value, DB_CONST_C_CHAR str)
#define DB_PAGESIZE
static int heap_find_location_and_insert_rec_newhome(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:20183
HFID hfid
Definition: heap_file.h:205
VPID prev_vpid
Definition: heap_file.c:274
#define ER_HEAP_UNKNOWN_OBJECT
Definition: error_code.h:102
#define MVCC_GET_DELID(header)
Definition: mvcc.h:57
void lf_freelist_destroy(LF_FREELIST *freelist)
Definition: lock_free.c:711
static int heap_stats_get_second_best(HEAP_HDR_STATS *heap_hdr, VPID *vpid)
Definition: heap_file.c:3152
PGBUF_WATCHER header_page_watcher
Definition: heap_file.h:302
int mht_rem2(MHT_TABLE *ht, const void *key, const void *data, int(*rem_func)(const void *key, void *data, void *args), void *func_args)
Definition: memory_hash.c:2078
HEAP_PAGE_VACUUM_STATUS
Definition: heap_file.h:358
void pgbuf_set_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, PAGE_TYPE ptype)
Definition: page_buffer.c:4847
bool heap_is_page_header(THREAD_ENTRY *thread_p, PAGE_PTR page)
Definition: heap_file.c:24869
bool prm_get_bool_value(PARAM_ID prm_id)
#define INT_ALIGNMENT
Definition: memory_alloc.h:61
#define QSTR_IS_ANY_CHAR_OR_BIT(s)
Definition: string_opfunc.h:47
#define HFID_EQ(hfid_ptr1, hfid_ptr2)
Definition: heap_file.h:48
OID class_oid
Definition: heap_file.c:324
int orc_subclasses_from_record(RECDES *record, int *array_size, OID **array_ptr)
int heap_rv_mvcc_undo_delete_overflow(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15934
HEAP_DIRECTION
Definition: heap_file.c:170
#define HEAP_BIT_GET(byte_ptr, bit_num)
Definition: heap_file.c:433
int spage_get_free_space_without_saving(THREAD_ENTRY *thread_p, PAGE_PTR page_p, bool *need_update)
Definition: slotted_page.c:925
#define OR_PUT_INT(ptr, val)
static void * heap_hfid_table_entry_alloc(void)
Definition: heap_file.c:23028
DISK_ISVALID vacuum_check_not_vacuumed_rec_header(THREAD_ENTRY *thread_p, OID *oid, OID *class_oid, MVCC_REC_HEADER *rec_header, int btree_node_type)
Definition: vacuum.c:7314
#define OR_NON_MVCC_HEADER_SIZE
int db_get_string_size(const DB_VALUE *value)
SCAN_CODE heap_prev(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:18671
DB_C_SHORT db_get_short(const DB_VALUE *value)
HEAP_CACHE_ATTRINFO * cache_attrinfo
Definition: xasl.h:278
int file_rv_tracker_mark_heap_deleted(THREAD_ENTRY *thread_p, LOG_RCV *rcv, bool is_undo)
#define HEAP_PERF_TRACK_LOGGING(thread_p, context)
Definition: heap_file.c:561
SCAN_CODE heap_attrinfo_transform_to_disk_except_lob(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, RECDES *old_recdes, record_descriptor *new_recdes)
Definition: heap_file.c:11547
unsigned page_was_unfixed
Definition: page_buffer.h:227
static int heap_hfid_table_entry_free(void *unique_stat)
Definition: heap_file.c:23048
void er_clear(void)
void log_sysop_attach_to_outer(THREAD_ENTRY *thread_p)
Definition: log_manager.c:4076
static VPID * heap_vpid_remove(THREAD_ENTRY *thread_p, const HFID *hfid, HEAP_HDR_STATS *heap_hdr, VPID *rm_vpid)
Definition: heap_file.c:4384
#define REPR_HASH(class_oid)
Definition: heap_file.c:403
LOCK lock_Conv[12][12]
Definition: lock_table.c:179
int chn
Definition: heap_file.c:446
std::size_t thread_num_total_threads(void)
void log_sysop_commit(THREAD_ENTRY *thread_p)
Definition: log_manager.c:3895
static int heap_delete_home(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, bool is_mvcc_op)
Definition: heap_file.c:21175
int or_put_data(OR_BUF *buf, const char *data, int length)
SCAN_CODE spage_next_record(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
#define DISK_VPID_ALIGNED_SIZE
#define OR_GET_MVCC_REPID_AND_FLAG(ptr)
STATIC_INLINE int heap_get_last_vpid(THREAD_ENTRY *thread_p, const HFID *hfid, VPID *last_vpid) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:4048
int REPR_ID
static int heap_get_class_subclasses(THREAD_ENTRY *thread_p, const OID *class_oid, int *count, OID **subclasses)
Definition: heap_file.c:10702
const cubmem::block_allocator & get_area_block_allocator()
Definition: heap_file.c:24987
#define DB_VALUE_TYPE(value)
Definition: dbtype.h:72
static unsigned int heap_hash_hfid(const void *key_hfid, unsigned int htsize)
Definition: heap_file.c:937
static int heap_delete_bigone(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, bool is_mvcc_op)
Definition: heap_file.c:20548
void heap_flush(THREAD_ENTRY *thread_p, const OID *oid)
Definition: heap_file.c:5974
int i
Definition: dynamic_load.c:954
#define PGBUF_ORDERED_NULL_HFID
Definition: page_buffer.h:85
STATIC_INLINE int heap_copy_header_stats(THREAD_ENTRY *thread_p, PAGE_PTR page_header, HEAP_HDR_STATS *header_stats) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:4108
int mht_map(const MHT_TABLE *ht, int(*map_func)(const void *key, void *data, void *args), void *func_args)
Definition: memory_hash.c:2199
void heap_rv_dump_append_pages_to_heap(FILE *fp, int length, void *data)
Definition: heap_file.c:25270
int db_make_null(DB_VALUE *value)
int spage_get_free_space(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
Definition: slotted_page.c:898
static PAGE_PTR heap_scan_pb_lock_and_fetch_debug(THREAD_ENTRY *thread_p, const VPID *vpid_ptr, PAGE_FETCH_MODE fetch_mode, LOCK lock, HEAP_SCANCACHE *scan_cache, PGBUF_WATCHER *pg_watcher, const char *caller_file, const int caller_line)
Definition: heap_file.c:1227
HEAP_OPERATION_TYPE type
Definition: heap_file.h:279
DB_TYPE id
#define OR_OFFSET_SIZE_FLAG
#define DB_IS_NULL(value)
Definition: dbtype.h:63
int heap_assign_address(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *oid, int expected_length)
Definition: heap_file.c:5914
void resize_buffer(std::size_t size)
#define OR_GET_OFFSET_SIZE(ptr)
#define NULL_ATTRID
Definition: heap_file.c:309
struct tp_domain * next
Definition: object_domain.h:74
static SCAN_CODE heap_get_record_info(THREAD_ENTRY *thread_p, const OID oid, RECDES *recdes, RECDES forward_recdes, PGBUF_WATCHER *page_watcher, HEAP_SCANCACHE *scan_cache, bool ispeeking, DB_VALUE **record_info)
Definition: heap_file.c:18445
#define INLINE
struct func_pred * expr
Definition: heap_file.h:223
static void heap_mvcc_log_redistribute(THREAD_ENTRY *thread_p, RECDES *p_recdes, LOG_DATA_ADDR *p_addr)
Definition: heap_file.c:23985
static HEAP_FINDSPACE heap_stats_find_page_in_bestspace(THREAD_ENTRY *thread_p, const HFID *hfid, HEAP_BESTSPACE *bestspace, int *idx_badspace, int record_length, int needed_space, HEAP_SCANCACHE *scan_cache, PGBUF_WATCHER *pg_watcher)
Definition: heap_file.c:3240
int spage_slot_size(void)
Definition: slotted_page.c:827
static SCAN_CODE heap_get_if_diff_chn(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, INT16 slotid, RECDES *recdes, bool ispeeking, int chn, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:7274
bool heap_should_try_update_stat(const int current_freespace, const int prev_freespace)
Definition: heap_file.c:23916
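heap_should_try_update_stat decides whether a change in a page's free space is worth propagating to the heap header statistics. A hedged reading only, assuming the decision hinges on the free space crossing the HEAP_DROP_FREE_SPACE acceptance threshold (the exact predicate is at heap_file.c:23916):

/* Sketch of the intent: the page becomes newly interesting once its free
 * space grows past the HEAP_DROP_FREE_SPACE threshold. */
static bool
heap_should_try_update_stat_sketch (const int current_freespace, const int prev_freespace)
{
  return (current_freespace > prev_freespace
	  && current_freespace > HEAP_DROP_FREE_SPACE
	  && prev_freespace <= HEAP_DROP_FREE_SPACE);
}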
OID class_oid
Definition: heap_file.h:200
INT16 type
int vacuum_rv_check_at_undo(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, INT16 slotid, INT16 rec_type)
Definition: vacuum.c:7525
#define HEAP_SET_RECORD(recdes, record_area_size, record_length, record_type, record_data)
Definition: heap_file.h:52
char * strdup(const char *str)
Definition: porting.c:901
int heap_delete_hfid_from_cache(THREAD_ENTRY *thread_p, OID *class_oid)
Definition: heap_file.c:23262
HEAP_CLASSREPR_ENTRY * LRU_top
Definition: heap_file.c:352
#define NULL_VOLID
MVCC_SNAPSHOT_FUNC snapshot_fnc
Definition: mvcc.h:176
#define SP_ERROR
Definition: slotted_page.h:49
int lf_hash_delete(LF_TRAN_ENTRY *tran, LF_HASH_TABLE *table, void *key, int *success)
Definition: lock_free.c:2181
int lock_has_lock_on_object(const OID *oid, const OID *class_oid, LOCK lock)
int debug_initpattern
Definition: heap_file.h:143
bool heap_is_big_length(int length)
Definition: heap_file.c:1302
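heap_is_big_length tells callers whether a record is too large for a slotted heap page and must go through the overflow file instead. A one-line sketch, assuming heap_Maxslotted_reclength (heap_file.c:488) holds the largest record a heap page can store:

/* Sketch: records longer than the slotted-page maximum become big records
 * and are written via heap_ovf_insert rather than into the home page. */
static bool
heap_is_big_length_sketch (int length)
{
  return length > heap_Maxslotted_reclength;
}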
static int heap_scancache_add_partition_node(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, OID *partition_oid)
Definition: heap_file.c:23938
#define IO_MAX_PAGE_SIZE
int heap_rv_undoredo_pagehdr(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15495
HEAP_SCANCACHE_NODE_LIST * next
Definition: heap_file.h:135
int heap_indexinfo_get_attrids(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo, ATTR_ID *attrids)
Definition: heap_file.c:13030
SCAN_CODE heap_header_next_scan(THREAD_ENTRY *thread_p, int cursor, DB_VALUE **out_values, int out_cnt, void *ptr)
Definition: heap_file.c:17780
static SCAN_CODE heap_ovf_get(THREAD_ENTRY *thread_p, const OID *ovf_oid, RECDES *recdes, int chn, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:6606
static int heap_hfid_cache_get(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid, FILE_TYPE *ftype_out, char **classname_out)
Definition: heap_file.c:23484
int setval(DB_VALUE *dest, const DB_VALUE *src, bool copy) const
int db_make_int(DB_VALUE *value, const int num)
int db_get_string_length(const DB_VALUE *value)
void heap_attrinfo_end(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:9979
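heap_attrinfo_start_with_btid (heap_file.c:12237) and heap_attrinfo_end above bracket the lifetime of a HEAP_CACHE_ATTRINFO. A minimal pairing sketch; thread_p, class_oid and btid are assumed to be in scope, the actual value reads are elided, and NO_ERROR is the usual success code:

HEAP_CACHE_ATTRINFO attr_info;

if (heap_attrinfo_start_with_btid (thread_p, class_oid, btid, &attr_info) == NO_ERROR)
  {
    /* ... read or dump the cached key attributes here ... */
    heap_attrinfo_end (thread_p, &attr_info);	/* always release the cache */
  }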
bool pgbuf_has_any_waiters(PAGE_PTR pgptr)
short volid
Definition: dbtype_def.h:887
static SCAN_CODE heap_attrinfo_transform_to_disk_internal(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, RECDES *old_recdes, record_descriptor *new_recdes, int lob_create_flag)
Definition: heap_file.c:11568
int heap_init_func_pred_unpack_info(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, const OID *class_oid, FUNC_PRED_UNPACK_INFO **func_indx_preds)
Definition: heap_file.c:17493
int heap_rv_nop(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:23862
void log_sysop_end_logical_run_postpone(THREAD_ENTRY *thread_p, LOG_LSA *posp_lsa)
Definition: log_manager.c:3982
HEAP_SCANCACHE * scan_cache
Definition: heap_file.h:375
void thread_wakeup(cubthread::entry *thread_p, thread_resume_suspend_status resume_reason)
entry & get_entry(void)
int heap_scancache_quick_start(HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7040
int db_make_oid(DB_VALUE *value, const OID *oid)
float recs_sumlen
Definition: heap_file.c:203
#define VPID_GET_FROM_OID(vpid_ptr, oid_ptr)
Definition: page_buffer.h:46
#define OR_BOUND_BIT_FLAG
int heap_get_class_supers(THREAD_ENTRY *thread_p, const OID *class_oid, OID **super_oids, int *count)
Definition: heap_file.c:11153
struct heap_hdr_stats HEAP_HDR_STATS
Definition: heap_file.c:190
static void heap_mvcc_log_home_change_on_delete(THREAD_ENTRY *thread_p, RECDES *old_recdes, RECDES *new_recdes, LOG_DATA_ADDR *p_addr)
Definition: heap_file.c:18862
#define MVCC_SET_FLAG(header, flag)
Definition: mvcc.h:78
#define OID_ISNULL(oidp)
Definition: oid.h:81
#define SET_AUTO_INCREMENT_SERIAL_NAME(SR_NAME, CL_NAME, AT_NAME)
Definition: transform.h:176
LC_COPYAREA_MANYOBJS * mobjs
Definition: locator.h:254
static HEAP_CLASSREPR_ENTRY * heap_classrepr_entry_alloc(void)
Definition: heap_file.c:2097
enum mvcc_satisfies_snapshot_result MVCC_SATISFIES_SNAPSHOT_RESULT
Definition: mvcc.h:164
char * meta_data
Definition: dbtype_def.h:949
#define OR_PUT_BIGINT(ptr, val)
#define DONT_FREE
Definition: page_buffer.h:41
static void heap_build_forwarding_recdes(RECDES *recdes_p, INT16 rec_type, OID *forward_oid)
Definition: heap_file.c:19683
static int heap_scancache_force_modify(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:6934
#define HEAP_DEBUG_SCANCACHE_INITPATTERN
Definition: heap_file.c:104
FILE_OVF_HEAP_DES heap_overflow
Definition: file_manager.h:133
SCAN_CODE heap_next(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:18622
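heap_first (heap_file.c:8097) and heap_next above drive a forward heap scan. A usage sketch, assuming thread_p, hfid and class_oid pointers are in scope, the scan cache is started with heap_scancache_quick_start_with_class_hfid and released with heap_scancache_end (declared in heap_file.h), and PEEK access leaves recdes pointing into the fixed page:

HEAP_SCANCACHE scan_cache;
RECDES recdes;
OID oid;
SCAN_CODE scan;

heap_scancache_quick_start_with_class_hfid (thread_p, &scan_cache, hfid);
scan = heap_first (thread_p, hfid, class_oid, &oid, &recdes, &scan_cache, PEEK);
while (scan == S_SUCCESS)
  {
    /* ... consume the peeked record ... */
    scan = heap_next (thread_p, hfid, class_oid, &oid, &recdes, &scan_cache, PEEK);
  }
heap_scancache_end (thread_p, &scan_cache);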
int heap_indexinfo_get_num_attrs(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo)
Definition: heap_file.c:13010
#define LANG_SYS_CODESET
#define TP_DOMAIN_CODESET(dom)
pthread_mutex_t LRU_mutex
Definition: heap_file.c:351
int collation_id
Definition: object_domain.h:92
static int heap_hfid_table_entry_key_compare(void *k1, void *k2)
Definition: heap_file.c:23152
INT16 spage_get_record_type(PAGE_PTR page_p, PGSLOTID slot_id)
int heap_header_capacity_end_scan(THREAD_ENTRY *thread_p, void **ptr)
Definition: heap_file.c:18181
static char * heap_bestspace_to_string(char *buf, int buf_size, const HEAP_BESTSPACE *hb)
Definition: heap_file.c:18204
static HEAP_STATS_ENTRY * heap_stats_add_bestspace(THREAD_ENTRY *thread_p, const HFID *hfid, VPID *vpid, int freespace)
Definition: heap_file.c:997
int qexec_clear_func_pred(THREAD_ENTRY *thread_p, func_pred *fpr)
int heap_get_indexinfo_of_btid(THREAD_ENTRY *thread_p, const OID *class_oid, const BTID *btid, BTREE_TYPE *type, int *num_attrs, ATTR_ID **attr_ids, int **attrs_prefix_length, char **btnamepp, int *func_index_col_id)
Definition: heap_file.c:13134
#define pthread_mutex_lock(a)
Definition: heap_file.c:80
enum update_inplace_style UPDATE_INPLACE_STYLE
Definition: heap_file.h:268
char * oid_to_string(char *buf, int buf_size, OID *oid)
int heap_attrinfo_set(const OID *inst_oid, ATTR_ID attrid, DB_VALUE *attr_val, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:11239
int heap_rv_undo_delete(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16164
int heap_get_class_info(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid_out, FILE_TYPE *ftype_out, char **classname_out)
Definition: heap_file.c:16733
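heap_get_class_info resolves the heap file identifier (and optionally the file type and class name) cached for a class OID. A call sketch; passing NULL for the outputs that are not needed is an assumption here, as is ER_FAILED as the caller's error code:

HFID hfid;

if (heap_get_class_info (thread_p, class_oid, &hfid, NULL, NULL) != NO_ERROR)
  {
    /* class is unknown or the hfid cache lookup failed */
    return ER_FAILED;
  }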
PAGE_PTR pgptr
Definition: page_buffer.h:222
const void * data
Definition: log_append.hpp:48
OR_ATTRIBUTE * attributes
THREAD_ENTRY * next_wait_thrd
Definition: heap_file.c:318
void heap_clear_partition_info(THREAD_ENTRY *thread_p, OR_PARTITION *parts, int parts_count)
Definition: heap_file.c:11126
int or_pad(OR_BUF *buf, int length)
#define ER_HEAP_CYCLE
Definition: error_code.h:109
#define pgbuf_ordered_unfix(thread_p, watcher_object)
Definition: page_buffer.h:280
char * vfid_to_string(char *buf, int buf_size, VFID *vfid)
#define MVCC_CLEAR_ALL_FLAG_BITS(rec_header_p)
Definition: mvcc.h:98
static int heap_classrepr_decache_guessed_last(const OID *class_oid)
Definition: heap_file.c:1676
PGBUF_LATCH_MODE latch_mode
Definition: heap_file.h:385
#define HEAP_PERF_TRACK_EXECUTE(thread_p, context)
Definition: heap_file.c:540
int heap_nonheader_page_capacity()
Definition: heap_file.c:25032
static int heap_get_spage_type(void)
Definition: heap_file.c:1312
int heap_rv_mvcc_redo_insert(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15690
int heap_rv_undoredo_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16247
int overflow_get_length(THREAD_ENTRY *thread_p, const VPID *ovf_vpid)
bool heap_is_object_not_null(THREAD_ENTRY *thread_p, OID *class_oid, const OID *oid)
Definition: heap_file.c:8848
int heap_rv_undo_insert(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15784
int heap_get_class_name(THREAD_ENTRY *thread_p, const OID *class_oid, char **class_name)
Definition: heap_file.c:9328
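heap_get_class_name returns a newly allocated copy of the class name. A sketch of the common call pattern, under the assumption that the caller owns the returned string and releases it with free_and_init (memory_alloc.h:147):

char *class_name = NULL;

if (heap_get_class_name (thread_p, class_oid, &class_name) == NO_ERROR
    && class_name != NULL)
  {
    /* ... use class_name ... */
    free_and_init (class_name);
  }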
#define db_private_realloc(thrd, ptr, size)
Definition: memory_alloc.h:231
static int heap_fix_forward_page(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context, OID *forward_oid_hint)
Definition: heap_file.c:19617
#define PEEK
Definition: file_io.h:74
int overflow_insert(THREAD_ENTRY *thread_p, const VFID *ovf_vfid, VPID *ovf_vpid, RECDES *recdes, FILE_TYPE file_type)
Definition: overflow_file.c:95
#define pgbuf_ordered_unfix_and_init(thread_p, page, pg_watcher)
Definition: page_buffer.h:69
double amount
Definition: dbtype_def.h:831
#define VPID_SET_NULL(vpid_ptr)
Definition: dbtype_def.h:906
static void heap_scancache_block_deallocate(cubmem::block &b)
Definition: heap_file.c:24933
int qdata_increment_dbval(DB_VALUE *dbval_p, DB_VALUE *result_p, int inc_val)
int xheap_reclaim_addresses(THREAD_ENTRY *thread_p, const HFID *hfid)
Definition: heap_file.c:6122
int heap_rv_undo_ovf_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:24812
#define HEAP_CHNGUESS_FUDGE_MININDICES
Definition: heap_file.c:418
SCAN_CODE heap_get_visible_version_internal(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, bool is_heap_scan)
Definition: heap_file.c:24333
static int heap_classrepr_entry_remove_from_LRU(HEAP_CLASSREPR_ENTRY *cache_entry)
Definition: heap_file.c:1637
struct heap_classrepr_lock HEAP_CLASSREPR_LOCK
Definition: heap_file.c:331
static int heap_scancache_end_internal(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, bool scan_state)
Definition: heap_file.c:7171
bool btree_is_unique_type(BTREE_TYPE type)
Definition: btree.c:6046
HEAP_CLASSREPR_FREE_LIST free_list
Definition: heap_file.c:373
OR_ATTRIBUTE ** atts
char * buf
Definition: dbtype_def.h:866
#define PGBUF_PAGE_STATE_ARGS(pg)
Definition: page_buffer.h:57
HEAP_CLASSREPR_ENTRY * prev
Definition: heap_file.c:320
int heap_get_referenced_by(THREAD_ENTRY *thread_p, OID *class_oid, const OID *obj_oid, RECDES *recdes, int *max_oid_cnt, OID **oid_list)
Definition: heap_file.c:13303
int heap_compact_pages(THREAD_ENTRY *thread_p, OID *class_oid)
Definition: heap_file.c:16754
#define PGBUF_INIT_WATCHER(w, rank, hfid)
Definition: page_buffer.h:123
#define HFID_COPY(hfid_ptr1, hfid_ptr2)
#define ER_HEAP_BAD_OBJECT_TYPE
Definition: error_code.h:105
SCAN_CODE log_get_undo_record(THREAD_ENTRY *thread_p, LOG_PAGE *log_page_p, LOG_LSA process_lsa, RECDES *recdes)
Definition: log_manager.c:9370
std::int64_t offset
Definition: log_lsa.hpp:37
DB_CLASS_PARTITION_TYPE
#define VACUUM_LOG_ADD_DROPPED_FILE_UNDO
Definition: vacuum.h:79
#define VFID_SET_NULL(vfid_ptr)
Definition: file_manager.h:65
const char ** p
Definition: dynamic_load.c:945
const OID * oid_p
Definition: heap_file.h:371
void tp_domain_free(TP_DOMAIN *dom)
DB_CONST_C_CHAR db_get_string(const DB_VALUE *value)
VPID * pgbuf_get_vpid_ptr(PAGE_PTR pgptr)
Definition: page_buffer.c:4609
PERF_UTIME_TRACKER * time_track
Definition: heap_file.h:322
UPDATE_INPLACE_STYLE update_in_place
Definition: heap_file.h:280
int or_advance(OR_BUF *buf, int offset)
DISK_ISVALID
Definition: disk_manager.h:53
static SCAN_CODE heap_get_page_info(THREAD_ENTRY *thread_p, const OID *cls_oid, const HFID *hfid, const VPID *vpid, const PAGE_PTR pgptr, DB_VALUE **page_info)
Definition: heap_file.c:18257
#define OR_BUF_INIT2(buf, data, size)
#define HEAP_MAX_ALIGN
Definition: heap_file.h:64
struct heap_stats_entry HEAP_STATS_ENTRY
Definition: heap_file.c:232
void heap_stats_update(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, const HFID *hfid, int prev_freespace)
Definition: heap_file.c:2936
#define HEAP_IS_UPDATE_INPLACE(update_inplace_style)
Definition: heap_file.h:271
int spage_insert_at(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, RECDES *record_descriptor_p)
int heap_set_autoincrement_value(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, HEAP_SCANCACHE *scan_cache, int *is_set)
Definition: heap_file.c:16507
void heap_free_func_pred_unpack_info(THREAD_ENTRY *thread_p, int n_indexes, FUNC_PRED_UNPACK_INFO *func_indx_preds, int *attr_info_started)
Definition: heap_file.c:17621
int heap_rv_mvcc_redo_delete_newhome(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16112
#define HEAP_STATS_NEXT_BEST_INDEX(i)
Definition: heap_file.c:185
#define HEAP_ISVALID_OID(thread_p, oid)
Definition: heap_file.h:77
static int fill_string_to_buffer(char **start, char *end, const char *str)
Definition: heap_file.c:18229
#define ER_MVCC_NOT_SATISFIED_REEVALUATION
Definition: error_code.h:1480
HEAP_CLASSREPR_LRU_LIST LRU_list
Definition: heap_file.c:372
const VPID * overflow_delete(THREAD_ENTRY *thread_p, const VFID *ovf_vfid, const VPID *ovf_vpid)
unsigned int of_key
Definition: lock_free.h:75
SCAN_CODE overflow_get_nbytes(THREAD_ENTRY *thread_p, const VPID *ovf_vpid, RECDES *recdes, int start_offset, int max_nbytes, int *remaining_length, MVCC_SNAPSHOT *mvcc_snapshot)
int spage_insert_for_recovery(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, RECDES *record_descriptor_p)
static DISK_ISVALID heap_check_all_pages_by_heapchain(THREAD_ENTRY *thread_p, HFID *hfid, HEAP_CHKALL_RELOCOIDS *chk_objs, INT32 *num_checked)
Definition: heap_file.c:13622
int db_value_domain_init(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale)
Definition: db_macro.c:153
SCAN_CODE heap_prev_record_info(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, DB_VALUE **cache_recordinfo)
Definition: heap_file.c:18697
#define TP_DOMAIN_COLLATION_FLAG(dom)
SCAN_CODE heap_page_next(THREAD_ENTRY *thread_p, const OID *class_oid, const HFID *hfid, VPID *next_vpid, DB_VALUE **cache_pageinfo)
Definition: heap_file.c:18305
int file_map_pages(THREAD_ENTRY *thread_p, const VFID *vfid, PGBUF_LATCH_MODE latch_mode, PGBUF_LATCH_CONDITION latch_cond, FILE_MAP_PAGE_FUNC func, void *args)
static HEAP_HFID_TABLE * heap_Hfid_table
Definition: heap_file.c:508
static void heap_mvcc_log_insert(THREAD_ENTRY *thread_p, RECDES *p_recdes, LOG_DATA_ADDR *p_addr)
Definition: heap_file.c:15619
LF_ENTRY_DESCRIPTOR hfid_hash_descriptor
Definition: heap_file.h:189
SCAN_CODE heap_get_class_record(THREAD_ENTRY *thread_p, const OID *class_oid, RECDES *recdes_p, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:24780
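heap_get_class_record is typically wrapped in a quickly started scan cache so the class page stays fixed while the record is peeked. A minimal sketch, assuming the cache is released with heap_scancache_end and that S_SUCCESS is the SCAN_CODE success value:

HEAP_SCANCACHE scan_cache;
RECDES peek_recdes;

heap_scancache_quick_start (&scan_cache);
if (heap_get_class_record (thread_p, class_oid, &peek_recdes, &scan_cache, PEEK) == S_SUCCESS)
  {
    /* ... read the class representation out of peek_recdes ... */
  }
heap_scancache_end (thread_p, &scan_cache);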
static int heap_get_header_page(THREAD_ENTRY *thread_p, const HFID *hfid, VPID *header_vpid)
Definition: heap_file.c:19236