/* * %CopyrightBegin% * * Copyright Ericsson AB 1996-2023. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * %CopyrightEnd% */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #define ERTS_DO_INCL_GLB_INLINE_FUNC_DEF #include "sys.h" #include "erl_vm.h" #include "global.h" #include "erl_process.h" #include "beam_file.h" #include "big.h" #include "bif.h" #include "erl_binary.h" #include "erl_bits.h" #include "erl_map.h" #include "packet_parser.h" #include "erl_gc.h" #define ERTS_WANT_DB_INTERNAL__ #include "erl_db.h" #include "erl_threads.h" #include "register.h" #include "dist.h" #include "erl_printf.h" #include "erl_threads.h" #include "erl_lock_count.h" #include "erl_time.h" #include "erl_thr_progress.h" #include "erl_thr_queue.h" #include "erl_sched_spec_pre_alloc.h" #include "beam_bp.h" #include "erl_ptab.h" #include "erl_check_io.h" #include "erl_bif_unique.h" #include "erl_io_queue.h" #define ERTS_WANT_TIMER_WHEEL_API #include "erl_time.h" #include "atom.h" #define ERTS_WANT_NFUNC_SCHED_INTERNALS__ #include "erl_nfunc_sched.h" #include "erl_proc_sig_queue.h" #include "erl_unicode.h" #include "beam_common.h" #include "erl_global_literals.h" /* ******************************* * ** Yielding C Fun (YCF) Note ** * ******************************* * * Yielding versions of some of the functions in this file are * generated by YCF. These generated functions are placed in the file * "utils.ycf.h" by the ERTS build system. The generation of "utils.ycf.h" * is defined in "$ERL_TOP/erts/emulator/Makefile.in". * * See "$ERL_TOP/erts/emulator/internal_doc/AutomaticYieldingOfCCode.md" * and "$ERL_TOP/erts/lib_src/yielding_c_fun/README.md" for more * information about YCF and the limitation that YCF imposes on the * code that it transforms. 
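 *
 * As a rough, illustrative sketch (the names below are hypothetical; the
 * real generated API and calling convention are described in the YCF
 * README referenced above), a YCF-transformed function f is driven
 * through a yielding entry point plus a continuation that resumes a
 * saved trap state until the work completes:
 *
 *   long reds = 100;              (reduction budget for this slice)
 *   void *trap_state = NULL;
 *   res = f_ycf_gen_yielding(&reds, &trap_state, ...);
 *   while (trap_state != NULL) {  (yielded; resume in a later slice)
 *       reds = 100;
 *       res = f_ycf_gen_continue(&reds, &trap_state, ...);
 *   }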
* */ /* Forward declarations of things needed by utils.ycf.h: */ #define YCF_CONSUME_REDS(x) typedef struct { byte* pivot_part_start; byte* pivot_part_end; } erts_qsort_partion_array_result; static void erts_qsort_swap(size_t item_size, void* ivp, void* jvp); static void erts_qsort_insertion_sort(byte *base, size_t nr_of_items, size_t item_size, erts_void_ptr_cmp_t compare); #if defined(DEBUG) && defined(ARCH_64) erts_tsd_key_t erts_ycf_debug_stack_start_tsd_key; void ycf_debug_set_stack_start(void * start) { erts_tsd_set(erts_ycf_debug_stack_start_tsd_key, start); } void ycf_debug_reset_stack_start(void) { erts_tsd_set(erts_ycf_debug_stack_start_tsd_key, NULL); } void *ycf_debug_get_stack_start(void) { return erts_tsd_get(erts_ycf_debug_stack_start_tsd_key); } #include "utils.debug.ycf.h" #else #include "utils.ycf.h" #endif #if defined(DEBUG) # define IF_DEBUG(X) X # define DBG_RANDOM_REDS(REDS, SEED) \ ((REDS) * 0.01 * erts_sched_local_random_float(SEED)) #else # define IF_DEBUG(X) # define DBG_RANDOM_REDS(REDS, SEED) (REDS) #endif #undef M_TRIM_THRESHOLD #undef M_TOP_PAD #undef M_MMAP_THRESHOLD #undef M_MMAP_MAX #if (defined(__GLIBC__) || defined(_AIX)) && defined(HAVE_MALLOC_H) #include <malloc.h> #endif #if !defined(HAVE_MALLOPT) #undef HAVE_MALLOPT #define HAVE_MALLOPT 0 #endif Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra) { ErlHeapFragment* bp; Uint n; #if defined(DEBUG) || defined(CHECK_FOR_HOLES) Uint i; #endif #ifdef FORCE_HEAP_FRAGS if (p->space_verified && p->space_verified_from!=NULL && HEAP_TOP(p) >= p->space_verified_from && HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified && HeapWordsLeft(p) >= need) { Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from); ASSERT(consumed <= p->space_verified); p->space_verified -= consumed; p->space_verified_from += consumed; HEAP_TOP(p) = p->space_verified_from; return HEAP_TOP(p) - need; } p->space_verified = 0; p->space_verified_from = NULL; #endif /* FORCE_HEAP_FRAGS */ n = need + xtra; bp = MBUF(p); if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) { Eterm* ret = bp->mem + bp->used_size; bp->used_size += need; p->mbuf_sz += need; return ret; } #ifdef DEBUG n++; #endif bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG, ERTS_HEAP_FRAG_SIZE(n)); #if defined(DEBUG) || defined(CHECK_FOR_HOLES) for (i = 0; i < n; i++) { bp->mem[i] = ERTS_HOLE_MARKER; } #endif #ifdef DEBUG n--; #endif bp->next = MBUF(p); MBUF(p) = bp; bp->alloc_size = n; bp->used_size = need; MBUF_SIZE(p) += need; bp->off_heap.first = NULL; bp->off_heap.overhead = 0; return bp->mem; } #ifdef CHECK_FOR_HOLES Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz) { Eterm* p = ptr; Uint i; for (i = 0; i < sz; i++) { *p++ = ERTS_HOLE_MARKER; } return ptr; } #endif /* * Helper function for the ESTACK macros defined in global.h. */ void erl_grow_estack(ErtsEStack* s, Uint need) { Uint old_size = (s->end - s->start); Uint new_size; Uint sp_offs = s->sp - s->start; if (need < old_size) new_size = 2*old_size; else new_size = ((need / old_size) + 2) * old_size; if (s->start != s->edefault) { s->start = erts_realloc(s->alloc_type, s->start, new_size*sizeof(Eterm)); } else { Eterm* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(Eterm)); sys_memcpy(new_ptr, s->start, old_size*sizeof(Eterm)); s->start = new_ptr; } s->end = s->start + new_size; s->sp = s->start + sp_offs; } /* * Helper function for the WSTACK macros defined in global.h.
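 *
 * The growth policy is the same as for erl_grow_estack() above: the
 * stack is doubled when doubling covers the need, otherwise it grows to
 * the first multiple of the old size that fits. E.g. with old_size = 16
 * words, need = 4 gives new_size = 32, while need = 40 gives
 * new_size = ((40 / 16) + 2) * 16 = 64 words.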
*/ void erl_grow_wstack(ErtsWStack* s, Uint need) { Uint old_size = (s->wend - s->wstart); Uint new_size; Uint sp_offs = s->wsp - s->wstart; if (need < old_size) new_size = 2 * old_size; else new_size = ((need / old_size) + 2) * old_size; if (s->wstart != s->wdefault) { s->wstart = erts_realloc(s->alloc_type, s->wstart, new_size*sizeof(UWord)); } else { UWord* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(UWord)); sys_memcpy(new_ptr, s->wstart, old_size*sizeof(UWord)); s->wstart = new_ptr; } s->wend = s->wstart + new_size; s->wsp = s->wstart + sp_offs; } /* * Helper function for the PSTACK macros defined in global.h. */ void erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes) { Uint old_size = s->size; Uint new_size; if (need_bytes < old_size) new_size = 2 * old_size; else new_size = ((need_bytes / old_size) + 2) * old_size; if (s->pstart != default_pstack) { s->pstart = erts_realloc(s->alloc_type, s->pstart, new_size); } else { byte* new_ptr = erts_alloc(s->alloc_type, new_size); sys_memcpy(new_ptr, s->pstart, old_size); s->pstart = new_ptr; } s->size = new_size; } /* * Helper function for the EQUEUE macros defined in global.h. */ void erl_grow_equeue(ErtsEQueue* q, Eterm* default_equeue) { Uint old_size = (q->end - q->start); Uint new_size = old_size * 2; Uint first_part = (q->end - q->front); Uint second_part = (q->back - q->start); Eterm* new_ptr = erts_alloc(q->alloc_type, new_size*sizeof(Eterm)); ASSERT(q->back == q->front); // of course the queue is full now! if (first_part > 0) sys_memcpy(new_ptr, q->front, first_part*sizeof(Eterm)); if (second_part > 0) sys_memcpy(new_ptr+first_part, q->start, second_part*sizeof(Eterm)); if (q->start != default_equeue) erts_free(q->alloc_type, q->start); q->start = new_ptr; q->end = q->start + new_size; q->front = q->start; q->back = q->start + old_size; } /* CTYPE macros */ #define LATIN1 #define IS_DIGIT(c) ((c) >= '0' && (c) <= '9') #ifdef LATIN1 #define IS_LOWER(c) (((c) >= 'a' && (c) <= 'z') \ || ((c) >= 128+95 && (c) <= 255 && (c) != 247)) #define IS_UPPER(c) (((c) >= 'A' && (c) <= 'Z') \ || ((c) >= 128+64 && (c) <= 128+94 && (c) != 247-32)) #else #define IS_LOWER(c) ((c) >= 'a' && (c) <= 'z') #define IS_UPPER(c) ((c) >= 'A' && (c) <= 'Z') #endif #define IS_ALNUM(c) (IS_DIGIT(c) || IS_LOWER(c) || IS_UPPER(c)) /* We don't include 160 (non-breaking space). */ #define IS_SPACE(c) (c == ' ' || c == '\n' || c == '\t' || c == '\r') #ifdef LATIN1 #define IS_CNTRL(c) ((c) < ' ' || (c) == 127 \ || ((c) >= 128 && (c) < 128+32)) #else /* Treat all non-ASCII as control characters */ #define IS_CNTRL(c) ((c) < ' ' || (c) >= 127) #endif #define IS_PRINT(c) (!IS_CNTRL(c)) /* * Calculate length of a list. * Returns -1 if not a proper list (i.e. 
not terminated with NIL) */ Sint erts_list_length(Eterm list) { Sint i = 0; while(is_list(list)) { i++; list = CDR(list_val(list)); } if (is_not_nil(list)) { return -1; } return i; } static const struct { Sint64 mask; int bits; } fib_data[] = {{ERTS_I64_LITERAL(0x2), 1}, {ERTS_I64_LITERAL(0xc), 2}, {ERTS_I64_LITERAL(0xf0), 4}, {ERTS_I64_LITERAL(0xff00), 8}, {ERTS_I64_LITERAL(0xffff0000), 16}, {ERTS_I64_LITERAL(0xffffffff00000000), 32}}; static ERTS_INLINE int fit_in_bits(Sint64 value, int start) { int bits = 0; int i; for (i = start; i >= 0; i--) { if (value & fib_data[i].mask) { value >>= fib_data[i].bits; bits |= fib_data[i].bits; } } bits++; return bits; } int erts_fit_in_bits_int64(Sint64 value) { return fit_in_bits(value, 5); } int erts_fit_in_bits_int32(Sint32 value) { return fit_in_bits((Sint64) (Uint32) value, 4); } int erts_fit_in_bits_uint(Uint value) { #if ERTS_SIZEOF_ETERM == 4 return fit_in_bits((Sint64) (Uint32) value, 4); #elif ERTS_SIZEOF_ETERM == 8 return fit_in_bits(value, 5); #else # error "No way, Jose" #endif } int erts_print(fmtfn_t to, void *arg, char *format, ...) { int res; va_list arg_list; va_start(arg_list, format); { switch ((UWord)to) { case (UWord)ERTS_PRINT_STDOUT: res = erts_vprintf(format, arg_list); break; case (UWord)ERTS_PRINT_STDERR: res = erts_vfprintf(stderr, format, arg_list); break; case (UWord)ERTS_PRINT_FILE: res = erts_vfprintf((FILE *) arg, format, arg_list); break; case (UWord)ERTS_PRINT_SBUF: res = erts_vsprintf((char *) arg, format, arg_list); break; case (UWord)ERTS_PRINT_SNBUF: res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf, ((erts_print_sn_buf *) arg)->size, format, arg_list); break; case (UWord)ERTS_PRINT_DSBUF: res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list); break; case (UWord)ERTS_PRINT_FD: res = erts_vfdprintf((int)(SWord) arg, format, arg_list); break; default: res = erts_vcbprintf(to, arg, format, arg_list); break; } } va_end(arg_list); return res; } int erts_putc(fmtfn_t to, void *arg, char c) { return erts_print(to, arg, "%c", c); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ * Some Erlang term building utility functions (to be used when performance * * isn't critical). * * * * Add more functions like these here (and function prototypes in global.h) * * when needed. * * * \* */ Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str) { if (hpp) return erts_atom_put((byte *) str, sys_strlen(str), ERTS_ATOM_ENC_LATIN1, 1); else return THE_NON_VALUE; } Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui) { Eterm res = THE_NON_VALUE; if (IS_USMALL(0, ui)) { if (hpp) res = make_small(ui); } else { if (szp) *szp += BIG_UINT_HEAP_SIZE; if (hpp) { res = uint_to_big(ui, *hpp); *hpp += BIG_UINT_HEAP_SIZE; } } return res; } /* * Erts_bld_uword is more or less similar to erts_bld_uint, but a pointer * can safely be passed. 
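 *
 * All erts_bld_*() helpers follow the same two-pass protocol: called
 * with hpp == NULL they only accumulate the needed heap size in *szp,
 * and called with szp == NULL they build the term at *hpp. A sketch of
 * a typical caller (illustrative, not taken from this file):
 *
 *   Uint sz = 0;
 *   Eterm *hp, res;
 *   (void) erts_bld_uint(NULL, &sz, ui);   (pass 1: size only)
 *   hp = HAlloc(p, sz);                    (allocate exactly sz words)
 *   res = erts_bld_uint(&hp, NULL, ui);    (pass 2: build the term)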
*/ Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw) { Eterm res = THE_NON_VALUE; if (IS_USMALL(0, uw)) { if (hpp) res = make_small((Uint) uw); } else { if (szp) *szp += BIG_UWORD_HEAP_SIZE(uw); if (hpp) { res = uword_to_big(uw, *hpp); *hpp += BIG_UWORD_HEAP_SIZE(uw); } } return res; } Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64) { Eterm res = THE_NON_VALUE; if (IS_USMALL(0, ui64)) { if (hpp) res = make_small((Uint) ui64); } else { if (szp) *szp += ERTS_UINT64_HEAP_SIZE(ui64); if (hpp) res = erts_uint64_to_big(ui64, hpp); } return res; } Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64) { Eterm res = THE_NON_VALUE; if (IS_SSMALL(si64)) { if (hpp) res = make_small((Sint) si64); } else { if (szp) *szp += ERTS_SINT64_HEAP_SIZE(si64); if (hpp) res = erts_sint64_to_big(si64, hpp); } return res; } Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr) { Eterm res = THE_NON_VALUE; if (szp) *szp += 2; if (hpp) { res = CONS(*hpp, car, cdr); *hpp += 2; } return res; } Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...) { Eterm res = THE_NON_VALUE; ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS))); if (szp) { if (arity == 0) { /* the empty tuple is a global literal; no heap words needed, and *szp must keep accumulating */ } else { *szp += arity + 1; } } if (arity == 0) { return ERTS_GLOBAL_LIT_EMPTY_TUPLE; } if (hpp) { res = make_tuple(*hpp); *((*hpp)++) = make_arityval(arity); if (arity > 0) { Uint i; va_list argp; va_start(argp, arity); for (i = 0; i < arity; i++) { *((*hpp)++) = va_arg(argp, Eterm); } va_end(argp); } } return res; } Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]) { Eterm res = THE_NON_VALUE; /* * Note callers expect that 'terms' is *not* accessed if hpp == NULL. */ ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS))); if (szp) { if (arity == 0) { /* the empty tuple is a global literal; no heap words needed */ } else { *szp += arity + 1; } } if (arity == 0) { return ERTS_GLOBAL_LIT_EMPTY_TUPLE; } if (hpp) { res = make_tuple(*hpp); *((*hpp)++) = make_arityval(arity); if (arity > 0) { Uint i; for (i = 0; i < arity; i++) *((*hpp)++) = terms[i]; } } return res; } Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len) { Eterm res = THE_NON_VALUE; Sint i = len; if (szp) *szp += len*2; if (hpp) { res = NIL; while (--i >= 0) { res = CONS(*hpp, make_small((byte) str[i]), res); *hpp += 2; } } return res; } Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]) { Eterm list = THE_NON_VALUE; if (szp) *szp += 2*length; if (hpp) { Sint i = length; list = NIL; while (--i >= 0) { list = CONS(*hpp, terms[i], list); *hpp += 2; } } return list; } Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp, Sint length, Eterm terms1[], Uint terms2[]) { Eterm res = THE_NON_VALUE; if (szp) *szp += 5*length; if (hpp) { Sint i = length; res = NIL; while (--i >= 0) { res = CONS(*hpp+3, TUPLE2(*hpp, terms1[i], terms2[i]), res); *hpp += 5; } } return res; } Eterm erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp, Sint length, Eterm atoms[], UWord uints[]) { Sint i; Eterm res = THE_NON_VALUE; if (szp) { *szp += 5*length; i = length; while (--i >= 0) { if (!IS_USMALL(0, uints[i])) *szp += BIG_UINT_HEAP_SIZE; } } if (hpp) { i = length; res = NIL; while (--i >= 0) { Eterm ui; if (IS_USMALL(0, uints[i])) ui = make_small(uints[i]); else { ui = uint_to_big(uints[i], *hpp); *hpp += BIG_UINT_HEAP_SIZE; } res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res); *hpp += 5; } } return res; } Eterm erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length, Eterm atoms[], Uint uints1[], Uint uints2[]) { Sint i; Eterm res =
THE_NON_VALUE; if (szp) { *szp += 6*length; i = length; while (--i >= 0) { if (!IS_USMALL(0, uints1[i])) *szp += BIG_UINT_HEAP_SIZE; if (!IS_USMALL(0, uints2[i])) *szp += BIG_UINT_HEAP_SIZE; } } if (hpp) { i = length; res = NIL; while (--i >= 0) { Eterm ui1; Eterm ui2; if (IS_USMALL(0, uints1[i])) ui1 = make_small(uints1[i]); else { ui1 = uint_to_big(uints1[i], *hpp); *hpp += BIG_UINT_HEAP_SIZE; } if (IS_USMALL(0, uints2[i])) ui2 = make_small(uints2[i]); else { ui2 = uint_to_big(uints2[i], *hpp); *hpp += BIG_UINT_HEAP_SIZE; } res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res); *hpp += 6; } } return res; } /* error_logger ! {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }} */ static Eterm do_allocate_logger_message(ErtsHeapFactory *factory, Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid, Uint sz) { Uint gl_sz; gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader); sz = sz + gl_sz + 6 /*outer 5-tuple*/ + MAP2_SZ /* error_logger map */; *pid = erts_get_current_pid(); if (is_nil(gleader) && is_non_value(*pid)) { sz += MAP2_SZ /* metadata map no gl, no pid */; } else if (is_nil(gleader) || is_non_value(*pid)) sz += MAP3_SZ /* metadata map no gl or no pid*/; else sz += MAP4_SZ /* metadata map w gl w pid*/; *ts = ERTS_MONOTONIC_TO_USEC(erts_os_system_time()); erts_bld_sint64(NULL, &sz, *ts); erts_factory_heap_frag_init(factory, new_message_buffer(sz)); { Eterm *hp = erts_produce_heap(factory, gl_sz, 0); return copy_struct(gleader,gl_sz,&hp,factory->off_heap); } } static void do_send_logger_message(ErtsHeapFactory *factory, Eterm gl, Eterm tag, Eterm format, Eterm args, ErtsMonotonicTime ts, Eterm pid) { Eterm *hp; Eterm message, md, el_tag = tag; Uint sz = 0; Eterm time; erts_bld_sint64(NULL, &sz, ts); hp = erts_produce_heap(factory, sz, 0); time = erts_bld_sint64(&hp, NULL, ts); /* This mapping is needed for the backwards compatible error_logger */ switch (tag) { case am_info: el_tag = am_info_msg; break; case am_warning: el_tag = am_warning_msg; break; default: ASSERT(tag == am_error); break; } hp = erts_produce_heap(factory, MAP2_SZ, 0); md = MAP2(hp, am_emulator, am_true, ERTS_MAKE_AM("tag"), el_tag); if (is_nil(gl) && is_non_value(pid)) { /* no gl and no pid, probably from a port */ hp = erts_produce_heap(factory, MAP2_SZ, 0); md = MAP2(hp, am_error_logger, md, am_time, time); pid = NIL; } else if (is_nil(gl)) { /* no gl */ hp = erts_produce_heap(factory, MAP3_SZ, 0); md = MAP3(hp, am_error_logger, md, am_pid, pid, am_time, time); } else if (is_non_value(pid)) { /* no pid */ hp = erts_produce_heap(factory, MAP3_SZ, 0); md = MAP3(hp, am_error_logger, md, ERTS_MAKE_AM("gl"), gl, am_time, time); pid = NIL; } else { Eterm keys[] = { am_error_logger, ERTS_MAKE_AM("gl"), am_pid, am_time }; Eterm values[] = { md, gl, pid, time }; md = erts_map_from_ks_and_vs(factory, keys, values, 4); } hp = erts_produce_heap(factory, 6, 0); message = TUPLE5(hp, am_log, tag, format, args, md); erts_factory_close(factory); erts_queue_error_logger_message(pid, message, factory->heap_frags); } static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len) { Uint sz; Eterm list, args, format, pid; ErtsMonotonicTime ts; ErtsHeapFactory factory; Eterm *hp = NULL; sz = len * 2 /* message list */ + 2 /* cons surrounding message list */ + 8 /* "~s~n" */; /* gleader size is accounted and allocated next */ gl = do_allocate_logger_message(&factory, gl, &ts, &pid, sz); hp = erts_produce_heap(&factory, sz, 0); list = buf_to_intlist(&hp, buf, len, NIL); args = CONS(hp,list,NIL);
hp += 2; format = buf_to_intlist(&hp, "~s~n", 4, NIL); do_send_logger_message(&factory, gl, tag, format, args, ts, pid); return 0; } static int do_send_term_to_logger(Eterm tag, Eterm gl, char *buf, size_t len, Eterm args) { Uint sz; Uint args_sz; Eterm format, pid; ErtsMonotonicTime ts; Eterm *hp = NULL; ErtsHeapFactory factory; ASSERT(len > 0); args_sz = size_object(args); sz = len * 2 /* format */ + args_sz; /* gleader size is accounted and allocated next */ gl = do_allocate_logger_message(&factory, gl, &ts, &pid, sz); hp = erts_produce_heap(&factory, sz, 0); format = buf_to_intlist(&hp, buf, len, NIL); args = copy_struct(args, args_sz, &hp, factory.off_heap); do_send_logger_message(&factory, gl, tag, format, args, ts, pid); return 0; } static ERTS_INLINE int send_info_to_logger(Eterm gleader, char *buf, size_t len) { return do_send_to_logger(am_info, gleader, buf, len); } static ERTS_INLINE int send_warning_to_logger(Eterm gleader, char *buf, size_t len) { return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len); } static ERTS_INLINE int send_error_to_logger(Eterm gleader, char *buf, size_t len) { return do_send_to_logger(am_error, gleader, buf, len); } static ERTS_INLINE int send_error_term_to_logger(Eterm gleader, char *buf, size_t len, Eterm args) { return do_send_term_to_logger(am_error, gleader, buf, len, args); } #define LOGGER_DSBUF_INC_SZ 256 static erts_dsprintf_buf_t * grow_logger_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need) { size_t size; size_t free_size = dsbufp->size - dsbufp->str_len; ASSERT(dsbufp && dsbufp->str); if (need <= free_size) return dsbufp; size = need - free_size + LOGGER_DSBUF_INC_SZ; size = (((size + LOGGER_DSBUF_INC_SZ - 1) / LOGGER_DSBUF_INC_SZ) * LOGGER_DSBUF_INC_SZ); size += dsbufp->size; ASSERT(dsbufp->str_len + need <= size); dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp->str, size); dsbufp->size = size; return dsbufp; } erts_dsprintf_buf_t * erts_create_logger_dsbuf(void) { erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_logger_dsbuf); erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_LOGGER_DSBUF, sizeof(erts_dsprintf_buf_t)); sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t)); dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_LOGGER_DSBUF, LOGGER_DSBUF_INC_SZ); dsbufp->str[0] = '\0'; dsbufp->size = LOGGER_DSBUF_INC_SZ; return dsbufp; } static ERTS_INLINE void destroy_logger_dsbuf(erts_dsprintf_buf_t *dsbufp) { ASSERT(dsbufp && dsbufp->str); erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp->str); erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp); } int erts_send_info_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp) { int res; res = send_info_to_logger(gleader, dsbufp->str, dsbufp->str_len); destroy_logger_dsbuf(dsbufp); return res; } int erts_send_warning_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp) { int res; res = send_warning_to_logger(gleader, dsbufp->str, dsbufp->str_len); destroy_logger_dsbuf(dsbufp); return res; } int erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp) { int res; res = send_error_to_logger(gleader, dsbufp->str, dsbufp->str_len); destroy_logger_dsbuf(dsbufp); return res; } int erts_send_error_term_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp, Eterm args) { int res; res = send_error_term_to_logger(gleader, dsbufp->str, dsbufp->str_len, args); destroy_logger_dsbuf(dsbufp); return res; } int erts_send_info_to_logger_str(Eterm gleader, char *str) { return send_info_to_logger(gleader, str, sys_strlen(str)); } 
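/* Illustrative (sketch) use of the dynamic string buffer helpers above;
 * gleader and code are placeholders. The buffer is created, formatted
 * into (growing as needed via grow_logger_dsbuf()) and then consumed
 * and freed by the erts_send_*_to_logger() call:
 *
 *   erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
 *   erts_dsprintf(dsbufp, "unexpected error: %d\n", code);
 *   erts_send_error_to_logger(gleader, dsbufp);
 */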
int erts_send_warning_to_logger_str(Eterm gleader, char *str) { return send_warning_to_logger(gleader, str, sys_strlen(str)); } int erts_send_error_to_logger_str(Eterm gleader, char *str) { return send_error_to_logger(gleader, str, sys_strlen(str)); } int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *dsbuf) { return erts_send_info_to_logger(NIL, dsbuf); } int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *dsbuf) { return erts_send_warning_to_logger(NIL, dsbuf); } int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *dsbuf) { return erts_send_error_to_logger(NIL, dsbuf); } int erts_send_info_to_logger_str_nogl(char *str) { return erts_send_info_to_logger_str(NIL, str); } int erts_send_warning_to_logger_str_nogl(char *str) { return erts_send_warning_to_logger_str(NIL, str); } int erts_send_error_to_logger_str_nogl(char *str) { return erts_send_error_to_logger_str(NIL, str); } #define TMP_DSBUF_INC_SZ 256 static erts_dsprintf_buf_t * grow_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need) { size_t size; size_t free_size = dsbufp->size - dsbufp->str_len; ASSERT(dsbufp); if (need <= free_size) return dsbufp; size = need - free_size + TMP_DSBUF_INC_SZ; size = ((size + TMP_DSBUF_INC_SZ - 1)/TMP_DSBUF_INC_SZ)*TMP_DSBUF_INC_SZ; size += dsbufp->size; ASSERT(dsbufp->str_len + need <= size); dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp->str, size); dsbufp->size = size; return dsbufp; } erts_dsprintf_buf_t * erts_create_tmp_dsbuf(Uint size) { Uint init_size = size ? size : TMP_DSBUF_INC_SZ; erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_tmp_dsbuf); erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_TMP_DSBUF, sizeof(erts_dsprintf_buf_t)); sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t)); dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_TMP_DSBUF, init_size); dsbufp->str[0] = '\0'; dsbufp->size = init_size; return dsbufp; } void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp) { if (dsbufp->str) erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp->str); erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp); } /* eq and cmp are written as separate functions as eq is a little faster */ /* * Test for equality of two terms. * Returns 0 if not equal, or a non-zero value otherwise.
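 * eq() is what the =:= operator bottoms out in, so it must agree with
 * erts_cmp_compound(a, b, 1, 1) == 0; e.g. 1 and 1.0 are not equal
 * here even though they compare equal arithmetically.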
*/ int eq(Eterm a, Eterm b) { DECLARE_WSTACK(stack); Sint sz; Eterm* aa; Eterm* bb; tailrecur: if (is_same(a, b)) goto pop_next; tailrecur_ne: switch (primary_tag(a)) { case TAG_PRIMARY_LIST: if (is_list(b)) { Eterm* aval = list_val(a); Eterm* bval = list_val(b); while (1) { Eterm atmp = CAR(aval); Eterm btmp = CAR(bval); if (!is_same(atmp,btmp)) { WSTACK_PUSH2(stack,(UWord) CDR(bval),(UWord) CDR(aval)); a = atmp; b = btmp; goto tailrecur_ne; } atmp = CDR(aval); btmp = CDR(bval); if (is_same(atmp,btmp)) { goto pop_next; } if (is_not_list(atmp) || is_not_list(btmp)) { a = atmp; b = btmp; goto tailrecur_ne; } aval = list_val(atmp); bval = list_val(btmp); } } break; /* not equal */ case TAG_PRIMARY_BOXED: { Eterm hdr = *boxed_val(a); switch (hdr & _TAG_HEADER_MASK) { case ARITYVAL_SUBTAG: { aa = tuple_val(a); if (!is_boxed(b) || *boxed_val(b) != *aa) goto not_equal; bb = tuple_val(b); if ((sz = arityval(*aa)) == 0) goto pop_next; ++aa; ++bb; goto term_array; } case REFC_BINARY_SUBTAG: case HEAP_BINARY_SUBTAG: case SUB_BINARY_SUBTAG: { byte* a_ptr; byte* b_ptr; size_t a_size; size_t b_size; Uint a_bitsize; Uint b_bitsize; Uint a_bitoffs; Uint b_bitoffs; if (!is_binary(b)) { goto not_equal; } a_size = binary_size(a); b_size = binary_size(b); if (a_size != b_size) { goto not_equal; } ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize); ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize); if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) { if (sys_memcmp(a_ptr, b_ptr, a_size) == 0) goto pop_next; } else if (a_bitsize == b_bitsize) { if (erts_cmp_bits(a_ptr, a_bitoffs, b_ptr, b_bitoffs, (a_size << 3) + a_bitsize) == 0) goto pop_next; } break; /* not equal */ } case FUN_SUBTAG: { ErlFunThing* f1; ErlFunThing* f2; if (is_not_any_fun(b)) { goto not_equal; } f1 = (ErlFunThing *) fun_val(a); f2 = (ErlFunThing *) fun_val(b); if (is_local_fun(f1) && is_local_fun(f2)) { ErlFunEntry *fe1, *fe2; fe1 = f1->entry.fun; fe2 = f2->entry.fun; if (fe1->module != fe2->module || fe1->index != fe2->index || fe1->old_uniq != fe2->old_uniq || f1->num_free != f2->num_free) { goto not_equal; } if ((sz = f1->num_free) == 0) { goto pop_next; } aa = f1->env; bb = f2->env; goto term_array; } else if (is_external_fun(f1) && is_external_fun(f2)) { if (f1->entry.exp == f2->entry.exp) { goto pop_next; } } goto not_equal; } case EXTERNAL_PID_SUBTAG: { ExternalThing *ap; ExternalThing *bp; int i; if(!is_external(b)) goto not_equal; ap = external_thing_ptr(a); bp = external_thing_ptr(b); if(ap->header != bp->header || ap->node != bp->node) goto not_equal; ASSERT(external_data_words(a) == EXTERNAL_PID_DATA_WORDS); ASSERT(external_data_words(b) == EXTERNAL_PID_DATA_WORDS); for (i = 0; i < EXTERNAL_PID_DATA_WORDS; i++) { if (ap->data.ui[i] != bp->data.ui[i]) goto not_equal; } goto pop_next; } case EXTERNAL_PORT_SUBTAG: { ExternalThing *ap; ExternalThing *bp; int i; if(!is_external(b)) goto not_equal; ap = external_thing_ptr(a); bp = external_thing_ptr(b); if(ap->header != bp->header || ap->node != bp->node) goto not_equal; ASSERT(EXTERNAL_PORT_DATA_WORDS == external_data_words(a)); ASSERT(EXTERNAL_PORT_DATA_WORDS == external_data_words(b)); for (i = 0; i < EXTERNAL_PORT_DATA_WORDS; i++) { if (ap->data.ui[i] != bp->data.ui[i]) goto not_equal; } goto pop_next; } case EXTERNAL_REF_SUBTAG: { /* * Observe! * When comparing refs we need to compare ref numbers * (32-bit words) *not* ref data words. 
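 * E.g. a two-word ref with numbers {42, 17} and a three-word ref with
 * numbers {42, 17, 0} denote the same reference: when the lengths
 * differ, the extra high-order words must all be zero for equality.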
*/ Uint32 *anum; Uint32 *bnum; Uint common_len; Uint alen; Uint blen; Uint i; ExternalThing* athing; ExternalThing* bthing; if(!is_external_ref(b)) goto not_equal; athing = external_thing_ptr(a); bthing = external_thing_ptr(b); if(athing->node != bthing->node) goto not_equal; anum = external_thing_ref_numbers(athing); bnum = external_thing_ref_numbers(bthing); alen = external_thing_ref_no_numbers(athing); blen = external_thing_ref_no_numbers(bthing); goto ref_common; case REF_SUBTAG: if (!is_internal_ref(b)) goto not_equal; alen = internal_ref_no_numbers(a); anum = internal_ref_numbers(a); blen = internal_ref_no_numbers(b); bnum = internal_ref_numbers(b); ref_common: ASSERT(alen > 0 && blen > 0); if (anum[0] != bnum[0]) goto not_equal; if (alen == 3 && blen == 3) { /* Most refs are of length 3 */ if (anum[1] == bnum[1] && anum[2] == bnum[2]) { goto pop_next; } else { goto not_equal; } } common_len = alen; if (blen < alen) common_len = blen; for (i = 1; i < common_len; i++) if (anum[i] != bnum[i]) goto not_equal; if(alen != blen) { if (alen > blen) { for (i = common_len; i < alen; i++) if (anum[i] != 0) goto not_equal; } else { for (i = common_len; i < blen; i++) if (bnum[i] != 0) goto not_equal; } } goto pop_next; } case POS_BIG_SUBTAG: case NEG_BIG_SUBTAG: { int i; if (!is_big(b)) goto not_equal; aa = big_val(a); bb = big_val(b); if (*aa != *bb) goto not_equal; i = BIG_ARITY(aa); while(i--) { if (*++aa != *++bb) goto not_equal; } goto pop_next; } case FLOAT_SUBTAG: { FloatDef af; FloatDef bf; if (is_float(b)) { GET_DOUBLE(a, af); GET_DOUBLE(b, bf); if (af.fd == bf.fd) goto pop_next; } break; /* not equal */ } case MAP_SUBTAG: if (is_flatmap(a)) { aa = flatmap_val(a); if (!is_boxed(b) || *boxed_val(b) != *aa) goto not_equal; bb = flatmap_val(b); sz = flatmap_get_size((flatmap_t*)aa); if (sz != flatmap_get_size((flatmap_t*)bb)) goto not_equal; if (sz == 0) goto pop_next; aa += 2; bb += 2; sz += 1; /* increment for tuple-keys */ goto term_array; } else { if (!is_boxed(b) || *boxed_val(b) != hdr) goto not_equal; aa = hashmap_val(a) + 1; bb = hashmap_val(b) + 1; switch (hdr & _HEADER_MAP_SUBTAG_MASK) { case HAMT_SUBTAG_HEAD_ARRAY: if (aa[0] != bb[0]) goto not_equal; aa++; bb++; sz = 16; break; case HAMT_SUBTAG_HEAD_BITMAP: if (aa[0] != bb[0]) goto not_equal; aa++; bb++; case HAMT_SUBTAG_NODE_BITMAP: sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); ASSERT(sz > 0 && sz < 17); break; default: erts_exit(ERTS_ERROR_EXIT, "bad header"); } goto term_array; } default: ASSERT(!"Unknown boxed subtag in EQ"); } break; } } goto not_equal; term_array: /* arrays in 'aa' and 'bb', length in 'sz' */ ASSERT(sz != 0); { Eterm* ap = aa; Eterm* bp = bb; Sint i = sz; for (;;) { if (!is_same(*ap,*bp)) break; if (--i == 0) goto pop_next; ++ap; ++bp; } a = *ap; b = *bp; if (is_both_immed(a,b)) { goto not_equal; } if (i > 1) { /* push the rest */ WSTACK_PUSH3(stack, i-1, (UWord)(bp+1), ((UWord)(ap+1)) | TAG_PRIMARY_HEADER); /* We (ab)use TAG_PRIMARY_HEADER to recognize a term_array */ } goto tailrecur_ne; } pop_next: if (!WSTACK_ISEMPTY(stack)) { UWord something = WSTACK_POP(stack); if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */ aa = (Eterm*) something; bb = (Eterm*) WSTACK_POP(stack); sz = WSTACK_POP(stack); goto term_array; } a = something; b = WSTACK_POP(stack); goto tailrecur; } DESTROY_WSTACK(stack); return 1; not_equal: DESTROY_WSTACK(stack); return 0; } static Sint compare_flatmap_atom_keys(const Eterm* a_keys, const Eterm* b_keys, int n_atoms) { Eterm min_key = THE_NON_VALUE; Eterm
a, b; int ai, bi; Sint res; ASSERT(n_atoms > 0); ASSERT(is_atom(a_keys[0]) && is_atom(b_keys[0])); ASSERT(is_atom(a_keys[n_atoms-1]) || is_atom(b_keys[n_atoms-1])); ai = n_atoms; while (*a_keys == *b_keys) { ASSERT(is_atom(*a_keys)); a_keys++; b_keys++; if (--ai == 0) return 0; } /* * Found atom key diff. Find the smallest unique atom. * The atoms are sorted by atom index (not term order). * * Continue iterating the atom key arrays, always advancing the one lagging * behind atom index-wise. Identical atoms are skipped. An atom can only be * a candidate as minimal if we have passed that atom index in the other array * (which means the atom did not exist in the other array). * * There can be a different number of atom keys in the arrays (n_atoms is the * larger count). We stop when either reaching the end or finding a non-atom. * ERTS_UINT_MAX is used as an end marker while advancing the other one. */ bi = ai; a = *a_keys; b = *b_keys; IF_DEBUG(res = 0); if (!is_atom(a)) { ASSERT(is_atom(b)); return +1; } else if (!is_atom(b)) { return -1; } do { ASSERT(is_atom(a) || a == ERTS_UINT_MAX); ASSERT(is_atom(b) || b == ERTS_UINT_MAX); ASSERT(is_atom(a) || is_atom(b)); if (a < b) { ASSERT(ai && is_atom(a)); if (is_non_value(min_key) || erts_cmp_atoms(a, min_key) < 0) { min_key = a; res = -1; } if (--ai) { a = *(++a_keys); if (is_not_atom(a)) a = ERTS_UINT_MAX; } else a = ERTS_UINT_MAX; } else if (a > b) { ASSERT(bi && is_atom(b)); if (is_non_value(min_key) || erts_cmp_atoms(b, min_key) < 0) { min_key = b; res = +1; } if (--bi) { b = *(++b_keys); if (is_not_atom(b)) b = ERTS_UINT_MAX; } else b = ERTS_UINT_MAX; } else { ASSERT(ai && bi && is_atom(a) && is_atom(b)); if (--ai) { a = *(++a_keys); if (is_not_atom(a)) a = ERTS_UINT_MAX; } else a = ERTS_UINT_MAX; if (--bi) { b = *(++b_keys); if (is_not_atom(b)) b = ERTS_UINT_MAX; } else b = ERTS_UINT_MAX; } } while (~(a&b)); ASSERT(a == ERTS_UINT_MAX && b == ERTS_UINT_MAX); ASSERT(is_atom(min_key)); ASSERT(res != 0); return res; } /* * Compare objects. * Returns 0 if equal, a negative value if a < b, or a positive value if a > b. * * According to the Erlang Standard, types are ordered as follows: * numbers < (characters) < atoms < refs < funs < ports < pids < * tuples < maps < [] < conses < binaries.
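 * E.g. any pid compares less than any tuple, and [] less than any
 * non-empty list. Note the two compare modes below: the arith-based
 * compare (exact = 0) considers 1 and 1.0 equal, while the term-based
 * compare (exact = 1, as used for map key order) does not.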
* */ Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only); /* erts_cmp(Eterm a, Eterm b, int exact) * exact = 1 -> term-based compare * exact = 0 -> arith-based compare */ Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only) { #define PSTACK_TYPE struct cmp_map_state struct cmp_map_state { Sint wstack_rollback; int was_exact; /* hashmap only */ Eterm *atom_keys; /* flatmap only */ Eterm *ap, *bp; /* hashmap: kv-cons, flatmap: values */ Eterm min_key; Sint cmp_res; /* result so far -1,0,+1 */ #ifdef DEBUG int is_hashmap; #endif }; PSTACK_DECLARE(map_stack, 1); WSTACK_DECLARE(stack); WSTACK_DECLARE(b_stack); /* only used by hashmaps */ Eterm* aa; Eterm* bb; int i; Sint j; int a_tag; int b_tag; ErlNode *anode; ErlNode *bnode; Uint alen; Uint blen; Uint32 *anum; Uint32 *bnum; /* The WSTACK contains naked Eterms and Operations marked with header-tags */ #define OP_BITS 4 #define OP_MASK 0xF #define TERM_ARRAY_OP 0 #define SWITCH_EXACT_OFF_OP 1 #define HASHMAP_PHASE1_ARE_KEYS_EQUAL 2 #define HASHMAP_PHASE1_IS_MIN_KEY 3 #define HASHMAP_PHASE1_CMP_VALUES 4 #define HASHMAP_PHASE2_ARE_KEYS_EQUAL 5 #define HASHMAP_PHASE2_IS_MIN_KEY_A 6 #define HASHMAP_PHASE2_IS_MIN_KEY_B 7 #define FLATMAP_ATOM_KEYS 8 #define FLATMAP_ATOM_VALUES 9 #define FLATMAP_ATOM_CMP_VALUES 10 #define OP_WORD(OP) (((OP) << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER) #define OP_ARG_WORD(OP, SZ) OP_WORD(((SZ) << OP_BITS) | OP) #define GET_OP(WORD) (ASSERT(is_header(WORD)), ((WORD) >> _TAG_PRIMARY_SIZE) & OP_MASK) #define GET_OP_ARG(WORD) (ASSERT(is_header(WORD)), ((WORD) >> (OP_BITS + _TAG_PRIMARY_SIZE))) #define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; } #define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal #undef CMP_NODES #define CMP_NODES(AN, BN) \ do { \ if((AN) != (BN)) { \ if((AN)->sysname != (BN)->sysname) \ RETURN_NEQ(erts_cmp_atoms((AN)->sysname, (BN)->sysname)); \ ASSERT((AN)->creation != (BN)->creation); \ RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \ } \ } while (0) bodyrecur: j = 0; tailrecur: if (is_same(a,b)) { /* Equal values or pointers. */ goto pop_next; } tailrecur_ne: /* deal with majority (?) cases by brute-force */ if (is_atom(a)) { if (is_atom(b)) { ON_CMP_GOTO(erts_cmp_atoms(a, b)); } } else if (is_both_small(a, b)) { ON_CMP_GOTO(signed_val(a) - signed_val(b)); } /* * Take care of cases where the types are the same. 
*/ a_tag = 42; /* Suppress warning */ switch (primary_tag(a)) { case TAG_PRIMARY_IMMED1: switch ((a & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) { case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE): if (is_internal_port(b)) { Uint adata = internal_port_data(a); Uint bdata = internal_port_data(b); ON_CMP_GOTO((Sint)(adata - bdata)); } else if (is_external_port(b)) { anode = erts_this_node; bnode = external_port_node(b); CMP_NODES(anode, bnode); ERTS_INTERNAL_ERROR("different nodes compared equal"); } else { a_tag = PORT_DEF; goto mixed_types; } case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE): if (is_internal_pid(b)) { Uint adata = internal_pid_data(a); Uint bdata = internal_pid_data(b); ON_CMP_GOTO((Sint)(adata - bdata)); } else if (is_not_external_pid(b)) { a_tag = PID_DEF; goto mixed_types; } pid_common: { Uint32 a_pid_num, a_pid_ser; Uint32 b_pid_num, b_pid_ser; if (is_internal_pid(a)) { a_pid_num = internal_pid_number(a); a_pid_ser = internal_pid_serial(a); anode = erts_this_node; } else { ASSERT(is_external_pid(a)); a_pid_num = external_pid_number(a); a_pid_ser = external_pid_serial(a); anode = external_pid_node(a); } if (is_internal_pid(b)) { b_pid_num = internal_pid_number(b); b_pid_ser = internal_pid_serial(b); bnode = erts_this_node; } else { ASSERT(is_external_pid(b)); b_pid_num = external_pid_number(b); b_pid_ser = external_pid_serial(b); bnode = external_pid_node(b); } if (a_pid_ser != b_pid_ser) RETURN_NEQ(a_pid_ser < b_pid_ser ? -1 : 1); if (a_pid_num != b_pid_num) RETURN_NEQ(a_pid_num < b_pid_num ? -1 : 1); CMP_NODES(anode, bnode); goto pop_next; } case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): a_tag = SMALL_DEF; goto mixed_types; case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): { switch ((a & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) { case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE): a_tag = ATOM_DEF; goto mixed_types; case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE): a_tag = NIL_DEF; goto mixed_types; } } } case TAG_PRIMARY_LIST: if (is_not_list(b)) { a_tag = LIST_DEF; goto mixed_types; } aa = list_val(a); bb = list_val(b); while (1) { Eterm atmp = CAR(aa); Eterm btmp = CAR(bb); if (!is_same(atmp,btmp)) { WSTACK_PUSH2(stack,(UWord) CDR(bb),(UWord) CDR(aa)); a = atmp; b = btmp; goto tailrecur_ne; } atmp = CDR(aa); btmp = CDR(bb); if (is_same(atmp,btmp)) { goto pop_next; } if (is_not_list(atmp) || is_not_list(btmp)) { a = atmp; b = btmp; goto tailrecur_ne; } aa = list_val(atmp); bb = list_val(btmp); } case TAG_PRIMARY_BOXED: { Eterm ahdr = *boxed_val(a); switch ((ahdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) { case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE): if (!is_tuple(b)) { a_tag = TUPLE_DEF; goto mixed_types; } aa = tuple_val(a); bb = tuple_val(b); /* compare the arities */ i = arityval(ahdr); /* get the arity*/ if (i != arityval(*bb)) { RETURN_NEQ((int)(i - arityval(*bb))); } if (i == 0) { goto pop_next; } ++aa; ++bb; goto term_array; case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) : { struct cmp_map_state* sp; if (is_flatmap_header(ahdr)) { flatmap_t* afm = (flatmap_t*)flatmap_val(a); flatmap_t* bfm; if (!is_flatmap(b)) { if (is_hashmap(b)) { ASSERT(flatmap_get_size(afm) < hashmap_size(b)); RETURN_NEQ(-1); } a_tag = MAP_DEF; goto mixed_types; } bfm = (flatmap_t*)flatmap_val(b); i = flatmap_get_size(afm); if (i != flatmap_get_size(bfm)) { RETURN_NEQ((int)(i - flatmap_get_size(bfm))); } if (i == 0) { goto pop_next; } if (exact) { /* * We only care about equality so we can compare * the maps as two term arrays where the first * element pair are the key tuples. 
*/ aa = &afm->keys; bb = &bfm->keys; i += 1; /* increment for tuple-keys */ goto term_array; } else { Eterm* a_keys = flatmap_get_keys(afm); Eterm* b_keys = flatmap_get_keys(bfm); Eterm* a_vals = flatmap_get_values(afm); Eterm* b_vals = flatmap_get_values(bfm); int n_numbers; /* sorted before atoms */ int n_atoms; int n_rest; /* sorted after atoms */ int n = 0; /* * All keys are sorted in term order except atoms * which are sorted by atom index. The compare * algorithm is optimized to only have to treat * atoms specially and use the term order for other * keys. * The key arrays are divided into three possible * partitions containing: * #1. all numbers (< atoms) * #2. atoms * #3. only the rest (> atoms) * * The three partitions are compared separately in two * phases, first only keys and then values. */ while (n < i && !(is_atom(a_keys[n]) && is_atom(b_keys[n]))) { ++n; } n_numbers = n; while (n < i && (is_atom(a_keys[n]) || is_atom(b_keys[n]))) { ++n; } n_atoms = n - n_numbers; n_rest = i - n; ASSERT(n_numbers + n_atoms + n_rest == i); ASSERT(n_atoms || !n_rest); if (n_rest) { WSTACK_PUSH3(stack, (UWord)&b_vals[n_numbers+n_atoms], (UWord)&a_vals[n_numbers+n_atoms], OP_ARG_WORD(TERM_ARRAY_OP,n_rest)); } if (n_atoms) { WSTACK_PUSH4(stack, (UWord)&b_vals[n_numbers], (UWord)&a_vals[n_numbers], (UWord)&a_keys[n_numbers], OP_ARG_WORD(FLATMAP_ATOM_VALUES,n_atoms)); } if (n_numbers) { WSTACK_PUSH3(stack, (UWord)b_vals, (UWord)a_vals, OP_ARG_WORD(TERM_ARRAY_OP,n_numbers)); } if (!exact) { WSTACK_PUSH(stack, OP_WORD(SWITCH_EXACT_OFF_OP)); exact = 1; } if (n_rest) { WSTACK_PUSH3(stack, (UWord)&b_keys[n_numbers+n_atoms], (UWord)&a_keys[n_numbers+n_atoms], OP_ARG_WORD(TERM_ARRAY_OP,n_rest)); } if (n_atoms) { WSTACK_PUSH3(stack, (UWord)&b_keys[n_numbers], (UWord)&a_keys[n_numbers], OP_ARG_WORD(FLATMAP_ATOM_KEYS,n_atoms)); } if (n_numbers) { WSTACK_PUSH3(stack, (UWord)b_keys, (UWord)a_keys, OP_ARG_WORD(TERM_ARRAY_OP,n_numbers)); } } goto pop_next; } if (!is_hashmap(b)) { if (is_flatmap(b)) { ASSERT(hashmap_size(a) > flatmap_get_size(flatmap_val(b))); RETURN_NEQ(1); } a_tag = MAP_DEF; goto mixed_types; } i = hashmap_size(a) - hashmap_size(b); if (i) { RETURN_NEQ(i); } if (hashmap_size(a) == 0) { goto pop_next; } /* Hashmap compare strategy: Phase 1. While keys are identical Do synchronous stepping through leaves of both trees in hash order. Maintain value compare result of minimal key. Phase 2. If key diff was found in phase 1 Ignore values from now on. Continue iterating the trees, always advancing the one lagging behind hash-wise. Identical keys are skipped. A minimal key can only be a candidate as tie-breaker if we have passed that hash value in the other tree (which means the key did not exist in the other tree). Collision node amendment: The leaves in collision nodes are sorted in map-key order. If keys are different but hashes are equal we advance the one lagging behind key-wise.
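      Example: comparing M1 = #{1 => x, 2 => y} with M2 = #{1 => x, 3 => y}
      (equal sizes). Phase 1 steps both trees in hash order until the
      key diff (2 vs 3) is found; phase 2 then ignores values and tracks
      the minimal key that exists in only one of the maps, here 2, which
      makes M1 compare as the lesser map.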
*/ sp = PSTACK_PUSH(map_stack); IF_DEBUG(sp->is_hashmap = 1); hashmap_iterator_init(&stack, a, 0); hashmap_iterator_init(&b_stack, b, 0); sp->ap = hashmap_iterator_next(&stack); sp->bp = hashmap_iterator_next(&b_stack); sp->cmp_res = 0; ASSERT(sp->ap && sp->bp); a = CAR(sp->ap); b = CAR(sp->bp); sp->was_exact = exact; exact = 1; WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL)); sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; } case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE): if (!is_float(b)) { a_tag = FLOAT_DEF; goto mixed_types; } else { FloatDef af; FloatDef bf; GET_DOUBLE(a, af); GET_DOUBLE(b, bf); ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd)); } case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE): case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE): if (!is_big(b)) { a_tag = BIG_DEF; goto mixed_types; } ON_CMP_GOTO(big_comp(a, b)); case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE): if (is_not_any_fun(b)) { a_tag = FUN_DEF; goto mixed_types; } else { ErlFunThing* f1 = (ErlFunThing *) fun_val(a); ErlFunThing* f2 = (ErlFunThing *) fun_val(b); if (is_local_fun(f1) && is_local_fun(f2)) { ErlFunEntry* fe1 = f1->entry.fun; ErlFunEntry* fe2 = f2->entry.fun; Sint diff; diff = erts_cmp_atoms(fe1->module, (fe2)->module); if (diff != 0) { RETURN_NEQ(diff); } diff = fe1->index - fe2->index; if (diff != 0) { RETURN_NEQ(diff); } diff = fe1->old_uniq - fe2->old_uniq; if (diff != 0) { RETURN_NEQ(diff); } diff = f1->num_free - f2->num_free; if (diff != 0) { RETURN_NEQ(diff); } i = f1->num_free; if (i == 0) goto pop_next; aa = f1->env; bb = f2->env; goto term_array; } else if (is_external_fun(f1) && is_external_fun(f2)) { Export* a_exp = f1->entry.exp; Export* b_exp = f2->entry.exp; if ((j = erts_cmp_atoms(a_exp->info.mfa.module, b_exp->info.mfa.module)) != 0) { RETURN_NEQ(j); } if ((j = erts_cmp_atoms(a_exp->info.mfa.function, b_exp->info.mfa.function)) != 0) { RETURN_NEQ(j); } ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity); } else { /* External funs compare greater than local ones. */ RETURN_NEQ(is_external_fun(f1) - is_external_fun(f2)); } } case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE): if (!is_pid(b)) { a_tag = EXTERNAL_PID_DEF; goto mixed_types; } goto pid_common; case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE): if (is_internal_port(b)) { anode = external_port_node(a); bnode = erts_this_node; CMP_NODES(anode, bnode); ERTS_INTERNAL_ERROR("different nodes compared equal"); } else if (is_external_port(b)) { Uint64 anum, bnum; anode = external_port_node(a); bnode = external_port_node(b); CMP_NODES(anode, bnode); anum = external_port_number(a); bnum = external_port_number(b); if (anum == bnum) goto pop_next; RETURN_NEQ(anum < bnum ? -1 : 1); } else { a_tag = EXTERNAL_PORT_DEF; goto mixed_types; } case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE): /* * Note! When comparing refs we need to compare ref numbers * (32-bit words), *not* ref data words. 
*/ if (is_internal_ref(b)) { bnode = erts_this_node; blen = internal_ref_no_numbers(b); bnum = internal_ref_numbers(b); } else if(is_external_ref(b)) { ExternalThing* bthing = external_thing_ptr(b); bnode = bthing->node; bnum = external_thing_ref_numbers(bthing); blen = external_thing_ref_no_numbers(bthing); } else { a_tag = REF_DEF; goto mixed_types; } anode = erts_this_node; alen = internal_ref_no_numbers(a); anum = internal_ref_numbers(a); ref_common: CMP_NODES(anode, bnode); ASSERT(alen > 0 && blen > 0); if (alen != blen) { if (alen > blen) { do { if (anum[alen - 1] != 0) RETURN_NEQ(1); alen--; } while (alen > blen); } else { do { if (bnum[blen - 1] != 0) RETURN_NEQ(-1); blen--; } while (alen < blen); } } ASSERT(alen == blen); for (i = (Sint) alen - 1; i >= 0; i--) if (anum[i] != bnum[i]) RETURN_NEQ(anum[i] < bnum[i] ? -1 : 1); goto pop_next; case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE): if (is_internal_ref(b)) { bnode = erts_this_node; blen = internal_ref_no_numbers(b); bnum = internal_ref_numbers(b); } else if (is_external_ref(b)) { ExternalThing* bthing = external_thing_ptr(b); bnode = bthing->node; bnum = external_thing_ref_numbers(bthing); blen = external_thing_ref_no_numbers(bthing); } else { a_tag = EXTERNAL_REF_DEF; goto mixed_types; } { ExternalThing* athing = external_thing_ptr(a); anode = athing->node; anum = external_thing_ref_numbers(athing); alen = external_thing_ref_no_numbers(athing); } goto ref_common; default: /* Must be a binary */ ASSERT(is_binary(a)); if (!is_binary(b)) { a_tag = BINARY_DEF; goto mixed_types; } else { Uint a_size = binary_size(a); Uint b_size = binary_size(b); Uint a_bitsize; Uint b_bitsize; Uint a_bitoffs; Uint b_bitoffs; Uint min_size; int cmp; byte* a_ptr; byte* b_ptr; if (eq_only && a_size != b_size) { RETURN_NEQ(a_size - b_size); } ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize); ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize); if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) { min_size = (a_size < b_size) ? a_size : b_size; if ((cmp = sys_memcmp(a_ptr, b_ptr, min_size)) != 0) { RETURN_NEQ(cmp); } } else { a_size = (a_size << 3) + a_bitsize; b_size = (b_size << 3) + b_bitsize; min_size = (a_size < b_size) ? a_size : b_size; if ((cmp = erts_cmp_bits(a_ptr,a_bitoffs, b_ptr,b_bitoffs,min_size)) != 0) { RETURN_NEQ(cmp); } } ON_CMP_GOTO((Sint)(a_size - b_size)); } } } } /* * Take care of the case that the tags are different. */ mixed_types: { FloatDef f1, f2; Eterm big; Eterm aw = a; Eterm bw = b; #define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2)) #define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1)) #define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */ Eterm big_buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)]; b_tag = tag_val_def(bw); switch(_NUMBER_CODE(a_tag, b_tag)) { case SMALL_BIG: j = big_sign(bw) ? 1 : -1; break; case BIG_SMALL: j = big_sign(aw) ? -1 : 1; break; case SMALL_FLOAT: if (exact) goto exact_fall_through; GET_DOUBLE(bw, f2); if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) { /* Float is within the no loss limit */ f1.fd = signed_val(aw); j = erts_float_comp(f1.fd, f2.fd); } #if ERTS_SIZEOF_ETERM == 8 else if (f2.fd > (double) (MAX_SMALL + 1)) { /* Float is a positive bignum, i.e. bigger */ j = -1; } else if (f2.fd < (double) (MIN_SMALL - 1)) { /* Float is a negative bignum, i.e. smaller */ j = 1; } else { /* Float is a Sint but less precise */ j = signed_val(aw) - (Sint) f2.fd; } #else else { /* If float is positive it is bigger than small */ j = (f2.fd > 0.0) ? 
-1 : 1; } #endif /* ERTS_SIZEOF_ETERM == 8 */ break; case FLOAT_BIG: if (exact) goto exact_fall_through; { Wterm tmp = aw; aw = bw; bw = tmp; }/* fall through */ case BIG_FLOAT: if (exact) goto exact_fall_through; GET_DOUBLE(bw, f2); if ((f2.fd < (double) (MAX_SMALL + 1)) && (f2.fd > (double) (MIN_SMALL - 1))) { /* Float is a Sint */ j = big_sign(aw) ? -1 : 1; } else if (big_arity(aw) > BIG_ARITY_FLOAT_MAX || pow(2.0,(big_arity(aw)-1)*D_EXP) > fabs(f2.fd)) { /* If bignum size shows that it is bigger than the abs float */ j = big_sign(aw) ? -1 : 1; } else if (big_arity(aw) < BIG_ARITY_FLOAT_MAX && (pow(2.0,(big_arity(aw))*D_EXP)-1.0) < fabs(f2.fd)) { /* If bignum size shows that it is smaller than the abs float */ j = f2.fd < 0 ? 1 : -1; } else if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) { /* Float is within the no loss limit */ if (big_to_double(aw, &f1.fd) < 0) { j = big_sign(aw) ? -1 : 1; } else { j = erts_float_comp(f1.fd, f2.fd); } } else { big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm)); j = big_comp(aw, big); } if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) { j = -j; } break; case FLOAT_SMALL: if (exact) goto exact_fall_through; GET_DOUBLE(aw, f1); if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) { /* Float is within the no loss limit */ f2.fd = signed_val(bw); j = erts_float_comp(f1.fd, f2.fd); } #if ERTS_SIZEOF_ETERM == 8 else if (f1.fd > (double) (MAX_SMALL + 1)) { /* Float is a positive bignum, i.e. bigger */ j = 1; } else if (f1.fd < (double) (MIN_SMALL - 1)) { /* Float is a negative bignum, i.e. smaller */ j = -1; } else { /* Float is a Sint but less precise */ j = (Sint) f1.fd - signed_val(bw); } #else else { /* If float is positive it is bigger than small */ j = (f1.fd > 0.0) ? 1 : -1; } #endif /* ERTS_SIZEOF_ETERM == 8 */ break; exact_fall_through: default: j = b_tag - a_tag; } } if (j == 0) { goto pop_next; } else { goto not_equal; } term_array: /* arrays in 'aa' and 'bb', length in 'i' */ ASSERT(i>0); while (--i) { a = *aa++; b = *bb++; if (!is_same(a, b)) { if (is_atom(a) && is_atom(b)) { if ((j = erts_cmp_atoms(a, b)) != 0) { goto not_equal; } } else if (is_both_small(a, b)) { if ((j = signed_val(a)-signed_val(b)) != 0) { goto not_equal; } } else { WSTACK_PUSH3(stack, (UWord)bb, (UWord)aa, OP_ARG_WORD(TERM_ARRAY_OP,i)); goto tailrecur_ne; } } } a = *aa; b = *bb; goto tailrecur; pop_next: if (!WSTACK_ISEMPTY(stack)) { UWord something = WSTACK_POP(stack); struct cmp_map_state* sp; if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* an operation */ switch (GET_OP(something)) { case TERM_ARRAY_OP: i = GET_OP_ARG(something); aa = (Eterm*)WSTACK_POP(stack); bb = (Eterm*) WSTACK_POP(stack); goto term_array; case SWITCH_EXACT_OFF_OP: /* Done with exact compare of map keys, switch back */ ASSERT(exact); exact = 0; goto pop_next; case HASHMAP_PHASE1_ARE_KEYS_EQUAL: { sp = PSTACK_TOP(map_stack); ASSERT(sp->is_hashmap); if (j) { /* Key diff found, enter phase 2 */ int hash_cmp = hashmap_key_hash_cmp(sp->ap, sp->bp); if (hash_cmp == 0) { /* Hash collision.
Collision nodes are sorted by map key * order, so we advance the one with the lesser key */ hash_cmp = j; } if (hash_cmp < 0) { sp->min_key = CAR(sp->ap); sp->cmp_res = -1; sp->ap = hashmap_iterator_next(&stack); } else { ASSERT(hash_cmp > 0); sp->min_key = CAR(sp->bp); sp->cmp_res = 1; sp->bp = hashmap_iterator_next(&b_stack); } exact = 1; /* only exact key compares in phase 2 */ goto case_HASHMAP_PHASE2_LOOP; } /* No key diff found so far, compare values if min key */ if (sp->cmp_res) { a = CAR(sp->ap); b = sp->min_key; exact = 1; WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_IS_MIN_KEY)); sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; } /* no min key-value found yet */ a = CDR(sp->ap); b = CDR(sp->bp); exact = sp->was_exact; WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES)); sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; } case HASHMAP_PHASE1_IS_MIN_KEY: sp = PSTACK_TOP(map_stack); ASSERT(sp->is_hashmap); if (j < 0) { a = CDR(sp->ap); b = CDR(sp->bp); exact = sp->was_exact; WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_CMP_VALUES)); sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; } goto case_HASHMAP_PHASE1_LOOP; case HASHMAP_PHASE1_CMP_VALUES: sp = PSTACK_TOP(map_stack); ASSERT(sp->is_hashmap); if (j) { sp->cmp_res = j; sp->min_key = CAR(sp->ap); } case_HASHMAP_PHASE1_LOOP: sp->ap = hashmap_iterator_next(&stack); sp->bp = hashmap_iterator_next(&b_stack); if (!sp->ap) { /* end of maps with identical keys */ ASSERT(!sp->bp); /* as we assume identical map sizes */ j = sp->cmp_res; exact = sp->was_exact; (void) PSTACK_POP(map_stack); ON_CMP_GOTO(j); } a = CAR(sp->ap); b = CAR(sp->bp); exact = 1; WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE1_ARE_KEYS_EQUAL)); sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; case_HASHMAP_PHASE2_LOOP: if (sp->ap && sp->bp) { a = CAR(sp->ap); b = CAR(sp->bp); ASSERT(exact); WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_ARE_KEYS_EQUAL)); sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; } goto case_HASHMAP_PHASE2_NEXT_STEP; case HASHMAP_PHASE2_ARE_KEYS_EQUAL: sp = PSTACK_TOP(map_stack); ASSERT(sp->is_hashmap); if (j == 0) { /* keys are equal, skip them */ sp->ap = hashmap_iterator_next(&stack); sp->bp = hashmap_iterator_next(&b_stack); goto case_HASHMAP_PHASE2_LOOP; } /* fall through */ case_HASHMAP_PHASE2_NEXT_STEP: if (sp->ap || sp->bp) { int hash_cmp = hashmap_key_hash_cmp(sp->ap, sp->bp); if (hash_cmp == 0) { /* Hash collision.
Collision nodes are sorted by map key * order, so we advance the one with the lesser key */ hash_cmp = j; } if (hash_cmp < 0) { ASSERT(sp->ap); a = CAR(sp->ap); b = sp->min_key; ASSERT(exact); WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_A)); } else { ASSERT(hash_cmp > 0); ASSERT(sp->bp); a = CAR(sp->bp); b = sp->min_key; ASSERT(exact); WSTACK_PUSH(stack, OP_WORD(HASHMAP_PHASE2_IS_MIN_KEY_B)); } sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; } /* End of both maps */ j = sp->cmp_res; exact = sp->was_exact; (void) PSTACK_POP(map_stack); ON_CMP_GOTO(j); case HASHMAP_PHASE2_IS_MIN_KEY_A: sp = PSTACK_TOP(map_stack); ASSERT(sp->is_hashmap); if (j < 0) { sp->min_key = CAR(sp->ap); sp->cmp_res = -1; } sp->ap = hashmap_iterator_next(&stack); goto case_HASHMAP_PHASE2_LOOP; case HASHMAP_PHASE2_IS_MIN_KEY_B: sp = PSTACK_TOP(map_stack); ASSERT(sp->is_hashmap); if (j < 0) { sp->min_key = CAR(sp->bp); sp->cmp_res = 1; } sp->bp = hashmap_iterator_next(&b_stack); goto case_HASHMAP_PHASE2_LOOP; case FLATMAP_ATOM_KEYS: i = GET_OP_ARG(something); aa = (Eterm*) WSTACK_POP(stack); bb = (Eterm*) WSTACK_POP(stack); ON_CMP_GOTO(compare_flatmap_atom_keys(aa, bb, i)); case FLATMAP_ATOM_VALUES: { /* * Compare values of equal atom keys. * Find the smallest atom key where the values differ */ sp = PSTACK_PUSH(map_stack); IF_DEBUG(sp->is_hashmap = 0); sp->atom_keys = (Eterm*) WSTACK_POP(stack); sp->ap = (Eterm*) WSTACK_POP(stack); sp->bp = (Eterm*) WSTACK_POP(stack); sp->min_key = THE_NON_VALUE; sp->cmp_res = 0; case_FLATMAP_ATOM_VALUES_LOOP: i = GET_OP_ARG(something); while (i--) { ASSERT(is_atom(*sp->atom_keys)); if (is_non_value(sp->min_key) || erts_cmp_atoms(*sp->atom_keys, sp->min_key) < 0) { a = *sp->ap++; b = *sp->bp++; WSTACK_PUSH(stack, OP_ARG_WORD(FLATMAP_ATOM_CMP_VALUES,i)); sp->wstack_rollback = WSTACK_COUNT(stack); goto bodyrecur; } sp->atom_keys++; sp->ap++; sp->bp++; } j = sp->cmp_res; (void) PSTACK_POP(map_stack); ON_CMP_GOTO(j); case FLATMAP_ATOM_CMP_VALUES: sp = PSTACK_TOP(map_stack); ASSERT(!sp->is_hashmap); if (j) { sp->min_key = *sp->atom_keys; sp->cmp_res = j; } sp->atom_keys++; goto case_FLATMAP_ATOM_VALUES_LOOP; } default: ASSERT(!"Invalid cmp op"); } /* switch */ } a = (Eterm) something; b = (Eterm) WSTACK_POP(stack); goto tailrecur; } ASSERT(PSTACK_IS_EMPTY(map_stack)); PSTACK_DESTROY(map_stack); WSTACK_DESTROY(stack); WSTACK_DESTROY(b_stack); return 0; not_equal: if (!PSTACK_IS_EMPTY(map_stack) && !eq_only) { WSTACK_ROLLBACK(stack, PSTACK_TOP(map_stack)->wstack_rollback); goto pop_next; } PSTACK_DESTROY(map_stack); WSTACK_DESTROY(stack); WSTACK_DESTROY(b_stack); return j; #undef CMP_NODES } Eterm store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns) { struct erl_off_heap_header *ohhp; Uint i; Uint size; Eterm *from_hp; Eterm *to_hp = *hpp; ASSERT(is_external(ns) || is_internal_ref(ns)); from_hp = boxed_val(ns); size = thing_arityval(*from_hp) + 1; *hpp += size; for(i = 0; i < size; i++) to_hp[i] = from_hp[i]; if (is_external_header(*from_hp)) { ExternalThing *etp = (ExternalThing *) from_hp; ASSERT(is_external(ns)); erts_ref_node_entry(etp->node, 2, make_boxed(to_hp)); } else if (!is_magic_ref_thing(from_hp)) return make_internal_ref(to_hp); else { ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp; ErtsMagicBinary *mb = mreft->mb; ASSERT(is_magic_ref_thing(from_hp)); erts_refc_inc(&mb->intern.refc, 2); OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm)); } ohhp = (struct erl_off_heap_header*) to_hp; ohhp->next = oh->first; oh->first = ohhp; return make_boxed(to_hp); } 
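/* Illustrative (sketch) use of store_external_or_ref_() via the process
 * wrapper below; term and p are placeholders for a term that may be an
 * external pid/port/ref or an internal ref living outside p's heap:
 *
 *   if (is_external(term) || is_internal_ref(term))
 *       term = store_external_or_ref_in_proc_(p, term);
 */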
Eterm
store_external_or_ref_in_proc_(Process *proc, Eterm ns)
{
    Uint sz;
    Uint *hp;

    ASSERT(is_external(ns) || is_internal_ref(ns));

    sz = NC_HEAP_SIZE(ns);
    ASSERT(sz > 0);
    hp = HAlloc(proc, sz);
    return store_external_or_ref_(&hp, &MSO(proc), ns);
}

void bin_write(fmtfn_t to, void *to_arg, byte* buf, size_t sz)
{
    size_t i;

    for (i = 0; i < sz; i++) {
        if (IS_DIGIT(buf[i]))
            erts_print(to, to_arg, "%d,", buf[i]);
        else if (IS_PRINT(buf[i]))
            erts_print(to, to_arg, "%c,", buf[i]);
        else
            erts_print(to, to_arg, "%d,", buf[i]);
    }
    erts_putc(to, to_arg, '\n');
}

/* Fill buf with the UTF-8 encoding of the unicode list.
 * `len` is the maximum number of characters to encode and `capacity`
 * the size of buf in bytes; (*written) is set to the number of bytes
 * actually written. Return 0 on success, -1 on type error (including
 * lone surrogates and values above 0x10FFFF), and -2 when `len` or
 * `capacity` is exceeded.
 */
int
erts_unicode_list_to_buf(Eterm list, byte *buf, Sint capacity, Sint len,
                         Sint* written)
{
    Eterm* listptr;
    Sint sz = 0;
    int res;

    while (1) {
        Uint val;

        if (is_nil(list)) {
            res = 0;
            break;
        }
        if (is_not_list(list)) {
            res = -1;
            break;
        }
        listptr = list_val(list);

        if (len-- <= 0) {
            res = -2;
            break;
        }
        if (is_not_small(CAR(listptr))) {
            res = -1;
            break;
        }
        val = (Uint) signed_val(CAR(listptr));
        if (val < 0x80) {
            if (--capacity < 0) {
                res = -2;
                break;
            }
            buf[sz] = val;
            sz++;
        } else if (val < 0x800) {
            capacity -= 2;
            if (capacity < 0) {
                res = -2;
                break;
            }
            buf[sz+0] = 0xC0 | (val >> 6);
            buf[sz+1] = 0x80 | (val & 0x3F);
            sz += 2;
        } else if (val < 0x10000UL) {
            if (0xD800 <= val && val <= 0xDFFF) {
                res = -1;
                break;
            }
            capacity -= 3;
            if (capacity < 0) {
                res = -2;
                break;
            }
            buf[sz+0] = 0xE0 | (val >> 12);
            buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
            buf[sz+2] = 0x80 | (val & 0x3F);
            sz += 3;
        } else if (val < 0x110000) {
            capacity -= 4;
            if (capacity < 0) {
                res = -2;
                break;
            }
            buf[sz+0] = 0xF0 | (val >> 18);
            buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
            buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
            buf[sz+3] = 0x80 | (val & 0x3F);
            sz += 4;
        } else {
            res = -1;
            break;
        }
        list = CDR(listptr);
    }

    if (written)
        *written = sz;
    return res;
}

Sint
erts_unicode_list_to_buf_len(Eterm list)
{
    Eterm* listptr;
    Sint sz = 0;

    if (is_nil(list)) {
        return 0;
    }
    if (is_not_list(list)) {
        return -1;
    }
    listptr = list_val(list);

    while (1) {
        Uint val;

        if (is_not_small(CAR(listptr))) {
            return -1;
        }
        val = (Uint) signed_val(CAR(listptr));
        if (val < 0x80) {
            sz++;
        } else if (val < 0x800) {
            sz += 2;
        } else if (val < 0x10000UL) {
            if (0xD800 <= val && val <= 0xDFFF) {
                return -1;
            }
            sz += 3;
        } else if (val < 0x110000) {
            sz += 4;
        } else {
            return -1;
        }
        list = CDR(listptr);
        if (is_nil(list)) {
            return sz;
        }
        if (is_not_list(list)) {
            return -1;
        }
        listptr = list_val(list);
    }
}

/* Prints an integer in the given base, returning the number of digits printed.
 *
 * (*buf) is a pointer to the buffer, and is set to the start of the string
 * when returning. */
int Sint_to_buf(Sint n, int base, char **buf, size_t buf_size)
{
    char *p = &(*buf)[buf_size - 1];
    int sign = 0, size = 0;

    ASSERT(base >= 2 && base <= 36);

    if (n == 0) {
        *p-- = '0';
        size++;
    } else if (n < 0) {
        sign = 1;
        n = -n;
    }

    while (n != 0) {
        int digit = n % base;

        if (digit < 10) {
            *p-- = '0' + digit;
        } else {
            *p-- = 'A' + (digit - 10);
        }

        size++;
        n /= base;
    }

    if (sign) {
        *p-- = '-';
        size++;
    }

    *buf = p + 1;
    return size;
}

/* Build a list of integers in some safe memory area.
** Memory must be preallocated prior to the call, 2*len words in size.
** hpp is a pointer to the "heap" pointer; on return
** this pointer is updated to point after the list.
*/
Eterm
buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail)
{
    Eterm* hp = *hpp;
    size_t i = len;

    while (i != 0) {
        --i;
        tail = CONS(hp, make_small((Uint)(byte)buf[i]), tail);
        hp += 2;
    }

    *hpp = hp;
    return tail;
}

/*
** Write io list into a buffer.
**
** An iolist is defined as:
**
** iohead ::= Binary
**        |   Byte (i.e. an integer in range [0..255])
**        |   iolist
**        ;
**
** iotail ::= []
**        |   Binary  (added by tony)
**        |   iolist
**        ;
**
** iolist ::= []
**        |   Binary
**        |   [ iohead | iotail]
**        ;
**
** Return remaining bytes in buffer on success;
** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow;
** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that the result
** would not be a whole number of bytes).
**
** Note!
** Do not detect input errors in this function that are not detected by
** erts_iolist_size!
**
** A caller should be able to rely on a successful return from
** erts_iolist_to_buf if erts_iolist_size was previously called successfully
** and erts_iolist_to_buf is called with a buffer at least as large as the
** value given by erts_iolist_size.
*/
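/*
 * Illustrative sketch (kept out of the build with #if 0) of the calling
 * convention promised above: when erts_iolist_size() succeeds, calling
 * erts_iolist_to_buf() with a buffer of at least the returned size must
 * succeed as well, and with an exactly sized buffer no bytes are left
 * over. The function name and the use of ERTS_ALC_T_TMP are just
 * plausible choices for the example, not part of this file.
 */
#if 0
static int iolist_copy_example(Eterm iolist)
{
    ErlDrvSizeT size, left;
    char *buf;

    if (erts_iolist_size(iolist, &size) != ERTS_IOLIST_OK)
        return -1; /* improper iolist (or size overflow) */

    buf = erts_alloc(ERTS_ALC_T_TMP, size ? size : 1);
    left = erts_iolist_to_buf(iolist, buf, size);
    ASSERT(left == 0); /* exact-size buffer => nothing left over */

    /* ... use buf[0..size-1] ... */
    erts_free(ERTS_ALC_T_TMP, buf);
    return 0;
}
#endif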
typedef enum {
    ERTS_IL2B_BCOPY_OK,
    ERTS_IL2B_BCOPY_YIELD,
    ERTS_IL2B_BCOPY_OVERFLOW,
    ERTS_IL2B_BCOPY_TYPE_ERROR
} ErtsIL2BBCopyRes;

static ErtsIL2BBCopyRes
iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp);

static ERTS_INLINE ErlDrvSizeT
iolist_to_buf(const int yield_support,
              ErtsIOList2BufState *state,
              Eterm obj,
              char* buf,
              ErlDrvSizeT alloced_len)
{
#undef IOLIST_TO_BUF_BCOPY
#define IOLIST_TO_BUF_BCOPY(CONSP)                                      \
do {                                                                    \
    size_t size = binary_size(obj);                                     \
    if (size > 0) {                                                     \
        Uint bitsize;                                                   \
        byte* bptr;                                                     \
        Uint bitoffs;                                                   \
        Uint num_bits;                                                  \
        if (yield_support) {                                            \
            size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \
            if (yield_count > 0)                                        \
                max_size *= yield_count+1;                              \
            if (size > max_size) {                                      \
                state->objp = CONSP;                                    \
                goto L_bcopy_yield;                                     \
            }                                                           \
            if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) {     \
                int cost = (int) size;                                  \
                cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT;       \
                yield_count -= cost;                                    \
            }                                                           \
        }                                                               \
        if (len < size)                                                 \
            goto L_overflow;                                            \
        ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);             \
        if (bitsize != 0)                                               \
            goto L_type_error;                                          \
        num_bits = 8*size;                                              \
        copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);         \
        buf += size;                                                    \
        len -= size;                                                    \
    }                                                                   \
} while (0)

    ErlDrvSizeT res, len;
    Eterm* objp = NULL;
    int init_yield_count;
    int yield_count;
    DECLARE_ESTACK(s);

    len = (ErlDrvSizeT) alloced_len;

    if (!yield_support) {
        yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */
        goto L_again;
    }
    else {
        if (state->iolist.reds_left <= 0)
            return ERTS_IOLIST_TO_BUF_YIELD;
        ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
        init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED
                            * state->iolist.reds_left);
        yield_count = init_yield_count;
        if (!state->iolist.estack.start)
            goto L_again;
        else {
            int chk_stack;
            /* Restart; restore state...
*/ ESTACK_RESTORE(s, &state->iolist.estack); if (!state->bcopy.bptr) chk_stack = 0; else { chk_stack = 1; switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) { case ERTS_IL2B_BCOPY_OK: break; case ERTS_IL2B_BCOPY_YIELD: BUMP_ALL_REDS(state->iolist.c_p); state->iolist.reds_left = 0; ESTACK_SAVE(s, &state->iolist.estack); return ERTS_IOLIST_TO_BUF_YIELD; case ERTS_IL2B_BCOPY_OVERFLOW: goto L_overflow; case ERTS_IL2B_BCOPY_TYPE_ERROR: goto L_type_error; } } obj = state->iolist.obj; buf = state->buf; len = state->len; objp = state->objp; state->objp = NULL; if (objp) goto L_tail; if (!chk_stack) goto L_again; /* check stack */ } } while (!ESTACK_ISEMPTY(s)) { obj = ESTACK_POP(s); L_again: if (is_list(obj)) { while (1) { /* Tail loop */ while (1) { /* Head loop */ if (yield_support && --yield_count <= 0) goto L_yield; objp = list_val(obj); obj = CAR(objp); if (is_byte(obj)) { if (len == 0) { goto L_overflow; } *buf++ = unsigned_val(obj); len--; } else if (is_binary(obj)) { IOLIST_TO_BUF_BCOPY(objp); } else if (is_list(obj)) { ESTACK_PUSH(s, CDR(objp)); continue; /* Head loop */ } else if (is_not_nil(obj)) { goto L_type_error; } break; } L_tail: obj = CDR(objp); if (is_list(obj)) { continue; /* Tail loop */ } else if (is_binary(obj)) { IOLIST_TO_BUF_BCOPY(NULL); } else if (is_not_nil(obj)) { goto L_type_error; } break; } } else if (is_binary(obj)) { IOLIST_TO_BUF_BCOPY(NULL); } else if (is_not_nil(obj)) { goto L_type_error; } else if (yield_support && --yield_count <= 0) goto L_yield; } res = len; L_return: DESTROY_ESTACK(s); if (yield_support) { int reds; CLEAR_SAVED_ESTACK(&state->iolist.estack); reds = ((init_yield_count - yield_count - 1) / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1; BUMP_REDS(state->iolist.c_p, reds); state->iolist.reds_left -= reds; if (state->iolist.reds_left < 0) state->iolist.reds_left = 0; } return res; L_type_error: res = ERTS_IOLIST_TO_BUF_TYPE_ERROR; goto L_return; L_overflow: res = ERTS_IOLIST_TO_BUF_OVERFLOW; goto L_return; L_bcopy_yield: state->buf = buf; state->len = len; switch (iolist_to_buf_bcopy(state, obj, &yield_count)) { case ERTS_IL2B_BCOPY_OK: ERTS_INTERNAL_ERROR("Missing yield"); case ERTS_IL2B_BCOPY_YIELD: BUMP_ALL_REDS(state->iolist.c_p); state->iolist.reds_left = 0; ESTACK_SAVE(s, &state->iolist.estack); return ERTS_IOLIST_TO_BUF_YIELD; case ERTS_IL2B_BCOPY_OVERFLOW: goto L_overflow; case ERTS_IL2B_BCOPY_TYPE_ERROR: goto L_type_error; } L_yield: BUMP_ALL_REDS(state->iolist.c_p); state->iolist.reds_left = 0; state->iolist.obj = obj; state->buf = buf; state->len = len; ESTACK_SAVE(s, &state->iolist.estack); return ERTS_IOLIST_TO_BUF_YIELD; #undef IOLIST_TO_BUF_BCOPY } static ErtsIL2BBCopyRes iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp) { ErtsIL2BBCopyRes res; char *buf = state->buf; ErlDrvSizeT len = state->len; byte* bptr; size_t size; size_t max_size; Uint bitoffs; Uint num_bits; int yield_count = *yield_countp; if (state->bcopy.bptr) { bptr = state->bcopy.bptr; size = state->bcopy.size; bitoffs = state->bcopy.bitoffs; state->bcopy.bptr = NULL; } else { Uint bitsize; ASSERT(is_binary(obj)); size = binary_size(obj); if (size <= 0) return ERTS_IL2B_BCOPY_OK; if (len < size) return ERTS_IL2B_BCOPY_OVERFLOW; ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); if (bitsize != 0) return ERTS_IL2B_BCOPY_TYPE_ERROR; } ASSERT(size > 0); max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; if (yield_count > 0) max_size *= (size_t) (yield_count+1); if (size <= max_size) { if (size >= 
ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { int cost = (int) size; cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; yield_count -= cost; } res = ERTS_IL2B_BCOPY_OK; } else { ASSERT(0 < max_size && max_size < size); yield_count = 0; state->bcopy.bptr = bptr + max_size; state->bcopy.bitoffs = bitoffs; state->bcopy.size = size - max_size; size = max_size; res = ERTS_IL2B_BCOPY_YIELD; } num_bits = 8*size; copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); state->buf += size; state->len -= size; *yield_countp = yield_count; return res; } ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state) { return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len); } ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len) { return iolist_to_buf(0, NULL, obj, buf, alloced_len); } /* * Return 0 if successful, and non-zero if unsuccessful. * * It is vital that if erts_iolist_to_buf would return an error for * any type of term data, this function should do so as well. * Any input term error detected in erts_iolist_to_buf should also * be detected in this function! */ static ERTS_INLINE int iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep) { int res, init_yield_count, yield_count; Eterm* objp; Uint size = (Uint) *sizep; DECLARE_ESTACK(s); if (!yield_support) yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */ else { if (state->reds_left <= 0) return ERTS_IOLIST_YIELD; ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED; init_yield_count *= state->reds_left; yield_count = init_yield_count; if (state->estack.start) { /* Restart; restore state... */ ESTACK_RESTORE(s, &state->estack); size = (Uint) state->size; obj = state->obj; } } goto L_again; #define SAFE_ADD(Var, Val) \ do { \ Uint valvar = (Val); \ Var += valvar; \ if (Var < valvar) { \ goto L_overflow_error; \ } \ } while (0) while (!ESTACK_ISEMPTY(s)) { obj = ESTACK_POP(s); L_again: if (is_list(obj)) { while (1) { /* Tail loop */ while (1) { /* Head loop */ if (yield_support && --yield_count <= 0) goto L_yield; objp = list_val(obj); /* Head */ obj = CAR(objp); if (is_byte(obj)) { size++; if (size == 0) { goto L_overflow_error; } } else if (is_binary(obj) && binary_bitsize(obj) == 0) { SAFE_ADD(size, binary_size(obj)); } else if (is_list(obj)) { ESTACK_PUSH(s, CDR(objp)); continue; /* Head loop */ } else if (is_not_nil(obj)) { goto L_type_error; } break; } /* Tail */ obj = CDR(objp); if (is_list(obj)) continue; /* Tail loop */ else if (is_binary(obj) && binary_bitsize(obj) == 0) { SAFE_ADD(size, binary_size(obj)); } else if (is_not_nil(obj)) { goto L_type_error; } break; } } else { if (yield_support && --yield_count <= 0) goto L_yield; if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */ SAFE_ADD(size, binary_size(obj)); } else if (is_not_nil(obj)) { goto L_type_error; } } } #undef SAFE_ADD *sizep = (ErlDrvSizeT) size; res = ERTS_IOLIST_OK; L_return: DESTROY_ESTACK(s); if (yield_support) { int yc, reds; CLEAR_SAVED_ESTACK(&state->estack); yc = init_yield_count - yield_count; reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1; BUMP_REDS(state->c_p, reds); state->reds_left -= reds; state->size = (ErlDrvSizeT) size; state->have_size = 1; } return res; L_overflow_error: res = ERTS_IOLIST_OVERFLOW; size = 0; goto L_return; L_type_error: res = ERTS_IOLIST_TYPE; size = 0; goto L_return; L_yield: BUMP_ALL_REDS(state->c_p); state->reds_left = 0; state->size = size; 
    state->obj = obj;
    ESTACK_SAVE(s, &state->estack);
    return ERTS_IOLIST_YIELD;
}

int erts_iolist_size_yielding(ErtsIOListState *state)
{
    ErlDrvSizeT size = state->size;
    return iolist_size(1, state, state->obj, &size);
}

int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
{
    *sizep = 0;
    return iolist_size(0, NULL, obj, sizep);
}

/* Return 0 if the item is not a non-empty flat list of bytes;
   otherwise return the (nonzero) length of the list. */
Sint is_string(Eterm list)
{
    Sint len = 0;

    while (is_list(list)) {
        Eterm* consp = list_val(list);
        Eterm hd = CAR(consp);

        if (!is_byte(hd))
            return 0;
        len++;
        list = CDR(consp);
    }
    if (is_nil(list))
        return len;
    return 0;
}

static int trim_threshold;
static int top_pad;
static int mmap_threshold;
static int mmap_max;

Uint tot_bin_allocated;

void erts_init_utils(void)
{
#if defined(DEBUG) && defined(ARCH_64)
    erts_tsd_key_create(&erts_ycf_debug_stack_start_tsd_key,
                        "erts_ycf_debug_stack_start_tsd_key");
#endif
}

void erts_utils_sched_spec_data_init(void)
{
#if defined(DEBUG) && defined(ARCH_64)
    erts_tsd_set(erts_ycf_debug_stack_start_tsd_key, NULL);
#endif
}

void erts_init_utils_mem(void)
{
    trim_threshold = -1;
    top_pad = -1;
    mmap_threshold = -1;
    mmap_max = -1;
}

int
sys_alloc_opt(int opt, int value)
{
#if HAVE_MALLOPT
    int m_opt;
    int *curr_val;

    switch(opt) {
    case SYS_ALLOC_OPT_TRIM_THRESHOLD:
#ifdef M_TRIM_THRESHOLD
        m_opt = M_TRIM_THRESHOLD;
        curr_val = &trim_threshold;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_TOP_PAD:
#ifdef M_TOP_PAD
        m_opt = M_TOP_PAD;
        curr_val = &top_pad;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_MMAP_THRESHOLD:
#ifdef M_MMAP_THRESHOLD
        m_opt = M_MMAP_THRESHOLD;
        curr_val = &mmap_threshold;
        break;
#else
        return 0;
#endif
    case SYS_ALLOC_OPT_MMAP_MAX:
#ifdef M_MMAP_MAX
        m_opt = M_MMAP_MAX;
        curr_val = &mmap_max;
        break;
#else
        return 0;
#endif
    default:
        return 0;
    }

    if(mallopt(m_opt, value)) {
        *curr_val = value;
        return 1;
    }
#endif /* #if HAVE_MALLOPT */

    return 0;
}

void
sys_alloc_stat(SysAllocStat *sasp)
{
    sasp->trim_threshold = trim_threshold;
    sasp->top_pad        = top_pad;
    sasp->mmap_threshold = mmap_threshold;
    sasp->mmap_max       = mmap_max;
}

char *
erts_read_env(char *key)
{
    size_t value_len = 256;
    char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
    int res;

    while (1) {
        res = erts_sys_explicit_8bit_getenv(key, value, &value_len);
        if (res >= 0) {
            break;
        }
        value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
    }

    if (res != 1) {
        erts_free(ERTS_ALC_T_TMP, value);
        return NULL;
    }

    return value;
}

void
erts_free_read_env(void *value)
{
    if (value)
        erts_free(ERTS_ALC_T_TMP, value);
}

typedef struct {
    size_t sz;
    char *ptr;
} ErtsEmuArg;

typedef struct {
    int argc;
    ErtsEmuArg *arg;
    size_t no_bytes;
} ErtsEmuArgs;

ErtsEmuArgs saved_emu_args = {0};

void
erts_save_emu_args(int argc, char **argv)
{
#ifdef DEBUG
    char *end_ptr;
#endif
    char *ptr;
    int i;
    size_t arg_sz[100];
    size_t size;

    ASSERT(!saved_emu_args.argc);

    size = sizeof(ErtsEmuArg)*argc;
    for (i = 0; i < argc; i++) {
        size_t sz = sys_strlen(argv[i]);
        if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
            arg_sz[i] = sz;
        size += sz+1;
    }
    ptr = (char *) malloc(size);
    if (!ptr) {
        ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
    }
#ifdef DEBUG
    end_ptr = ptr + size;
#endif
    saved_emu_args.arg = (ErtsEmuArg *) ptr;
    ptr += sizeof(ErtsEmuArg)*argc;
    saved_emu_args.argc = argc;
    saved_emu_args.no_bytes = 0;
    for (i = 0; i < argc; i++) {
        size_t sz;
        if (i < sizeof(arg_sz)/sizeof(arg_sz[0]))
            sz = arg_sz[i];
        else
            sz = sys_strlen(argv[i]);
        saved_emu_args.arg[i].ptr = ptr;
        saved_emu_args.arg[i].sz = sz;
        saved_emu_args.no_bytes += sz;
        ptr += sz+1;
        sys_strcpy(saved_emu_args.arg[i].ptr, argv[i]);
    }
    ASSERT(ptr == end_ptr);
}

Eterm
erts_get_emu_args(Process *c_p)
{
#ifdef DEBUG
    Eterm *end_hp;
#endif
    int i;
    Uint hsz;
    Eterm *hp, res;

    hsz = saved_emu_args.no_bytes*2;
    hsz += saved_emu_args.argc*2;

    hp = HAlloc(c_p, hsz);
#ifdef DEBUG
    end_hp = hp + hsz;
#endif
    res = NIL;

    for (i = saved_emu_args.argc-1; i >= 0; i--) {
        Eterm arg = buf_to_intlist(&hp,
                                   saved_emu_args.arg[i].ptr,
                                   saved_emu_args.arg[i].sz,
                                   NIL);
        res = CONS(hp, arg, res);
        hp += 2;
    }

    ASSERT(hp == end_hp);

    return res;
}

/*
 * To be used to silence unused result warnings, but do not abuse it.
 */
void erts_silence_warn_unused_result(long unused)
{
}

/*
 * Interval counts
 */
void
erts_interval_init(erts_interval_t *icp)
{
    erts_atomic64_init_nob(&icp->counter.atomic, 0);
}

static ERTS_INLINE Uint64
step_interval_nob(erts_interval_t *icp)
{
    return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}

static ERTS_INLINE Uint64
step_interval_relb(erts_interval_t *icp)
{
    return (Uint64) erts_atomic64_inc_read_relb(&icp->counter.atomic);
}

static ERTS_INLINE Uint64
ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
    Uint64 curr_ic;
    curr_ic = (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
    if (curr_ic > ic)
        return curr_ic;
    return (Uint64) erts_atomic64_inc_read_nob(&icp->counter.atomic);
}

static ERTS_INLINE Uint64
ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
    Uint64 curr_ic;
    curr_ic = (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
    if (curr_ic > ic)
        return curr_ic;
    return (Uint64) erts_atomic64_inc_read_acqb(&icp->counter.atomic);
}

Uint64
erts_step_interval_nob(erts_interval_t *icp)
{
    return step_interval_nob(icp);
}

Uint64
erts_step_interval_relb(erts_interval_t *icp)
{
    return step_interval_relb(icp);
}

Uint64
erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
    return ensure_later_interval_nob(icp, ic);
}

Uint64
erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
    return ensure_later_interval_acqb(icp, ic);
}

/*
 * A millisecond timestamp without time correction where there's no hrtime
 * - for tracing on "long" things...
 */
Uint64 erts_timestamp_millis(void)
{
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
    return ERTS_MONOTONIC_TO_MSEC(erts_os_monotonic_time());
#else
    Uint64 res;
    SysTimeval tv;
    sys_gettimeofday(&tv);
    res = (Uint64) tv.tv_sec*1000000;
    res += (Uint64) tv.tv_usec;
    return (res / 1000);
#endif
}

/*
 * erts_check_below_limit() and
 * erts_check_above_limit() are put
 * in utils.c in order to prevent
 * inlining.
 */
int
erts_check_below_limit(char *ptr, char *limit)
{
    return ptr < limit;
}

int
erts_check_above_limit(char *ptr, char *limit)
{
    return ptr > limit;
}

void *
erts_ptr_id(void *ptr)
{
    return ptr;
}

const void *erts_get_stacklimit(void)
{
    return ethr_get_stacklimit();
}

int erts_check_if_stack_grows_downwards(char *ptr)
{
    char c;
    if (erts_check_below_limit(&c, ptr))
        return 1;
    else
        return 0;
}

/*
 * Build a single {M,F,A,Location} item to be part of
 * a stack trace.
 */
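/*
 * For example, a frame for a (hypothetical) call to mymod:myfun/1 compiled
 * from line 42 of "mymod.erl" comes out as
 *
 *     {mymod, myfun, 1, [{file,"mymod.erl"}, {line,42}]}
 *
 * with the arity standing in for the argument list when no arguments are
 * available; the module, function and line above are made up purely for
 * illustration.
 */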
Eterm*
erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
                    Eterm args, Eterm* mfa_p, Eterm loc)
{
    if (fi->loc != LINE_INVALID_LOCATION) {
        Eterm tuple;
        int line = LOC_LINE(fi->loc);
        int file = LOC_FILE(fi->loc);
        Eterm file_term = fi->fname_ptr[file];

        tuple = TUPLE2(hp, am_line, make_small(line));
        hp += 3;
        loc = CONS(hp, tuple, loc);
        hp += 2;

        tuple = TUPLE2(hp, am_file, file_term);
        hp += 3;
        loc = CONS(hp, tuple, loc);
        hp += 2;
    }

    if (is_list(args) || is_nil(args)) {
        *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
                        args, loc);
    } else {
        Eterm arity = make_small(fi->mfa->arity);
        *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
                        arity, loc);
    }
    return hp + 5;
}

static void erts_qsort_helper(byte *base,
                              size_t nr_of_items,
                              size_t item_size,
                              erts_void_ptr_cmp_t compare,
                              Uint32 extra_seed);

static erts_qsort_partion_array_result
erts_qsort_partion_array(byte *base,
                         size_t nr_of_items,
                         size_t item_size,
                         erts_void_ptr_cmp_t compare,
                         Uint32 extra_seed);

static void erts_qsort_swap(size_t item_size, void* iptr, void* jptr)
{
    ASSERT(item_size % sizeof(UWord) == 0);
    if (iptr != jptr) {
        /* Do it word by word */
        UWord* iwp = (UWord*) iptr;
        UWord* jwp = (UWord*) jptr;
        size_t cnt;
        for (cnt = item_size / sizeof(UWord); cnt; cnt--) {
            UWord tmp;
            sys_memcpy(&tmp, jwp, sizeof(UWord));
            sys_memcpy(jwp, iwp, sizeof(UWord));
            sys_memcpy(iwp, &tmp, sizeof(UWord));
            jwp++;
            iwp++;
        }
    }
}

/* **Important Note**
 *
 * A yielding version of this function is generated with YCF. This
 * means that the code has to follow some restrictions. See note about
 * YCF near the top of the file for more information.
 */
static erts_qsort_partion_array_result
erts_qsort_partion_array(byte *base,
                         size_t nr_of_items,
                         size_t item_size,
                         erts_void_ptr_cmp_t compare,
                         Uint32 extra_seed)
{
    /* The array is partitioned using a fast-path-slow-path approach.

       We first assume that there are no duplicates of the selected
       pivot item in the array. If this assumption holds, we only need
       to keep track of two regions (items greater than the pivot, and
       items smaller than the pivot), which leads to fewer swaps than
       if we need to keep track of three regions (items greater than
       the pivot, items smaller than the pivot, and items equal to the
       pivot). If we find an item that is equal to the pivot, we fall
       back to the slow path that keeps track of three regions.

       Start of fast path:
    */
    byte* second_part_start = base + (nr_of_items * item_size);
    byte* curr = base + item_size;
    int more_than_one_pivot_item = 0;
    size_t pivot_index =
        ((size_t)erts_sched_local_random_hash_64_to_32_shift(
            (Uint64)extra_seed +
            (Uint64)((UWord)compare) +
            (Uint64)((UWord)base) -
            (Uint64)nr_of_items)) % nr_of_items;
    /* Move pivot first */
    erts_qsort_swap(item_size, base, base + pivot_index * item_size);
    while (curr != second_part_start) {
        int compare_res = compare(curr, base);
        if (compare_res < 0) {
            /* Include in first part */
            curr += item_size;
        } else if (compare_res == 0) {
            more_than_one_pivot_item = 1;
            break;
        } else {
            /* Move to last part */
            second_part_start -= item_size;
            erts_qsort_swap(item_size, curr, second_part_start);
        }
    }
    if (!more_than_one_pivot_item) {
        /* Fast path successful (we don't need to use the slow path) */
        /* Move the pivot before the second part (if any) */
        erts_qsort_partion_array_result res;
        res.pivot_part_start = second_part_start - item_size;
        res.pivot_part_end = second_part_start;
        erts_qsort_swap(item_size, base, res.pivot_part_start);
        return res;
    } else {
        /* We have more than one item equal to the pivot item and need
           to keep track of three regions.
           Start of slow path:
        */
        byte * pivot_part_start = curr - item_size;
        byte * pivot_part_end = curr + item_size;
        byte * last_part_start = second_part_start;
        erts_qsort_swap(item_size, base, pivot_part_start);
        while (pivot_part_end != last_part_start) {
            int compare_res = compare(pivot_part_end,
                                      pivot_part_end - item_size);
            if (compare_res == 0) {
                /* Include in pivot part */
                pivot_part_end += item_size;
            } else if (compare_res < 0) {
                /* Move pivot part one step to the right */
                erts_qsort_swap(item_size, pivot_part_start, pivot_part_end);
                pivot_part_start += item_size;
                pivot_part_end += item_size;
            } else {
                /* Move to last part */
                last_part_start -= item_size;
                erts_qsort_swap(item_size, pivot_part_end, last_part_start);
            }
        }
        {
            erts_qsort_partion_array_result res;
            res.pivot_part_start = pivot_part_start;
            res.pivot_part_end = pivot_part_end;
            return res;
        }
    }
}

static void erts_qsort_insertion_sort(byte *base,
                                      size_t nr_of_items,
                                      size_t item_size,
                                      erts_void_ptr_cmp_t compare)
{
    byte *end = base + ((nr_of_items-1) * item_size);
    byte *unsorted_start = base;
    while (unsorted_start < end) {
        byte *smallest_so_far = unsorted_start;
        byte *curr = smallest_so_far + item_size;
        while (curr <= end) {
            if (compare(curr, smallest_so_far) < 0) {
                smallest_so_far = curr;
            }
            curr += item_size;
        }
        if (smallest_so_far != unsorted_start) {
            erts_qsort_swap(item_size, unsorted_start, smallest_so_far);
        }
        unsorted_start += item_size;
    }
}

/* **Important Note**
 *
 * A yielding version of this function is generated with YCF. This
 * means that the code has to follow some restrictions. See note about
 * YCF near the top of the file for more information.
 */
static void erts_qsort_helper(byte *base,
                              size_t nr_of_items,
                              size_t item_size,
                              erts_void_ptr_cmp_t compare,
                              Uint32 extra_seed)
{
    erts_qsort_partion_array_result partion_info;
    size_t nr_of_items_in_first_partion;
    size_t nr_of_items_second_partion;
    const size_t qsort_cut_off = 16;
    if (nr_of_items <= qsort_cut_off) {
        erts_qsort_insertion_sort(base, nr_of_items, item_size, compare);
        YCF_CONSUME_REDS(nr_of_items * erts_fit_in_bits_int64(nr_of_items) * 2);
        return;
    }
    partion_info = erts_qsort_partion_array(base, nr_of_items,
                                            item_size, compare, extra_seed);
    nr_of_items_in_first_partion =
        ((partion_info.pivot_part_start - base) / sizeof(byte)) / item_size;
    nr_of_items_second_partion =
        nr_of_items -
        (((partion_info.pivot_part_end - base) / sizeof(byte)) / item_size);
    erts_qsort_helper(base, nr_of_items_in_first_partion,
                      item_size, compare, extra_seed + 1);
    erts_qsort_helper(partion_info.pivot_part_end, nr_of_items_second_partion,
                      item_size, compare, extra_seed + 1);
}

/***************
 ** Quicksort **
 ***************
 *
 * A quicksort implementation with the same interface as the qsort
 * function in the C standard library.
 *
 * A yielding version of erts_qsort is generated by YCF. The functions
 * for the yielding version (erts_qsort_ycf_gen_yielding,
 * erts_qsort_ycf_gen_continue and erts_qsort_ycf_gen_destroy) are
 * declared in global.h. See note about YCF near the top of the file
 * for more information.
 *
 * !!!!
 * Note that the erts_qsort_swap that is used by erts_qsort does not have
 * trapping enabled. If the array items are large, erts_qsort should also
 * trap in the erts_qsort_swap function, but this causes terrible
 * performance when the array items are small, so one should investigate
 * a fast-path approach.
 */
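/*
 * Illustrative sketch (kept out of the build with #if 0): sorting an array
 * of UWords with erts_qsort. Note that item_size must be a multiple of
 * sizeof(UWord), since erts_qsort_swap() swaps items word by word; the
 * comparison callback follows the usual qsort() contract. The example
 * function names are made up for illustration.
 */
#if 0
static int cmp_uwords_example(const void *ap, const void *bp)
{
    UWord a = *(const UWord *) ap;
    UWord b = *(const UWord *) bp;
    return a < b ? -1 : (a > b ? 1 : 0);
}

static void sort_uwords_example(UWord *items, size_t n)
{
    erts_qsort(items, n, sizeof(UWord), cmp_uwords_example);
}
#endif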
void erts_qsort(void *base,
                size_t nr_of_items,
                size_t item_size,
                erts_void_ptr_cmp_t compare)
{
    const int improved_seed_limit = 128;
    Uint32 seed =
        nr_of_items > improved_seed_limit ? erts_sched_local_random(0) : 1;
    erts_qsort_helper((byte*)base, nr_of_items, item_size, compare, seed);
}

typedef struct {
    void* trap_state;
    erts_ycf_continue_fun_t ycf_continue;
    erts_ycf_destroy_trap_state_fun_t ycf_destroy_trap_state;
} erts_ycf_trap_driver_state_holder;

static int erts_ycf_trap_driver_dtor(Binary* bin)
{
    erts_ycf_trap_driver_state_holder* holder = ERTS_MAGIC_BIN_DATA(bin);
    if (holder->trap_state != NULL) {
        holder->ycf_destroy_trap_state(holder->trap_state);
    }
    return 1;
}

static void* erts_ycf_trap_driver_alloc(size_t size, void* ctx)
{
    ErtsAlcType_t type = (ErtsAlcType_t)(Uint)ctx;
    return erts_alloc(type, size);
}

static void erts_ycf_trap_driver_free(void* data, void* ctx)
{
    ErtsAlcType_t type = (ErtsAlcType_t)(Uint)ctx;
    erts_free(type, data);
}

static BIF_RETTYPE erts_ycf_trap_driver_trap_helper(Process* p,
                                                    Eterm* bif_args,
                                                    int nr_of_arguments,
                                                    Eterm trapTerm,
                                                    Export *export_entry)
{
    int i;
    Eterm* reg = erts_proc_sched_data((p))->registers->x_reg_array.d;
    ERTS_BIF_PREP_TRAP(export_entry, p, nr_of_arguments);
    reg[0] = trapTerm;
    for (i = 1; i < nr_of_arguments; i++) {
        reg[i] = bif_args[i];
    }
    return THE_NON_VALUE;
}

BIF_RETTYPE erts_ycf_trap_driver(Process* p,
                                 Eterm* bif_args,
                                 int nr_of_arguments,
                                 int iterations_per_red,
                                 ErtsAlcType_t memory_allocation_type,
                                 size_t ycf_stack_alloc_size,
                                 int export_entry_index,
                                 erts_ycf_continue_fun_t ycf_continue_fun,
                                 erts_ycf_destroy_trap_state_fun_t ycf_destroy_fun,
                                 erts_ycf_yielding_fun_t ycf_yielding_fun)
{
    const long reds = iterations_per_red * ERTS_BIF_REDS_LEFT(p);
    long nr_of_reductions = DBG_RANDOM_REDS(reds, (Uint)&p);
    const long init_reds = nr_of_reductions;

    if (is_internal_magic_ref(bif_args[0])) {
        erts_ycf_trap_driver_state_holder *state_holder;
        Binary *state_bin = erts_magic_ref2bin(bif_args[0]);
        BIF_RETTYPE ret;
        if (ERTS_MAGIC_BIN_DESTRUCTOR(state_bin) == erts_ycf_trap_driver_dtor) {
            /* Continue a trapped call */
            erts_set_gc_state(p, 1);
            state_holder = ERTS_MAGIC_BIN_DATA(state_bin);
#if defined(DEBUG) && defined(ARCH_64)
            ycf_debug_set_stack_start(&nr_of_reductions);
#endif
            ret = state_holder->ycf_continue(&nr_of_reductions,
                                             &state_holder->trap_state,
                                             NULL);
#if defined(DEBUG) && defined(ARCH_64)
            ycf_debug_reset_stack_start();
#endif
            BUMP_REDS(p, (init_reds - nr_of_reductions) / iterations_per_red);
            if (state_holder->trap_state == NULL) {
                return ret;
            } else {
                erts_set_gc_state(p, 0);
                return erts_ycf_trap_driver_trap_helper(p, bif_args,
                                                        nr_of_arguments,
                                                        bif_args[0],
                                                        BIF_TRAP_EXPORT(export_entry_index));
            }
        }
    }
    {
        void *trap_state = NULL;
        BIF_RETTYPE ret;
#if defined(DEBUG) && defined(ARCH_64)
        ycf_debug_set_stack_start(&nr_of_reductions);
#endif
        /* Start a new call */
        ret = ycf_yielding_fun(&nr_of_reductions,
                               &trap_state,
                               NULL,
                               erts_ycf_trap_driver_alloc,
                               erts_ycf_trap_driver_free,
                               (void*)(Uint)memory_allocation_type,
                               ycf_stack_alloc_size,
                               NULL,
                               p,
                               bif_args);
#if defined(DEBUG) && defined(ARCH_64)
        ycf_debug_reset_stack_start();
#endif
        BUMP_REDS(p, (init_reds - nr_of_reductions) / iterations_per_red);
        if (trap_state == NULL) {
            /* The operation has completed */
            return ret;
        } else {
            /* We need to trap */
            Binary* state_bin =
                erts_create_magic_binary(sizeof(erts_ycf_trap_driver_state_holder),
                                         erts_ycf_trap_driver_dtor);
            Eterm* hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
            Eterm state_mref = erts_mk_magic_ref(&hp, &MSO(p), state_bin);
            erts_ycf_trap_driver_state_holder *holder =
                ERTS_MAGIC_BIN_DATA(state_bin);
            holder->ycf_continue = ycf_continue_fun;
            holder->ycf_destroy_trap_state = ycf_destroy_fun;
            holder->trap_state = trap_state;
erts_set_gc_state(p, 0); return erts_ycf_trap_driver_trap_helper(p, bif_args, nr_of_arguments, state_mref, BIF_TRAP_EXPORT(export_entry_index)); } } }
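/*
 * Illustrative sketch (kept out of the build with #if 0) of how a BIF can
 * use erts_ycf_trap_driver together with the functions that YCF generates.
 * All my_op_* names and the numeric constants below are hypothetical; a
 * real caller passes the *_ycf_gen_yielding, *_ycf_gen_continue and
 * *_ycf_gen_destroy functions that YCF generated for the C function being
 * made yielding (cf. the erts_qsort note above), plus the export entry
 * index used for trapping.
 */
#if 0
BIF_RETTYPE my_op_1(BIF_ALIST_1)
{
    return erts_ycf_trap_driver(BIF_P, BIF__ARGS,
                                1,               /* BIF arity */
                                100,             /* iterations per reduction */
                                ERTS_ALC_T_TMP,  /* allocator for YCF state */
                                1024,            /* YCF stack alloc size */
                                BIF_my_op_1,     /* export entry index */
                                my_op_ycf_gen_continue,
                                my_op_ycf_gen_destroy,
                                my_op_ycf_gen_yielding);
}
#endif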