QuickJSR/0000755000176200001440000000000014556440772011723 5ustar liggesusersQuickJSR/NAMESPACE0000644000176200001440000000044714554251671013143 0ustar liggesusers# Generated by roxygen2: do not edit by hand export(JSContext) export(cxxflags) export(ldflags) export(qjs_eval) export(quickjs_version) importFrom(R6,R6Class) importFrom(Rcpp,Rcpp.plugin.maker) importFrom(Rcpp,sourceCpp) importFrom(jsonlite,fromJSON) useDynLib(QuickJSR, .registration = TRUE) QuickJSR/LICENSE0000644000176200001440000000005614554251671012725 0ustar liggesusersYEAR: 2023 COPYRIGHT HOLDER: QuickJSR authors QuickJSR/README.md0000644000176200001440000000044214554251671013176 0ustar liggesusers# QuickJSR [![R-CMD-check](https://github.com/andrjohns/QuickJSR/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/andrjohns/QuickJSR/actions/workflows/R-CMD-check.yaml) R interface for the QuickJS lightweight javascript engine QuickJSR/man/0000755000176200001440000000000014554251671012472 5ustar liggesusersQuickJSR/man/quickjs_version.Rd0000644000176200001440000000054014554251671016176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{quickjs_version} \alias{quickjs_version} \title{Get the version of the bundled QuickJS library} \usage{ quickjs_version() } \value{ Character string of the version of the bundled QuickJS library } \description{ Get the version of the bundled QuickJS library } QuickJSR/man/qjs_eval.Rd0000644000176200001440000000114014554251671014561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/qjs.R \name{qjs_eval} \alias{qjs_eval} \title{qjs_eval} \usage{ qjs_eval(eval_string) } \arguments{ \item{eval_string}{A single string of the expression to evaluate} } \value{ The result of the provided expression, the return type is mapped from JS to R using \code{jsonlite::fromJSON()} } \description{ Evaluate a single Javascript expression. } \examples{ # Return the sum of two numbers: qjs_eval("1 + 2") # Concatenate strings: qjs_eval("'1' + '2'") # Create lists from objects: qjs_eval("var t = {'a' : 1, 'b' : 2}; t") } QuickJSR/man/JSContext.Rd0000644000176200001440000001017314554251671014644 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/JSContext.R \name{JSContext} \alias{JSContext} \title{JSContext object} \value{ A JSContext object containing an initialised JavaScript context for evaluating scripts/commands } \description{ An initialised context within which to evaluate Javascript scripts or commands. } \section{Methods}{ \subsection{Public methods}{ \itemize{ \item \href{#method-JSContext-new}{\code{JSContext$new()}} \item \href{#method-JSContext-validate}{\code{JSContext$validate()}} \item \href{#method-JSContext-source}{\code{JSContext$source()}} \item \href{#method-JSContext-call}{\code{JSContext$call()}} \item \href{#method-JSContext-clone}{\code{JSContext$clone()}} } } \if{html}{\out{
}} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-JSContext-new}{}}} \subsection{Method \code{new()}}{ Creates a new JSContext instance and initialises the QuickJS runtime and evaluation context \subsection{Usage}{ \if{html}{\out{
}}\preformatted{JSContext$new(stack_size = NULL, disable_stack_size_check = TRUE)}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ \item{\code{stack_size}}{An optional fixed value for the stack size (in bytes)} \item{\code{disable_stack_size_check}}{Disable fixed/automatic stack size allocation.} } \if{html}{\out{
}} } \subsection{Returns}{ No return value, used internally to initialise the JSContext object } } \if{html}{\out{
}}
\if{html}{\out{}}
\if{latex}{\out{\hypertarget{method-JSContext-validate}{}}}
\subsection{Method \code{validate()}}{
Checks whether a string of JS code is valid in the current context
\subsection{Usage}{
\if{html}{\out{
}}\preformatted{JSContext$validate(code_string)}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ \item{\code{code_string}}{The JS code to check} } \if{html}{\out{
}} } \subsection{Returns}{ A boolean indicating whether code is valid } } \if{html}{\out{
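}}

A brief sketch of checking for a sourced function, adapted from the package
test suite (the object name \code{ctx} is illustrative):

\preformatted{ctx <- JSContext$new()
ctx$source(code = "function add_test(x, y) { return x + y; }")
ctx$validate("add_test") # TRUE
}

\if{html}{\out{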
}}
\if{html}{\out{}}
\if{latex}{\out{\hypertarget{method-JSContext-source}{}}}
\subsection{Method \code{source()}}{
Evaluate a provided JavaScript file or string within the initialised
context. Note that this method should only be used for initialising
functions or values within the context; no values are returned from this
function. See the \verb{$call()} method for returning values.
\subsection{Usage}{
\if{html}{\out{
}}\preformatted{JSContext$source(file = NULL, code = NULL)}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ \item{\code{file}}{A path to the JavaScript file to load} \item{\code{code}}{A single string of JavaScript to evaluate} } \if{html}{\out{
}} } \subsection{Returns}{ No return value, called for side effects } } \if{html}{\out{
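}}

A short sketch of both ways of sourcing code, adapted from the package test
suite (the object and file names are illustrative):

\preformatted{ctx <- JSContext$new()
# Source from a string:
ctx$source(code = "function add_test(x, y) { return x + y; }")
# Source from a file:
js_file <- tempfile(fileext = ".js")
writeLines("function mult_test(x, y) { return x * y; }", con = js_file)
ctx$source(file = js_file)
}

\if{html}{\out{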
}} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-JSContext-call}{}}} \subsection{Method \code{call()}}{ Call a specified function in the JavaScript context with the provided arguments. \subsection{Usage}{ \if{html}{\out{
}}\preformatted{JSContext$call(function_name, ...)}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ \item{\code{function_name}}{The function to be called} \item{\code{...}}{The arguments to be passed to the function} } \if{html}{\out{
}}
}
\subsection{Returns}{
The result of calling the specified function; the return type
is mapped from JS to R using \code{jsonlite::fromJSON()}
}
}
\if{html}{\out{
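}}

A minimal sketch of calling sourced functions, adapted from the package test
suite (the object and function names are illustrative):

\preformatted{ctx <- JSContext$new()
ctx$source(code = "function add_test(x, y) { return x + y; }")
ctx$call("add_test", 1, 2)   # 3
ctx$call("add_test", 1, "a") # "1a"
}

\if{html}{\out{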
}} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-JSContext-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ \if{html}{\out{
}}\preformatted{JSContext$clone(deep = FALSE)}\if{html}{\out{
}} } \subsection{Arguments}{ \if{html}{\out{
}} \describe{ \item{\code{deep}}{Whether to make a deep clone.} } \if{html}{\out{
}} } } } QuickJSR/man/QuickJSR-package.Rd0000644000176200001440000000153514554251671016011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/quickjsr-package.R \docType{package} \name{QuickJSR-package} \alias{QuickJSR-package} \alias{QuickJSR} \title{The QuickJSR package.} \description{ An interface to the QuickJS lightweight Javascript engine } \seealso{ Useful links: \itemize{ \item \url{https://github.com/andrjohns/QuickJSR} \item \url{https://bellard.org/quickjs/} \item Report bugs at \url{https://github.com/andrjohns/QuickJSR/issues} } } \author{ \strong{Maintainer}: Andrew R. Johnson \email{andrew.johnson@arjohnsonau.com} (\href{https://orcid.org/0000-0001-7000-8065}{ORCID}) Other contributors: \itemize{ \item Fabrice Bellard (Author of QuickJS sources and headers) [contributor, copyright holder] \item Charlie Gordon (Author of QuickJS sources and headers) [contributor, copyright holder] } } QuickJSR/man/ldflags.Rd0000644000176200001440000000073214554251671014377 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/flags.R \name{ldflags} \alias{ldflags} \title{ldflags} \usage{ ldflags(to_console = FALSE) } \arguments{ \item{to_console}{Whether the result should be returned as a string} } \value{ Character string of linker flags, or print flags to console and invisibly return NULL (for use in package Makevars or similar) } \description{ Function for returning the flags needed for linking to the package } QuickJSR/man/cxxflags.Rd0000644000176200001440000000076214554251671014605 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/flags.R \name{cxxflags} \alias{cxxflags} \title{cxxflags} \usage{ cxxflags(to_console = FALSE) } \arguments{ \item{to_console}{Whether the result should be returned as a string} } \value{ Character string of CXX flags, or print flags to console and invisibly return NULL (for use in package Makevars or similar) } \description{ Function for returning the C/C++ flags needed for compilation using the package's headers } QuickJSR/DESCRIPTION0000644000176200001440000000302214556440772013426 0ustar liggesusersPackage: QuickJSR Title: Interface for the 'QuickJS' Lightweight 'JavaScript' Engine Version: 1.1.3 Authors@R: c( person(given = c("Andrew", "R."), family = "Johnson", role = c("aut", "cre"), email = "andrew.johnson@arjohnsonau.com", comment = c(ORCID = "0000-0001-7000-8065")), person(given = "Fabrice", family = "Bellard", role = c("ctb", "cph"), comment = "Author of QuickJS sources and headers"), person(given = "Charlie", family = "Gordon", role = c("ctb", "cph"), comment = "Author of QuickJS sources and headers") ) Description: An 'R' interface to the 'QuickJS' portable 'JavaScript' engine. The engine is bundled entirely within the package, requiring no external system dependencies beyond a 'C' compiler. License: MIT + file LICENSE Encoding: UTF-8 RoxygenNote: 7.3.1 NeedsCompilation: yes SystemRequirements: GNU make Imports: jsonlite, R6, Rcpp LinkingTo: Rcpp URL: https://github.com/andrjohns/QuickJSR, https://bellard.org/quickjs/ BugReports: https://github.com/andrjohns/QuickJSR/issues Suggests: testthat (>= 3.0.0) Config/testthat/edition: 3 Config/testthat/parallel: true Language: en-GB Packaged: 2024-01-31 09:52:16 UTC; andrew Author: Andrew R. Johnson [aut, cre] (), Fabrice Bellard [ctb, cph] (Author of QuickJS sources and headers), Charlie Gordon [ctb, cph] (Author of QuickJS sources and headers) Maintainer: Andrew R. 
Johnson Repository: CRAN Date/Publication: 2024-01-31 12:50:02 UTC QuickJSR/tests/0000755000176200001440000000000014554251671013061 5ustar liggesusersQuickJSR/tests/testthat/0000755000176200001440000000000014556440772014725 5ustar liggesusersQuickJSR/tests/testthat/test-qjs_eval.R0000644000176200001440000000054414554251671017630 0ustar liggesuserstest_that("qjs_eval returns correct type & value", { expect_equal(1, qjs_eval("1")) expect_equal(2.5, qjs_eval("1 + 1.5")) expect_equal("Hello World!", qjs_eval("'Hello World!'")) expect_equal("Hello World!", qjs_eval("'Hello' + ' ' + 'World!'")) expect_equal(list(a = 1, b = "2"), qjs_eval("var t = {'a' : 1, 'b' : '2'}; t")) }) QuickJSR/tests/testthat/test-JSContext.R0000644000176200001440000000105014554251671017676 0ustar liggesuserstest_that("JSContext methods work", { jsc <- JSContext$new() jsc$source(code = "function add_test(x, y) { return x + y; }") expect_true(jsc$validate("add_test")) expect_equal(jsc$call("add_test", 1, 2), 3) expect_equal(jsc$call("add_test", 1, "a"), "1a") js_file <- tempfile(fileext = ".js") writeLines("function mult_test(x, y) { return x * y; }", con = js_file) jsc$source(file = js_file) expect_true(jsc$validate("mult_test")) expect_equal(jsc$call("mult_test", 1, 2), 2) expect_equal(jsc$call("mult_test", 10, 15), 150) }) QuickJSR/tests/testthat.R0000644000176200001440000000057014554251671015046 0ustar liggesusers# This file is part of the standard setup for testthat. # It is recommended that you do not modify it. # # Where should you do additional test configuration? # Learn more about the roles of various files in: # * https://r-pkgs.org/tests.html # * https://testthat.r-lib.org/reference/test_package.html#special-files library(testthat) library(QuickJSR) test_check("QuickJSR") QuickJSR/src/0000755000176200001440000000000014556413700012501 5ustar liggesusersQuickJSR/src/init.cpp0000644000176200001440000000267714554251671014171 0ustar liggesusers#include #include #include #include #include #include using namespace Rcpp; #ifdef __cplusplus extern "C" { #endif SEXP qjs_context_(SEXP stack_size_); SEXP qjs_source_(SEXP ctx_ptr_, SEXP code_string_); SEXP qjs_validate_(SEXP ctx_ptr_, SEXP function_name_); SEXP qjs_call_(SEXP ctx_ptr_, SEXP function_name_, SEXP args_json_); SEXP qjs_eval_(SEXP eval_string_); #ifdef __cplusplus } #endif #define CALLDEF(name, n) {#name, (DL_FUNC) &name, n} static const R_CallMethodDef CallEntries[] = { CALLDEF(qjs_context_, 1), CALLDEF(qjs_source_, 2), CALLDEF(qjs_validate_, 2), CALLDEF(qjs_call_, 3), CALLDEF(qjs_eval_, 1), {NULL, NULL, 0} }; #ifdef __cplusplus extern "C" { #endif void attribute_visible R_init_QuickJSR(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); // The call to R_useDynamicSymbols indicates that if the correct C // entry point is not found in the shared library, then an error // should be signaled. Currently, the default // behavior in R is to search all other loaded shared libraries for the // symbol, which is fairly dangerous behavior. If you have registered // all routines in your library, then you should set this to FALSE // as done in the stats package. 
[copied from `R Programming for // Bioinformatics' // by Robert Gentleman] } #ifdef __cplusplus } #endif QuickJSR/src/Makevars0000644000176200001440000000347214556412776014217 0ustar liggesusersPKG_CPPFLAGS = -I"../inst/include/" -DSTRICT_R_HEADERS -D_GNU_SOURCE -DCONFIG_BIGNUM PKG_CPPFLAGS += -DCONFIG_VERSION=\"$(shell cat quickjs/VERSION)\" PKG_LIBS = ../inst/lib/$(R_ARCH)/libquickjs.a ifeq ($(shell getconf LONG_BIT), 32) PKG_LIBS += -latomic endif # Older versions of gcc and clang don't provide the stdatomic.h header CC_CHECK := $(shell $(CC) --version 2>&1) CC_VERSION := $(shell $(CC) -dumpfullversion -dumpversion 2>&1) CC_MAJOR := $(word 1,$(subst ., ,$(CC_VERSION))) ifneq (,$(findstring gcc,$(CC_CHECK))) ifeq ($(shell expr $(CC_MAJOR) \< 5), 1) PKG_CPPFLAGS += -DDISABLE_ATOMICS endif endif ifneq (,$(findstring GCC,$(CC_CHECK))) ifeq ($(shell expr $(CC_MAJOR) \< 5), 1) PKG_CPPFLAGS += -DDISABLE_ATOMICS endif endif ifneq (,$(findstring clang,$(CC_CHECK))) ifeq ($(shell expr $(CC_MAJOR) \< 4), 1) PKG_CPPFLAGS += -DDISABLE_ATOMICS endif endif QUICKJS_C_FILES = cutils.c libbf.c libregexp.c libunicode.c quickjs.c quickjs-libc.c QUICKJS_C_HEADERS = $(QUICKJS_C_FILES:.c=.h) \ libregexp-opcode.h libunicode-table.h list.h \ quickjs-atom.h quickjs-opcode.h QUICKJS_SOURCES = $(QUICKJS_C_FILES:%=quickjs/%) QUICKJS_OBJECTS = $(QUICKJS_SOURCES:.c=.o) SOURCES = quickjsr_impl.c quickjsr.cpp init.cpp OBJECTS = quickjsr_impl.o quickjsr.o init.o .PHONY: all package-quickjs build-static all: package-quickjs $(SHLIB) $(SHLIB): build-static package-quickjs: @mkdir -p ../inst/include @cp $(QUICKJS_C_HEADERS:%=quickjs/%) ../inst/include @cp quickjs/VERSION ../inst/ build-static: $(QUICKJS_OBJECTS) @mkdir -p ../inst/lib/$(R_ARCH) $(AR) -rs ../inst/lib/$(R_ARCH)/libquickjs.a $(QUICKJS_OBJECTS) $(QUICKJS_OBJECTS): quickjs/%.o : quickjs/%.c $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -funsigned-char -fwrapv -std=c11 -c $< -o $@ clean: $(RM) $(QUICKJS_OBJECTS) ../inst/lib/$(R_ARCH)/libquickjs.a QuickJSR/src/quickjs/0000755000176200001440000000000014556414120014147 5ustar liggesusersQuickJSR/src/quickjs/libbf.c0000644000176200001440000073016214554252063015406 0ustar liggesusers/* * Tiny arbitrary precision floating point library * * Copyright (c) 2017-2021 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include #include #include #include #include #include #ifdef __AVX2__ #include #endif #include "cutils.h" #include "libbf.h" /* enable it to check the multiplication result */ //#define USE_MUL_CHECK #ifdef CONFIG_BIGNUM /* enable it to use FFT/NTT multiplication */ #define USE_FFT_MUL /* enable decimal floating point support */ #define USE_BF_DEC #endif //#define inline __attribute__((always_inline)) #ifdef __AVX2__ #define FFT_MUL_THRESHOLD 100 /* in limbs of the smallest factor */ #else #define FFT_MUL_THRESHOLD 100 /* in limbs of the smallest factor */ #endif /* XXX: adjust */ #define DIVNORM_LARGE_THRESHOLD 50 #define UDIV1NORM_THRESHOLD 3 #if LIMB_BITS == 64 #define FMT_LIMB1 "%" PRIx64 #define FMT_LIMB "%016" PRIx64 #define PRId_LIMB PRId64 #define PRIu_LIMB PRIu64 #else #define FMT_LIMB1 "%x" #define FMT_LIMB "%08x" #define PRId_LIMB "d" #define PRIu_LIMB "u" #endif typedef intptr_t mp_size_t; typedef int bf_op2_func_t(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags); #ifdef USE_FFT_MUL #define FFT_MUL_R_OVERLAP_A (1 << 0) #define FFT_MUL_R_OVERLAP_B (1 << 1) #define FFT_MUL_R_NORESIZE (1 << 2) static no_inline int fft_mul(bf_context_t *s, bf_t *res, limb_t *a_tab, limb_t a_len, limb_t *b_tab, limb_t b_len, int mul_flags); static void fft_clear_cache(bf_context_t *s); #endif #ifdef USE_BF_DEC static limb_t get_digit(const limb_t *tab, limb_t len, slimb_t pos); #endif /* could leading zeros */ static inline int clz(limb_t a) { if (a == 0) { return LIMB_BITS; } else { #if LIMB_BITS == 64 return clz64(a); #else return clz32(a); #endif } } static inline int ctz(limb_t a) { if (a == 0) { return LIMB_BITS; } else { #if LIMB_BITS == 64 return ctz64(a); #else return ctz32(a); #endif } } static inline int ceil_log2(limb_t a) { if (a <= 1) return 0; else return LIMB_BITS - clz(a - 1); } /* b must be >= 1 */ static inline slimb_t ceil_div(slimb_t a, slimb_t b) { if (a >= 0) return (a + b - 1) / b; else return a / b; } /* b must be >= 1 */ static inline slimb_t floor_div(slimb_t a, slimb_t b) { if (a >= 0) { return a / b; } else { return (a - b + 1) / b; } } /* return r = a modulo b (0 <= r <= b - 1. b must be >= 1 */ static inline limb_t smod(slimb_t a, slimb_t b) { a = a % (slimb_t)b; if (a < 0) a += b; return a; } /* signed addition with saturation */ static inline slimb_t sat_add(slimb_t a, slimb_t b) { slimb_t r; r = a + b; /* overflow ? 
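       (i.e. both operands have the same sign but the sum's sign differs);
       if so, saturate to the most positive value when a >= 0 and to the
       most negative value otherwise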
*/ if (((a ^ r) & (b ^ r)) < 0) r = (a >> (LIMB_BITS - 1)) ^ (((limb_t)1 << (LIMB_BITS - 1)) - 1); return r; } static inline __maybe_unused limb_t shrd(limb_t low, limb_t high, long shift) { if (shift != 0) low = (low >> shift) | (high << (LIMB_BITS - shift)); return low; } static inline __maybe_unused limb_t shld(limb_t a1, limb_t a0, long shift) { if (shift != 0) return (a1 << shift) | (a0 >> (LIMB_BITS - shift)); else return a1; } #define malloc(s) malloc_is_forbidden(s) #define free(p) free_is_forbidden(p) #define realloc(p, s) realloc_is_forbidden(p, s) void bf_context_init(bf_context_t *s, bf_realloc_func_t *realloc_func, void *realloc_opaque) { memset(s, 0, sizeof(*s)); s->realloc_func = realloc_func; s->realloc_opaque = realloc_opaque; } void bf_context_end(bf_context_t *s) { bf_clear_cache(s); } void bf_init(bf_context_t *s, bf_t *r) { r->ctx = s; r->sign = 0; r->expn = BF_EXP_ZERO; r->len = 0; r->tab = NULL; } /* return 0 if OK, -1 if alloc error */ int bf_resize(bf_t *r, limb_t len) { limb_t *tab; if (len != r->len) { tab = bf_realloc(r->ctx, r->tab, len * sizeof(limb_t)); if (!tab && len != 0) return -1; r->tab = tab; r->len = len; } return 0; } /* return 0 or BF_ST_MEM_ERROR */ int bf_set_ui(bf_t *r, uint64_t a) { r->sign = 0; if (a == 0) { r->expn = BF_EXP_ZERO; bf_resize(r, 0); /* cannot fail */ } #if LIMB_BITS == 32 else if (a <= 0xffffffff) #else else #endif { int shift; if (bf_resize(r, 1)) goto fail; shift = clz(a); r->tab[0] = a << shift; r->expn = LIMB_BITS - shift; } #if LIMB_BITS == 32 else { uint32_t a1, a0; int shift; if (bf_resize(r, 2)) goto fail; a0 = a; a1 = a >> 32; shift = clz(a1); r->tab[0] = a0 << shift; r->tab[1] = shld(a1, a0, shift); r->expn = 2 * LIMB_BITS - shift; } #endif return 0; fail: bf_set_nan(r); return BF_ST_MEM_ERROR; } /* return 0 or BF_ST_MEM_ERROR */ int bf_set_si(bf_t *r, int64_t a) { int ret; if (a < 0) { ret = bf_set_ui(r, -a); r->sign = 1; } else { ret = bf_set_ui(r, a); } return ret; } void bf_set_nan(bf_t *r) { bf_resize(r, 0); /* cannot fail */ r->expn = BF_EXP_NAN; r->sign = 0; } void bf_set_zero(bf_t *r, int is_neg) { bf_resize(r, 0); /* cannot fail */ r->expn = BF_EXP_ZERO; r->sign = is_neg; } void bf_set_inf(bf_t *r, int is_neg) { bf_resize(r, 0); /* cannot fail */ r->expn = BF_EXP_INF; r->sign = is_neg; } /* return 0 or BF_ST_MEM_ERROR */ int bf_set(bf_t *r, const bf_t *a) { if (r == a) return 0; if (bf_resize(r, a->len)) { bf_set_nan(r); return BF_ST_MEM_ERROR; } r->sign = a->sign; r->expn = a->expn; memcpy(r->tab, a->tab, a->len * sizeof(limb_t)); return 0; } /* equivalent to bf_set(r, a); bf_delete(a) */ void bf_move(bf_t *r, bf_t *a) { bf_context_t *s = r->ctx; if (r == a) return; bf_free(s, r->tab); *r = *a; } static limb_t get_limbz(const bf_t *a, limb_t idx) { if (idx >= a->len) return 0; else return a->tab[idx]; } /* get LIMB_BITS at bit position 'pos' in tab */ static inline limb_t get_bits(const limb_t *tab, limb_t len, slimb_t pos) { limb_t i, a0, a1; int p; i = pos >> LIMB_LOG2_BITS; p = pos & (LIMB_BITS - 1); if (i < len) a0 = tab[i]; else a0 = 0; if (p == 0) { return a0; } else { i++; if (i < len) a1 = tab[i]; else a1 = 0; return (a0 >> p) | (a1 << (LIMB_BITS - p)); } } static inline limb_t get_bit(const limb_t *tab, limb_t len, slimb_t pos) { slimb_t i; i = pos >> LIMB_LOG2_BITS; if (i < 0 || i >= len) return 0; return (tab[i] >> (pos & (LIMB_BITS - 1))) & 1; } static inline limb_t limb_mask(int start, int last) { limb_t v; int n; n = last - start + 1; if (n == LIMB_BITS) v = -1; else v = (((limb_t)1 << n) - 1) << 
start; return v; } static limb_t mp_scan_nz(const limb_t *tab, mp_size_t n) { mp_size_t i; for(i = 0; i < n; i++) { if (tab[i] != 0) return 1; } return 0; } /* return != 0 if one bit between 0 and bit_pos inclusive is not zero. */ static inline limb_t scan_bit_nz(const bf_t *r, slimb_t bit_pos) { slimb_t pos; limb_t v; pos = bit_pos >> LIMB_LOG2_BITS; if (pos < 0) return 0; v = r->tab[pos] & limb_mask(0, bit_pos & (LIMB_BITS - 1)); if (v != 0) return 1; pos--; while (pos >= 0) { if (r->tab[pos] != 0) return 1; pos--; } return 0; } /* return the addend for rounding. Note that prec can be <= 0 (for BF_FLAG_RADPNT_PREC) */ static int bf_get_rnd_add(int *pret, const bf_t *r, limb_t l, slimb_t prec, int rnd_mode) { int add_one, inexact; limb_t bit1, bit0; if (rnd_mode == BF_RNDF) { bit0 = 1; /* faithful rounding does not honor the INEXACT flag */ } else { /* starting limb for bit 'prec + 1' */ bit0 = scan_bit_nz(r, l * LIMB_BITS - 1 - bf_max(0, prec + 1)); } /* get the bit at 'prec' */ bit1 = get_bit(r->tab, l, l * LIMB_BITS - 1 - prec); inexact = (bit1 | bit0) != 0; add_one = 0; switch(rnd_mode) { case BF_RNDZ: break; case BF_RNDN: if (bit1) { if (bit0) { add_one = 1; } else { /* round to even */ add_one = get_bit(r->tab, l, l * LIMB_BITS - 1 - (prec - 1)); } } break; case BF_RNDD: case BF_RNDU: if (r->sign == (rnd_mode == BF_RNDD)) add_one = inexact; break; case BF_RNDA: add_one = inexact; break; case BF_RNDNA: case BF_RNDF: add_one = bit1; break; default: abort(); } if (inexact) *pret |= BF_ST_INEXACT; return add_one; } static int bf_set_overflow(bf_t *r, int sign, limb_t prec, bf_flags_t flags) { slimb_t i, l, e_max; int rnd_mode; rnd_mode = flags & BF_RND_MASK; if (prec == BF_PREC_INF || rnd_mode == BF_RNDN || rnd_mode == BF_RNDNA || rnd_mode == BF_RNDA || (rnd_mode == BF_RNDD && sign == 1) || (rnd_mode == BF_RNDU && sign == 0)) { bf_set_inf(r, sign); } else { /* set to maximum finite number */ l = (prec + LIMB_BITS - 1) / LIMB_BITS; if (bf_resize(r, l)) { bf_set_nan(r); return BF_ST_MEM_ERROR; } r->tab[0] = limb_mask((-prec) & (LIMB_BITS - 1), LIMB_BITS - 1); for(i = 1; i < l; i++) r->tab[i] = (limb_t)-1; e_max = (limb_t)1 << (bf_get_exp_bits(flags) - 1); r->expn = e_max; r->sign = sign; } return BF_ST_OVERFLOW | BF_ST_INEXACT; } /* round to prec1 bits assuming 'r' is non zero and finite. 'r' is assumed to have length 'l' (1 <= l <= r->len). Note: 'prec1' can be infinite (BF_PREC_INF). 'ret' is 0 or BF_ST_INEXACT if the result is known to be inexact. Can fail with BF_ST_MEM_ERROR in case of overflow not returning infinity. 
*/ static int __bf_round(bf_t *r, limb_t prec1, bf_flags_t flags, limb_t l, int ret) { limb_t v, a; int shift, add_one, rnd_mode; slimb_t i, bit_pos, pos, e_min, e_max, e_range, prec; /* e_min and e_max are computed to match the IEEE 754 conventions */ e_range = (limb_t)1 << (bf_get_exp_bits(flags) - 1); e_min = -e_range + 3; e_max = e_range; if (flags & BF_FLAG_RADPNT_PREC) { /* 'prec' is the precision after the radix point */ if (prec1 != BF_PREC_INF) prec = r->expn + prec1; else prec = prec1; } else if (unlikely(r->expn < e_min) && (flags & BF_FLAG_SUBNORMAL)) { /* restrict the precision in case of potentially subnormal result */ assert(prec1 != BF_PREC_INF); prec = prec1 - (e_min - r->expn); } else { prec = prec1; } /* round to prec bits */ rnd_mode = flags & BF_RND_MASK; add_one = bf_get_rnd_add(&ret, r, l, prec, rnd_mode); if (prec <= 0) { if (add_one) { bf_resize(r, 1); /* cannot fail */ r->tab[0] = (limb_t)1 << (LIMB_BITS - 1); r->expn += 1 - prec; ret |= BF_ST_UNDERFLOW | BF_ST_INEXACT; return ret; } else { goto underflow; } } else if (add_one) { limb_t carry; /* add one starting at digit 'prec - 1' */ bit_pos = l * LIMB_BITS - 1 - (prec - 1); pos = bit_pos >> LIMB_LOG2_BITS; carry = (limb_t)1 << (bit_pos & (LIMB_BITS - 1)); for(i = pos; i < l; i++) { v = r->tab[i] + carry; carry = (v < carry); r->tab[i] = v; if (carry == 0) break; } if (carry) { /* shift right by one digit */ v = 1; for(i = l - 1; i >= pos; i--) { a = r->tab[i]; r->tab[i] = (a >> 1) | (v << (LIMB_BITS - 1)); v = a; } r->expn++; } } /* check underflow */ if (unlikely(r->expn < e_min)) { if (flags & BF_FLAG_SUBNORMAL) { /* if inexact, also set the underflow flag */ if (ret & BF_ST_INEXACT) ret |= BF_ST_UNDERFLOW; } else { underflow: ret |= BF_ST_UNDERFLOW | BF_ST_INEXACT; bf_set_zero(r, r->sign); return ret; } } /* check overflow */ if (unlikely(r->expn > e_max)) return bf_set_overflow(r, r->sign, prec1, flags); /* keep the bits starting at 'prec - 1' */ bit_pos = l * LIMB_BITS - 1 - (prec - 1); i = bit_pos >> LIMB_LOG2_BITS; if (i >= 0) { shift = bit_pos & (LIMB_BITS - 1); if (shift != 0) r->tab[i] &= limb_mask(shift, LIMB_BITS - 1); } else { i = 0; } /* remove trailing zeros */ while (r->tab[i] == 0) i++; if (i > 0) { l -= i; memmove(r->tab, r->tab + i, l * sizeof(limb_t)); } bf_resize(r, l); /* cannot fail */ return ret; } /* 'r' must be a finite number. */ int bf_normalize_and_round(bf_t *r, limb_t prec1, bf_flags_t flags) { limb_t l, v, a; int shift, ret; slimb_t i; // bf_print_str("bf_renorm", r); l = r->len; while (l > 0 && r->tab[l - 1] == 0) l--; if (l == 0) { /* zero */ r->expn = BF_EXP_ZERO; bf_resize(r, 0); /* cannot fail */ ret = 0; } else { r->expn -= (r->len - l) * LIMB_BITS; /* shift to have the MSB set to '1' */ v = r->tab[l - 1]; shift = clz(v); if (shift != 0) { v = 0; for(i = 0; i < l; i++) { a = r->tab[i]; r->tab[i] = (a << shift) | (v >> (LIMB_BITS - shift)); v = a; } r->expn -= shift; } ret = __bf_round(r, prec1, flags, l, 0); } // bf_print_str("r_final", r); return ret; } /* return true if rounding can be done at precision 'prec' assuming the exact result r is such that |r-a| <= 2^(EXP(a)-k). 
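   In other words, 'a' is already accurate to within one ulp at bit position
   k, so rounding at 'prec' can only be unsafe if the bits of 'a' just below
   the rounding position all match the ambiguous pattern (0111.../1000... for
   round-to-nearest, 000.../111... otherwise) down to position k.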
*/ /* XXX: check the case where the exponent would be incremented by the rounding */ int bf_can_round(const bf_t *a, slimb_t prec, bf_rnd_t rnd_mode, slimb_t k) { BOOL is_rndn; slimb_t bit_pos, n; limb_t bit; if (a->expn == BF_EXP_INF || a->expn == BF_EXP_NAN) return FALSE; if (rnd_mode == BF_RNDF) { return (k >= (prec + 1)); } if (a->expn == BF_EXP_ZERO) return FALSE; is_rndn = (rnd_mode == BF_RNDN || rnd_mode == BF_RNDNA); if (k < (prec + 2)) return FALSE; bit_pos = a->len * LIMB_BITS - 1 - prec; n = k - prec; /* bit pattern for RNDN or RNDNA: 0111.. or 1000... for other rounding modes: 000... or 111... */ bit = get_bit(a->tab, a->len, bit_pos); bit_pos--; n--; bit ^= is_rndn; /* XXX: slow, but a few iterations on average */ while (n != 0) { if (get_bit(a->tab, a->len, bit_pos) != bit) return TRUE; bit_pos--; n--; } return FALSE; } /* Cannot fail with BF_ST_MEM_ERROR. */ int bf_round(bf_t *r, limb_t prec, bf_flags_t flags) { if (r->len == 0) return 0; return __bf_round(r, prec, flags, r->len, 0); } /* for debugging */ static __maybe_unused void dump_limbs(const char *str, const limb_t *tab, limb_t n) { limb_t i; printf("%s: len=%" PRId_LIMB "\n", str, n); for(i = 0; i < n; i++) { printf("%" PRId_LIMB ": " FMT_LIMB "\n", i, tab[i]); } } void mp_print_str(const char *str, const limb_t *tab, limb_t n) { slimb_t i; printf("%s= 0x", str); for(i = n - 1; i >= 0; i--) { if (i != (n - 1)) printf("_"); printf(FMT_LIMB, tab[i]); } printf("\n"); } static __maybe_unused void mp_print_str_h(const char *str, const limb_t *tab, limb_t n, limb_t high) { slimb_t i; printf("%s= 0x", str); printf(FMT_LIMB, high); for(i = n - 1; i >= 0; i--) { printf("_"); printf(FMT_LIMB, tab[i]); } printf("\n"); } /* for debugging */ void bf_print_str(const char *str, const bf_t *a) { slimb_t i; printf("%s=", str); if (a->expn == BF_EXP_NAN) { printf("NaN"); } else { if (a->sign) putchar('-'); if (a->expn == BF_EXP_ZERO) { putchar('0'); } else if (a->expn == BF_EXP_INF) { printf("Inf"); } else { printf("0x0."); for(i = a->len - 1; i >= 0; i--) printf(FMT_LIMB, a->tab[i]); printf("p%" PRId_LIMB, a->expn); } } printf("\n"); } /* compare the absolute value of 'a' and 'b'. Return < 0 if a < b, 0 if a = b and > 0 otherwise. 
*/ int bf_cmpu(const bf_t *a, const bf_t *b) { slimb_t i; limb_t len, v1, v2; if (a->expn != b->expn) { if (a->expn < b->expn) return -1; else return 1; } len = bf_max(a->len, b->len); for(i = len - 1; i >= 0; i--) { v1 = get_limbz(a, a->len - len + i); v2 = get_limbz(b, b->len - len + i); if (v1 != v2) { if (v1 < v2) return -1; else return 1; } } return 0; } /* Full order: -0 < 0, NaN == NaN and NaN is larger than all other numbers */ int bf_cmp_full(const bf_t *a, const bf_t *b) { int res; if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { if (a->expn == b->expn) res = 0; else if (a->expn == BF_EXP_NAN) res = 1; else res = -1; } else if (a->sign != b->sign) { res = 1 - 2 * a->sign; } else { res = bf_cmpu(a, b); if (a->sign) res = -res; } return res; } /* Standard floating point comparison: return 2 if one of the operands is NaN (unordered) or -1, 0, 1 depending on the ordering assuming -0 == +0 */ int bf_cmp(const bf_t *a, const bf_t *b) { int res; if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { res = 2; } else if (a->sign != b->sign) { if (a->expn == BF_EXP_ZERO && b->expn == BF_EXP_ZERO) res = 0; else res = 1 - 2 * a->sign; } else { res = bf_cmpu(a, b); if (a->sign) res = -res; } return res; } /* Compute the number of bits 'n' matching the pattern: a= X1000..0 b= X0111..1 When computing a-b, the result will have at least n leading zero bits. Precondition: a > b and a.expn - b.expn = 0 or 1 */ static limb_t count_cancelled_bits(const bf_t *a, const bf_t *b) { slimb_t bit_offset, b_offset, n; int p, p1; limb_t v1, v2, mask; bit_offset = a->len * LIMB_BITS - 1; b_offset = (b->len - a->len) * LIMB_BITS - (LIMB_BITS - 1) + a->expn - b->expn; n = 0; /* first search the equals bits */ for(;;) { v1 = get_limbz(a, bit_offset >> LIMB_LOG2_BITS); v2 = get_bits(b->tab, b->len, bit_offset + b_offset); // printf("v1=" FMT_LIMB " v2=" FMT_LIMB "\n", v1, v2); if (v1 != v2) break; n += LIMB_BITS; bit_offset -= LIMB_BITS; } /* find the position of the first different bit */ p = clz(v1 ^ v2) + 1; n += p; /* then search for '0' in a and '1' in b */ p = LIMB_BITS - p; if (p > 0) { /* search in the trailing p bits of v1 and v2 */ mask = limb_mask(0, p - 1); p1 = bf_min(clz(v1 & mask), clz((~v2) & mask)) - (LIMB_BITS - p); n += p1; if (p1 != p) goto done; } bit_offset -= LIMB_BITS; for(;;) { v1 = get_limbz(a, bit_offset >> LIMB_LOG2_BITS); v2 = get_bits(b->tab, b->len, bit_offset + b_offset); // printf("v1=" FMT_LIMB " v2=" FMT_LIMB "\n", v1, v2); if (v1 != 0 || v2 != -1) { /* different: count the matching bits */ p1 = bf_min(clz(v1), clz(~v2)); n += p1; break; } n += LIMB_BITS; bit_offset -= LIMB_BITS; } done: return n; } static int bf_add_internal(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags, int b_neg) { const bf_t *tmp; int is_sub, ret, cmp_res, a_sign, b_sign; a_sign = a->sign; b_sign = b->sign ^ b_neg; is_sub = a_sign ^ b_sign; cmp_res = bf_cmpu(a, b); if (cmp_res < 0) { tmp = a; a = b; b = tmp; a_sign = b_sign; /* b_sign is never used later */ } /* abs(a) >= abs(b) */ if (cmp_res == 0 && is_sub && a->expn < BF_EXP_INF) { /* zero result */ bf_set_zero(r, (flags & BF_RND_MASK) == BF_RNDD); ret = 0; } else if (a->len == 0 || b->len == 0) { ret = 0; if (a->expn >= BF_EXP_INF) { if (a->expn == BF_EXP_NAN) { /* at least one operand is NaN */ bf_set_nan(r); } else if (b->expn == BF_EXP_INF && is_sub) { /* infinities with different signs */ bf_set_nan(r); ret = BF_ST_INVALID_OP; } else { bf_set_inf(r, a_sign); } } else { /* at least one zero and not subtract */ bf_set(r, a); 
r->sign = a_sign; goto renorm; } } else { slimb_t d, a_offset, b_bit_offset, i, cancelled_bits; limb_t carry, v1, v2, u, r_len, carry1, precl, tot_len, z, sub_mask; r->sign = a_sign; r->expn = a->expn; d = a->expn - b->expn; /* must add more precision for the leading cancelled bits in subtraction */ if (is_sub) { if (d <= 1) cancelled_bits = count_cancelled_bits(a, b); else cancelled_bits = 1; } else { cancelled_bits = 0; } /* add two extra bits for rounding */ precl = (cancelled_bits + prec + 2 + LIMB_BITS - 1) / LIMB_BITS; tot_len = bf_max(a->len, b->len + (d + LIMB_BITS - 1) / LIMB_BITS); r_len = bf_min(precl, tot_len); if (bf_resize(r, r_len)) goto fail; a_offset = a->len - r_len; b_bit_offset = (b->len - r_len) * LIMB_BITS + d; /* compute the bits before for the rounding */ carry = is_sub; z = 0; sub_mask = -is_sub; i = r_len - tot_len; while (i < 0) { slimb_t ap, bp; BOOL inflag; ap = a_offset + i; bp = b_bit_offset + i * LIMB_BITS; inflag = FALSE; if (ap >= 0 && ap < a->len) { v1 = a->tab[ap]; inflag = TRUE; } else { v1 = 0; } if (bp + LIMB_BITS > 0 && bp < (slimb_t)(b->len * LIMB_BITS)) { v2 = get_bits(b->tab, b->len, bp); inflag = TRUE; } else { v2 = 0; } if (!inflag) { /* outside 'a' and 'b': go directly to the next value inside a or b so that the running time does not depend on the exponent difference */ i = 0; if (ap < 0) i = bf_min(i, -a_offset); /* b_bit_offset + i * LIMB_BITS + LIMB_BITS >= 1 equivalent to i >= ceil(-b_bit_offset + 1 - LIMB_BITS) / LIMB_BITS) */ if (bp + LIMB_BITS <= 0) i = bf_min(i, (-b_bit_offset) >> LIMB_LOG2_BITS); } else { i++; } v2 ^= sub_mask; u = v1 + v2; carry1 = u < v1; u += carry; carry = (u < carry) | carry1; z |= u; } /* and the result */ for(i = 0; i < r_len; i++) { v1 = get_limbz(a, a_offset + i); v2 = get_bits(b->tab, b->len, b_bit_offset + i * LIMB_BITS); v2 ^= sub_mask; u = v1 + v2; carry1 = u < v1; u += carry; carry = (u < carry) | carry1; r->tab[i] = u; } /* set the extra bits for the rounding */ r->tab[0] |= (z != 0); /* carry is only possible in add case */ if (!is_sub && carry) { if (bf_resize(r, r_len + 1)) goto fail; r->tab[r_len] = 1; r->expn += LIMB_BITS; } renorm: ret = bf_normalize_and_round(r, prec, flags); } return ret; fail: bf_set_nan(r); return BF_ST_MEM_ERROR; } static int __bf_add(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags) { return bf_add_internal(r, a, b, prec, flags, 0); } static int __bf_sub(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags) { return bf_add_internal(r, a, b, prec, flags, 1); } limb_t mp_add(limb_t *res, const limb_t *op1, const limb_t *op2, limb_t n, limb_t carry) { slimb_t i; limb_t k, a, v, k1; k = carry; for(i=0;i v; v = a - k; k = (v > a) | k1; res[i] = v; } return k; } /* compute 0 - op2 */ static limb_t mp_neg(limb_t *res, const limb_t *op2, mp_size_t n, limb_t carry) { int i; limb_t k, a, v, k1; k = carry; for(i=0;i v; v = a - k; k = (v > a) | k1; res[i] = v; } return k; } limb_t mp_sub_ui(limb_t *tab, limb_t b, mp_size_t n) { mp_size_t i; limb_t k, a, v; k=b; for(i=0;i v; tab[i] = a; if (k == 0) break; } return k; } /* r = (a + high*B^n) >> shift. Return the remainder r (0 <= r < 2^shift). 
1 <= shift <= LIMB_BITS - 1 */ static limb_t mp_shr(limb_t *tab_r, const limb_t *tab, mp_size_t n, int shift, limb_t high) { mp_size_t i; limb_t l, a; assert(shift >= 1 && shift < LIMB_BITS); l = high; for(i = n - 1; i >= 0; i--) { a = tab[i]; tab_r[i] = (a >> shift) | (l << (LIMB_BITS - shift)); l = a; } return l & (((limb_t)1 << shift) - 1); } /* tabr[] = taba[] * b + l. Return the high carry */ static limb_t mp_mul1(limb_t *tabr, const limb_t *taba, limb_t n, limb_t b, limb_t l) { limb_t i; dlimb_t t; for(i = 0; i < n; i++) { t = (dlimb_t)taba[i] * (dlimb_t)b + l; tabr[i] = t; l = t >> LIMB_BITS; } return l; } /* tabr[] += taba[] * b, return the high word. */ static limb_t mp_add_mul1(limb_t *tabr, const limb_t *taba, limb_t n, limb_t b) { limb_t i, l; dlimb_t t; l = 0; for(i = 0; i < n; i++) { t = (dlimb_t)taba[i] * (dlimb_t)b + l + tabr[i]; tabr[i] = t; l = t >> LIMB_BITS; } return l; } /* size of the result : op1_size + op2_size. */ static void mp_mul_basecase(limb_t *result, const limb_t *op1, limb_t op1_size, const limb_t *op2, limb_t op2_size) { limb_t i, r; result[op1_size] = mp_mul1(result, op1, op1_size, op2[0], 0); for(i=1;i= FFT_MUL_THRESHOLD)) { bf_t r_s, *r = &r_s; r->tab = result; /* XXX: optimize memory usage in API */ if (fft_mul(s, r, (limb_t *)op1, op1_size, (limb_t *)op2, op2_size, FFT_MUL_R_NORESIZE)) return -1; } else #endif { mp_mul_basecase(result, op1, op1_size, op2, op2_size); } return 0; } /* tabr[] -= taba[] * b. Return the value to substract to the high word. */ static limb_t mp_sub_mul1(limb_t *tabr, const limb_t *taba, limb_t n, limb_t b) { limb_t i, l; dlimb_t t; l = 0; for(i = 0; i < n; i++) { t = tabr[i] - (dlimb_t)taba[i] * (dlimb_t)b - l; tabr[i] = t; l = -(t >> LIMB_BITS); } return l; } /* WARNING: d must be >= 2^(LIMB_BITS-1) */ static inline limb_t udiv1norm_init(limb_t d) { limb_t a0, a1; a1 = -d - 1; a0 = -1; return (((dlimb_t)a1 << LIMB_BITS) | a0) / d; } /* return the quotient and the remainder in '*pr'of 'a1*2^LIMB_BITS+a0 / d' with 0 <= a1 < d. */ static inline limb_t udiv1norm(limb_t *pr, limb_t a1, limb_t a0, limb_t d, limb_t d_inv) { limb_t n1m, n_adj, q, r, ah; dlimb_t a; n1m = ((slimb_t)a0 >> (LIMB_BITS - 1)); n_adj = a0 + (n1m & d); a = (dlimb_t)d_inv * (a1 - n1m) + n_adj; q = (a >> LIMB_BITS) + a1; /* compute a - q * r and update q so that the remainder is\ between 0 and d - 1 */ a = ((dlimb_t)a1 << LIMB_BITS) | a0; a = a - (dlimb_t)q * d - d; ah = a >> LIMB_BITS; q += 1 + ah; r = (limb_t)a + (ah & d); *pr = r; return q; } /* b must be >= 1 << (LIMB_BITS - 1) */ static limb_t mp_div1norm(limb_t *tabr, const limb_t *taba, limb_t n, limb_t b, limb_t r) { slimb_t i; if (n >= UDIV1NORM_THRESHOLD) { limb_t b_inv; b_inv = udiv1norm_init(b); for(i = n - 1; i >= 0; i--) { tabr[i] = udiv1norm(&r, r, taba[i], b, b_inv); } } else { dlimb_t a1; for(i = n - 1; i >= 0; i--) { a1 = ((dlimb_t)r << LIMB_BITS) | taba[i]; tabr[i] = a1 / b; r = a1 % b; } } return r; } static int mp_divnorm_large(bf_context_t *s, limb_t *tabq, limb_t *taba, limb_t na, const limb_t *tabb, limb_t nb); /* base case division: divides taba[0..na-1] by tabb[0..nb-1]. tabb[nb - 1] must be >= 1 << (LIMB_BITS - 1). na - nb must be >= 0. 'taba' is modified and contains the remainder (nb limbs). tabq[0..na-nb] contains the quotient with tabq[na - nb] <= 1. 
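   This is schoolbook long division: each quotient limb is first estimated
   from the two leading limbs of the running remainder divided by
   b1 = tabb[nb - 1], then corrected by adding back the divisor while the
   partial remainder is negative.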
*/ static int mp_divnorm(bf_context_t *s, limb_t *tabq, limb_t *taba, limb_t na, const limb_t *tabb, limb_t nb) { limb_t r, a, c, q, v, b1, b1_inv, n, dummy_r; slimb_t i, j; b1 = tabb[nb - 1]; if (nb == 1) { taba[0] = mp_div1norm(tabq, taba, na, b1, 0); return 0; } n = na - nb; if (bf_min(n, nb) >= DIVNORM_LARGE_THRESHOLD) { return mp_divnorm_large(s, tabq, taba, na, tabb, nb); } if (n >= UDIV1NORM_THRESHOLD) b1_inv = udiv1norm_init(b1); else b1_inv = 0; /* first iteration: the quotient is only 0 or 1 */ q = 1; for(j = nb - 1; j >= 0; j--) { if (taba[n + j] != tabb[j]) { if (taba[n + j] < tabb[j]) q = 0; break; } } tabq[n] = q; if (q) { mp_sub(taba + n, taba + n, tabb, nb, 0); } for(i = n - 1; i >= 0; i--) { if (unlikely(taba[i + nb] >= b1)) { q = -1; } else if (b1_inv) { q = udiv1norm(&dummy_r, taba[i + nb], taba[i + nb - 1], b1, b1_inv); } else { dlimb_t al; al = ((dlimb_t)taba[i + nb] << LIMB_BITS) | taba[i + nb - 1]; q = al / b1; r = al % b1; } r = mp_sub_mul1(taba + i, tabb, nb, q); v = taba[i + nb]; a = v - r; c = (a > v); taba[i + nb] = a; if (c != 0) { /* negative result */ for(;;) { q--; c = mp_add(taba + i, taba + i, tabb, nb, 0); /* propagate carry and test if positive result */ if (c != 0) { if (++taba[i + nb] == 0) { break; } } } } tabq[i] = q; } return 0; } /* compute r=B^(2*n)/a such as a*r < B^(2*n) < a*r + 2 with n >= 1. 'a' has n limbs with a[n-1] >= B/2 and 'r' has n+1 limbs with r[n] = 1. See Modern Computer Arithmetic by Richard P. Brent and Paul Zimmermann, algorithm 3.5 */ int mp_recip(bf_context_t *s, limb_t *tabr, const limb_t *taba, limb_t n) { mp_size_t l, h, k, i; limb_t *tabxh, *tabt, c, *tabu; if (n <= 2) { /* return ceil(B^(2*n)/a) - 1 */ /* XXX: could avoid allocation */ tabu = bf_malloc(s, sizeof(limb_t) * (2 * n + 1)); tabt = bf_malloc(s, sizeof(limb_t) * (n + 2)); if (!tabt || !tabu) goto fail; for(i = 0; i < 2 * n; i++) tabu[i] = 0; tabu[2 * n] = 1; if (mp_divnorm(s, tabt, tabu, 2 * n + 1, taba, n)) goto fail; for(i = 0; i < n + 1; i++) tabr[i] = tabt[i]; if (mp_scan_nz(tabu, n) == 0) { /* only happens for a=B^n/2 */ mp_sub_ui(tabr, 1, n + 1); } } else { l = (n - 1) / 2; h = n - l; /* n=2p -> l=p-1, h = p + 1, k = p + 3 n=2p+1-> l=p, h = p + 1; k = p + 2 */ tabt = bf_malloc(s, sizeof(limb_t) * (n + h + 1)); tabu = bf_malloc(s, sizeof(limb_t) * (n + 2 * h - l + 2)); if (!tabt || !tabu) goto fail; tabxh = tabr + l; if (mp_recip(s, tabxh, taba + l, h)) goto fail; if (mp_mul(s, tabt, taba, n, tabxh, h + 1)) /* n + h + 1 limbs */ goto fail; while (tabt[n + h] != 0) { mp_sub_ui(tabxh, 1, h + 1); c = mp_sub(tabt, tabt, taba, n, 0); mp_sub_ui(tabt + n, c, h + 1); } /* T = B^(n+h) - T */ mp_neg(tabt, tabt, n + h + 1, 0); tabt[n + h]++; if (mp_mul(s, tabu, tabt + l, n + h + 1 - l, tabxh, h + 1)) goto fail; /* n + 2*h - l + 2 limbs */ k = 2 * h - l; for(i = 0; i < l; i++) tabr[i] = tabu[i + k]; mp_add(tabr + l, tabr + l, tabu + 2 * h, h, 0); } bf_free(s, tabt); bf_free(s, tabu); return 0; fail: bf_free(s, tabt); bf_free(s, tabu); return -1; } /* return -1, 0 or 1 */ static int mp_cmp(const limb_t *taba, const limb_t *tabb, mp_size_t n) { mp_size_t i; for(i = n - 1; i >= 0; i--) { if (taba[i] != tabb[i]) { if (taba[i] < tabb[i]) return -1; else return 1; } } return 0; } //#define DEBUG_DIVNORM_LARGE //#define DEBUG_DIVNORM_LARGE2 /* subquadratic divnorm */ static int mp_divnorm_large(bf_context_t *s, limb_t *tabq, limb_t *taba, limb_t na, const limb_t *tabb, limb_t nb) { limb_t *tabb_inv, nq, *tabt, i, n; nq = na - nb; #ifdef DEBUG_DIVNORM_LARGE printf("na=%d nb=%d 
nq=%d\n", (int)na, (int)nb, (int)nq); mp_print_str("a", taba, na); mp_print_str("b", tabb, nb); #endif assert(nq >= 1); n = nq; if (nq < nb) n++; tabb_inv = bf_malloc(s, sizeof(limb_t) * (n + 1)); tabt = bf_malloc(s, sizeof(limb_t) * 2 * (n + 1)); if (!tabb_inv || !tabt) goto fail; if (n >= nb) { for(i = 0; i < n - nb; i++) tabt[i] = 0; for(i = 0; i < nb; i++) tabt[i + n - nb] = tabb[i]; } else { /* truncate B: need to increment it so that the approximate inverse is smaller that the exact inverse */ for(i = 0; i < n; i++) tabt[i] = tabb[i + nb - n]; if (mp_add_ui(tabt, 1, n)) { /* tabt = B^n : tabb_inv = B^n */ memset(tabb_inv, 0, n * sizeof(limb_t)); tabb_inv[n] = 1; goto recip_done; } } if (mp_recip(s, tabb_inv, tabt, n)) goto fail; recip_done: /* Q=A*B^-1 */ if (mp_mul(s, tabt, tabb_inv, n + 1, taba + na - (n + 1), n + 1)) goto fail; for(i = 0; i < nq + 1; i++) tabq[i] = tabt[i + 2 * (n + 1) - (nq + 1)]; #ifdef DEBUG_DIVNORM_LARGE mp_print_str("q", tabq, nq + 1); #endif bf_free(s, tabt); bf_free(s, tabb_inv); tabb_inv = NULL; /* R=A-B*Q */ tabt = bf_malloc(s, sizeof(limb_t) * (na + 1)); if (!tabt) goto fail; if (mp_mul(s, tabt, tabq, nq + 1, tabb, nb)) goto fail; /* we add one more limb for the result */ mp_sub(taba, taba, tabt, nb + 1, 0); bf_free(s, tabt); /* the approximated quotient is smaller than than the exact one, hence we may have to increment it */ #ifdef DEBUG_DIVNORM_LARGE2 int cnt = 0; static int cnt_max; #endif for(;;) { if (taba[nb] == 0 && mp_cmp(taba, tabb, nb) < 0) break; taba[nb] -= mp_sub(taba, taba, tabb, nb, 0); mp_add_ui(tabq, 1, nq + 1); #ifdef DEBUG_DIVNORM_LARGE2 cnt++; #endif } #ifdef DEBUG_DIVNORM_LARGE2 if (cnt > cnt_max) { cnt_max = cnt; printf("\ncnt=%d nq=%d nb=%d\n", cnt_max, (int)nq, (int)nb); } #endif return 0; fail: bf_free(s, tabb_inv); bf_free(s, tabt); return -1; } int bf_mul(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags) { int ret, r_sign; if (a->len < b->len) { const bf_t *tmp = a; a = b; b = tmp; } r_sign = a->sign ^ b->sign; /* here b->len <= a->len */ if (b->len == 0) { if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { bf_set_nan(r); ret = 0; } else if (a->expn == BF_EXP_INF || b->expn == BF_EXP_INF) { if ((a->expn == BF_EXP_INF && b->expn == BF_EXP_ZERO) || (a->expn == BF_EXP_ZERO && b->expn == BF_EXP_INF)) { bf_set_nan(r); ret = BF_ST_INVALID_OP; } else { bf_set_inf(r, r_sign); ret = 0; } } else { bf_set_zero(r, r_sign); ret = 0; } } else { bf_t tmp, *r1 = NULL; limb_t a_len, b_len, precl; limb_t *a_tab, *b_tab; a_len = a->len; b_len = b->len; if ((flags & BF_RND_MASK) == BF_RNDF) { /* faithful rounding does not require using the full inputs */ precl = (prec + 2 + LIMB_BITS - 1) / LIMB_BITS; a_len = bf_min(a_len, precl); b_len = bf_min(b_len, precl); } a_tab = a->tab + a->len - a_len; b_tab = b->tab + b->len - b_len; #ifdef USE_FFT_MUL if (b_len >= FFT_MUL_THRESHOLD) { int mul_flags = 0; if (r == a) mul_flags |= FFT_MUL_R_OVERLAP_A; if (r == b) mul_flags |= FFT_MUL_R_OVERLAP_B; if (fft_mul(r->ctx, r, a_tab, a_len, b_tab, b_len, mul_flags)) goto fail; } else #endif { if (r == a || r == b) { bf_init(r->ctx, &tmp); r1 = r; r = &tmp; } if (bf_resize(r, a_len + b_len)) { #ifdef USE_FFT_MUL fail: #endif bf_set_nan(r); ret = BF_ST_MEM_ERROR; goto done; } mp_mul_basecase(r->tab, a_tab, a_len, b_tab, b_len); } r->sign = r_sign; r->expn = a->expn + b->expn; ret = bf_normalize_and_round(r, prec, flags); done: if (r == &tmp) bf_move(r1, &tmp); } return ret; } /* multiply 'r' by 2^e */ int bf_mul_2exp(bf_t *r, slimb_t e, limb_t 
prec, bf_flags_t flags) { slimb_t e_max; if (r->len == 0) return 0; e_max = ((limb_t)1 << BF_EXT_EXP_BITS_MAX) - 1; e = bf_max(e, -e_max); e = bf_min(e, e_max); r->expn += e; return __bf_round(r, prec, flags, r->len, 0); } /* Return e such as a=m*2^e with m odd integer. return 0 if a is zero, Infinite or Nan. */ slimb_t bf_get_exp_min(const bf_t *a) { slimb_t i; limb_t v; int k; for(i = 0; i < a->len; i++) { v = a->tab[i]; if (v != 0) { k = ctz(v); return a->expn - (a->len - i) * LIMB_BITS + k; } } return 0; } /* a and b must be finite numbers with a >= 0 and b > 0. 'q' is the integer defined as floor(a/b) and r = a - q * b. */ static void bf_tdivremu(bf_t *q, bf_t *r, const bf_t *a, const bf_t *b) { if (bf_cmpu(a, b) < 0) { bf_set_ui(q, 0); bf_set(r, a); } else { bf_div(q, a, b, bf_max(a->expn - b->expn + 1, 2), BF_RNDZ); bf_rint(q, BF_RNDZ); bf_mul(r, q, b, BF_PREC_INF, BF_RNDZ); bf_sub(r, a, r, BF_PREC_INF, BF_RNDZ); } } static int __bf_div(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags) { bf_context_t *s = r->ctx; int ret, r_sign; limb_t n, nb, precl; r_sign = a->sign ^ b->sign; if (a->expn >= BF_EXP_INF || b->expn >= BF_EXP_INF) { if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF && b->expn == BF_EXP_INF) { bf_set_nan(r); return BF_ST_INVALID_OP; } else if (a->expn == BF_EXP_INF) { bf_set_inf(r, r_sign); return 0; } else { bf_set_zero(r, r_sign); return 0; } } else if (a->expn == BF_EXP_ZERO) { if (b->expn == BF_EXP_ZERO) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_set_zero(r, r_sign); return 0; } } else if (b->expn == BF_EXP_ZERO) { bf_set_inf(r, r_sign); return BF_ST_DIVIDE_ZERO; } /* number of limbs of the quotient (2 extra bits for rounding) */ precl = (prec + 2 + LIMB_BITS - 1) / LIMB_BITS; nb = b->len; n = bf_max(a->len, precl); { limb_t *taba, na; slimb_t d; na = n + nb; taba = bf_malloc(s, (na + 1) * sizeof(limb_t)); if (!taba) goto fail; d = na - a->len; memset(taba, 0, d * sizeof(limb_t)); memcpy(taba + d, a->tab, a->len * sizeof(limb_t)); if (bf_resize(r, n + 1)) goto fail1; if (mp_divnorm(s, r->tab, taba, na, b->tab, nb)) { fail1: bf_free(s, taba); goto fail; } /* see if non zero remainder */ if (mp_scan_nz(taba, nb)) r->tab[0] |= 1; bf_free(r->ctx, taba); r->expn = a->expn - b->expn + LIMB_BITS; r->sign = r_sign; ret = bf_normalize_and_round(r, prec, flags); } return ret; fail: bf_set_nan(r); return BF_ST_MEM_ERROR; } /* division and remainder. rnd_mode is the rounding mode for the quotient. The additional rounding mode BF_RND_EUCLIDIAN is supported. 'q' is an integer. 'r' is rounded with prec and flags (prec can be BF_PREC_INF). 
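   With BF_DIVREM_EUCLIDIAN the quotient is chosen so that the remainder is
   non-negative (0 <= r < |b|).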
*/ int bf_divrem(bf_t *q, bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags, int rnd_mode) { bf_t a1_s, *a1 = &a1_s; bf_t b1_s, *b1 = &b1_s; int q_sign, ret; BOOL is_ceil, is_rndn; assert(q != a && q != b); assert(r != a && r != b); assert(q != r); if (a->len == 0 || b->len == 0) { bf_set_zero(q, 0); if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF || b->expn == BF_EXP_ZERO) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_set(r, a); return bf_round(r, prec, flags); } } q_sign = a->sign ^ b->sign; is_rndn = (rnd_mode == BF_RNDN || rnd_mode == BF_RNDNA); switch(rnd_mode) { default: case BF_RNDZ: case BF_RNDN: case BF_RNDNA: is_ceil = FALSE; break; case BF_RNDD: is_ceil = q_sign; break; case BF_RNDU: is_ceil = q_sign ^ 1; break; case BF_RNDA: is_ceil = TRUE; break; case BF_DIVREM_EUCLIDIAN: is_ceil = a->sign; break; } a1->expn = a->expn; a1->tab = a->tab; a1->len = a->len; a1->sign = 0; b1->expn = b->expn; b1->tab = b->tab; b1->len = b->len; b1->sign = 0; /* XXX: could improve to avoid having a large 'q' */ bf_tdivremu(q, r, a1, b1); if (bf_is_nan(q) || bf_is_nan(r)) goto fail; if (r->len != 0) { if (is_rndn) { int res; b1->expn--; res = bf_cmpu(r, b1); b1->expn++; if (res > 0 || (res == 0 && (rnd_mode == BF_RNDNA || get_bit(q->tab, q->len, q->len * LIMB_BITS - q->expn)))) { goto do_sub_r; } } else if (is_ceil) { do_sub_r: ret = bf_add_si(q, q, 1, BF_PREC_INF, BF_RNDZ); ret |= bf_sub(r, r, b1, BF_PREC_INF, BF_RNDZ); if (ret & BF_ST_MEM_ERROR) goto fail; } } r->sign ^= a->sign; q->sign = q_sign; return bf_round(r, prec, flags); fail: bf_set_nan(q); bf_set_nan(r); return BF_ST_MEM_ERROR; } int bf_rem(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags, int rnd_mode) { bf_t q_s, *q = &q_s; int ret; bf_init(r->ctx, q); ret = bf_divrem(q, r, a, b, prec, flags, rnd_mode); bf_delete(q); return ret; } static inline int bf_get_limb(slimb_t *pres, const bf_t *a, int flags) { #if LIMB_BITS == 32 return bf_get_int32(pres, a, flags); #else return bf_get_int64(pres, a, flags); #endif } int bf_remquo(slimb_t *pq, bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags, int rnd_mode) { bf_t q_s, *q = &q_s; int ret; bf_init(r->ctx, q); ret = bf_divrem(q, r, a, b, prec, flags, rnd_mode); bf_get_limb(pq, q, BF_GET_INT_MOD); bf_delete(q); return ret; } static __maybe_unused inline limb_t mul_mod(limb_t a, limb_t b, limb_t m) { dlimb_t t; t = (dlimb_t)a * (dlimb_t)b; return t % m; } #if defined(USE_MUL_CHECK) static limb_t mp_mod1(const limb_t *tab, limb_t n, limb_t m, limb_t r) { slimb_t i; dlimb_t t; for(i = n - 1; i >= 0; i--) { t = ((dlimb_t)r << LIMB_BITS) | tab[i]; r = t % m; } return r; } #endif static const uint16_t sqrt_table[192] = { 128,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,144,145,146,147,148,149,150,150,151,152,153,154,155,155,156,157,158,159,160,160,161,162,163,163,164,165,166,167,167,168,169,170,170,171,172,173,173,174,175,176,176,177,178,178,179,180,181,181,182,183,183,184,185,185,186,187,187,188,189,189,190,191,192,192,193,193,194,195,195,196,197,197,198,199,199,200,201,201,202,203,203,204,204,205,206,206,207,208,208,209,209,210,211,211,212,212,213,214,214,215,215,216,217,217,218,218,219,219,220,221,221,222,222,223,224,224,225,225,226,226,227,227,228,229,229,230,230,231,231,232,232,233,234,234,235,235,236,236,237,237,238,238,239,240,240,241,241,242,242,243,243,244,244,245,245,246,246,247,247,248,248,249,249,250,250,251,251,252,252,253,253,254,254,255, 
}; /* a >= 2^(LIMB_BITS - 2). Return (s, r) with s=floor(sqrt(a)) and r=a-s^2. 0 <= r <= 2 * s */ static limb_t mp_sqrtrem1(limb_t *pr, limb_t a) { limb_t s1, r1, s, r, q, u, num; /* use a table for the 16 -> 8 bit sqrt */ s1 = sqrt_table[(a >> (LIMB_BITS - 8)) - 64]; r1 = (a >> (LIMB_BITS - 16)) - s1 * s1; if (r1 > 2 * s1) { r1 -= 2 * s1 + 1; s1++; } /* one iteration to get a 32 -> 16 bit sqrt */ num = (r1 << 8) | ((a >> (LIMB_BITS - 32 + 8)) & 0xff); q = num / (2 * s1); /* q <= 2^8 */ u = num % (2 * s1); s = (s1 << 8) + q; r = (u << 8) | ((a >> (LIMB_BITS - 32)) & 0xff); r -= q * q; if ((slimb_t)r < 0) { s--; r += 2 * s + 1; } #if LIMB_BITS == 64 s1 = s; r1 = r; /* one more iteration for 64 -> 32 bit sqrt */ num = (r1 << 16) | ((a >> (LIMB_BITS - 64 + 16)) & 0xffff); q = num / (2 * s1); /* q <= 2^16 */ u = num % (2 * s1); s = (s1 << 16) + q; r = (u << 16) | ((a >> (LIMB_BITS - 64)) & 0xffff); r -= q * q; if ((slimb_t)r < 0) { s--; r += 2 * s + 1; } #endif *pr = r; return s; } /* return floor(sqrt(a)) */ limb_t bf_isqrt(limb_t a) { limb_t s, r; int k; if (a == 0) return 0; k = clz(a) & ~1; s = mp_sqrtrem1(&r, a << k); s >>= (k >> 1); return s; } static limb_t mp_sqrtrem2(limb_t *tabs, limb_t *taba) { limb_t s1, r1, s, q, u, a0, a1; dlimb_t r, num; int l; a0 = taba[0]; a1 = taba[1]; s1 = mp_sqrtrem1(&r1, a1); l = LIMB_BITS / 2; num = ((dlimb_t)r1 << l) | (a0 >> l); q = num / (2 * s1); u = num % (2 * s1); s = (s1 << l) + q; r = ((dlimb_t)u << l) | (a0 & (((limb_t)1 << l) - 1)); if (unlikely((q >> l) != 0)) r -= (dlimb_t)1 << LIMB_BITS; /* special case when q=2^l */ else r -= q * q; if ((slimb_t)(r >> LIMB_BITS) < 0) { s--; r += 2 * (dlimb_t)s + 1; } tabs[0] = s; taba[0] = r; return r >> LIMB_BITS; } //#define DEBUG_SQRTREM /* tmp_buf must contain (n / 2 + 1 limbs). *prh contains the highest limb of the remainder. */ static int mp_sqrtrem_rec(bf_context_t *s, limb_t *tabs, limb_t *taba, limb_t n, limb_t *tmp_buf, limb_t *prh) { limb_t l, h, rh, ql, qh, c, i; if (n == 1) { *prh = mp_sqrtrem2(tabs, taba); return 0; } #ifdef DEBUG_SQRTREM mp_print_str("a", taba, 2 * n); #endif l = n / 2; h = n - l; if (mp_sqrtrem_rec(s, tabs + l, taba + 2 * l, h, tmp_buf, &qh)) return -1; #ifdef DEBUG_SQRTREM mp_print_str("s1", tabs + l, h); mp_print_str_h("r1", taba + 2 * l, h, qh); mp_print_str_h("r2", taba + l, n, qh); #endif /* the remainder is in taba + 2 * l. Its high bit is in qh */ if (qh) { mp_sub(taba + 2 * l, taba + 2 * l, tabs + l, h, 0); } /* instead of dividing by 2*s, divide by s (which is normalized) and update q and r */ if (mp_divnorm(s, tmp_buf, taba + l, n, tabs + l, h)) return -1; qh += tmp_buf[l]; for(i = 0; i < l; i++) tabs[i] = tmp_buf[i]; ql = mp_shr(tabs, tabs, l, 1, qh & 1); qh = qh >> 1; /* 0 or 1 */ if (ql) rh = mp_add(taba + l, taba + l, tabs + l, h, 0); else rh = 0; #ifdef DEBUG_SQRTREM mp_print_str_h("q", tabs, l, qh); mp_print_str_h("u", taba + l, h, rh); #endif mp_add_ui(tabs + l, qh, h); #ifdef DEBUG_SQRTREM mp_print_str_h("s2", tabs, n, sh); #endif /* q = qh, tabs[l - 1 ... 0], r = taba[n - 1 ... l] */ /* subtract q^2. if qh = 1 then q = B^l, so we can take shortcuts */ if (qh) { c = qh; } else { if (mp_mul(s, taba + n, tabs, l, tabs, l)) return -1; c = mp_sub(taba, taba, taba + n, 2 * l, 0); } rh -= mp_sub_ui(taba + 2 * l, c, n - 2 * l); if ((slimb_t)rh < 0) { mp_sub_ui(tabs, 1, n); rh += mp_add_mul1(taba, tabs, n, 2); rh += mp_add_ui(taba, 1, n); } *prh = rh; return 0; } /* 'taba' has 2*n limbs with n >= 1 and taba[2*n-1] >= 2 ^ (LIMB_BITS - 2). 
Return (s, r) with s=floor(sqrt(a)) and r=a-s^2. 0 <= r <= 2 * s. tabs has n limbs. r is returned in the lower n limbs of taba. Its r[n] is the returned value of the function. */ /* Algorithm from the article "Karatsuba Square Root" by Paul Zimmermann and inspirated from its GMP implementation */ int mp_sqrtrem(bf_context_t *s, limb_t *tabs, limb_t *taba, limb_t n) { limb_t tmp_buf1[8]; limb_t *tmp_buf; mp_size_t n2; int ret; n2 = n / 2 + 1; if (n2 <= countof(tmp_buf1)) { tmp_buf = tmp_buf1; } else { tmp_buf = bf_malloc(s, sizeof(limb_t) * n2); if (!tmp_buf) return -1; } ret = mp_sqrtrem_rec(s, tabs, taba, n, tmp_buf, taba + n); if (tmp_buf != tmp_buf1) bf_free(s, tmp_buf); return ret; } /* Integer square root with remainder. 'a' must be an integer. r = floor(sqrt(a)) and rem = a - r^2. BF_ST_INEXACT is set if the result is inexact. 'rem' can be NULL if the remainder is not needed. */ int bf_sqrtrem(bf_t *r, bf_t *rem1, const bf_t *a) { int ret; if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); } else if (a->expn == BF_EXP_INF && a->sign) { goto invalid_op; } else { bf_set(r, a); } if (rem1) bf_set_ui(rem1, 0); ret = 0; } else if (a->sign) { invalid_op: bf_set_nan(r); if (rem1) bf_set_ui(rem1, 0); ret = BF_ST_INVALID_OP; } else { bf_t rem_s, *rem; bf_sqrt(r, a, (a->expn + 1) / 2, BF_RNDZ); bf_rint(r, BF_RNDZ); /* see if the result is exact by computing the remainder */ if (rem1) { rem = rem1; } else { rem = &rem_s; bf_init(r->ctx, rem); } /* XXX: could avoid recomputing the remainder */ bf_mul(rem, r, r, BF_PREC_INF, BF_RNDZ); bf_neg(rem); bf_add(rem, rem, a, BF_PREC_INF, BF_RNDZ); if (bf_is_nan(rem)) { ret = BF_ST_MEM_ERROR; goto done; } if (rem->len != 0) { ret = BF_ST_INEXACT; } else { ret = 0; } done: if (!rem1) bf_delete(rem); } return ret; } int bf_sqrt(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { bf_context_t *s = a->ctx; int ret; assert(r != a); if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); } else if (a->expn == BF_EXP_INF && a->sign) { goto invalid_op; } else { bf_set(r, a); } ret = 0; } else if (a->sign) { invalid_op: bf_set_nan(r); ret = BF_ST_INVALID_OP; } else { limb_t *a1; slimb_t n, n1; limb_t res; /* convert the mantissa to an integer with at least 2 * prec + 4 bits */ n = (2 * (prec + 2) + 2 * LIMB_BITS - 1) / (2 * LIMB_BITS); if (bf_resize(r, n)) goto fail; a1 = bf_malloc(s, sizeof(limb_t) * 2 * n); if (!a1) goto fail; n1 = bf_min(2 * n, a->len); memset(a1, 0, (2 * n - n1) * sizeof(limb_t)); memcpy(a1 + 2 * n - n1, a->tab + a->len - n1, n1 * sizeof(limb_t)); if (a->expn & 1) { res = mp_shr(a1, a1, 2 * n, 1, 0); } else { res = 0; } if (mp_sqrtrem(s, r->tab, a1, n)) { bf_free(s, a1); goto fail; } if (!res) { res = mp_scan_nz(a1, n + 1); } bf_free(s, a1); if (!res) { res = mp_scan_nz(a->tab, a->len - n1); } if (res != 0) r->tab[0] |= 1; r->sign = 0; r->expn = (a->expn + 1) >> 1; ret = bf_round(r, prec, flags); } return ret; fail: bf_set_nan(r); return BF_ST_MEM_ERROR; } static no_inline int bf_op2(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags, bf_op2_func_t *func) { bf_t tmp; int ret; if (r == a || r == b) { bf_init(r->ctx, &tmp); ret = func(&tmp, a, b, prec, flags); bf_move(r, &tmp); } else { ret = func(r, a, b, prec, flags); } return ret; } int bf_add(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags) { return bf_op2(r, a, b, prec, flags, __bf_add); } int bf_sub(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags) { return bf_op2(r, a, b, prec, flags, __bf_sub); } int 
bf_div(bf_t *r, const bf_t *a, const bf_t *b, limb_t prec, bf_flags_t flags) { return bf_op2(r, a, b, prec, flags, __bf_div); } int bf_mul_ui(bf_t *r, const bf_t *a, uint64_t b1, limb_t prec, bf_flags_t flags) { bf_t b; int ret; bf_init(r->ctx, &b); ret = bf_set_ui(&b, b1); ret |= bf_mul(r, a, &b, prec, flags); bf_delete(&b); return ret; } int bf_mul_si(bf_t *r, const bf_t *a, int64_t b1, limb_t prec, bf_flags_t flags) { bf_t b; int ret; bf_init(r->ctx, &b); ret = bf_set_si(&b, b1); ret |= bf_mul(r, a, &b, prec, flags); bf_delete(&b); return ret; } int bf_add_si(bf_t *r, const bf_t *a, int64_t b1, limb_t prec, bf_flags_t flags) { bf_t b; int ret; bf_init(r->ctx, &b); ret = bf_set_si(&b, b1); ret |= bf_add(r, a, &b, prec, flags); bf_delete(&b); return ret; } static int bf_pow_ui(bf_t *r, const bf_t *a, limb_t b, limb_t prec, bf_flags_t flags) { int ret, n_bits, i; assert(r != a); if (b == 0) return bf_set_ui(r, 1); ret = bf_set(r, a); n_bits = LIMB_BITS - clz(b); for(i = n_bits - 2; i >= 0; i--) { ret |= bf_mul(r, r, r, prec, flags); if ((b >> i) & 1) ret |= bf_mul(r, r, a, prec, flags); } return ret; } static int bf_pow_ui_ui(bf_t *r, limb_t a1, limb_t b, limb_t prec, bf_flags_t flags) { bf_t a; int ret; #ifdef USE_BF_DEC if (a1 == 10 && b <= LIMB_DIGITS) { /* use precomputed powers. We do not round at this point because we expect the caller to do it */ ret = bf_set_ui(r, mp_pow_dec[b]); } else #endif { bf_init(r->ctx, &a); ret = bf_set_ui(&a, a1); ret |= bf_pow_ui(r, &a, b, prec, flags); bf_delete(&a); } return ret; } /* convert to integer (infinite precision) */ int bf_rint(bf_t *r, int rnd_mode) { return bf_round(r, 0, rnd_mode | BF_FLAG_RADPNT_PREC); } /* logical operations */ #define BF_LOGIC_OR 0 #define BF_LOGIC_XOR 1 #define BF_LOGIC_AND 2 static inline limb_t bf_logic_op1(limb_t a, limb_t b, int op) { switch(op) { case BF_LOGIC_OR: return a | b; case BF_LOGIC_XOR: return a ^ b; default: case BF_LOGIC_AND: return a & b; } } static int bf_logic_op(bf_t *r, const bf_t *a1, const bf_t *b1, int op) { bf_t b1_s, a1_s, *a, *b; limb_t a_sign, b_sign, r_sign; slimb_t l, i, a_bit_offset, b_bit_offset; limb_t v1, v2, v1_mask, v2_mask, r_mask; int ret; assert(r != a1 && r != b1); if (a1->expn <= 0) a_sign = 0; /* minus zero is considered as positive */ else a_sign = a1->sign; if (b1->expn <= 0) b_sign = 0; /* minus zero is considered as positive */ else b_sign = b1->sign; if (a_sign) { a = &a1_s; bf_init(r->ctx, a); if (bf_add_si(a, a1, 1, BF_PREC_INF, BF_RNDZ)) { b = NULL; goto fail; } } else { a = (bf_t *)a1; } if (b_sign) { b = &b1_s; bf_init(r->ctx, b); if (bf_add_si(b, b1, 1, BF_PREC_INF, BF_RNDZ)) goto fail; } else { b = (bf_t *)b1; } r_sign = bf_logic_op1(a_sign, b_sign, op); if (op == BF_LOGIC_AND && r_sign == 0) { /* no need to compute extra zeros for and */ if (a_sign == 0 && b_sign == 0) l = bf_min(a->expn, b->expn); else if (a_sign == 0) l = a->expn; else l = b->expn; } else { l = bf_max(a->expn, b->expn); } /* Note: a or b can be zero */ l = (bf_max(l, 1) + LIMB_BITS - 1) / LIMB_BITS; if (bf_resize(r, l)) goto fail; a_bit_offset = a->len * LIMB_BITS - a->expn; b_bit_offset = b->len * LIMB_BITS - b->expn; v1_mask = -a_sign; v2_mask = -b_sign; r_mask = -r_sign; for(i = 0; i < l; i++) { v1 = get_bits(a->tab, a->len, a_bit_offset + i * LIMB_BITS) ^ v1_mask; v2 = get_bits(b->tab, b->len, b_bit_offset + i * LIMB_BITS) ^ v2_mask; r->tab[i] = bf_logic_op1(v1, v2, op) ^ r_mask; } r->expn = l * LIMB_BITS; r->sign = r_sign; bf_normalize_and_round(r, BF_PREC_INF, BF_RNDZ); /* cannot fail */ 
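/* Worked example (illustration): for (-6) | 3 the negative operand is handled
   via the identity ~x == -(x + 1): a1 + 1 = -5, and XOR-ing its magnitude bits
   with the all-ones mask gives ...11111010, i.e. -6 in two's complement.
   OR with 3 gives ...11111011; XOR-ing with r_mask leaves magnitude 4 with the
   sign set, and the subtraction of 1 just below produces -5, the expected
   two's-complement result. */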
if (r_sign) { if (bf_add_si(r, r, -1, BF_PREC_INF, BF_RNDZ)) goto fail; } ret = 0; done: if (a == &a1_s) bf_delete(a); if (b == &b1_s) bf_delete(b); return ret; fail: bf_set_nan(r); ret = BF_ST_MEM_ERROR; goto done; } /* 'a' and 'b' must be integers. Return 0 or BF_ST_MEM_ERROR. */ int bf_logic_or(bf_t *r, const bf_t *a, const bf_t *b) { return bf_logic_op(r, a, b, BF_LOGIC_OR); } /* 'a' and 'b' must be integers. Return 0 or BF_ST_MEM_ERROR. */ int bf_logic_xor(bf_t *r, const bf_t *a, const bf_t *b) { return bf_logic_op(r, a, b, BF_LOGIC_XOR); } /* 'a' and 'b' must be integers. Return 0 or BF_ST_MEM_ERROR. */ int bf_logic_and(bf_t *r, const bf_t *a, const bf_t *b) { return bf_logic_op(r, a, b, BF_LOGIC_AND); } /* conversion between fixed size types */ typedef union { double d; uint64_t u; } Float64Union; int bf_get_float64(const bf_t *a, double *pres, bf_rnd_t rnd_mode) { Float64Union u; int e, ret; uint64_t m; ret = 0; if (a->expn == BF_EXP_NAN) { u.u = 0x7ff8000000000000; /* quiet nan */ } else { bf_t b_s, *b = &b_s; bf_init(a->ctx, b); bf_set(b, a); if (bf_is_finite(b)) { ret = bf_round(b, 53, rnd_mode | BF_FLAG_SUBNORMAL | bf_set_exp_bits(11)); } if (b->expn == BF_EXP_INF) { e = (1 << 11) - 1; m = 0; } else if (b->expn == BF_EXP_ZERO) { e = 0; m = 0; } else { e = b->expn + 1023 - 1; #if LIMB_BITS == 32 if (b->len == 2) { m = ((uint64_t)b->tab[1] << 32) | b->tab[0]; } else { m = ((uint64_t)b->tab[0] << 32); } #else m = b->tab[0]; #endif if (e <= 0) { /* subnormal */ m = m >> (12 - e); e = 0; } else { m = (m << 1) >> 12; } } u.u = m | ((uint64_t)e << 52) | ((uint64_t)b->sign << 63); bf_delete(b); } *pres = u.d; return ret; } int bf_set_float64(bf_t *a, double d) { Float64Union u; uint64_t m; int shift, e, sgn; u.d = d; sgn = u.u >> 63; e = (u.u >> 52) & ((1 << 11) - 1); m = u.u & (((uint64_t)1 << 52) - 1); if (e == ((1 << 11) - 1)) { if (m != 0) { bf_set_nan(a); } else { bf_set_inf(a, sgn); } } else if (e == 0) { if (m == 0) { bf_set_zero(a, sgn); } else { /* subnormal number */ m <<= 12; shift = clz64(m); m <<= shift; e = -shift; goto norm; } } else { m = (m << 11) | ((uint64_t)1 << 63); norm: a->expn = e - 1023 + 1; #if LIMB_BITS == 32 if (bf_resize(a, 2)) goto fail; a->tab[0] = m; a->tab[1] = m >> 32; #else if (bf_resize(a, 1)) goto fail; a->tab[0] = m; #endif a->sign = sgn; } return 0; fail: bf_set_nan(a); return BF_ST_MEM_ERROR; } /* The rounding mode is always BF_RNDZ. Return BF_ST_INVALID_OP if there is an overflow and 0 otherwise. */ int bf_get_int32(int *pres, const bf_t *a, int flags) { uint32_t v; int ret; if (a->expn >= BF_EXP_INF) { ret = BF_ST_INVALID_OP; if (flags & BF_GET_INT_MOD) { v = 0; } else if (a->expn == BF_EXP_INF) { v = (uint32_t)INT32_MAX + a->sign; } else { v = INT32_MAX; } } else if (a->expn <= 0) { v = 0; ret = 0; } else if (a->expn <= 31) { v = a->tab[a->len - 1] >> (LIMB_BITS - a->expn); if (a->sign) v = -v; ret = 0; } else if (!(flags & BF_GET_INT_MOD)) { ret = BF_ST_INVALID_OP; if (a->sign) { v = (uint32_t)INT32_MAX + 1; if (a->expn == 32 && (a->tab[a->len - 1] >> (LIMB_BITS - 32)) == v) { ret = 0; } } else { v = INT32_MAX; } } else { v = get_bits(a->tab, a->len, a->len * LIMB_BITS - a->expn); if (a->sign) v = -v; ret = 0; } *pres = v; return ret; } /* The rounding mode is always BF_RNDZ. Return BF_ST_INVALID_OP if there is an overflow and 0 otherwise. 
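   If BF_GET_INT_MOD is set, a finite value that does not fit is instead
   truncated to its low 64 bits (i.e. reduced modulo 2^64) and no overflow is
   reported.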
*/ int bf_get_int64(int64_t *pres, const bf_t *a, int flags) { uint64_t v; int ret; if (a->expn >= BF_EXP_INF) { ret = BF_ST_INVALID_OP; if (flags & BF_GET_INT_MOD) { v = 0; } else if (a->expn == BF_EXP_INF) { v = (uint64_t)INT64_MAX + a->sign; } else { v = INT64_MAX; } } else if (a->expn <= 0) { v = 0; ret = 0; } else if (a->expn <= 63) { #if LIMB_BITS == 32 if (a->expn <= 32) v = a->tab[a->len - 1] >> (LIMB_BITS - a->expn); else v = (((uint64_t)a->tab[a->len - 1] << 32) | get_limbz(a, a->len - 2)) >> (64 - a->expn); #else v = a->tab[a->len - 1] >> (LIMB_BITS - a->expn); #endif if (a->sign) v = -v; ret = 0; } else if (!(flags & BF_GET_INT_MOD)) { ret = BF_ST_INVALID_OP; if (a->sign) { uint64_t v1; v = (uint64_t)INT64_MAX + 1; if (a->expn == 64) { v1 = a->tab[a->len - 1]; #if LIMB_BITS == 32 v1 = (v1 << 32) | get_limbz(a, a->len - 2); #endif if (v1 == v) ret = 0; } } else { v = INT64_MAX; } } else { slimb_t bit_pos = a->len * LIMB_BITS - a->expn; v = get_bits(a->tab, a->len, bit_pos); #if LIMB_BITS == 32 v |= (uint64_t)get_bits(a->tab, a->len, bit_pos + 32) << 32; #endif if (a->sign) v = -v; ret = 0; } *pres = v; return ret; } /* The rounding mode is always BF_RNDZ. Return BF_ST_INVALID_OP if there is an overflow and 0 otherwise. */ int bf_get_uint64(uint64_t *pres, const bf_t *a) { uint64_t v; int ret; if (a->expn == BF_EXP_NAN) { goto overflow; } else if (a->expn <= 0) { v = 0; ret = 0; } else if (a->sign) { v = 0; ret = BF_ST_INVALID_OP; } else if (a->expn <= 64) { #if LIMB_BITS == 32 if (a->expn <= 32) v = a->tab[a->len - 1] >> (LIMB_BITS - a->expn); else v = (((uint64_t)a->tab[a->len - 1] << 32) | get_limbz(a, a->len - 2)) >> (64 - a->expn); #else v = a->tab[a->len - 1] >> (LIMB_BITS - a->expn); #endif ret = 0; } else { overflow: v = UINT64_MAX; ret = BF_ST_INVALID_OP; } *pres = v; return ret; } /* base conversion from radix */ static const uint8_t digits_per_limb_table[BF_RADIX_MAX - 1] = { #if LIMB_BITS == 32 32,20,16,13,12,11,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, #else 64,40,32,27,24,22,21,20,19,18,17,17,16,16,16,15,15,15,14,14,14,14,13,13,13,13,13,13,13,12,12,12,12,12,12, #endif }; static limb_t get_limb_radix(int radix) { int i, k; limb_t radixl; k = digits_per_limb_table[radix - 2]; radixl = radix; for(i = 1; i < k; i++) radixl *= radix; return radixl; } /* return != 0 if error */ static int bf_integer_from_radix_rec(bf_t *r, const limb_t *tab, limb_t n, int level, limb_t n0, limb_t radix, bf_t *pow_tab) { int ret; if (n == 1) { ret = bf_set_ui(r, tab[0]); } else { bf_t T_s, *T = &T_s, *B; limb_t n1, n2; n2 = (((n0 * 2) >> (level + 1)) + 1) / 2; n1 = n - n2; // printf("level=%d n0=%ld n1=%ld n2=%ld\n", level, n0, n1, n2); B = &pow_tab[level]; if (B->len == 0) { ret = bf_pow_ui_ui(B, radix, n2, BF_PREC_INF, BF_RNDZ); if (ret) return ret; } ret = bf_integer_from_radix_rec(r, tab + n2, n1, level + 1, n0, radix, pow_tab); if (ret) return ret; ret = bf_mul(r, r, B, BF_PREC_INF, BF_RNDZ); if (ret) return ret; bf_init(r->ctx, T); ret = bf_integer_from_radix_rec(T, tab, n2, level + 1, n0, radix, pow_tab); if (!ret) ret = bf_add(r, r, T, BF_PREC_INF, BF_RNDZ); bf_delete(T); } return ret; // bf_print_str(" r=", r); } /* return 0 if OK != 0 if memory error */ static int bf_integer_from_radix(bf_t *r, const limb_t *tab, limb_t n, limb_t radix) { bf_context_t *s = r->ctx; int pow_tab_len, i, ret; limb_t radixl; bf_t *pow_tab; radixl = get_limb_radix(radix); pow_tab_len = ceil_log2(n) + 2; /* XXX: check */ pow_tab = bf_malloc(s, 
sizeof(pow_tab[0]) * pow_tab_len); if (!pow_tab) return -1; for(i = 0; i < pow_tab_len; i++) bf_init(r->ctx, &pow_tab[i]); ret = bf_integer_from_radix_rec(r, tab, n, 0, n, radixl, pow_tab); for(i = 0; i < pow_tab_len; i++) { bf_delete(&pow_tab[i]); } bf_free(s, pow_tab); return ret; } /* compute and round T * radix^expn. */ int bf_mul_pow_radix(bf_t *r, const bf_t *T, limb_t radix, slimb_t expn, limb_t prec, bf_flags_t flags) { int ret, expn_sign, overflow; slimb_t e, extra_bits, prec1, ziv_extra_bits; bf_t B_s, *B = &B_s; if (T->len == 0) { return bf_set(r, T); } else if (expn == 0) { ret = bf_set(r, T); ret |= bf_round(r, prec, flags); return ret; } e = expn; expn_sign = 0; if (e < 0) { e = -e; expn_sign = 1; } bf_init(r->ctx, B); if (prec == BF_PREC_INF) { /* infinite precision: only used if the result is known to be exact */ ret = bf_pow_ui_ui(B, radix, e, BF_PREC_INF, BF_RNDN); if (expn_sign) { ret |= bf_div(r, T, B, T->len * LIMB_BITS, BF_RNDN); } else { ret |= bf_mul(r, T, B, BF_PREC_INF, BF_RNDN); } } else { ziv_extra_bits = 16; for(;;) { prec1 = prec + ziv_extra_bits; /* XXX: correct overflow/underflow handling */ /* XXX: rigorous error analysis needed */ extra_bits = ceil_log2(e) * 2 + 1; ret = bf_pow_ui_ui(B, radix, e, prec1 + extra_bits, BF_RNDN | BF_FLAG_EXT_EXP); overflow = !bf_is_finite(B); /* XXX: if bf_pow_ui_ui returns an exact result, can stop after the next operation */ if (expn_sign) ret |= bf_div(r, T, B, prec1 + extra_bits, BF_RNDN | BF_FLAG_EXT_EXP); else ret |= bf_mul(r, T, B, prec1 + extra_bits, BF_RNDN | BF_FLAG_EXT_EXP); if (ret & BF_ST_MEM_ERROR) break; if ((ret & BF_ST_INEXACT) && !bf_can_round(r, prec, flags & BF_RND_MASK, prec1) && !overflow) { /* and more precision and retry */ ziv_extra_bits = ziv_extra_bits + (ziv_extra_bits / 2); } else { /* XXX: need to use __bf_round() to pass the inexact flag for the subnormal case */ ret = bf_round(r, prec, flags) | (ret & BF_ST_INEXACT); break; } } } bf_delete(B); return ret; } static inline int to_digit(int c) { if (c >= '0' && c <= '9') return c - '0'; else if (c >= 'A' && c <= 'Z') return c - 'A' + 10; else if (c >= 'a' && c <= 'z') return c - 'a' + 10; else return 36; } /* add a limb at 'pos' and decrement pos. new space is created if needed. 
Return 0 if OK, -1 if memory error */ static int bf_add_limb(bf_t *a, slimb_t *ppos, limb_t v) { slimb_t pos; pos = *ppos; if (unlikely(pos < 0)) { limb_t new_size, d, *new_tab; new_size = bf_max(a->len + 1, a->len * 3 / 2); new_tab = bf_realloc(a->ctx, a->tab, sizeof(limb_t) * new_size); if (!new_tab) return -1; a->tab = new_tab; d = new_size - a->len; memmove(a->tab + d, a->tab, a->len * sizeof(limb_t)); a->len = new_size; pos += d; } a->tab[pos--] = v; *ppos = pos; return 0; } static int bf_tolower(int c) { if (c >= 'A' && c <= 'Z') c = c - 'A' + 'a'; return c; } static int strcasestart(const char *str, const char *val, const char **ptr) { const char *p, *q; p = str; q = val; while (*q != '\0') { if (bf_tolower(*p) != *q) return 0; p++; q++; } if (ptr) *ptr = p; return 1; } static int bf_atof_internal(bf_t *r, slimb_t *pexponent, const char *str, const char **pnext, int radix, limb_t prec, bf_flags_t flags, BOOL is_dec) { const char *p, *p_start; int is_neg, radix_bits, exp_is_neg, ret, digits_per_limb, shift; limb_t cur_limb; slimb_t pos, expn, int_len, digit_count; BOOL has_decpt, is_bin_exp; bf_t a_s, *a; *pexponent = 0; p = str; if (!(flags & BF_ATOF_NO_NAN_INF) && radix <= 16 && strcasestart(p, "nan", &p)) { bf_set_nan(r); ret = 0; goto done; } is_neg = 0; if (p[0] == '+') { p++; p_start = p; } else if (p[0] == '-') { is_neg = 1; p++; p_start = p; } else { p_start = p; } if (p[0] == '0') { if ((p[1] == 'x' || p[1] == 'X') && (radix == 0 || radix == 16) && !(flags & BF_ATOF_NO_HEX)) { radix = 16; p += 2; } else if ((p[1] == 'o' || p[1] == 'O') && radix == 0 && (flags & BF_ATOF_BIN_OCT)) { p += 2; radix = 8; } else if ((p[1] == 'b' || p[1] == 'B') && radix == 0 && (flags & BF_ATOF_BIN_OCT)) { p += 2; radix = 2; } else { goto no_prefix; } /* there must be a digit after the prefix */ if (to_digit((uint8_t)*p) >= radix) { bf_set_nan(r); ret = 0; goto done; } no_prefix: ; } else { if (!(flags & BF_ATOF_NO_NAN_INF) && radix <= 16 && strcasestart(p, "inf", &p)) { bf_set_inf(r, is_neg); ret = 0; goto done; } } if (radix == 0) radix = 10; if (is_dec) { assert(radix == 10); radix_bits = 0; a = r; } else if ((radix & (radix - 1)) != 0) { radix_bits = 0; /* base is not a power of two */ a = &a_s; bf_init(r->ctx, a); } else { radix_bits = ceil_log2(radix); a = r; } /* skip leading zeros */ /* XXX: could also skip zeros after the decimal point */ while (*p == '0') p++; if (radix_bits) { shift = digits_per_limb = LIMB_BITS; } else { radix_bits = 0; shift = digits_per_limb = digits_per_limb_table[radix - 2]; } cur_limb = 0; bf_resize(a, 1); pos = 0; has_decpt = FALSE; int_len = digit_count = 0; for(;;) { limb_t c; if (*p == '.' 
&& (p > p_start || to_digit(p[1]) < radix)) { if (has_decpt) break; has_decpt = TRUE; int_len = digit_count; p++; } c = to_digit(*p); if (c >= radix) break; digit_count++; p++; if (radix_bits) { shift -= radix_bits; if (shift <= 0) { cur_limb |= c >> (-shift); if (bf_add_limb(a, &pos, cur_limb)) goto mem_error; if (shift < 0) cur_limb = c << (LIMB_BITS + shift); else cur_limb = 0; shift += LIMB_BITS; } else { cur_limb |= c << shift; } } else { cur_limb = cur_limb * radix + c; shift--; if (shift == 0) { if (bf_add_limb(a, &pos, cur_limb)) goto mem_error; shift = digits_per_limb; cur_limb = 0; } } } if (!has_decpt) int_len = digit_count; /* add the last limb and pad with zeros */ if (shift != digits_per_limb) { if (radix_bits == 0) { while (shift != 0) { cur_limb *= radix; shift--; } } if (bf_add_limb(a, &pos, cur_limb)) { mem_error: ret = BF_ST_MEM_ERROR; if (!radix_bits) bf_delete(a); bf_set_nan(r); goto done; } } /* reset the next limbs to zero (we prefer to reallocate in the renormalization) */ memset(a->tab, 0, (pos + 1) * sizeof(limb_t)); if (p == p_start) { ret = 0; if (!radix_bits) bf_delete(a); bf_set_nan(r); goto done; } /* parse the exponent, if any */ expn = 0; is_bin_exp = FALSE; if (((radix == 10 && (*p == 'e' || *p == 'E')) || (radix != 10 && (*p == '@' || (radix_bits && (*p == 'p' || *p == 'P'))))) && p > p_start) { is_bin_exp = (*p == 'p' || *p == 'P'); p++; exp_is_neg = 0; if (*p == '+') { p++; } else if (*p == '-') { exp_is_neg = 1; p++; } for(;;) { int c; c = to_digit(*p); if (c >= 10) break; if (unlikely(expn > ((BF_RAW_EXP_MAX - 2 - 9) / 10))) { /* exponent overflow */ if (exp_is_neg) { bf_set_zero(r, is_neg); ret = BF_ST_UNDERFLOW | BF_ST_INEXACT; } else { bf_set_inf(r, is_neg); ret = BF_ST_OVERFLOW | BF_ST_INEXACT; } goto done; } p++; expn = expn * 10 + c; } if (exp_is_neg) expn = -expn; } if (is_dec) { a->expn = expn + int_len; a->sign = is_neg; ret = bfdec_normalize_and_round((bfdec_t *)a, prec, flags); } else if (radix_bits) { /* XXX: may overflow */ if (!is_bin_exp) expn *= radix_bits; a->expn = expn + (int_len * radix_bits); a->sign = is_neg; ret = bf_normalize_and_round(a, prec, flags); } else { limb_t l; pos++; l = a->len - pos; /* number of limbs */ if (l == 0) { bf_set_zero(r, is_neg); ret = 0; } else { bf_t T_s, *T = &T_s; expn -= l * digits_per_limb - int_len; bf_init(r->ctx, T); if (bf_integer_from_radix(T, a->tab + pos, l, radix)) { bf_set_nan(r); ret = BF_ST_MEM_ERROR; } else { T->sign = is_neg; if (flags & BF_ATOF_EXPONENT) { /* return the exponent */ *pexponent = expn; ret = bf_set(r, T); } else { ret = bf_mul_pow_radix(r, T, radix, expn, prec, flags); } } bf_delete(T); } bf_delete(a); } done: if (pnext) *pnext = p; return ret; } /* Return (status, n, exp). 'status' is the floating point status. 'n' is the parsed number. If (flags & BF_ATOF_EXPONENT) and if the radix is not a power of two, the parsed number is equal to r * (*pexponent)^radix. Otherwise *pexponent = 0. 
*/ int bf_atof2(bf_t *r, slimb_t *pexponent, const char *str, const char **pnext, int radix, limb_t prec, bf_flags_t flags) { return bf_atof_internal(r, pexponent, str, pnext, radix, prec, flags, FALSE); } int bf_atof(bf_t *r, const char *str, const char **pnext, int radix, limb_t prec, bf_flags_t flags) { slimb_t dummy_exp; return bf_atof_internal(r, &dummy_exp, str, pnext, radix, prec, flags, FALSE); } /* base conversion to radix */ #if LIMB_BITS == 64 #define RADIXL_10 UINT64_C(10000000000000000000) #else #define RADIXL_10 UINT64_C(1000000000) #endif static const uint32_t inv_log2_radix[BF_RADIX_MAX - 1][LIMB_BITS / 32 + 1] = { #if LIMB_BITS == 32 { 0x80000000, 0x00000000,}, { 0x50c24e60, 0xd4d4f4a7,}, { 0x40000000, 0x00000000,}, { 0x372068d2, 0x0a1ee5ca,}, { 0x3184648d, 0xb8153e7a,}, { 0x2d983275, 0x9d5369c4,}, { 0x2aaaaaaa, 0xaaaaaaab,}, { 0x28612730, 0x6a6a7a54,}, { 0x268826a1, 0x3ef3fde6,}, { 0x25001383, 0xbac8a744,}, { 0x23b46706, 0x82c0c709,}, { 0x229729f1, 0xb2c83ded,}, { 0x219e7ffd, 0xa5ad572b,}, { 0x20c33b88, 0xda7c29ab,}, { 0x20000000, 0x00000000,}, { 0x1f50b57e, 0xac5884b3,}, { 0x1eb22cc6, 0x8aa6e26f,}, { 0x1e21e118, 0x0c5daab2,}, { 0x1d9dcd21, 0x439834e4,}, { 0x1d244c78, 0x367a0d65,}, { 0x1cb40589, 0xac173e0c,}, { 0x1c4bd95b, 0xa8d72b0d,}, { 0x1bead768, 0x98f8ce4c,}, { 0x1b903469, 0x050f72e5,}, { 0x1b3b433f, 0x2eb06f15,}, { 0x1aeb6f75, 0x9c46fc38,}, { 0x1aa038eb, 0x0e3bfd17,}, { 0x1a593062, 0xb38d8c56,}, { 0x1a15f4c3, 0x2b95a2e6,}, { 0x19d630dc, 0xcc7ddef9,}, { 0x19999999, 0x9999999a,}, { 0x195fec80, 0x8a609431,}, { 0x1928ee7b, 0x0b4f22f9,}, { 0x18f46acf, 0x8c06e318,}, { 0x18c23246, 0xdc0a9f3d,}, #else { 0x80000000, 0x00000000, 0x00000000,}, { 0x50c24e60, 0xd4d4f4a7, 0x021f57bc,}, { 0x40000000, 0x00000000, 0x00000000,}, { 0x372068d2, 0x0a1ee5ca, 0x19ea911b,}, { 0x3184648d, 0xb8153e7a, 0x7fc2d2e1,}, { 0x2d983275, 0x9d5369c4, 0x4dec1661,}, { 0x2aaaaaaa, 0xaaaaaaaa, 0xaaaaaaab,}, { 0x28612730, 0x6a6a7a53, 0x810fabde,}, { 0x268826a1, 0x3ef3fde6, 0x23e2566b,}, { 0x25001383, 0xbac8a744, 0x385a3349,}, { 0x23b46706, 0x82c0c709, 0x3f891718,}, { 0x229729f1, 0xb2c83ded, 0x15fba800,}, { 0x219e7ffd, 0xa5ad572a, 0xe169744b,}, { 0x20c33b88, 0xda7c29aa, 0x9bddee52,}, { 0x20000000, 0x00000000, 0x00000000,}, { 0x1f50b57e, 0xac5884b3, 0x70e28eee,}, { 0x1eb22cc6, 0x8aa6e26f, 0x06d1a2a2,}, { 0x1e21e118, 0x0c5daab1, 0x81b4f4bf,}, { 0x1d9dcd21, 0x439834e3, 0x81667575,}, { 0x1d244c78, 0x367a0d64, 0xc8204d6d,}, { 0x1cb40589, 0xac173e0c, 0x3b7b16ba,}, { 0x1c4bd95b, 0xa8d72b0d, 0x5879f25a,}, { 0x1bead768, 0x98f8ce4c, 0x66cc2858,}, { 0x1b903469, 0x050f72e5, 0x0cf5488e,}, { 0x1b3b433f, 0x2eb06f14, 0x8c89719c,}, { 0x1aeb6f75, 0x9c46fc37, 0xab5fc7e9,}, { 0x1aa038eb, 0x0e3bfd17, 0x1bd62080,}, { 0x1a593062, 0xb38d8c56, 0x7998ab45,}, { 0x1a15f4c3, 0x2b95a2e6, 0x46aed6a0,}, { 0x19d630dc, 0xcc7ddef9, 0x5aadd61b,}, { 0x19999999, 0x99999999, 0x9999999a,}, { 0x195fec80, 0x8a609430, 0xe1106014,}, { 0x1928ee7b, 0x0b4f22f9, 0x5f69791d,}, { 0x18f46acf, 0x8c06e318, 0x4d2aeb2c,}, { 0x18c23246, 0xdc0a9f3d, 0x3fe16970,}, #endif }; static const limb_t log2_radix[BF_RADIX_MAX - 1] = { #if LIMB_BITS == 32 0x20000000, 0x32b80347, 0x40000000, 0x4a4d3c26, 0x52b80347, 0x59d5d9fd, 0x60000000, 0x6570068e, 0x6a4d3c26, 0x6eb3a9f0, 0x72b80347, 0x766a008e, 0x79d5d9fd, 0x7d053f6d, 0x80000000, 0x82cc7edf, 0x8570068e, 0x87ef05ae, 0x8a4d3c26, 0x8c8ddd45, 0x8eb3a9f0, 0x90c10501, 0x92b80347, 0x949a784c, 0x966a008e, 0x982809d6, 0x99d5d9fd, 0x9b74948f, 0x9d053f6d, 0x9e88c6b3, 0xa0000000, 0xa16bad37, 0xa2cc7edf, 0xa4231623, 0xa570068e, #else 
0x2000000000000000, 0x32b803473f7ad0f4, 0x4000000000000000, 0x4a4d3c25e68dc57f, 0x52b803473f7ad0f4, 0x59d5d9fd5010b366, 0x6000000000000000, 0x6570068e7ef5a1e8, 0x6a4d3c25e68dc57f, 0x6eb3a9f01975077f, 0x72b803473f7ad0f4, 0x766a008e4788cbcd, 0x79d5d9fd5010b366, 0x7d053f6d26089673, 0x8000000000000000, 0x82cc7edf592262d0, 0x8570068e7ef5a1e8, 0x87ef05ae409a0289, 0x8a4d3c25e68dc57f, 0x8c8ddd448f8b845a, 0x8eb3a9f01975077f, 0x90c10500d63aa659, 0x92b803473f7ad0f4, 0x949a784bcd1b8afe, 0x966a008e4788cbcd, 0x982809d5be7072dc, 0x99d5d9fd5010b366, 0x9b74948f5532da4b, 0x9d053f6d26089673, 0x9e88c6b3626a72aa, 0xa000000000000000, 0xa16bad3758efd873, 0xa2cc7edf592262d0, 0xa4231623369e78e6, 0xa570068e7ef5a1e8, #endif }; /* compute floor(a*b) or ceil(a*b) with b = log2(radix) or b=1/log2(radix). For is_inv = 0, strict accuracy is not guaranteed when radix is not a power of two. */ slimb_t bf_mul_log2_radix(slimb_t a1, unsigned int radix, int is_inv, int is_ceil1) { int is_neg; limb_t a; BOOL is_ceil; is_ceil = is_ceil1; a = a1; if (a1 < 0) { a = -a; is_neg = 1; } else { is_neg = 0; } is_ceil ^= is_neg; if ((radix & (radix - 1)) == 0) { int radix_bits; /* radix is a power of two */ radix_bits = ceil_log2(radix); if (is_inv) { if (is_ceil) a += radix_bits - 1; a = a / radix_bits; } else { a = a * radix_bits; } } else { const uint32_t *tab; limb_t b0, b1; dlimb_t t; if (is_inv) { tab = inv_log2_radix[radix - 2]; #if LIMB_BITS == 32 b1 = tab[0]; b0 = tab[1]; #else b1 = ((limb_t)tab[0] << 32) | tab[1]; b0 = (limb_t)tab[2] << 32; #endif t = (dlimb_t)b0 * (dlimb_t)a; t = (dlimb_t)b1 * (dlimb_t)a + (t >> LIMB_BITS); a = t >> (LIMB_BITS - 1); } else { b0 = log2_radix[radix - 2]; t = (dlimb_t)b0 * (dlimb_t)a; a = t >> (LIMB_BITS - 3); } /* a = floor(result) and 'result' cannot be an integer */ a += is_ceil; } if (is_neg) a = -a; return a; } /* 'n' is the number of output limbs */ static int bf_integer_to_radix_rec(bf_t *pow_tab, limb_t *out, const bf_t *a, limb_t n, int level, limb_t n0, limb_t radixl, unsigned int radixl_bits) { limb_t n1, n2, q_prec; int ret; assert(n >= 1); if (n == 1) { out[0] = get_bits(a->tab, a->len, a->len * LIMB_BITS - a->expn); } else if (n == 2) { dlimb_t t; slimb_t pos; pos = a->len * LIMB_BITS - a->expn; t = ((dlimb_t)get_bits(a->tab, a->len, pos + LIMB_BITS) << LIMB_BITS) | get_bits(a->tab, a->len, pos); if (likely(radixl == RADIXL_10)) { /* use division by a constant when possible */ out[0] = t % RADIXL_10; out[1] = t / RADIXL_10; } else { out[0] = t % radixl; out[1] = t / radixl; } } else { bf_t Q, R, *B, *B_inv; int q_add; bf_init(a->ctx, &Q); bf_init(a->ctx, &R); n2 = (((n0 * 2) >> (level + 1)) + 1) / 2; n1 = n - n2; B = &pow_tab[2 * level]; B_inv = &pow_tab[2 * level + 1]; ret = 0; if (B->len == 0) { /* compute BASE^n2 */ ret |= bf_pow_ui_ui(B, radixl, n2, BF_PREC_INF, BF_RNDZ); /* we use enough bits for the maximum possible 'n1' value, i.e. 
n2 + 1 */ ret |= bf_set_ui(&R, 1); ret |= bf_div(B_inv, &R, B, (n2 + 1) * radixl_bits + 2, BF_RNDN); } // printf("%d: n1=% " PRId64 " n2=%" PRId64 "\n", level, n1, n2); q_prec = n1 * radixl_bits; ret |= bf_mul(&Q, a, B_inv, q_prec, BF_RNDN); ret |= bf_rint(&Q, BF_RNDZ); ret |= bf_mul(&R, &Q, B, BF_PREC_INF, BF_RNDZ); ret |= bf_sub(&R, a, &R, BF_PREC_INF, BF_RNDZ); if (ret & BF_ST_MEM_ERROR) goto fail; /* adjust if necessary */ q_add = 0; while (R.sign && R.len != 0) { if (bf_add(&R, &R, B, BF_PREC_INF, BF_RNDZ)) goto fail; q_add--; } while (bf_cmpu(&R, B) >= 0) { if (bf_sub(&R, &R, B, BF_PREC_INF, BF_RNDZ)) goto fail; q_add++; } if (q_add != 0) { if (bf_add_si(&Q, &Q, q_add, BF_PREC_INF, BF_RNDZ)) goto fail; } if (bf_integer_to_radix_rec(pow_tab, out + n2, &Q, n1, level + 1, n0, radixl, radixl_bits)) goto fail; if (bf_integer_to_radix_rec(pow_tab, out, &R, n2, level + 1, n0, radixl, radixl_bits)) { fail: bf_delete(&Q); bf_delete(&R); return -1; } bf_delete(&Q); bf_delete(&R); } return 0; } /* return 0 if OK != 0 if memory error */ static int bf_integer_to_radix(bf_t *r, const bf_t *a, limb_t radixl) { bf_context_t *s = r->ctx; limb_t r_len; bf_t *pow_tab; int i, pow_tab_len, ret; r_len = r->len; pow_tab_len = (ceil_log2(r_len) + 2) * 2; /* XXX: check */ pow_tab = bf_malloc(s, sizeof(pow_tab[0]) * pow_tab_len); if (!pow_tab) return -1; for(i = 0; i < pow_tab_len; i++) bf_init(r->ctx, &pow_tab[i]); ret = bf_integer_to_radix_rec(pow_tab, r->tab, a, r_len, 0, r_len, radixl, ceil_log2(radixl)); for(i = 0; i < pow_tab_len; i++) { bf_delete(&pow_tab[i]); } bf_free(s, pow_tab); return ret; } /* a must be >= 0. 'P' is the wanted number of digits in radix 'radix'. 'r' is the mantissa represented as an integer. *pE contains the exponent. Return != 0 if memory error. */ static int bf_convert_to_radix(bf_t *r, slimb_t *pE, const bf_t *a, int radix, limb_t P, bf_rnd_t rnd_mode, BOOL is_fixed_exponent) { slimb_t E, e, prec, extra_bits, ziv_extra_bits, prec0; bf_t B_s, *B = &B_s; int e_sign, ret, res; if (a->len == 0) { /* zero case */ *pE = 0; return bf_set(r, a); } if (is_fixed_exponent) { E = *pE; } else { /* compute the new exponent */ E = 1 + bf_mul_log2_radix(a->expn - 1, radix, TRUE, FALSE); } // bf_print_str("a", a); // printf("E=%ld P=%ld radix=%d\n", E, P, radix); for(;;) { e = P - E; e_sign = 0; if (e < 0) { e = -e; e_sign = 1; } /* Note: precision for log2(radix) is not critical here */ prec0 = bf_mul_log2_radix(P, radix, FALSE, TRUE); ziv_extra_bits = 16; for(;;) { prec = prec0 + ziv_extra_bits; /* XXX: rigorous error analysis needed */ extra_bits = ceil_log2(e) * 2 + 1; ret = bf_pow_ui_ui(r, radix, e, prec + extra_bits, BF_RNDN | BF_FLAG_EXT_EXP); if (!e_sign) ret |= bf_mul(r, r, a, prec + extra_bits, BF_RNDN | BF_FLAG_EXT_EXP); else ret |= bf_div(r, a, r, prec + extra_bits, BF_RNDN | BF_FLAG_EXT_EXP); if (ret & BF_ST_MEM_ERROR) return BF_ST_MEM_ERROR; /* if the result is not exact, check that it can be safely rounded to an integer */ if ((ret & BF_ST_INEXACT) && !bf_can_round(r, r->expn, rnd_mode, prec)) { /* and more precision and retry */ ziv_extra_bits = ziv_extra_bits + (ziv_extra_bits / 2); continue; } else { ret = bf_rint(r, rnd_mode); if (ret & BF_ST_MEM_ERROR) return BF_ST_MEM_ERROR; break; } } if (is_fixed_exponent) break; /* check that the result is < B^P */ /* XXX: do a fast approximate test first ? 
*/ bf_init(r->ctx, B); ret = bf_pow_ui_ui(B, radix, P, BF_PREC_INF, BF_RNDZ); if (ret) { bf_delete(B); return ret; } res = bf_cmpu(r, B); bf_delete(B); if (res < 0) break; /* try a larger exponent */ E++; } *pE = E; return 0; } static void limb_to_a(char *buf, limb_t n, unsigned int radix, int len) { int digit, i; if (radix == 10) { /* specific case with constant divisor */ for(i = len - 1; i >= 0; i--) { digit = (limb_t)n % 10; n = (limb_t)n / 10; buf[i] = digit + '0'; } } else { for(i = len - 1; i >= 0; i--) { digit = (limb_t)n % radix; n = (limb_t)n / radix; if (digit < 10) digit += '0'; else digit += 'a' - 10; buf[i] = digit; } } } /* for power of 2 radixes */ static void limb_to_a2(char *buf, limb_t n, unsigned int radix_bits, int len) { int digit, i; unsigned int mask; mask = (1 << radix_bits) - 1; for(i = len - 1; i >= 0; i--) { digit = n & mask; n >>= radix_bits; if (digit < 10) digit += '0'; else digit += 'a' - 10; buf[i] = digit; } } /* 'a' must be an integer if the is_dec = FALSE or if the radix is not a power of two. A dot is added before the 'dot_pos' digit. dot_pos = n_digits does not display the dot. 0 <= dot_pos <= n_digits. n_digits >= 1. */ static void output_digits(DynBuf *s, const bf_t *a1, int radix, limb_t n_digits, limb_t dot_pos, BOOL is_dec) { limb_t i, v, l; slimb_t pos, pos_incr; int digits_per_limb, buf_pos, radix_bits, first_buf_pos; char buf[65]; bf_t a_s, *a; if (is_dec) { digits_per_limb = LIMB_DIGITS; a = (bf_t *)a1; radix_bits = 0; pos = a->len; pos_incr = 1; first_buf_pos = 0; } else if ((radix & (radix - 1)) == 0) { a = (bf_t *)a1; radix_bits = ceil_log2(radix); digits_per_limb = LIMB_BITS / radix_bits; pos_incr = digits_per_limb * radix_bits; /* digits are aligned relative to the radix point */ pos = a->len * LIMB_BITS + smod(-a->expn, radix_bits); first_buf_pos = 0; } else { limb_t n, radixl; digits_per_limb = digits_per_limb_table[radix - 2]; radixl = get_limb_radix(radix); a = &a_s; bf_init(a1->ctx, a); n = (n_digits + digits_per_limb - 1) / digits_per_limb; if (bf_resize(a, n)) { dbuf_set_error(s); goto done; } if (bf_integer_to_radix(a, a1, radixl)) { dbuf_set_error(s); goto done; } radix_bits = 0; pos = n; pos_incr = 1; first_buf_pos = pos * digits_per_limb - n_digits; } buf_pos = digits_per_limb; i = 0; while (i < n_digits) { if (buf_pos == digits_per_limb) { pos -= pos_incr; if (radix_bits == 0) { v = get_limbz(a, pos); limb_to_a(buf, v, radix, digits_per_limb); } else { v = get_bits(a->tab, a->len, pos); limb_to_a2(buf, v, radix_bits, digits_per_limb); } buf_pos = first_buf_pos; first_buf_pos = 0; } if (i < dot_pos) { l = dot_pos; } else { if (i == dot_pos) dbuf_putc(s, '.'); l = n_digits; } l = bf_min(digits_per_limb - buf_pos, l - i); dbuf_put(s, (uint8_t *)(buf + buf_pos), l); buf_pos += l; i += l; } done: if (a != a1) bf_delete(a); } static void *bf_dbuf_realloc(void *opaque, void *ptr, size_t size) { bf_context_t *s = opaque; return bf_realloc(s, ptr, size); } /* return the length in bytes. 
A trailing '\0' is added */ static char *bf_ftoa_internal(size_t *plen, const bf_t *a2, int radix, limb_t prec, bf_flags_t flags, BOOL is_dec) { bf_context_t *ctx = a2->ctx; DynBuf s_s, *s = &s_s; int radix_bits; // bf_print_str("ftoa", a2); // printf("radix=%d\n", radix); dbuf_init2(s, ctx, bf_dbuf_realloc); if (a2->expn == BF_EXP_NAN) { dbuf_putstr(s, "NaN"); } else { if (a2->sign) dbuf_putc(s, '-'); if (a2->expn == BF_EXP_INF) { if (flags & BF_FTOA_JS_QUIRKS) dbuf_putstr(s, "Infinity"); else dbuf_putstr(s, "Inf"); } else { int fmt, ret; slimb_t n_digits, n, i, n_max, n1; bf_t a1_s, *a1 = &a1_s; if ((radix & (radix - 1)) != 0) radix_bits = 0; else radix_bits = ceil_log2(radix); fmt = flags & BF_FTOA_FORMAT_MASK; bf_init(ctx, a1); if (fmt == BF_FTOA_FORMAT_FRAC) { if (is_dec || radix_bits != 0) { if (bf_set(a1, a2)) goto fail1; #ifdef USE_BF_DEC if (is_dec) { if (bfdec_round((bfdec_t *)a1, prec, (flags & BF_RND_MASK) | BF_FLAG_RADPNT_PREC) & BF_ST_MEM_ERROR) goto fail1; n = a1->expn; } else #endif { if (bf_round(a1, prec * radix_bits, (flags & BF_RND_MASK) | BF_FLAG_RADPNT_PREC) & BF_ST_MEM_ERROR) goto fail1; n = ceil_div(a1->expn, radix_bits); } if (flags & BF_FTOA_ADD_PREFIX) { if (radix == 16) dbuf_putstr(s, "0x"); else if (radix == 8) dbuf_putstr(s, "0o"); else if (radix == 2) dbuf_putstr(s, "0b"); } if (a1->expn == BF_EXP_ZERO) { dbuf_putstr(s, "0"); if (prec > 0) { dbuf_putstr(s, "."); for(i = 0; i < prec; i++) { dbuf_putc(s, '0'); } } } else { n_digits = prec + n; if (n <= 0) { /* 0.x */ dbuf_putstr(s, "0."); for(i = 0; i < -n; i++) { dbuf_putc(s, '0'); } if (n_digits > 0) { output_digits(s, a1, radix, n_digits, n_digits, is_dec); } } else { output_digits(s, a1, radix, n_digits, n, is_dec); } } } else { size_t pos, start; bf_t a_s, *a = &a_s; /* make a positive number */ a->tab = a2->tab; a->len = a2->len; a->expn = a2->expn; a->sign = 0; /* one more digit for the rounding */ n = 1 + bf_mul_log2_radix(bf_max(a->expn, 0), radix, TRUE, TRUE); n_digits = n + prec; n1 = n; if (bf_convert_to_radix(a1, &n1, a, radix, n_digits, flags & BF_RND_MASK, TRUE)) goto fail1; start = s->size; output_digits(s, a1, radix, n_digits, n, is_dec); /* remove leading zeros because we allocated one more digit */ pos = start; while ((pos + 1) < s->size && s->buf[pos] == '0' && s->buf[pos + 1] != '.') pos++; if (pos > start) { memmove(s->buf + start, s->buf + pos, s->size - pos); s->size -= (pos - start); } } } else { #ifdef USE_BF_DEC if (is_dec) { if (bf_set(a1, a2)) goto fail1; if (fmt == BF_FTOA_FORMAT_FIXED) { n_digits = prec; n_max = n_digits; if (bfdec_round((bfdec_t *)a1, prec, (flags & BF_RND_MASK)) & BF_ST_MEM_ERROR) goto fail1; } else { /* prec is ignored */ prec = n_digits = a1->len * LIMB_DIGITS; /* remove the trailing zero digits */ while (n_digits > 1 && get_digit(a1->tab, a1->len, prec - n_digits) == 0) { n_digits--; } n_max = n_digits + 4; } n = a1->expn; } else #endif if (radix_bits != 0) { if (bf_set(a1, a2)) goto fail1; if (fmt == BF_FTOA_FORMAT_FIXED) { slimb_t prec_bits; n_digits = prec; n_max = n_digits; /* align to the radix point */ prec_bits = prec * radix_bits - smod(-a1->expn, radix_bits); if (bf_round(a1, prec_bits, (flags & BF_RND_MASK)) & BF_ST_MEM_ERROR) goto fail1; } else { limb_t digit_mask; slimb_t pos; /* position of the digit before the most significant digit in bits */ pos = a1->len * LIMB_BITS + smod(-a1->expn, radix_bits); n_digits = ceil_div(pos, radix_bits); /* remove the trailing zero digits */ digit_mask = ((limb_t)1 << radix_bits) - 1; while (n_digits > 1 && 
(get_bits(a1->tab, a1->len, pos - n_digits * radix_bits) & digit_mask) == 0) { n_digits--; } n_max = n_digits + 4; } n = ceil_div(a1->expn, radix_bits); } else { bf_t a_s, *a = &a_s; /* make a positive number */ a->tab = a2->tab; a->len = a2->len; a->expn = a2->expn; a->sign = 0; if (fmt == BF_FTOA_FORMAT_FIXED) { n_digits = prec; n_max = n_digits; } else { slimb_t n_digits_max, n_digits_min; assert(prec != BF_PREC_INF); n_digits = 1 + bf_mul_log2_radix(prec, radix, TRUE, TRUE); /* max number of digits for non exponential notation. The rational is to have the same rule as JS i.e. n_max = 21 for 64 bit float in base 10. */ n_max = n_digits + 4; if (fmt == BF_FTOA_FORMAT_FREE_MIN) { bf_t b_s, *b = &b_s; /* find the minimum number of digits by dichotomy. */ /* XXX: inefficient */ n_digits_max = n_digits; n_digits_min = 1; bf_init(ctx, b); while (n_digits_min < n_digits_max) { n_digits = (n_digits_min + n_digits_max) / 2; if (bf_convert_to_radix(a1, &n, a, radix, n_digits, flags & BF_RND_MASK, FALSE)) { bf_delete(b); goto fail1; } /* convert back to a number and compare */ ret = bf_mul_pow_radix(b, a1, radix, n - n_digits, prec, (flags & ~BF_RND_MASK) | BF_RNDN); if (ret & BF_ST_MEM_ERROR) { bf_delete(b); goto fail1; } if (bf_cmpu(b, a) == 0) { n_digits_max = n_digits; } else { n_digits_min = n_digits + 1; } } bf_delete(b); n_digits = n_digits_max; } } if (bf_convert_to_radix(a1, &n, a, radix, n_digits, flags & BF_RND_MASK, FALSE)) { fail1: bf_delete(a1); goto fail; } } if (a1->expn == BF_EXP_ZERO && fmt != BF_FTOA_FORMAT_FIXED && !(flags & BF_FTOA_FORCE_EXP)) { /* just output zero */ dbuf_putstr(s, "0"); } else { if (flags & BF_FTOA_ADD_PREFIX) { if (radix == 16) dbuf_putstr(s, "0x"); else if (radix == 8) dbuf_putstr(s, "0o"); else if (radix == 2) dbuf_putstr(s, "0b"); } if (a1->expn == BF_EXP_ZERO) n = 1; if ((flags & BF_FTOA_FORCE_EXP) || n <= -6 || n > n_max) { const char *fmt; /* exponential notation */ output_digits(s, a1, radix, n_digits, 1, is_dec); if (radix_bits != 0 && radix <= 16) { if (flags & BF_FTOA_JS_QUIRKS) fmt = "p%+" PRId_LIMB; else fmt = "p%" PRId_LIMB; dbuf_printf(s, fmt, (n - 1) * radix_bits); } else { if (flags & BF_FTOA_JS_QUIRKS) fmt = "%c%+" PRId_LIMB; else fmt = "%c%" PRId_LIMB; dbuf_printf(s, fmt, radix <= 10 ? 
'e' : '@', n - 1); } } else if (n <= 0) { /* 0.x */ dbuf_putstr(s, "0."); for(i = 0; i < -n; i++) { dbuf_putc(s, '0'); } output_digits(s, a1, radix, n_digits, n_digits, is_dec); } else { if (n_digits <= n) { /* no dot */ output_digits(s, a1, radix, n_digits, n_digits, is_dec); for(i = 0; i < (n - n_digits); i++) dbuf_putc(s, '0'); } else { output_digits(s, a1, radix, n_digits, n, is_dec); } } } } bf_delete(a1); } } dbuf_putc(s, '\0'); if (dbuf_error(s)) goto fail; if (plen) *plen = s->size - 1; return (char *)s->buf; fail: bf_free(ctx, s->buf); if (plen) *plen = 0; return NULL; } char *bf_ftoa(size_t *plen, const bf_t *a, int radix, limb_t prec, bf_flags_t flags) { return bf_ftoa_internal(plen, a, radix, prec, flags, FALSE); } /***************************************************************/ /* transcendental functions */ /* Note: the algorithm is from MPFR */ static void bf_const_log2_rec(bf_t *T, bf_t *P, bf_t *Q, limb_t n1, limb_t n2, BOOL need_P) { bf_context_t *s = T->ctx; if ((n2 - n1) == 1) { if (n1 == 0) { bf_set_ui(P, 3); } else { bf_set_ui(P, n1); P->sign = 1; } bf_set_ui(Q, 2 * n1 + 1); Q->expn += 2; bf_set(T, P); } else { limb_t m; bf_t T1_s, *T1 = &T1_s; bf_t P1_s, *P1 = &P1_s; bf_t Q1_s, *Q1 = &Q1_s; m = n1 + ((n2 - n1) >> 1); bf_const_log2_rec(T, P, Q, n1, m, TRUE); bf_init(s, T1); bf_init(s, P1); bf_init(s, Q1); bf_const_log2_rec(T1, P1, Q1, m, n2, need_P); bf_mul(T, T, Q1, BF_PREC_INF, BF_RNDZ); bf_mul(T1, T1, P, BF_PREC_INF, BF_RNDZ); bf_add(T, T, T1, BF_PREC_INF, BF_RNDZ); if (need_P) bf_mul(P, P, P1, BF_PREC_INF, BF_RNDZ); bf_mul(Q, Q, Q1, BF_PREC_INF, BF_RNDZ); bf_delete(T1); bf_delete(P1); bf_delete(Q1); } } /* compute log(2) with faithful rounding at precision 'prec' */ static void bf_const_log2_internal(bf_t *T, limb_t prec) { limb_t w, N; bf_t P_s, *P = &P_s; bf_t Q_s, *Q = &Q_s; w = prec + 15; N = w / 3 + 1; bf_init(T->ctx, P); bf_init(T->ctx, Q); bf_const_log2_rec(T, P, Q, 0, N, FALSE); bf_div(T, T, Q, prec, BF_RNDN); bf_delete(P); bf_delete(Q); } /* PI constant */ #define CHUD_A 13591409 #define CHUD_B 545140134 #define CHUD_C 640320 #define CHUD_BITS_PER_TERM 47 static void chud_bs(bf_t *P, bf_t *Q, bf_t *G, int64_t a, int64_t b, int need_g, limb_t prec) { bf_context_t *s = P->ctx; int64_t c; if (a == (b - 1)) { bf_t T0, T1; bf_init(s, &T0); bf_init(s, &T1); bf_set_ui(G, 2 * b - 1); bf_mul_ui(G, G, 6 * b - 1, prec, BF_RNDN); bf_mul_ui(G, G, 6 * b - 5, prec, BF_RNDN); bf_set_ui(&T0, CHUD_B); bf_mul_ui(&T0, &T0, b, prec, BF_RNDN); bf_set_ui(&T1, CHUD_A); bf_add(&T0, &T0, &T1, prec, BF_RNDN); bf_mul(P, G, &T0, prec, BF_RNDN); P->sign = b & 1; bf_set_ui(Q, b); bf_mul_ui(Q, Q, b, prec, BF_RNDN); bf_mul_ui(Q, Q, b, prec, BF_RNDN); bf_mul_ui(Q, Q, (uint64_t)CHUD_C * CHUD_C * CHUD_C / 24, prec, BF_RNDN); bf_delete(&T0); bf_delete(&T1); } else { bf_t P2, Q2, G2; bf_init(s, &P2); bf_init(s, &Q2); bf_init(s, &G2); c = (a + b) / 2; chud_bs(P, Q, G, a, c, 1, prec); chud_bs(&P2, &Q2, &G2, c, b, need_g, prec); /* Q = Q1 * Q2 */ /* G = G1 * G2 */ /* P = P1 * Q2 + P2 * G1 */ bf_mul(&P2, &P2, G, prec, BF_RNDN); if (!need_g) bf_set_ui(G, 0); bf_mul(P, P, &Q2, prec, BF_RNDN); bf_add(P, P, &P2, prec, BF_RNDN); bf_delete(&P2); bf_mul(Q, Q, &Q2, prec, BF_RNDN); bf_delete(&Q2); if (need_g) bf_mul(G, G, &G2, prec, BF_RNDN); bf_delete(&G2); } } /* compute Pi with faithful rounding at precision 'prec' using the Chudnovsky formula */ static void bf_const_pi_internal(bf_t *Q, limb_t prec) { bf_context_t *s = Q->ctx; int64_t n, prec1; bf_t P, G; /* number of serie terms */ n = prec / 
CHUD_BITS_PER_TERM + 1; /* XXX: precision analysis */ prec1 = prec + 32; bf_init(s, &P); bf_init(s, &G); chud_bs(&P, Q, &G, 0, n, 0, BF_PREC_INF); bf_mul_ui(&G, Q, CHUD_A, prec1, BF_RNDN); bf_add(&P, &G, &P, prec1, BF_RNDN); bf_div(Q, Q, &P, prec1, BF_RNDF); bf_set_ui(&P, CHUD_C); bf_sqrt(&G, &P, prec1, BF_RNDF); bf_mul_ui(&G, &G, (uint64_t)CHUD_C / 12, prec1, BF_RNDF); bf_mul(Q, Q, &G, prec, BF_RNDN); bf_delete(&P); bf_delete(&G); } static int bf_const_get(bf_t *T, limb_t prec, bf_flags_t flags, BFConstCache *c, void (*func)(bf_t *res, limb_t prec), int sign) { limb_t ziv_extra_bits, prec1; ziv_extra_bits = 32; for(;;) { prec1 = prec + ziv_extra_bits; if (c->prec < prec1) { if (c->val.len == 0) bf_init(T->ctx, &c->val); func(&c->val, prec1); c->prec = prec1; } else { prec1 = c->prec; } bf_set(T, &c->val); T->sign = sign; if (!bf_can_round(T, prec, flags & BF_RND_MASK, prec1)) { /* and more precision and retry */ ziv_extra_bits = ziv_extra_bits + (ziv_extra_bits / 2); } else { break; } } return bf_round(T, prec, flags); } static void bf_const_free(BFConstCache *c) { bf_delete(&c->val); memset(c, 0, sizeof(*c)); } int bf_const_log2(bf_t *T, limb_t prec, bf_flags_t flags) { bf_context_t *s = T->ctx; return bf_const_get(T, prec, flags, &s->log2_cache, bf_const_log2_internal, 0); } /* return rounded pi * (1 - 2 * sign) */ static int bf_const_pi_signed(bf_t *T, int sign, limb_t prec, bf_flags_t flags) { bf_context_t *s = T->ctx; return bf_const_get(T, prec, flags, &s->pi_cache, bf_const_pi_internal, sign); } int bf_const_pi(bf_t *T, limb_t prec, bf_flags_t flags) { return bf_const_pi_signed(T, 0, prec, flags); } void bf_clear_cache(bf_context_t *s) { #ifdef USE_FFT_MUL fft_clear_cache(s); #endif bf_const_free(&s->log2_cache); bf_const_free(&s->pi_cache); } /* ZivFunc should compute the result 'r' with faithful rounding at precision 'prec'. For efficiency purposes, the final bf_round() does not need to be done in the function. */ typedef int ZivFunc(bf_t *r, const bf_t *a, limb_t prec, void *opaque); static int bf_ziv_rounding(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags, ZivFunc *f, void *opaque) { int rnd_mode, ret; slimb_t prec1, ziv_extra_bits; rnd_mode = flags & BF_RND_MASK; if (rnd_mode == BF_RNDF) { /* no need to iterate */ f(r, a, prec, opaque); ret = 0; } else { ziv_extra_bits = 32; for(;;) { prec1 = prec + ziv_extra_bits; ret = f(r, a, prec1, opaque); if (ret & (BF_ST_OVERFLOW | BF_ST_UNDERFLOW | BF_ST_MEM_ERROR)) { /* overflow or underflow should never happen because it indicates the rounding cannot be done correctly, but we do not catch all the cases */ return ret; } /* if the result is exact, we can stop */ if (!(ret & BF_ST_INEXACT)) { ret = 0; break; } if (bf_can_round(r, prec, rnd_mode, prec1)) { ret = BF_ST_INEXACT; break; } ziv_extra_bits = ziv_extra_bits * 2; // printf("ziv_extra_bits=%" PRId64 "\n", (int64_t)ziv_extra_bits); } } if (r->len == 0) return ret; else return __bf_round(r, prec, flags, r->len, ret); } /* add (1 - 2*e_sign) * 2^e */ static int bf_add_epsilon(bf_t *r, const bf_t *a, slimb_t e, int e_sign, limb_t prec, int flags) { bf_t T_s, *T = &T_s; int ret; /* small argument case: result = 1 + epsilon * sign(x) */ bf_init(a->ctx, T); bf_set_ui(T, 1); T->sign = e_sign; T->expn += e; ret = bf_add(r, r, T, prec, flags); bf_delete(T); return ret; } /* Compute the exponential using faithful rounding at precision 'prec'. 
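   The argument is reduced twice: first exp(a) = exp(a - n*log(2)) * 2^n with
   integer n, then exp(x) = exp(x / 2^K)^(2^K), so only a short Taylor series
   of the reduced argument needs to be summed; the K squarings and the final
   multiplication by 2^n undo the reductions.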
Note: the algorithm is from MPFR */ static int bf_exp_internal(bf_t *r, const bf_t *a, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; slimb_t n, K, l, i, prec1; assert(r != a); /* argument reduction: T = a - n*log(2) with 0 <= T < log(2) and n integer. */ bf_init(s, T); if (a->expn <= -1) { /* 0 <= abs(a) <= 0.5 */ if (a->sign) n = -1; else n = 0; } else { bf_const_log2(T, LIMB_BITS, BF_RNDZ); bf_div(T, a, T, LIMB_BITS, BF_RNDD); bf_get_limb(&n, T, 0); } K = bf_isqrt((prec + 1) / 2); l = (prec - 1) / K + 1; /* XXX: precision analysis ? */ prec1 = prec + (K + 2 * l + 18) + K + 8; if (a->expn > 0) prec1 += a->expn; // printf("n=%ld K=%ld prec1=%ld\n", n, K, prec1); bf_const_log2(T, prec1, BF_RNDF); bf_mul_si(T, T, n, prec1, BF_RNDN); bf_sub(T, a, T, prec1, BF_RNDN); /* reduce the range of T */ bf_mul_2exp(T, -K, BF_PREC_INF, BF_RNDZ); /* Taylor expansion around zero : 1 + x + x^2/2 + ... + x^n/n! = (1 + x * (1 + x/2 * (1 + ... (x/n)))) */ { bf_t U_s, *U = &U_s; bf_init(s, U); bf_set_ui(r, 1); for(i = l ; i >= 1; i--) { bf_set_ui(U, i); bf_div(U, T, U, prec1, BF_RNDN); bf_mul(r, r, U, prec1, BF_RNDN); bf_add_si(r, r, 1, prec1, BF_RNDN); } bf_delete(U); } bf_delete(T); /* undo the range reduction */ for(i = 0; i < K; i++) { bf_mul(r, r, r, prec1, BF_RNDN | BF_FLAG_EXT_EXP); } /* undo the argument reduction */ bf_mul_2exp(r, n, BF_PREC_INF, BF_RNDZ | BF_FLAG_EXT_EXP); return BF_ST_INEXACT; } /* crude overflow and underflow tests for exp(a). a_low <= a <= a_high */ static int check_exp_underflow_overflow(bf_context_t *s, bf_t *r, const bf_t *a_low, const bf_t *a_high, limb_t prec, bf_flags_t flags) { bf_t T_s, *T = &T_s; bf_t log2_s, *log2 = &log2_s; slimb_t e_min, e_max; if (a_high->expn <= 0) return 0; e_max = (limb_t)1 << (bf_get_exp_bits(flags) - 1); e_min = -e_max + 3; if (flags & BF_FLAG_SUBNORMAL) e_min -= (prec - 1); bf_init(s, T); bf_init(s, log2); bf_const_log2(log2, LIMB_BITS, BF_RNDU); bf_mul_ui(T, log2, e_max, LIMB_BITS, BF_RNDU); /* a_low > e_max * log(2) implies exp(a) > e_max */ if (bf_cmp_lt(T, a_low) > 0) { /* overflow */ bf_delete(T); bf_delete(log2); return bf_set_overflow(r, 0, prec, flags); } /* a_high < (e_min - 2) * log(2) implies exp(a) < (e_min - 2) */ bf_const_log2(log2, LIMB_BITS, BF_RNDD); bf_mul_si(T, log2, e_min - 2, LIMB_BITS, BF_RNDD); if (bf_cmp_lt(a_high, T)) { int rnd_mode = flags & BF_RND_MASK; /* underflow */ bf_delete(T); bf_delete(log2); if (rnd_mode == BF_RNDU) { /* set the smallest value */ bf_set_ui(r, 1); r->expn = e_min; } else { bf_set_zero(r, 0); } return BF_ST_UNDERFLOW | BF_ST_INEXACT; } bf_delete(log2); bf_delete(T); return 0; } int bf_exp(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { bf_context_t *s = r->ctx; int ret; assert(r != a); if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); } else if (a->expn == BF_EXP_INF) { if (a->sign) bf_set_zero(r, 0); else bf_set_inf(r, 0); } else { bf_set_ui(r, 1); } return 0; } ret = check_exp_underflow_overflow(s, r, a, a, prec, flags); if (ret) return ret; if (a->expn < 0 && (-a->expn) >= (prec + 2)) { /* small argument case: result = 1 + epsilon * sign(x) */ bf_set_ui(r, 1); return bf_add_epsilon(r, r, -(prec + 2), a->sign, prec, flags); } return bf_ziv_rounding(r, a, prec, flags, bf_exp_internal, NULL); } static int bf_log_internal(bf_t *r, const bf_t *a, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; bf_t U_s, *U = &U_s; bf_t V_s, *V = &V_s; slimb_t n, prec1, l, i, K; assert(r != a); bf_init(s, T); /* argument reduction 1 */ 
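/* Reduction identities (illustration): writing a = T * 2^n gives
   log(a) = log(T) + n*log(2). Each step T <- T / (1 + sqrt(1 + T)) equals
   sqrt(1 + T) - 1, so log(1 + T) is halved; after K steps this is undone,
   together with the factor 2 of the series log((1+y)/(1-y)) = 2*atanh(y),
   by the single multiplication by 2^(K+1) further below. */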
/* T=a*2^n with 2/3 <= T <= 4/3 */ { bf_t U_s, *U = &U_s; bf_set(T, a); n = T->expn; T->expn = 0; /* U= ~ 2/3 */ bf_init(s, U); bf_set_ui(U, 0xaaaaaaaa); U->expn = 0; if (bf_cmp_lt(T, U)) { T->expn++; n--; } bf_delete(U); } // printf("n=%ld\n", n); // bf_print_str("T", T); /* XXX: precision analysis */ /* number of iterations for argument reduction 2 */ K = bf_isqrt((prec + 1) / 2); /* order of Taylor expansion */ l = prec / (2 * K) + 1; /* precision of the intermediate computations */ prec1 = prec + K + 2 * l + 32; bf_init(s, U); bf_init(s, V); /* Note: cancellation occurs here, so we use more precision (XXX: reduce the precision by computing the exact cancellation) */ bf_add_si(T, T, -1, BF_PREC_INF, BF_RNDN); /* argument reduction 2 */ for(i = 0; i < K; i++) { /* T = T / (1 + sqrt(1 + T)) */ bf_add_si(U, T, 1, prec1, BF_RNDN); bf_sqrt(V, U, prec1, BF_RNDF); bf_add_si(U, V, 1, prec1, BF_RNDN); bf_div(T, T, U, prec1, BF_RNDN); } { bf_t Y_s, *Y = &Y_s; bf_t Y2_s, *Y2 = &Y2_s; bf_init(s, Y); bf_init(s, Y2); /* compute ln(1+x) = ln((1+y)/(1-y)) with y=x/(2+x) = y + y^3/3 + ... + y^(2*l + 1) / (2*l+1) with Y=Y^2 = y*(1+Y/3+Y^2/5+...) = y*(1+Y*(1/3+Y*(1/5 + ...))) */ bf_add_si(Y, T, 2, prec1, BF_RNDN); bf_div(Y, T, Y, prec1, BF_RNDN); bf_mul(Y2, Y, Y, prec1, BF_RNDN); bf_set_ui(r, 0); for(i = l; i >= 1; i--) { bf_set_ui(U, 1); bf_set_ui(V, 2 * i + 1); bf_div(U, U, V, prec1, BF_RNDN); bf_add(r, r, U, prec1, BF_RNDN); bf_mul(r, r, Y2, prec1, BF_RNDN); } bf_add_si(r, r, 1, prec1, BF_RNDN); bf_mul(r, r, Y, prec1, BF_RNDN); bf_delete(Y); bf_delete(Y2); } bf_delete(V); bf_delete(U); /* multiplication by 2 for the Taylor expansion and undo the argument reduction 2*/ bf_mul_2exp(r, K + 1, BF_PREC_INF, BF_RNDZ); /* undo the argument reduction 1 */ bf_const_log2(T, prec1, BF_RNDF); bf_mul_si(T, T, n, prec1, BF_RNDN); bf_add(r, r, T, prec1, BF_RNDN); bf_delete(T); return BF_ST_INEXACT; } int bf_log(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; assert(r != a); if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF) { if (a->sign) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_set_inf(r, 0); return 0; } } else { bf_set_inf(r, 1); return 0; } } if (a->sign) { bf_set_nan(r); return BF_ST_INVALID_OP; } bf_init(s, T); bf_set_ui(T, 1); if (bf_cmp_eq(a, T)) { bf_set_zero(r, 0); bf_delete(T); return 0; } bf_delete(T); return bf_ziv_rounding(r, a, prec, flags, bf_log_internal, NULL); } /* x and y finite and x > 0 */ static int bf_pow_generic(bf_t *r, const bf_t *x, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; const bf_t *y = opaque; bf_t T_s, *T = &T_s; limb_t prec1; bf_init(s, T); /* XXX: proof for the added precision */ prec1 = prec + 32; bf_log(T, x, prec1, BF_RNDF | BF_FLAG_EXT_EXP); bf_mul(T, T, y, prec1, BF_RNDF | BF_FLAG_EXT_EXP); if (bf_is_nan(T)) bf_set_nan(r); else bf_exp_internal(r, T, prec1, NULL); /* no overflow/underlow test needed */ bf_delete(T); return BF_ST_INEXACT; } /* x and y finite, x > 0, y integer and y fits on one limb */ static int bf_pow_int(bf_t *r, const bf_t *x, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; const bf_t *y = opaque; bf_t T_s, *T = &T_s; limb_t prec1; int ret; slimb_t y1; bf_get_limb(&y1, y, 0); if (y1 < 0) y1 = -y1; /* XXX: proof for the added precision */ prec1 = prec + ceil_log2(y1) * 2 + 8; ret = bf_pow_ui(r, x, y1 < 0 ? 
-y1 : y1, prec1, BF_RNDN | BF_FLAG_EXT_EXP); if (y->sign) { bf_init(s, T); bf_set_ui(T, 1); ret |= bf_div(r, T, r, prec1, BF_RNDN | BF_FLAG_EXT_EXP); bf_delete(T); } return ret; } /* x must be a finite non zero float. Return TRUE if there is a floating point number r such as x=r^(2^n) and return this floating point number 'r'. Otherwise return FALSE and r is undefined. */ static BOOL check_exact_power2n(bf_t *r, const bf_t *x, slimb_t n) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; slimb_t e, i, er; limb_t v; /* x = m*2^e with m odd integer */ e = bf_get_exp_min(x); /* fast check on the exponent */ if (n > (LIMB_BITS - 1)) { if (e != 0) return FALSE; er = 0; } else { if ((e & (((limb_t)1 << n) - 1)) != 0) return FALSE; er = e >> n; } /* every perfect odd square = 1 modulo 8 */ v = get_bits(x->tab, x->len, x->len * LIMB_BITS - x->expn + e); if ((v & 7) != 1) return FALSE; bf_init(s, T); bf_set(T, x); T->expn -= e; for(i = 0; i < n; i++) { if (i != 0) bf_set(T, r); if (bf_sqrtrem(r, NULL, T) != 0) return FALSE; } r->expn += er; return TRUE; } /* prec = BF_PREC_INF is accepted for x and y integers and y >= 0 */ int bf_pow(bf_t *r, const bf_t *x, const bf_t *y, limb_t prec, bf_flags_t flags) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; bf_t ytmp_s; BOOL y_is_int, y_is_odd; int r_sign, ret, rnd_mode; slimb_t y_emin; if (x->len == 0 || y->len == 0) { if (y->expn == BF_EXP_ZERO) { /* pow(x, 0) = 1 */ bf_set_ui(r, 1); } else if (x->expn == BF_EXP_NAN) { bf_set_nan(r); } else { int cmp_x_abs_1; bf_set_ui(r, 1); cmp_x_abs_1 = bf_cmpu(x, r); if (cmp_x_abs_1 == 0 && (flags & BF_POW_JS_QUIRKS) && (y->expn >= BF_EXP_INF)) { bf_set_nan(r); } else if (cmp_x_abs_1 == 0 && (!x->sign || y->expn != BF_EXP_NAN)) { /* pow(1, y) = 1 even if y = NaN */ /* pow(-1, +/-inf) = 1 */ } else if (y->expn == BF_EXP_NAN) { bf_set_nan(r); } else if (y->expn == BF_EXP_INF) { if (y->sign == (cmp_x_abs_1 > 0)) { bf_set_zero(r, 0); } else { bf_set_inf(r, 0); } } else { y_emin = bf_get_exp_min(y); y_is_odd = (y_emin == 0); if (y->sign == (x->expn == BF_EXP_ZERO)) { bf_set_inf(r, y_is_odd & x->sign); if (y->sign) { /* pow(0, y) with y < 0 */ return BF_ST_DIVIDE_ZERO; } } else { bf_set_zero(r, y_is_odd & x->sign); } } } return 0; } bf_init(s, T); bf_set(T, x); y_emin = bf_get_exp_min(y); y_is_int = (y_emin >= 0); rnd_mode = flags & BF_RND_MASK; if (x->sign) { if (!y_is_int) { bf_set_nan(r); bf_delete(T); return BF_ST_INVALID_OP; } y_is_odd = (y_emin == 0); r_sign = y_is_odd; /* change the directed rounding mode if the sign of the result is changed */ if (r_sign && (rnd_mode == BF_RNDD || rnd_mode == BF_RNDU)) flags ^= 1; bf_neg(T); } else { r_sign = 0; } bf_set_ui(r, 1); if (bf_cmp_eq(T, r)) { /* abs(x) = 1: nothing more to do */ ret = 0; } else { /* check the overflow/underflow cases */ { bf_t al_s, *al = &al_s; bf_t ah_s, *ah = &ah_s; limb_t precl = LIMB_BITS; bf_init(s, al); bf_init(s, ah); /* compute bounds of log(abs(x)) * y with a low precision */ /* XXX: compute bf_log() once */ /* XXX: add a fast test before this slow test */ bf_log(al, T, precl, BF_RNDD); bf_log(ah, T, precl, BF_RNDU); bf_mul(al, al, y, precl, BF_RNDD ^ y->sign); bf_mul(ah, ah, y, precl, BF_RNDU ^ y->sign); ret = check_exp_underflow_overflow(s, r, al, ah, prec, flags); bf_delete(al); bf_delete(ah); if (ret) goto done; } if (y_is_int) { slimb_t T_bits, e; int_pow: T_bits = T->expn - bf_get_exp_min(T); if (T_bits == 1) { /* pow(2^b, y) = 2^(b*y) */ bf_mul_si(T, y, T->expn - 1, LIMB_BITS, BF_RNDZ); bf_get_limb(&e, T, 0); bf_set_ui(r, 1); ret = 
bf_mul_2exp(r, e, prec, flags); } else if (prec == BF_PREC_INF) { slimb_t y1; /* specific case for infinite precision (integer case) */ bf_get_limb(&y1, y, 0); assert(!y->sign); /* x must be an integer, so abs(x) >= 2 */ if (y1 >= ((slimb_t)1 << BF_EXP_BITS_MAX)) { bf_delete(T); return bf_set_overflow(r, 0, BF_PREC_INF, flags); } ret = bf_pow_ui(r, T, y1, BF_PREC_INF, BF_RNDZ); } else { if (y->expn <= 31) { /* small enough power: use exponentiation in all cases */ } else if (y->sign) { /* cannot be exact */ goto general_case; } else { if (rnd_mode == BF_RNDF) goto general_case; /* no need to track exact results */ /* see if the result has a chance to be exact: if x=a*2^b (a odd), x^y=a^y*2^(b*y) x^y needs a precision of at least floor_log2(a)*y bits */ bf_mul_si(r, y, T_bits - 1, LIMB_BITS, BF_RNDZ); bf_get_limb(&e, r, 0); if (prec < e) goto general_case; } ret = bf_ziv_rounding(r, T, prec, flags, bf_pow_int, (void *)y); } } else { if (rnd_mode != BF_RNDF) { bf_t *y1; if (y_emin < 0 && check_exact_power2n(r, T, -y_emin)) { /* the problem is reduced to a power to an integer */ #if 0 printf("\nn=%" PRId64 "\n", -(int64_t)y_emin); bf_print_str("T", T); bf_print_str("r", r); #endif bf_set(T, r); y1 = &ytmp_s; y1->tab = y->tab; y1->len = y->len; y1->sign = y->sign; y1->expn = y->expn - y_emin; y = y1; goto int_pow; } } general_case: ret = bf_ziv_rounding(r, T, prec, flags, bf_pow_generic, (void *)y); } } done: bf_delete(T); r->sign = r_sign; return ret; } /* compute sqrt(-2*x-x^2) to get |sin(x)| from cos(x) - 1. */ static void bf_sqrt_sin(bf_t *r, const bf_t *x, limb_t prec1) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; bf_init(s, T); bf_set(T, x); bf_mul(r, T, T, prec1, BF_RNDN); bf_mul_2exp(T, 1, BF_PREC_INF, BF_RNDZ); bf_add(T, T, r, prec1, BF_RNDN); bf_neg(T); bf_sqrt(r, T, prec1, BF_RNDF); bf_delete(T); } static int bf_sincos(bf_t *s, bf_t *c, const bf_t *a, limb_t prec) { bf_context_t *s1 = a->ctx; bf_t T_s, *T = &T_s; bf_t U_s, *U = &U_s; bf_t r_s, *r = &r_s; slimb_t K, prec1, i, l, mod, prec2; int is_neg; assert(c != a && s != a); bf_init(s1, T); bf_init(s1, U); bf_init(s1, r); /* XXX: precision analysis */ K = bf_isqrt(prec / 2); l = prec / (2 * K) + 1; prec1 = prec + 2 * K + l + 8; /* after the modulo reduction, -pi/4 <= T <= pi/4 */ if (a->expn <= -1) { /* abs(a) <= 0.25: no modulo reduction needed */ bf_set(T, a); mod = 0; } else { slimb_t cancel; cancel = 0; for(;;) { prec2 = prec1 + a->expn + cancel; bf_const_pi(U, prec2, BF_RNDF); bf_mul_2exp(U, -1, BF_PREC_INF, BF_RNDZ); bf_remquo(&mod, T, a, U, prec2, BF_RNDN, BF_RNDN); // printf("T.expn=%ld prec2=%ld\n", T->expn, prec2); if (mod == 0 || (T->expn != BF_EXP_ZERO && (T->expn + prec2) >= (prec1 - 1))) break; /* increase the number of bits until the precision is good enough */ cancel = bf_max(-T->expn, (cancel + 1) * 3 / 2); } mod &= 3; } is_neg = T->sign; /* compute cosm1(x) = cos(x) - 1 */ bf_mul(T, T, T, prec1, BF_RNDN); bf_mul_2exp(T, -2 * K, BF_PREC_INF, BF_RNDZ); /* Taylor expansion: -x^2/2 + x^4/4! - x^6/6! + ... 
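(At this point T holds x^2 / 4^K, the square of the argument after the halving reduction. The loop below evaluates the truncated series by a Horner-style recurrence: r starts at 1, each iteration computes r <- 1 - r*T/((2*i-1)*(2*i)), and the final step i = 1 omits the trailing +1, i.e. r <- -r*T/2, so r ends up approximating cosm1 of the reduced argument.)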
*/ bf_set_ui(r, 1); for(i = l ; i >= 1; i--) { bf_set_ui(U, 2 * i - 1); bf_mul_ui(U, U, 2 * i, BF_PREC_INF, BF_RNDZ); bf_div(U, T, U, prec1, BF_RNDN); bf_mul(r, r, U, prec1, BF_RNDN); bf_neg(r); if (i != 1) bf_add_si(r, r, 1, prec1, BF_RNDN); } bf_delete(U); /* undo argument reduction: cosm1(2*x)= 2*(2*cosm1(x)+cosm1(x)^2) */ for(i = 0; i < K; i++) { bf_mul(T, r, r, prec1, BF_RNDN); bf_mul_2exp(r, 1, BF_PREC_INF, BF_RNDZ); bf_add(r, r, T, prec1, BF_RNDN); bf_mul_2exp(r, 1, BF_PREC_INF, BF_RNDZ); } bf_delete(T); if (c) { if ((mod & 1) == 0) { bf_add_si(c, r, 1, prec1, BF_RNDN); } else { bf_sqrt_sin(c, r, prec1); c->sign = is_neg ^ 1; } c->sign ^= mod >> 1; } if (s) { if ((mod & 1) == 0) { bf_sqrt_sin(s, r, prec1); s->sign = is_neg; } else { bf_add_si(s, r, 1, prec1, BF_RNDN); } s->sign ^= mod >> 1; } bf_delete(r); return BF_ST_INEXACT; } static int bf_cos_internal(bf_t *r, const bf_t *a, limb_t prec, void *opaque) { return bf_sincos(NULL, r, a, prec); } int bf_cos(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_set_ui(r, 1); return 0; } } /* small argument case: result = 1+r(x) with r(x) = -x^2/2 + O(X^4). We assume r(x) < 2^(2*EXP(x) - 1). */ if (a->expn < 0) { slimb_t e; e = 2 * a->expn - 1; if (e < -(prec + 2)) { bf_set_ui(r, 1); return bf_add_epsilon(r, r, e, 1, prec, flags); } } return bf_ziv_rounding(r, a, prec, flags, bf_cos_internal, NULL); } static int bf_sin_internal(bf_t *r, const bf_t *a, limb_t prec, void *opaque) { return bf_sincos(r, NULL, a, prec); } int bf_sin(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_set_zero(r, a->sign); return 0; } } /* small argument case: result = x+r(x) with r(x) = -x^3/6 + O(X^5). We assume r(x) < 2^(3*EXP(x) - 2). */ if (a->expn < 0) { slimb_t e; e = sat_add(2 * a->expn, a->expn - 2); if (e < a->expn - bf_max(prec + 2, a->len * LIMB_BITS + 2)) { bf_set(r, a); return bf_add_epsilon(r, r, e, 1 - a->sign, prec, flags); } } return bf_ziv_rounding(r, a, prec, flags, bf_sin_internal, NULL); } static int bf_tan_internal(bf_t *r, const bf_t *a, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; limb_t prec1; /* XXX: precision analysis */ prec1 = prec + 8; bf_init(s, T); bf_sincos(r, T, a, prec1); bf_div(r, r, T, prec1, BF_RNDF); bf_delete(T); return BF_ST_INEXACT; } int bf_tan(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { assert(r != a); if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_set_zero(r, a->sign); return 0; } } /* small argument case: result = x+r(x) with r(x) = x^3/3 + O(X^5). We assume r(x) < 2^(3*EXP(x) - 1). 
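(Justification of the bound: |x| < 2^EXP(x), and since EXP(x) < 0 in this branch, |x| < 1/2, so |tan(x) - x| = |x|^3/3 + 2*|x|^5/15 + ... < |x|^3/2 < 2^(3*EXP(x) - 1).)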
*/ if (a->expn < 0) { slimb_t e; e = sat_add(2 * a->expn, a->expn - 1); if (e < a->expn - bf_max(prec + 2, a->len * LIMB_BITS + 2)) { bf_set(r, a); return bf_add_epsilon(r, r, e, a->sign, prec, flags); } } return bf_ziv_rounding(r, a, prec, flags, bf_tan_internal, NULL); } /* if add_pi2 is true, add pi/2 to the result (used for acos(x) to avoid cancellation) */ static int bf_atan_internal(bf_t *r, const bf_t *a, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; BOOL add_pi2 = (BOOL)(intptr_t)opaque; bf_t T_s, *T = &T_s; bf_t U_s, *U = &U_s; bf_t V_s, *V = &V_s; bf_t X2_s, *X2 = &X2_s; int cmp_1; slimb_t prec1, i, K, l; /* XXX: precision analysis */ K = bf_isqrt((prec + 1) / 2); l = prec / (2 * K) + 1; prec1 = prec + K + 2 * l + 32; // printf("prec=%d K=%d l=%d prec1=%d\n", (int)prec, (int)K, (int)l, (int)prec1); bf_init(s, T); cmp_1 = (a->expn >= 1); /* a >= 1 */ if (cmp_1) { bf_set_ui(T, 1); bf_div(T, T, a, prec1, BF_RNDN); } else { bf_set(T, a); } /* abs(T) <= 1 */ /* argument reduction */ bf_init(s, U); bf_init(s, V); bf_init(s, X2); for(i = 0; i < K; i++) { /* T = T / (1 + sqrt(1 + T^2)) */ bf_mul(U, T, T, prec1, BF_RNDN); bf_add_si(U, U, 1, prec1, BF_RNDN); bf_sqrt(V, U, prec1, BF_RNDN); bf_add_si(V, V, 1, prec1, BF_RNDN); bf_div(T, T, V, prec1, BF_RNDN); } /* Taylor series: x - x^3/3 + ... + (-1)^ l * y^(2*l + 1) / (2*l+1) */ bf_mul(X2, T, T, prec1, BF_RNDN); bf_set_ui(r, 0); for(i = l; i >= 1; i--) { bf_set_si(U, 1); bf_set_ui(V, 2 * i + 1); bf_div(U, U, V, prec1, BF_RNDN); bf_neg(r); bf_add(r, r, U, prec1, BF_RNDN); bf_mul(r, r, X2, prec1, BF_RNDN); } bf_neg(r); bf_add_si(r, r, 1, prec1, BF_RNDN); bf_mul(r, r, T, prec1, BF_RNDN); /* undo the argument reduction */ bf_mul_2exp(r, K, BF_PREC_INF, BF_RNDZ); bf_delete(U); bf_delete(V); bf_delete(X2); i = add_pi2; if (cmp_1 > 0) { /* undo the inversion : r = sign(a)*PI/2 - r */ bf_neg(r); i += 1 - 2 * a->sign; } /* add i*(pi/2) with -1 <= i <= 2 */ if (i != 0) { bf_const_pi(T, prec1, BF_RNDF); if (i != 2) bf_mul_2exp(T, -1, BF_PREC_INF, BF_RNDZ); T->sign = (i < 0); bf_add(r, T, r, prec1, BF_RNDN); } bf_delete(T); return BF_ST_INEXACT; } int bf_atan(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; int res; if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF) { /* -PI/2 or PI/2 */ bf_const_pi_signed(r, a->sign, prec, flags); bf_mul_2exp(r, -1, BF_PREC_INF, BF_RNDZ); return BF_ST_INEXACT; } else { bf_set_zero(r, a->sign); return 0; } } bf_init(s, T); bf_set_ui(T, 1); res = bf_cmpu(a, T); bf_delete(T); if (res == 0) { /* short cut: abs(a) == 1 -> +/-pi/4 */ bf_const_pi_signed(r, a->sign, prec, flags); bf_mul_2exp(r, -2, BF_PREC_INF, BF_RNDZ); return BF_ST_INEXACT; } /* small argument case: result = x+r(x) with r(x) = -x^3/3 + O(X^5). We assume r(x) < 2^(3*EXP(x) - 1). 
*/ if (a->expn < 0) { slimb_t e; e = sat_add(2 * a->expn, a->expn - 1); if (e < a->expn - bf_max(prec + 2, a->len * LIMB_BITS + 2)) { bf_set(r, a); return bf_add_epsilon(r, r, e, 1 - a->sign, prec, flags); } } return bf_ziv_rounding(r, a, prec, flags, bf_atan_internal, (void *)FALSE); } static int bf_atan2_internal(bf_t *r, const bf_t *y, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; const bf_t *x = opaque; bf_t T_s, *T = &T_s; limb_t prec1; int ret; if (y->expn == BF_EXP_NAN || x->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } /* compute atan(y/x) assumming inf/inf = 1 and 0/0 = 0 */ bf_init(s, T); prec1 = prec + 32; if (y->expn == BF_EXP_INF && x->expn == BF_EXP_INF) { bf_set_ui(T, 1); T->sign = y->sign ^ x->sign; } else if (y->expn == BF_EXP_ZERO && x->expn == BF_EXP_ZERO) { bf_set_zero(T, y->sign ^ x->sign); } else { bf_div(T, y, x, prec1, BF_RNDF); } ret = bf_atan(r, T, prec1, BF_RNDF); if (x->sign) { /* if x < 0 (it includes -0), return sign(y)*pi + atan(y/x) */ bf_const_pi(T, prec1, BF_RNDF); T->sign = y->sign; bf_add(r, r, T, prec1, BF_RNDN); ret |= BF_ST_INEXACT; } bf_delete(T); return ret; } int bf_atan2(bf_t *r, const bf_t *y, const bf_t *x, limb_t prec, bf_flags_t flags) { return bf_ziv_rounding(r, y, prec, flags, bf_atan2_internal, (void *)x); } static int bf_asin_internal(bf_t *r, const bf_t *a, limb_t prec, void *opaque) { bf_context_t *s = r->ctx; BOOL is_acos = (BOOL)(intptr_t)opaque; bf_t T_s, *T = &T_s; limb_t prec1, prec2; /* asin(x) = atan(x/sqrt(1-x^2)) acos(x) = pi/2 - asin(x) */ prec1 = prec + 8; /* increase the precision in x^2 to compensate the cancellation in (1-x^2) if x is close to 1 */ /* XXX: use less precision when possible */ if (a->expn >= 0) prec2 = BF_PREC_INF; else prec2 = prec1; bf_init(s, T); bf_mul(T, a, a, prec2, BF_RNDN); bf_neg(T); bf_add_si(T, T, 1, prec2, BF_RNDN); bf_sqrt(r, T, prec1, BF_RNDN); bf_div(T, a, r, prec1, BF_RNDN); if (is_acos) bf_neg(T); bf_atan_internal(r, T, prec1, (void *)(intptr_t)is_acos); bf_delete(T); return BF_ST_INEXACT; } int bf_asin(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; int res; if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_set_zero(r, a->sign); return 0; } } bf_init(s, T); bf_set_ui(T, 1); res = bf_cmpu(a, T); bf_delete(T); if (res > 0) { bf_set_nan(r); return BF_ST_INVALID_OP; } /* small argument case: result = x+r(x) with r(x) = x^3/6 + O(X^5). We assume r(x) < 2^(3*EXP(x) - 2). 
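(Justification of the bound: |x| < 2^EXP(x) and |x| < 1/2 in this branch, so |asin(x) - x| = |x|^3/6 + 3*|x|^5/40 + ... < |x|^3/4 < 2^(3*EXP(x) - 2).)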
*/ if (a->expn < 0) { slimb_t e; e = sat_add(2 * a->expn, a->expn - 2); if (e < a->expn - bf_max(prec + 2, a->len * LIMB_BITS + 2)) { bf_set(r, a); return bf_add_epsilon(r, r, e, a->sign, prec, flags); } } return bf_ziv_rounding(r, a, prec, flags, bf_asin_internal, (void *)FALSE); } int bf_acos(bf_t *r, const bf_t *a, limb_t prec, bf_flags_t flags) { bf_context_t *s = r->ctx; bf_t T_s, *T = &T_s; int res; if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bf_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF) { bf_set_nan(r); return BF_ST_INVALID_OP; } else { bf_const_pi(r, prec, flags); bf_mul_2exp(r, -1, BF_PREC_INF, BF_RNDZ); return BF_ST_INEXACT; } } bf_init(s, T); bf_set_ui(T, 1); res = bf_cmpu(a, T); bf_delete(T); if (res > 0) { bf_set_nan(r); return BF_ST_INVALID_OP; } else if (res == 0 && a->sign == 0) { bf_set_zero(r, 0); return 0; } return bf_ziv_rounding(r, a, prec, flags, bf_asin_internal, (void *)TRUE); } /***************************************************************/ /* decimal floating point numbers */ #ifdef USE_BF_DEC #define adddq(r1, r0, a1, a0) \ do { \ limb_t __t = r0; \ r0 += (a0); \ r1 += (a1) + (r0 < __t); \ } while (0) #define subdq(r1, r0, a1, a0) \ do { \ limb_t __t = r0; \ r0 -= (a0); \ r1 -= (a1) + (r0 > __t); \ } while (0) #if LIMB_BITS == 64 #ifdef STRICT_R_HEADERS /* Note: we assume __int128 is available */ #define muldq(r1, r0, a, b) \ do { \ uint128_t __t; \ __t = (uint128_t)(a) * (uint128_t)(b); \ r0 = __t; \ r1 = __t >> 64; \ } while (0) #define divdq(q, r, a1, a0, b) \ do { \ uint128_t __t; \ limb_t __b = (b); \ __t = ((uint128_t)(a1) << 64) | (a0); \ q = __t / __b; \ r = __t % __b; \ } while (0) #else /* Note: we assume __int128 is available */ #define muldq(r1, r0, a, b) \ do { \ unsigned __int128 __t; \ __t = (unsigned __int128)(a) * (unsigned __int128)(b); \ r0 = __t; \ r1 = __t >> 64; \ } while (0) #define divdq(q, r, a1, a0, b) \ do { \ unsigned __int128 __t; \ limb_t __b = (b); \ __t = ((unsigned __int128)(a1) << 64) | (a0); \ q = __t / __b; \ r = __t % __b; \ } while (0) #endif #else #define muldq(r1, r0, a, b) \ do { \ uint64_t __t; \ __t = (uint64_t)(a) * (uint64_t)(b); \ r0 = __t; \ r1 = __t >> 32; \ } while (0) #define divdq(q, r, a1, a0, b) \ do { \ uint64_t __t; \ limb_t __b = (b); \ __t = ((uint64_t)(a1) << 32) | (a0); \ q = __t / __b; \ r = __t % __b; \ } while (0) #endif /* LIMB_BITS != 64 */ #if LIMB_DIGITS == 19 /* WARNING: hardcoded for b = 1e19. It is assumed that: 0 <= a1 < 2^63 */ #define divdq_base(q, r, a1, a0)\ do {\ uint64_t __a0, __a1, __t0, __t1, __b = BF_DEC_BASE; \ __a0 = a0;\ __a1 = a1;\ __t0 = __a1;\ __t0 = shld(__t0, __a0, 1);\ muldq(q, __t1, __t0, UINT64_C(17014118346046923173)); \ muldq(__t1, __t0, q, __b);\ subdq(__a1, __a0, __t1, __t0);\ subdq(__a1, __a0, 1, __b * 2); \ __t0 = (slimb_t)__a1 >> 1; \ q += 2 + __t0;\ adddq(__a1, __a0, 0, __b & __t0);\ q += __a1; \ __a0 += __b & __a1; \ r = __a0;\ } while(0) #elif LIMB_DIGITS == 9 /* WARNING: hardcoded for b = 1e9. It is assumed that: 0 <= a1 < 2^29 */ #define divdq_base(q, r, a1, a0)\ do {\ uint32_t __t0, __t1, __b = BF_DEC_BASE; \ __t0 = a1;\ __t1 = a0;\ __t0 = (__t0 << 3) | (__t1 >> (32 - 3)); \ muldq(q, __t1, __t0, 2305843009U);\ r = a0 - q * __b;\ __t1 = (r >= __b);\ q += __t1;\ if (__t1)\ r -= __b;\ } while(0) #endif /* fast integer division by a fixed constant */ typedef struct FastDivData { limb_t m1; /* multiplier */ int8_t shift1; int8_t shift2; } FastDivData; /* From "Division by Invariant Integers using Multiplication" by Torborn Granlund and Peter L. 
Montgomery */ /* d must be != 0 */ static inline __maybe_unused void fast_udiv_init(FastDivData *s, limb_t d) { int l; limb_t q, r, m1; if (d == 1) l = 0; else l = 64 - clz64(d - 1); divdq(q, r, ((limb_t)1 << l) - d, 0, d); (void)r; m1 = q + 1; // printf("d=%lu l=%d m1=0x%016lx\n", d, l, m1); s->m1 = m1; s->shift1 = l; if (s->shift1 > 1) s->shift1 = 1; s->shift2 = l - 1; if (s->shift2 < 0) s->shift2 = 0; } static inline limb_t fast_udiv(limb_t a, const FastDivData *s) { limb_t t0, t1; muldq(t1, t0, s->m1, a); t0 = (a - t1) >> s->shift1; return (t1 + t0) >> s->shift2; } /* contains 10^i */ const limb_t mp_pow_dec[LIMB_DIGITS + 1] = { 1U, 10U, 100U, 1000U, 10000U, 100000U, 1000000U, 10000000U, 100000000U, 1000000000U, #if LIMB_BITS == 64 10000000000U, 100000000000U, 1000000000000U, 10000000000000U, 100000000000000U, 1000000000000000U, 10000000000000000U, 100000000000000000U, 1000000000000000000U, 10000000000000000000U, #endif }; /* precomputed from fast_udiv_init(10^i) */ static const FastDivData mp_pow_div[LIMB_DIGITS + 1] = { #if LIMB_BITS == 32 { 0x00000001, 0, 0 }, { 0x9999999a, 1, 3 }, { 0x47ae147b, 1, 6 }, { 0x0624dd30, 1, 9 }, { 0xa36e2eb2, 1, 13 }, { 0x4f8b588f, 1, 16 }, { 0x0c6f7a0c, 1, 19 }, { 0xad7f29ac, 1, 23 }, { 0x5798ee24, 1, 26 }, { 0x12e0be83, 1, 29 }, #else { 0x0000000000000001, 0, 0 }, { 0x999999999999999a, 1, 3 }, { 0x47ae147ae147ae15, 1, 6 }, { 0x0624dd2f1a9fbe77, 1, 9 }, { 0xa36e2eb1c432ca58, 1, 13 }, { 0x4f8b588e368f0847, 1, 16 }, { 0x0c6f7a0b5ed8d36c, 1, 19 }, { 0xad7f29abcaf48579, 1, 23 }, { 0x5798ee2308c39dfa, 1, 26 }, { 0x12e0be826d694b2f, 1, 29 }, { 0xb7cdfd9d7bdbab7e, 1, 33 }, { 0x5fd7fe17964955fe, 1, 36 }, { 0x19799812dea11198, 1, 39 }, { 0xc25c268497681c27, 1, 43 }, { 0x6849b86a12b9b01f, 1, 46 }, { 0x203af9ee756159b3, 1, 49 }, { 0xcd2b297d889bc2b7, 1, 53 }, { 0x70ef54646d496893, 1, 56 }, { 0x2725dd1d243aba0f, 1, 59 }, { 0xd83c94fb6d2ac34d, 1, 63 }, #endif }; /* divide by 10^shift with 0 <= shift <= LIMB_DIGITS */ static inline limb_t fast_shr_dec(limb_t a, int shift) { return fast_udiv(a, &mp_pow_div[shift]); } /* division and remainder by 10^shift */ #define fast_shr_rem_dec(q, r, a, shift) q = fast_shr_dec(a, shift), r = a - q * mp_pow_dec[shift] limb_t mp_add_dec(limb_t *res, const limb_t *op1, const limb_t *op2, mp_size_t n, limb_t carry) { limb_t base = BF_DEC_BASE; mp_size_t i; limb_t k, a, v; k=carry; for(i=0;i v; if (k) a += base; res[i] = a; } return k; } limb_t mp_sub_ui_dec(limb_t *tab, limb_t b, mp_size_t n) { limb_t base = BF_DEC_BASE; mp_size_t i; limb_t k, v, a; k=b; for(i=0;i v; if (k) a += base; tab[i]=a; if (k == 0) break; } return k; } /* taba[] = taba[] * b + l. 0 <= b, l <= base - 1. Return the high carry */ limb_t mp_mul1_dec(limb_t *tabr, const limb_t *taba, mp_size_t n, limb_t b, limb_t l) { mp_size_t i; limb_t t0, t1, r; for(i = 0; i < n; i++) { muldq(t1, t0, taba[i], b); adddq(t1, t0, 0, l); divdq_base(l, r, t1, t0); tabr[i] = r; } return l; } /* tabr[] += taba[] * b. 0 <= b <= base - 1. Return the value to add to the high word */ limb_t mp_add_mul1_dec(limb_t *tabr, const limb_t *taba, mp_size_t n, limb_t b) { mp_size_t i; limb_t l, t0, t1, r; l = 0; for(i = 0; i < n; i++) { muldq(t1, t0, taba[i], b); adddq(t1, t0, 0, l); adddq(t1, t0, 0, tabr[i]); divdq_base(l, r, t1, t0); tabr[i] = r; } return l; } /* tabr[] -= taba[] * b. 0 <= b <= base - 1. Return the value to substract to the high word. 
*/ limb_t mp_sub_mul1_dec(limb_t *tabr, const limb_t *taba, mp_size_t n, limb_t b) { limb_t base = BF_DEC_BASE; mp_size_t i; limb_t l, t0, t1, r, a, v, c; /* XXX: optimize */ l = 0; for(i = 0; i < n; i++) { muldq(t1, t0, taba[i], b); adddq(t1, t0, 0, l); divdq_base(l, r, t1, t0); v = tabr[i]; a = v - r; c = a > v; if (c) a += base; /* never bigger than base because r = 0 when l = base - 1 */ l += c; tabr[i] = a; } return l; } /* size of the result : op1_size + op2_size. */ void mp_mul_basecase_dec(limb_t *result, const limb_t *op1, mp_size_t op1_size, const limb_t *op2, mp_size_t op2_size) { mp_size_t i; limb_t r; result[op1_size] = mp_mul1_dec(result, op1, op1_size, op2[0], 0); for(i=1;i> 1; if (r) r = base_div2; for(i = na - 1; i >= 0; i--) { t0 = taba[i]; tabr[i] = (t0 >> 1) + r; r = 0; if (t0 & 1) r = base_div2; } if (r) r = 1; } else #endif if (na >= UDIV1NORM_THRESHOLD) { shift = clz(b); if (shift == 0) { /* normalized case: b >= 2^(LIMB_BITS-1) */ limb_t b_inv; b_inv = udiv1norm_init(b); for(i = na - 1; i >= 0; i--) { muldq(t1, t0, r, base); adddq(t1, t0, 0, taba[i]); q = udiv1norm(&r, t1, t0, b, b_inv); tabr[i] = q; } } else { limb_t b_inv; b <<= shift; b_inv = udiv1norm_init(b); for(i = na - 1; i >= 0; i--) { muldq(t1, t0, r, base); adddq(t1, t0, 0, taba[i]); t1 = (t1 << shift) | (t0 >> (LIMB_BITS - shift)); t0 <<= shift; q = udiv1norm(&r, t1, t0, b, b_inv); r >>= shift; tabr[i] = q; } } } else { for(i = na - 1; i >= 0; i--) { muldq(t1, t0, r, base); adddq(t1, t0, 0, taba[i]); divdq(q, r, t1, t0, b); tabr[i] = q; } } return r; } static __maybe_unused void mp_print_str_dec(const char *str, const limb_t *tab, slimb_t n) { slimb_t i; printf("%s=", str); for(i = n - 1; i >= 0; i--) { if (i != n - 1) printf("_"); printf("%0*" PRIu_LIMB, LIMB_DIGITS, tab[i]); } printf("\n"); } static __maybe_unused void mp_print_str_h_dec(const char *str, const limb_t *tab, slimb_t n, limb_t high) { slimb_t i; printf("%s=", str); printf("%0*" PRIu_LIMB, LIMB_DIGITS, high); for(i = n - 1; i >= 0; i--) { printf("_"); printf("%0*" PRIu_LIMB, LIMB_DIGITS, tab[i]); } printf("\n"); } //#define DEBUG_DIV_SLOW #define DIV_STATIC_ALLOC_LEN 16 /* return q = a / b and r = a % b. taba[na] must be allocated if tabb1[nb - 1] < B / 2. tabb1[nb - 1] must be != zero. na must be >= nb. 's' can be NULL if tabb1[nb - 1] >= B / 2. The remainder is is returned in taba and contains nb libms. tabq contains na - nb + 1 limbs. No overlap is permitted. 
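The method below is schoolbook long division in base 10^LIMB_DIGITS, essentially Knuth's Algorithm D: the divisor is first scaled so that its top limb is >= B/2, each quotient limb is estimated from the top two limbs of the partial remainder, and the rare over-estimate is fixed up by the add-back loop.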
Running time of the standard method: (na - nb + 1) * nb Return 0 if OK, -1 if memory alloc error */ /* XXX: optimize */ static int mp_div_dec(bf_context_t *s, limb_t *tabq, limb_t *taba, mp_size_t na, const limb_t *tabb1, mp_size_t nb) { limb_t base = BF_DEC_BASE; limb_t r, mult, t0, t1, a, c, q, v, *tabb; mp_size_t i, j; limb_t static_tabb[DIV_STATIC_ALLOC_LEN]; #ifdef DEBUG_DIV_SLOW mp_print_str_dec("a", taba, na); mp_print_str_dec("b", tabb1, nb); #endif /* normalize tabb */ r = tabb1[nb - 1]; assert(r != 0); i = na - nb; if (r >= BF_DEC_BASE / 2) { mult = 1; tabb = (limb_t *)tabb1; q = 1; for(j = nb - 1; j >= 0; j--) { if (taba[i + j] != tabb[j]) { if (taba[i + j] < tabb[j]) q = 0; break; } } tabq[i] = q; if (q) { mp_sub_dec(taba + i, taba + i, tabb, nb, 0); } i--; } else { mult = base / (r + 1); if (likely(nb <= DIV_STATIC_ALLOC_LEN)) { tabb = static_tabb; } else { tabb = bf_malloc(s, sizeof(limb_t) * nb); if (!tabb) return -1; } mp_mul1_dec(tabb, tabb1, nb, mult, 0); taba[na] = mp_mul1_dec(taba, taba, na, mult, 0); } #ifdef DEBUG_DIV_SLOW printf("mult=" FMT_LIMB "\n", mult); mp_print_str_dec("a_norm", taba, na + 1); mp_print_str_dec("b_norm", tabb, nb); #endif for(; i >= 0; i--) { if (unlikely(taba[i + nb] >= tabb[nb - 1])) { /* XXX: check if it is really possible */ q = base - 1; } else { muldq(t1, t0, taba[i + nb], base); adddq(t1, t0, 0, taba[i + nb - 1]); divdq(q, r, t1, t0, tabb[nb - 1]); } // printf("i=%d q1=%ld\n", i, q); r = mp_sub_mul1_dec(taba + i, tabb, nb, q); // mp_dump("r1", taba + i, nb, bd); // printf("r2=%ld\n", r); v = taba[i + nb]; a = v - r; c = a > v; if (c) a += base; taba[i + nb] = a; if (c != 0) { /* negative result */ for(;;) { q--; c = mp_add_dec(taba + i, taba + i, tabb, nb, 0); /* propagate carry and test if positive result */ if (c != 0) { if (++taba[i + nb] == base) { break; } } } } tabq[i] = q; } #ifdef DEBUG_DIV_SLOW mp_print_str_dec("q", tabq, na - nb + 1); mp_print_str_dec("r", taba, nb); #endif /* remove the normalization */ if (mult != 1) { mp_div1_dec(taba, taba, nb, mult, 0); if (unlikely(tabb != static_tabb)) bf_free(s, tabb); } return 0; } /* divide by 10^shift */ static limb_t mp_shr_dec(limb_t *tab_r, const limb_t *tab, mp_size_t n, limb_t shift, limb_t high) { mp_size_t i; limb_t l, a, q, r; assert(shift >= 1 && shift < LIMB_DIGITS); l = high; for(i = n - 1; i >= 0; i--) { a = tab[i]; fast_shr_rem_dec(q, r, a, shift); tab_r[i] = q + l * mp_pow_dec[LIMB_DIGITS - shift]; l = r; } return l; } /* multiply by 10^shift */ static limb_t mp_shl_dec(limb_t *tab_r, const limb_t *tab, mp_size_t n, limb_t shift, limb_t low) { mp_size_t i; limb_t l, a, q, r; assert(shift >= 1 && shift < LIMB_DIGITS); l = low; for(i = 0; i < n; i++) { a = tab[i]; fast_shr_rem_dec(q, r, a, LIMB_DIGITS - shift); tab_r[i] = r * mp_pow_dec[shift] + l; l = q; } return l; } static limb_t mp_sqrtrem2_dec(limb_t *tabs, limb_t *taba) { int k; dlimb_t a, b, r; limb_t taba1[2], s, r0, r1; /* convert to binary and normalize */ a = (dlimb_t)taba[1] * BF_DEC_BASE + taba[0]; k = clz(a >> LIMB_BITS) & ~1; b = a << k; taba1[0] = b; taba1[1] = b >> LIMB_BITS; mp_sqrtrem2(&s, taba1); s >>= (k >> 1); /* convert the remainder back to decimal */ r = a - (dlimb_t)s * (dlimb_t)s; divdq_base(r1, r0, r >> LIMB_BITS, r); taba[0] = r0; tabs[0] = s; return r1; } //#define DEBUG_SQRTREM_DEC /* tmp_buf must contain (n / 2 + 1 limbs) */ static limb_t mp_sqrtrem_rec_dec(limb_t *tabs, limb_t *taba, limb_t n, limb_t *tmp_buf) { limb_t l, h, rh, ql, qh, c, i; if (n == 1) return mp_sqrtrem2_dec(tabs, taba); 
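 /* Recursive case: classical divide-and-conquer ("Karatsuba") square root:
       take the square root of the high half of the input, use one division by
       that partial root to obtain the low limbs of the root, then subtract the
       square of the low part and correct root and remainder if the remainder
       went negative. */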
#ifdef DEBUG_SQRTREM_DEC mp_print_str_dec("a", taba, 2 * n); #endif l = n / 2; h = n - l; qh = mp_sqrtrem_rec_dec(tabs + l, taba + 2 * l, h, tmp_buf); #ifdef DEBUG_SQRTREM_DEC mp_print_str_dec("s1", tabs + l, h); mp_print_str_h_dec("r1", taba + 2 * l, h, qh); mp_print_str_h_dec("r2", taba + l, n, qh); #endif /* the remainder is in taba + 2 * l. Its high bit is in qh */ if (qh) { mp_sub_dec(taba + 2 * l, taba + 2 * l, tabs + l, h, 0); } /* instead of dividing by 2*s, divide by s (which is normalized) and update q and r */ mp_div_dec(NULL, tmp_buf, taba + l, n, tabs + l, h); qh += tmp_buf[l]; for(i = 0; i < l; i++) tabs[i] = tmp_buf[i]; ql = mp_div1_dec(tabs, tabs, l, 2, qh & 1); qh = qh >> 1; /* 0 or 1 */ if (ql) rh = mp_add_dec(taba + l, taba + l, tabs + l, h, 0); else rh = 0; #ifdef DEBUG_SQRTREM_DEC mp_print_str_h_dec("q", tabs, l, qh); mp_print_str_h_dec("u", taba + l, h, rh); #endif mp_add_ui_dec(tabs + l, qh, h); #ifdef DEBUG_SQRTREM_DEC mp_print_str_dec("s2", tabs, n); #endif /* q = qh, tabs[l - 1 ... 0], r = taba[n - 1 ... l] */ /* subtract q^2. if qh = 1 then q = B^l, so we can take shortcuts */ if (qh) { c = qh; } else { mp_mul_basecase_dec(taba + n, tabs, l, tabs, l); c = mp_sub_dec(taba, taba, taba + n, 2 * l, 0); } rh -= mp_sub_ui_dec(taba + 2 * l, c, n - 2 * l); if ((slimb_t)rh < 0) { mp_sub_ui_dec(tabs, 1, n); rh += mp_add_mul1_dec(taba, tabs, n, 2); rh += mp_add_ui_dec(taba, 1, n); } return rh; } /* 'taba' has 2*n limbs with n >= 1 and taba[2*n-1] >= B/4. Return (s, r) with s=floor(sqrt(a)) and r=a-s^2. 0 <= r <= 2 * s. tabs has n limbs. r is returned in the lower n limbs of taba. Its r[n] is the returned value of the function. */ int mp_sqrtrem_dec(bf_context_t *s, limb_t *tabs, limb_t *taba, limb_t n) { limb_t tmp_buf1[8]; limb_t *tmp_buf; mp_size_t n2; n2 = n / 2 + 1; if (n2 <= countof(tmp_buf1)) { tmp_buf = tmp_buf1; } else { tmp_buf = bf_malloc(s, sizeof(limb_t) * n2); if (!tmp_buf) return -1; } taba[n] = mp_sqrtrem_rec_dec(tabs, taba, n, tmp_buf); if (tmp_buf != tmp_buf1) bf_free(s, tmp_buf); return 0; } /* return the number of leading zero digits, from 0 to LIMB_DIGITS */ static int clz_dec(limb_t a) { if (a == 0) return LIMB_DIGITS; switch(LIMB_BITS - 1 - clz(a)) { case 0: /* 1-1 */ return LIMB_DIGITS - 1; case 1: /* 2-3 */ return LIMB_DIGITS - 1; case 2: /* 4-7 */ return LIMB_DIGITS - 1; case 3: /* 8-15 */ if (a < 10) return LIMB_DIGITS - 1; else return LIMB_DIGITS - 2; case 4: /* 16-31 */ return LIMB_DIGITS - 2; case 5: /* 32-63 */ return LIMB_DIGITS - 2; case 6: /* 64-127 */ if (a < 100) return LIMB_DIGITS - 2; else return LIMB_DIGITS - 3; case 7: /* 128-255 */ return LIMB_DIGITS - 3; case 8: /* 256-511 */ return LIMB_DIGITS - 3; case 9: /* 512-1023 */ if (a < 1000) return LIMB_DIGITS - 3; else return LIMB_DIGITS - 4; case 10: /* 1024-2047 */ return LIMB_DIGITS - 4; case 11: /* 2048-4095 */ return LIMB_DIGITS - 4; case 12: /* 4096-8191 */ return LIMB_DIGITS - 4; case 13: /* 8192-16383 */ if (a < 10000) return LIMB_DIGITS - 4; else return LIMB_DIGITS - 5; case 14: /* 16384-32767 */ return LIMB_DIGITS - 5; case 15: /* 32768-65535 */ return LIMB_DIGITS - 5; case 16: /* 65536-131071 */ if (a < 100000) return LIMB_DIGITS - 5; else return LIMB_DIGITS - 6; case 17: /* 131072-262143 */ return LIMB_DIGITS - 6; case 18: /* 262144-524287 */ return LIMB_DIGITS - 6; case 19: /* 524288-1048575 */ if (a < 1000000) return LIMB_DIGITS - 6; else return LIMB_DIGITS - 7; case 20: /* 1048576-2097151 */ return LIMB_DIGITS - 7; case 21: /* 2097152-4194303 */ return LIMB_DIGITS - 7; case 
22: /* 4194304-8388607 */ return LIMB_DIGITS - 7; case 23: /* 8388608-16777215 */ if (a < 10000000) return LIMB_DIGITS - 7; else return LIMB_DIGITS - 8; case 24: /* 16777216-33554431 */ return LIMB_DIGITS - 8; case 25: /* 33554432-67108863 */ return LIMB_DIGITS - 8; case 26: /* 67108864-134217727 */ if (a < 100000000) return LIMB_DIGITS - 8; else return LIMB_DIGITS - 9; #if LIMB_BITS == 64 case 27: /* 134217728-268435455 */ return LIMB_DIGITS - 9; case 28: /* 268435456-536870911 */ return LIMB_DIGITS - 9; case 29: /* 536870912-1073741823 */ if (a < 1000000000) return LIMB_DIGITS - 9; else return LIMB_DIGITS - 10; case 30: /* 1073741824-2147483647 */ return LIMB_DIGITS - 10; case 31: /* 2147483648-4294967295 */ return LIMB_DIGITS - 10; case 32: /* 4294967296-8589934591 */ return LIMB_DIGITS - 10; case 33: /* 8589934592-17179869183 */ if (a < 10000000000) return LIMB_DIGITS - 10; else return LIMB_DIGITS - 11; case 34: /* 17179869184-34359738367 */ return LIMB_DIGITS - 11; case 35: /* 34359738368-68719476735 */ return LIMB_DIGITS - 11; case 36: /* 68719476736-137438953471 */ if (a < 100000000000) return LIMB_DIGITS - 11; else return LIMB_DIGITS - 12; case 37: /* 137438953472-274877906943 */ return LIMB_DIGITS - 12; case 38: /* 274877906944-549755813887 */ return LIMB_DIGITS - 12; case 39: /* 549755813888-1099511627775 */ if (a < 1000000000000) return LIMB_DIGITS - 12; else return LIMB_DIGITS - 13; case 40: /* 1099511627776-2199023255551 */ return LIMB_DIGITS - 13; case 41: /* 2199023255552-4398046511103 */ return LIMB_DIGITS - 13; case 42: /* 4398046511104-8796093022207 */ return LIMB_DIGITS - 13; case 43: /* 8796093022208-17592186044415 */ if (a < 10000000000000) return LIMB_DIGITS - 13; else return LIMB_DIGITS - 14; case 44: /* 17592186044416-35184372088831 */ return LIMB_DIGITS - 14; case 45: /* 35184372088832-70368744177663 */ return LIMB_DIGITS - 14; case 46: /* 70368744177664-140737488355327 */ if (a < 100000000000000) return LIMB_DIGITS - 14; else return LIMB_DIGITS - 15; case 47: /* 140737488355328-281474976710655 */ return LIMB_DIGITS - 15; case 48: /* 281474976710656-562949953421311 */ return LIMB_DIGITS - 15; case 49: /* 562949953421312-1125899906842623 */ if (a < 1000000000000000) return LIMB_DIGITS - 15; else return LIMB_DIGITS - 16; case 50: /* 1125899906842624-2251799813685247 */ return LIMB_DIGITS - 16; case 51: /* 2251799813685248-4503599627370495 */ return LIMB_DIGITS - 16; case 52: /* 4503599627370496-9007199254740991 */ return LIMB_DIGITS - 16; case 53: /* 9007199254740992-18014398509481983 */ if (a < 10000000000000000) return LIMB_DIGITS - 16; else return LIMB_DIGITS - 17; case 54: /* 18014398509481984-36028797018963967 */ return LIMB_DIGITS - 17; case 55: /* 36028797018963968-72057594037927935 */ return LIMB_DIGITS - 17; case 56: /* 72057594037927936-144115188075855871 */ if (a < 100000000000000000) return LIMB_DIGITS - 17; else return LIMB_DIGITS - 18; case 57: /* 144115188075855872-288230376151711743 */ return LIMB_DIGITS - 18; case 58: /* 288230376151711744-576460752303423487 */ return LIMB_DIGITS - 18; case 59: /* 576460752303423488-1152921504606846975 */ if (a < 1000000000000000000) return LIMB_DIGITS - 18; else return LIMB_DIGITS - 19; #endif default: return 0; } } /* for debugging */ void bfdec_print_str(const char *str, const bfdec_t *a) { slimb_t i; printf("%s=", str); if (a->expn == BF_EXP_NAN) { printf("NaN"); } else { if (a->sign) putchar('-'); if (a->expn == BF_EXP_ZERO) { putchar('0'); } else if (a->expn == BF_EXP_INF) { printf("Inf"); } else { 
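 /* finite non-zero value: printed as 0.<mantissa digits>e<decimal exponent> */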
printf("0."); for(i = a->len - 1; i >= 0; i--) printf("%0*" PRIu_LIMB, LIMB_DIGITS, a->tab[i]); printf("e%" PRId_LIMB, a->expn); } } printf("\n"); } /* return != 0 if one digit between 0 and bit_pos inclusive is not zero. */ static inline limb_t scan_digit_nz(const bfdec_t *r, slimb_t bit_pos) { slimb_t pos; limb_t v, q; int shift; if (bit_pos < 0) return 0; pos = (limb_t)bit_pos / LIMB_DIGITS; shift = (limb_t)bit_pos % LIMB_DIGITS; fast_shr_rem_dec(q, v, r->tab[pos], shift + 1); (void)q; if (v != 0) return 1; pos--; while (pos >= 0) { if (r->tab[pos] != 0) return 1; pos--; } return 0; } static limb_t get_digit(const limb_t *tab, limb_t len, slimb_t pos) { slimb_t i; int shift; i = floor_div(pos, LIMB_DIGITS); if (i < 0 || i >= len) return 0; shift = pos - i * LIMB_DIGITS; return fast_shr_dec(tab[i], shift) % 10; } #if 0 static limb_t get_digits(const limb_t *tab, limb_t len, slimb_t pos) { limb_t a0, a1; int shift; slimb_t i; i = floor_div(pos, LIMB_DIGITS); shift = pos - i * LIMB_DIGITS; if (i >= 0 && i < len) a0 = tab[i]; else a0 = 0; if (shift == 0) { return a0; } else { i++; if (i >= 0 && i < len) a1 = tab[i]; else a1 = 0; return fast_shr_dec(a0, shift) + fast_urem(a1, &mp_pow_div[LIMB_DIGITS - shift]) * mp_pow_dec[shift]; } } #endif /* return the addend for rounding. Note that prec can be <= 0 for bf_rint() */ static int bfdec_get_rnd_add(int *pret, const bfdec_t *r, limb_t l, slimb_t prec, int rnd_mode) { int add_one, inexact; limb_t digit1, digit0; // bfdec_print_str("get_rnd_add", r); if (rnd_mode == BF_RNDF) { digit0 = 1; /* faithful rounding does not honor the INEXACT flag */ } else { /* starting limb for bit 'prec + 1' */ digit0 = scan_digit_nz(r, l * LIMB_DIGITS - 1 - bf_max(0, prec + 1)); } /* get the digit at 'prec' */ digit1 = get_digit(r->tab, l, l * LIMB_DIGITS - 1 - prec); inexact = (digit1 | digit0) != 0; add_one = 0; switch(rnd_mode) { case BF_RNDZ: break; case BF_RNDN: if (digit1 == 5) { if (digit0) { add_one = 1; } else { /* round to even */ add_one = get_digit(r->tab, l, l * LIMB_DIGITS - 1 - (prec - 1)) & 1; } } else if (digit1 > 5) { add_one = 1; } break; case BF_RNDD: case BF_RNDU: if (r->sign == (rnd_mode == BF_RNDD)) add_one = inexact; break; case BF_RNDNA: case BF_RNDF: add_one = (digit1 >= 5); break; case BF_RNDA: add_one = inexact; break; default: abort(); } if (inexact) *pret |= BF_ST_INEXACT; return add_one; } /* round to prec1 bits assuming 'r' is non zero and finite. 'r' is assumed to have length 'l' (1 <= l <= r->len). prec1 can be BF_PREC_INF. BF_FLAG_SUBNORMAL is not supported. Cannot fail with BF_ST_MEM_ERROR. */ static int __bfdec_round(bfdec_t *r, limb_t prec1, bf_flags_t flags, limb_t l) { int shift, add_one, rnd_mode, ret; slimb_t i, bit_pos, pos, e_min, e_max, e_range, prec; /* XXX: align to IEEE 754 2008 for decimal numbers ? 
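Overview of the steps below: derive e_min/e_max from the exponent field width; reinterpret 'prec' when BF_FLAG_RADPNT_PREC is set (precision counted in digits after the radix point) or when the result may be subnormal; compute the rounding addend at digit position 'prec'; if needed add one ulp and propagate the decimal carry (shifting right by one digit when the top limb overflows); handle underflow/overflow; finally clear the digits below 'prec' and drop the trailing zero limbs.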
*/ e_range = (limb_t)1 << (bf_get_exp_bits(flags) - 1); e_min = -e_range + 3; e_max = e_range; if (flags & BF_FLAG_RADPNT_PREC) { /* 'prec' is the precision after the decimal point */ if (prec1 != BF_PREC_INF) prec = r->expn + prec1; else prec = prec1; } else if (unlikely(r->expn < e_min) && (flags & BF_FLAG_SUBNORMAL)) { /* restrict the precision in case of potentially subnormal result */ assert(prec1 != BF_PREC_INF); prec = prec1 - (e_min - r->expn); } else { prec = prec1; } /* round to prec bits */ rnd_mode = flags & BF_RND_MASK; ret = 0; add_one = bfdec_get_rnd_add(&ret, r, l, prec, rnd_mode); if (prec <= 0) { if (add_one) { bfdec_resize(r, 1); /* cannot fail because r is non zero */ r->tab[0] = BF_DEC_BASE / 10; r->expn += 1 - prec; ret |= BF_ST_UNDERFLOW | BF_ST_INEXACT; return ret; } else { goto underflow; } } else if (add_one) { limb_t carry; /* add one starting at digit 'prec - 1' */ bit_pos = l * LIMB_DIGITS - 1 - (prec - 1); pos = bit_pos / LIMB_DIGITS; carry = mp_pow_dec[bit_pos % LIMB_DIGITS]; carry = mp_add_ui_dec(r->tab + pos, carry, l - pos); if (carry) { /* shift right by one digit */ mp_shr_dec(r->tab + pos, r->tab + pos, l - pos, 1, 1); r->expn++; } } /* check underflow */ if (unlikely(r->expn < e_min)) { if (flags & BF_FLAG_SUBNORMAL) { /* if inexact, also set the underflow flag */ if (ret & BF_ST_INEXACT) ret |= BF_ST_UNDERFLOW; } else { underflow: bfdec_set_zero(r, r->sign); ret |= BF_ST_UNDERFLOW | BF_ST_INEXACT; return ret; } } /* check overflow */ if (unlikely(r->expn > e_max)) { bfdec_set_inf(r, r->sign); ret |= BF_ST_OVERFLOW | BF_ST_INEXACT; return ret; } /* keep the bits starting at 'prec - 1' */ bit_pos = l * LIMB_DIGITS - 1 - (prec - 1); i = floor_div(bit_pos, LIMB_DIGITS); if (i >= 0) { shift = smod(bit_pos, LIMB_DIGITS); if (shift != 0) { r->tab[i] = fast_shr_dec(r->tab[i], shift) * mp_pow_dec[shift]; } } else { i = 0; } /* remove trailing zeros */ while (r->tab[i] == 0) i++; if (i > 0) { l -= i; memmove(r->tab, r->tab + i, l * sizeof(limb_t)); } bfdec_resize(r, l); /* cannot fail */ return ret; } /* Cannot fail with BF_ST_MEM_ERROR. */ int bfdec_round(bfdec_t *r, limb_t prec, bf_flags_t flags) { if (r->len == 0) return 0; return __bfdec_round(r, prec, flags, r->len); } /* 'r' must be a finite number. Cannot fail with BF_ST_MEM_ERROR. 
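Normalization drops the most significant zero limbs, then shifts the mantissa left (by whole decimal digits) so that its leading decimal digit is non-zero, adjusting the exponent accordingly, before rounding to 'prec1' digits with __bfdec_round().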
*/ int bfdec_normalize_and_round(bfdec_t *r, limb_t prec1, bf_flags_t flags) { limb_t l, v; int shift, ret; // bfdec_print_str("bf_renorm", r); l = r->len; while (l > 0 && r->tab[l - 1] == 0) l--; if (l == 0) { /* zero */ r->expn = BF_EXP_ZERO; bfdec_resize(r, 0); /* cannot fail */ ret = 0; } else { r->expn -= (r->len - l) * LIMB_DIGITS; /* shift to have the MSB set to '1' */ v = r->tab[l - 1]; shift = clz_dec(v); if (shift != 0) { mp_shl_dec(r->tab, r->tab, l, shift, 0); r->expn -= shift; } ret = __bfdec_round(r, prec1, flags, l); } // bf_print_str("r_final", r); return ret; } int bfdec_set_ui(bfdec_t *r, uint64_t v) { #if LIMB_BITS == 32 if (v >= BF_DEC_BASE * BF_DEC_BASE) { if (bfdec_resize(r, 3)) goto fail; r->tab[0] = v % BF_DEC_BASE; v /= BF_DEC_BASE; r->tab[1] = v % BF_DEC_BASE; r->tab[2] = v / BF_DEC_BASE; r->expn = 3 * LIMB_DIGITS; } else #endif if (v >= BF_DEC_BASE) { if (bfdec_resize(r, 2)) goto fail; r->tab[0] = v % BF_DEC_BASE; r->tab[1] = v / BF_DEC_BASE; r->expn = 2 * LIMB_DIGITS; } else { if (bfdec_resize(r, 1)) goto fail; r->tab[0] = v; r->expn = LIMB_DIGITS; } r->sign = 0; return bfdec_normalize_and_round(r, BF_PREC_INF, 0); fail: bfdec_set_nan(r); return BF_ST_MEM_ERROR; } int bfdec_set_si(bfdec_t *r, int64_t v) { int ret; if (v < 0) { ret = bfdec_set_ui(r, -v); r->sign = 1; } else { ret = bfdec_set_ui(r, v); } return ret; } static int bfdec_add_internal(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags, int b_neg) { bf_context_t *s = r->ctx; int is_sub, cmp_res, a_sign, b_sign, ret; a_sign = a->sign; b_sign = b->sign ^ b_neg; is_sub = a_sign ^ b_sign; cmp_res = bfdec_cmpu(a, b); if (cmp_res < 0) { const bfdec_t *tmp; tmp = a; a = b; b = tmp; a_sign = b_sign; /* b_sign is never used later */ } /* abs(a) >= abs(b) */ if (cmp_res == 0 && is_sub && a->expn < BF_EXP_INF) { /* zero result */ bfdec_set_zero(r, (flags & BF_RND_MASK) == BF_RNDD); ret = 0; } else if (a->len == 0 || b->len == 0) { ret = 0; if (a->expn >= BF_EXP_INF) { if (a->expn == BF_EXP_NAN) { /* at least one operand is NaN */ bfdec_set_nan(r); ret = 0; } else if (b->expn == BF_EXP_INF && is_sub) { /* infinities with different signs */ bfdec_set_nan(r); ret = BF_ST_INVALID_OP; } else { bfdec_set_inf(r, a_sign); } } else { /* at least one zero and not subtract */ if (bfdec_set(r, a)) return BF_ST_MEM_ERROR; r->sign = a_sign; goto renorm; } } else { slimb_t d, a_offset, b_offset, i, r_len; limb_t carry; limb_t *b1_tab; int b_shift; mp_size_t b1_len; d = a->expn - b->expn; /* XXX: not efficient in time and memory if the precision is not infinite */ r_len = bf_max(a->len, b->len + (d + LIMB_DIGITS - 1) / LIMB_DIGITS); if (bfdec_resize(r, r_len)) goto fail; r->sign = a_sign; r->expn = a->expn; a_offset = r_len - a->len; for(i = 0; i < a_offset; i++) r->tab[i] = 0; for(i = 0; i < a->len; i++) r->tab[a_offset + i] = a->tab[i]; b_shift = d % LIMB_DIGITS; if (b_shift == 0) { b1_len = b->len; b1_tab = (limb_t *)b->tab; } else { b1_len = b->len + 1; b1_tab = bf_malloc(s, sizeof(limb_t) * b1_len); if (!b1_tab) goto fail; b1_tab[0] = mp_shr_dec(b1_tab + 1, b->tab, b->len, b_shift, 0) * mp_pow_dec[LIMB_DIGITS - b_shift]; } b_offset = r_len - (b->len + (d + LIMB_DIGITS - 1) / LIMB_DIGITS); if (is_sub) { carry = mp_sub_dec(r->tab + b_offset, r->tab + b_offset, b1_tab, b1_len, 0); if (carry != 0) { carry = mp_sub_ui_dec(r->tab + b_offset + b1_len, carry, r_len - (b_offset + b1_len)); assert(carry == 0); } } else { carry = mp_add_dec(r->tab + b_offset, r->tab + b_offset, b1_tab, b1_len, 0); if (carry 
!= 0) { carry = mp_add_ui_dec(r->tab + b_offset + b1_len, carry, r_len - (b_offset + b1_len)); } if (carry != 0) { if (bfdec_resize(r, r_len + 1)) { if (b_shift != 0) bf_free(s, b1_tab); goto fail; } r->tab[r_len] = 1; r->expn += LIMB_DIGITS; } } if (b_shift != 0) bf_free(s, b1_tab); renorm: ret = bfdec_normalize_and_round(r, prec, flags); } return ret; fail: bfdec_set_nan(r); return BF_ST_MEM_ERROR; } static int __bfdec_add(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags) { return bfdec_add_internal(r, a, b, prec, flags, 0); } static int __bfdec_sub(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags) { return bfdec_add_internal(r, a, b, prec, flags, 1); } int bfdec_add(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags) { return bf_op2((bf_t *)r, (bf_t *)a, (bf_t *)b, prec, flags, (bf_op2_func_t *)__bfdec_add); } int bfdec_sub(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags) { return bf_op2((bf_t *)r, (bf_t *)a, (bf_t *)b, prec, flags, (bf_op2_func_t *)__bfdec_sub); } int bfdec_mul(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags) { int ret, r_sign; if (a->len < b->len) { const bfdec_t *tmp = a; a = b; b = tmp; } r_sign = a->sign ^ b->sign; /* here b->len <= a->len */ if (b->len == 0) { if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { bfdec_set_nan(r); ret = 0; } else if (a->expn == BF_EXP_INF || b->expn == BF_EXP_INF) { if ((a->expn == BF_EXP_INF && b->expn == BF_EXP_ZERO) || (a->expn == BF_EXP_ZERO && b->expn == BF_EXP_INF)) { bfdec_set_nan(r); ret = BF_ST_INVALID_OP; } else { bfdec_set_inf(r, r_sign); ret = 0; } } else { bfdec_set_zero(r, r_sign); ret = 0; } } else { bfdec_t tmp, *r1 = NULL; limb_t a_len, b_len; limb_t *a_tab, *b_tab; a_len = a->len; b_len = b->len; a_tab = a->tab; b_tab = b->tab; if (r == a || r == b) { bfdec_init(r->ctx, &tmp); r1 = r; r = &tmp; } if (bfdec_resize(r, a_len + b_len)) { bfdec_set_nan(r); ret = BF_ST_MEM_ERROR; goto done; } mp_mul_basecase_dec(r->tab, a_tab, a_len, b_tab, b_len); r->sign = r_sign; r->expn = a->expn + b->expn; ret = bfdec_normalize_and_round(r, prec, flags); done: if (r == &tmp) bfdec_move(r1, &tmp); } return ret; } int bfdec_mul_si(bfdec_t *r, const bfdec_t *a, int64_t b1, limb_t prec, bf_flags_t flags) { bfdec_t b; int ret; bfdec_init(r->ctx, &b); ret = bfdec_set_si(&b, b1); ret |= bfdec_mul(r, a, &b, prec, flags); bfdec_delete(&b); return ret; } int bfdec_add_si(bfdec_t *r, const bfdec_t *a, int64_t b1, limb_t prec, bf_flags_t flags) { bfdec_t b; int ret; bfdec_init(r->ctx, &b); ret = bfdec_set_si(&b, b1); ret |= bfdec_add(r, a, &b, prec, flags); bfdec_delete(&b); return ret; } static int __bfdec_div(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags) { int ret, r_sign; limb_t n, nb, precl; r_sign = a->sign ^ b->sign; if (a->expn >= BF_EXP_INF || b->expn >= BF_EXP_INF) { if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { bfdec_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF && b->expn == BF_EXP_INF) { bfdec_set_nan(r); return BF_ST_INVALID_OP; } else if (a->expn == BF_EXP_INF) { bfdec_set_inf(r, r_sign); return 0; } else { bfdec_set_zero(r, r_sign); return 0; } } else if (a->expn == BF_EXP_ZERO) { if (b->expn == BF_EXP_ZERO) { bfdec_set_nan(r); return BF_ST_INVALID_OP; } else { bfdec_set_zero(r, r_sign); return 0; } } else if (b->expn == BF_EXP_ZERO) { bfdec_set_inf(r, r_sign); return BF_ST_DIVIDE_ZERO; } nb = b->len; if (prec == BF_PREC_INF) { /* 
infinite precision: return BF_ST_INVALID_OP if not an exact result */ /* XXX: check */ precl = nb + 1; } else if (flags & BF_FLAG_RADPNT_PREC) { /* number of digits after the decimal point */ /* XXX: check (2 extra digits for rounding + 2 digits) */ precl = (bf_max(a->expn - b->expn, 0) + 2 + prec + 2 + LIMB_DIGITS - 1) / LIMB_DIGITS; } else { /* number of limbs of the quotient (2 extra digits for rounding) */ precl = (prec + 2 + LIMB_DIGITS - 1) / LIMB_DIGITS; } n = bf_max(a->len, precl); { limb_t *taba, na, i; slimb_t d; na = n + nb; taba = bf_malloc(r->ctx, (na + 1) * sizeof(limb_t)); if (!taba) goto fail; d = na - a->len; memset(taba, 0, d * sizeof(limb_t)); memcpy(taba + d, a->tab, a->len * sizeof(limb_t)); if (bfdec_resize(r, n + 1)) goto fail1; if (mp_div_dec(r->ctx, r->tab, taba, na, b->tab, nb)) { fail1: bf_free(r->ctx, taba); goto fail; } /* see if non zero remainder */ for(i = 0; i < nb; i++) { if (taba[i] != 0) break; } bf_free(r->ctx, taba); if (i != nb) { if (prec == BF_PREC_INF) { bfdec_set_nan(r); return BF_ST_INVALID_OP; } else { r->tab[0] |= 1; } } r->expn = a->expn - b->expn + LIMB_DIGITS; r->sign = r_sign; ret = bfdec_normalize_and_round(r, prec, flags); } return ret; fail: bfdec_set_nan(r); return BF_ST_MEM_ERROR; } int bfdec_div(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags) { return bf_op2((bf_t *)r, (bf_t *)a, (bf_t *)b, prec, flags, (bf_op2_func_t *)__bfdec_div); } /* a and b must be finite numbers with a >= 0 and b > 0. 'q' is the integer defined as floor(a/b) and r = a - q * b. */ static void bfdec_tdivremu(bf_context_t *s, bfdec_t *q, bfdec_t *r, const bfdec_t *a, const bfdec_t *b) { if (bfdec_cmpu(a, b) < 0) { bfdec_set_ui(q, 0); bfdec_set(r, a); } else { bfdec_div(q, a, b, 0, BF_RNDZ | BF_FLAG_RADPNT_PREC); bfdec_mul(r, q, b, BF_PREC_INF, BF_RNDZ); bfdec_sub(r, a, r, BF_PREC_INF, BF_RNDZ); } } /* division and remainder. rnd_mode is the rounding mode for the quotient. The additional rounding mode BF_RND_EUCLIDIAN is supported. 'q' is an integer. 'r' is rounded with prec and flags (prec can be BF_PREC_INF). 
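The defining identity is a = q * b + r: the magnitude quotient is first computed with bfdec_tdivremu() (truncated division), then q and r are adjusted by one unit when the requested rounding mode (nearest, ceiling/floor, away-from-zero or Euclidean) calls for it.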
*/ int bfdec_divrem(bfdec_t *q, bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags, int rnd_mode) { bf_context_t *s = q->ctx; bfdec_t a1_s, *a1 = &a1_s; bfdec_t b1_s, *b1 = &b1_s; bfdec_t r1_s, *r1 = &r1_s; int q_sign, res; BOOL is_ceil, is_rndn; assert(q != a && q != b); assert(r != a && r != b); assert(q != r); if (a->len == 0 || b->len == 0) { bfdec_set_zero(q, 0); if (a->expn == BF_EXP_NAN || b->expn == BF_EXP_NAN) { bfdec_set_nan(r); return 0; } else if (a->expn == BF_EXP_INF || b->expn == BF_EXP_ZERO) { bfdec_set_nan(r); return BF_ST_INVALID_OP; } else { bfdec_set(r, a); return bfdec_round(r, prec, flags); } } q_sign = a->sign ^ b->sign; is_rndn = (rnd_mode == BF_RNDN || rnd_mode == BF_RNDNA); switch(rnd_mode) { default: case BF_RNDZ: case BF_RNDN: case BF_RNDNA: is_ceil = FALSE; break; case BF_RNDD: is_ceil = q_sign; break; case BF_RNDU: is_ceil = q_sign ^ 1; break; case BF_RNDA: is_ceil = TRUE; break; case BF_DIVREM_EUCLIDIAN: is_ceil = a->sign; break; } a1->expn = a->expn; a1->tab = a->tab; a1->len = a->len; a1->sign = 0; b1->expn = b->expn; b1->tab = b->tab; b1->len = b->len; b1->sign = 0; // bfdec_print_str("a1", a1); // bfdec_print_str("b1", b1); /* XXX: could improve to avoid having a large 'q' */ bfdec_tdivremu(s, q, r, a1, b1); if (bfdec_is_nan(q) || bfdec_is_nan(r)) goto fail; // bfdec_print_str("q", q); // bfdec_print_str("r", r); if (r->len != 0) { if (is_rndn) { bfdec_init(s, r1); if (bfdec_set(r1, r)) goto fail; if (bfdec_mul_si(r1, r1, 2, BF_PREC_INF, BF_RNDZ)) { bfdec_delete(r1); goto fail; } res = bfdec_cmpu(r1, b); bfdec_delete(r1); if (res > 0 || (res == 0 && (rnd_mode == BF_RNDNA || (get_digit(q->tab, q->len, q->len * LIMB_DIGITS - q->expn) & 1) != 0))) { goto do_sub_r; } } else if (is_ceil) { do_sub_r: res = bfdec_add_si(q, q, 1, BF_PREC_INF, BF_RNDZ); res |= bfdec_sub(r, r, b1, BF_PREC_INF, BF_RNDZ); if (res & BF_ST_MEM_ERROR) goto fail; } } r->sign ^= a->sign; q->sign = q_sign; return bfdec_round(r, prec, flags); fail: bfdec_set_nan(q); bfdec_set_nan(r); return BF_ST_MEM_ERROR; } int bfdec_rem(bfdec_t *r, const bfdec_t *a, const bfdec_t *b, limb_t prec, bf_flags_t flags, int rnd_mode) { bfdec_t q_s, *q = &q_s; int ret; bfdec_init(r->ctx, q); ret = bfdec_divrem(q, r, a, b, prec, flags, rnd_mode); bfdec_delete(q); return ret; } /* convert to integer (infinite precision) */ int bfdec_rint(bfdec_t *r, int rnd_mode) { return bfdec_round(r, 0, rnd_mode | BF_FLAG_RADPNT_PREC); } int bfdec_sqrt(bfdec_t *r, const bfdec_t *a, limb_t prec, bf_flags_t flags) { bf_context_t *s = a->ctx; int ret, k; limb_t *a1, v; slimb_t n, n1, prec1; limb_t res; assert(r != a); if (a->len == 0) { if (a->expn == BF_EXP_NAN) { bfdec_set_nan(r); } else if (a->expn == BF_EXP_INF && a->sign) { goto invalid_op; } else { bfdec_set(r, a); } ret = 0; } else if (a->sign || prec == BF_PREC_INF) { invalid_op: bfdec_set_nan(r); ret = BF_ST_INVALID_OP; } else { if (flags & BF_FLAG_RADPNT_PREC) { prec1 = bf_max(floor_div(a->expn + 1, 2) + prec, 1); } else { prec1 = prec; } /* convert the mantissa to an integer with at least 2 * prec + 4 digits */ n = (2 * (prec1 + 2) + 2 * LIMB_DIGITS - 1) / (2 * LIMB_DIGITS); if (bfdec_resize(r, n)) goto fail; a1 = bf_malloc(s, sizeof(limb_t) * 2 * n); if (!a1) goto fail; n1 = bf_min(2 * n, a->len); memset(a1, 0, (2 * n - n1) * sizeof(limb_t)); memcpy(a1 + 2 * n - n1, a->tab + a->len - n1, n1 * sizeof(limb_t)); if (a->expn & 1) { res = mp_shr_dec(a1, a1, 2 * n, 1, 0); } else { res = 0; } /* normalize so that a1 >= B^(2*n)/4. 
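Scaling by 4^k preserves the square root up to a power of two, sqrt(4^k * a) = 2^k * sqrt(a), which is why the root is divided back by 2^k below.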
Not need for n = 1 because mp_sqrtrem2_dec already does it */ k = 0; if (n > 1) { v = a1[2 * n - 1]; while (v < BF_DEC_BASE / 4) { k++; v *= 4; } if (k != 0) mp_mul1_dec(a1, a1, 2 * n, 1 << (2 * k), 0); } if (mp_sqrtrem_dec(s, r->tab, a1, n)) { bf_free(s, a1); goto fail; } if (k != 0) mp_div1_dec(r->tab, r->tab, n, 1 << k, 0); if (!res) { res = mp_scan_nz(a1, n + 1); } bf_free(s, a1); if (!res) { res = mp_scan_nz(a->tab, a->len - n1); } if (res != 0) r->tab[0] |= 1; r->sign = 0; r->expn = (a->expn + 1) >> 1; ret = bfdec_round(r, prec, flags); } return ret; fail: bfdec_set_nan(r); return BF_ST_MEM_ERROR; } /* The rounding mode is always BF_RNDZ. Return BF_ST_OVERFLOW if there is an overflow and 0 otherwise. No memory error is possible. */ int bfdec_get_int32(int *pres, const bfdec_t *a) { uint32_t v; int ret; if (a->expn >= BF_EXP_INF) { ret = 0; if (a->expn == BF_EXP_INF) { v = (uint32_t)INT32_MAX + a->sign; /* XXX: return overflow ? */ } else { v = INT32_MAX; } } else if (a->expn <= 0) { v = 0; ret = 0; } else if (a->expn <= 9) { v = fast_shr_dec(a->tab[a->len - 1], LIMB_DIGITS - a->expn); if (a->sign) v = -v; ret = 0; } else if (a->expn == 10) { uint64_t v1; uint32_t v_max; #if LIMB_BITS == 64 v1 = fast_shr_dec(a->tab[a->len - 1], LIMB_DIGITS - a->expn); #else v1 = (uint64_t)a->tab[a->len - 1] * 10 + get_digit(a->tab, a->len, (a->len - 1) * LIMB_DIGITS - 1); #endif v_max = (uint32_t)INT32_MAX + a->sign; if (v1 > v_max) { v = v_max; ret = BF_ST_OVERFLOW; } else { v = v1; if (a->sign) v = -v; ret = 0; } } else { v = (uint32_t)INT32_MAX + a->sign; ret = BF_ST_OVERFLOW; } *pres = v; return ret; } /* power to an integer with infinite precision */ int bfdec_pow_ui(bfdec_t *r, const bfdec_t *a, limb_t b) { int ret, n_bits, i; assert(r != a); if (b == 0) return bfdec_set_ui(r, 1); ret = bfdec_set(r, a); n_bits = LIMB_BITS - clz(b); for(i = n_bits - 2; i >= 0; i--) { ret |= bfdec_mul(r, r, r, BF_PREC_INF, BF_RNDZ); if ((b >> i) & 1) ret |= bfdec_mul(r, r, a, BF_PREC_INF, BF_RNDZ); } return ret; } char *bfdec_ftoa(size_t *plen, const bfdec_t *a, limb_t prec, bf_flags_t flags) { return bf_ftoa_internal(plen, (const bf_t *)a, 10, prec, flags, TRUE); } int bfdec_atof(bfdec_t *r, const char *str, const char **pnext, limb_t prec, bf_flags_t flags) { slimb_t dummy_exp; return bf_atof_internal((bf_t *)r, &dummy_exp, str, pnext, 10, prec, flags, TRUE); } #endif /* USE_BF_DEC */ #ifdef USE_FFT_MUL /***************************************************************/ /* Integer multiplication with FFT */ /* or LIMB_BITS at bit position 'pos' in tab */ static inline void put_bits(limb_t *tab, limb_t len, slimb_t pos, limb_t val) { limb_t i; int p; i = pos >> LIMB_LOG2_BITS; p = pos & (LIMB_BITS - 1); if (i < len) tab[i] |= val << p; if (p != 0) { i++; if (i < len) { tab[i] |= val >> (LIMB_BITS - p); } } } #if defined(__AVX2__) typedef double NTTLimb; /* we must have: modulo >= 1 << NTT_MOD_LOG2_MIN */ #define NTT_MOD_LOG2_MIN 50 #define NTT_MOD_LOG2_MAX 51 #define NB_MODS 5 #define NTT_PROOT_2EXP 39 static const int ntt_int_bits[NB_MODS] = { 254, 203, 152, 101, 50, }; static const limb_t ntt_mods[NB_MODS] = { 0x00073a8000000001, 0x0007858000000001, 0x0007a38000000001, 0x0007a68000000001, 0x0007fd8000000001, }; static const limb_t ntt_proot[2][NB_MODS] = { { 0x00056198d44332c8, 0x0002eb5d640aad39, 0x00047e31eaa35fd0, 0x0005271ac118a150, 0x00075e0ce8442bd5, }, { 0x000461169761bcc5, 0x0002dac3cb2da688, 0x0004abc97751e3bf, 0x000656778fc8c485, 0x0000dc6469c269fa, }, }; static const limb_t ntt_mods_cr[NB_MODS * 
(NB_MODS - 1) / 2] = { 0x00020e4da740da8e, 0x0004c3dc09c09c1d, 0x000063bd097b4271, 0x000799d8f18f18fd, 0x0005384222222264, 0x000572b07c1f07fe, 0x00035cd08888889a, 0x00066015555557e3, 0x000725960b60b623, 0x0002fc1fa1d6ce12, }; #else typedef limb_t NTTLimb; #if LIMB_BITS == 64 #define NTT_MOD_LOG2_MIN 61 #define NTT_MOD_LOG2_MAX 62 #define NB_MODS 5 #define NTT_PROOT_2EXP 51 static const int ntt_int_bits[NB_MODS] = { 307, 246, 185, 123, 61, }; static const limb_t ntt_mods[NB_MODS] = { 0x28d8000000000001, 0x2a88000000000001, 0x2ed8000000000001, 0x3508000000000001, 0x3aa8000000000001, }; static const limb_t ntt_proot[2][NB_MODS] = { { 0x1b8ea61034a2bea7, 0x21a9762de58206fb, 0x02ca782f0756a8ea, 0x278384537a3e50a1, 0x106e13fee74ce0ab, }, { 0x233513af133e13b8, 0x1d13140d1c6f75f1, 0x12cde57f97e3eeda, 0x0d6149e23cbe654f, 0x36cd204f522a1379, }, }; static const limb_t ntt_mods_cr[NB_MODS * (NB_MODS - 1) / 2] = { 0x08a9ed097b425eea, 0x18a44aaaaaaaaab3, 0x2493f57f57f57f5d, 0x126b8d0649a7f8d4, 0x09d80ed7303b5ccc, 0x25b8bcf3cf3cf3d5, 0x2ce6ce63398ce638, 0x0e31fad40a57eb59, 0x02a3529fd4a7f52f, 0x3a5493e93e93e94a, }; #elif LIMB_BITS == 32 /* we must have: modulo >= 1 << NTT_MOD_LOG2_MIN */ #define NTT_MOD_LOG2_MIN 29 #define NTT_MOD_LOG2_MAX 30 #define NB_MODS 5 #define NTT_PROOT_2EXP 20 static const int ntt_int_bits[NB_MODS] = { 148, 119, 89, 59, 29, }; static const limb_t ntt_mods[NB_MODS] = { 0x0000000032b00001, 0x0000000033700001, 0x0000000036d00001, 0x0000000037300001, 0x000000003e500001, }; static const limb_t ntt_proot[2][NB_MODS] = { { 0x0000000032525f31, 0x0000000005eb3b37, 0x00000000246eda9f, 0x0000000035f25901, 0x00000000022f5768, }, { 0x00000000051eba1a, 0x00000000107be10e, 0x000000001cd574e0, 0x00000000053806e6, 0x000000002cd6bf98, }, }; static const limb_t ntt_mods_cr[NB_MODS * (NB_MODS - 1) / 2] = { 0x000000000449559a, 0x000000001eba6ca9, 0x000000002ec18e46, 0x000000000860160b, 0x000000000d321307, 0x000000000bf51120, 0x000000000f662938, 0x000000000932ab3e, 0x000000002f40eef8, 0x000000002e760905, }; #endif /* LIMB_BITS */ #endif /* !AVX2 */ #if defined(__AVX2__) #define NTT_TRIG_K_MAX 18 #else #define NTT_TRIG_K_MAX 19 #endif typedef struct BFNTTState { bf_context_t *ctx; /* used for mul_mod_fast() */ limb_t ntt_mods_div[NB_MODS]; limb_t ntt_proot_pow[NB_MODS][2][NTT_PROOT_2EXP + 1]; limb_t ntt_proot_pow_inv[NB_MODS][2][NTT_PROOT_2EXP + 1]; NTTLimb *ntt_trig[NB_MODS][2][NTT_TRIG_K_MAX + 1]; /* 1/2^n mod m */ limb_t ntt_len_inv[NB_MODS][NTT_PROOT_2EXP + 1][2]; #if defined(__AVX2__) __m256d ntt_mods_cr_vec[NB_MODS * (NB_MODS - 1) / 2]; __m256d ntt_mods_vec[NB_MODS]; __m256d ntt_mods_inv_vec[NB_MODS]; #else limb_t ntt_mods_cr_inv[NB_MODS * (NB_MODS - 1) / 2]; #endif } BFNTTState; static NTTLimb *get_trig(BFNTTState *s, int k, int inverse, int m_idx); /* add modulo with up to (LIMB_BITS-1) bit modulo */ static inline limb_t add_mod(limb_t a, limb_t b, limb_t m) { limb_t r; r = a + b; if (r >= m) r -= m; return r; } /* sub modulo with up to LIMB_BITS bit modulo */ static inline limb_t sub_mod(limb_t a, limb_t b, limb_t m) { limb_t r; r = a - b; if (r > a) r += m; return r; } /* return (r0+r1*B) mod m precondition: 0 <= r0+r1*B < 2^(64+NTT_MOD_LOG2_MIN) */ static inline limb_t mod_fast(dlimb_t r, limb_t m, limb_t m_inv) { limb_t a1, q, t0, r1, r0; a1 = r >> NTT_MOD_LOG2_MIN; q = ((dlimb_t)a1 * m_inv) >> LIMB_BITS; r = r - (dlimb_t)q * m - m * 2; r1 = r >> LIMB_BITS; t0 = (slimb_t)r1 >> 1; r += m & t0; r0 = r; r1 = r >> LIMB_BITS; r0 += m & r1; return r0; } /* faster version using precomputed modulo 
inverse. precondition: 0 <= a * b < 2^(64+NTT_MOD_LOG2_MIN) */ static inline limb_t mul_mod_fast(limb_t a, limb_t b, limb_t m, limb_t m_inv) { dlimb_t r; r = (dlimb_t)a * (dlimb_t)b; return mod_fast(r, m, m_inv); } static inline limb_t init_mul_mod_fast(limb_t m) { dlimb_t t; assert(m < (limb_t)1 << NTT_MOD_LOG2_MAX); assert(m >= (limb_t)1 << NTT_MOD_LOG2_MIN); t = (dlimb_t)1 << (LIMB_BITS + NTT_MOD_LOG2_MIN); return t / m; } /* Faster version used when the multiplier is constant. 0 <= a < 2^64, 0 <= b < m. */ static inline limb_t mul_mod_fast2(limb_t a, limb_t b, limb_t m, limb_t b_inv) { limb_t r, q; q = ((dlimb_t)a * (dlimb_t)b_inv) >> LIMB_BITS; r = a * b - q * m; if (r >= m) r -= m; return r; } /* Faster version used when the multiplier is constant. 0 <= a < 2^64, 0 <= b < m. Let r = a * b mod m. The return value is 'r' or 'r + m'. */ static inline limb_t mul_mod_fast3(limb_t a, limb_t b, limb_t m, limb_t b_inv) { limb_t r, q; q = ((dlimb_t)a * (dlimb_t)b_inv) >> LIMB_BITS; r = a * b - q * m; return r; } static inline limb_t init_mul_mod_fast2(limb_t b, limb_t m) { return ((dlimb_t)b << LIMB_BITS) / m; } #ifdef __AVX2__ static inline limb_t ntt_limb_to_int(NTTLimb a, limb_t m) { slimb_t v; v = a; if (v < 0) v += m; if (v >= m) v -= m; return v; } static inline NTTLimb int_to_ntt_limb(limb_t a, limb_t m) { return (slimb_t)a; } static inline NTTLimb int_to_ntt_limb2(limb_t a, limb_t m) { if (a >= (m / 2)) a -= m; return (slimb_t)a; } /* return r + m if r < 0 otherwise r. */ static inline __m256d ntt_mod1(__m256d r, __m256d m) { return _mm256_blendv_pd(r, r + m, r); } /* input: abs(r) < 2 * m. Output: abs(r) < m */ static inline __m256d ntt_mod(__m256d r, __m256d mf, __m256d m2f) { return _mm256_blendv_pd(r, r + m2f, r) - mf; } /* input: abs(a*b) < 2 * m^2, output: abs(r) < m */ static inline __m256d ntt_mul_mod(__m256d a, __m256d b, __m256d mf, __m256d m_inv) { __m256d r, q, ab1, ab0, qm0, qm1; ab1 = a * b; q = _mm256_round_pd(ab1 * m_inv, 0); /* round to nearest */ qm1 = q * mf; qm0 = _mm256_fmsub_pd(q, mf, qm1); /* low part */ ab0 = _mm256_fmsub_pd(a, b, ab1); /* low part */ r = (ab1 - qm1) + (ab0 - qm0); return r; } static void *bf_aligned_malloc(bf_context_t *s, size_t size, size_t align) { void *ptr; void **ptr1; ptr = bf_malloc(s, size + sizeof(void *) + align - 1); if (!ptr) return NULL; ptr1 = (void **)(((uintptr_t)ptr + sizeof(void *) + align - 1) & ~(align - 1)); ptr1[-1] = ptr; return ptr1; } static void bf_aligned_free(bf_context_t *s, void *ptr) { if (!ptr) return; bf_free(s, ((void **)ptr)[-1]); } static void *ntt_malloc(BFNTTState *s, size_t size) { return bf_aligned_malloc(s->ctx, size, 64); } static void ntt_free(BFNTTState *s, void *ptr) { bf_aligned_free(s->ctx, ptr); } static no_inline int ntt_fft(BFNTTState *s, NTTLimb *out_buf, NTTLimb *in_buf, NTTLimb *tmp_buf, int fft_len_log2, int inverse, int m_idx) { limb_t nb_blocks, fft_per_block, p, k, n, stride_in, i, j; NTTLimb *tab_in, *tab_out, *tmp, *trig; __m256d m_inv, mf, m2f, c, a0, a1, b0, b1; limb_t m; int l; m = ntt_mods[m_idx]; m_inv = _mm256_set1_pd(1.0 / (double)m); mf = _mm256_set1_pd(m); m2f = _mm256_set1_pd(m * 2); n = (limb_t)1 << fft_len_log2; assert(n >= 8); stride_in = n / 2; tab_in = in_buf; tab_out = tmp_buf; trig = get_trig(s, fft_len_log2, inverse, m_idx); if (!trig) return -1; p = 0; for(k = 0; k < stride_in; k += 4) { a0 = _mm256_load_pd(&tab_in[k]); a1 = _mm256_load_pd(&tab_in[k + stride_in]); c = _mm256_load_pd(trig); trig += 4; b0 = ntt_mod(a0 + a1, mf, m2f); b1 = ntt_mul_mod(a0 - a1, c, mf, 
m_inv); a0 = _mm256_permute2f128_pd(b0, b1, 0x20); a1 = _mm256_permute2f128_pd(b0, b1, 0x31); a0 = _mm256_permute4x64_pd(a0, 0xd8); a1 = _mm256_permute4x64_pd(a1, 0xd8); _mm256_store_pd(&tab_out[p], a0); _mm256_store_pd(&tab_out[p + 4], a1); p += 2 * 4; } tmp = tab_in; tab_in = tab_out; tab_out = tmp; trig = get_trig(s, fft_len_log2 - 1, inverse, m_idx); if (!trig) return -1; p = 0; for(k = 0; k < stride_in; k += 4) { a0 = _mm256_load_pd(&tab_in[k]); a1 = _mm256_load_pd(&tab_in[k + stride_in]); c = _mm256_setr_pd(trig[0], trig[0], trig[1], trig[1]); trig += 2; b0 = ntt_mod(a0 + a1, mf, m2f); b1 = ntt_mul_mod(a0 - a1, c, mf, m_inv); a0 = _mm256_permute2f128_pd(b0, b1, 0x20); a1 = _mm256_permute2f128_pd(b0, b1, 0x31); _mm256_store_pd(&tab_out[p], a0); _mm256_store_pd(&tab_out[p + 4], a1); p += 2 * 4; } tmp = tab_in; tab_in = tab_out; tab_out = tmp; nb_blocks = n / 4; fft_per_block = 4; l = fft_len_log2 - 2; while (nb_blocks != 2) { nb_blocks >>= 1; p = 0; k = 0; trig = get_trig(s, l, inverse, m_idx); if (!trig) return -1; for(i = 0; i < nb_blocks; i++) { c = _mm256_set1_pd(trig[0]); trig++; for(j = 0; j < fft_per_block; j += 4) { a0 = _mm256_load_pd(&tab_in[k + j]); a1 = _mm256_load_pd(&tab_in[k + j + stride_in]); b0 = ntt_mod(a0 + a1, mf, m2f); b1 = ntt_mul_mod(a0 - a1, c, mf, m_inv); _mm256_store_pd(&tab_out[p + j], b0); _mm256_store_pd(&tab_out[p + j + fft_per_block], b1); } k += fft_per_block; p += 2 * fft_per_block; } fft_per_block <<= 1; l--; tmp = tab_in; tab_in = tab_out; tab_out = tmp; } tab_out = out_buf; for(k = 0; k < stride_in; k += 4) { a0 = _mm256_load_pd(&tab_in[k]); a1 = _mm256_load_pd(&tab_in[k + stride_in]); b0 = ntt_mod(a0 + a1, mf, m2f); b1 = ntt_mod(a0 - a1, mf, m2f); _mm256_store_pd(&tab_out[k], b0); _mm256_store_pd(&tab_out[k + stride_in], b1); } return 0; } static void ntt_vec_mul(BFNTTState *s, NTTLimb *tab1, NTTLimb *tab2, limb_t fft_len_log2, int k_tot, int m_idx) { limb_t i, c_inv, n, m; __m256d m_inv, mf, a, b, c; m = ntt_mods[m_idx]; c_inv = s->ntt_len_inv[m_idx][k_tot][0]; m_inv = _mm256_set1_pd(1.0 / (double)m); mf = _mm256_set1_pd(m); c = _mm256_set1_pd(int_to_ntt_limb(c_inv, m)); n = (limb_t)1 << fft_len_log2; for(i = 0; i < n; i += 4) { a = _mm256_load_pd(&tab1[i]); b = _mm256_load_pd(&tab2[i]); a = ntt_mul_mod(a, b, mf, m_inv); a = ntt_mul_mod(a, c, mf, m_inv); _mm256_store_pd(&tab1[i], a); } } static no_inline void mul_trig(NTTLimb *buf, limb_t n, limb_t c1, limb_t m, limb_t m_inv1) { limb_t i, c2, c3, c4; __m256d c, c_mul, a0, mf, m_inv; assert(n >= 2); mf = _mm256_set1_pd(m); m_inv = _mm256_set1_pd(1.0 / (double)m); c2 = mul_mod_fast(c1, c1, m, m_inv1); c3 = mul_mod_fast(c2, c1, m, m_inv1); c4 = mul_mod_fast(c2, c2, m, m_inv1); c = _mm256_setr_pd(1, int_to_ntt_limb(c1, m), int_to_ntt_limb(c2, m), int_to_ntt_limb(c3, m)); c_mul = _mm256_set1_pd(int_to_ntt_limb(c4, m)); for(i = 0; i < n; i += 4) { a0 = _mm256_load_pd(&buf[i]); a0 = ntt_mul_mod(a0, c, mf, m_inv); _mm256_store_pd(&buf[i], a0); c = ntt_mul_mod(c, c_mul, mf, m_inv); } } #else static void *ntt_malloc(BFNTTState *s, size_t size) { return bf_malloc(s->ctx, size); } static void ntt_free(BFNTTState *s, void *ptr) { bf_free(s->ctx, ptr); } static inline limb_t ntt_limb_to_int(NTTLimb a, limb_t m) { if (a >= m) a -= m; return a; } static inline NTTLimb int_to_ntt_limb(slimb_t a, limb_t m) { return a; } static no_inline int ntt_fft(BFNTTState *s, NTTLimb *out_buf, NTTLimb *in_buf, NTTLimb *tmp_buf, int fft_len_log2, int inverse, int m_idx) { limb_t nb_blocks, fft_per_block, p, k, n, stride_in, i, j, 
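/* (Illustrative note, not from the upstream libbf source.) This scalar
   ntt_fft() appears to use the Gentleman-Sande (decimation-in-frequency)
   butterfly, (a0, a1) -> (a0 + a1, (a0 - a1) * w), but keeps intermediate
   values only partially reduced in [0, 2*m): add_mod(..., m2) reduces modulo
   2*m, the subtraction adds 2*m to stay non-negative, and mul_mod_fast3() may
   return r or r + m. Full reduction into [0, m) is deferred (see
   ntt_limb_to_int() above), which saves one compare-and-subtract per butterfly. */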
m, m2; NTTLimb *tab_in, *tab_out, *tmp, a0, a1, b0, b1, c, *trig, c_inv; int l; m = ntt_mods[m_idx]; m2 = 2 * m; n = (limb_t)1 << fft_len_log2; nb_blocks = n; fft_per_block = 1; stride_in = n / 2; tab_in = in_buf; tab_out = tmp_buf; l = fft_len_log2; while (nb_blocks != 2) { nb_blocks >>= 1; p = 0; k = 0; trig = get_trig(s, l, inverse, m_idx); if (!trig) return -1; for(i = 0; i < nb_blocks; i++) { c = trig[0]; c_inv = trig[1]; trig += 2; for(j = 0; j < fft_per_block; j++) { a0 = tab_in[k + j]; a1 = tab_in[k + j + stride_in]; b0 = add_mod(a0, a1, m2); b1 = a0 - a1 + m2; b1 = mul_mod_fast3(b1, c, m, c_inv); tab_out[p + j] = b0; tab_out[p + j + fft_per_block] = b1; } k += fft_per_block; p += 2 * fft_per_block; } fft_per_block <<= 1; l--; tmp = tab_in; tab_in = tab_out; tab_out = tmp; } /* no twiddle in last step */ tab_out = out_buf; for(k = 0; k < stride_in; k++) { a0 = tab_in[k]; a1 = tab_in[k + stride_in]; b0 = add_mod(a0, a1, m2); b1 = sub_mod(a0, a1, m2); tab_out[k] = b0; tab_out[k + stride_in] = b1; } return 0; } static void ntt_vec_mul(BFNTTState *s, NTTLimb *tab1, NTTLimb *tab2, int fft_len_log2, int k_tot, int m_idx) { limb_t i, norm, norm_inv, a, n, m, m_inv; m = ntt_mods[m_idx]; m_inv = s->ntt_mods_div[m_idx]; norm = s->ntt_len_inv[m_idx][k_tot][0]; norm_inv = s->ntt_len_inv[m_idx][k_tot][1]; n = (limb_t)1 << fft_len_log2; for(i = 0; i < n; i++) { a = tab1[i]; /* need to reduce the range so that the product is < 2^(LIMB_BITS+NTT_MOD_LOG2_MIN) */ if (a >= m) a -= m; a = mul_mod_fast(a, tab2[i], m, m_inv); a = mul_mod_fast3(a, norm, m, norm_inv); tab1[i] = a; } } static no_inline void mul_trig(NTTLimb *buf, limb_t n, limb_t c_mul, limb_t m, limb_t m_inv) { limb_t i, c0, c_mul_inv; c0 = 1; c_mul_inv = init_mul_mod_fast2(c_mul, m); for(i = 0; i < n; i++) { buf[i] = mul_mod_fast(buf[i], c0, m, m_inv); c0 = mul_mod_fast2(c0, c_mul, m, c_mul_inv); } } #endif /* !AVX2 */ static no_inline NTTLimb *get_trig(BFNTTState *s, int k, int inverse, int m_idx) { NTTLimb *tab; limb_t i, n2, c, c_mul, m, c_mul_inv; if (k > NTT_TRIG_K_MAX) return NULL; tab = s->ntt_trig[m_idx][inverse][k]; if (tab) return tab; n2 = (limb_t)1 << (k - 1); m = ntt_mods[m_idx]; #ifdef __AVX2__ tab = ntt_malloc(s, sizeof(NTTLimb) * n2); #else tab = ntt_malloc(s, sizeof(NTTLimb) * n2 * 2); #endif if (!tab) return NULL; c = 1; c_mul = s->ntt_proot_pow[m_idx][inverse][k]; c_mul_inv = s->ntt_proot_pow_inv[m_idx][inverse][k]; for(i = 0; i < n2; i++) { #ifdef __AVX2__ tab[i] = int_to_ntt_limb2(c, m); #else tab[2 * i] = int_to_ntt_limb(c, m); tab[2 * i + 1] = init_mul_mod_fast2(c, m); #endif c = mul_mod_fast2(c, c_mul, m, c_mul_inv); } s->ntt_trig[m_idx][inverse][k] = tab; return tab; } void fft_clear_cache(bf_context_t *s1) { int m_idx, inverse, k; BFNTTState *s = s1->ntt_state; if (s) { for(m_idx = 0; m_idx < NB_MODS; m_idx++) { for(inverse = 0; inverse < 2; inverse++) { for(k = 0; k < NTT_TRIG_K_MAX + 1; k++) { if (s->ntt_trig[m_idx][inverse][k]) { ntt_free(s, s->ntt_trig[m_idx][inverse][k]); s->ntt_trig[m_idx][inverse][k] = NULL; } } } } #if defined(__AVX2__) bf_aligned_free(s1, s); #else bf_free(s1, s); #endif s1->ntt_state = NULL; } } #define STRIP_LEN 16 /* dst = buf1, src = buf2 */ static int ntt_fft_partial(BFNTTState *s, NTTLimb *buf1, int k1, int k2, limb_t n1, limb_t n2, int inverse, limb_t m_idx) { limb_t i, j, c_mul, c0, m, m_inv, strip_len, l; NTTLimb *buf2, *buf3; buf2 = NULL; buf3 = ntt_malloc(s, sizeof(NTTLimb) * n1); if (!buf3) goto fail; if (k2 == 0) { if (ntt_fft(s, buf1, buf1, buf3, k1, inverse, m_idx)) goto 
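/* (Illustrative note, not from the upstream libbf source.) The else-branch
   below appears to implement the usual blocked "four-step" FFT used when the
   transform is too large for the cached trig tables (k2 > 0): the
   length-2^(k1+k2) transform is viewed as an n1 x n2 matrix, columns are
   gathered STRIP_LEN at a time into a contiguous buffer for cache locality,
   each column is transformed with a length-n1 FFT, and mul_trig() applies the
   inter-stage twiddle factors (before the column FFT for the inverse
   transform, after it for the forward one). */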
fail; } else { strip_len = STRIP_LEN; buf2 = ntt_malloc(s, sizeof(NTTLimb) * n1 * strip_len); if (!buf2) goto fail; m = ntt_mods[m_idx]; m_inv = s->ntt_mods_div[m_idx]; c0 = s->ntt_proot_pow[m_idx][inverse][k1 + k2]; c_mul = 1; assert((n2 % strip_len) == 0); for(j = 0; j < n2; j += strip_len) { for(i = 0; i < n1; i++) { for(l = 0; l < strip_len; l++) { buf2[i + l * n1] = buf1[i * n2 + (j + l)]; } } for(l = 0; l < strip_len; l++) { if (inverse) mul_trig(buf2 + l * n1, n1, c_mul, m, m_inv); if (ntt_fft(s, buf2 + l * n1, buf2 + l * n1, buf3, k1, inverse, m_idx)) goto fail; if (!inverse) mul_trig(buf2 + l * n1, n1, c_mul, m, m_inv); c_mul = mul_mod_fast(c_mul, c0, m, m_inv); } for(i = 0; i < n1; i++) { for(l = 0; l < strip_len; l++) { buf1[i * n2 + (j + l)] = buf2[i + l *n1]; } } } ntt_free(s, buf2); } ntt_free(s, buf3); return 0; fail: ntt_free(s, buf2); ntt_free(s, buf3); return -1; } /* dst = buf1, src = buf2, tmp = buf3 */ static int ntt_conv(BFNTTState *s, NTTLimb *buf1, NTTLimb *buf2, int k, int k_tot, limb_t m_idx) { limb_t n1, n2, i; int k1, k2; if (k <= NTT_TRIG_K_MAX) { k1 = k; } else { /* recursive split of the FFT */ k1 = bf_min(k / 2, NTT_TRIG_K_MAX); } k2 = k - k1; n1 = (limb_t)1 << k1; n2 = (limb_t)1 << k2; if (ntt_fft_partial(s, buf1, k1, k2, n1, n2, 0, m_idx)) return -1; if (ntt_fft_partial(s, buf2, k1, k2, n1, n2, 0, m_idx)) return -1; if (k2 == 0) { ntt_vec_mul(s, buf1, buf2, k, k_tot, m_idx); } else { for(i = 0; i < n1; i++) { ntt_conv(s, buf1 + i * n2, buf2 + i * n2, k2, k_tot, m_idx); } } if (ntt_fft_partial(s, buf1, k1, k2, n1, n2, 1, m_idx)) return -1; return 0; } static no_inline void limb_to_ntt(BFNTTState *s, NTTLimb *tabr, limb_t fft_len, const limb_t *taba, limb_t a_len, int dpl, int first_m_idx, int nb_mods) { slimb_t i, n; dlimb_t a, b; int j, shift; limb_t base_mask1, a0, a1, a2, r, m, m_inv; #if 0 for(i = 0; i < a_len; i++) { printf("%" PRId64 ": " FMT_LIMB "\n", (int64_t)i, taba[i]); } #endif memset(tabr, 0, sizeof(NTTLimb) * fft_len * nb_mods); shift = dpl & (LIMB_BITS - 1); if (shift == 0) base_mask1 = -1; else base_mask1 = ((limb_t)1 << shift) - 1; n = bf_min(fft_len, (a_len * LIMB_BITS + dpl - 1) / dpl); for(i = 0; i < n; i++) { a0 = get_bits(taba, a_len, i * dpl); if (dpl <= LIMB_BITS) { a0 &= base_mask1; a = a0; } else { a1 = get_bits(taba, a_len, i * dpl + LIMB_BITS); if (dpl <= (LIMB_BITS + NTT_MOD_LOG2_MIN)) { a = a0 | ((dlimb_t)(a1 & base_mask1) << LIMB_BITS); } else { if (dpl > 2 * LIMB_BITS) { a2 = get_bits(taba, a_len, i * dpl + LIMB_BITS * 2) & base_mask1; } else { a1 &= base_mask1; a2 = 0; } // printf("a=0x%016lx%016lx%016lx\n", a2, a1, a0); a = (a0 >> (LIMB_BITS - NTT_MOD_LOG2_MAX + NTT_MOD_LOG2_MIN)) | ((dlimb_t)a1 << (NTT_MOD_LOG2_MAX - NTT_MOD_LOG2_MIN)) | ((dlimb_t)a2 << (LIMB_BITS + NTT_MOD_LOG2_MAX - NTT_MOD_LOG2_MIN)); a0 &= ((limb_t)1 << (LIMB_BITS - NTT_MOD_LOG2_MAX + NTT_MOD_LOG2_MIN)) - 1; } } for(j = 0; j < nb_mods; j++) { m = ntt_mods[first_m_idx + j]; m_inv = s->ntt_mods_div[first_m_idx + j]; r = mod_fast(a, m, m_inv); if (dpl > (LIMB_BITS + NTT_MOD_LOG2_MIN)) { b = ((dlimb_t)r << (LIMB_BITS - NTT_MOD_LOG2_MAX + NTT_MOD_LOG2_MIN)) | a0; r = mod_fast(b, m, m_inv); } tabr[i + j * fft_len] = int_to_ntt_limb(r, m); } } } #if defined(__AVX2__) #define VEC_LEN 4 typedef union { __m256d v; double d[4]; } VecUnion; static no_inline void ntt_to_limb(BFNTTState *s, limb_t *tabr, limb_t r_len, const NTTLimb *buf, int fft_len_log2, int dpl, int nb_mods) { const limb_t *mods = ntt_mods + NB_MODS - nb_mods; const __m256d *mods_cr_vec, *mf, 
*m_inv; VecUnion y[NB_MODS]; limb_t u[NB_MODS], carry[NB_MODS], fft_len, base_mask1, r; slimb_t i, len, pos; int j, k, l, shift, n_limb1, p; dlimb_t t; j = NB_MODS * (NB_MODS - 1) / 2 - nb_mods * (nb_mods - 1) / 2; mods_cr_vec = s->ntt_mods_cr_vec + j; mf = s->ntt_mods_vec + NB_MODS - nb_mods; m_inv = s->ntt_mods_inv_vec + NB_MODS - nb_mods; shift = dpl & (LIMB_BITS - 1); if (shift == 0) base_mask1 = -1; else base_mask1 = ((limb_t)1 << shift) - 1; n_limb1 = ((unsigned)dpl - 1) / LIMB_BITS; for(j = 0; j < NB_MODS; j++) carry[j] = 0; for(j = 0; j < NB_MODS; j++) u[j] = 0; /* avoid warnings */ memset(tabr, 0, sizeof(limb_t) * r_len); fft_len = (limb_t)1 << fft_len_log2; len = bf_min(fft_len, (r_len * LIMB_BITS + dpl - 1) / dpl); len = (len + VEC_LEN - 1) & ~(VEC_LEN - 1); i = 0; while (i < len) { for(j = 0; j < nb_mods; j++) y[j].v = *(__m256d *)&buf[i + fft_len * j]; /* Chinese remainder to get mixed radix representation */ l = 0; for(j = 0; j < nb_mods - 1; j++) { y[j].v = ntt_mod1(y[j].v, mf[j]); for(k = j + 1; k < nb_mods; k++) { y[k].v = ntt_mul_mod(y[k].v - y[j].v, mods_cr_vec[l], mf[k], m_inv[k]); l++; } } y[j].v = ntt_mod1(y[j].v, mf[j]); for(p = 0; p < VEC_LEN; p++) { /* back to normal representation */ u[0] = (int64_t)y[nb_mods - 1].d[p]; l = 1; for(j = nb_mods - 2; j >= 1; j--) { r = (int64_t)y[j].d[p]; for(k = 0; k < l; k++) { t = (dlimb_t)u[k] * mods[j] + r; r = t >> LIMB_BITS; u[k] = t; } u[l] = r; l++; } /* XXX: for nb_mods = 5, l should be 4 */ /* last step adds the carry */ r = (int64_t)y[0].d[p]; for(k = 0; k < l; k++) { t = (dlimb_t)u[k] * mods[j] + r + carry[k]; r = t >> LIMB_BITS; u[k] = t; } u[l] = r + carry[l]; #if 0 printf("%" PRId64 ": ", i); for(j = nb_mods - 1; j >= 0; j--) { printf(" %019" PRIu64, u[j]); } printf("\n"); #endif /* write the digits */ pos = i * dpl; for(j = 0; j < n_limb1; j++) { put_bits(tabr, r_len, pos, u[j]); pos += LIMB_BITS; } put_bits(tabr, r_len, pos, u[n_limb1] & base_mask1); /* shift by dpl digits and set the carry */ if (shift == 0) { for(j = n_limb1 + 1; j < nb_mods; j++) carry[j - (n_limb1 + 1)] = u[j]; } else { for(j = n_limb1; j < nb_mods - 1; j++) { carry[j - n_limb1] = (u[j] >> shift) | (u[j + 1] << (LIMB_BITS - shift)); } carry[nb_mods - 1 - n_limb1] = u[nb_mods - 1] >> shift; } i++; } } } #else static no_inline void ntt_to_limb(BFNTTState *s, limb_t *tabr, limb_t r_len, const NTTLimb *buf, int fft_len_log2, int dpl, int nb_mods) { const limb_t *mods = ntt_mods + NB_MODS - nb_mods; const limb_t *mods_cr, *mods_cr_inv; limb_t y[NB_MODS], u[NB_MODS], carry[NB_MODS], fft_len, base_mask1, r; slimb_t i, len, pos; int j, k, l, shift, n_limb1; dlimb_t t; j = NB_MODS * (NB_MODS - 1) / 2 - nb_mods * (nb_mods - 1) / 2; mods_cr = ntt_mods_cr + j; mods_cr_inv = s->ntt_mods_cr_inv + j; shift = dpl & (LIMB_BITS - 1); if (shift == 0) base_mask1 = -1; else base_mask1 = ((limb_t)1 << shift) - 1; n_limb1 = ((unsigned)dpl - 1) / LIMB_BITS; for(j = 0; j < NB_MODS; j++) carry[j] = 0; for(j = 0; j < NB_MODS; j++) u[j] = 0; /* avoid warnings */ memset(tabr, 0, sizeof(limb_t) * r_len); fft_len = (limb_t)1 << fft_len_log2; len = bf_min(fft_len, (r_len * LIMB_BITS + dpl - 1) / dpl); for(i = 0; i < len; i++) { for(j = 0; j < nb_mods; j++) { y[j] = ntt_limb_to_int(buf[i + fft_len * j], mods[j]); } /* Chinese remainder to get mixed radix representation */ l = 0; for(j = 0; j < nb_mods - 1; j++) { for(k = j + 1; k < nb_mods; k++) { limb_t m; m = mods[k]; /* Note: there is no overflow in the sub_mod() because the modulos are sorted by increasing order */ y[k] = 
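/* (Illustrative note, not from the upstream libbf source.) The double loop
   around this statement appears to be Garner's algorithm: assuming
   ntt_mods_cr[l] holds 1/mods[j] (mod mods[k]) for j < k, each pass turns the
   residues y[] into mixed-radix digits d[] such that
       x = d[0] + m0*(d[1] + m1*(d[2] + ...)),
   which is exactly what the "back to normal representation" loop below
   multiplies out. Tiny worked example with moduli (3, 5, 7) and x = 23,
   residues y = (2, 3, 2):
     pass j=0:  y[1] = (3-2)*inv(3) mod 5 = 1*2 = 2
                y[2] = (2-2)*inv(3) mod 7 = 0
     pass j=1:  y[2] = (0-2)*inv(5) mod 7 = 5*3 mod 7 = 1
   giving digits (2, 2, 1), and indeed 2 + 3*(2 + 5*1) = 23. */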
mul_mod_fast2(y[k] - y[j] + m, mods_cr[l], m, mods_cr_inv[l]); l++; } } /* back to normal representation */ u[0] = y[nb_mods - 1]; l = 1; for(j = nb_mods - 2; j >= 1; j--) { r = y[j]; for(k = 0; k < l; k++) { t = (dlimb_t)u[k] * mods[j] + r; r = t >> LIMB_BITS; u[k] = t; } u[l] = r; l++; } /* last step adds the carry */ r = y[0]; for(k = 0; k < l; k++) { t = (dlimb_t)u[k] * mods[j] + r + carry[k]; r = t >> LIMB_BITS; u[k] = t; } u[l] = r + carry[l]; #if 0 printf("%" PRId64 ": ", (int64_t)i); for(j = nb_mods - 1; j >= 0; j--) { printf(" " FMT_LIMB, u[j]); } printf("\n"); #endif /* write the digits */ pos = i * dpl; for(j = 0; j < n_limb1; j++) { put_bits(tabr, r_len, pos, u[j]); pos += LIMB_BITS; } put_bits(tabr, r_len, pos, u[n_limb1] & base_mask1); /* shift by dpl digits and set the carry */ if (shift == 0) { for(j = n_limb1 + 1; j < nb_mods; j++) carry[j - (n_limb1 + 1)] = u[j]; } else { for(j = n_limb1; j < nb_mods - 1; j++) { carry[j - n_limb1] = (u[j] >> shift) | (u[j + 1] << (LIMB_BITS - shift)); } carry[nb_mods - 1 - n_limb1] = u[nb_mods - 1] >> shift; } } } #endif static int ntt_static_init(bf_context_t *s1) { BFNTTState *s; int inverse, i, j, k, l; limb_t c, c_inv, c_inv2, m, m_inv; if (s1->ntt_state) return 0; #if defined(__AVX2__) s = bf_aligned_malloc(s1, sizeof(*s), 64); #else s = bf_malloc(s1, sizeof(*s)); #endif if (!s) return -1; memset(s, 0, sizeof(*s)); s1->ntt_state = s; s->ctx = s1; for(j = 0; j < NB_MODS; j++) { m = ntt_mods[j]; m_inv = init_mul_mod_fast(m); s->ntt_mods_div[j] = m_inv; #if defined(__AVX2__) s->ntt_mods_vec[j] = _mm256_set1_pd(m); s->ntt_mods_inv_vec[j] = _mm256_set1_pd(1.0 / (double)m); #endif c_inv2 = (m + 1) / 2; /* 1/2 */ c_inv = 1; for(i = 0; i <= NTT_PROOT_2EXP; i++) { s->ntt_len_inv[j][i][0] = c_inv; s->ntt_len_inv[j][i][1] = init_mul_mod_fast2(c_inv, m); c_inv = mul_mod_fast(c_inv, c_inv2, m, m_inv); } for(inverse = 0; inverse < 2; inverse++) { c = ntt_proot[inverse][j]; for(i = 0; i < NTT_PROOT_2EXP; i++) { s->ntt_proot_pow[j][inverse][NTT_PROOT_2EXP - i] = c; s->ntt_proot_pow_inv[j][inverse][NTT_PROOT_2EXP - i] = init_mul_mod_fast2(c, m); c = mul_mod_fast(c, c, m, m_inv); } } } l = 0; for(j = 0; j < NB_MODS - 1; j++) { for(k = j + 1; k < NB_MODS; k++) { #if defined(__AVX2__) s->ntt_mods_cr_vec[l] = _mm256_set1_pd(int_to_ntt_limb2(ntt_mods_cr[l], ntt_mods[k])); #else s->ntt_mods_cr_inv[l] = init_mul_mod_fast2(ntt_mods_cr[l], ntt_mods[k]); #endif l++; } } return 0; } int bf_get_fft_size(int *pdpl, int *pnb_mods, limb_t len) { int dpl, fft_len_log2, n_bits, nb_mods, dpl_found, fft_len_log2_found; int int_bits, nb_mods_found; limb_t cost, min_cost; min_cost = -1; dpl_found = 0; nb_mods_found = 4; fft_len_log2_found = 0; for(nb_mods = 3; nb_mods <= NB_MODS; nb_mods++) { int_bits = ntt_int_bits[NB_MODS - nb_mods]; dpl = bf_min((int_bits - 4) / 2, 2 * LIMB_BITS + 2 * NTT_MOD_LOG2_MIN - NTT_MOD_LOG2_MAX); for(;;) { fft_len_log2 = ceil_log2((len * LIMB_BITS + dpl - 1) / dpl); if (fft_len_log2 > NTT_PROOT_2EXP) goto next; n_bits = fft_len_log2 + 2 * dpl; if (n_bits <= int_bits) { cost = ((limb_t)(fft_len_log2 + 1) << fft_len_log2) * nb_mods; // printf("n=%d dpl=%d: cost=%" PRId64 "\n", nb_mods, dpl, (int64_t)cost); if (cost < min_cost) { min_cost = cost; dpl_found = dpl; nb_mods_found = nb_mods; fft_len_log2_found = fft_len_log2; } break; } dpl--; if (dpl == 0) break; } next: ; } if (!dpl_found) abort(); /* limit dpl if possible to reduce fixed cost of limb/NTT conversion */ if (dpl_found > (LIMB_BITS + NTT_MOD_LOG2_MIN) && ((limb_t)(LIMB_BITS + 
NTT_MOD_LOG2_MIN) << fft_len_log2_found) >= len * LIMB_BITS) { dpl_found = LIMB_BITS + NTT_MOD_LOG2_MIN; } *pnb_mods = nb_mods_found; *pdpl = dpl_found; return fft_len_log2_found; } /* return 0 if OK, -1 if memory error */ static no_inline int fft_mul(bf_context_t *s1, bf_t *res, limb_t *a_tab, limb_t a_len, limb_t *b_tab, limb_t b_len, int mul_flags) { BFNTTState *s; int dpl, fft_len_log2, j, nb_mods, reduced_mem; slimb_t len, fft_len; NTTLimb *buf1, *buf2, *ptr; #if defined(USE_MUL_CHECK) limb_t ha, hb, hr, h_ref; #endif if (ntt_static_init(s1)) return -1; s = s1->ntt_state; /* find the optimal number of digits per limb (dpl) */ len = a_len + b_len; fft_len_log2 = bf_get_fft_size(&dpl, &nb_mods, len); fft_len = (uint64_t)1 << fft_len_log2; // printf("len=%" PRId64 " fft_len_log2=%d dpl=%d\n", len, fft_len_log2, dpl); #if defined(USE_MUL_CHECK) ha = mp_mod1(a_tab, a_len, BF_CHKSUM_MOD, 0); hb = mp_mod1(b_tab, b_len, BF_CHKSUM_MOD, 0); #endif if ((mul_flags & (FFT_MUL_R_OVERLAP_A | FFT_MUL_R_OVERLAP_B)) == 0) { if (!(mul_flags & FFT_MUL_R_NORESIZE)) bf_resize(res, 0); } else if (mul_flags & FFT_MUL_R_OVERLAP_B) { limb_t *tmp_tab, tmp_len; /* it is better to free 'b' first */ tmp_tab = a_tab; a_tab = b_tab; b_tab = tmp_tab; tmp_len = a_len; a_len = b_len; b_len = tmp_len; } buf1 = ntt_malloc(s, sizeof(NTTLimb) * fft_len * nb_mods); if (!buf1) return -1; limb_to_ntt(s, buf1, fft_len, a_tab, a_len, dpl, NB_MODS - nb_mods, nb_mods); if ((mul_flags & (FFT_MUL_R_OVERLAP_A | FFT_MUL_R_OVERLAP_B)) == FFT_MUL_R_OVERLAP_A) { if (!(mul_flags & FFT_MUL_R_NORESIZE)) bf_resize(res, 0); } reduced_mem = (fft_len_log2 >= 14); if (!reduced_mem) { buf2 = ntt_malloc(s, sizeof(NTTLimb) * fft_len * nb_mods); if (!buf2) goto fail; limb_to_ntt(s, buf2, fft_len, b_tab, b_len, dpl, NB_MODS - nb_mods, nb_mods); if (!(mul_flags & FFT_MUL_R_NORESIZE)) bf_resize(res, 0); /* in case res == b */ } else { buf2 = ntt_malloc(s, sizeof(NTTLimb) * fft_len); if (!buf2) goto fail; } for(j = 0; j < nb_mods; j++) { if (reduced_mem) { limb_to_ntt(s, buf2, fft_len, b_tab, b_len, dpl, NB_MODS - nb_mods + j, 1); ptr = buf2; } else { ptr = buf2 + fft_len * j; } if (ntt_conv(s, buf1 + fft_len * j, ptr, fft_len_log2, fft_len_log2, j + NB_MODS - nb_mods)) goto fail; } if (!(mul_flags & FFT_MUL_R_NORESIZE)) bf_resize(res, 0); /* in case res == b and reduced mem */ ntt_free(s, buf2); buf2 = NULL; if (!(mul_flags & FFT_MUL_R_NORESIZE)) { if (bf_resize(res, len)) goto fail; } ntt_to_limb(s, res->tab, len, buf1, fft_len_log2, dpl, nb_mods); ntt_free(s, buf1); #if defined(USE_MUL_CHECK) hr = mp_mod1(res->tab, len, BF_CHKSUM_MOD, 0); h_ref = mul_mod(ha, hb, BF_CHKSUM_MOD); if (hr != h_ref) { printf("ntt_mul_error: len=%" PRId_LIMB " fft_len_log2=%d dpl=%d nb_mods=%d\n", len, fft_len_log2, dpl, nb_mods); // printf("ha=0x" FMT_LIMB" hb=0x" FMT_LIMB " hr=0x" FMT_LIMB " expected=0x" FMT_LIMB "\n", ha, hb, hr, h_ref); exit(1); } #endif return 0; fail: ntt_free(s, buf1); ntt_free(s, buf2); return -1; } #else /* USE_FFT_MUL */ int bf_get_fft_size(int *pdpl, int *pnb_mods, limb_t len) { return 0; } #endif /* !USE_FFT_MUL */ QuickJSR/src/quickjs/examples/0000755000176200001440000000000014554252063015771 5ustar liggesusersQuickJSR/src/quickjs/examples/fib.c0000644000176200001440000000436314554252063016703 0ustar liggesusers/* * QuickJS: Example of C module * * Copyright (c) 2017-2018 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to 
deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "../quickjs.h" #define countof(x) (sizeof(x) / sizeof((x)[0])) static int fib(int n) { if (n <= 0) return 0; else if (n == 1) return 1; else return fib(n - 1) + fib(n - 2); } static JSValue js_fib(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv) { int n, res; if (JS_ToInt32(ctx, &n, argv[0])) return JS_EXCEPTION; res = fib(n); return JS_NewInt32(ctx, res); } static const JSCFunctionListEntry js_fib_funcs[] = { JS_CFUNC_DEF("fib", 1, js_fib ), }; static int js_fib_init(JSContext *ctx, JSModuleDef *m) { return JS_SetModuleExportList(ctx, m, js_fib_funcs, countof(js_fib_funcs)); } #ifdef JS_SHARED_LIBRARY #define JS_INIT_MODULE js_init_module #else #define JS_INIT_MODULE js_init_module_fib #endif JSModuleDef *JS_INIT_MODULE(JSContext *ctx, const char *module_name) { JSModuleDef *m; m = JS_NewCModule(ctx, module_name, js_fib_init); if (!m) return NULL; JS_AddModuleExportList(ctx, m, js_fib_funcs, countof(js_fib_funcs)); return m; } QuickJSR/src/quickjs/examples/fib_module.js0000644000176200001440000000024614554252063020436 0ustar liggesusers/* fib module */ export function fib(n) { if (n <= 0) return 0; else if (n == 1) return 1; else return fib(n - 1) + fib(n - 2); } QuickJSR/src/quickjs/examples/test_point.js0000644000176200001440000000131614554252063020520 0ustar liggesusers/* example of JS module importing a C module */ import { Point } from "./point.so"; function assert(b, str) { if (b) { return; } else { throw Error("assertion failed: " + str); } } class ColorPoint extends Point { constructor(x, y, color) { super(x, y); this.color = color; } get_color() { return this.color; } }; function main() { var pt, pt2; pt = new Point(2, 3); assert(pt.x === 2); assert(pt.y === 3); pt.x = 4; assert(pt.x === 4); assert(pt.norm() == 5); pt2 = new ColorPoint(2, 3, 0xffffff); assert(pt2.x === 2); assert(pt2.color === 0xffffff); assert(pt2.get_color() === 0xffffff); } main(); QuickJSR/src/quickjs/examples/pi_bigdecimal.js0000644000176200001440000000412014554252063021074 0ustar liggesusers/* * PI computation in Javascript using the QuickJS bigdecimal type * (decimal floating point) */ "use strict"; /* compute PI with a precision of 'prec' digits */ function calc_pi(prec) { const CHUD_A = 13591409m; const CHUD_B = 545140134m; const CHUD_C = 640320m; const CHUD_C3 = 10939058860032000m; /* C^3/24 */ const CHUD_DIGITS_PER_TERM = 14.18164746272548; /* log10(C/12)*3 */ /* return [P, Q, G] */ function chud_bs(a, b, need_G) { var c, P, Q, G, P1, Q1, G1, P2, Q2, G2, b1; if (a == (b - 1n)) { b1 = BigDecimal(b); G = (2m * b1 - 1m) * (6m * b1 - 1m) * (6m * b1 - 5m); P = G * (CHUD_B * b1 + CHUD_A); if 
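/* (Illustrative note, not from the upstream QuickJS example.) chud_bs() is a
   binary-splitting evaluation of the Chudnovsky series
       1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (A + B*k) / ((3k)! (k!)^3 C^(3k+3/2)),
   with A = 13591409, B = 545140134, C = 640320. For an index range [a, b) it
   returns exact numerator/denominator accumulators P and Q (plus the product
   G of the factorial-quotient factors needed to merge two halves); the parity
   test just below supplies the alternating (-1)^k sign. The caller then
   assembles pi from P and Q as (C/12) * sqrt(C) * Q / (P + Q*A). */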
(b & 1n) P = -P; G = G; Q = b1 * b1 * b1 * CHUD_C3; } else { c = (a + b) >> 1n; [P1, Q1, G1] = chud_bs(a, c, true); [P2, Q2, G2] = chud_bs(c, b, need_G); P = P1 * Q2 + P2 * G1; Q = Q1 * Q2; if (need_G) G = G1 * G2; else G = 0m; } return [P, Q, G]; } var n, P, Q, G; /* number of serie terms */ n = BigInt(Math.ceil(prec / CHUD_DIGITS_PER_TERM)) + 10n; [P, Q, G] = chud_bs(0n, n, false); Q = BigDecimal.div(Q, (P + Q * CHUD_A), { roundingMode: "half-even", maximumSignificantDigits: prec }); G = (CHUD_C / 12m) * BigDecimal.sqrt(CHUD_C, { roundingMode: "half-even", maximumSignificantDigits: prec }); return Q * G; } (function() { var r, n_digits, n_bits; if (typeof scriptArgs != "undefined") { if (scriptArgs.length < 2) { print("usage: pi n_digits"); return; } n_digits = scriptArgs[1] | 0; } else { n_digits = 1000; } /* we add more digits to reduce the probability of bad rounding for the last digits */ r = calc_pi(n_digits + 20); print(r.toFixed(n_digits, "down")); })(); QuickJSR/src/quickjs/examples/pi_bigint.js0000644000176200001440000000542214554252063020276 0ustar liggesusers/* * PI computation in Javascript using the BigInt type */ "use strict"; /* return floor(log2(a)) for a > 0 and 0 for a = 0 */ function floor_log2(a) { var k_max, a1, k, i; k_max = 0n; while ((a >> (2n ** k_max)) != 0n) { k_max++; } k = 0n; a1 = a; for(i = k_max - 1n; i >= 0n; i--) { a1 = a >> (2n ** i); if (a1 != 0n) { a = a1; k |= (1n << i); } } return k; } /* return ceil(log2(a)) for a > 0 */ function ceil_log2(a) { return floor_log2(a - 1n) + 1n; } /* return floor(sqrt(a)) (not efficient but simple) */ function int_sqrt(a) { var l, u, s; if (a == 0n) return a; l = ceil_log2(a); u = 1n << ((l + 1n) / 2n); /* u >= floor(sqrt(a)) */ for(;;) { s = u; u = ((a / s) + s) / 2n; if (u >= s) break; } return s; } /* return pi * 2**prec */ function calc_pi(prec) { const CHUD_A = 13591409n; const CHUD_B = 545140134n; const CHUD_C = 640320n; const CHUD_C3 = 10939058860032000n; /* C^3/24 */ const CHUD_BITS_PER_TERM = 47.11041313821584202247; /* log2(C/12)*3 */ /* return [P, Q, G] */ function chud_bs(a, b, need_G) { var c, P, Q, G, P1, Q1, G1, P2, Q2, G2; if (a == (b - 1n)) { G = (2n * b - 1n) * (6n * b - 1n) * (6n * b - 5n); P = G * (CHUD_B * b + CHUD_A); if (b & 1n) P = -P; Q = b * b * b * CHUD_C3; } else { c = (a + b) >> 1n; [P1, Q1, G1] = chud_bs(a, c, true); [P2, Q2, G2] = chud_bs(c, b, need_G); P = P1 * Q2 + P2 * G1; Q = Q1 * Q2; if (need_G) G = G1 * G2; else G = 0n; } return [P, Q, G]; } var n, P, Q, G; /* number of serie terms */ n = BigInt(Math.ceil(Number(prec) / CHUD_BITS_PER_TERM)) + 10n; [P, Q, G] = chud_bs(0n, n, false); Q = (CHUD_C / 12n) * (Q << prec) / (P + Q * CHUD_A); G = int_sqrt(CHUD_C << (2n * prec)); return (Q * G) >> prec; } function main(args) { var r, n_digits, n_bits, out; if (args.length < 1) { print("usage: pi n_digits"); return; } n_digits = args[0] | 0; /* we add more bits to reduce the probability of bad rounding for the last digits */ n_bits = BigInt(Math.ceil(n_digits * Math.log2(10))) + 32n; r = calc_pi(n_bits); r = ((10n ** BigInt(n_digits)) * r) >> n_bits; out = r.toString(); print(out[0] + "." 
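/* (Illustrative note, not from the upstream QuickJS example.) calc_pi()
   returns pi as a fixed-point integer r ~= pi * 2^n_bits, so the scaling above,
   (10^n_digits * r) >> n_bits, yields the integer whose decimal representation
   is the first n_digits+1 digits of pi; this print() call then re-inserts the
   decimal point after the leading "3". Small worked example with 2^20 in place
   of 2^n_bits: floor(pi * 2^20) = 3294198, and (10^3 * 3294198) >> 20 = 3141,
   i.e. "3.141" for three digits. */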
+ out.slice(1)); } var args; if (typeof scriptArgs != "undefined") { args = scriptArgs; args.shift(); } else if (typeof arguments != "undefined") { args = arguments; } else { /* default: 1000 digits */ args=[1000]; } main(args); QuickJSR/src/quickjs/examples/test_fib.js0000644000176200001440000000022014554252063020120 0ustar liggesusers/* example of JS module importing a C module */ import { fib } from "./fib.so"; console.log("Hello World"); console.log("fib(10)=", fib(10)); QuickJSR/src/quickjs/examples/point.c0000644000176200001440000001132114554252063017264 0ustar liggesusers/* * QuickJS: Example of C module with a class * * Copyright (c) 2019 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "../quickjs.h" #include #define countof(x) (sizeof(x) / sizeof((x)[0])) /* Point Class */ typedef struct { int x; int y; } JSPointData; static JSClassID js_point_class_id; static void js_point_finalizer(JSRuntime *rt, JSValue val) { JSPointData *s = JS_GetOpaque(val, js_point_class_id); /* Note: 's' can be NULL in case JS_SetOpaque() was not called */ js_free_rt(rt, s); } static JSValue js_point_ctor(JSContext *ctx, JSValueConst new_target, int argc, JSValueConst *argv) { JSPointData *s; JSValue obj = JS_UNDEFINED; JSValue proto; s = js_mallocz(ctx, sizeof(*s)); if (!s) return JS_EXCEPTION; if (JS_ToInt32(ctx, &s->x, argv[0])) goto fail; if (JS_ToInt32(ctx, &s->y, argv[1])) goto fail; /* using new_target to get the prototype is necessary when the class is extended. 
*/ proto = JS_GetPropertyStr(ctx, new_target, "prototype"); if (JS_IsException(proto)) goto fail; obj = JS_NewObjectProtoClass(ctx, proto, js_point_class_id); JS_FreeValue(ctx, proto); if (JS_IsException(obj)) goto fail; JS_SetOpaque(obj, s); return obj; fail: js_free(ctx, s); JS_FreeValue(ctx, obj); return JS_EXCEPTION; } static JSValue js_point_get_xy(JSContext *ctx, JSValueConst this_val, int magic) { JSPointData *s = JS_GetOpaque2(ctx, this_val, js_point_class_id); if (!s) return JS_EXCEPTION; if (magic == 0) return JS_NewInt32(ctx, s->x); else return JS_NewInt32(ctx, s->y); } static JSValue js_point_set_xy(JSContext *ctx, JSValueConst this_val, JSValue val, int magic) { JSPointData *s = JS_GetOpaque2(ctx, this_val, js_point_class_id); int v; if (!s) return JS_EXCEPTION; if (JS_ToInt32(ctx, &v, val)) return JS_EXCEPTION; if (magic == 0) s->x = v; else s->y = v; return JS_UNDEFINED; } static JSValue js_point_norm(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv) { JSPointData *s = JS_GetOpaque2(ctx, this_val, js_point_class_id); if (!s) return JS_EXCEPTION; return JS_NewFloat64(ctx, sqrt((double)s->x * s->x + (double)s->y * s->y)); } static JSClassDef js_point_class = { "Point", .finalizer = js_point_finalizer, }; static const JSCFunctionListEntry js_point_proto_funcs[] = { JS_CGETSET_MAGIC_DEF("x", js_point_get_xy, js_point_set_xy, 0), JS_CGETSET_MAGIC_DEF("y", js_point_get_xy, js_point_set_xy, 1), JS_CFUNC_DEF("norm", 0, js_point_norm), }; static int js_point_init(JSContext *ctx, JSModuleDef *m) { JSValue point_proto, point_class; /* create the Point class */ JS_NewClassID(&js_point_class_id); JS_NewClass(JS_GetRuntime(ctx), js_point_class_id, &js_point_class); point_proto = JS_NewObject(ctx); JS_SetPropertyFunctionList(ctx, point_proto, js_point_proto_funcs, countof(js_point_proto_funcs)); point_class = JS_NewCFunction2(ctx, js_point_ctor, "Point", 2, JS_CFUNC_constructor, 0); /* set proto.constructor and ctor.prototype */ JS_SetConstructor(ctx, point_class, point_proto); JS_SetClassProto(ctx, js_point_class_id, point_proto); JS_SetModuleExport(ctx, m, "Point", point_class); return 0; } JSModuleDef *js_init_module(JSContext *ctx, const char *module_name) { JSModuleDef *m; m = JS_NewCModule(ctx, module_name, js_point_init); if (!m) return NULL; JS_AddModuleExport(ctx, m, "Point"); return m; } QuickJSR/src/quickjs/examples/hello_module.js0000644000176200001440000000020214554252063020771 0ustar liggesusers/* example of JS module */ import { fib } from "./fib_module.js"; console.log("Hello World"); console.log("fib(10)=", fib(10)); QuickJSR/src/quickjs/examples/pi_bigfloat.js0000644000176200001440000000367114554252063020615 0ustar liggesusers/* * PI computation in Javascript using the QuickJS bigfloat type * (binary floating point) */ "use strict"; /* compute PI with a precision of 'prec' bits */ function calc_pi() { const CHUD_A = 13591409n; const CHUD_B = 545140134n; const CHUD_C = 640320n; const CHUD_C3 = 10939058860032000n; /* C^3/24 */ const CHUD_BITS_PER_TERM = 47.11041313821584202247; /* log2(C/12)*3 */ /* return [P, Q, G] */ function chud_bs(a, b, need_G) { var c, P, Q, G, P1, Q1, G1, P2, Q2, G2; if (a == (b - 1n)) { G = (2n * b - 1n) * (6n * b - 1n) * (6n * b - 5n); P = BigFloat(G * (CHUD_B * b + CHUD_A)); if (b & 1n) P = -P; G = BigFloat(G); Q = BigFloat(b * b * b * CHUD_C3); } else { c = (a + b) >> 1n; [P1, Q1, G1] = chud_bs(a, c, true); [P2, Q2, G2] = chud_bs(c, b, need_G); P = P1 * Q2 + P2 * G1; Q = Q1 * Q2; if (need_G) G = G1 * G2; else G = 0l; } return 
[P, Q, G]; } var n, P, Q, G; /* number of serie terms */ n = BigInt(Math.ceil(BigFloatEnv.prec / CHUD_BITS_PER_TERM)) + 10n; [P, Q, G] = chud_bs(0n, n, false); Q = Q / (P + Q * BigFloat(CHUD_A)); G = BigFloat((CHUD_C / 12n)) * BigFloat.sqrt(BigFloat(CHUD_C)); return Q * G; } (function() { var r, n_digits, n_bits; if (typeof scriptArgs != "undefined") { if (scriptArgs.length < 2) { print("usage: pi n_digits"); return; } n_digits = scriptArgs[1]; } else { n_digits = 1000; } n_bits = Math.ceil(n_digits * Math.log2(10)); /* we add more bits to reduce the probability of bad rounding for the last digits */ BigFloatEnv.setPrec( () => { r = calc_pi(); print(r.toFixed(n_digits, BigFloatEnv.RNDZ)); }, n_bits + 32); })(); QuickJSR/src/quickjs/examples/hello.js0000644000176200001440000000003414554252063017427 0ustar liggesusersconsole.log("Hello World"); QuickJSR/src/quickjs/unicode_gen_def.h0000644000176200001440000001552114554252063017425 0ustar liggesusers#ifdef UNICODE_GENERAL_CATEGORY DEF(Cn, "Unassigned") /* must be zero */ DEF(Lu, "Uppercase_Letter") DEF(Ll, "Lowercase_Letter") DEF(Lt, "Titlecase_Letter") DEF(Lm, "Modifier_Letter") DEF(Lo, "Other_Letter") DEF(Mn, "Nonspacing_Mark") DEF(Mc, "Spacing_Mark") DEF(Me, "Enclosing_Mark") DEF(Nd, "Decimal_Number,digit") DEF(Nl, "Letter_Number") DEF(No, "Other_Number") DEF(Sm, "Math_Symbol") DEF(Sc, "Currency_Symbol") DEF(Sk, "Modifier_Symbol") DEF(So, "Other_Symbol") DEF(Pc, "Connector_Punctuation") DEF(Pd, "Dash_Punctuation") DEF(Ps, "Open_Punctuation") DEF(Pe, "Close_Punctuation") DEF(Pi, "Initial_Punctuation") DEF(Pf, "Final_Punctuation") DEF(Po, "Other_Punctuation") DEF(Zs, "Space_Separator") DEF(Zl, "Line_Separator") DEF(Zp, "Paragraph_Separator") DEF(Cc, "Control,cntrl") DEF(Cf, "Format") DEF(Cs, "Surrogate") DEF(Co, "Private_Use") /* synthetic properties */ DEF(LC, "Cased_Letter") DEF(L, "Letter") DEF(M, "Mark,Combining_Mark") DEF(N, "Number") DEF(S, "Symbol") DEF(P, "Punctuation,punct") DEF(Z, "Separator") DEF(C, "Other") #endif #ifdef UNICODE_SCRIPT /* scripts aliases names in PropertyValueAliases.txt */ DEF(Unknown, "Zzzz") DEF(Adlam, "Adlm") DEF(Ahom, "Ahom") DEF(Anatolian_Hieroglyphs, "Hluw") DEF(Arabic, "Arab") DEF(Armenian, "Armn") DEF(Avestan, "Avst") DEF(Balinese, "Bali") DEF(Bamum, "Bamu") DEF(Bassa_Vah, "Bass") DEF(Batak, "Batk") DEF(Bengali, "Beng") DEF(Bhaiksuki, "Bhks") DEF(Bopomofo, "Bopo") DEF(Brahmi, "Brah") DEF(Braille, "Brai") DEF(Buginese, "Bugi") DEF(Buhid, "Buhd") DEF(Canadian_Aboriginal, "Cans") DEF(Carian, "Cari") DEF(Caucasian_Albanian, "Aghb") DEF(Chakma, "Cakm") DEF(Cham, "Cham") DEF(Cherokee, "Cher") DEF(Chorasmian, "Chrs") DEF(Common, "Zyyy") DEF(Coptic, "Copt,Qaac") DEF(Cuneiform, "Xsux") DEF(Cypriot, "Cprt") DEF(Cyrillic, "Cyrl") DEF(Cypro_Minoan, "Cpmn") DEF(Deseret, "Dsrt") DEF(Devanagari, "Deva") DEF(Dives_Akuru, "Diak") DEF(Dogra, "Dogr") DEF(Duployan, "Dupl") DEF(Egyptian_Hieroglyphs, "Egyp") DEF(Elbasan, "Elba") DEF(Elymaic, "Elym") DEF(Ethiopic, "Ethi") DEF(Georgian, "Geor") DEF(Glagolitic, "Glag") DEF(Gothic, "Goth") DEF(Grantha, "Gran") DEF(Greek, "Grek") DEF(Gujarati, "Gujr") DEF(Gunjala_Gondi, "Gong") DEF(Gurmukhi, "Guru") DEF(Han, "Hani") DEF(Hangul, "Hang") DEF(Hanifi_Rohingya, "Rohg") DEF(Hanunoo, "Hano") DEF(Hatran, "Hatr") DEF(Hebrew, "Hebr") DEF(Hiragana, "Hira") DEF(Imperial_Aramaic, "Armi") DEF(Inherited, "Zinh,Qaai") DEF(Inscriptional_Pahlavi, "Phli") DEF(Inscriptional_Parthian, "Prti") DEF(Javanese, "Java") DEF(Kaithi, "Kthi") DEF(Kannada, "Knda") DEF(Katakana, "Kana") DEF(Kawi, "Kawi") 
DEF(Kayah_Li, "Kali") DEF(Kharoshthi, "Khar") DEF(Khmer, "Khmr") DEF(Khojki, "Khoj") DEF(Khitan_Small_Script, "Kits") DEF(Khudawadi, "Sind") DEF(Lao, "Laoo") DEF(Latin, "Latn") DEF(Lepcha, "Lepc") DEF(Limbu, "Limb") DEF(Linear_A, "Lina") DEF(Linear_B, "Linb") DEF(Lisu, "Lisu") DEF(Lycian, "Lyci") DEF(Lydian, "Lydi") DEF(Makasar, "Maka") DEF(Mahajani, "Mahj") DEF(Malayalam, "Mlym") DEF(Mandaic, "Mand") DEF(Manichaean, "Mani") DEF(Marchen, "Marc") DEF(Masaram_Gondi, "Gonm") DEF(Medefaidrin, "Medf") DEF(Meetei_Mayek, "Mtei") DEF(Mende_Kikakui, "Mend") DEF(Meroitic_Cursive, "Merc") DEF(Meroitic_Hieroglyphs, "Mero") DEF(Miao, "Plrd") DEF(Modi, "Modi") DEF(Mongolian, "Mong") DEF(Mro, "Mroo") DEF(Multani, "Mult") DEF(Myanmar, "Mymr") DEF(Nabataean, "Nbat") DEF(Nag_Mundari, "Nagm") DEF(Nandinagari, "Nand") DEF(New_Tai_Lue, "Talu") DEF(Newa, "Newa") DEF(Nko, "Nkoo") DEF(Nushu, "Nshu") DEF(Nyiakeng_Puachue_Hmong, "Hmnp") DEF(Ogham, "Ogam") DEF(Ol_Chiki, "Olck") DEF(Old_Hungarian, "Hung") DEF(Old_Italic, "Ital") DEF(Old_North_Arabian, "Narb") DEF(Old_Permic, "Perm") DEF(Old_Persian, "Xpeo") DEF(Old_Sogdian, "Sogo") DEF(Old_South_Arabian, "Sarb") DEF(Old_Turkic, "Orkh") DEF(Old_Uyghur, "Ougr") DEF(Oriya, "Orya") DEF(Osage, "Osge") DEF(Osmanya, "Osma") DEF(Pahawh_Hmong, "Hmng") DEF(Palmyrene, "Palm") DEF(Pau_Cin_Hau, "Pauc") DEF(Phags_Pa, "Phag") DEF(Phoenician, "Phnx") DEF(Psalter_Pahlavi, "Phlp") DEF(Rejang, "Rjng") DEF(Runic, "Runr") DEF(Samaritan, "Samr") DEF(Saurashtra, "Saur") DEF(Sharada, "Shrd") DEF(Shavian, "Shaw") DEF(Siddham, "Sidd") DEF(SignWriting, "Sgnw") DEF(Sinhala, "Sinh") DEF(Sogdian, "Sogd") DEF(Sora_Sompeng, "Sora") DEF(Soyombo, "Soyo") DEF(Sundanese, "Sund") DEF(Syloti_Nagri, "Sylo") DEF(Syriac, "Syrc") DEF(Tagalog, "Tglg") DEF(Tagbanwa, "Tagb") DEF(Tai_Le, "Tale") DEF(Tai_Tham, "Lana") DEF(Tai_Viet, "Tavt") DEF(Takri, "Takr") DEF(Tamil, "Taml") DEF(Tangut, "Tang") DEF(Telugu, "Telu") DEF(Thaana, "Thaa") DEF(Thai, "Thai") DEF(Tibetan, "Tibt") DEF(Tifinagh, "Tfng") DEF(Tirhuta, "Tirh") DEF(Tangsa, "Tnsa") DEF(Toto, "Toto") DEF(Ugaritic, "Ugar") DEF(Vai, "Vaii") DEF(Vithkuqi, "Vith") DEF(Wancho, "Wcho") DEF(Warang_Citi, "Wara") DEF(Yezidi, "Yezi") DEF(Yi, "Yiii") DEF(Zanabazar_Square, "Zanb") #endif #ifdef UNICODE_PROP_LIST /* Prop list not exported to regexp */ DEF(Hyphen, "") DEF(Other_Math, "") DEF(Other_Alphabetic, "") DEF(Other_Lowercase, "") DEF(Other_Uppercase, "") DEF(Other_Grapheme_Extend, "") DEF(Other_Default_Ignorable_Code_Point, "") DEF(Other_ID_Start, "") DEF(Other_ID_Continue, "") DEF(Prepended_Concatenation_Mark, "") /* additional computed properties for smaller tables */ DEF(ID_Continue1, "") DEF(XID_Start1, "") DEF(XID_Continue1, "") DEF(Changes_When_Titlecased1, "") DEF(Changes_When_Casefolded1, "") DEF(Changes_When_NFKC_Casefolded1, "") /* Prop list exported to JS */ DEF(ASCII_Hex_Digit, "AHex") DEF(Bidi_Control, "Bidi_C") DEF(Dash, "") DEF(Deprecated, "Dep") DEF(Diacritic, "Dia") DEF(Extender, "Ext") DEF(Hex_Digit, "Hex") DEF(IDS_Binary_Operator, "IDSB") DEF(IDS_Trinary_Operator, "IDST") DEF(Ideographic, "Ideo") DEF(Join_Control, "Join_C") DEF(Logical_Order_Exception, "LOE") DEF(Noncharacter_Code_Point, "NChar") DEF(Pattern_Syntax, "Pat_Syn") DEF(Pattern_White_Space, "Pat_WS") DEF(Quotation_Mark, "QMark") DEF(Radical, "") DEF(Regional_Indicator, "RI") DEF(Sentence_Terminal, "STerm") DEF(Soft_Dotted, "SD") DEF(Terminal_Punctuation, "Term") DEF(Unified_Ideograph, "UIdeo") DEF(Variation_Selector, "VS") DEF(White_Space, "space") DEF(Bidi_Mirrored, "Bidi_M") 
DEF(Emoji, "") DEF(Emoji_Component, "EComp") DEF(Emoji_Modifier, "EMod") DEF(Emoji_Modifier_Base, "EBase") DEF(Emoji_Presentation, "EPres") DEF(Extended_Pictographic, "ExtPict") DEF(Default_Ignorable_Code_Point, "DI") DEF(ID_Start, "IDS") DEF(Case_Ignorable, "CI") /* other binary properties */ DEF(ASCII,"") DEF(Alphabetic, "Alpha") DEF(Any, "") DEF(Assigned,"") DEF(Cased, "") DEF(Changes_When_Casefolded, "CWCF") DEF(Changes_When_Casemapped, "CWCM") DEF(Changes_When_Lowercased, "CWL") DEF(Changes_When_NFKC_Casefolded, "CWKCF") DEF(Changes_When_Titlecased, "CWT") DEF(Changes_When_Uppercased, "CWU") DEF(Grapheme_Base, "Gr_Base") DEF(Grapheme_Extend, "Gr_Ext") DEF(ID_Continue, "IDC") DEF(Lowercase, "Lower") DEF(Math, "") DEF(Uppercase, "Upper") DEF(XID_Continue, "XIDC") DEF(XID_Start, "XIDS") /* internal tables with index */ DEF(Cased1, "") #endif QuickJSR/src/quickjs/libregexp-opcode.h0000644000176200001440000000431314554252063017555 0ustar liggesusers/* * Regular Expression Engine * * Copyright (c) 2017-2018 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #ifdef DEF DEF(invalid, 1) /* never used */ DEF(char, 3) DEF(char32, 5) DEF(dot, 1) DEF(any, 1) /* same as dot but match any character including line terminator */ DEF(line_start, 1) DEF(line_end, 1) DEF(goto, 5) DEF(split_goto_first, 5) DEF(split_next_first, 5) DEF(match, 1) DEF(save_start, 2) /* save start position */ DEF(save_end, 2) /* save end position, must come after saved_start */ DEF(save_reset, 3) /* reset save positions */ DEF(loop, 5) /* decrement the top the stack and goto if != 0 */ DEF(push_i32, 5) /* push integer on the stack */ DEF(drop, 1) DEF(word_boundary, 1) DEF(not_word_boundary, 1) DEF(back_reference, 2) DEF(backward_back_reference, 2) /* must come after back_reference */ DEF(range, 3) /* variable length */ DEF(range32, 3) /* variable length */ DEF(lookahead, 5) DEF(negative_lookahead, 5) DEF(push_char_pos, 1) /* push the character position on the stack */ DEF(check_advance, 1) /* pop one stack element and check that it is different from the character position */ DEF(prev, 1) /* go to the previous char */ DEF(simple_greedy_quant, 17) #endif /* DEF */ QuickJSR/src/quickjs/LICENSE0000644000176200001440000000215314554252063015161 0ustar liggesusersQuickJS Javascript Engine Copyright (c) 2017-2021 Fabrice Bellard Copyright (c) 2017-2021 Charlie Gordon Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. QuickJSR/src/quickjs/qjs.c0000644000176200001440000004035714554252063015125 0ustar liggesusers/* * QuickJS stand alone interpreter * * Copyright (c) 2017-2021 Fabrice Bellard * Copyright (c) 2017-2021 Charlie Gordon * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #if defined(__APPLE__) #include #elif defined(__linux__) #include #endif #include "cutils.h" #include "quickjs-libc.h" extern const uint8_t qjsc_repl[]; extern const uint32_t qjsc_repl_size; #ifdef CONFIG_BIGNUM extern const uint8_t qjsc_qjscalc[]; extern const uint32_t qjsc_qjscalc_size; static int bignum_ext; #endif static int eval_buf(JSContext *ctx, const void *buf, int buf_len, const char *filename, int eval_flags) { JSValue val; int ret; if ((eval_flags & JS_EVAL_TYPE_MASK) == JS_EVAL_TYPE_MODULE) { /* for the modules, we compile then run to be able to set import.meta */ val = JS_Eval(ctx, buf, buf_len, filename, eval_flags | JS_EVAL_FLAG_COMPILE_ONLY); if (!JS_IsException(val)) { js_module_set_import_meta(ctx, val, TRUE, TRUE); val = JS_EvalFunction(ctx, val); } } else { val = JS_Eval(ctx, buf, buf_len, filename, eval_flags); } if (JS_IsException(val)) { js_std_dump_error(ctx); ret = -1; } else { ret = 0; } JS_FreeValue(ctx, val); return ret; } static int eval_file(JSContext *ctx, const char *filename, int module) { uint8_t *buf; int ret, eval_flags; size_t buf_len; buf = js_load_file(ctx, &buf_len, filename); if (!buf) { perror(filename); exit(1); } if (module < 0) { module = (has_suffix(filename, ".mjs") || JS_DetectModule((const char *)buf, buf_len)); } if (module) eval_flags = JS_EVAL_TYPE_MODULE; else eval_flags = JS_EVAL_TYPE_GLOBAL; ret = eval_buf(ctx, buf, buf_len, filename, eval_flags); js_free(ctx, buf); return ret; } /* also used to initialize the worker context */ static JSContext *JS_NewCustomContext(JSRuntime *rt) { JSContext *ctx; ctx = JS_NewContext(rt); if (!ctx) return NULL; #ifdef CONFIG_BIGNUM if (bignum_ext) { JS_AddIntrinsicBigFloat(ctx); JS_AddIntrinsicBigDecimal(ctx); JS_AddIntrinsicOperators(ctx); JS_EnableBignumExt(ctx, TRUE); } #endif /* system modules */ js_init_module_std(ctx, "std"); js_init_module_os(ctx, "os"); return ctx; } #if defined(__APPLE__) #define MALLOC_OVERHEAD 0 #else #define MALLOC_OVERHEAD 8 #endif struct trace_malloc_data { uint8_t *base; }; static inline unsigned long long js_trace_malloc_ptr_offset(uint8_t *ptr, struct trace_malloc_data *dp) { return ptr - dp->base; } /* default memory allocation functions with memory limitation */ static size_t js_trace_malloc_usable_size(const void *ptr) { #if defined(__APPLE__) return malloc_size(ptr); #elif defined(_WIN32) return _msize((void *)ptr); #elif defined(EMSCRIPTEN) return 0; #elif defined(__linux__) return malloc_usable_size((void *)ptr); #else /* change this to `return 0;` if compilation fails */ return malloc_usable_size((void *)ptr); #endif } static void #ifdef _WIN32 /* mingw printf is used */ __attribute__((format(gnu_printf, 2, 3))) #else __attribute__((format(printf, 2, 3))) #endif js_trace_malloc_printf(JSMallocState *s, const char *fmt, ...) 
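/* (Illustrative note, not from the upstream qjs sources.) This helper is a
   minimal printf used only by the -T/--trace allocator: it understands just
   %p and %zd. Pointers are printed as an offset from the trace base plus the
   block's usable size ("H%+06lld.%zd", or "NULL"), so the trace reads roughly
   like "A 64 -> H+00016.64" for an allocation, "F H+00016.64" for a free and
   "R 128 H+00016.64 -> H+00080.128" for a realloc (formats inferred from
   js_trace_malloc/js_trace_free/js_trace_realloc below). */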
{ va_list ap; int c; va_start(ap, fmt); while ((c = *fmt++) != '\0') { if (c == '%') { /* only handle %p and %zd */ if (*fmt == 'p') { uint8_t *ptr = va_arg(ap, void *); if (ptr == NULL) { printf("NULL"); } else { printf("H%+06lld.%zd", js_trace_malloc_ptr_offset(ptr, s->opaque), js_trace_malloc_usable_size(ptr)); } fmt++; continue; } if (fmt[0] == 'z' && fmt[1] == 'd') { size_t sz = va_arg(ap, size_t); printf("%zd", sz); fmt += 2; continue; } } putc(c, stdout); } va_end(ap); } static void js_trace_malloc_init(struct trace_malloc_data *s) { free(s->base = malloc(8)); } static void *js_trace_malloc(JSMallocState *s, size_t size) { void *ptr; /* Do not allocate zero bytes: behavior is platform dependent */ assert(size != 0); if (unlikely(s->malloc_size + size > s->malloc_limit)) return NULL; ptr = malloc(size); js_trace_malloc_printf(s, "A %zd -> %p\n", size, ptr); if (ptr) { s->malloc_count++; s->malloc_size += js_trace_malloc_usable_size(ptr) + MALLOC_OVERHEAD; } return ptr; } static void js_trace_free(JSMallocState *s, void *ptr) { if (!ptr) return; js_trace_malloc_printf(s, "F %p\n", ptr); s->malloc_count--; s->malloc_size -= js_trace_malloc_usable_size(ptr) + MALLOC_OVERHEAD; free(ptr); } static void *js_trace_realloc(JSMallocState *s, void *ptr, size_t size) { size_t old_size; if (!ptr) { if (size == 0) return NULL; return js_trace_malloc(s, size); } old_size = js_trace_malloc_usable_size(ptr); if (size == 0) { js_trace_malloc_printf(s, "R %zd %p\n", size, ptr); s->malloc_count--; s->malloc_size -= old_size + MALLOC_OVERHEAD; free(ptr); return NULL; } if (s->malloc_size + size - old_size > s->malloc_limit) return NULL; js_trace_malloc_printf(s, "R %zd %p", size, ptr); ptr = realloc(ptr, size); js_trace_malloc_printf(s, " -> %p\n", ptr); if (ptr) { s->malloc_size += js_trace_malloc_usable_size(ptr) - old_size; } return ptr; } static const JSMallocFunctions trace_mf = { js_trace_malloc, js_trace_free, js_trace_realloc, js_trace_malloc_usable_size, }; #define PROG_NAME "qjs" void help(void) { printf("QuickJS version " CONFIG_VERSION "\n" "usage: " PROG_NAME " [options] [file [args]]\n" "-h --help list options\n" "-e --eval EXPR evaluate EXPR\n" "-i --interactive go to interactive mode\n" "-m --module load as ES6 module (default=autodetect)\n" " --script load as ES6 script (default=autodetect)\n" "-I --include file include an additional file\n" " --std make 'std' and 'os' available to the loaded script\n" #ifdef CONFIG_BIGNUM " --bignum enable the bignum extensions (BigFloat, BigDecimal)\n" " --qjscalc load the QJSCalc runtime (default if invoked as qjscalc)\n" #endif "-T --trace trace memory allocation\n" "-d --dump dump the memory usage stats\n" " --memory-limit n limit the memory usage to 'n' bytes\n" " --stack-size n limit the stack size to 'n' bytes\n" " --unhandled-rejection dump unhandled promise rejections\n" "-q --quit just instantiate the interpreter and quit\n"); exit(1); } int main(int argc, char **argv) { JSRuntime *rt; JSContext *ctx; struct trace_malloc_data trace_data = { NULL }; int optind; char *expr = NULL; int interactive = 0; int dump_memory = 0; int trace_memory = 0; int empty_run = 0; int module = -1; int load_std = 0; int dump_unhandled_promise_rejection = 0; size_t memory_limit = 0; char *include_list[32]; int i, include_count = 0; #ifdef CONFIG_BIGNUM int load_jscalc; #endif size_t stack_size = 0; #ifdef CONFIG_BIGNUM /* load jscalc runtime if invoked as 'qjscalc' */ { const char *p, *exename; exename = argv[0]; p = strrchr(exename, '/'); if (p) exename = p + 1; 
load_jscalc = !strcmp(exename, "qjscalc"); } #endif /* cannot use getopt because we want to pass the command line to the script */ optind = 1; while (optind < argc && *argv[optind] == '-') { char *arg = argv[optind] + 1; const char *longopt = ""; /* a single - is not an option, it also stops argument scanning */ if (!*arg) break; optind++; if (*arg == '-') { longopt = arg + 1; arg += strlen(arg); /* -- stops argument scanning */ if (!*longopt) break; } for (; *arg || *longopt; longopt = "") { char opt = *arg; if (opt) arg++; if (opt == 'h' || opt == '?' || !strcmp(longopt, "help")) { help(); continue; } if (opt == 'e' || !strcmp(longopt, "eval")) { if (*arg) { expr = arg; break; } if (optind < argc) { expr = argv[optind++]; break; } fprintf(stderr, "qjs: missing expression for -e\n"); exit(2); } if (opt == 'I' || !strcmp(longopt, "include")) { if (optind >= argc) { fprintf(stderr, "expecting filename"); exit(1); } if (include_count >= countof(include_list)) { fprintf(stderr, "too many included files"); exit(1); } include_list[include_count++] = argv[optind++]; continue; } if (opt == 'i' || !strcmp(longopt, "interactive")) { interactive++; continue; } if (opt == 'm' || !strcmp(longopt, "module")) { module = 1; continue; } if (!strcmp(longopt, "script")) { module = 0; continue; } if (opt == 'd' || !strcmp(longopt, "dump")) { dump_memory++; continue; } if (opt == 'T' || !strcmp(longopt, "trace")) { trace_memory++; continue; } if (!strcmp(longopt, "std")) { load_std = 1; continue; } if (!strcmp(longopt, "unhandled-rejection")) { dump_unhandled_promise_rejection = 1; continue; } #ifdef CONFIG_BIGNUM if (!strcmp(longopt, "bignum")) { bignum_ext = 1; continue; } if (!strcmp(longopt, "qjscalc")) { load_jscalc = 1; continue; } #endif if (opt == 'q' || !strcmp(longopt, "quit")) { empty_run++; continue; } if (!strcmp(longopt, "memory-limit")) { if (optind >= argc) { fprintf(stderr, "expecting memory limit"); exit(1); } memory_limit = (size_t)strtod(argv[optind++], NULL); continue; } if (!strcmp(longopt, "stack-size")) { if (optind >= argc) { fprintf(stderr, "expecting stack size"); exit(1); } stack_size = (size_t)strtod(argv[optind++], NULL); continue; } if (opt) { fprintf(stderr, "qjs: unknown option '-%c'\n", opt); } else { fprintf(stderr, "qjs: unknown option '--%s'\n", longopt); } help(); } } #ifdef CONFIG_BIGNUM if (load_jscalc) bignum_ext = 1; #endif if (trace_memory) { js_trace_malloc_init(&trace_data); rt = JS_NewRuntime2(&trace_mf, &trace_data); } else { rt = JS_NewRuntime(); } if (!rt) { fprintf(stderr, "qjs: cannot allocate JS runtime\n"); exit(2); } if (memory_limit != 0) JS_SetMemoryLimit(rt, memory_limit); if (stack_size != 0) JS_SetMaxStackSize(rt, stack_size); js_std_set_worker_new_context_func(JS_NewCustomContext); js_std_init_handlers(rt); ctx = JS_NewCustomContext(rt); if (!ctx) { fprintf(stderr, "qjs: cannot allocate JS context\n"); exit(2); } /* loader for ES6 modules */ JS_SetModuleLoaderFunc(rt, NULL, js_module_loader, NULL); if (dump_unhandled_promise_rejection) { JS_SetHostPromiseRejectionTracker(rt, js_std_promise_rejection_tracker, NULL); } if (!empty_run) { #ifdef CONFIG_BIGNUM if (load_jscalc) { js_std_eval_binary(ctx, qjsc_qjscalc, qjsc_qjscalc_size, 0); } #endif js_std_add_helpers(ctx, argc - optind, argv + optind); /* make 'std' and 'os' visible to non module code */ if (load_std) { const char *str = "import * as std from 'std';\n" "import * as os from 'os';\n" "globalThis.std = std;\n" "globalThis.os = os;\n"; eval_buf(ctx, str, strlen(str), "", JS_EVAL_TYPE_MODULE); 
} for(i = 0; i < include_count; i++) { if (eval_file(ctx, include_list[i], module)) goto fail; } if (expr) { if (eval_buf(ctx, expr, strlen(expr), "", 0)) goto fail; } else if (optind >= argc) { /* interactive mode */ interactive = 1; } else { const char *filename; filename = argv[optind]; if (eval_file(ctx, filename, module)) goto fail; } if (interactive) { js_std_eval_binary(ctx, qjsc_repl, qjsc_repl_size, 0); } js_std_loop(ctx); } if (dump_memory) { JSMemoryUsage stats; JS_ComputeMemoryUsage(rt, &stats); JS_DumpMemoryUsage(stdout, &stats, rt); } js_std_free_handlers(rt); JS_FreeContext(ctx); JS_FreeRuntime(rt); if (empty_run && dump_memory) { clock_t t[5]; double best[5]; int i, j; for (i = 0; i < 100; i++) { t[0] = clock(); rt = JS_NewRuntime(); t[1] = clock(); ctx = JS_NewContext(rt); t[2] = clock(); JS_FreeContext(ctx); t[3] = clock(); JS_FreeRuntime(rt); t[4] = clock(); for (j = 4; j > 0; j--) { double ms = 1000.0 * (t[j] - t[j - 1]) / CLOCKS_PER_SEC; if (i == 0 || best[j] > ms) best[j] = ms; } } printf("\nInstantiation times (ms): %.3f = %.3f+%.3f+%.3f+%.3f\n", best[1] + best[2] + best[3] + best[4], best[1], best[2], best[3], best[4]); } return 0; fail: js_std_free_handlers(rt); JS_FreeContext(ctx); JS_FreeRuntime(rt); return 1; } QuickJSR/src/quickjs/quickjs.c0000644000176200001440000672722014554754431016016 0ustar liggesusers/* * QuickJS Javascript Engine * * Copyright (c) 2017-2021 Fabrice Bellard * Copyright (c) 2017-2021 Charlie Gordon * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #if defined(__APPLE__) #include #elif defined(__linux__) #include #elif defined(__FreeBSD__) #include #endif #include "cutils.h" #include "list.h" #include "quickjs.h" #include "libregexp.h" #include "libbf.h" #define OPTIMIZE 1 #define SHORT_OPCODES 1 #if defined(EMSCRIPTEN) #define DIRECT_DISPATCH 0 #else #define DIRECT_DISPATCH 1 #endif #if defined(__APPLE__) #define MALLOC_OVERHEAD 0 #else #define MALLOC_OVERHEAD 8 #endif #if !defined(_WIN32) /* define it if printf uses the RNDN rounding mode instead of RNDNA */ #define CONFIG_PRINTF_RNDN #endif /* define to include Atomics.* operations which depend on the OS threads */ #if !defined(EMSCRIPTEN) && !defined(DISABLE_ATOMICS) #define CONFIG_ATOMICS #endif #if !defined(EMSCRIPTEN) /* enable stack limitation */ #define CONFIG_STACK_CHECK #endif /* dump object free */ //#define DUMP_FREE //#define DUMP_CLOSURE /* dump the bytecode of the compiled functions: combination of bits 1: dump pass 3 final byte code 2: dump pass 2 code 4: dump pass 1 code 8: dump stdlib functions 16: dump bytecode in hex 32: dump line number table 64: dump compute_stack_size */ //#define DUMP_BYTECODE (1) /* dump the occurence of the automatic GC */ //#define DUMP_GC /* dump objects freed by the garbage collector */ //#define DUMP_GC_FREE /* dump objects leaking when freeing the runtime */ //#define DUMP_LEAKS 1 /* dump memory usage before running the garbage collector */ //#define DUMP_MEM //#define DUMP_OBJECTS /* dump objects in JS_FreeContext */ //#define DUMP_ATOMS /* dump atoms in JS_FreeContext */ //#define DUMP_SHAPES /* dump shapes in JS_FreeContext */ //#define DUMP_MODULE_RESOLVE //#define DUMP_PROMISE //#define DUMP_READ_OBJECT /* test the GC by forcing it before each object allocation */ //#define FORCE_GC_AT_MALLOC #ifdef CONFIG_ATOMICS #include #include #include #endif enum { /* classid tag */ /* union usage | properties */ JS_CLASS_OBJECT = 1, /* must be first */ JS_CLASS_ARRAY, /* u.array | length */ JS_CLASS_ERROR, JS_CLASS_NUMBER, /* u.object_data */ JS_CLASS_STRING, /* u.object_data */ JS_CLASS_BOOLEAN, /* u.object_data */ JS_CLASS_SYMBOL, /* u.object_data */ JS_CLASS_ARGUMENTS, /* u.array | length */ JS_CLASS_MAPPED_ARGUMENTS, /* | length */ JS_CLASS_DATE, /* u.object_data */ JS_CLASS_MODULE_NS, JS_CLASS_C_FUNCTION, /* u.cfunc */ JS_CLASS_BYTECODE_FUNCTION, /* u.func */ JS_CLASS_BOUND_FUNCTION, /* u.bound_function */ JS_CLASS_C_FUNCTION_DATA, /* u.c_function_data_record */ JS_CLASS_GENERATOR_FUNCTION, /* u.func */ JS_CLASS_FOR_IN_ITERATOR, /* u.for_in_iterator */ JS_CLASS_REGEXP, /* u.regexp */ JS_CLASS_ARRAY_BUFFER, /* u.array_buffer */ JS_CLASS_SHARED_ARRAY_BUFFER, /* u.array_buffer */ JS_CLASS_UINT8C_ARRAY, /* u.array (typed_array) */ JS_CLASS_INT8_ARRAY, /* u.array (typed_array) */ JS_CLASS_UINT8_ARRAY, /* u.array (typed_array) */ JS_CLASS_INT16_ARRAY, /* u.array (typed_array) */ JS_CLASS_UINT16_ARRAY, /* u.array (typed_array) */ JS_CLASS_INT32_ARRAY, /* u.array (typed_array) */ JS_CLASS_UINT32_ARRAY, /* u.array (typed_array) */ JS_CLASS_BIG_INT64_ARRAY, /* u.array (typed_array) */ JS_CLASS_BIG_UINT64_ARRAY, /* u.array (typed_array) */ JS_CLASS_FLOAT32_ARRAY, /* u.array (typed_array) */ JS_CLASS_FLOAT64_ARRAY, /* u.array (typed_array) */ JS_CLASS_DATAVIEW, /* u.typed_array */ JS_CLASS_BIG_INT, /* u.object_data */ #ifdef CONFIG_BIGNUM JS_CLASS_BIG_FLOAT, /* u.object_data */ JS_CLASS_FLOAT_ENV, /* u.float_env */ JS_CLASS_BIG_DECIMAL, /* u.object_data */ 
JS_CLASS_OPERATOR_SET, /* u.operator_set */ #endif JS_CLASS_MAP, /* u.map_state */ JS_CLASS_SET, /* u.map_state */ JS_CLASS_WEAKMAP, /* u.map_state */ JS_CLASS_WEAKSET, /* u.map_state */ JS_CLASS_MAP_ITERATOR, /* u.map_iterator_data */ JS_CLASS_SET_ITERATOR, /* u.map_iterator_data */ JS_CLASS_ARRAY_ITERATOR, /* u.array_iterator_data */ JS_CLASS_STRING_ITERATOR, /* u.array_iterator_data */ JS_CLASS_REGEXP_STRING_ITERATOR, /* u.regexp_string_iterator_data */ JS_CLASS_GENERATOR, /* u.generator_data */ JS_CLASS_PROXY, /* u.proxy_data */ JS_CLASS_PROMISE, /* u.promise_data */ JS_CLASS_PROMISE_RESOLVE_FUNCTION, /* u.promise_function_data */ JS_CLASS_PROMISE_REJECT_FUNCTION, /* u.promise_function_data */ JS_CLASS_ASYNC_FUNCTION, /* u.func */ JS_CLASS_ASYNC_FUNCTION_RESOLVE, /* u.async_function_data */ JS_CLASS_ASYNC_FUNCTION_REJECT, /* u.async_function_data */ JS_CLASS_ASYNC_FROM_SYNC_ITERATOR, /* u.async_from_sync_iterator_data */ JS_CLASS_ASYNC_GENERATOR_FUNCTION, /* u.func */ JS_CLASS_ASYNC_GENERATOR, /* u.async_generator_data */ JS_CLASS_INIT_COUNT, /* last entry for predefined classes */ }; /* number of typed array types */ #define JS_TYPED_ARRAY_COUNT (JS_CLASS_FLOAT64_ARRAY - JS_CLASS_UINT8C_ARRAY + 1) static uint8_t const typed_array_size_log2[JS_TYPED_ARRAY_COUNT]; #define typed_array_size_log2(classid) (typed_array_size_log2[(classid)- JS_CLASS_UINT8C_ARRAY]) typedef enum JSErrorEnum { JS_EVAL_ERROR, JS_RANGE_ERROR, JS_REFERENCE_ERROR, JS_SYNTAX_ERROR, JS_TYPE_ERROR, JS_URI_ERROR, JS_INTERNAL_ERROR, JS_AGGREGATE_ERROR, JS_NATIVE_ERROR_COUNT, /* number of different NativeError objects */ } JSErrorEnum; #define JS_MAX_LOCAL_VARS 65535 #define JS_STACK_SIZE_MAX 65534 #define JS_STRING_LEN_MAX ((1 << 30) - 1) #define __exception __attribute__((warn_unused_result)) typedef struct JSShape JSShape; typedef struct JSString JSString; typedef struct JSString JSAtomStruct; typedef enum { JS_GC_PHASE_NONE, JS_GC_PHASE_DECREF, JS_GC_PHASE_REMOVE_CYCLES, } JSGCPhaseEnum; // Forward-declarations of enums gives -Wpedantic warning, so // include full declaration early #ifdef STRICT_R_HEADERS enum OPCodeEnum { #define FMT(f) #define DEF(id, size, n_pop, n_push, f) OP_ ## id, #define def(id, size, n_pop, n_push, f) #include "quickjs-opcode.h" #undef def #undef DEF #undef FMT OP_COUNT, /* excluding temporary opcodes */ /* temporary opcodes : overlap with the short opcodes */ OP_TEMP_START = OP_nop + 1, OP___dummy = OP_TEMP_START - 1, #define FMT(f) #define DEF(id, size, n_pop, n_push, f) #define def(id, size, n_pop, n_push, f) OP_ ## id, #include "quickjs-opcode.h" #undef def #undef DEF #undef FMT OP_TEMP_END, }; #endif typedef enum OPCodeEnum OPCodeEnum; /* function pointers are used for numeric operations so that it is possible to remove some numeric types */ typedef struct { JSValue (*to_string)(JSContext *ctx, JSValueConst val); JSValue (*from_string)(JSContext *ctx, const char *buf, int radix, int flags, slimb_t *pexponent); int (*unary_arith)(JSContext *ctx, JSValue *pres, OPCodeEnum op, JSValue op1); int (*binary_arith)(JSContext *ctx, OPCodeEnum op, JSValue *pres, JSValue op1, JSValue op2); int (*compare)(JSContext *ctx, OPCodeEnum op, JSValue op1, JSValue op2); /* only for bigfloat: */ JSValue (*mul_pow10_to_float64)(JSContext *ctx, const bf_t *a, int64_t exponent); int (*mul_pow10)(JSContext *ctx, JSValue *sp); } JSNumericOperations; struct JSRuntime { JSMallocFunctions mf; JSMallocState malloc_state; const char *rt_info; int atom_hash_size; /* power of two */ int atom_count; int atom_size; int 
atom_count_resize; /* resize hash table at this count */ uint32_t *atom_hash; JSAtomStruct **atom_array; int atom_free_index; /* 0 = none */ int class_count; /* size of class_array */ JSClass *class_array; struct list_head context_list; /* list of JSContext.link */ /* list of JSGCObjectHeader.link. List of allocated GC objects (used by the garbage collector) */ struct list_head gc_obj_list; /* list of JSGCObjectHeader.link. Used during JS_FreeValueRT() */ struct list_head gc_zero_ref_count_list; struct list_head tmp_obj_list; /* used during GC */ JSGCPhaseEnum gc_phase : 8; size_t malloc_gc_threshold; #ifdef DUMP_LEAKS struct list_head string_list; /* list of JSString.link */ #endif /* stack limitation */ uintptr_t stack_size; /* in bytes, 0 if no limit */ uintptr_t stack_top; uintptr_t stack_limit; /* lower stack limit */ JSValue current_exception; /* true if inside an out of memory error, to avoid recursing */ BOOL in_out_of_memory : 8; struct JSStackFrame *current_stack_frame; JSInterruptHandler *interrupt_handler; void *interrupt_opaque; JSHostPromiseRejectionTracker *host_promise_rejection_tracker; void *host_promise_rejection_tracker_opaque; struct list_head job_list; /* list of JSJobEntry.link */ JSModuleNormalizeFunc *module_normalize_func; JSModuleLoaderFunc *module_loader_func; void *module_loader_opaque; /* timestamp for internal use in module evaluation */ int64_t module_async_evaluation_next_timestamp; BOOL can_block : 8; /* TRUE if Atomics.wait can block */ /* used to allocate, free and clone SharedArrayBuffers */ JSSharedArrayBufferFunctions sab_funcs; /* Shape hash table */ int shape_hash_bits; int shape_hash_size; int shape_hash_count; /* number of hashed shapes */ JSShape **shape_hash; bf_context_t bf_ctx; JSNumericOperations bigint_ops; #ifdef CONFIG_BIGNUM JSNumericOperations bigfloat_ops; JSNumericOperations bigdecimal_ops; uint32_t operator_count; #endif void *user_opaque; }; struct JSClass { uint32_t class_id; /* 0 means free entry */ JSAtom class_name; JSClassFinalizer *finalizer; JSClassGCMark *gc_mark; JSClassCall *call; /* pointers for exotic behavior, can be NULL if none are present */ const JSClassExoticMethods *exotic; }; #define JS_MODE_STRICT (1 << 0) #define JS_MODE_STRIP (1 << 1) #define JS_MODE_MATH (1 << 2) #define JS_MODE_ASYNC (1 << 3) /* async function */ typedef struct JSStackFrame { struct JSStackFrame *prev_frame; /* NULL if first stack frame */ JSValue cur_func; /* current function, JS_UNDEFINED if the frame is detached */ JSValue *arg_buf; /* arguments */ JSValue *var_buf; /* variables */ struct list_head var_ref_list; /* list of JSVarRef.var_ref_link */ const uint8_t *cur_pc; /* only used in bytecode functions : PC of the instruction after the call */ int arg_count; int js_mode; /* for C functions, only JS_MODE_MATH may be set */ /* only used in generators. Current stack pointer value. NULL if the function is running. */ JSValue *cur_sp; } JSStackFrame; typedef enum { JS_GC_OBJ_TYPE_JS_OBJECT, JS_GC_OBJ_TYPE_FUNCTION_BYTECODE, JS_GC_OBJ_TYPE_SHAPE, JS_GC_OBJ_TYPE_VAR_REF, JS_GC_OBJ_TYPE_ASYNC_FUNCTION, JS_GC_OBJ_TYPE_JS_CONTEXT, } JSGCObjectTypeEnum; /* header for GC objects. GC objects are C data structures with a reference count that can reference other GC objects. JS Objects are a particular type of GC object. 
*/ struct JSGCObjectHeader { int ref_count; /* must come first, 32-bit */ JSGCObjectTypeEnum gc_obj_type : 4; uint8_t mark : 4; /* used by the GC */ uint8_t dummy1; /* not used by the GC */ uint16_t dummy2; /* not used by the GC */ struct list_head link; }; typedef struct JSVarRef { union { JSGCObjectHeader header; /* must come first */ struct { int __gc_ref_count; /* corresponds to header.ref_count */ uint8_t __gc_mark; /* corresponds to header.mark/gc_obj_type */ uint8_t is_detached : 1; uint8_t is_arg : 1; uint16_t var_idx; /* index of the corresponding function variable on the stack */ }; }; JSValue *pvalue; /* pointer to the value, either on the stack or to 'value' */ union { JSValue value; /* used when is_detached = TRUE */ struct { struct list_head var_ref_link; /* JSStackFrame.var_ref_list list */ struct JSAsyncFunctionState *async_func; /* != NULL if async stack frame */ }; /* used when is_detached = FALSE */ }; } JSVarRef; /* the same structure is used for big integers and big floats. Big integers are never infinite or NaNs */ typedef struct JSBigFloat { JSRefCountHeader header; /* must come first, 32-bit */ bf_t num; } JSBigFloat; #ifdef CONFIG_BIGNUM typedef struct JSFloatEnv { limb_t prec; bf_flags_t flags; unsigned int status; } JSFloatEnv; typedef struct JSBigDecimal { JSRefCountHeader header; /* must come first, 32-bit */ bfdec_t num; } JSBigDecimal; #endif typedef enum { JS_AUTOINIT_ID_PROTOTYPE, JS_AUTOINIT_ID_MODULE_NS, JS_AUTOINIT_ID_PROP, } JSAutoInitIDEnum; /* must be large enough to have a negligible runtime cost and small enough to call the interrupt callback often. */ #define JS_INTERRUPT_COUNTER_INIT 10000 struct JSContext { JSGCObjectHeader header; /* must come first */ JSRuntime *rt; struct list_head link; uint16_t binary_object_count; int binary_object_size; JSShape *array_shape; /* initial shape for Array objects */ JSValue *class_proto; JSValue function_proto; JSValue function_ctor; JSValue array_ctor; JSValue regexp_ctor; JSValue promise_ctor; JSValue native_error_proto[JS_NATIVE_ERROR_COUNT]; JSValue iterator_proto; JSValue async_iterator_proto; JSValue array_proto_values; JSValue throw_type_error; JSValue eval_obj; JSValue global_obj; /* global object */ JSValue global_var_obj; /* contains the global let/const definitions */ uint64_t random_state; bf_context_t *bf_ctx; /* points to rt->bf_ctx, shared by all contexts */ #ifdef CONFIG_BIGNUM JSFloatEnv fp_env; /* global FP environment */ BOOL bignum_ext : 8; /* enable math mode */ BOOL allow_operator_overloading : 8; #endif /* when the counter reaches zero, JSRutime.interrupt_handler is called */ int interrupt_counter; struct list_head loaded_modules; /* list of JSModuleDef.link */ /* if NULL, RegExp compilation is not supported */ JSValue (*compile_regexp)(JSContext *ctx, JSValueConst pattern, JSValueConst flags); /* if NULL, eval is not supported */ JSValue (*eval_internal)(JSContext *ctx, JSValueConst this_obj, const char *input, size_t input_len, const char *filename, int flags, int scope_idx); void *user_opaque; }; typedef union JSFloat64Union { double d; uint64_t u64; uint32_t u32[2]; } JSFloat64Union; enum { JS_ATOM_TYPE_STRING = 1, JS_ATOM_TYPE_GLOBAL_SYMBOL, JS_ATOM_TYPE_SYMBOL, JS_ATOM_TYPE_PRIVATE, }; enum { JS_ATOM_HASH_SYMBOL, JS_ATOM_HASH_PRIVATE, }; typedef enum { JS_ATOM_KIND_STRING, JS_ATOM_KIND_SYMBOL, JS_ATOM_KIND_PRIVATE, } JSAtomKindEnum; #define JS_ATOM_HASH_MASK ((1 << 30) - 1) struct JSString { JSRefCountHeader header; /* must come first, 32-bit */ uint32_t len : 31; uint8_t 
is_wide_char : 1; /* 0 = 8 bits, 1 = 16 bits characters */ /* for JS_ATOM_TYPE_SYMBOL: hash = 0, atom_type = 3, for JS_ATOM_TYPE_PRIVATE: hash = 1, atom_type = 3 XXX: could change encoding to have one more bit in hash */ uint32_t hash : 30; uint8_t atom_type : 2; /* != 0 if atom, JS_ATOM_TYPE_x */ uint32_t hash_next; /* atom_index for JS_ATOM_TYPE_SYMBOL */ #ifdef DUMP_LEAKS struct list_head link; /* string list */ #endif #ifdef STRICT_R_HEADERS union { __extension__ uint8_t str8[0]; /* 8 bit strings will get an extra null terminator */ __extension__ uint16_t str16[0]; } u; #else union { uint8_t str8[0]; /* 8 bit strings will get an extra null terminator */ uint16_t str16[0]; } u; #endif }; typedef struct JSClosureVar { uint8_t is_local : 1; uint8_t is_arg : 1; uint8_t is_const : 1; uint8_t is_lexical : 1; uint8_t var_kind : 4; /* see JSVarKindEnum */ /* 8 bits available */ uint16_t var_idx; /* is_local = TRUE: index to a normal variable of the parent function. otherwise: index to a closure variable of the parent function */ JSAtom var_name; } JSClosureVar; #define ARG_SCOPE_INDEX 1 #define ARG_SCOPE_END (-2) typedef struct JSVarScope { int parent; /* index into fd->scopes of the enclosing scope */ int first; /* index into fd->vars of the last variable in this scope */ } JSVarScope; typedef enum { /* XXX: add more variable kinds here instead of using bit fields */ JS_VAR_NORMAL, JS_VAR_FUNCTION_DECL, /* lexical var with function declaration */ JS_VAR_NEW_FUNCTION_DECL, /* lexical var with async/generator function declaration */ JS_VAR_CATCH, JS_VAR_FUNCTION_NAME, /* function expression name */ JS_VAR_PRIVATE_FIELD, JS_VAR_PRIVATE_METHOD, JS_VAR_PRIVATE_GETTER, JS_VAR_PRIVATE_SETTER, /* must come after JS_VAR_PRIVATE_GETTER */ JS_VAR_PRIVATE_GETTER_SETTER, /* must come after JS_VAR_PRIVATE_SETTER */ } JSVarKindEnum; /* XXX: could use a different structure in bytecode functions to save memory */ typedef struct JSVarDef { JSAtom var_name; /* index into fd->scopes of this variable lexical scope */ int scope_level; /* during compilation: - if scope_level = 0: scope in which the variable is defined - if scope_level != 0: index into fd->vars of the next variable in the same or enclosing lexical scope in a bytecode function: index into fd->vars of the next variable in the same or enclosing lexical scope */ int scope_next; uint8_t is_const : 1; uint8_t is_lexical : 1; uint8_t is_captured : 1; uint8_t is_static_private : 1; /* only used during private class field parsing */ uint8_t var_kind : 4; /* see JSVarKindEnum */ /* only used during compilation: function pool index for lexical variables with var_kind = JS_VAR_FUNCTION_DECL/JS_VAR_NEW_FUNCTION_DECL or scope level of the definition of the 'var' variables (they have scope_level = 0) */ int func_pool_idx : 24; /* only used during compilation : index in the constant pool for hoisted function definition */ } JSVarDef; /* for the encoding of the pc2line table */ #define PC2LINE_BASE (-1) #define PC2LINE_RANGE 5 #define PC2LINE_OP_FIRST 1 #define PC2LINE_DIFF_PC_MAX ((255 - PC2LINE_OP_FIRST) / PC2LINE_RANGE) typedef enum JSFunctionKindEnum { JS_FUNC_NORMAL = 0, JS_FUNC_GENERATOR = (1 << 0), JS_FUNC_ASYNC = (1 << 1), JS_FUNC_ASYNC_GENERATOR = (JS_FUNC_GENERATOR | JS_FUNC_ASYNC), } JSFunctionKindEnum; typedef struct JSFunctionBytecode { JSGCObjectHeader header; /* must come first */ uint8_t js_mode; uint8_t has_prototype : 1; /* true if a prototype field is necessary */ uint8_t has_simple_parameter_list : 1; uint8_t is_derived_class_constructor : 1; /* 
true if home_object needs to be initialized */ uint8_t need_home_object : 1; uint8_t func_kind : 2; uint8_t new_target_allowed : 1; uint8_t super_call_allowed : 1; uint8_t super_allowed : 1; uint8_t arguments_allowed : 1; uint8_t has_debug : 1; uint8_t backtrace_barrier : 1; /* stop backtrace on this function */ uint8_t read_only_bytecode : 1; uint8_t is_direct_or_indirect_eval : 1; /* used by JS_GetScriptOrModuleName() */ /* XXX: 4 bits available */ uint8_t *byte_code_buf; /* (self pointer) */ int byte_code_len; JSAtom func_name; JSVarDef *vardefs; /* arguments + local variables (arg_count + var_count) (self pointer) */ JSClosureVar *closure_var; /* list of variables in the closure (self pointer) */ uint16_t arg_count; uint16_t var_count; uint16_t defined_arg_count; /* for length function property */ uint16_t stack_size; /* maximum stack size */ JSContext *realm; /* function realm */ JSValue *cpool; /* constant pool (self pointer) */ int cpool_count; int closure_var_count; struct { /* debug info, move to separate structure to save memory? */ JSAtom filename; int line_num; int source_len; int pc2line_len; uint8_t *pc2line_buf; char *source; } debug; } JSFunctionBytecode; typedef struct JSBoundFunction { JSValue func_obj; JSValue this_val; int argc; #ifdef STRICT_R_HEADERS JSValue argv[]; #else JSValue argv[0]; #endif } JSBoundFunction; typedef enum JSIteratorKindEnum { JS_ITERATOR_KIND_KEY, JS_ITERATOR_KIND_VALUE, JS_ITERATOR_KIND_KEY_AND_VALUE, } JSIteratorKindEnum; typedef struct JSForInIterator { JSValue obj; uint32_t idx; uint32_t atom_count; uint8_t in_prototype_chain; uint8_t is_array; JSPropertyEnum *tab_atom; /* is_array = FALSE */ } JSForInIterator; typedef struct JSRegExp { JSString *pattern; JSString *bytecode; /* also contains the flags */ } JSRegExp; typedef struct JSProxyData { JSValue target; JSValue handler; uint8_t is_func; uint8_t is_revoked; } JSProxyData; typedef struct JSArrayBuffer { int byte_length; /* 0 if detached */ uint8_t detached; uint8_t shared; /* if shared, the array buffer cannot be detached */ uint8_t *data; /* NULL if detached */ struct list_head array_list; void *opaque; JSFreeArrayBufferDataFunc *free_func; } JSArrayBuffer; typedef struct JSTypedArray { struct list_head link; /* link to arraybuffer */ JSObject *obj; /* back pointer to the TypedArray/DataView object */ JSObject *buffer; /* based array buffer */ uint32_t offset; /* offset in the array buffer */ uint32_t length; /* length in the array buffer */ } JSTypedArray; typedef struct JSAsyncFunctionState { JSGCObjectHeader header; JSValue this_val; /* 'this' argument */ int argc; /* number of function arguments */ BOOL throw_flag; /* used to throw an exception in JS_CallInternal() */ BOOL is_completed; /* TRUE if the function has returned. 
The stack frame is no longer valid */ JSValue resolving_funcs[2]; /* only used in JS async functions */ JSStackFrame frame; } JSAsyncFunctionState; typedef enum { /* binary operators */ JS_OVOP_ADD, JS_OVOP_SUB, JS_OVOP_MUL, JS_OVOP_DIV, JS_OVOP_MOD, JS_OVOP_POW, JS_OVOP_OR, JS_OVOP_AND, JS_OVOP_XOR, JS_OVOP_SHL, JS_OVOP_SAR, JS_OVOP_SHR, JS_OVOP_EQ, JS_OVOP_LESS, JS_OVOP_BINARY_COUNT, /* unary operators */ JS_OVOP_POS = JS_OVOP_BINARY_COUNT, JS_OVOP_NEG, JS_OVOP_INC, JS_OVOP_DEC, JS_OVOP_NOT, JS_OVOP_COUNT, } JSOverloadableOperatorEnum; typedef struct { uint32_t operator_index; JSObject *ops[JS_OVOP_BINARY_COUNT]; /* self operators */ } JSBinaryOperatorDefEntry; typedef struct { int count; JSBinaryOperatorDefEntry *tab; } JSBinaryOperatorDef; typedef struct { uint32_t operator_counter; BOOL is_primitive; /* OperatorSet for a primitive type */ /* NULL if no operator is defined */ JSObject *self_ops[JS_OVOP_COUNT]; /* self operators */ JSBinaryOperatorDef left; JSBinaryOperatorDef right; } JSOperatorSetData; typedef struct JSReqModuleEntry { JSAtom module_name; JSModuleDef *module; /* used using resolution */ } JSReqModuleEntry; typedef enum JSExportTypeEnum { JS_EXPORT_TYPE_LOCAL, JS_EXPORT_TYPE_INDIRECT, } JSExportTypeEnum; typedef struct JSExportEntry { union { struct { int var_idx; /* closure variable index */ JSVarRef *var_ref; /* if != NULL, reference to the variable */ } local; /* for local export */ int req_module_idx; /* module for indirect export */ } u; JSExportTypeEnum export_type; JSAtom local_name; /* '*' if export ns from. not used for local export after compilation */ JSAtom export_name; /* exported variable name */ } JSExportEntry; typedef struct JSStarExportEntry { int req_module_idx; /* in req_module_entries */ } JSStarExportEntry; typedef struct JSImportEntry { int var_idx; /* closure variable index */ JSAtom import_name; int req_module_idx; /* in req_module_entries */ } JSImportEntry; typedef enum { JS_MODULE_STATUS_UNLINKED, JS_MODULE_STATUS_LINKING, JS_MODULE_STATUS_LINKED, JS_MODULE_STATUS_EVALUATING, JS_MODULE_STATUS_EVALUATING_ASYNC, JS_MODULE_STATUS_EVALUATED, } JSModuleStatus; struct JSModuleDef { JSRefCountHeader header; /* must come first, 32-bit */ JSAtom module_name; struct list_head link; JSReqModuleEntry *req_module_entries; int req_module_entries_count; int req_module_entries_size; JSExportEntry *export_entries; int export_entries_count; int export_entries_size; JSStarExportEntry *star_export_entries; int star_export_entries_count; int star_export_entries_size; JSImportEntry *import_entries; int import_entries_count; int import_entries_size; JSValue module_ns; JSValue func_obj; /* only used for JS modules */ JSModuleInitFunc *init_func; /* only used for C modules */ BOOL has_tla : 8; /* true if func_obj contains await */ BOOL resolved : 8; BOOL func_created : 8; JSModuleStatus status : 8; /* temp use during js_module_link() & js_module_evaluate() */ int dfs_index, dfs_ancestor_index; JSModuleDef *stack_prev; /* temp use during js_module_evaluate() */ JSModuleDef **async_parent_modules; int async_parent_modules_count; int async_parent_modules_size; int pending_async_dependencies; BOOL async_evaluation; int64_t async_evaluation_timestamp; JSModuleDef *cycle_root; JSValue promise; /* corresponds to spec field: capability */ JSValue resolving_funcs[2]; /* corresponds to spec field: capability */ /* true if evaluation yielded an exception. 
It is saved in eval_exception */ BOOL eval_has_exception : 8; JSValue eval_exception; JSValue meta_obj; /* for import.meta */ }; typedef struct JSJobEntry { struct list_head link; JSContext *ctx; JSJobFunc *job_func; int argc; #ifdef STRICT_R_HEADERS JSValue argv[]; #else JSValue argv[0]; #endif } JSJobEntry; typedef struct JSProperty { union { JSValue value; /* JS_PROP_NORMAL */ struct { /* JS_PROP_GETSET */ JSObject *getter; /* NULL if undefined */ JSObject *setter; /* NULL if undefined */ } getset; JSVarRef *var_ref; /* JS_PROP_VARREF */ struct { /* JS_PROP_AUTOINIT */ /* in order to use only 2 pointers, we compress the realm and the init function pointer */ uintptr_t realm_and_id; /* realm and init_id (JS_AUTOINIT_ID_x) in the 2 low bits */ void *opaque; } init; } u; } JSProperty; #define JS_PROP_INITIAL_SIZE 2 #define JS_PROP_INITIAL_HASH_SIZE 4 /* must be a power of two */ #define JS_ARRAY_INITIAL_SIZE 2 typedef struct JSShapeProperty { uint32_t hash_next : 26; /* 0 if last in list */ uint32_t flags : 6; /* JS_PROP_XXX */ JSAtom atom; /* JS_ATOM_NULL = free property entry */ } JSShapeProperty; struct JSShape { /* hash table of size hash_mask + 1 before the start of the structure (see prop_hash_end()). */ JSGCObjectHeader header; /* true if the shape is inserted in the shape hash table. If not, JSShape.hash is not valid */ uint8_t is_hashed; /* If true, the shape may have small array index properties 'n' with 0 <= n <= 2^31-1. If false, the shape is guaranteed not to have small array index properties */ uint8_t has_small_array_index; uint32_t hash; /* current hash value */ uint32_t prop_hash_mask; int prop_size; /* allocated properties */ int prop_count; /* include deleted properties */ int deleted_prop_count; JSShape *shape_hash_next; /* in JSRuntime.shape_hash[h] list */ JSObject *proto; #ifdef STRICT_R_HEADERS JSShapeProperty prop[]; /* prop_size elements */ #else JSShapeProperty prop[0]; /* prop_size elements */ #endif }; struct JSObject { union { JSGCObjectHeader header; struct { int __gc_ref_count; /* corresponds to header.ref_count */ uint8_t __gc_mark; /* corresponds to header.mark/gc_obj_type */ uint8_t extensible : 1; uint8_t free_mark : 1; /* only used when freeing objects with cycles */ uint8_t is_exotic : 1; /* TRUE if object has exotic property handlers */ uint8_t fast_array : 1; /* TRUE if u.array is used for get/put (for JS_CLASS_ARRAY, JS_CLASS_ARGUMENTS and typed arrays) */ uint8_t is_constructor : 1; /* TRUE if object is a constructor function */ uint8_t is_uncatchable_error : 1; /* if TRUE, error is not catchable */ uint8_t tmp_mark : 1; /* used in JS_WriteObjectRec() */ uint8_t is_HTMLDDA : 1; /* specific annex B IsHtmlDDA behavior */ uint16_t class_id; /* see JS_CLASS_x */ }; }; /* byte offsets: 16/24 */ JSShape *shape; /* prototype and property names + flag */ JSProperty *prop; /* array of properties */ /* byte offsets: 24/40 */ struct JSMapRecord *first_weak_ref; /* XXX: use a bit and an external hash table? 
*/ /* byte offsets: 28/48 */ union { void *opaque; struct JSBoundFunction *bound_function; /* JS_CLASS_BOUND_FUNCTION */ struct JSCFunctionDataRecord *c_function_data_record; /* JS_CLASS_C_FUNCTION_DATA */ struct JSForInIterator *for_in_iterator; /* JS_CLASS_FOR_IN_ITERATOR */ struct JSArrayBuffer *array_buffer; /* JS_CLASS_ARRAY_BUFFER, JS_CLASS_SHARED_ARRAY_BUFFER */ struct JSTypedArray *typed_array; /* JS_CLASS_UINT8C_ARRAY..JS_CLASS_DATAVIEW */ #ifdef CONFIG_BIGNUM struct JSFloatEnv *float_env; /* JS_CLASS_FLOAT_ENV */ struct JSOperatorSetData *operator_set; /* JS_CLASS_OPERATOR_SET */ #endif struct JSMapState *map_state; /* JS_CLASS_MAP..JS_CLASS_WEAKSET */ struct JSMapIteratorData *map_iterator_data; /* JS_CLASS_MAP_ITERATOR, JS_CLASS_SET_ITERATOR */ struct JSArrayIteratorData *array_iterator_data; /* JS_CLASS_ARRAY_ITERATOR, JS_CLASS_STRING_ITERATOR */ struct JSRegExpStringIteratorData *regexp_string_iterator_data; /* JS_CLASS_REGEXP_STRING_ITERATOR */ struct JSGeneratorData *generator_data; /* JS_CLASS_GENERATOR */ struct JSProxyData *proxy_data; /* JS_CLASS_PROXY */ struct JSPromiseData *promise_data; /* JS_CLASS_PROMISE */ struct JSPromiseFunctionData *promise_function_data; /* JS_CLASS_PROMISE_RESOLVE_FUNCTION, JS_CLASS_PROMISE_REJECT_FUNCTION */ struct JSAsyncFunctionState *async_function_data; /* JS_CLASS_ASYNC_FUNCTION_RESOLVE, JS_CLASS_ASYNC_FUNCTION_REJECT */ struct JSAsyncFromSyncIteratorData *async_from_sync_iterator_data; /* JS_CLASS_ASYNC_FROM_SYNC_ITERATOR */ struct JSAsyncGeneratorData *async_generator_data; /* JS_CLASS_ASYNC_GENERATOR */ struct { /* JS_CLASS_BYTECODE_FUNCTION: 12/24 bytes */ /* also used by JS_CLASS_GENERATOR_FUNCTION, JS_CLASS_ASYNC_FUNCTION and JS_CLASS_ASYNC_GENERATOR_FUNCTION */ struct JSFunctionBytecode *function_bytecode; JSVarRef **var_refs; JSObject *home_object; /* for 'super' access */ } func; struct { /* JS_CLASS_C_FUNCTION: 12/20 bytes */ JSContext *realm; JSCFunctionType c_function; uint8_t length; uint8_t cproto; int16_t magic; } cfunc; /* array part for fast arrays and typed arrays */ struct { /* JS_CLASS_ARRAY, JS_CLASS_ARGUMENTS, JS_CLASS_UINT8C_ARRAY..JS_CLASS_FLOAT64_ARRAY */ union { uint32_t size; /* JS_CLASS_ARRAY, JS_CLASS_ARGUMENTS */ struct JSTypedArray *typed_array; /* JS_CLASS_UINT8C_ARRAY..JS_CLASS_FLOAT64_ARRAY */ } u1; union { JSValue *values; /* JS_CLASS_ARRAY, JS_CLASS_ARGUMENTS */ void *ptr; /* JS_CLASS_UINT8C_ARRAY..JS_CLASS_FLOAT64_ARRAY */ int8_t *int8_ptr; /* JS_CLASS_INT8_ARRAY */ uint8_t *uint8_ptr; /* JS_CLASS_UINT8_ARRAY, JS_CLASS_UINT8C_ARRAY */ int16_t *int16_ptr; /* JS_CLASS_INT16_ARRAY */ uint16_t *uint16_ptr; /* JS_CLASS_UINT16_ARRAY */ int32_t *int32_ptr; /* JS_CLASS_INT32_ARRAY */ uint32_t *uint32_ptr; /* JS_CLASS_UINT32_ARRAY */ int64_t *int64_ptr; /* JS_CLASS_INT64_ARRAY */ uint64_t *uint64_ptr; /* JS_CLASS_UINT64_ARRAY */ float *float_ptr; /* JS_CLASS_FLOAT32_ARRAY */ double *double_ptr; /* JS_CLASS_FLOAT64_ARRAY */ } u; uint32_t count; /* <= 2^31-1. 
0 for a detached typed array */ } array; /* 12/20 bytes */ JSRegExp regexp; /* JS_CLASS_REGEXP: 8/16 bytes */ JSValue object_data; /* for JS_SetObjectData(): 8/16/16 bytes */ } u; /* byte sizes: 40/48/72 */ }; enum { __JS_ATOM_NULL = JS_ATOM_NULL, #define DEF(name, str) JS_ATOM_ ## name, #include "quickjs-atom.h" #undef DEF JS_ATOM_END, }; #define JS_ATOM_LAST_KEYWORD JS_ATOM_super #define JS_ATOM_LAST_STRICT_KEYWORD JS_ATOM_yield static const char js_atom_init[] = #define DEF(name, str) str "\0" #include "quickjs-atom.h" #undef DEF ; typedef enum OPCodeFormat { #define FMT(f) OP_FMT_ ## f, #define DEF(id, size, n_pop, n_push, f) #include "quickjs-opcode.h" #undef DEF #undef FMT } OPCodeFormat; // enum defined earlier for Wpedantic compatibility #ifndef STRICT_R_HEADERS enum OPCodeEnum { #define FMT(f) #define DEF(id, size, n_pop, n_push, f) OP_ ## id, #define def(id, size, n_pop, n_push, f) #include "quickjs-opcode.h" #undef def #undef DEF #undef FMT OP_COUNT, /* excluding temporary opcodes */ /* temporary opcodes : overlap with the short opcodes */ OP_TEMP_START = OP_nop + 1, OP___dummy = OP_TEMP_START - 1, #define FMT(f) #define DEF(id, size, n_pop, n_push, f) #define def(id, size, n_pop, n_push, f) OP_ ## id, #include "quickjs-opcode.h" #undef def #undef DEF #undef FMT OP_TEMP_END, }; #endif static int JS_InitAtoms(JSRuntime *rt); static JSAtom __JS_NewAtomInit(JSRuntime *rt, const char *str, int len, int atom_type); static void JS_FreeAtomStruct(JSRuntime *rt, JSAtomStruct *p); static void free_function_bytecode(JSRuntime *rt, JSFunctionBytecode *b); static JSValue js_call_c_function(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags); static JSValue js_call_bound_function(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags); static JSValue JS_CallInternal(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, JSValueConst new_target, int argc, JSValue *argv, int flags); static JSValue JS_CallConstructorInternal(JSContext *ctx, JSValueConst func_obj, JSValueConst new_target, int argc, JSValue *argv, int flags); static JSValue JS_CallFree(JSContext *ctx, JSValue func_obj, JSValueConst this_obj, int argc, JSValueConst *argv); static JSValue JS_InvokeFree(JSContext *ctx, JSValue this_val, JSAtom atom, int argc, JSValueConst *argv); static __exception int JS_ToArrayLengthFree(JSContext *ctx, uint32_t *plen, JSValue val, BOOL is_array_ctor); static JSValue JS_EvalObject(JSContext *ctx, JSValueConst this_obj, JSValueConst val, int flags, int scope_idx); JSValue __attribute__((format(printf, 2, 3))) JS_ThrowInternalError(JSContext *ctx, const char *fmt, ...); static __maybe_unused void JS_DumpAtoms(JSRuntime *rt); static __maybe_unused void JS_DumpString(JSRuntime *rt, const JSString *p); static __maybe_unused void JS_DumpObjectHeader(JSRuntime *rt); static __maybe_unused void JS_DumpObject(JSRuntime *rt, JSObject *p); static __maybe_unused void JS_DumpGCObject(JSRuntime *rt, JSGCObjectHeader *p); static __maybe_unused void JS_DumpValueShort(JSRuntime *rt, JSValueConst val); static __maybe_unused void JS_DumpValue(JSContext *ctx, JSValueConst val); static __maybe_unused void JS_PrintValue(JSContext *ctx, const char *str, JSValueConst val); static __maybe_unused void JS_DumpShapes(JSRuntime *rt); static JSValue js_function_apply(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int magic); static void js_array_finalizer(JSRuntime *rt, JSValue val); static void 
js_array_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_object_data_finalizer(JSRuntime *rt, JSValue val); static void js_object_data_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_c_function_finalizer(JSRuntime *rt, JSValue val); static void js_c_function_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_bytecode_function_finalizer(JSRuntime *rt, JSValue val); static void js_bytecode_function_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_bound_function_finalizer(JSRuntime *rt, JSValue val); static void js_bound_function_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_for_in_iterator_finalizer(JSRuntime *rt, JSValue val); static void js_for_in_iterator_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_regexp_finalizer(JSRuntime *rt, JSValue val); static void js_array_buffer_finalizer(JSRuntime *rt, JSValue val); static void js_typed_array_finalizer(JSRuntime *rt, JSValue val); static void js_typed_array_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_proxy_finalizer(JSRuntime *rt, JSValue val); static void js_proxy_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_map_finalizer(JSRuntime *rt, JSValue val); static void js_map_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_map_iterator_finalizer(JSRuntime *rt, JSValue val); static void js_map_iterator_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_array_iterator_finalizer(JSRuntime *rt, JSValue val); static void js_array_iterator_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_regexp_string_iterator_finalizer(JSRuntime *rt, JSValue val); static void js_regexp_string_iterator_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_generator_finalizer(JSRuntime *rt, JSValue obj); static void js_generator_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_promise_finalizer(JSRuntime *rt, JSValue val); static void js_promise_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static void js_promise_resolve_function_finalizer(JSRuntime *rt, JSValue val); static void js_promise_resolve_function_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); #ifdef CONFIG_BIGNUM static void js_operator_set_finalizer(JSRuntime *rt, JSValue val); static void js_operator_set_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); #endif static JSValue JS_ToStringFree(JSContext *ctx, JSValue val); static int JS_ToBoolFree(JSContext *ctx, JSValue val); static int JS_ToInt32Free(JSContext *ctx, int32_t *pres, JSValue val); static int JS_ToFloat64Free(JSContext *ctx, double *pres, JSValue val); static int JS_ToUint8ClampFree(JSContext *ctx, int32_t *pres, JSValue val); static JSValue js_compile_regexp(JSContext *ctx, JSValueConst pattern, JSValueConst flags); static JSValue js_regexp_constructor_internal(JSContext *ctx, JSValueConst ctor, JSValue pattern, JSValue bc); static void gc_decref(JSRuntime *rt); static int JS_NewClass1(JSRuntime *rt, JSClassID class_id, const JSClassDef *class_def, JSAtom name); typedef enum JSStrictEqModeEnum { JS_EQ_STRICT, JS_EQ_SAME_VALUE, JS_EQ_SAME_VALUE_ZERO, } JSStrictEqModeEnum; static BOOL js_strict_eq2(JSContext *ctx, JSValue op1, JSValue op2, JSStrictEqModeEnum eq_mode); static BOOL js_strict_eq(JSContext *ctx, JSValue op1, 
JSValue op2); static BOOL js_same_value(JSContext *ctx, JSValueConst op1, JSValueConst op2); static BOOL js_same_value_zero(JSContext *ctx, JSValueConst op1, JSValueConst op2); static JSValue JS_ToObject(JSContext *ctx, JSValueConst val); static JSValue JS_ToObjectFree(JSContext *ctx, JSValue val); static JSProperty *add_property(JSContext *ctx, JSObject *p, JSAtom prop, int prop_flags); static JSValue JS_NewBigInt(JSContext *ctx); static inline bf_t *JS_GetBigInt(JSValueConst val) { JSBigFloat *p = JS_VALUE_GET_PTR(val); return &p->num; } static JSValue JS_CompactBigInt1(JSContext *ctx, JSValue val, BOOL convert_to_safe_integer); static JSValue JS_CompactBigInt(JSContext *ctx, JSValue val); static int JS_ToBigInt64Free(JSContext *ctx, int64_t *pres, JSValue val); static bf_t *JS_ToBigInt(JSContext *ctx, bf_t *buf, JSValueConst val); static void JS_FreeBigInt(JSContext *ctx, bf_t *a, bf_t *buf); #ifdef CONFIG_BIGNUM static void js_float_env_finalizer(JSRuntime *rt, JSValue val); static JSValue JS_NewBigFloat(JSContext *ctx); static inline bf_t *JS_GetBigFloat(JSValueConst val) { JSBigFloat *p = JS_VALUE_GET_PTR(val); return &p->num; } static JSValue JS_NewBigDecimal(JSContext *ctx); static inline bfdec_t *JS_GetBigDecimal(JSValueConst val) { JSBigDecimal *p = JS_VALUE_GET_PTR(val); return &p->num; } static bf_t *JS_ToBigFloat(JSContext *ctx, bf_t *buf, JSValueConst val); static JSValue JS_ToBigDecimalFree(JSContext *ctx, JSValue val, BOOL allow_null_or_undefined); static bfdec_t *JS_ToBigDecimal(JSContext *ctx, JSValueConst val); #endif JSValue JS_ThrowOutOfMemory(JSContext *ctx); static JSValue JS_ThrowTypeErrorRevokedProxy(JSContext *ctx); static JSValue js_proxy_getPrototypeOf(JSContext *ctx, JSValueConst obj); static int js_proxy_setPrototypeOf(JSContext *ctx, JSValueConst obj, JSValueConst proto_val, BOOL throw_flag); static int js_proxy_isExtensible(JSContext *ctx, JSValueConst obj); static int js_proxy_preventExtensions(JSContext *ctx, JSValueConst obj); static int js_proxy_isArray(JSContext *ctx, JSValueConst obj); static int JS_CreateProperty(JSContext *ctx, JSObject *p, JSAtom prop, JSValueConst val, JSValueConst getter, JSValueConst setter, int flags); static int js_string_memcmp(const JSString *p1, const JSString *p2, int len); static void reset_weak_ref(JSRuntime *rt, JSObject *p); static JSValue js_array_buffer_constructor3(JSContext *ctx, JSValueConst new_target, uint64_t len, JSClassID class_id, uint8_t *buf, JSFreeArrayBufferDataFunc *free_func, void *opaque, BOOL alloc_flag); static JSArrayBuffer *js_get_array_buffer(JSContext *ctx, JSValueConst obj); static JSValue js_typed_array_constructor(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int classid); static JSValue js_typed_array_constructor_ta(JSContext *ctx, JSValueConst new_target, JSValueConst src_obj, int classid); static BOOL typed_array_is_detached(JSContext *ctx, JSObject *p); static uint32_t typed_array_get_length(JSContext *ctx, JSObject *p); static JSValue JS_ThrowTypeErrorDetachedArrayBuffer(JSContext *ctx); static JSVarRef *get_var_ref(JSContext *ctx, JSStackFrame *sf, int var_idx, BOOL is_arg); static void __async_func_free(JSRuntime *rt, JSAsyncFunctionState *s); static void async_func_free(JSRuntime *rt, JSAsyncFunctionState *s); static JSValue js_generator_function_call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags); static void js_async_function_resolve_finalizer(JSRuntime *rt, JSValue val); static void 
js_async_function_resolve_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static JSValue JS_EvalInternal(JSContext *ctx, JSValueConst this_obj, const char *input, size_t input_len, const char *filename, int flags, int scope_idx); static void js_free_module_def(JSContext *ctx, JSModuleDef *m); static void js_mark_module_def(JSRuntime *rt, JSModuleDef *m, JS_MarkFunc *mark_func); static JSValue js_import_meta(JSContext *ctx); static JSValue js_dynamic_import(JSContext *ctx, JSValueConst specifier); static void free_var_ref(JSRuntime *rt, JSVarRef *var_ref); static JSValue js_new_promise_capability(JSContext *ctx, JSValue *resolving_funcs, JSValueConst ctor); static __exception int perform_promise_then(JSContext *ctx, JSValueConst promise, JSValueConst *resolve_reject, JSValueConst *cap_resolving_funcs); static JSValue js_promise_resolve(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int magic); static JSValue js_promise_then(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv); static int js_string_compare(JSContext *ctx, const JSString *p1, const JSString *p2); static JSValue JS_ToNumber(JSContext *ctx, JSValueConst val); static int JS_SetPropertyValue(JSContext *ctx, JSValueConst this_obj, JSValue prop, JSValue val, int flags); static int JS_NumberIsInteger(JSContext *ctx, JSValueConst val); static BOOL JS_NumberIsNegativeOrMinusZero(JSContext *ctx, JSValueConst val); static JSValue JS_ToNumberFree(JSContext *ctx, JSValue val); static int JS_GetOwnPropertyInternal(JSContext *ctx, JSPropertyDescriptor *desc, JSObject *p, JSAtom prop); static void js_free_desc(JSContext *ctx, JSPropertyDescriptor *desc); static void JS_AddIntrinsicBasicObjects(JSContext *ctx); static void js_free_shape(JSRuntime *rt, JSShape *sh); static void js_free_shape_null(JSRuntime *rt, JSShape *sh); static int js_shape_prepare_update(JSContext *ctx, JSObject *p, JSShapeProperty **pprs); static int init_shape_hash(JSRuntime *rt); static __exception int js_get_length32(JSContext *ctx, uint32_t *pres, JSValueConst obj); static __exception int js_get_length64(JSContext *ctx, int64_t *pres, JSValueConst obj); static void free_arg_list(JSContext *ctx, JSValue *tab, uint32_t len); static JSValue *build_arg_list(JSContext *ctx, uint32_t *plen, JSValueConst array_arg); static BOOL js_get_fast_array(JSContext *ctx, JSValueConst obj, JSValue **arrpp, uint32_t *countp); static JSValue JS_CreateAsyncFromSyncIterator(JSContext *ctx, JSValueConst sync_iter); static void js_c_function_data_finalizer(JSRuntime *rt, JSValue val); static void js_c_function_data_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func); static JSValue js_c_function_data_call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_val, int argc, JSValueConst *argv, int flags); static JSAtom js_symbol_to_atom(JSContext *ctx, JSValue val); static void add_gc_object(JSRuntime *rt, JSGCObjectHeader *h, JSGCObjectTypeEnum type); static void remove_gc_object(JSGCObjectHeader *h); static JSValue js_instantiate_prototype(JSContext *ctx, JSObject *p, JSAtom atom, void *opaque); static JSValue js_module_ns_autoinit(JSContext *ctx, JSObject *p, JSAtom atom, void *opaque); static JSValue JS_InstantiateFunctionListItem2(JSContext *ctx, JSObject *p, JSAtom atom, void *opaque); void JS_SetUncatchableError(JSContext *ctx, JSValueConst val, BOOL flag); static JSValue js_object_groupBy(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int is_map); static const JSClassExoticMethods 
js_arguments_exotic_methods; static const JSClassExoticMethods js_string_exotic_methods; static const JSClassExoticMethods js_proxy_exotic_methods; static const JSClassExoticMethods js_module_ns_exotic_methods; static JSClassID js_class_id_alloc = JS_CLASS_INIT_COUNT; static void js_trigger_gc(JSRuntime *rt, size_t size) { BOOL force_gc; #ifdef FORCE_GC_AT_MALLOC force_gc = TRUE; #else force_gc = ((rt->malloc_state.malloc_size + size) > rt->malloc_gc_threshold); #endif if (force_gc) { #ifdef DUMP_GC printf("GC: size=%" PRIu64 "\n", (uint64_t)rt->malloc_state.malloc_size); #endif JS_RunGC(rt); rt->malloc_gc_threshold = rt->malloc_state.malloc_size + (rt->malloc_state.malloc_size >> 1); } } static size_t js_malloc_usable_size_unknown(const void *ptr) { return 0; } void *js_malloc_rt(JSRuntime *rt, size_t size) { return rt->mf.js_malloc(&rt->malloc_state, size); } void js_free_rt(JSRuntime *rt, void *ptr) { rt->mf.js_free(&rt->malloc_state, ptr); } void *js_realloc_rt(JSRuntime *rt, void *ptr, size_t size) { return rt->mf.js_realloc(&rt->malloc_state, ptr, size); } size_t js_malloc_usable_size_rt(JSRuntime *rt, const void *ptr) { return rt->mf.js_malloc_usable_size(ptr); } void *js_mallocz_rt(JSRuntime *rt, size_t size) { void *ptr; ptr = js_malloc_rt(rt, size); if (!ptr) return NULL; return memset(ptr, 0, size); } /* called by libbf */ static void *js_bf_realloc(void *opaque, void *ptr, size_t size) { JSRuntime *rt = opaque; return js_realloc_rt(rt, ptr, size); } /* Throw out of memory in case of error */ void *js_malloc(JSContext *ctx, size_t size) { void *ptr; ptr = js_malloc_rt(ctx->rt, size); if (unlikely(!ptr)) { JS_ThrowOutOfMemory(ctx); return NULL; } return ptr; } /* Throw out of memory in case of error */ void *js_mallocz(JSContext *ctx, size_t size) { void *ptr; ptr = js_mallocz_rt(ctx->rt, size); if (unlikely(!ptr)) { JS_ThrowOutOfMemory(ctx); return NULL; } return ptr; } void js_free(JSContext *ctx, void *ptr) { js_free_rt(ctx->rt, ptr); } /* Throw out of memory in case of error */ void *js_realloc(JSContext *ctx, void *ptr, size_t size) { void *ret; ret = js_realloc_rt(ctx->rt, ptr, size); if (unlikely(!ret && size != 0)) { JS_ThrowOutOfMemory(ctx); return NULL; } return ret; } /* store extra allocated size in *pslack if successful */ void *js_realloc2(JSContext *ctx, void *ptr, size_t size, size_t *pslack) { void *ret; ret = js_realloc_rt(ctx->rt, ptr, size); if (unlikely(!ret && size != 0)) { JS_ThrowOutOfMemory(ctx); return NULL; } if (pslack) { size_t new_size = js_malloc_usable_size_rt(ctx->rt, ret); *pslack = (new_size > size) ? 
new_size - size : 0; } return ret; } size_t js_malloc_usable_size(JSContext *ctx, const void *ptr) { return js_malloc_usable_size_rt(ctx->rt, ptr); } /* Throw out of memory exception in case of error */ char *js_strndup(JSContext *ctx, const char *s, size_t n) { char *ptr; ptr = js_malloc(ctx, n + 1); if (ptr) { memcpy(ptr, s, n); ptr[n] = '\0'; } return ptr; } char *js_strdup(JSContext *ctx, const char *str) { return js_strndup(ctx, str, strlen(str)); } static no_inline int js_realloc_array(JSContext *ctx, void **parray, int elem_size, int *psize, int req_size) { int new_size; size_t slack; void *new_array; /* XXX: potential arithmetic overflow */ new_size = max_int(req_size, *psize * 3 / 2); new_array = js_realloc2(ctx, *parray, new_size * elem_size, &slack); if (!new_array) return -1; new_size += slack / elem_size; *psize = new_size; *parray = new_array; return 0; } /* resize the array and update its size if req_size > *psize */ static inline int js_resize_array(JSContext *ctx, void **parray, int elem_size, int *psize, int req_size) { if (unlikely(req_size > *psize)) return js_realloc_array(ctx, parray, elem_size, psize, req_size); else return 0; } static inline void js_dbuf_init(JSContext *ctx, DynBuf *s) { dbuf_init2(s, ctx->rt, (DynBufReallocFunc *)js_realloc_rt); } static inline int is_digit(int c) { return c >= '0' && c <= '9'; } typedef struct JSClassShortDef { JSAtom class_name; JSClassFinalizer *finalizer; JSClassGCMark *gc_mark; } JSClassShortDef; static JSClassShortDef const js_std_class_def[] = { { JS_ATOM_Object, NULL, NULL }, /* JS_CLASS_OBJECT */ { JS_ATOM_Array, js_array_finalizer, js_array_mark }, /* JS_CLASS_ARRAY */ { JS_ATOM_Error, NULL, NULL }, /* JS_CLASS_ERROR */ { JS_ATOM_Number, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_NUMBER */ { JS_ATOM_String, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_STRING */ { JS_ATOM_Boolean, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_BOOLEAN */ { JS_ATOM_Symbol, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_SYMBOL */ { JS_ATOM_Arguments, js_array_finalizer, js_array_mark }, /* JS_CLASS_ARGUMENTS */ { JS_ATOM_Arguments, NULL, NULL }, /* JS_CLASS_MAPPED_ARGUMENTS */ { JS_ATOM_Date, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_DATE */ { JS_ATOM_Object, NULL, NULL }, /* JS_CLASS_MODULE_NS */ { JS_ATOM_Function, js_c_function_finalizer, js_c_function_mark }, /* JS_CLASS_C_FUNCTION */ { JS_ATOM_Function, js_bytecode_function_finalizer, js_bytecode_function_mark }, /* JS_CLASS_BYTECODE_FUNCTION */ { JS_ATOM_Function, js_bound_function_finalizer, js_bound_function_mark }, /* JS_CLASS_BOUND_FUNCTION */ { JS_ATOM_Function, js_c_function_data_finalizer, js_c_function_data_mark }, /* JS_CLASS_C_FUNCTION_DATA */ { JS_ATOM_GeneratorFunction, js_bytecode_function_finalizer, js_bytecode_function_mark }, /* JS_CLASS_GENERATOR_FUNCTION */ { JS_ATOM_ForInIterator, js_for_in_iterator_finalizer, js_for_in_iterator_mark }, /* JS_CLASS_FOR_IN_ITERATOR */ { JS_ATOM_RegExp, js_regexp_finalizer, NULL }, /* JS_CLASS_REGEXP */ { JS_ATOM_ArrayBuffer, js_array_buffer_finalizer, NULL }, /* JS_CLASS_ARRAY_BUFFER */ { JS_ATOM_SharedArrayBuffer, js_array_buffer_finalizer, NULL }, /* JS_CLASS_SHARED_ARRAY_BUFFER */ { JS_ATOM_Uint8ClampedArray, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_UINT8C_ARRAY */ { JS_ATOM_Int8Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_INT8_ARRAY */ { JS_ATOM_Uint8Array, js_typed_array_finalizer, js_typed_array_mark }, /* 
JS_CLASS_UINT8_ARRAY */ { JS_ATOM_Int16Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_INT16_ARRAY */ { JS_ATOM_Uint16Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_UINT16_ARRAY */ { JS_ATOM_Int32Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_INT32_ARRAY */ { JS_ATOM_Uint32Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_UINT32_ARRAY */ { JS_ATOM_BigInt64Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_BIG_INT64_ARRAY */ { JS_ATOM_BigUint64Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_BIG_UINT64_ARRAY */ { JS_ATOM_Float32Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_FLOAT32_ARRAY */ { JS_ATOM_Float64Array, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_FLOAT64_ARRAY */ { JS_ATOM_DataView, js_typed_array_finalizer, js_typed_array_mark }, /* JS_CLASS_DATAVIEW */ { JS_ATOM_BigInt, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_BIG_INT */ #ifdef CONFIG_BIGNUM { JS_ATOM_BigFloat, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_BIG_FLOAT */ { JS_ATOM_BigFloatEnv, js_float_env_finalizer, NULL }, /* JS_CLASS_FLOAT_ENV */ { JS_ATOM_BigDecimal, js_object_data_finalizer, js_object_data_mark }, /* JS_CLASS_BIG_DECIMAL */ { JS_ATOM_OperatorSet, js_operator_set_finalizer, js_operator_set_mark }, /* JS_CLASS_OPERATOR_SET */ #endif { JS_ATOM_Map, js_map_finalizer, js_map_mark }, /* JS_CLASS_MAP */ { JS_ATOM_Set, js_map_finalizer, js_map_mark }, /* JS_CLASS_SET */ { JS_ATOM_WeakMap, js_map_finalizer, js_map_mark }, /* JS_CLASS_WEAKMAP */ { JS_ATOM_WeakSet, js_map_finalizer, js_map_mark }, /* JS_CLASS_WEAKSET */ { JS_ATOM_Map_Iterator, js_map_iterator_finalizer, js_map_iterator_mark }, /* JS_CLASS_MAP_ITERATOR */ { JS_ATOM_Set_Iterator, js_map_iterator_finalizer, js_map_iterator_mark }, /* JS_CLASS_SET_ITERATOR */ { JS_ATOM_Array_Iterator, js_array_iterator_finalizer, js_array_iterator_mark }, /* JS_CLASS_ARRAY_ITERATOR */ { JS_ATOM_String_Iterator, js_array_iterator_finalizer, js_array_iterator_mark }, /* JS_CLASS_STRING_ITERATOR */ { JS_ATOM_RegExp_String_Iterator, js_regexp_string_iterator_finalizer, js_regexp_string_iterator_mark }, /* JS_CLASS_REGEXP_STRING_ITERATOR */ { JS_ATOM_Generator, js_generator_finalizer, js_generator_mark }, /* JS_CLASS_GENERATOR */ }; static int init_class_range(JSRuntime *rt, JSClassShortDef const *tab, int start, int count) { JSClassDef cm_s, *cm = &cm_s; int i, class_id; for(i = 0; i < count; i++) { class_id = i + start; memset(cm, 0, sizeof(*cm)); cm->finalizer = tab[i].finalizer; cm->gc_mark = tab[i].gc_mark; if (JS_NewClass1(rt, class_id, cm, tab[i].class_name) < 0) return -1; } return 0; } static JSValue JS_ThrowUnsupportedOperation(JSContext *ctx) { return JS_ThrowTypeError(ctx, "unsupported operation"); } static JSValue invalid_to_string(JSContext *ctx, JSValueConst val) { return JS_ThrowUnsupportedOperation(ctx); } static JSValue invalid_from_string(JSContext *ctx, const char *buf, int radix, int flags, slimb_t *pexponent) { return JS_NAN; } static int invalid_unary_arith(JSContext *ctx, JSValue *pres, OPCodeEnum op, JSValue op1) { JS_FreeValue(ctx, op1); JS_ThrowUnsupportedOperation(ctx); return -1; } static int invalid_binary_arith(JSContext *ctx, OPCodeEnum op, JSValue *pres, JSValue op1, JSValue op2) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); JS_ThrowUnsupportedOperation(ctx); return -1; } static JSValue invalid_mul_pow10_to_float64(JSContext *ctx, const bf_t *a, int64_t 
exponent) { return JS_ThrowUnsupportedOperation(ctx); } static int invalid_mul_pow10(JSContext *ctx, JSValue *sp) { JS_ThrowUnsupportedOperation(ctx); return -1; } static void set_dummy_numeric_ops(JSNumericOperations *ops) { ops->to_string = invalid_to_string; ops->from_string = invalid_from_string; ops->unary_arith = invalid_unary_arith; ops->binary_arith = invalid_binary_arith; ops->mul_pow10_to_float64 = invalid_mul_pow10_to_float64; ops->mul_pow10 = invalid_mul_pow10; } #if !defined(CONFIG_STACK_CHECK) /* no stack limitation */ static inline uintptr_t js_get_stack_pointer(void) { return 0; } static inline BOOL js_check_stack_overflow(JSRuntime *rt, size_t alloca_size) { return FALSE; } #else /* Note: OS and CPU dependent */ static inline uintptr_t js_get_stack_pointer(void) { return (uintptr_t)__builtin_frame_address(0); } static inline BOOL js_check_stack_overflow(JSRuntime *rt, size_t alloca_size) { uintptr_t sp; sp = js_get_stack_pointer() - alloca_size; return unlikely(sp < rt->stack_limit); } #endif JSRuntime *JS_NewRuntime2(const JSMallocFunctions *mf, void *opaque) { JSRuntime *rt; JSMallocState ms; memset(&ms, 0, sizeof(ms)); ms.opaque = opaque; ms.malloc_limit = -1; rt = mf->js_malloc(&ms, sizeof(JSRuntime)); if (!rt) return NULL; memset(rt, 0, sizeof(*rt)); rt->mf = *mf; if (!rt->mf.js_malloc_usable_size) { /* use dummy function if none provided */ rt->mf.js_malloc_usable_size = js_malloc_usable_size_unknown; } rt->malloc_state = ms; rt->malloc_gc_threshold = 256 * 1024; bf_context_init(&rt->bf_ctx, js_bf_realloc, rt); set_dummy_numeric_ops(&rt->bigint_ops); #ifdef CONFIG_BIGNUM set_dummy_numeric_ops(&rt->bigfloat_ops); set_dummy_numeric_ops(&rt->bigdecimal_ops); #endif init_list_head(&rt->context_list); init_list_head(&rt->gc_obj_list); init_list_head(&rt->gc_zero_ref_count_list); rt->gc_phase = JS_GC_PHASE_NONE; #ifdef DUMP_LEAKS init_list_head(&rt->string_list); #endif init_list_head(&rt->job_list); if (JS_InitAtoms(rt)) goto fail; /* create the object, array and function classes */ if (init_class_range(rt, js_std_class_def, JS_CLASS_OBJECT, countof(js_std_class_def)) < 0) goto fail; rt->class_array[JS_CLASS_ARGUMENTS].exotic = &js_arguments_exotic_methods; rt->class_array[JS_CLASS_STRING].exotic = &js_string_exotic_methods; rt->class_array[JS_CLASS_MODULE_NS].exotic = &js_module_ns_exotic_methods; rt->class_array[JS_CLASS_C_FUNCTION].call = js_call_c_function; rt->class_array[JS_CLASS_C_FUNCTION_DATA].call = js_c_function_data_call; rt->class_array[JS_CLASS_BOUND_FUNCTION].call = js_call_bound_function; rt->class_array[JS_CLASS_GENERATOR_FUNCTION].call = js_generator_function_call; if (init_shape_hash(rt)) goto fail; rt->stack_size = JS_DEFAULT_STACK_SIZE; JS_UpdateStackTop(rt); rt->current_exception = JS_NULL; return rt; fail: JS_FreeRuntime(rt); return NULL; } void *JS_GetRuntimeOpaque(JSRuntime *rt) { return rt->user_opaque; } void JS_SetRuntimeOpaque(JSRuntime *rt, void *opaque) { rt->user_opaque = opaque; } /* default memory allocation functions with memory limitation */ static size_t js_def_malloc_usable_size(const void *ptr) { #if defined(__APPLE__) return malloc_size(ptr); #elif defined(_WIN32) return _msize((void *)ptr); #elif defined(EMSCRIPTEN) return 0; #elif defined(__linux__) return malloc_usable_size((void *)ptr); #else /* change this to `return 0;` if compilation fails */ return malloc_usable_size((void *)ptr); #endif } static void *js_def_malloc(JSMallocState *s, size_t size) { void *ptr; /* Do not allocate zero bytes: behavior is platform dependent */ 
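    /* The limit check just below is what makes JS_SetMemoryLimit() effective:
       every allocation is accounted in JSMallocState.malloc_size (plus
       MALLOC_OVERHEAD) and refused once malloc_limit would be exceeded.
       Typical embedder usage, as a sketch (the 64 MiB / 8 MiB figures are
       arbitrary example values, not defaults):

           JSRuntime *rt = JS_NewRuntime();
           JS_SetMemoryLimit(rt, (size_t)64 * 1024 * 1024);
           JS_SetGCThreshold(rt, (size_t)8 * 1024 * 1024);

       Both setters are defined a little further down in this file. */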
assert(size != 0); if (unlikely(s->malloc_size + size > s->malloc_limit)) return NULL; ptr = malloc(size); if (!ptr) return NULL; s->malloc_count++; s->malloc_size += js_def_malloc_usable_size(ptr) + MALLOC_OVERHEAD; return ptr; } static void js_def_free(JSMallocState *s, void *ptr) { if (!ptr) return; s->malloc_count--; s->malloc_size -= js_def_malloc_usable_size(ptr) + MALLOC_OVERHEAD; free(ptr); } static void *js_def_realloc(JSMallocState *s, void *ptr, size_t size) { size_t old_size; if (!ptr) { if (size == 0) return NULL; return js_def_malloc(s, size); } old_size = js_def_malloc_usable_size(ptr); if (size == 0) { s->malloc_count--; s->malloc_size -= old_size + MALLOC_OVERHEAD; free(ptr); return NULL; } if (s->malloc_size + size - old_size > s->malloc_limit) return NULL; ptr = realloc(ptr, size); if (!ptr) return NULL; s->malloc_size += js_def_malloc_usable_size(ptr) - old_size; return ptr; } static const JSMallocFunctions def_malloc_funcs = { js_def_malloc, js_def_free, js_def_realloc, js_def_malloc_usable_size, }; JSRuntime *JS_NewRuntime(void) { return JS_NewRuntime2(&def_malloc_funcs, NULL); } void JS_SetMemoryLimit(JSRuntime *rt, size_t limit) { rt->malloc_state.malloc_limit = limit; } /* use -1 to disable automatic GC */ void JS_SetGCThreshold(JSRuntime *rt, size_t gc_threshold) { rt->malloc_gc_threshold = gc_threshold; } #define malloc(s) malloc_is_forbidden(s) #define free(p) free_is_forbidden(p) #define realloc(p,s) realloc_is_forbidden(p,s) void JS_SetInterruptHandler(JSRuntime *rt, JSInterruptHandler *cb, void *opaque) { rt->interrupt_handler = cb; rt->interrupt_opaque = opaque; } void JS_SetCanBlock(JSRuntime *rt, BOOL can_block) { rt->can_block = can_block; } void JS_SetSharedArrayBufferFunctions(JSRuntime *rt, const JSSharedArrayBufferFunctions *sf) { rt->sab_funcs = *sf; } /* return 0 if OK, < 0 if exception */ int JS_EnqueueJob(JSContext *ctx, JSJobFunc *job_func, int argc, JSValueConst *argv) { JSRuntime *rt = ctx->rt; JSJobEntry *e; int i; e = js_malloc(ctx, sizeof(*e) + argc * sizeof(JSValue)); if (!e) return -1; e->ctx = ctx; e->job_func = job_func; e->argc = argc; for(i = 0; i < argc; i++) { e->argv[i] = JS_DupValue(ctx, argv[i]); } list_add_tail(&e->link, &rt->job_list); return 0; } BOOL JS_IsJobPending(JSRuntime *rt) { return !list_empty(&rt->job_list); } /* return < 0 if exception, 0 if no job pending, 1 if a job was executed successfully. 
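   A typical embedder drains the queue in a loop after each evaluation.
   Illustrative sketch only (the error-reporting helper is a made-up name,
   not part of this API):

       JSContext *job_ctx;
       for (;;) {
           int r = JS_ExecutePendingJob(rt, &job_ctx);
           if (r <= 0) {
               if (r < 0)
                   report_job_exception(job_ctx);
               break;
           }
       }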
the context of the job is stored in '*pctx' */ int JS_ExecutePendingJob(JSRuntime *rt, JSContext **pctx) { JSContext *ctx; JSJobEntry *e; JSValue res; int i, ret; if (list_empty(&rt->job_list)) { *pctx = NULL; return 0; } /* get the first pending job and execute it */ e = list_entry(rt->job_list.next, JSJobEntry, link); list_del(&e->link); ctx = e->ctx; res = e->job_func(e->ctx, e->argc, (JSValueConst *)e->argv); for(i = 0; i < e->argc; i++) JS_FreeValue(ctx, e->argv[i]); if (JS_IsException(res)) ret = -1; else ret = 1; JS_FreeValue(ctx, res); js_free(ctx, e); *pctx = ctx; return ret; } static inline uint32_t atom_get_free(const JSAtomStruct *p) { return (uintptr_t)p >> 1; } static inline BOOL atom_is_free(const JSAtomStruct *p) { return (uintptr_t)p & 1; } static inline JSAtomStruct *atom_set_free(uint32_t v) { return (JSAtomStruct *)(((uintptr_t)v << 1) | 1); } /* Note: the string contents are uninitialized */ static JSString *js_alloc_string_rt(JSRuntime *rt, int max_len, int is_wide_char) { JSString *str; str = js_malloc_rt(rt, sizeof(JSString) + (max_len << is_wide_char) + 1 - is_wide_char); if (unlikely(!str)) return NULL; str->header.ref_count = 1; str->is_wide_char = is_wide_char; str->len = max_len; str->atom_type = 0; str->hash = 0; /* optional but costless */ str->hash_next = 0; /* optional */ #ifdef DUMP_LEAKS list_add_tail(&str->link, &rt->string_list); #endif return str; } static JSString *js_alloc_string(JSContext *ctx, int max_len, int is_wide_char) { JSString *p; p = js_alloc_string_rt(ctx->rt, max_len, is_wide_char); if (unlikely(!p)) { JS_ThrowOutOfMemory(ctx); return NULL; } return p; } /* same as JS_FreeValueRT() but faster */ static inline void js_free_string(JSRuntime *rt, JSString *str) { if (--str->header.ref_count <= 0) { if (str->atom_type) { JS_FreeAtomStruct(rt, str); } else { #ifdef DUMP_LEAKS list_del(&str->link); #endif js_free_rt(rt, str); } } } void JS_SetRuntimeInfo(JSRuntime *rt, const char *s) { if (rt) rt->rt_info = s; } void JS_FreeRuntime(JSRuntime *rt) { struct list_head *el, *el1; int i; JS_FreeValueRT(rt, rt->current_exception); list_for_each_safe(el, el1, &rt->job_list) { JSJobEntry *e = list_entry(el, JSJobEntry, link); for(i = 0; i < e->argc; i++) JS_FreeValueRT(rt, e->argv[i]); js_free_rt(rt, e); } init_list_head(&rt->job_list); JS_RunGC(rt); #ifdef DUMP_LEAKS /* leaking objects */ { BOOL header_done; JSGCObjectHeader *p; int count; /* remove the internal refcounts to display only the object referenced externally */ list_for_each(el, &rt->gc_obj_list) { p = list_entry(el, JSGCObjectHeader, link); p->mark = 0; } gc_decref(rt); header_done = FALSE; list_for_each(el, &rt->gc_obj_list) { p = list_entry(el, JSGCObjectHeader, link); if (p->ref_count != 0) { if (!header_done) { printf("Object leaks:\n"); JS_DumpObjectHeader(rt); header_done = TRUE; } JS_DumpGCObject(rt, p); } } count = 0; list_for_each(el, &rt->gc_obj_list) { p = list_entry(el, JSGCObjectHeader, link); if (p->ref_count == 0) { count++; } } if (count != 0) printf("Secondary object leaks: %d\n", count); } #endif assert(list_empty(&rt->gc_obj_list)); /* free the classes */ for(i = 0; i < rt->class_count; i++) { JSClass *cl = &rt->class_array[i]; if (cl->class_id != 0) { JS_FreeAtomRT(rt, cl->class_name); } } js_free_rt(rt, rt->class_array); bf_context_end(&rt->bf_ctx); #ifdef DUMP_LEAKS /* only the atoms defined in JS_InitAtoms() should be left */ { BOOL header_done = FALSE; for(i = 0; i < rt->atom_size; i++) { JSAtomStruct *p = rt->atom_array[i]; if (!atom_is_free(p) /* && p->str*/) { 
if (i >= JS_ATOM_END || p->header.ref_count != 1) { if (!header_done) { header_done = TRUE; if (rt->rt_info) { printf("%s:1: atom leakage:", rt->rt_info); } else { printf("Atom leaks:\n" " %6s %6s %s\n", "ID", "REFCNT", "NAME"); } } if (rt->rt_info) { printf(" "); } else { printf(" %6u %6u ", i, p->header.ref_count); } switch (p->atom_type) { case JS_ATOM_TYPE_STRING: JS_DumpString(rt, p); break; case JS_ATOM_TYPE_GLOBAL_SYMBOL: printf("Symbol.for("); JS_DumpString(rt, p); printf(")"); break; case JS_ATOM_TYPE_SYMBOL: if (p->hash == JS_ATOM_HASH_SYMBOL) { printf("Symbol("); JS_DumpString(rt, p); printf(")"); } else { printf("Private("); JS_DumpString(rt, p); printf(")"); } break; } if (rt->rt_info) { printf(":%u", p->header.ref_count); } else { printf("\n"); } } } } if (rt->rt_info && header_done) printf("\n"); } #endif /* free the atoms */ for(i = 0; i < rt->atom_size; i++) { JSAtomStruct *p = rt->atom_array[i]; if (!atom_is_free(p)) { #ifdef DUMP_LEAKS list_del(&p->link); #endif js_free_rt(rt, p); } } js_free_rt(rt, rt->atom_array); js_free_rt(rt, rt->atom_hash); js_free_rt(rt, rt->shape_hash); #ifdef DUMP_LEAKS if (!list_empty(&rt->string_list)) { if (rt->rt_info) { printf("%s:1: string leakage:", rt->rt_info); } else { printf("String leaks:\n" " %6s %s\n", "REFCNT", "VALUE"); } list_for_each_safe(el, el1, &rt->string_list) { JSString *str = list_entry(el, JSString, link); if (rt->rt_info) { printf(" "); } else { printf(" %6u ", str->header.ref_count); } JS_DumpString(rt, str); if (rt->rt_info) { printf(":%u", str->header.ref_count); } else { printf("\n"); } list_del(&str->link); js_free_rt(rt, str); } if (rt->rt_info) printf("\n"); } { JSMallocState *s = &rt->malloc_state; if (s->malloc_count > 1) { if (rt->rt_info) printf("%s:1: ", rt->rt_info); printf("Memory leak: %"PRIu64" bytes lost in %"PRIu64" block%s\n", (uint64_t)(s->malloc_size - sizeof(JSRuntime)), (uint64_t)(s->malloc_count - 1), &"s"[s->malloc_count == 2]); } } #endif { JSMallocState ms = rt->malloc_state; rt->mf.js_free(&ms, rt); } } JSContext *JS_NewContextRaw(JSRuntime *rt) { JSContext *ctx; int i; ctx = js_mallocz_rt(rt, sizeof(JSContext)); if (!ctx) return NULL; ctx->header.ref_count = 1; add_gc_object(rt, &ctx->header, JS_GC_OBJ_TYPE_JS_CONTEXT); ctx->class_proto = js_malloc_rt(rt, sizeof(ctx->class_proto[0]) * rt->class_count); if (!ctx->class_proto) { js_free_rt(rt, ctx); return NULL; } ctx->rt = rt; list_add_tail(&ctx->link, &rt->context_list); ctx->bf_ctx = &rt->bf_ctx; #ifdef CONFIG_BIGNUM ctx->fp_env.prec = 113; ctx->fp_env.flags = bf_set_exp_bits(15) | BF_RNDN | BF_FLAG_SUBNORMAL; #endif for(i = 0; i < rt->class_count; i++) ctx->class_proto[i] = JS_NULL; ctx->array_ctor = JS_NULL; ctx->regexp_ctor = JS_NULL; ctx->promise_ctor = JS_NULL; init_list_head(&ctx->loaded_modules); JS_AddIntrinsicBasicObjects(ctx); return ctx; } JSContext *JS_NewContext(JSRuntime *rt) { JSContext *ctx; ctx = JS_NewContextRaw(rt); if (!ctx) return NULL; JS_AddIntrinsicBaseObjects(ctx); JS_AddIntrinsicDate(ctx); JS_AddIntrinsicEval(ctx); JS_AddIntrinsicStringNormalize(ctx); JS_AddIntrinsicRegExp(ctx); JS_AddIntrinsicJSON(ctx); JS_AddIntrinsicProxy(ctx); JS_AddIntrinsicMapSet(ctx); JS_AddIntrinsicTypedArrays(ctx); JS_AddIntrinsicPromise(ctx); JS_AddIntrinsicBigInt(ctx); return ctx; } void *JS_GetContextOpaque(JSContext *ctx) { return ctx->user_opaque; } void JS_SetContextOpaque(JSContext *ctx, void *opaque) { ctx->user_opaque = opaque; } /* set the new value and free the old value after (freeing the value can reallocate the object data) 
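   i.e. if the old value were freed first, the free could reallocate the
   memory that holds *pval, and a subsequent store would then write to a
   stale location; assigning new_val before the free avoids this.
   JS_SetClassProto() below relies on this helper.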
*/ static inline void set_value(JSContext *ctx, JSValue *pval, JSValue new_val) { JSValue old_val; old_val = *pval; *pval = new_val; JS_FreeValue(ctx, old_val); } void JS_SetClassProto(JSContext *ctx, JSClassID class_id, JSValue obj) { JSRuntime *rt = ctx->rt; assert(class_id < rt->class_count); set_value(ctx, &ctx->class_proto[class_id], obj); } JSValue JS_GetClassProto(JSContext *ctx, JSClassID class_id) { JSRuntime *rt = ctx->rt; assert(class_id < rt->class_count); return JS_DupValue(ctx, ctx->class_proto[class_id]); } typedef enum JSFreeModuleEnum { JS_FREE_MODULE_ALL, JS_FREE_MODULE_NOT_RESOLVED, } JSFreeModuleEnum; /* XXX: would be more efficient with separate module lists */ static void js_free_modules(JSContext *ctx, JSFreeModuleEnum flag) { struct list_head *el, *el1; list_for_each_safe(el, el1, &ctx->loaded_modules) { JSModuleDef *m = list_entry(el, JSModuleDef, link); if (flag == JS_FREE_MODULE_ALL || (flag == JS_FREE_MODULE_NOT_RESOLVED && !m->resolved)) { js_free_module_def(ctx, m); } } } JSContext *JS_DupContext(JSContext *ctx) { ctx->header.ref_count++; return ctx; } /* used by the GC */ static void JS_MarkContext(JSRuntime *rt, JSContext *ctx, JS_MarkFunc *mark_func) { int i; struct list_head *el; /* modules are not seen by the GC, so we directly mark the objects referenced by each module */ list_for_each(el, &ctx->loaded_modules) { JSModuleDef *m = list_entry(el, JSModuleDef, link); js_mark_module_def(rt, m, mark_func); } JS_MarkValue(rt, ctx->global_obj, mark_func); JS_MarkValue(rt, ctx->global_var_obj, mark_func); JS_MarkValue(rt, ctx->throw_type_error, mark_func); JS_MarkValue(rt, ctx->eval_obj, mark_func); JS_MarkValue(rt, ctx->array_proto_values, mark_func); for(i = 0; i < JS_NATIVE_ERROR_COUNT; i++) { JS_MarkValue(rt, ctx->native_error_proto[i], mark_func); } for(i = 0; i < rt->class_count; i++) { JS_MarkValue(rt, ctx->class_proto[i], mark_func); } JS_MarkValue(rt, ctx->iterator_proto, mark_func); JS_MarkValue(rt, ctx->async_iterator_proto, mark_func); JS_MarkValue(rt, ctx->promise_ctor, mark_func); JS_MarkValue(rt, ctx->array_ctor, mark_func); JS_MarkValue(rt, ctx->regexp_ctor, mark_func); JS_MarkValue(rt, ctx->function_ctor, mark_func); JS_MarkValue(rt, ctx->function_proto, mark_func); if (ctx->array_shape) mark_func(rt, &ctx->array_shape->header); } void JS_FreeContext(JSContext *ctx) { JSRuntime *rt = ctx->rt; int i; if (--ctx->header.ref_count > 0) return; assert(ctx->header.ref_count == 0); #ifdef DUMP_ATOMS JS_DumpAtoms(ctx->rt); #endif #ifdef DUMP_SHAPES JS_DumpShapes(ctx->rt); #endif #ifdef DUMP_OBJECTS { struct list_head *el; JSGCObjectHeader *p; printf("JSObjects: {\n"); JS_DumpObjectHeader(ctx->rt); list_for_each(el, &rt->gc_obj_list) { p = list_entry(el, JSGCObjectHeader, link); JS_DumpGCObject(rt, p); } printf("}\n"); } #endif #ifdef DUMP_MEM { JSMemoryUsage stats; JS_ComputeMemoryUsage(rt, &stats); JS_DumpMemoryUsage(stdout, &stats, rt); } #endif js_free_modules(ctx, JS_FREE_MODULE_ALL); JS_FreeValue(ctx, ctx->global_obj); JS_FreeValue(ctx, ctx->global_var_obj); JS_FreeValue(ctx, ctx->throw_type_error); JS_FreeValue(ctx, ctx->eval_obj); JS_FreeValue(ctx, ctx->array_proto_values); for(i = 0; i < JS_NATIVE_ERROR_COUNT; i++) { JS_FreeValue(ctx, ctx->native_error_proto[i]); } for(i = 0; i < rt->class_count; i++) { JS_FreeValue(ctx, ctx->class_proto[i]); } js_free_rt(rt, ctx->class_proto); JS_FreeValue(ctx, ctx->iterator_proto); JS_FreeValue(ctx, ctx->async_iterator_proto); JS_FreeValue(ctx, ctx->promise_ctor); JS_FreeValue(ctx, ctx->array_ctor); 
JS_FreeValue(ctx, ctx->regexp_ctor); JS_FreeValue(ctx, ctx->function_ctor); JS_FreeValue(ctx, ctx->function_proto); js_free_shape_null(ctx->rt, ctx->array_shape); list_del(&ctx->link); remove_gc_object(&ctx->header); js_free_rt(ctx->rt, ctx); } JSRuntime *JS_GetRuntime(JSContext *ctx) { return ctx->rt; } static void update_stack_limit(JSRuntime *rt) { if (rt->stack_size == 0) { rt->stack_limit = 0; /* no limit */ } else { rt->stack_limit = rt->stack_top - rt->stack_size; } } void JS_SetMaxStackSize(JSRuntime *rt, size_t stack_size) { rt->stack_size = stack_size; update_stack_limit(rt); } void JS_UpdateStackTop(JSRuntime *rt) { rt->stack_top = js_get_stack_pointer(); update_stack_limit(rt); } static inline BOOL is_strict_mode(JSContext *ctx) { JSStackFrame *sf = ctx->rt->current_stack_frame; return (sf && (sf->js_mode & JS_MODE_STRICT)); } #ifdef CONFIG_BIGNUM static inline BOOL is_math_mode(JSContext *ctx) { JSStackFrame *sf = ctx->rt->current_stack_frame; return (sf && (sf->js_mode & JS_MODE_MATH)); } #else static inline BOOL is_math_mode(JSContext *ctx) { return FALSE; } #endif /* JSAtom support */ #define JS_ATOM_TAG_INT (1U << 31) #define JS_ATOM_MAX_INT (JS_ATOM_TAG_INT - 1) #define JS_ATOM_MAX ((1U << 30) - 1) /* return the max count from the hash size */ #define JS_ATOM_COUNT_RESIZE(n) ((n) * 2) static inline BOOL __JS_AtomIsConst(JSAtom v) { #if defined(DUMP_LEAKS) && DUMP_LEAKS > 1 return (int32_t)v <= 0; #else return (int32_t)v < JS_ATOM_END; #endif } static inline BOOL __JS_AtomIsTaggedInt(JSAtom v) { return (v & JS_ATOM_TAG_INT) != 0; } static inline JSAtom __JS_AtomFromUInt32(uint32_t v) { return v | JS_ATOM_TAG_INT; } static inline uint32_t __JS_AtomToUInt32(JSAtom atom) { return atom & ~JS_ATOM_TAG_INT; } static inline int is_num(int c) { return c >= '0' && c <= '9'; } /* return TRUE if the string is a number n with 0 <= n <= 2^32-1 */ static inline BOOL is_num_string(uint32_t *pval, const JSString *p) { uint32_t n; uint64_t n64; int c, i, len; len = p->len; if (len == 0 || len > 10) return FALSE; if (p->is_wide_char) c = p->u.str16[0]; else c = p->u.str8[0]; if (is_num(c)) { if (c == '0') { if (len != 1) return FALSE; n = 0; } else { n = c - '0'; for(i = 1; i < len; i++) { if (p->is_wide_char) c = p->u.str16[i]; else c = p->u.str8[i]; if (!is_num(c)) return FALSE; n64 = (uint64_t)n * 10 + (c - '0'); if ((n64 >> 32) != 0) return FALSE; n = n64; } } *pval = n; return TRUE; } else { return FALSE; } } /* XXX: could use faster version ? */ static inline uint32_t hash_string8(const uint8_t *str, size_t len, uint32_t h) { size_t i; for(i = 0; i < len; i++) h = h * 263 + str[i]; return h; } static inline uint32_t hash_string16(const uint16_t *str, size_t len, uint32_t h) { size_t i; for(i = 0; i < len; i++) h = h * 263 + str[i]; return h; } static uint32_t hash_string(const JSString *str, uint32_t h) { if (str->is_wide_char) h = hash_string16(str->u.str16, str->len, h); else h = hash_string8(str->u.str8, str->len, h); return h; } static __maybe_unused void JS_DumpString(JSRuntime *rt, const JSString *p) { int i, c, sep; if (p == NULL) { printf(""); return; } printf("%d", p->header.ref_count); sep = (p->header.ref_count == 1) ? 
'\"' : '\''; putchar(sep); for(i = 0; i < p->len; i++) { if (p->is_wide_char) c = p->u.str16[i]; else c = p->u.str8[i]; if (c == sep || c == '\\') { putchar('\\'); putchar(c); } else if (c >= ' ' && c <= 126) { putchar(c); } else if (c == '\n') { putchar('\\'); putchar('n'); } else { printf("\\u%04x", c); } } putchar(sep); } static __maybe_unused void JS_DumpAtoms(JSRuntime *rt) { JSAtomStruct *p; int h, i; /* This only dumps hashed atoms, not JS_ATOM_TYPE_SYMBOL atoms */ printf("JSAtom count=%d size=%d hash_size=%d:\n", rt->atom_count, rt->atom_size, rt->atom_hash_size); printf("JSAtom hash table: {\n"); for(i = 0; i < rt->atom_hash_size; i++) { h = rt->atom_hash[i]; if (h) { printf(" %d:", i); while (h) { p = rt->atom_array[h]; printf(" "); JS_DumpString(rt, p); h = p->hash_next; } printf("\n"); } } printf("}\n"); printf("JSAtom table: {\n"); for(i = 0; i < rt->atom_size; i++) { p = rt->atom_array[i]; if (!atom_is_free(p)) { printf(" %d: { %d %08x ", i, p->atom_type, p->hash); if (!(p->len == 0 && p->is_wide_char != 0)) JS_DumpString(rt, p); printf(" %d }\n", p->hash_next); } } printf("}\n"); } static int JS_ResizeAtomHash(JSRuntime *rt, int new_hash_size) { JSAtomStruct *p; uint32_t new_hash_mask, h, i, hash_next1, j, *new_hash; assert((new_hash_size & (new_hash_size - 1)) == 0); /* power of two */ new_hash_mask = new_hash_size - 1; new_hash = js_mallocz_rt(rt, sizeof(rt->atom_hash[0]) * new_hash_size); if (!new_hash) return -1; for(i = 0; i < rt->atom_hash_size; i++) { h = rt->atom_hash[i]; while (h != 0) { p = rt->atom_array[h]; hash_next1 = p->hash_next; /* add in new hash table */ j = p->hash & new_hash_mask; p->hash_next = new_hash[j]; new_hash[j] = h; h = hash_next1; } } js_free_rt(rt, rt->atom_hash); rt->atom_hash = new_hash; rt->atom_hash_size = new_hash_size; rt->atom_count_resize = JS_ATOM_COUNT_RESIZE(new_hash_size); // JS_DumpAtoms(rt); return 0; } static int JS_InitAtoms(JSRuntime *rt) { int i, len, atom_type; const char *p; rt->atom_hash_size = 0; rt->atom_hash = NULL; rt->atom_count = 0; rt->atom_size = 0; rt->atom_free_index = 0; if (JS_ResizeAtomHash(rt, 256)) /* there are at least 195 predefined atoms */ return -1; p = js_atom_init; for(i = 1; i < JS_ATOM_END; i++) { if (i == JS_ATOM_Private_brand) atom_type = JS_ATOM_TYPE_PRIVATE; else if (i >= JS_ATOM_Symbol_toPrimitive) atom_type = JS_ATOM_TYPE_SYMBOL; else atom_type = JS_ATOM_TYPE_STRING; len = strlen(p); if (__JS_NewAtomInit(rt, p, len, atom_type) == JS_ATOM_NULL) return -1; p = p + len + 1; } return 0; } static JSAtom JS_DupAtomRT(JSRuntime *rt, JSAtom v) { JSAtomStruct *p; if (!__JS_AtomIsConst(v)) { p = rt->atom_array[v]; p->header.ref_count++; } return v; } JSAtom JS_DupAtom(JSContext *ctx, JSAtom v) { JSRuntime *rt; JSAtomStruct *p; if (!__JS_AtomIsConst(v)) { rt = ctx->rt; p = rt->atom_array[v]; p->header.ref_count++; } return v; } static JSAtomKindEnum JS_AtomGetKind(JSContext *ctx, JSAtom v) { JSRuntime *rt; JSAtomStruct *p; rt = ctx->rt; if (__JS_AtomIsTaggedInt(v)) return JS_ATOM_KIND_STRING; p = rt->atom_array[v]; switch(p->atom_type) { case JS_ATOM_TYPE_STRING: return JS_ATOM_KIND_STRING; case JS_ATOM_TYPE_GLOBAL_SYMBOL: return JS_ATOM_KIND_SYMBOL; case JS_ATOM_TYPE_SYMBOL: switch(p->hash) { case JS_ATOM_HASH_SYMBOL: return JS_ATOM_KIND_SYMBOL; case JS_ATOM_HASH_PRIVATE: return JS_ATOM_KIND_PRIVATE; default: abort(); } default: abort(); } } static BOOL JS_AtomIsString(JSContext *ctx, JSAtom v) { return JS_AtomGetKind(ctx, v) == JS_ATOM_KIND_STRING; } static JSAtom js_get_atom_index(JSRuntime *rt, 
JSAtomStruct *p) { uint32_t i = p->hash_next; /* atom_index */ if (p->atom_type != JS_ATOM_TYPE_SYMBOL) { JSAtomStruct *p1; i = rt->atom_hash[p->hash & (rt->atom_hash_size - 1)]; p1 = rt->atom_array[i]; while (p1 != p) { assert(i != 0); i = p1->hash_next; p1 = rt->atom_array[i]; } } return i; } /* string case (internal). Return JS_ATOM_NULL if error. 'str' is freed. */ static JSAtom __JS_NewAtom(JSRuntime *rt, JSString *str, int atom_type) { uint32_t h, h1, i; JSAtomStruct *p; int len; #if 0 printf("__JS_NewAtom: "); JS_DumpString(rt, str); printf("\n"); #endif if (atom_type < JS_ATOM_TYPE_SYMBOL) { /* str is not NULL */ if (str->atom_type == atom_type) { /* str is the atom, return its index */ i = js_get_atom_index(rt, str); /* reduce string refcount and increase atom's unless constant */ if (__JS_AtomIsConst(i)) str->header.ref_count--; return i; } /* try and locate an already registered atom */ len = str->len; h = hash_string(str, atom_type); h &= JS_ATOM_HASH_MASK; h1 = h & (rt->atom_hash_size - 1); i = rt->atom_hash[h1]; while (i != 0) { p = rt->atom_array[i]; if (p->hash == h && p->atom_type == atom_type && p->len == len && js_string_memcmp(p, str, len) == 0) { if (!__JS_AtomIsConst(i)) p->header.ref_count++; goto done; } i = p->hash_next; } } else { h1 = 0; /* avoid warning */ if (atom_type == JS_ATOM_TYPE_SYMBOL) { h = JS_ATOM_HASH_SYMBOL; } else { h = JS_ATOM_HASH_PRIVATE; atom_type = JS_ATOM_TYPE_SYMBOL; } } if (rt->atom_free_index == 0) { /* allow new atom entries */ uint32_t new_size, start; JSAtomStruct **new_array; /* alloc new with size progression 3/2: 4 6 9 13 19 28 42 63 94 141 211 316 474 711 1066 1599 2398 3597 5395 8092 preallocating space for predefined atoms (at least 195). */ new_size = max_int(211, rt->atom_size * 3 / 2); if (new_size > JS_ATOM_MAX) goto fail; /* XXX: should use realloc2 to use slack space */ new_array = js_realloc_rt(rt, rt->atom_array, sizeof(*new_array) * new_size); if (!new_array) goto fail; /* Note: the atom 0 is not used */ start = rt->atom_size; if (start == 0) { /* JS_ATOM_NULL entry */ p = js_mallocz_rt(rt, sizeof(JSAtomStruct)); if (!p) { js_free_rt(rt, new_array); goto fail; } p->header.ref_count = 1; /* not refcounted */ p->atom_type = JS_ATOM_TYPE_SYMBOL; #ifdef DUMP_LEAKS list_add_tail(&p->link, &rt->string_list); #endif new_array[0] = p; rt->atom_count++; start = 1; } rt->atom_size = new_size; rt->atom_array = new_array; rt->atom_free_index = start; for(i = start; i < new_size; i++) { uint32_t next; if (i == (new_size - 1)) next = 0; else next = i + 1; rt->atom_array[i] = atom_set_free(next); } } if (str) { if (str->atom_type == 0) { p = str; p->atom_type = atom_type; } else { p = js_malloc_rt(rt, sizeof(JSString) + (str->len << str->is_wide_char) + 1 - str->is_wide_char); if (unlikely(!p)) goto fail; p->header.ref_count = 1; p->is_wide_char = str->is_wide_char; p->len = str->len; #ifdef DUMP_LEAKS list_add_tail(&p->link, &rt->string_list); #endif memcpy(p->u.str8, str->u.str8, (str->len << str->is_wide_char) + 1 - str->is_wide_char); js_free_string(rt, str); } } else { p = js_malloc_rt(rt, sizeof(JSAtomStruct)); /* empty wide string */ if (!p) return JS_ATOM_NULL; p->header.ref_count = 1; p->is_wide_char = 1; /* Hack to represent NULL as a JSString */ p->len = 0; #ifdef DUMP_LEAKS list_add_tail(&p->link, &rt->string_list); #endif } /* use an already free entry */ i = rt->atom_free_index; rt->atom_free_index = atom_get_free(rt->atom_array[i]); rt->atom_array[i] = p; p->hash = h; p->hash_next = i; /* atom_index */ p->atom_type = 
atom_type; rt->atom_count++; if (atom_type != JS_ATOM_TYPE_SYMBOL) { p->hash_next = rt->atom_hash[h1]; rt->atom_hash[h1] = i; if (unlikely(rt->atom_count >= rt->atom_count_resize)) JS_ResizeAtomHash(rt, rt->atom_hash_size * 2); } // JS_DumpAtoms(rt); return i; fail: i = JS_ATOM_NULL; done: if (str) js_free_string(rt, str); return i; } /* only works with zero terminated 8 bit strings */ static JSAtom __JS_NewAtomInit(JSRuntime *rt, const char *str, int len, int atom_type) { JSString *p; p = js_alloc_string_rt(rt, len, 0); if (!p) return JS_ATOM_NULL; memcpy(p->u.str8, str, len); p->u.str8[len] = '\0'; return __JS_NewAtom(rt, p, atom_type); } static JSAtom __JS_FindAtom(JSRuntime *rt, const char *str, size_t len, int atom_type) { uint32_t h, h1, i; JSAtomStruct *p; h = hash_string8((const uint8_t *)str, len, JS_ATOM_TYPE_STRING); h &= JS_ATOM_HASH_MASK; h1 = h & (rt->atom_hash_size - 1); i = rt->atom_hash[h1]; while (i != 0) { p = rt->atom_array[i]; if (p->hash == h && p->atom_type == JS_ATOM_TYPE_STRING && p->len == len && p->is_wide_char == 0 && memcmp(p->u.str8, str, len) == 0) { if (!__JS_AtomIsConst(i)) p->header.ref_count++; return i; } i = p->hash_next; } return JS_ATOM_NULL; } static void JS_FreeAtomStruct(JSRuntime *rt, JSAtomStruct *p) { #if 0 /* JS_ATOM_NULL is not refcounted: __JS_AtomIsConst() includes 0 */ if (unlikely(i == JS_ATOM_NULL)) { p->header.ref_count = INT32_MAX / 2; return; } #endif uint32_t i = p->hash_next; /* atom_index */ if (p->atom_type != JS_ATOM_TYPE_SYMBOL) { JSAtomStruct *p0, *p1; uint32_t h0; h0 = p->hash & (rt->atom_hash_size - 1); i = rt->atom_hash[h0]; p1 = rt->atom_array[i]; if (p1 == p) { rt->atom_hash[h0] = p1->hash_next; } else { for(;;) { assert(i != 0); p0 = p1; i = p1->hash_next; p1 = rt->atom_array[i]; if (p1 == p) { p0->hash_next = p1->hash_next; break; } } } } /* insert in free atom list */ rt->atom_array[i] = atom_set_free(rt->atom_free_index); rt->atom_free_index = i; /* free the string structure */ #ifdef DUMP_LEAKS list_del(&p->link); #endif js_free_rt(rt, p); rt->atom_count--; assert(rt->atom_count >= 0); } static void __JS_FreeAtom(JSRuntime *rt, uint32_t i) { JSAtomStruct *p; p = rt->atom_array[i]; if (--p->header.ref_count > 0) return; JS_FreeAtomStruct(rt, p); } /* Warning: 'p' is freed */ static JSAtom JS_NewAtomStr(JSContext *ctx, JSString *p) { JSRuntime *rt = ctx->rt; uint32_t n; if (is_num_string(&n, p)) { if (n <= JS_ATOM_MAX_INT) { js_free_string(rt, p); return __JS_AtomFromUInt32(n); } } /* XXX: should generate an exception */ return __JS_NewAtom(rt, p, JS_ATOM_TYPE_STRING); } JSAtom JS_NewAtomLen(JSContext *ctx, const char *str, size_t len) { JSValue val; if (len == 0 || !is_digit(*str)) { JSAtom atom = __JS_FindAtom(ctx->rt, str, len, JS_ATOM_TYPE_STRING); if (atom) return atom; } val = JS_NewStringLen(ctx, str, len); if (JS_IsException(val)) return JS_ATOM_NULL; return JS_NewAtomStr(ctx, JS_VALUE_GET_STRING(val)); } JSAtom JS_NewAtom(JSContext *ctx, const char *str) { return JS_NewAtomLen(ctx, str, strlen(str)); } JSAtom JS_NewAtomUInt32(JSContext *ctx, uint32_t n) { if (n <= JS_ATOM_MAX_INT) { return __JS_AtomFromUInt32(n); } else { char buf[11]; JSValue val; snprintf(buf, sizeof(buf), "%u", n); val = JS_NewString(ctx, buf); if (JS_IsException(val)) return JS_ATOM_NULL; return __JS_NewAtom(ctx->rt, JS_VALUE_GET_STRING(val), JS_ATOM_TYPE_STRING); } } static JSAtom JS_NewAtomInt64(JSContext *ctx, int64_t n) { if ((uint64_t)n <= JS_ATOM_MAX_INT) { return __JS_AtomFromUInt32((uint32_t)n); } else { char buf[24]; JSValue val; 
snprintf(buf, sizeof(buf), "%" PRId64 , n); val = JS_NewString(ctx, buf); if (JS_IsException(val)) return JS_ATOM_NULL; return __JS_NewAtom(ctx->rt, JS_VALUE_GET_STRING(val), JS_ATOM_TYPE_STRING); } } /* 'p' is freed */ static JSValue JS_NewSymbol(JSContext *ctx, JSString *p, int atom_type) { JSRuntime *rt = ctx->rt; JSAtom atom; atom = __JS_NewAtom(rt, p, atom_type); if (atom == JS_ATOM_NULL) return JS_ThrowOutOfMemory(ctx); return JS_MKPTR(JS_TAG_SYMBOL, rt->atom_array[atom]); } /* descr must be a non-numeric string atom */ static JSValue JS_NewSymbolFromAtom(JSContext *ctx, JSAtom descr, int atom_type) { JSRuntime *rt = ctx->rt; JSString *p; assert(!__JS_AtomIsTaggedInt(descr)); assert(descr < rt->atom_size); p = rt->atom_array[descr]; JS_DupValue(ctx, JS_MKPTR(JS_TAG_STRING, p)); return JS_NewSymbol(ctx, p, atom_type); } #define ATOM_GET_STR_BUF_SIZE 64 /* Should only be used for debug. */ static const char *JS_AtomGetStrRT(JSRuntime *rt, char *buf, int buf_size, JSAtom atom) { if (__JS_AtomIsTaggedInt(atom)) { snprintf(buf, buf_size, "%u", __JS_AtomToUInt32(atom)); } else { JSAtomStruct *p; assert(atom < rt->atom_size); if (atom == JS_ATOM_NULL) { snprintf(buf, buf_size, ""); } else { int i, c; char *q; JSString *str; q = buf; p = rt->atom_array[atom]; assert(!atom_is_free(p)); str = p; if (str) { if (!str->is_wide_char) { /* special case ASCII strings */ c = 0; for(i = 0; i < str->len; i++) { c |= str->u.str8[i]; } if (c < 0x80) return (const char *)str->u.str8; } for(i = 0; i < str->len; i++) { if (str->is_wide_char) c = str->u.str16[i]; else c = str->u.str8[i]; if ((q - buf) >= buf_size - UTF8_CHAR_LEN_MAX) break; if (c < 128) { *q++ = c; } else { q += unicode_to_utf8((uint8_t *)q, c); } } } *q = '\0'; } } return buf; } static const char *JS_AtomGetStr(JSContext *ctx, char *buf, int buf_size, JSAtom atom) { return JS_AtomGetStrRT(ctx->rt, buf, buf_size, atom); } static JSValue __JS_AtomToValue(JSContext *ctx, JSAtom atom, BOOL force_string) { char buf[ATOM_GET_STR_BUF_SIZE]; if (__JS_AtomIsTaggedInt(atom)) { snprintf(buf, sizeof(buf), "%u", __JS_AtomToUInt32(atom)); return JS_NewString(ctx, buf); } else { JSRuntime *rt = ctx->rt; JSAtomStruct *p; assert(atom < rt->atom_size); p = rt->atom_array[atom]; if (p->atom_type == JS_ATOM_TYPE_STRING) { goto ret_string; } else if (force_string) { if (p->len == 0 && p->is_wide_char != 0) { /* no description string */ p = rt->atom_array[JS_ATOM_empty_string]; } ret_string: return JS_DupValue(ctx, JS_MKPTR(JS_TAG_STRING, p)); } else { return JS_DupValue(ctx, JS_MKPTR(JS_TAG_SYMBOL, p)); } } } JSValue JS_AtomToValue(JSContext *ctx, JSAtom atom) { return __JS_AtomToValue(ctx, atom, FALSE); } JSValue JS_AtomToString(JSContext *ctx, JSAtom atom) { return __JS_AtomToValue(ctx, atom, TRUE); } /* return TRUE if the atom is an array index (i.e. 0 <= index <= 2^32-2 and return its value */ static BOOL JS_AtomIsArrayIndex(JSContext *ctx, uint32_t *pval, JSAtom atom) { if (__JS_AtomIsTaggedInt(atom)) { *pval = __JS_AtomToUInt32(atom); return TRUE; } else { JSRuntime *rt = ctx->rt; JSAtomStruct *p; uint32_t val; assert(atom < rt->atom_size); p = rt->atom_array[atom]; if (p->atom_type == JS_ATOM_TYPE_STRING && is_num_string(&val, p) && val != -1) { *pval = val; return TRUE; } else { *pval = 0; return FALSE; } } } /* This test must be fast if atom is not a numeric index (e.g. a method name). Return JS_UNDEFINED if not a numeric index. JS_EXCEPTION can also be returned. 
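   For illustration: "0", "3", "1.5", "-0" and "Infinity" are numeric
   indices, while "foo" and "+3" fail the leading-character check below,
   and "012" fails the ToNumber/ToString round-trip (the ECMA
   CanonicalNumericIndexString test), so they are not.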
*/ static JSValue JS_AtomIsNumericIndex1(JSContext *ctx, JSAtom atom) { JSRuntime *rt = ctx->rt; JSAtomStruct *p1; JSString *p; int c, len, ret; JSValue num, str; if (__JS_AtomIsTaggedInt(atom)) return JS_NewInt32(ctx, __JS_AtomToUInt32(atom)); assert(atom < rt->atom_size); p1 = rt->atom_array[atom]; if (p1->atom_type != JS_ATOM_TYPE_STRING) return JS_UNDEFINED; p = p1; len = p->len; if (p->is_wide_char) { const uint16_t *r = p->u.str16, *r_end = p->u.str16 + len; if (r >= r_end) return JS_UNDEFINED; c = *r; if (c == '-') { if (r >= r_end) return JS_UNDEFINED; r++; c = *r; /* -0 case is specific */ if (c == '0' && len == 2) goto minus_zero; } /* XXX: should test NaN, but the tests do not check it */ if (!is_num(c)) { /* XXX: String should be normalized, therefore 8-bit only */ const uint16_t nfinity16[7] = { 'n', 'f', 'i', 'n', 'i', 't', 'y' }; if (!(c =='I' && (r_end - r) == 8 && !memcmp(r + 1, nfinity16, sizeof(nfinity16)))) return JS_UNDEFINED; } } else { const uint8_t *r = p->u.str8, *r_end = p->u.str8 + len; if (r >= r_end) return JS_UNDEFINED; c = *r; if (c == '-') { if (r >= r_end) return JS_UNDEFINED; r++; c = *r; /* -0 case is specific */ if (c == '0' && len == 2) { minus_zero: return __JS_NewFloat64(ctx, -0.0); } } if (!is_num(c)) { if (!(c =='I' && (r_end - r) == 8 && !memcmp(r + 1, "nfinity", 7))) return JS_UNDEFINED; } } /* XXX: bignum: would be better to only accept integer to avoid relying on current floating point precision */ /* this is ECMA CanonicalNumericIndexString primitive */ num = JS_ToNumber(ctx, JS_MKPTR(JS_TAG_STRING, p)); if (JS_IsException(num)) return num; str = JS_ToString(ctx, num); if (JS_IsException(str)) { JS_FreeValue(ctx, num); return str; } ret = js_string_compare(ctx, p, JS_VALUE_GET_STRING(str)); JS_FreeValue(ctx, str); if (ret == 0) { return num; } else { JS_FreeValue(ctx, num); return JS_UNDEFINED; } } /* return -1 if exception or TRUE/FALSE */ static int JS_AtomIsNumericIndex(JSContext *ctx, JSAtom atom) { JSValue num; num = JS_AtomIsNumericIndex1(ctx, atom); if (likely(JS_IsUndefined(num))) return FALSE; if (JS_IsException(num)) return -1; JS_FreeValue(ctx, num); return TRUE; } void JS_FreeAtom(JSContext *ctx, JSAtom v) { if (!__JS_AtomIsConst(v)) __JS_FreeAtom(ctx->rt, v); } void JS_FreeAtomRT(JSRuntime *rt, JSAtom v) { if (!__JS_AtomIsConst(v)) __JS_FreeAtom(rt, v); } /* return TRUE if 'v' is a symbol with a string description */ static BOOL JS_AtomSymbolHasDescription(JSContext *ctx, JSAtom v) { JSRuntime *rt; JSAtomStruct *p; rt = ctx->rt; if (__JS_AtomIsTaggedInt(v)) return FALSE; p = rt->atom_array[v]; return (((p->atom_type == JS_ATOM_TYPE_SYMBOL && p->hash == JS_ATOM_HASH_SYMBOL) || p->atom_type == JS_ATOM_TYPE_GLOBAL_SYMBOL) && !(p->len == 0 && p->is_wide_char != 0)); } static __maybe_unused void print_atom(JSContext *ctx, JSAtom atom) { char buf[ATOM_GET_STR_BUF_SIZE]; const char *p; int i; /* XXX: should handle embedded null characters */ /* XXX: should move encoding code to JS_AtomGetStr */ p = JS_AtomGetStr(ctx, buf, sizeof(buf), atom); for (i = 0; p[i]; i++) { int c = (unsigned char)p[i]; if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c == '_' || c == '$') || (c >= '0' && c <= '9' && i > 0))) break; } if (i > 0 && p[i] == '\0') { printf("%s", p); } else { putchar('"'); printf("%.*s", i, p); for (; p[i]; i++) { int c = (unsigned char)p[i]; if (c == '\"' || c == '\\') { putchar('\\'); putchar(c); } else if (c >= ' ' && c <= 126) { putchar(c); } else if (c == '\n') { putchar('\\'); putchar('n'); } else { printf("\\u%04x", 
c); } } putchar('\"'); } } /* free with JS_FreeCString() */ const char *JS_AtomToCString(JSContext *ctx, JSAtom atom) { JSValue str; const char *cstr; str = JS_AtomToString(ctx, atom); if (JS_IsException(str)) return NULL; cstr = JS_ToCString(ctx, str); JS_FreeValue(ctx, str); return cstr; } /* return a string atom containing name concatenated with str1 */ static JSAtom js_atom_concat_str(JSContext *ctx, JSAtom name, const char *str1) { JSValue str; JSAtom atom; const char *cstr; char *cstr2; size_t len, len1; str = JS_AtomToString(ctx, name); if (JS_IsException(str)) return JS_ATOM_NULL; cstr = JS_ToCStringLen(ctx, &len, str); if (!cstr) goto fail; len1 = strlen(str1); cstr2 = js_malloc(ctx, len + len1 + 1); if (!cstr2) goto fail; memcpy(cstr2, cstr, len); memcpy(cstr2 + len, str1, len1); cstr2[len + len1] = '\0'; atom = JS_NewAtomLen(ctx, cstr2, len + len1); js_free(ctx, cstr2); JS_FreeCString(ctx, cstr); JS_FreeValue(ctx, str); return atom; fail: JS_FreeCString(ctx, cstr); JS_FreeValue(ctx, str); return JS_ATOM_NULL; } static JSAtom js_atom_concat_num(JSContext *ctx, JSAtom name, uint32_t n) { char buf[16]; snprintf(buf, sizeof(buf), "%u", n); return js_atom_concat_str(ctx, name, buf); } static inline BOOL JS_IsEmptyString(JSValueConst v) { return JS_VALUE_GET_TAG(v) == JS_TAG_STRING && JS_VALUE_GET_STRING(v)->len == 0; } /* JSClass support */ #ifdef CONFIG_ATOMICS static pthread_mutex_t js_class_id_mutex = PTHREAD_MUTEX_INITIALIZER; #endif /* a new class ID is allocated if *pclass_id != 0 */ JSClassID JS_NewClassID(JSClassID *pclass_id) { JSClassID class_id; #ifdef CONFIG_ATOMICS pthread_mutex_lock(&js_class_id_mutex); #endif class_id = *pclass_id; if (class_id == 0) { class_id = js_class_id_alloc++; *pclass_id = class_id; } #ifdef CONFIG_ATOMICS pthread_mutex_unlock(&js_class_id_mutex); #endif return class_id; } BOOL JS_IsRegisteredClass(JSRuntime *rt, JSClassID class_id) { return (class_id < rt->class_count && rt->class_array[class_id].class_id != 0); } /* create a new object internal class. Return -1 if error, 0 if OK. The finalizer can be NULL if none is needed. 
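   The public entry points are JS_NewClassID() and JS_NewClass().
   Illustrative registration sketch (the class name, finalizer and id
   variable are made-up examples, not part of the engine):

       static JSClassID my_class_id;
       static void my_class_finalizer(JSRuntime *rt, JSValue val) { ... }
       static const JSClassDef my_class_def = {
           "MyClass",
           .finalizer = my_class_finalizer,
       };

       JS_NewClassID(&my_class_id);
       JS_NewClass(JS_GetRuntime(ctx), my_class_id, &my_class_def);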
*/ static int JS_NewClass1(JSRuntime *rt, JSClassID class_id, const JSClassDef *class_def, JSAtom name) { int new_size, i; JSClass *cl, *new_class_array; struct list_head *el; if (class_id >= (1 << 16)) return -1; if (class_id < rt->class_count && rt->class_array[class_id].class_id != 0) return -1; if (class_id >= rt->class_count) { new_size = max_int(JS_CLASS_INIT_COUNT, max_int(class_id + 1, rt->class_count * 3 / 2)); /* reallocate the context class prototype array, if any */ list_for_each(el, &rt->context_list) { JSContext *ctx = list_entry(el, JSContext, link); JSValue *new_tab; new_tab = js_realloc_rt(rt, ctx->class_proto, sizeof(ctx->class_proto[0]) * new_size); if (!new_tab) return -1; for(i = rt->class_count; i < new_size; i++) new_tab[i] = JS_NULL; ctx->class_proto = new_tab; } /* reallocate the class array */ new_class_array = js_realloc_rt(rt, rt->class_array, sizeof(JSClass) * new_size); if (!new_class_array) return -1; memset(new_class_array + rt->class_count, 0, (new_size - rt->class_count) * sizeof(JSClass)); rt->class_array = new_class_array; rt->class_count = new_size; } cl = &rt->class_array[class_id]; cl->class_id = class_id; cl->class_name = JS_DupAtomRT(rt, name); cl->finalizer = class_def->finalizer; cl->gc_mark = class_def->gc_mark; cl->call = class_def->call; cl->exotic = class_def->exotic; return 0; } int JS_NewClass(JSRuntime *rt, JSClassID class_id, const JSClassDef *class_def) { int ret, len; JSAtom name; len = strlen(class_def->class_name); name = __JS_FindAtom(rt, class_def->class_name, len, JS_ATOM_TYPE_STRING); if (name == JS_ATOM_NULL) { name = __JS_NewAtomInit(rt, class_def->class_name, len, JS_ATOM_TYPE_STRING); if (name == JS_ATOM_NULL) return -1; } ret = JS_NewClass1(rt, class_id, class_def, name); JS_FreeAtomRT(rt, name); return ret; } static JSValue js_new_string8(JSContext *ctx, const uint8_t *buf, int len) { JSString *str; if (len <= 0) { return JS_AtomToString(ctx, JS_ATOM_empty_string); } str = js_alloc_string(ctx, len, 0); if (!str) return JS_EXCEPTION; memcpy(str->u.str8, buf, len); str->u.str8[len] = '\0'; return JS_MKPTR(JS_TAG_STRING, str); } static JSValue js_new_string16(JSContext *ctx, const uint16_t *buf, int len) { JSString *str; str = js_alloc_string(ctx, len, 1); if (!str) return JS_EXCEPTION; memcpy(str->u.str16, buf, len * 2); return JS_MKPTR(JS_TAG_STRING, str); } static JSValue js_new_string_char(JSContext *ctx, uint16_t c) { if (c < 0x100) { uint8_t ch8 = c; return js_new_string8(ctx, &ch8, 1); } else { uint16_t ch16 = c; return js_new_string16(ctx, &ch16, 1); } } static JSValue js_sub_string(JSContext *ctx, JSString *p, int start, int end) { int len = end - start; if (start == 0 && end == p->len) { return JS_DupValue(ctx, JS_MKPTR(JS_TAG_STRING, p)); } if (p->is_wide_char && len > 0) { JSString *str; int i; uint16_t c = 0; for (i = start; i < end; i++) { c |= p->u.str16[i]; } if (c > 0xFF) return js_new_string16(ctx, p->u.str16 + start, len); str = js_alloc_string(ctx, len, 0); if (!str) return JS_EXCEPTION; for (i = 0; i < len; i++) { str->u.str8[i] = p->u.str16[start + i]; } str->u.str8[len] = '\0'; return JS_MKPTR(JS_TAG_STRING, str); } else { return js_new_string8(ctx, p->u.str8 + start, len); } } typedef struct StringBuffer { JSContext *ctx; JSString *str; int len; int size; int is_wide_char; int error_status; } StringBuffer; /* It is valid to call string_buffer_end() and all string_buffer functions even if string_buffer_init() or another string_buffer function returns an error. 
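   A typical internal pattern that relies on this guarantee (illustrative
   sketch only; the literal text is a made-up example):

       StringBuffer b_s, *b = &b_s;
       string_buffer_init(ctx, b, 0);
       string_buffer_puts8(b, "count: ");
       string_buffer_putc16(b, 0x2192);    (a char >= 0x100 widens the buffer)
       return string_buffer_end(b);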
If the error_status is set, string_buffer_end() returns JS_EXCEPTION. */ static int string_buffer_init2(JSContext *ctx, StringBuffer *s, int size, int is_wide) { s->ctx = ctx; s->size = size; s->len = 0; s->is_wide_char = is_wide; s->error_status = 0; s->str = js_alloc_string(ctx, size, is_wide); if (unlikely(!s->str)) { s->size = 0; return s->error_status = -1; } #ifdef DUMP_LEAKS /* the StringBuffer may reallocate the JSString, only link it at the end */ list_del(&s->str->link); #endif return 0; } static inline int string_buffer_init(JSContext *ctx, StringBuffer *s, int size) { return string_buffer_init2(ctx, s, size, 0); } static void string_buffer_free(StringBuffer *s) { js_free(s->ctx, s->str); s->str = NULL; } static int string_buffer_set_error(StringBuffer *s) { js_free(s->ctx, s->str); s->str = NULL; s->size = 0; s->len = 0; return s->error_status = -1; } static no_inline int string_buffer_widen(StringBuffer *s, int size) { JSString *str; size_t slack; int i; if (s->error_status) return -1; str = js_realloc2(s->ctx, s->str, sizeof(JSString) + (size << 1), &slack); if (!str) return string_buffer_set_error(s); size += slack >> 1; for(i = s->len; i-- > 0;) { str->u.str16[i] = str->u.str8[i]; } s->is_wide_char = 1; s->size = size; s->str = str; return 0; } static no_inline int string_buffer_realloc(StringBuffer *s, int new_len, int c) { JSString *new_str; int new_size; size_t new_size_bytes, slack; if (s->error_status) return -1; if (new_len > JS_STRING_LEN_MAX) { JS_ThrowInternalError(s->ctx, "string too long"); return string_buffer_set_error(s); } new_size = min_int(max_int(new_len, s->size * 3 / 2), JS_STRING_LEN_MAX); if (!s->is_wide_char && c >= 0x100) { return string_buffer_widen(s, new_size); } new_size_bytes = sizeof(JSString) + (new_size << s->is_wide_char) + 1 - s->is_wide_char; new_str = js_realloc2(s->ctx, s->str, new_size_bytes, &slack); if (!new_str) return string_buffer_set_error(s); new_size = min_int(new_size + (slack >> s->is_wide_char), JS_STRING_LEN_MAX); s->size = new_size; s->str = new_str; return 0; } static no_inline int string_buffer_putc_slow(StringBuffer *s, uint32_t c) { if (unlikely(s->len >= s->size)) { if (string_buffer_realloc(s, s->len + 1, c)) return -1; } if (s->is_wide_char) { s->str->u.str16[s->len++] = c; } else if (c < 0x100) { s->str->u.str8[s->len++] = c; } else { if (string_buffer_widen(s, s->size)) return -1; s->str->u.str16[s->len++] = c; } return 0; } /* 0 <= c <= 0xff */ static int string_buffer_putc8(StringBuffer *s, uint32_t c) { if (unlikely(s->len >= s->size)) { if (string_buffer_realloc(s, s->len + 1, c)) return -1; } if (s->is_wide_char) { s->str->u.str16[s->len++] = c; } else { s->str->u.str8[s->len++] = c; } return 0; } /* 0 <= c <= 0xffff */ static int string_buffer_putc16(StringBuffer *s, uint32_t c) { if (likely(s->len < s->size)) { if (s->is_wide_char) { s->str->u.str16[s->len++] = c; return 0; } else if (c < 0x100) { s->str->u.str8[s->len++] = c; return 0; } } return string_buffer_putc_slow(s, c); } /* 0 <= c <= 0x10ffff */ static int string_buffer_putc(StringBuffer *s, uint32_t c) { if (unlikely(c >= 0x10000)) { /* surrogate pair */ c -= 0x10000; if (string_buffer_putc16(s, (c >> 10) + 0xd800)) return -1; c = (c & 0x3ff) + 0xdc00; } return string_buffer_putc16(s, c); } static int string_get(const JSString *p, int idx) { return p->is_wide_char ? 
p->u.str16[idx] : p->u.str8[idx]; } static int string_getc(const JSString *p, int *pidx) { int idx, c, c1; idx = *pidx; if (p->is_wide_char) { c = p->u.str16[idx++]; if (c >= 0xd800 && c < 0xdc00 && idx < p->len) { c1 = p->u.str16[idx]; if (c1 >= 0xdc00 && c1 < 0xe000) { c = (((c & 0x3ff) << 10) | (c1 & 0x3ff)) + 0x10000; idx++; } } } else { c = p->u.str8[idx++]; } *pidx = idx; return c; } static int string_buffer_write8(StringBuffer *s, const uint8_t *p, int len) { int i; if (s->len + len > s->size) { if (string_buffer_realloc(s, s->len + len, 0)) return -1; } if (s->is_wide_char) { for (i = 0; i < len; i++) { s->str->u.str16[s->len + i] = p[i]; } s->len += len; } else { memcpy(&s->str->u.str8[s->len], p, len); s->len += len; } return 0; } static int string_buffer_write16(StringBuffer *s, const uint16_t *p, int len) { int c = 0, i; for (i = 0; i < len; i++) { c |= p[i]; } if (s->len + len > s->size) { if (string_buffer_realloc(s, s->len + len, c)) return -1; } else if (!s->is_wide_char && c >= 0x100) { if (string_buffer_widen(s, s->size)) return -1; } if (s->is_wide_char) { memcpy(&s->str->u.str16[s->len], p, len << 1); s->len += len; } else { for (i = 0; i < len; i++) { s->str->u.str8[s->len + i] = p[i]; } s->len += len; } return 0; } /* appending an ASCII string */ static int string_buffer_puts8(StringBuffer *s, const char *str) { return string_buffer_write8(s, (const uint8_t *)str, strlen(str)); } static int string_buffer_concat(StringBuffer *s, const JSString *p, uint32_t from, uint32_t to) { if (to <= from) return 0; if (p->is_wide_char) return string_buffer_write16(s, p->u.str16 + from, to - from); else return string_buffer_write8(s, p->u.str8 + from, to - from); } static int string_buffer_concat_value(StringBuffer *s, JSValueConst v) { JSString *p; JSValue v1; int res; if (s->error_status) { /* prevent exception overload */ return -1; } if (unlikely(JS_VALUE_GET_TAG(v) != JS_TAG_STRING)) { v1 = JS_ToString(s->ctx, v); if (JS_IsException(v1)) return string_buffer_set_error(s); p = JS_VALUE_GET_STRING(v1); res = string_buffer_concat(s, p, 0, p->len); JS_FreeValue(s->ctx, v1); return res; } p = JS_VALUE_GET_STRING(v); return string_buffer_concat(s, p, 0, p->len); } static int string_buffer_concat_value_free(StringBuffer *s, JSValue v) { JSString *p; int res; if (s->error_status) { /* prevent exception overload */ JS_FreeValue(s->ctx, v); return -1; } if (unlikely(JS_VALUE_GET_TAG(v) != JS_TAG_STRING)) { v = JS_ToStringFree(s->ctx, v); if (JS_IsException(v)) return string_buffer_set_error(s); } p = JS_VALUE_GET_STRING(v); res = string_buffer_concat(s, p, 0, p->len); JS_FreeValue(s->ctx, v); return res; } static int string_buffer_fill(StringBuffer *s, int c, int count) { /* XXX: optimize */ if (s->len + count > s->size) { if (string_buffer_realloc(s, s->len + count, c)) return -1; } while (count-- > 0) { if (string_buffer_putc16(s, c)) return -1; } return 0; } static JSValue string_buffer_end(StringBuffer *s) { JSString *str; str = s->str; if (s->error_status) return JS_EXCEPTION; if (s->len == 0) { js_free(s->ctx, str); s->str = NULL; return JS_AtomToString(s->ctx, JS_ATOM_empty_string); } if (s->len < s->size) { /* smaller size so js_realloc should not fail, but OK if it does */ /* XXX: should add some slack to avoid unnecessary calls */ /* XXX: might need to use malloc+free to ensure smaller size */ str = js_realloc_rt(s->ctx->rt, str, sizeof(JSString) + (s->len << s->is_wide_char) + 1 - s->is_wide_char); if (str == NULL) str = s->str; s->str = str; } if (!s->is_wide_char) 
str->u.str8[s->len] = 0; #ifdef DUMP_LEAKS list_add_tail(&str->link, &s->ctx->rt->string_list); #endif str->is_wide_char = s->is_wide_char; str->len = s->len; s->str = NULL; return JS_MKPTR(JS_TAG_STRING, str); } /* create a string from a UTF-8 buffer */ JSValue JS_NewStringLen(JSContext *ctx, const char *buf, size_t buf_len) { const uint8_t *p, *p_end, *p_start, *p_next; uint32_t c; StringBuffer b_s, *b = &b_s; size_t len1; p_start = (const uint8_t *)buf; p_end = p_start + buf_len; p = p_start; while (p < p_end && *p < 128) p++; len1 = p - p_start; if (len1 > JS_STRING_LEN_MAX) return JS_ThrowInternalError(ctx, "string too long"); if (p == p_end) { /* ASCII string */ return js_new_string8(ctx, (const uint8_t *)buf, buf_len); } else { if (string_buffer_init(ctx, b, buf_len)) goto fail; string_buffer_write8(b, p_start, len1); while (p < p_end) { if (*p < 128) { string_buffer_putc8(b, *p++); } else { /* parse utf-8 sequence, return 0xFFFFFFFF for error */ c = unicode_from_utf8(p, p_end - p, &p_next); if (c < 0x10000) { p = p_next; } else if (c <= 0x10FFFF) { p = p_next; /* surrogate pair */ c -= 0x10000; string_buffer_putc16(b, (c >> 10) + 0xd800); c = (c & 0x3ff) + 0xdc00; } else { /* invalid char */ c = 0xfffd; /* skip the invalid chars */ /* XXX: seems incorrect. Why not just use c = *p++; ? */ while (p < p_end && (*p >= 0x80 && *p < 0xc0)) p++; if (p < p_end) { p++; while (p < p_end && (*p >= 0x80 && *p < 0xc0)) p++; } } string_buffer_putc16(b, c); } } } return string_buffer_end(b); fail: string_buffer_free(b); return JS_EXCEPTION; } static JSValue JS_ConcatString3(JSContext *ctx, const char *str1, JSValue str2, const char *str3) { StringBuffer b_s, *b = &b_s; int len1, len3; JSString *p; if (unlikely(JS_VALUE_GET_TAG(str2) != JS_TAG_STRING)) { str2 = JS_ToStringFree(ctx, str2); if (JS_IsException(str2)) goto fail; } p = JS_VALUE_GET_STRING(str2); len1 = strlen(str1); len3 = strlen(str3); if (string_buffer_init2(ctx, b, len1 + p->len + len3, p->is_wide_char)) goto fail; string_buffer_write8(b, (const uint8_t *)str1, len1); string_buffer_concat(b, p, 0, p->len); string_buffer_write8(b, (const uint8_t *)str3, len3); JS_FreeValue(ctx, str2); return string_buffer_end(b); fail: JS_FreeValue(ctx, str2); return JS_EXCEPTION; } JSValue JS_NewString(JSContext *ctx, const char *str) { return JS_NewStringLen(ctx, str, strlen(str)); } JSValue JS_NewAtomString(JSContext *ctx, const char *str) { JSAtom atom = JS_NewAtom(ctx, str); if (atom == JS_ATOM_NULL) return JS_EXCEPTION; JSValue val = JS_AtomToString(ctx, atom); JS_FreeAtom(ctx, atom); return val; } /* return (NULL, 0) if exception. */ /* return pointer into a JSString with a live ref_count */ /* cesu8 determines if non-BMP1 codepoints are encoded as 1 or 2 utf-8 sequences */ const char *JS_ToCStringLen2(JSContext *ctx, size_t *plen, JSValueConst val1, BOOL cesu8) { JSValue val; JSString *str, *str_new; int pos, len, c, c1; uint8_t *q; if (JS_VALUE_GET_TAG(val1) != JS_TAG_STRING) { val = JS_ToString(ctx, val1); if (JS_IsException(val)) goto fail; } else { val = JS_DupValue(ctx, val1); } str = JS_VALUE_GET_STRING(val); len = str->len; if (!str->is_wide_char) { const uint8_t *src = str->u.str8; int count; /* count the number of non-ASCII characters */ /* Scanning the whole string is required for ASCII strings, and computing the number of non-ASCII bytes is less expensive than testing each byte, hence this method is faster for ASCII strings, which is the most common case. 
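   Each byte contributes src[pos] >> 7 to 'count', i.e. 1 exactly when the
   byte is >= 0x80; every such byte becomes a two-byte UTF-8 sequence, so
   the UTF-8 output needs exactly len + count bytes.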
*/ count = 0; for (pos = 0; pos < len; pos++) { count += src[pos] >> 7; } if (count == 0) { if (plen) *plen = len; return (const char *)src; } str_new = js_alloc_string(ctx, len + count, 0); if (!str_new) goto fail; q = str_new->u.str8; for (pos = 0; pos < len; pos++) { c = src[pos]; if (c < 0x80) { *q++ = c; } else { *q++ = (c >> 6) | 0xc0; *q++ = (c & 0x3f) | 0x80; } } } else { const uint16_t *src = str->u.str16; /* Allocate 3 bytes per 16 bit code point. Surrogate pairs may produce 4 bytes but use 2 code points. */ str_new = js_alloc_string(ctx, len * 3, 0); if (!str_new) goto fail; q = str_new->u.str8; pos = 0; while (pos < len) { c = src[pos++]; if (c < 0x80) { *q++ = c; } else { if (c >= 0xd800 && c < 0xdc00) { if (pos < len && !cesu8) { c1 = src[pos]; if (c1 >= 0xdc00 && c1 < 0xe000) { pos++; /* surrogate pair */ c = (((c & 0x3ff) << 10) | (c1 & 0x3ff)) + 0x10000; } else { /* Keep unmatched surrogate code points */ /* c = 0xfffd; */ /* error */ } } else { /* Keep unmatched surrogate code points */ /* c = 0xfffd; */ /* error */ } } q += unicode_to_utf8(q, c); } } } *q = '\0'; str_new->len = q - str_new->u.str8; JS_FreeValue(ctx, val); if (plen) *plen = str_new->len; return (const char *)str_new->u.str8; fail: if (plen) *plen = 0; return NULL; } void JS_FreeCString(JSContext *ctx, const char *ptr) { JSString *p; if (!ptr) return; /* purposely removing constness */ p = container_of(ptr, JSString, u); JS_FreeValue(ctx, JS_MKPTR(JS_TAG_STRING, p)); } static int memcmp16_8(const uint16_t *src1, const uint8_t *src2, int len) { int c, i; for(i = 0; i < len; i++) { c = src1[i] - src2[i]; if (c != 0) return c; } return 0; } static int memcmp16(const uint16_t *src1, const uint16_t *src2, int len) { int c, i; for(i = 0; i < len; i++) { c = src1[i] - src2[i]; if (c != 0) return c; } return 0; } static int js_string_memcmp(const JSString *p1, const JSString *p2, int len) { int res; if (likely(!p1->is_wide_char)) { if (likely(!p2->is_wide_char)) res = memcmp(p1->u.str8, p2->u.str8, len); else res = -memcmp16_8(p2->u.str16, p1->u.str8, len); } else { if (!p2->is_wide_char) res = memcmp16_8(p1->u.str16, p2->u.str8, len); else res = memcmp16(p1->u.str16, p2->u.str16, len); } return res; } /* return < 0, 0 or > 0 */ static int js_string_compare(JSContext *ctx, const JSString *p1, const JSString *p2) { int res, len; len = min_int(p1->len, p2->len); res = js_string_memcmp(p1, p2, len); if (res == 0) { if (p1->len == p2->len) res = 0; else if (p1->len < p2->len) res = -1; else res = 1; } return res; } static void copy_str16(uint16_t *dst, const JSString *p, int offset, int len) { if (p->is_wide_char) { memcpy(dst, p->u.str16 + offset, len * 2); } else { const uint8_t *src1 = p->u.str8 + offset; int i; for(i = 0; i < len; i++) dst[i] = src1[i]; } } static JSValue JS_ConcatString1(JSContext *ctx, const JSString *p1, const JSString *p2) { JSString *p; uint32_t len; int is_wide_char; len = p1->len + p2->len; if (len > JS_STRING_LEN_MAX) return JS_ThrowInternalError(ctx, "string too long"); is_wide_char = p1->is_wide_char | p2->is_wide_char; p = js_alloc_string(ctx, len, is_wide_char); if (!p) return JS_EXCEPTION; if (!is_wide_char) { memcpy(p->u.str8, p1->u.str8, p1->len); memcpy(p->u.str8 + p1->len, p2->u.str8, p2->len); p->u.str8[len] = '\0'; } else { copy_str16(p->u.str16, p1, 0, p1->len); copy_str16(p->u.str16 + p1->len, p2, 0, p2->len); } return JS_MKPTR(JS_TAG_STRING, p); } /* op1 and op2 are converted to strings. For convience, op1 or op2 = JS_EXCEPTION are accepted and return JS_EXCEPTION. 
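   Fast path: when op1 holds the only reference to its JSString, both
   strings have the same character width and js_malloc_usable_size()
   reports enough slack in op1's allocation, the bytes of op2 are appended
   in place; otherwise JS_ConcatString1() allocates a new string.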
*/ static JSValue JS_ConcatString(JSContext *ctx, JSValue op1, JSValue op2) { JSValue ret; JSString *p1, *p2; if (unlikely(JS_VALUE_GET_TAG(op1) != JS_TAG_STRING)) { op1 = JS_ToStringFree(ctx, op1); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); return JS_EXCEPTION; } } if (unlikely(JS_VALUE_GET_TAG(op2) != JS_TAG_STRING)) { op2 = JS_ToStringFree(ctx, op2); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); return JS_EXCEPTION; } } p1 = JS_VALUE_GET_STRING(op1); p2 = JS_VALUE_GET_STRING(op2); /* XXX: could also check if p1 is empty */ if (p2->len == 0) { goto ret_op1; } if (p1->header.ref_count == 1 && p1->is_wide_char == p2->is_wide_char && js_malloc_usable_size(ctx, p1) >= sizeof(*p1) + ((p1->len + p2->len) << p2->is_wide_char) + 1 - p1->is_wide_char) { /* Concatenate in place in available space at the end of p1 */ if (p1->is_wide_char) { memcpy(p1->u.str16 + p1->len, p2->u.str16, p2->len << 1); p1->len += p2->len; } else { memcpy(p1->u.str8 + p1->len, p2->u.str8, p2->len); p1->len += p2->len; p1->u.str8[p1->len] = '\0'; } ret_op1: JS_FreeValue(ctx, op2); return op1; } ret = JS_ConcatString1(ctx, p1, p2); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); return ret; } /* Shape support */ static inline size_t get_shape_size(size_t hash_size, size_t prop_size) { return hash_size * sizeof(uint32_t) + sizeof(JSShape) + prop_size * sizeof(JSShapeProperty); } static inline JSShape *get_shape_from_alloc(void *sh_alloc, size_t hash_size) { return (JSShape *)(void *)((uint32_t *)sh_alloc + hash_size); } static inline uint32_t *prop_hash_end(JSShape *sh) { return (uint32_t *)sh; } static inline void *get_alloc_from_shape(JSShape *sh) { return prop_hash_end(sh) - ((intptr_t)sh->prop_hash_mask + 1); } static inline JSShapeProperty *get_shape_prop(JSShape *sh) { return sh->prop; } static int init_shape_hash(JSRuntime *rt) { rt->shape_hash_bits = 4; /* 16 shapes */ rt->shape_hash_size = 1 << rt->shape_hash_bits; rt->shape_hash_count = 0; rt->shape_hash = js_mallocz_rt(rt, sizeof(rt->shape_hash[0]) * rt->shape_hash_size); if (!rt->shape_hash) return -1; return 0; } /* same magic hash multiplier as the Linux kernel */ static uint32_t shape_hash(uint32_t h, uint32_t val) { return (h + val) * 0x9e370001; } /* truncate the shape hash to 'hash_bits' bits */ static uint32_t get_shape_hash(uint32_t h, int hash_bits) { return h >> (32 - hash_bits); } static uint32_t shape_initial_hash(JSObject *proto) { uint32_t h; h = shape_hash(1, (uintptr_t)proto); if (sizeof(proto) > 4) h = shape_hash(h, (uint64_t)(uintptr_t)proto >> 32); return h; } static int resize_shape_hash(JSRuntime *rt, int new_shape_hash_bits) { int new_shape_hash_size, i; uint32_t h; JSShape **new_shape_hash, *sh, *sh_next; new_shape_hash_size = 1 << new_shape_hash_bits; new_shape_hash = js_mallocz_rt(rt, sizeof(rt->shape_hash[0]) * new_shape_hash_size); if (!new_shape_hash) return -1; for(i = 0; i < rt->shape_hash_size; i++) { for(sh = rt->shape_hash[i]; sh != NULL; sh = sh_next) { sh_next = sh->shape_hash_next; h = get_shape_hash(sh->hash, new_shape_hash_bits); sh->shape_hash_next = new_shape_hash[h]; new_shape_hash[h] = sh; } } js_free_rt(rt, rt->shape_hash); rt->shape_hash_bits = new_shape_hash_bits; rt->shape_hash_size = new_shape_hash_size; rt->shape_hash = new_shape_hash; return 0; } static void js_shape_hash_link(JSRuntime *rt, JSShape *sh) { uint32_t h; h = get_shape_hash(sh->hash, rt->shape_hash_bits); sh->shape_hash_next = rt->shape_hash[h]; rt->shape_hash[h] = sh; rt->shape_hash_count++; } static void js_shape_hash_unlink(JSRuntime 
*rt, JSShape *sh) { uint32_t h; JSShape **psh; h = get_shape_hash(sh->hash, rt->shape_hash_bits); psh = &rt->shape_hash[h]; while (*psh != sh) psh = &(*psh)->shape_hash_next; *psh = sh->shape_hash_next; rt->shape_hash_count--; } /* create a new empty shape with prototype 'proto' */ static no_inline JSShape *js_new_shape2(JSContext *ctx, JSObject *proto, int hash_size, int prop_size) { JSRuntime *rt = ctx->rt; void *sh_alloc; JSShape *sh; /* resize the shape hash table if necessary */ if (2 * (rt->shape_hash_count + 1) > rt->shape_hash_size) { resize_shape_hash(rt, rt->shape_hash_bits + 1); } sh_alloc = js_malloc(ctx, get_shape_size(hash_size, prop_size)); if (!sh_alloc) return NULL; sh = get_shape_from_alloc(sh_alloc, hash_size); sh->header.ref_count = 1; add_gc_object(rt, &sh->header, JS_GC_OBJ_TYPE_SHAPE); if (proto) JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, proto)); sh->proto = proto; memset(prop_hash_end(sh) - hash_size, 0, sizeof(prop_hash_end(sh)[0]) * hash_size); sh->prop_hash_mask = hash_size - 1; sh->prop_size = prop_size; sh->prop_count = 0; sh->deleted_prop_count = 0; /* insert in the hash table */ sh->hash = shape_initial_hash(proto); sh->is_hashed = TRUE; sh->has_small_array_index = FALSE; js_shape_hash_link(ctx->rt, sh); return sh; } static JSShape *js_new_shape(JSContext *ctx, JSObject *proto) { return js_new_shape2(ctx, proto, JS_PROP_INITIAL_HASH_SIZE, JS_PROP_INITIAL_SIZE); } /* The shape is cloned. The new shape is not inserted in the shape hash table */ static JSShape *js_clone_shape(JSContext *ctx, JSShape *sh1) { JSShape *sh; void *sh_alloc, *sh_alloc1; size_t size; JSShapeProperty *pr; uint32_t i, hash_size; hash_size = sh1->prop_hash_mask + 1; size = get_shape_size(hash_size, sh1->prop_size); sh_alloc = js_malloc(ctx, size); if (!sh_alloc) return NULL; sh_alloc1 = get_alloc_from_shape(sh1); memcpy(sh_alloc, sh_alloc1, size); sh = get_shape_from_alloc(sh_alloc, hash_size); sh->header.ref_count = 1; add_gc_object(ctx->rt, &sh->header, JS_GC_OBJ_TYPE_SHAPE); sh->is_hashed = FALSE; if (sh->proto) { JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, sh->proto)); } for(i = 0, pr = get_shape_prop(sh); i < sh->prop_count; i++, pr++) { JS_DupAtom(ctx, pr->atom); } return sh; } static JSShape *js_dup_shape(JSShape *sh) { sh->header.ref_count++; return sh; } static void js_free_shape0(JSRuntime *rt, JSShape *sh) { uint32_t i; JSShapeProperty *pr; assert(sh->header.ref_count == 0); if (sh->is_hashed) js_shape_hash_unlink(rt, sh); if (sh->proto != NULL) { JS_FreeValueRT(rt, JS_MKPTR(JS_TAG_OBJECT, sh->proto)); } pr = get_shape_prop(sh); for(i = 0; i < sh->prop_count; i++) { JS_FreeAtomRT(rt, pr->atom); pr++; } remove_gc_object(&sh->header); js_free_rt(rt, get_alloc_from_shape(sh)); } static void js_free_shape(JSRuntime *rt, JSShape *sh) { if (unlikely(--sh->header.ref_count <= 0)) { js_free_shape0(rt, sh); } } static void js_free_shape_null(JSRuntime *rt, JSShape *sh) { if (sh) js_free_shape(rt, sh); } /* make space to hold at least 'count' properties */ static no_inline int resize_properties(JSContext *ctx, JSShape **psh, JSObject *p, uint32_t count) { JSShape *sh; uint32_t new_size, new_hash_size, new_hash_mask, i; JSShapeProperty *pr; void *sh_alloc; intptr_t h; JSShape *old_sh; sh = *psh; new_size = max_int(count, sh->prop_size * 3 / 2); /* Reallocate prop array first to avoid crash or size inconsistency in case of memory allocation failure */ if (p) { JSProperty *new_prop; new_prop = js_realloc(ctx, p->prop, sizeof(new_prop[0]) * new_size); if (unlikely(!new_prop)) return -1; p->prop 
= new_prop; } new_hash_size = sh->prop_hash_mask + 1; while (new_hash_size < new_size) new_hash_size = 2 * new_hash_size; /* resize the property shapes. Using js_realloc() is not possible in case the GC runs during the allocation */ old_sh = sh; sh_alloc = js_malloc(ctx, get_shape_size(new_hash_size, new_size)); if (!sh_alloc) return -1; sh = get_shape_from_alloc(sh_alloc, new_hash_size); list_del(&old_sh->header.link); /* copy all the shape properties */ memcpy(sh, old_sh, sizeof(JSShape) + sizeof(sh->prop[0]) * old_sh->prop_count); list_add_tail(&sh->header.link, &ctx->rt->gc_obj_list); if (new_hash_size != (sh->prop_hash_mask + 1)) { /* resize the hash table and the properties */ new_hash_mask = new_hash_size - 1; sh->prop_hash_mask = new_hash_mask; memset(prop_hash_end(sh) - new_hash_size, 0, sizeof(prop_hash_end(sh)[0]) * new_hash_size); for(i = 0, pr = sh->prop; i < sh->prop_count; i++, pr++) { if (pr->atom != JS_ATOM_NULL) { h = ((uintptr_t)pr->atom & new_hash_mask); pr->hash_next = prop_hash_end(sh)[-h - 1]; prop_hash_end(sh)[-h - 1] = i + 1; } } } else { /* just copy the previous hash table */ memcpy(prop_hash_end(sh) - new_hash_size, prop_hash_end(old_sh) - new_hash_size, sizeof(prop_hash_end(sh)[0]) * new_hash_size); } js_free(ctx, get_alloc_from_shape(old_sh)); *psh = sh; sh->prop_size = new_size; return 0; } /* remove the deleted properties. */ static int compact_properties(JSContext *ctx, JSObject *p) { JSShape *sh, *old_sh; void *sh_alloc; intptr_t h; uint32_t new_hash_size, i, j, new_hash_mask, new_size; JSShapeProperty *old_pr, *pr; JSProperty *prop, *new_prop; sh = p->shape; assert(!sh->is_hashed); new_size = max_int(JS_PROP_INITIAL_SIZE, sh->prop_count - sh->deleted_prop_count); assert(new_size <= sh->prop_size); new_hash_size = sh->prop_hash_mask + 1; while ((new_hash_size / 2) >= new_size) new_hash_size = new_hash_size / 2; new_hash_mask = new_hash_size - 1; /* resize the hash table and the properties */ old_sh = sh; sh_alloc = js_malloc(ctx, get_shape_size(new_hash_size, new_size)); if (!sh_alloc) return -1; sh = get_shape_from_alloc(sh_alloc, new_hash_size); list_del(&old_sh->header.link); memcpy(sh, old_sh, sizeof(JSShape)); list_add_tail(&sh->header.link, &ctx->rt->gc_obj_list); memset(prop_hash_end(sh) - new_hash_size, 0, sizeof(prop_hash_end(sh)[0]) * new_hash_size); j = 0; old_pr = old_sh->prop; pr = sh->prop; prop = p->prop; for(i = 0; i < sh->prop_count; i++) { if (old_pr->atom != JS_ATOM_NULL) { pr->atom = old_pr->atom; pr->flags = old_pr->flags; h = ((uintptr_t)old_pr->atom & new_hash_mask); pr->hash_next = prop_hash_end(sh)[-h - 1]; prop_hash_end(sh)[-h - 1] = j + 1; prop[j] = prop[i]; j++; pr++; } old_pr++; } assert(j == (sh->prop_count - sh->deleted_prop_count)); sh->prop_hash_mask = new_hash_mask; sh->prop_size = new_size; sh->deleted_prop_count = 0; sh->prop_count = j; p->shape = sh; js_free(ctx, get_alloc_from_shape(old_sh)); /* reduce the size of the object properties */ new_prop = js_realloc(ctx, p->prop, sizeof(new_prop[0]) * new_size); if (new_prop) p->prop = new_prop; return 0; } static int add_shape_property(JSContext *ctx, JSShape **psh, JSObject *p, JSAtom atom, int prop_flags) { JSRuntime *rt = ctx->rt; JSShape *sh = *psh; JSShapeProperty *pr, *prop; uint32_t hash_mask, new_shape_hash = 0; intptr_t h; /* update the shape hash */ if (sh->is_hashed) { js_shape_hash_unlink(rt, sh); new_shape_hash = shape_hash(shape_hash(sh->hash, atom), prop_flags); } if (unlikely(sh->prop_count >= sh->prop_size)) { if (resize_properties(ctx, psh, p, 
sh->prop_count + 1)) { /* in case of error, reinsert in the hash table. sh is still valid if resize_properties() failed */ if (sh->is_hashed) js_shape_hash_link(rt, sh); return -1; } sh = *psh; } if (sh->is_hashed) { sh->hash = new_shape_hash; js_shape_hash_link(rt, sh); } /* Initialize the new shape property. The object property at p->prop[sh->prop_count] is uninitialized */ prop = get_shape_prop(sh); pr = &prop[sh->prop_count++]; pr->atom = JS_DupAtom(ctx, atom); pr->flags = prop_flags; sh->has_small_array_index |= __JS_AtomIsTaggedInt(atom); /* add in hash table */ hash_mask = sh->prop_hash_mask; h = atom & hash_mask; pr->hash_next = prop_hash_end(sh)[-h - 1]; prop_hash_end(sh)[-h - 1] = sh->prop_count; return 0; } /* find a hashed empty shape matching the prototype. Return NULL if not found */ static JSShape *find_hashed_shape_proto(JSRuntime *rt, JSObject *proto) { JSShape *sh1; uint32_t h, h1; h = shape_initial_hash(proto); h1 = get_shape_hash(h, rt->shape_hash_bits); for(sh1 = rt->shape_hash[h1]; sh1 != NULL; sh1 = sh1->shape_hash_next) { if (sh1->hash == h && sh1->proto == proto && sh1->prop_count == 0) { return sh1; } } return NULL; } /* find a hashed shape matching sh + (prop, prop_flags). Return NULL if not found */ static JSShape *find_hashed_shape_prop(JSRuntime *rt, JSShape *sh, JSAtom atom, int prop_flags) { JSShape *sh1; uint32_t h, h1, i, n; h = sh->hash; h = shape_hash(h, atom); h = shape_hash(h, prop_flags); h1 = get_shape_hash(h, rt->shape_hash_bits); for(sh1 = rt->shape_hash[h1]; sh1 != NULL; sh1 = sh1->shape_hash_next) { /* we test the hash first so that the rest is done only if the shapes really match */ if (sh1->hash == h && sh1->proto == sh->proto && sh1->prop_count == ((n = sh->prop_count) + 1)) { for(i = 0; i < n; i++) { if (unlikely(sh1->prop[i].atom != sh->prop[i].atom) || unlikely(sh1->prop[i].flags != sh->prop[i].flags)) goto next; } if (unlikely(sh1->prop[n].atom != atom) || unlikely(sh1->prop[n].flags != prop_flags)) goto next; return sh1; } next: ; } return NULL; } static __maybe_unused void JS_DumpShape(JSRuntime *rt, int i, JSShape *sh) { char atom_buf[ATOM_GET_STR_BUF_SIZE]; int j; /* XXX: should output readable class prototype */ printf("%5d %3d%c %14p %5d %5d", i, sh->header.ref_count, " *"[sh->is_hashed], (void *)sh->proto, sh->prop_size, sh->prop_count); for(j = 0; j < sh->prop_count; j++) { printf(" %s", JS_AtomGetStrRT(rt, atom_buf, sizeof(atom_buf), sh->prop[j].atom)); } printf("\n"); } static __maybe_unused void JS_DumpShapes(JSRuntime *rt) { int i; JSShape *sh; struct list_head *el; JSObject *p; JSGCObjectHeader *gp; printf("JSShapes: {\n"); printf("%5s %4s %14s %5s %5s %s\n", "SLOT", "REFS", "PROTO", "SIZE", "COUNT", "PROPS"); for(i = 0; i < rt->shape_hash_size; i++) { for(sh = rt->shape_hash[i]; sh != NULL; sh = sh->shape_hash_next) { JS_DumpShape(rt, i, sh); assert(sh->is_hashed); } } /* dump non-hashed shapes */ list_for_each(el, &rt->gc_obj_list) { gp = list_entry(el, JSGCObjectHeader, link); if (gp->gc_obj_type == JS_GC_OBJ_TYPE_JS_OBJECT) { p = (JSObject *)gp; if (!p->shape->is_hashed) { JS_DumpShape(rt, -1, p->shape); } } } printf("}\n"); } static JSValue JS_NewObjectFromShape(JSContext *ctx, JSShape *sh, JSClassID class_id) { JSObject *p; js_trigger_gc(ctx->rt, sizeof(JSObject)); p = js_malloc(ctx, sizeof(JSObject)); if (unlikely(!p)) goto fail; p->class_id = class_id; p->extensible = TRUE; p->free_mark = 0; p->is_exotic = 0; p->fast_array = 0; p->is_constructor = 0; p->is_uncatchable_error = 0; p->tmp_mark = 0; p->is_HTMLDDA = 0; 
p->first_weak_ref = NULL; p->u.opaque = NULL; p->shape = sh; p->prop = js_malloc(ctx, sizeof(JSProperty) * sh->prop_size); if (unlikely(!p->prop)) { js_free(ctx, p); fail: js_free_shape(ctx->rt, sh); return JS_EXCEPTION; } switch(class_id) { case JS_CLASS_OBJECT: break; case JS_CLASS_ARRAY: { JSProperty *pr; p->is_exotic = 1; p->fast_array = 1; p->u.array.u.values = NULL; p->u.array.count = 0; p->u.array.u1.size = 0; /* the length property is always the first one */ if (likely(sh == ctx->array_shape)) { pr = &p->prop[0]; } else { /* only used for the first array */ /* cannot fail */ pr = add_property(ctx, p, JS_ATOM_length, JS_PROP_WRITABLE | JS_PROP_LENGTH); } pr->u.value = JS_NewInt32(ctx, 0); } break; case JS_CLASS_C_FUNCTION: p->prop[0].u.value = JS_UNDEFINED; break; case JS_CLASS_ARGUMENTS: case JS_CLASS_UINT8C_ARRAY: case JS_CLASS_INT8_ARRAY: case JS_CLASS_UINT8_ARRAY: case JS_CLASS_INT16_ARRAY: case JS_CLASS_UINT16_ARRAY: case JS_CLASS_INT32_ARRAY: case JS_CLASS_UINT32_ARRAY: case JS_CLASS_BIG_INT64_ARRAY: case JS_CLASS_BIG_UINT64_ARRAY: case JS_CLASS_FLOAT32_ARRAY: case JS_CLASS_FLOAT64_ARRAY: p->is_exotic = 1; p->fast_array = 1; p->u.array.u.ptr = NULL; p->u.array.count = 0; break; case JS_CLASS_DATAVIEW: p->u.array.u.ptr = NULL; p->u.array.count = 0; break; case JS_CLASS_NUMBER: case JS_CLASS_STRING: case JS_CLASS_BOOLEAN: case JS_CLASS_SYMBOL: case JS_CLASS_DATE: case JS_CLASS_BIG_INT: #ifdef CONFIG_BIGNUM case JS_CLASS_BIG_FLOAT: case JS_CLASS_BIG_DECIMAL: #endif p->u.object_data = JS_UNDEFINED; goto set_exotic; case JS_CLASS_REGEXP: p->u.regexp.pattern = NULL; p->u.regexp.bytecode = NULL; goto set_exotic; default: set_exotic: if (ctx->rt->class_array[class_id].exotic) { p->is_exotic = 1; } break; } p->header.ref_count = 1; add_gc_object(ctx->rt, &p->header, JS_GC_OBJ_TYPE_JS_OBJECT); return JS_MKPTR(JS_TAG_OBJECT, p); } static JSObject *get_proto_obj(JSValueConst proto_val) { if (JS_VALUE_GET_TAG(proto_val) != JS_TAG_OBJECT) return NULL; else return JS_VALUE_GET_OBJ(proto_val); } /* WARNING: proto must be an object or JS_NULL */ JSValue JS_NewObjectProtoClass(JSContext *ctx, JSValueConst proto_val, JSClassID class_id) { JSShape *sh; JSObject *proto; proto = get_proto_obj(proto_val); sh = find_hashed_shape_proto(ctx->rt, proto); if (likely(sh)) { sh = js_dup_shape(sh); } else { sh = js_new_shape(ctx, proto); if (!sh) return JS_EXCEPTION; } return JS_NewObjectFromShape(ctx, sh, class_id); } #if 0 static JSValue JS_GetObjectData(JSContext *ctx, JSValueConst obj) { JSObject *p; if (JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT) { p = JS_VALUE_GET_OBJ(obj); switch(p->class_id) { case JS_CLASS_NUMBER: case JS_CLASS_STRING: case JS_CLASS_BOOLEAN: case JS_CLASS_SYMBOL: case JS_CLASS_DATE: case JS_CLASS_BIG_INT: #ifdef CONFIG_BIGNUM case JS_CLASS_BIG_FLOAT: case JS_CLASS_BIG_DECIMAL: #endif return JS_DupValue(ctx, p->u.object_data); } } return JS_UNDEFINED; } #endif static int JS_SetObjectData(JSContext *ctx, JSValueConst obj, JSValue val) { JSObject *p; if (JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT) { p = JS_VALUE_GET_OBJ(obj); switch(p->class_id) { case JS_CLASS_NUMBER: case JS_CLASS_STRING: case JS_CLASS_BOOLEAN: case JS_CLASS_SYMBOL: case JS_CLASS_DATE: case JS_CLASS_BIG_INT: #ifdef CONFIG_BIGNUM case JS_CLASS_BIG_FLOAT: case JS_CLASS_BIG_DECIMAL: #endif JS_FreeValue(ctx, p->u.object_data); p->u.object_data = val; return 0; } } JS_FreeValue(ctx, val); if (!JS_IsException(obj)) JS_ThrowTypeError(ctx, "invalid object type"); return -1; } JSValue JS_NewObjectClass(JSContext *ctx, int class_id) 
{ return JS_NewObjectProtoClass(ctx, ctx->class_proto[class_id], class_id); } JSValue JS_NewObjectProto(JSContext *ctx, JSValueConst proto) { return JS_NewObjectProtoClass(ctx, proto, JS_CLASS_OBJECT); } JSValue JS_NewArray(JSContext *ctx) { return JS_NewObjectFromShape(ctx, js_dup_shape(ctx->array_shape), JS_CLASS_ARRAY); } JSValue JS_NewObject(JSContext *ctx) { /* inline JS_NewObjectClass(ctx, JS_CLASS_OBJECT); */ return JS_NewObjectProtoClass(ctx, ctx->class_proto[JS_CLASS_OBJECT], JS_CLASS_OBJECT); } static void js_function_set_properties(JSContext *ctx, JSValueConst func_obj, JSAtom name, int len) { /* ES6 feature non compatible with ES5.1: length is configurable */ JS_DefinePropertyValue(ctx, func_obj, JS_ATOM_length, JS_NewInt32(ctx, len), JS_PROP_CONFIGURABLE); JS_DefinePropertyValue(ctx, func_obj, JS_ATOM_name, JS_AtomToString(ctx, name), JS_PROP_CONFIGURABLE); } static BOOL js_class_has_bytecode(JSClassID class_id) { return (class_id == JS_CLASS_BYTECODE_FUNCTION || class_id == JS_CLASS_GENERATOR_FUNCTION || class_id == JS_CLASS_ASYNC_FUNCTION || class_id == JS_CLASS_ASYNC_GENERATOR_FUNCTION); } /* return NULL without exception if not a function or no bytecode */ static JSFunctionBytecode *JS_GetFunctionBytecode(JSValueConst val) { JSObject *p; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return NULL; p = JS_VALUE_GET_OBJ(val); if (!js_class_has_bytecode(p->class_id)) return NULL; return p->u.func.function_bytecode; } static void js_method_set_home_object(JSContext *ctx, JSValueConst func_obj, JSValueConst home_obj) { JSObject *p, *p1; JSFunctionBytecode *b; if (JS_VALUE_GET_TAG(func_obj) != JS_TAG_OBJECT) return; p = JS_VALUE_GET_OBJ(func_obj); if (!js_class_has_bytecode(p->class_id)) return; b = p->u.func.function_bytecode; if (b->need_home_object) { p1 = p->u.func.home_object; if (p1) { JS_FreeValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p1)); } if (JS_VALUE_GET_TAG(home_obj) == JS_TAG_OBJECT) p1 = JS_VALUE_GET_OBJ(JS_DupValue(ctx, home_obj)); else p1 = NULL; p->u.func.home_object = p1; } } static JSValue js_get_function_name(JSContext *ctx, JSAtom name) { JSValue name_str; name_str = JS_AtomToString(ctx, name); if (JS_AtomSymbolHasDescription(ctx, name)) { name_str = JS_ConcatString3(ctx, "[", name_str, "]"); } return name_str; } /* Modify the name of a method according to the atom and 'flags'. 'flags' is a bitmask of JS_PROP_HAS_GET and JS_PROP_HAS_SET. Also set the home object of the method. Return < 0 if exception. 
*/ static int js_method_set_properties(JSContext *ctx, JSValueConst func_obj, JSAtom name, int flags, JSValueConst home_obj) { JSValue name_str; name_str = js_get_function_name(ctx, name); if (flags & JS_PROP_HAS_GET) { name_str = JS_ConcatString3(ctx, "get ", name_str, ""); } else if (flags & JS_PROP_HAS_SET) { name_str = JS_ConcatString3(ctx, "set ", name_str, ""); } if (JS_IsException(name_str)) return -1; if (JS_DefinePropertyValue(ctx, func_obj, JS_ATOM_name, name_str, JS_PROP_CONFIGURABLE) < 0) return -1; js_method_set_home_object(ctx, func_obj, home_obj); return 0; } /* Note: at least 'length' arguments will be readable in 'argv' */ static JSValue JS_NewCFunction3(JSContext *ctx, JSCFunction *func, const char *name, int length, JSCFunctionEnum cproto, int magic, JSValueConst proto_val) { JSValue func_obj; JSObject *p; JSAtom name_atom; func_obj = JS_NewObjectProtoClass(ctx, proto_val, JS_CLASS_C_FUNCTION); if (JS_IsException(func_obj)) return func_obj; p = JS_VALUE_GET_OBJ(func_obj); p->u.cfunc.realm = JS_DupContext(ctx); p->u.cfunc.c_function.generic = func; p->u.cfunc.length = length; p->u.cfunc.cproto = cproto; p->u.cfunc.magic = magic; p->is_constructor = (cproto == JS_CFUNC_constructor || cproto == JS_CFUNC_constructor_magic || cproto == JS_CFUNC_constructor_or_func || cproto == JS_CFUNC_constructor_or_func_magic); if (!name) name = ""; name_atom = JS_NewAtom(ctx, name); js_function_set_properties(ctx, func_obj, name_atom, length); JS_FreeAtom(ctx, name_atom); return func_obj; } /* Note: at least 'length' arguments will be readable in 'argv' */ JSValue JS_NewCFunction2(JSContext *ctx, JSCFunction *func, const char *name, int length, JSCFunctionEnum cproto, int magic) { return JS_NewCFunction3(ctx, func, name, length, cproto, magic, ctx->function_proto); } typedef struct JSCFunctionDataRecord { JSCFunctionData *func; uint8_t length; uint8_t data_len; uint16_t magic; #ifdef STRICT_R_HEADERS JSValue data[]; #else JSValue data[0]; #endif } JSCFunctionDataRecord; static void js_c_function_data_finalizer(JSRuntime *rt, JSValue val) { JSCFunctionDataRecord *s = JS_GetOpaque(val, JS_CLASS_C_FUNCTION_DATA); int i; if (s) { for(i = 0; i < s->data_len; i++) { JS_FreeValueRT(rt, s->data[i]); } js_free_rt(rt, s); } } static void js_c_function_data_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSCFunctionDataRecord *s = JS_GetOpaque(val, JS_CLASS_C_FUNCTION_DATA); int i; if (s) { for(i = 0; i < s->data_len; i++) { JS_MarkValue(rt, s->data[i], mark_func); } } } static JSValue js_c_function_data_call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_val, int argc, JSValueConst *argv, int flags) { JSCFunctionDataRecord *s = JS_GetOpaque(func_obj, JS_CLASS_C_FUNCTION_DATA); JSValueConst *arg_buf; int i; /* XXX: could add the function on the stack for debug */ if (unlikely(argc < s->length)) { arg_buf = alloca(sizeof(arg_buf[0]) * s->length); for(i = 0; i < argc; i++) arg_buf[i] = argv[i]; for(i = argc; i < s->length; i++) arg_buf[i] = JS_UNDEFINED; } else { arg_buf = argv; } return s->func(ctx, this_val, argc, arg_buf, s->magic, s->data); } JSValue JS_NewCFunctionData(JSContext *ctx, JSCFunctionData *func, int length, int magic, int data_len, JSValueConst *data) { JSCFunctionDataRecord *s; JSValue func_obj; int i; func_obj = JS_NewObjectProtoClass(ctx, ctx->function_proto, JS_CLASS_C_FUNCTION_DATA); if (JS_IsException(func_obj)) return func_obj; s = js_malloc(ctx, sizeof(*s) + data_len * sizeof(JSValue)); if (!s) { JS_FreeValue(ctx, func_obj); return JS_EXCEPTION; } 
s->func = func; s->length = length; s->data_len = data_len; s->magic = magic; for(i = 0; i < data_len; i++) s->data[i] = JS_DupValue(ctx, data[i]); JS_SetOpaque(func_obj, s); js_function_set_properties(ctx, func_obj, JS_ATOM_empty_string, length); return func_obj; } static JSContext *js_autoinit_get_realm(JSProperty *pr) { return (JSContext *)(pr->u.init.realm_and_id & ~3); } static JSAutoInitIDEnum js_autoinit_get_id(JSProperty *pr) { return pr->u.init.realm_and_id & 3; } static void js_autoinit_free(JSRuntime *rt, JSProperty *pr) { JS_FreeContext(js_autoinit_get_realm(pr)); } static void js_autoinit_mark(JSRuntime *rt, JSProperty *pr, JS_MarkFunc *mark_func) { mark_func(rt, &js_autoinit_get_realm(pr)->header); } static void free_property(JSRuntime *rt, JSProperty *pr, int prop_flags) { if (unlikely(prop_flags & JS_PROP_TMASK)) { if ((prop_flags & JS_PROP_TMASK) == JS_PROP_GETSET) { if (pr->u.getset.getter) JS_FreeValueRT(rt, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.getter)); if (pr->u.getset.setter) JS_FreeValueRT(rt, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.setter)); } else if ((prop_flags & JS_PROP_TMASK) == JS_PROP_VARREF) { free_var_ref(rt, pr->u.var_ref); } else if ((prop_flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { js_autoinit_free(rt, pr); } } else { JS_FreeValueRT(rt, pr->u.value); } } static force_inline JSShapeProperty *find_own_property1(JSObject *p, JSAtom atom) { JSShape *sh; JSShapeProperty *pr, *prop; intptr_t h; sh = p->shape; h = (uintptr_t)atom & sh->prop_hash_mask; h = prop_hash_end(sh)[-h - 1]; prop = get_shape_prop(sh); while (h) { pr = &prop[h - 1]; if (likely(pr->atom == atom)) { return pr; } h = pr->hash_next; } return NULL; } static force_inline JSShapeProperty *find_own_property(JSProperty **ppr, JSObject *p, JSAtom atom) { JSShape *sh; JSShapeProperty *pr, *prop; intptr_t h; sh = p->shape; h = (uintptr_t)atom & sh->prop_hash_mask; h = prop_hash_end(sh)[-h - 1]; prop = get_shape_prop(sh); while (h) { pr = &prop[h - 1]; if (likely(pr->atom == atom)) { *ppr = &p->prop[h - 1]; /* the compiler should be able to assume that pr != NULL here */ return pr; } h = pr->hash_next; } *ppr = NULL; return NULL; } /* indicate that the object may be part of a function prototype cycle */ static void set_cycle_flag(JSContext *ctx, JSValueConst obj) { } static void free_var_ref(JSRuntime *rt, JSVarRef *var_ref) { if (var_ref) { assert(var_ref->header.ref_count > 0); if (--var_ref->header.ref_count == 0) { if (var_ref->is_detached) { JS_FreeValueRT(rt, var_ref->value); } else { list_del(&var_ref->var_ref_link); /* still on the stack */ if (var_ref->async_func) async_func_free(rt, var_ref->async_func); } remove_gc_object(&var_ref->header); js_free_rt(rt, var_ref); } } } static void js_array_finalizer(JSRuntime *rt, JSValue val) { JSObject *p = JS_VALUE_GET_OBJ(val); int i; for(i = 0; i < p->u.array.count; i++) { JS_FreeValueRT(rt, p->u.array.u.values[i]); } js_free_rt(rt, p->u.array.u.values); } static void js_array_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); int i; for(i = 0; i < p->u.array.count; i++) { JS_MarkValue(rt, p->u.array.u.values[i], mark_func); } } static void js_object_data_finalizer(JSRuntime *rt, JSValue val) { JSObject *p = JS_VALUE_GET_OBJ(val); JS_FreeValueRT(rt, p->u.object_data); p->u.object_data = JS_UNDEFINED; } static void js_object_data_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); JS_MarkValue(rt, p->u.object_data, mark_func); } static void 
js_c_function_finalizer(JSRuntime *rt, JSValue val) { JSObject *p = JS_VALUE_GET_OBJ(val); if (p->u.cfunc.realm) JS_FreeContext(p->u.cfunc.realm); } static void js_c_function_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); if (p->u.cfunc.realm) mark_func(rt, &p->u.cfunc.realm->header); } static void js_bytecode_function_finalizer(JSRuntime *rt, JSValue val) { JSObject *p1, *p = JS_VALUE_GET_OBJ(val); JSFunctionBytecode *b; JSVarRef **var_refs; int i; p1 = p->u.func.home_object; if (p1) { JS_FreeValueRT(rt, JS_MKPTR(JS_TAG_OBJECT, p1)); } b = p->u.func.function_bytecode; if (b) { var_refs = p->u.func.var_refs; if (var_refs) { for(i = 0; i < b->closure_var_count; i++) free_var_ref(rt, var_refs[i]); js_free_rt(rt, var_refs); } JS_FreeValueRT(rt, JS_MKPTR(JS_TAG_FUNCTION_BYTECODE, b)); } } static void js_bytecode_function_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); JSVarRef **var_refs = p->u.func.var_refs; JSFunctionBytecode *b = p->u.func.function_bytecode; int i; if (p->u.func.home_object) { JS_MarkValue(rt, JS_MKPTR(JS_TAG_OBJECT, p->u.func.home_object), mark_func); } if (b) { if (var_refs) { for(i = 0; i < b->closure_var_count; i++) { JSVarRef *var_ref = var_refs[i]; if (var_ref) { mark_func(rt, &var_ref->header); } } } /* must mark the function bytecode because template objects may be part of a cycle */ JS_MarkValue(rt, JS_MKPTR(JS_TAG_FUNCTION_BYTECODE, b), mark_func); } } static void js_bound_function_finalizer(JSRuntime *rt, JSValue val) { JSObject *p = JS_VALUE_GET_OBJ(val); JSBoundFunction *bf = p->u.bound_function; int i; JS_FreeValueRT(rt, bf->func_obj); JS_FreeValueRT(rt, bf->this_val); for(i = 0; i < bf->argc; i++) { JS_FreeValueRT(rt, bf->argv[i]); } js_free_rt(rt, bf); } static void js_bound_function_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); JSBoundFunction *bf = p->u.bound_function; int i; JS_MarkValue(rt, bf->func_obj, mark_func); JS_MarkValue(rt, bf->this_val, mark_func); for(i = 0; i < bf->argc; i++) JS_MarkValue(rt, bf->argv[i], mark_func); } static void js_for_in_iterator_finalizer(JSRuntime *rt, JSValue val) { JSObject *p = JS_VALUE_GET_OBJ(val); JSForInIterator *it = p->u.for_in_iterator; int i; JS_FreeValueRT(rt, it->obj); if (!it->is_array) { for(i = 0; i < it->atom_count; i++) { JS_FreeAtomRT(rt, it->tab_atom[i].atom); } js_free_rt(rt, it->tab_atom); } js_free_rt(rt, it); } static void js_for_in_iterator_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); JSForInIterator *it = p->u.for_in_iterator; JS_MarkValue(rt, it->obj, mark_func); } static void free_object(JSRuntime *rt, JSObject *p) { int i; JSClassFinalizer *finalizer; JSShape *sh; JSShapeProperty *pr; p->free_mark = 1; /* used to tell the object is invalid when freeing cycles */ /* free all the fields */ sh = p->shape; pr = get_shape_prop(sh); for(i = 0; i < sh->prop_count; i++) { free_property(rt, &p->prop[i], pr->flags); pr++; } js_free_rt(rt, p->prop); /* as an optimization we destroy the shape immediately without putting it in gc_zero_ref_count_list */ js_free_shape(rt, sh); /* fail safe */ p->shape = NULL; p->prop = NULL; if (unlikely(p->first_weak_ref)) { reset_weak_ref(rt, p); } finalizer = rt->class_array[p->class_id].finalizer; if (finalizer) (*finalizer)(rt, JS_MKPTR(JS_TAG_OBJECT, p)); /* fail safe */ p->class_id = 0; p->u.opaque = NULL; p->u.func.var_refs = NULL; 
p->u.func.home_object = NULL; remove_gc_object(&p->header); if (rt->gc_phase == JS_GC_PHASE_REMOVE_CYCLES && p->header.ref_count != 0) { list_add_tail(&p->header.link, &rt->gc_zero_ref_count_list); } else { js_free_rt(rt, p); } } static void free_gc_object(JSRuntime *rt, JSGCObjectHeader *gp) { switch(gp->gc_obj_type) { case JS_GC_OBJ_TYPE_JS_OBJECT: free_object(rt, (JSObject *)gp); break; case JS_GC_OBJ_TYPE_FUNCTION_BYTECODE: free_function_bytecode(rt, (JSFunctionBytecode *)gp); break; case JS_GC_OBJ_TYPE_ASYNC_FUNCTION: __async_func_free(rt, (JSAsyncFunctionState *)gp); break; default: abort(); } } static void free_zero_refcount(JSRuntime *rt) { struct list_head *el; JSGCObjectHeader *p; rt->gc_phase = JS_GC_PHASE_DECREF; for(;;) { el = rt->gc_zero_ref_count_list.next; if (el == &rt->gc_zero_ref_count_list) break; p = list_entry(el, JSGCObjectHeader, link); assert(p->ref_count == 0); free_gc_object(rt, p); } rt->gc_phase = JS_GC_PHASE_NONE; } /* called with the ref_count of 'v' reaches zero. */ void __JS_FreeValueRT(JSRuntime *rt, JSValue v) { uint32_t tag = JS_VALUE_GET_TAG(v); #ifdef DUMP_FREE { printf("Freeing "); if (tag == JS_TAG_OBJECT) { JS_DumpObject(rt, JS_VALUE_GET_OBJ(v)); } else { JS_DumpValueShort(rt, v); printf("\n"); } } #endif switch(tag) { case JS_TAG_STRING: { JSString *p = JS_VALUE_GET_STRING(v); if (p->atom_type) { JS_FreeAtomStruct(rt, p); } else { #ifdef DUMP_LEAKS list_del(&p->link); #endif js_free_rt(rt, p); } } break; case JS_TAG_OBJECT: case JS_TAG_FUNCTION_BYTECODE: { JSGCObjectHeader *p = JS_VALUE_GET_PTR(v); if (rt->gc_phase != JS_GC_PHASE_REMOVE_CYCLES) { list_del(&p->link); list_add(&p->link, &rt->gc_zero_ref_count_list); if (rt->gc_phase == JS_GC_PHASE_NONE) { free_zero_refcount(rt); } } } break; case JS_TAG_MODULE: abort(); /* never freed here */ break; case JS_TAG_BIG_INT: #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: #endif { JSBigFloat *bf = JS_VALUE_GET_PTR(v); bf_delete(&bf->num); js_free_rt(rt, bf); } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_DECIMAL: { JSBigDecimal *bf = JS_VALUE_GET_PTR(v); bfdec_delete(&bf->num); js_free_rt(rt, bf); } break; #endif case JS_TAG_SYMBOL: { JSAtomStruct *p = JS_VALUE_GET_PTR(v); JS_FreeAtomStruct(rt, p); } break; default: printf("__JS_FreeValue: unknown tag=%d\n", tag); abort(); } } void __JS_FreeValue(JSContext *ctx, JSValue v) { __JS_FreeValueRT(ctx->rt, v); } /* garbage collection */ static void add_gc_object(JSRuntime *rt, JSGCObjectHeader *h, JSGCObjectTypeEnum type) { h->mark = 0; h->gc_obj_type = type; list_add_tail(&h->link, &rt->gc_obj_list); } static void remove_gc_object(JSGCObjectHeader *h) { list_del(&h->link); } void JS_MarkValue(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { if (JS_VALUE_HAS_REF_COUNT(val)) { switch(JS_VALUE_GET_TAG(val)) { case JS_TAG_OBJECT: case JS_TAG_FUNCTION_BYTECODE: mark_func(rt, JS_VALUE_GET_PTR(val)); break; default: break; } } } static void mark_children(JSRuntime *rt, JSGCObjectHeader *gp, JS_MarkFunc *mark_func) { switch(gp->gc_obj_type) { case JS_GC_OBJ_TYPE_JS_OBJECT: { JSObject *p = (JSObject *)gp; JSShapeProperty *prs; JSShape *sh; int i; sh = p->shape; mark_func(rt, &sh->header); /* mark all the fields */ prs = get_shape_prop(sh); for(i = 0; i < sh->prop_count; i++) { JSProperty *pr = &p->prop[i]; if (prs->atom != JS_ATOM_NULL) { if (prs->flags & JS_PROP_TMASK) { if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) { if (pr->u.getset.getter) mark_func(rt, &pr->u.getset.getter->header); if (pr->u.getset.setter) mark_func(rt, &pr->u.getset.setter->header); } 
else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { /* Note: the tag does not matter provided it is a GC object */ mark_func(rt, &pr->u.var_ref->header); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { js_autoinit_mark(rt, pr, mark_func); } } else { JS_MarkValue(rt, pr->u.value, mark_func); } } prs++; } if (p->class_id != JS_CLASS_OBJECT) { JSClassGCMark *gc_mark; gc_mark = rt->class_array[p->class_id].gc_mark; if (gc_mark) gc_mark(rt, JS_MKPTR(JS_TAG_OBJECT, p), mark_func); } } break; case JS_GC_OBJ_TYPE_FUNCTION_BYTECODE: /* the template objects can be part of a cycle */ { JSFunctionBytecode *b = (JSFunctionBytecode *)gp; int i; for(i = 0; i < b->cpool_count; i++) { JS_MarkValue(rt, b->cpool[i], mark_func); } if (b->realm) mark_func(rt, &b->realm->header); } break; case JS_GC_OBJ_TYPE_VAR_REF: { JSVarRef *var_ref = (JSVarRef *)gp; if (var_ref->is_detached) { JS_MarkValue(rt, *var_ref->pvalue, mark_func); } else if (var_ref->async_func) { mark_func(rt, &var_ref->async_func->header); } } break; case JS_GC_OBJ_TYPE_ASYNC_FUNCTION: { JSAsyncFunctionState *s = (JSAsyncFunctionState *)gp; JSStackFrame *sf = &s->frame; JSValue *sp; if (!s->is_completed) { JS_MarkValue(rt, sf->cur_func, mark_func); JS_MarkValue(rt, s->this_val, mark_func); /* sf->cur_sp = NULL if the function is running */ if (sf->cur_sp) { /* if the function is running, cur_sp is not known so we cannot mark the stack. Marking the variables is not needed because a running function cannot be part of a removable cycle */ for(sp = sf->arg_buf; sp < sf->cur_sp; sp++) JS_MarkValue(rt, *sp, mark_func); } } JS_MarkValue(rt, s->resolving_funcs[0], mark_func); JS_MarkValue(rt, s->resolving_funcs[1], mark_func); } break; case JS_GC_OBJ_TYPE_SHAPE: { JSShape *sh = (JSShape *)gp; if (sh->proto != NULL) { mark_func(rt, &sh->proto->header); } } break; case JS_GC_OBJ_TYPE_JS_CONTEXT: { JSContext *ctx = (JSContext *)gp; JS_MarkContext(rt, ctx, mark_func); } break; default: abort(); } } static void gc_decref_child(JSRuntime *rt, JSGCObjectHeader *p) { assert(p->ref_count > 0); p->ref_count--; if (p->ref_count == 0 && p->mark == 1) { list_del(&p->link); list_add_tail(&p->link, &rt->tmp_obj_list); } } static void gc_decref(JSRuntime *rt) { struct list_head *el, *el1; JSGCObjectHeader *p; init_list_head(&rt->tmp_obj_list); /* decrement the refcount of all the children of all the GC objects and move the GC objects with zero refcount to tmp_obj_list */ list_for_each_safe(el, el1, &rt->gc_obj_list) { p = list_entry(el, JSGCObjectHeader, link); assert(p->mark == 0); mark_children(rt, p, gc_decref_child); p->mark = 1; if (p->ref_count == 0) { list_del(&p->link); list_add_tail(&p->link, &rt->tmp_obj_list); } } } static void gc_scan_incref_child(JSRuntime *rt, JSGCObjectHeader *p) { p->ref_count++; if (p->ref_count == 1) { /* ref_count was 0: remove from tmp_obj_list and add at the end of gc_obj_list */ list_del(&p->link); list_add_tail(&p->link, &rt->gc_obj_list); p->mark = 0; /* reset the mark for the next GC call */ } } static void gc_scan_incref_child2(JSRuntime *rt, JSGCObjectHeader *p) { p->ref_count++; } static void gc_scan(JSRuntime *rt) { struct list_head *el; JSGCObjectHeader *p; /* keep the objects with a refcount > 0 and their children. */ list_for_each(el, &rt->gc_obj_list) { p = list_entry(el, JSGCObjectHeader, link); assert(p->ref_count > 0); p->mark = 0; /* reset the mark for the next GC call */ mark_children(rt, p, gc_scan_incref_child); } /* restore the refcount of the objects to be deleted. 
*/ list_for_each(el, &rt->tmp_obj_list) { p = list_entry(el, JSGCObjectHeader, link); mark_children(rt, p, gc_scan_incref_child2); } } static void gc_free_cycles(JSRuntime *rt) { struct list_head *el, *el1; JSGCObjectHeader *p; #ifdef DUMP_GC_FREE BOOL header_done = FALSE; #endif rt->gc_phase = JS_GC_PHASE_REMOVE_CYCLES; for(;;) { el = rt->tmp_obj_list.next; if (el == &rt->tmp_obj_list) break; p = list_entry(el, JSGCObjectHeader, link); /* Only need to free the GC object associated with JS values or async functions. The rest will be automatically removed because they must be referenced by them. */ switch(p->gc_obj_type) { case JS_GC_OBJ_TYPE_JS_OBJECT: case JS_GC_OBJ_TYPE_FUNCTION_BYTECODE: case JS_GC_OBJ_TYPE_ASYNC_FUNCTION: #ifdef DUMP_GC_FREE if (!header_done) { printf("Freeing cycles:\n"); JS_DumpObjectHeader(rt); header_done = TRUE; } JS_DumpGCObject(rt, p); #endif free_gc_object(rt, p); break; default: list_del(&p->link); list_add_tail(&p->link, &rt->gc_zero_ref_count_list); break; } } rt->gc_phase = JS_GC_PHASE_NONE; list_for_each_safe(el, el1, &rt->gc_zero_ref_count_list) { p = list_entry(el, JSGCObjectHeader, link); assert(p->gc_obj_type == JS_GC_OBJ_TYPE_JS_OBJECT || p->gc_obj_type == JS_GC_OBJ_TYPE_FUNCTION_BYTECODE || p->gc_obj_type == JS_GC_OBJ_TYPE_ASYNC_FUNCTION); js_free_rt(rt, p); } init_list_head(&rt->gc_zero_ref_count_list); } void JS_RunGC(JSRuntime *rt) { /* decrement the reference of the children of each object. mark = 1 after this pass. */ gc_decref(rt); /* keep the GC objects with a non zero refcount and their childs */ gc_scan(rt); /* free the GC objects in a cycle */ gc_free_cycles(rt); } /* Return false if not an object or if the object has already been freed (zombie objects are visible in finalizers when freeing cycles). 
*/ BOOL JS_IsLiveObject(JSRuntime *rt, JSValueConst obj) { JSObject *p; if (!JS_IsObject(obj)) return FALSE; p = JS_VALUE_GET_OBJ(obj); return !p->free_mark; } /* Compute memory used by various object types */ /* XXX: poor man's approach to handling multiply referenced objects */ typedef struct JSMemoryUsage_helper { double memory_used_count; double str_count; double str_size; int64_t js_func_count; double js_func_size; int64_t js_func_code_size; int64_t js_func_pc2line_count; int64_t js_func_pc2line_size; } JSMemoryUsage_helper; static void compute_value_size(JSValueConst val, JSMemoryUsage_helper *hp); static void compute_jsstring_size(JSString *str, JSMemoryUsage_helper *hp) { if (!str->atom_type) { /* atoms are handled separately */ double s_ref_count = str->header.ref_count; hp->str_count += 1 / s_ref_count; hp->str_size += ((sizeof(*str) + (str->len << str->is_wide_char) + 1 - str->is_wide_char) / s_ref_count); } } static void compute_bytecode_size(JSFunctionBytecode *b, JSMemoryUsage_helper *hp) { int memory_used_count, js_func_size, i; memory_used_count = 0; js_func_size = offsetof(JSFunctionBytecode, debug); if (b->vardefs) { js_func_size += (b->arg_count + b->var_count) * sizeof(*b->vardefs); } if (b->cpool) { js_func_size += b->cpool_count * sizeof(*b->cpool); for (i = 0; i < b->cpool_count; i++) { JSValueConst val = b->cpool[i]; compute_value_size(val, hp); } } if (b->closure_var) { js_func_size += b->closure_var_count * sizeof(*b->closure_var); } if (!b->read_only_bytecode && b->byte_code_buf) { hp->js_func_code_size += b->byte_code_len; } if (b->has_debug) { js_func_size += sizeof(*b) - offsetof(JSFunctionBytecode, debug); if (b->debug.source) { memory_used_count++; js_func_size += b->debug.source_len + 1; } if (b->debug.pc2line_len) { memory_used_count++; hp->js_func_pc2line_count += 1; hp->js_func_pc2line_size += b->debug.pc2line_len; } } hp->js_func_size += js_func_size; hp->js_func_count += 1; hp->memory_used_count += memory_used_count; } static void compute_value_size(JSValueConst val, JSMemoryUsage_helper *hp) { switch(JS_VALUE_GET_TAG(val)) { case JS_TAG_STRING: compute_jsstring_size(JS_VALUE_GET_STRING(val), hp); break; case JS_TAG_BIG_INT: #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: case JS_TAG_BIG_DECIMAL: #endif /* should track JSBigFloat usage */ break; } } void JS_ComputeMemoryUsage(JSRuntime *rt, JSMemoryUsage *s) { struct list_head *el, *el1; int i; JSMemoryUsage_helper mem = { 0 }, *hp = &mem; memset(s, 0, sizeof(*s)); s->malloc_count = rt->malloc_state.malloc_count; s->malloc_size = rt->malloc_state.malloc_size; s->malloc_limit = rt->malloc_state.malloc_limit; s->memory_used_count = 2; /* rt + rt->class_array */ s->memory_used_size = sizeof(JSRuntime) + sizeof(JSValue) * rt->class_count; list_for_each(el, &rt->context_list) { JSContext *ctx = list_entry(el, JSContext, link); JSShape *sh = ctx->array_shape; s->memory_used_count += 2; /* ctx + ctx->class_proto */ s->memory_used_size += sizeof(JSContext) + sizeof(JSValue) * rt->class_count; s->binary_object_count += ctx->binary_object_count; s->binary_object_size += ctx->binary_object_size; /* the hashed shapes are counted separately */ if (sh && !sh->is_hashed) { int hash_size = sh->prop_hash_mask + 1; s->shape_count++; s->shape_size += get_shape_size(hash_size, sh->prop_size); } list_for_each(el1, &ctx->loaded_modules) { JSModuleDef *m = list_entry(el1, JSModuleDef, link); s->memory_used_count += 1; s->memory_used_size += sizeof(*m); if (m->req_module_entries) { s->memory_used_count += 1; s->memory_used_size 
+= m->req_module_entries_count * sizeof(*m->req_module_entries); } if (m->export_entries) { s->memory_used_count += 1; s->memory_used_size += m->export_entries_count * sizeof(*m->export_entries); for (i = 0; i < m->export_entries_count; i++) { JSExportEntry *me = &m->export_entries[i]; if (me->export_type == JS_EXPORT_TYPE_LOCAL && me->u.local.var_ref) { /* potential multiple count */ s->memory_used_count += 1; compute_value_size(me->u.local.var_ref->value, hp); } } } if (m->star_export_entries) { s->memory_used_count += 1; s->memory_used_size += m->star_export_entries_count * sizeof(*m->star_export_entries); } if (m->import_entries) { s->memory_used_count += 1; s->memory_used_size += m->import_entries_count * sizeof(*m->import_entries); } compute_value_size(m->module_ns, hp); compute_value_size(m->func_obj, hp); } } list_for_each(el, &rt->gc_obj_list) { JSGCObjectHeader *gp = list_entry(el, JSGCObjectHeader, link); JSObject *p; JSShape *sh; JSShapeProperty *prs; /* XXX: could count the other GC object types too */ if (gp->gc_obj_type == JS_GC_OBJ_TYPE_FUNCTION_BYTECODE) { compute_bytecode_size((JSFunctionBytecode *)gp, hp); continue; } else if (gp->gc_obj_type != JS_GC_OBJ_TYPE_JS_OBJECT) { continue; } p = (JSObject *)gp; sh = p->shape; s->obj_count++; if (p->prop) { s->memory_used_count++; s->prop_size += sh->prop_size * sizeof(*p->prop); s->prop_count += sh->prop_count; prs = get_shape_prop(sh); for(i = 0; i < sh->prop_count; i++) { JSProperty *pr = &p->prop[i]; if (prs->atom != JS_ATOM_NULL && !(prs->flags & JS_PROP_TMASK)) { compute_value_size(pr->u.value, hp); } prs++; } } /* the hashed shapes are counted separately */ if (!sh->is_hashed) { int hash_size = sh->prop_hash_mask + 1; s->shape_count++; s->shape_size += get_shape_size(hash_size, sh->prop_size); } switch(p->class_id) { case JS_CLASS_ARRAY: /* u.array | length */ case JS_CLASS_ARGUMENTS: /* u.array | length */ s->array_count++; if (p->fast_array) { s->fast_array_count++; if (p->u.array.u.values) { s->memory_used_count++; s->memory_used_size += p->u.array.count * sizeof(*p->u.array.u.values); s->fast_array_elements += p->u.array.count; for (i = 0; i < p->u.array.count; i++) { compute_value_size(p->u.array.u.values[i], hp); } } } break; case JS_CLASS_NUMBER: /* u.object_data */ case JS_CLASS_STRING: /* u.object_data */ case JS_CLASS_BOOLEAN: /* u.object_data */ case JS_CLASS_SYMBOL: /* u.object_data */ case JS_CLASS_DATE: /* u.object_data */ case JS_CLASS_BIG_INT: /* u.object_data */ #ifdef CONFIG_BIGNUM case JS_CLASS_BIG_FLOAT: /* u.object_data */ case JS_CLASS_BIG_DECIMAL: /* u.object_data */ #endif compute_value_size(p->u.object_data, hp); break; case JS_CLASS_C_FUNCTION: /* u.cfunc */ s->c_func_count++; break; case JS_CLASS_BYTECODE_FUNCTION: /* u.func */ { JSFunctionBytecode *b = p->u.func.function_bytecode; JSVarRef **var_refs = p->u.func.var_refs; /* home_object: object will be accounted for in list scan */ if (var_refs) { s->memory_used_count++; s->js_func_size += b->closure_var_count * sizeof(*var_refs); for (i = 0; i < b->closure_var_count; i++) { if (var_refs[i]) { double ref_count = var_refs[i]->header.ref_count; s->memory_used_count += 1 / ref_count; s->js_func_size += sizeof(*var_refs[i]) / ref_count; /* handle non object closed values */ if (var_refs[i]->pvalue == &var_refs[i]->value) { /* potential multiple count */ compute_value_size(var_refs[i]->value, hp); } } } } } break; case JS_CLASS_BOUND_FUNCTION: /* u.bound_function */ { JSBoundFunction *bf = p->u.bound_function; /* func_obj and this_val are objects */ 
for (i = 0; i < bf->argc; i++) { compute_value_size(bf->argv[i], hp); } s->memory_used_count += 1; s->memory_used_size += sizeof(*bf) + bf->argc * sizeof(*bf->argv); } break; case JS_CLASS_C_FUNCTION_DATA: /* u.c_function_data_record */ { JSCFunctionDataRecord *fd = p->u.c_function_data_record; if (fd) { for (i = 0; i < fd->data_len; i++) { compute_value_size(fd->data[i], hp); } s->memory_used_count += 1; s->memory_used_size += sizeof(*fd) + fd->data_len * sizeof(*fd->data); } } break; case JS_CLASS_REGEXP: /* u.regexp */ compute_jsstring_size(p->u.regexp.pattern, hp); compute_jsstring_size(p->u.regexp.bytecode, hp); break; case JS_CLASS_FOR_IN_ITERATOR: /* u.for_in_iterator */ { JSForInIterator *it = p->u.for_in_iterator; if (it) { compute_value_size(it->obj, hp); s->memory_used_count += 1; s->memory_used_size += sizeof(*it); } } break; case JS_CLASS_ARRAY_BUFFER: /* u.array_buffer */ case JS_CLASS_SHARED_ARRAY_BUFFER: /* u.array_buffer */ { JSArrayBuffer *abuf = p->u.array_buffer; if (abuf) { s->memory_used_count += 1; s->memory_used_size += sizeof(*abuf); if (abuf->data) { s->memory_used_count += 1; s->memory_used_size += abuf->byte_length; } } } break; case JS_CLASS_GENERATOR: /* u.generator_data */ case JS_CLASS_UINT8C_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_INT8_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_UINT8_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_INT16_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_UINT16_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_INT32_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_UINT32_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_BIG_INT64_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_BIG_UINT64_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_FLOAT32_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_FLOAT64_ARRAY: /* u.typed_array / u.array */ case JS_CLASS_DATAVIEW: /* u.typed_array */ #ifdef CONFIG_BIGNUM case JS_CLASS_FLOAT_ENV: /* u.float_env */ #endif case JS_CLASS_MAP: /* u.map_state */ case JS_CLASS_SET: /* u.map_state */ case JS_CLASS_WEAKMAP: /* u.map_state */ case JS_CLASS_WEAKSET: /* u.map_state */ case JS_CLASS_MAP_ITERATOR: /* u.map_iterator_data */ case JS_CLASS_SET_ITERATOR: /* u.map_iterator_data */ case JS_CLASS_ARRAY_ITERATOR: /* u.array_iterator_data */ case JS_CLASS_STRING_ITERATOR: /* u.array_iterator_data */ case JS_CLASS_PROXY: /* u.proxy_data */ case JS_CLASS_PROMISE: /* u.promise_data */ case JS_CLASS_PROMISE_RESOLVE_FUNCTION: /* u.promise_function_data */ case JS_CLASS_PROMISE_REJECT_FUNCTION: /* u.promise_function_data */ case JS_CLASS_ASYNC_FUNCTION_RESOLVE: /* u.async_function_data */ case JS_CLASS_ASYNC_FUNCTION_REJECT: /* u.async_function_data */ case JS_CLASS_ASYNC_FROM_SYNC_ITERATOR: /* u.async_from_sync_iterator_data */ case JS_CLASS_ASYNC_GENERATOR: /* u.async_generator_data */ /* TODO */ default: /* XXX: class definition should have an opaque block size */ if (p->u.opaque) { s->memory_used_count += 1; } break; } } s->obj_size += s->obj_count * sizeof(JSObject); /* hashed shapes */ s->memory_used_count++; /* rt->shape_hash */ s->memory_used_size += sizeof(rt->shape_hash[0]) * rt->shape_hash_size; for(i = 0; i < rt->shape_hash_size; i++) { JSShape *sh; for(sh = rt->shape_hash[i]; sh != NULL; sh = sh->shape_hash_next) { int hash_size = sh->prop_hash_mask + 1; s->shape_count++; s->shape_size += get_shape_size(hash_size, sh->prop_size); } } /* atoms */ s->memory_used_count += 2; /* rt->atom_array, rt->atom_hash */ s->atom_count = rt->atom_count; s->atom_size = 
sizeof(rt->atom_array[0]) * rt->atom_size + sizeof(rt->atom_hash[0]) * rt->atom_hash_size; for(i = 0; i < rt->atom_size; i++) { JSAtomStruct *p = rt->atom_array[i]; if (!atom_is_free(p)) { s->atom_size += (sizeof(*p) + (p->len << p->is_wide_char) + 1 - p->is_wide_char); } } s->str_count = round(mem.str_count); s->str_size = round(mem.str_size); s->js_func_count = mem.js_func_count; s->js_func_size = round(mem.js_func_size); s->js_func_code_size = mem.js_func_code_size; s->js_func_pc2line_count = mem.js_func_pc2line_count; s->js_func_pc2line_size = mem.js_func_pc2line_size; s->memory_used_count += round(mem.memory_used_count) + s->atom_count + s->str_count + s->obj_count + s->shape_count + s->js_func_count + s->js_func_pc2line_count; s->memory_used_size += s->atom_size + s->str_size + s->obj_size + s->prop_size + s->shape_size + s->js_func_size + s->js_func_code_size + s->js_func_pc2line_size; } void JS_DumpMemoryUsage(FILE *fp, const JSMemoryUsage *s, JSRuntime *rt) { fprintf(fp, "QuickJS memory usage -- " #ifdef CONFIG_BIGNUM "BigNum " #endif CONFIG_VERSION " version, %d-bit, malloc limit: %"PRId64"\n\n", (int)sizeof(void *) * 8, (int64_t)(ssize_t)s->malloc_limit); #if 1 if (rt) { static const struct { const char *name; size_t size; } object_types[] = { { "JSRuntime", sizeof(JSRuntime) }, { "JSContext", sizeof(JSContext) }, { "JSObject", sizeof(JSObject) }, { "JSString", sizeof(JSString) }, { "JSFunctionBytecode", sizeof(JSFunctionBytecode) }, }; int i, usage_size_ok = 0; for(i = 0; i < countof(object_types); i++) { unsigned int size = object_types[i].size; void *p = js_malloc_rt(rt, size); if (p) { unsigned int size1 = js_malloc_usable_size_rt(rt, p); if (size1 >= size) { usage_size_ok = 1; fprintf(fp, " %3u + %-2u %s\n", size, size1 - size, object_types[i].name); } js_free_rt(rt, p); } } if (!usage_size_ok) { fprintf(fp, " malloc_usable_size unavailable\n"); } { int obj_classes[JS_CLASS_INIT_COUNT + 1] = { 0 }; int class_id; struct list_head *el; list_for_each(el, &rt->gc_obj_list) { JSGCObjectHeader *gp = list_entry(el, JSGCObjectHeader, link); JSObject *p; if (gp->gc_obj_type == JS_GC_OBJ_TYPE_JS_OBJECT) { p = (JSObject *)gp; obj_classes[min_uint32(p->class_id, JS_CLASS_INIT_COUNT)]++; } } fprintf(fp, "\n" "JSObject classes\n"); if (obj_classes[0]) fprintf(fp, " %5d %2.0d %s\n", obj_classes[0], 0, "none"); for (class_id = 1; class_id < JS_CLASS_INIT_COUNT; class_id++) { if (obj_classes[class_id] && class_id < rt->class_count) { char buf[ATOM_GET_STR_BUF_SIZE]; fprintf(fp, " %5d %2.0d %s\n", obj_classes[class_id], class_id, JS_AtomGetStrRT(rt, buf, sizeof(buf), rt->class_array[class_id].class_name)); } } if (obj_classes[JS_CLASS_INIT_COUNT]) fprintf(fp, " %5d %2.0d %s\n", obj_classes[JS_CLASS_INIT_COUNT], 0, "other"); } fprintf(fp, "\n"); } #endif fprintf(fp, "%-20s %8s %8s\n", "NAME", "COUNT", "SIZE"); if (s->malloc_count) { fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per block)\n", "memory allocated", s->malloc_count, s->malloc_size, (double)s->malloc_size / s->malloc_count); fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%d overhead, %0.1f average slack)\n", "memory used", s->memory_used_count, s->memory_used_size, MALLOC_OVERHEAD, ((double)(s->malloc_size - s->memory_used_size) / s->memory_used_count)); } if (s->atom_count) { fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per atom)\n", "atoms", s->atom_count, s->atom_size, (double)s->atom_size / s->atom_count); } if (s->str_count) { fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per string)\n", "strings", s->str_count, 
s->str_size, (double)s->str_size / s->str_count); } if (s->obj_count) { fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per object)\n", "objects", s->obj_count, s->obj_size, (double)s->obj_size / s->obj_count); fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per object)\n", " properties", s->prop_count, s->prop_size, (double)s->prop_count / s->obj_count); fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per shape)\n", " shapes", s->shape_count, s->shape_size, (double)s->shape_size / s->shape_count); } if (s->js_func_count) { fprintf(fp, "%-20s %8"PRId64" %8"PRId64"\n", "bytecode functions", s->js_func_count, s->js_func_size); fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per function)\n", " bytecode", s->js_func_count, s->js_func_code_size, (double)s->js_func_code_size / s->js_func_count); if (s->js_func_pc2line_count) { fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per function)\n", " pc2line", s->js_func_pc2line_count, s->js_func_pc2line_size, (double)s->js_func_pc2line_size / s->js_func_pc2line_count); } } if (s->c_func_count) { fprintf(fp, "%-20s %8"PRId64"\n", "C functions", s->c_func_count); } if (s->array_count) { fprintf(fp, "%-20s %8"PRId64"\n", "arrays", s->array_count); if (s->fast_array_count) { fprintf(fp, "%-20s %8"PRId64"\n", " fast arrays", s->fast_array_count); fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per fast array)\n", " elements", s->fast_array_elements, s->fast_array_elements * (int)sizeof(JSValue), (double)s->fast_array_elements / s->fast_array_count); } } if (s->binary_object_count) { fprintf(fp, "%-20s %8"PRId64" %8"PRId64"\n", "binary objects", s->binary_object_count, s->binary_object_size); } } JSValue JS_GetGlobalObject(JSContext *ctx) { return JS_DupValue(ctx, ctx->global_obj); } /* WARNING: obj is freed */ JSValue JS_Throw(JSContext *ctx, JSValue obj) { JSRuntime *rt = ctx->rt; JS_FreeValue(ctx, rt->current_exception); rt->current_exception = obj; return JS_EXCEPTION; } /* return the pending exception (cannot be called twice). 
*/ JSValue JS_GetException(JSContext *ctx) { JSValue val; JSRuntime *rt = ctx->rt; val = rt->current_exception; rt->current_exception = JS_NULL; return val; } static void dbuf_put_leb128(DynBuf *s, uint32_t v) { uint32_t a; for(;;) { a = v & 0x7f; v >>= 7; if (v != 0) { dbuf_putc(s, a | 0x80); } else { dbuf_putc(s, a); break; } } } static void dbuf_put_sleb128(DynBuf *s, int32_t v1) { uint32_t v = v1; dbuf_put_leb128(s, (2 * v) ^ -(v >> 31)); } static int get_leb128(uint32_t *pval, const uint8_t *buf, const uint8_t *buf_end) { const uint8_t *ptr = buf; uint32_t v, a, i; v = 0; for(i = 0; i < 5; i++) { if (unlikely(ptr >= buf_end)) break; a = *ptr++; v |= (a & 0x7f) << (i * 7); if (!(a & 0x80)) { *pval = v; return ptr - buf; } } *pval = 0; return -1; } static int get_sleb128(int32_t *pval, const uint8_t *buf, const uint8_t *buf_end) { int ret; uint32_t val; ret = get_leb128(&val, buf, buf_end); if (ret < 0) { *pval = 0; return -1; } *pval = (val >> 1) ^ -(val & 1); return ret; } static int find_line_num(JSContext *ctx, JSFunctionBytecode *b, uint32_t pc_value) { const uint8_t *p_end, *p; int new_line_num, line_num, pc, v, ret; unsigned int op; if (!b->has_debug || !b->debug.pc2line_buf) { /* function was stripped */ return -1; } p = b->debug.pc2line_buf; p_end = p + b->debug.pc2line_len; pc = 0; line_num = b->debug.line_num; while (p < p_end) { op = *p++; if (op == 0) { uint32_t val; ret = get_leb128(&val, p, p_end); if (ret < 0) goto fail; pc += val; p += ret; ret = get_sleb128(&v, p, p_end); if (ret < 0) { fail: /* should never happen */ return b->debug.line_num; } p += ret; new_line_num = line_num + v; } else { op -= PC2LINE_OP_FIRST; pc += (op / PC2LINE_RANGE); new_line_num = line_num + (op % PC2LINE_RANGE) + PC2LINE_BASE; } if (pc_value < pc) return line_num; line_num = new_line_num; } return line_num; } /* in order to avoid executing arbitrary code during the stack trace generation, we only look at simple 'name' properties containing a string. */ static const char *get_func_name(JSContext *ctx, JSValueConst func) { JSProperty *pr; JSShapeProperty *prs; JSValueConst val; if (JS_VALUE_GET_TAG(func) != JS_TAG_OBJECT) return NULL; prs = find_own_property(&pr, JS_VALUE_GET_OBJ(func), JS_ATOM_name); if (!prs) return NULL; if ((prs->flags & JS_PROP_TMASK) != JS_PROP_NORMAL) return NULL; val = pr->u.value; if (JS_VALUE_GET_TAG(val) != JS_TAG_STRING) return NULL; return JS_ToCString(ctx, val); } #define JS_BACKTRACE_FLAG_SKIP_FIRST_LEVEL (1 << 0) /* only taken into account if filename is provided */ #define JS_BACKTRACE_FLAG_SINGLE_LEVEL (1 << 1) /* if filename != NULL, an additional level is added with the filename and line number information (used for parse error). 
*/ static void build_backtrace(JSContext *ctx, JSValueConst error_obj, const char *filename, int line_num, int backtrace_flags) { JSStackFrame *sf; JSValue str; DynBuf dbuf; const char *func_name_str; const char *str1; JSObject *p; BOOL backtrace_barrier; js_dbuf_init(ctx, &dbuf); if (filename) { dbuf_printf(&dbuf, " at %s", filename); if (line_num != -1) dbuf_printf(&dbuf, ":%d", line_num); dbuf_putc(&dbuf, '\n'); str = JS_NewString(ctx, filename); JS_DefinePropertyValue(ctx, error_obj, JS_ATOM_fileName, str, JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); JS_DefinePropertyValue(ctx, error_obj, JS_ATOM_lineNumber, JS_NewInt32(ctx, line_num), JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); if (backtrace_flags & JS_BACKTRACE_FLAG_SINGLE_LEVEL) goto done; } for(sf = ctx->rt->current_stack_frame; sf != NULL; sf = sf->prev_frame) { if (backtrace_flags & JS_BACKTRACE_FLAG_SKIP_FIRST_LEVEL) { backtrace_flags &= ~JS_BACKTRACE_FLAG_SKIP_FIRST_LEVEL; continue; } func_name_str = get_func_name(ctx, sf->cur_func); if (!func_name_str || func_name_str[0] == '\0') str1 = ""; else str1 = func_name_str; dbuf_printf(&dbuf, " at %s", str1); JS_FreeCString(ctx, func_name_str); p = JS_VALUE_GET_OBJ(sf->cur_func); backtrace_barrier = FALSE; if (js_class_has_bytecode(p->class_id)) { JSFunctionBytecode *b; const char *atom_str; int line_num1; b = p->u.func.function_bytecode; backtrace_barrier = b->backtrace_barrier; if (b->has_debug) { line_num1 = find_line_num(ctx, b, sf->cur_pc - b->byte_code_buf - 1); atom_str = JS_AtomToCString(ctx, b->debug.filename); dbuf_printf(&dbuf, " (%s", atom_str ? atom_str : ""); JS_FreeCString(ctx, atom_str); if (line_num1 != -1) dbuf_printf(&dbuf, ":%d", line_num1); dbuf_putc(&dbuf, ')'); } } else { dbuf_printf(&dbuf, " (native)"); } dbuf_putc(&dbuf, '\n'); /* stop backtrace if JS_EVAL_FLAG_BACKTRACE_BARRIER was used */ if (backtrace_barrier) break; } done: dbuf_putc(&dbuf, '\0'); if (dbuf_error(&dbuf)) str = JS_NULL; else str = JS_NewString(ctx, (char *)dbuf.buf); dbuf_free(&dbuf); JS_DefinePropertyValue(ctx, error_obj, JS_ATOM_stack, str, JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); } /* Note: it is important that no exception is returned by this function */ static BOOL is_backtrace_needed(JSContext *ctx, JSValueConst obj) { JSObject *p; if (JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(obj); if (p->class_id != JS_CLASS_ERROR) return FALSE; if (find_own_property1(p, JS_ATOM_stack)) return FALSE; return TRUE; } JSValue JS_NewError(JSContext *ctx) { return JS_NewObjectClass(ctx, JS_CLASS_ERROR); } static JSValue JS_ThrowError2(JSContext *ctx, JSErrorEnum error_num, const char *fmt, va_list ap, BOOL add_backtrace) { char buf[256]; JSValue obj, ret; vsnprintf(buf, sizeof(buf), fmt, ap); obj = JS_NewObjectProtoClass(ctx, ctx->native_error_proto[error_num], JS_CLASS_ERROR); if (unlikely(JS_IsException(obj))) { /* out of memory: throw JS_NULL to avoid recursing */ obj = JS_NULL; } else { JS_DefinePropertyValue(ctx, obj, JS_ATOM_message, JS_NewString(ctx, buf), JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); } if (add_backtrace) { build_backtrace(ctx, obj, NULL, 0, 0); } ret = JS_Throw(ctx, obj); return ret; } static JSValue JS_ThrowError(JSContext *ctx, JSErrorEnum error_num, const char *fmt, va_list ap) { JSRuntime *rt = ctx->rt; JSStackFrame *sf; BOOL add_backtrace; /* the backtrace is added later if called from a bytecode function */ sf = rt->current_stack_frame; add_backtrace = !rt->in_out_of_memory && (!sf || (JS_GetFunctionBytecode(sf->cur_func) == NULL)); return 
JS_ThrowError2(ctx, error_num, fmt, ap, add_backtrace); } JSValue __attribute__((format(printf, 2, 3))) JS_ThrowSyntaxError(JSContext *ctx, const char *fmt, ...) { JSValue val; va_list ap; va_start(ap, fmt); val = JS_ThrowError(ctx, JS_SYNTAX_ERROR, fmt, ap); va_end(ap); return val; } JSValue __attribute__((format(printf, 2, 3))) JS_ThrowTypeError(JSContext *ctx, const char *fmt, ...) { JSValue val; va_list ap; va_start(ap, fmt); val = JS_ThrowError(ctx, JS_TYPE_ERROR, fmt, ap); va_end(ap); return val; } static int __attribute__((format(printf, 3, 4))) JS_ThrowTypeErrorOrFalse(JSContext *ctx, int flags, const char *fmt, ...) { va_list ap; if ((flags & JS_PROP_THROW) || ((flags & JS_PROP_THROW_STRICT) && is_strict_mode(ctx))) { va_start(ap, fmt); JS_ThrowError(ctx, JS_TYPE_ERROR, fmt, ap); va_end(ap); return -1; } else { return FALSE; } } /* never use it directly */ static JSValue __attribute__((format(printf, 3, 4))) __JS_ThrowTypeErrorAtom(JSContext *ctx, JSAtom atom, const char *fmt, ...) { char buf[ATOM_GET_STR_BUF_SIZE]; return JS_ThrowTypeError(ctx, fmt, JS_AtomGetStr(ctx, buf, sizeof(buf), atom)); } /* never use it directly */ static JSValue __attribute__((format(printf, 3, 4))) __JS_ThrowSyntaxErrorAtom(JSContext *ctx, JSAtom atom, const char *fmt, ...) { char buf[ATOM_GET_STR_BUF_SIZE]; return JS_ThrowSyntaxError(ctx, fmt, JS_AtomGetStr(ctx, buf, sizeof(buf), atom)); } /* %s is replaced by 'atom'. The macro is used so that gcc can check the format string. */ #define JS_ThrowTypeErrorAtom(ctx, fmt, atom) __JS_ThrowTypeErrorAtom(ctx, atom, fmt, "") #define JS_ThrowSyntaxErrorAtom(ctx, fmt, atom) __JS_ThrowSyntaxErrorAtom(ctx, atom, fmt, "") static int JS_ThrowTypeErrorReadOnly(JSContext *ctx, int flags, JSAtom atom) { if ((flags & JS_PROP_THROW) || ((flags & JS_PROP_THROW_STRICT) && is_strict_mode(ctx))) { JS_ThrowTypeErrorAtom(ctx, "'%s' is read-only", atom); return -1; } else { return FALSE; } } JSValue __attribute__((format(printf, 2, 3))) JS_ThrowReferenceError(JSContext *ctx, const char *fmt, ...) { JSValue val; va_list ap; va_start(ap, fmt); val = JS_ThrowError(ctx, JS_REFERENCE_ERROR, fmt, ap); va_end(ap); return val; } JSValue __attribute__((format(printf, 2, 3))) JS_ThrowRangeError(JSContext *ctx, const char *fmt, ...) { JSValue val; va_list ap; va_start(ap, fmt); val = JS_ThrowError(ctx, JS_RANGE_ERROR, fmt, ap); va_end(ap); return val; } JSValue __attribute__((format(printf, 2, 3))) JS_ThrowInternalError(JSContext *ctx, const char *fmt, ...) 
{ JSValue val; va_list ap; va_start(ap, fmt); val = JS_ThrowError(ctx, JS_INTERNAL_ERROR, fmt, ap); va_end(ap); return val; } JSValue JS_ThrowOutOfMemory(JSContext *ctx) { JSRuntime *rt = ctx->rt; if (!rt->in_out_of_memory) { rt->in_out_of_memory = TRUE; JS_ThrowInternalError(ctx, "out of memory"); rt->in_out_of_memory = FALSE; } return JS_EXCEPTION; } static JSValue JS_ThrowStackOverflow(JSContext *ctx) { return JS_ThrowInternalError(ctx, "stack overflow"); } static JSValue JS_ThrowTypeErrorNotAnObject(JSContext *ctx) { return JS_ThrowTypeError(ctx, "not an object"); } static JSValue JS_ThrowTypeErrorNotASymbol(JSContext *ctx) { return JS_ThrowTypeError(ctx, "not a symbol"); } static JSValue JS_ThrowReferenceErrorNotDefined(JSContext *ctx, JSAtom name) { char buf[ATOM_GET_STR_BUF_SIZE]; return JS_ThrowReferenceError(ctx, "'%s' is not defined", JS_AtomGetStr(ctx, buf, sizeof(buf), name)); } static JSValue JS_ThrowReferenceErrorUninitialized(JSContext *ctx, JSAtom name) { char buf[ATOM_GET_STR_BUF_SIZE]; return JS_ThrowReferenceError(ctx, "%s is not initialized", name == JS_ATOM_NULL ? "lexical variable" : JS_AtomGetStr(ctx, buf, sizeof(buf), name)); } static JSValue JS_ThrowReferenceErrorUninitialized2(JSContext *ctx, JSFunctionBytecode *b, int idx, BOOL is_ref) { JSAtom atom = JS_ATOM_NULL; if (is_ref) { atom = b->closure_var[idx].var_name; } else { /* not present if the function is stripped and contains no eval() */ if (b->vardefs) atom = b->vardefs[b->arg_count + idx].var_name; } return JS_ThrowReferenceErrorUninitialized(ctx, atom); } static JSValue JS_ThrowTypeErrorInvalidClass(JSContext *ctx, int class_id) { JSRuntime *rt = ctx->rt; JSAtom name; name = rt->class_array[class_id].class_name; return JS_ThrowTypeErrorAtom(ctx, "%s object expected", name); } static no_inline __exception int __js_poll_interrupts(JSContext *ctx) { JSRuntime *rt = ctx->rt; ctx->interrupt_counter = JS_INTERRUPT_COUNTER_INIT; if (rt->interrupt_handler) { if (rt->interrupt_handler(rt, rt->interrupt_opaque)) { /* XXX: should set a specific flag to avoid catching */ JS_ThrowInternalError(ctx, "interrupted"); JS_SetUncatchableError(ctx, ctx->rt->current_exception, TRUE); return -1; } } return 0; } static inline __exception int js_poll_interrupts(JSContext *ctx) { if (unlikely(--ctx->interrupt_counter <= 0)) { return __js_poll_interrupts(ctx); } else { return 0; } } /* return -1 (exception) or TRUE/FALSE */ static int JS_SetPrototypeInternal(JSContext *ctx, JSValueConst obj, JSValueConst proto_val, BOOL throw_flag) { JSObject *proto, *p, *p1; JSShape *sh; if (throw_flag) { if (JS_VALUE_GET_TAG(obj) == JS_TAG_NULL || JS_VALUE_GET_TAG(obj) == JS_TAG_UNDEFINED) goto not_obj; } else { if (JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) goto not_obj; } p = JS_VALUE_GET_OBJ(obj); if (JS_VALUE_GET_TAG(proto_val) != JS_TAG_OBJECT) { if (JS_VALUE_GET_TAG(proto_val) != JS_TAG_NULL) { not_obj: JS_ThrowTypeErrorNotAnObject(ctx); return -1; } proto = NULL; } else { proto = JS_VALUE_GET_OBJ(proto_val); } if (throw_flag && JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) return TRUE; if (unlikely(p->class_id == JS_CLASS_PROXY)) return js_proxy_setPrototypeOf(ctx, obj, proto_val, throw_flag); sh = p->shape; if (sh->proto == proto) return TRUE; if (!p->extensible) { if (throw_flag) { JS_ThrowTypeError(ctx, "object is not extensible"); return -1; } else { return FALSE; } } if (proto) { /* check if there is a cycle */ p1 = proto; do { if (p1 == p) { if (throw_flag) { JS_ThrowTypeError(ctx, "circular prototype chain"); return -1; } else { return 
FALSE; } } /* Note: for Proxy objects, proto is NULL */ p1 = p1->shape->proto; } while (p1 != NULL); JS_DupValue(ctx, proto_val); } if (js_shape_prepare_update(ctx, p, NULL)) return -1; sh = p->shape; if (sh->proto) JS_FreeValue(ctx, JS_MKPTR(JS_TAG_OBJECT, sh->proto)); sh->proto = proto; return TRUE; } /* return -1 (exception) or TRUE/FALSE */ int JS_SetPrototype(JSContext *ctx, JSValueConst obj, JSValueConst proto_val) { return JS_SetPrototypeInternal(ctx, obj, proto_val, TRUE); } /* Only works for primitive types, otherwise return JS_NULL. */ static JSValueConst JS_GetPrototypePrimitive(JSContext *ctx, JSValueConst val) { switch(JS_VALUE_GET_NORM_TAG(val)) { case JS_TAG_BIG_INT: val = ctx->class_proto[JS_CLASS_BIG_INT]; break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: val = ctx->class_proto[JS_CLASS_BIG_FLOAT]; break; case JS_TAG_BIG_DECIMAL: val = ctx->class_proto[JS_CLASS_BIG_DECIMAL]; break; #endif case JS_TAG_INT: case JS_TAG_FLOAT64: val = ctx->class_proto[JS_CLASS_NUMBER]; break; case JS_TAG_BOOL: val = ctx->class_proto[JS_CLASS_BOOLEAN]; break; case JS_TAG_STRING: val = ctx->class_proto[JS_CLASS_STRING]; break; case JS_TAG_SYMBOL: val = ctx->class_proto[JS_CLASS_SYMBOL]; break; case JS_TAG_OBJECT: case JS_TAG_NULL: case JS_TAG_UNDEFINED: default: val = JS_NULL; break; } return val; } /* Return an Object, JS_NULL or JS_EXCEPTION in case of Proxy object. */ JSValue JS_GetPrototype(JSContext *ctx, JSValueConst obj) { JSValue val; if (JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT) { JSObject *p; p = JS_VALUE_GET_OBJ(obj); if (unlikely(p->class_id == JS_CLASS_PROXY)) { val = js_proxy_getPrototypeOf(ctx, obj); } else { p = p->shape->proto; if (!p) val = JS_NULL; else val = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); } } else { val = JS_DupValue(ctx, JS_GetPrototypePrimitive(ctx, obj)); } return val; } static JSValue JS_GetPrototypeFree(JSContext *ctx, JSValue obj) { JSValue obj1; obj1 = JS_GetPrototype(ctx, obj); JS_FreeValue(ctx, obj); return obj1; } /* return TRUE, FALSE or (-1) in case of exception */ static int JS_OrdinaryIsInstanceOf(JSContext *ctx, JSValueConst val, JSValueConst obj) { JSValue obj_proto; JSObject *proto; const JSObject *p, *proto1; BOOL ret; if (!JS_IsFunction(ctx, obj)) return FALSE; p = JS_VALUE_GET_OBJ(obj); if (p->class_id == JS_CLASS_BOUND_FUNCTION) { JSBoundFunction *s = p->u.bound_function; return JS_IsInstanceOf(ctx, val, s->func_obj); } /* Only explicitly boxed values are instances of constructors */ if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return FALSE; obj_proto = JS_GetProperty(ctx, obj, JS_ATOM_prototype); if (JS_VALUE_GET_TAG(obj_proto) != JS_TAG_OBJECT) { if (!JS_IsException(obj_proto)) JS_ThrowTypeError(ctx, "operand 'prototype' property is not an object"); ret = -1; goto done; } proto = JS_VALUE_GET_OBJ(obj_proto); p = JS_VALUE_GET_OBJ(val); for(;;) { proto1 = p->shape->proto; if (!proto1) { /* slow case if proxy in the prototype chain */ if (unlikely(p->class_id == JS_CLASS_PROXY)) { JSValue obj1; obj1 = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, (JSObject *)p)); for(;;) { obj1 = JS_GetPrototypeFree(ctx, obj1); if (JS_IsException(obj1)) { ret = -1; break; } if (JS_IsNull(obj1)) { ret = FALSE; break; } if (proto == JS_VALUE_GET_OBJ(obj1)) { JS_FreeValue(ctx, obj1); ret = TRUE; break; } /* must check for timeout to avoid infinite loop */ if (js_poll_interrupts(ctx)) { JS_FreeValue(ctx, obj1); ret = -1; break; } } } else { ret = FALSE; } break; } p = proto1; if (proto == p) { ret = TRUE; break; } } done: JS_FreeValue(ctx, obj_proto); return ret; } /* 
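   Implements the 'instanceof' operator: if 'obj' defines a Symbol.hasInstance method it is called, otherwise the ordinary prototype-chain check (JS_OrdinaryIsInstanceOf) is used.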
return TRUE, FALSE or (-1) in case of exception */ int JS_IsInstanceOf(JSContext *ctx, JSValueConst val, JSValueConst obj) { JSValue method; if (!JS_IsObject(obj)) goto fail; method = JS_GetProperty(ctx, obj, JS_ATOM_Symbol_hasInstance); if (JS_IsException(method)) return -1; if (!JS_IsNull(method) && !JS_IsUndefined(method)) { JSValue ret; ret = JS_CallFree(ctx, method, obj, 1, &val); return JS_ToBoolFree(ctx, ret); } /* legacy case */ if (!JS_IsFunction(ctx, obj)) { fail: JS_ThrowTypeError(ctx, "invalid 'instanceof' right operand"); return -1; } return JS_OrdinaryIsInstanceOf(ctx, val, obj); } /* return the value associated to the autoinit property or an exception */ typedef JSValue JSAutoInitFunc(JSContext *ctx, JSObject *p, JSAtom atom, void *opaque); static JSAutoInitFunc *js_autoinit_func_table[] = { js_instantiate_prototype, /* JS_AUTOINIT_ID_PROTOTYPE */ js_module_ns_autoinit, /* JS_AUTOINIT_ID_MODULE_NS */ JS_InstantiateFunctionListItem2, /* JS_AUTOINIT_ID_PROP */ }; /* warning: 'prs' is reallocated after it */ static int JS_AutoInitProperty(JSContext *ctx, JSObject *p, JSAtom prop, JSProperty *pr, JSShapeProperty *prs) { JSValue val; JSContext *realm; JSAutoInitFunc *func; if (js_shape_prepare_update(ctx, p, &prs)) return -1; realm = js_autoinit_get_realm(pr); func = js_autoinit_func_table[js_autoinit_get_id(pr)]; /* 'func' shall not modify the object properties 'pr' */ val = func(realm, p, prop, pr->u.init.opaque); js_autoinit_free(ctx->rt, pr); prs->flags &= ~JS_PROP_TMASK; pr->u.value = JS_UNDEFINED; if (JS_IsException(val)) return -1; pr->u.value = val; return 0; } JSValue JS_GetPropertyInternal(JSContext *ctx, JSValueConst obj, JSAtom prop, JSValueConst this_obj, BOOL throw_ref_error) { JSObject *p; JSProperty *pr; JSShapeProperty *prs; uint32_t tag; tag = JS_VALUE_GET_TAG(obj); if (unlikely(tag != JS_TAG_OBJECT)) { switch(tag) { case JS_TAG_NULL: return JS_ThrowTypeErrorAtom(ctx, "cannot read property '%s' of null", prop); case JS_TAG_UNDEFINED: return JS_ThrowTypeErrorAtom(ctx, "cannot read property '%s' of undefined", prop); case JS_TAG_EXCEPTION: return JS_EXCEPTION; case JS_TAG_STRING: { JSString *p1 = JS_VALUE_GET_STRING(obj); if (__JS_AtomIsTaggedInt(prop)) { uint32_t idx, ch; idx = __JS_AtomToUInt32(prop); if (idx < p1->len) { if (p1->is_wide_char) ch = p1->u.str16[idx]; else ch = p1->u.str8[idx]; return js_new_string_char(ctx, ch); } } else if (prop == JS_ATOM_length) { return JS_NewInt32(ctx, p1->len); } } break; default: break; } /* cannot raise an exception */ p = JS_VALUE_GET_OBJ(JS_GetPrototypePrimitive(ctx, obj)); if (!p) return JS_UNDEFINED; } else { p = JS_VALUE_GET_OBJ(obj); } for(;;) { prs = find_own_property(&pr, p, prop); if (prs) { /* found */ if (unlikely(prs->flags & JS_PROP_TMASK)) { if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) { if (unlikely(!pr->u.getset.getter)) { return JS_UNDEFINED; } else { JSValue func = JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.getter); /* Note: the field could be removed in the getter */ func = JS_DupValue(ctx, func); return JS_CallFree(ctx, func, this_obj, 0, NULL); } } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { JSValue val = *pr->u.var_ref->pvalue; if (unlikely(JS_IsUninitialized(val))) return JS_ThrowReferenceErrorUninitialized(ctx, prs->atom); return JS_DupValue(ctx, val); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { /* Instantiate property and retry */ if (JS_AutoInitProperty(ctx, p, prop, pr, prs)) return JS_EXCEPTION; continue; } } else { return JS_DupValue(ctx, pr->u.value); } } if 
(unlikely(p->is_exotic)) { /* exotic behaviors */ if (p->fast_array) { if (__JS_AtomIsTaggedInt(prop)) { uint32_t idx = __JS_AtomToUInt32(prop); if (idx < p->u.array.count) { /* we avoid duplicating the code */ return JS_GetPropertyUint32(ctx, JS_MKPTR(JS_TAG_OBJECT, p), idx); } else if (p->class_id >= JS_CLASS_UINT8C_ARRAY && p->class_id <= JS_CLASS_FLOAT64_ARRAY) { return JS_UNDEFINED; } } else if (p->class_id >= JS_CLASS_UINT8C_ARRAY && p->class_id <= JS_CLASS_FLOAT64_ARRAY) { int ret; ret = JS_AtomIsNumericIndex(ctx, prop); if (ret != 0) { if (ret < 0) return JS_EXCEPTION; return JS_UNDEFINED; } } } else { const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic; if (em) { if (em->get_property) { JSValue obj1, retval; /* XXX: should pass throw_ref_error */ /* Note: if 'p' is a prototype, it can be freed in the called function */ obj1 = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); retval = em->get_property(ctx, obj1, prop, this_obj); JS_FreeValue(ctx, obj1); return retval; } if (em->get_own_property) { JSPropertyDescriptor desc; int ret; JSValue obj1; /* Note: if 'p' is a prototype, it can be freed in the called function */ obj1 = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); ret = em->get_own_property(ctx, &desc, obj1, prop); JS_FreeValue(ctx, obj1); if (ret < 0) return JS_EXCEPTION; if (ret) { if (desc.flags & JS_PROP_GETSET) { JS_FreeValue(ctx, desc.setter); return JS_CallFree(ctx, desc.getter, this_obj, 0, NULL); } else { return desc.value; } } } } } } p = p->shape->proto; if (!p) break; } if (unlikely(throw_ref_error)) { return JS_ThrowReferenceErrorNotDefined(ctx, prop); } else { return JS_UNDEFINED; } } static JSValue JS_ThrowTypeErrorPrivateNotFound(JSContext *ctx, JSAtom atom) { return JS_ThrowTypeErrorAtom(ctx, "private class field '%s' does not exist", atom); } /* Private fields can be added even on non extensible objects or Proxies */ static int JS_DefinePrivateField(JSContext *ctx, JSValueConst obj, JSValueConst name, JSValue val) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; JSAtom prop; if (unlikely(JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT)) { JS_ThrowTypeErrorNotAnObject(ctx); goto fail; } /* safety check */ if (unlikely(JS_VALUE_GET_TAG(name) != JS_TAG_SYMBOL)) { JS_ThrowTypeErrorNotASymbol(ctx); goto fail; } #ifdef STRICT_R_HEADERS prop = js_symbol_to_atom(ctx, name); #else prop = js_symbol_to_atom(ctx, (JSValue)name); #endif p = JS_VALUE_GET_OBJ(obj); prs = find_own_property(&pr, p, prop); if (prs) { JS_ThrowTypeErrorAtom(ctx, "private class field '%s' already exists", prop); goto fail; } pr = add_property(ctx, p, prop, JS_PROP_C_W_E); if (unlikely(!pr)) { fail: JS_FreeValue(ctx, val); return -1; } pr->u.value = val; return 0; } static JSValue JS_GetPrivateField(JSContext *ctx, JSValueConst obj, JSValueConst name) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; JSAtom prop; if (unlikely(JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT)) return JS_ThrowTypeErrorNotAnObject(ctx); /* safety check */ if (unlikely(JS_VALUE_GET_TAG(name) != JS_TAG_SYMBOL)) return JS_ThrowTypeErrorNotASymbol(ctx); #ifdef STRICT_R_HEADERS prop = js_symbol_to_atom(ctx, name); #else prop = js_symbol_to_atom(ctx, (JSValue)name); #endif p = JS_VALUE_GET_OBJ(obj); prs = find_own_property(&pr, p, prop); if (!prs) { JS_ThrowTypeErrorPrivateNotFound(ctx, prop); return JS_EXCEPTION; } return JS_DupValue(ctx, pr->u.value); } static int JS_SetPrivateField(JSContext *ctx, JSValueConst obj, JSValueConst name, JSValue val) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; JSAtom prop; 
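    /* Overwrite the value of an existing private field: unlike JS_DefinePrivateField(), the field must already be present on 'obj'; otherwise a TypeError is thrown and 'val' is freed. */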
if (unlikely(JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT)) { JS_ThrowTypeErrorNotAnObject(ctx); goto fail; } /* safety check */ if (unlikely(JS_VALUE_GET_TAG(name) != JS_TAG_SYMBOL)) { JS_ThrowTypeErrorNotASymbol(ctx); goto fail; } #ifdef STRICT_R_HEADERS prop = js_symbol_to_atom(ctx, name); #else prop = js_symbol_to_atom(ctx, (JSValue)name); #endif p = JS_VALUE_GET_OBJ(obj); prs = find_own_property(&pr, p, prop); if (!prs) { JS_ThrowTypeErrorPrivateNotFound(ctx, prop); fail: JS_FreeValue(ctx, val); return -1; } set_value(ctx, &pr->u.value, val); return 0; } /* add a private brand field to 'home_obj' if not already present and if obj is != null add a private brand to it */ static int JS_AddBrand(JSContext *ctx, JSValueConst obj, JSValueConst home_obj) { JSObject *p, *p1; JSShapeProperty *prs; JSProperty *pr; JSValue brand; JSAtom brand_atom; if (unlikely(JS_VALUE_GET_TAG(home_obj) != JS_TAG_OBJECT)) { JS_ThrowTypeErrorNotAnObject(ctx); return -1; } p = JS_VALUE_GET_OBJ(home_obj); prs = find_own_property(&pr, p, JS_ATOM_Private_brand); if (!prs) { /* if the brand is not present, add it */ brand = JS_NewSymbolFromAtom(ctx, JS_ATOM_brand, JS_ATOM_TYPE_PRIVATE); if (JS_IsException(brand)) return -1; pr = add_property(ctx, p, JS_ATOM_Private_brand, JS_PROP_C_W_E); if (!pr) { JS_FreeValue(ctx, brand); return -1; } pr->u.value = JS_DupValue(ctx, brand); } else { brand = JS_DupValue(ctx, pr->u.value); } brand_atom = js_symbol_to_atom(ctx, brand); if (JS_IsObject(obj)) { p1 = JS_VALUE_GET_OBJ(obj); prs = find_own_property(&pr, p1, brand_atom); if (unlikely(prs)) { JS_FreeAtom(ctx, brand_atom); JS_ThrowTypeError(ctx, "private method is already present"); return -1; } pr = add_property(ctx, p1, brand_atom, JS_PROP_C_W_E); JS_FreeAtom(ctx, brand_atom); if (!pr) return -1; pr->u.value = JS_UNDEFINED; } else { JS_FreeAtom(ctx, brand_atom); } return 0; } /* return a boolean telling if the brand of the home object of 'func' is present on 'obj' or -1 in case of exception */ static int JS_CheckBrand(JSContext *ctx, JSValueConst obj, JSValueConst func) { JSObject *p, *p1, *home_obj; JSShapeProperty *prs; JSProperty *pr; JSValueConst brand; /* get the home object of 'func' */ if (unlikely(JS_VALUE_GET_TAG(func) != JS_TAG_OBJECT)) goto not_obj; p1 = JS_VALUE_GET_OBJ(func); if (!js_class_has_bytecode(p1->class_id)) goto not_obj; home_obj = p1->u.func.home_object; if (!home_obj) goto not_obj; prs = find_own_property(&pr, home_obj, JS_ATOM_Private_brand); if (!prs) { JS_ThrowTypeError(ctx, "expecting private field"); return -1; } brand = pr->u.value; /* safety check */ if (unlikely(JS_VALUE_GET_TAG(brand) != JS_TAG_SYMBOL)) goto not_obj; /* get the brand array of 'obj' */ if (unlikely(JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT)) { not_obj: JS_ThrowTypeErrorNotAnObject(ctx); return -1; } p = JS_VALUE_GET_OBJ(obj); #ifdef STRICT_R_HEADERS prs = find_own_property(&pr, p, js_symbol_to_atom(ctx, brand)); #else prs = find_own_property(&pr, p, js_symbol_to_atom(ctx, (JSValue)brand)); #endif return (prs != NULL); } static uint32_t js_string_obj_get_length(JSContext *ctx, JSValueConst obj) { JSObject *p; JSString *p1; uint32_t len = 0; /* This is a class exotic method: obj class_id is JS_CLASS_STRING */ p = JS_VALUE_GET_OBJ(obj); if (JS_VALUE_GET_TAG(p->u.object_data) == JS_TAG_STRING) { p1 = JS_VALUE_GET_STRING(p->u.object_data); len = p1->len; } return len; } static int num_keys_cmp(const void *p1, const void *p2, void *opaque) { JSContext *ctx = opaque; JSAtom atom1 = ((const JSPropertyEnum *)p1)->atom; JSAtom atom2 = ((const 
JSPropertyEnum *)p2)->atom; uint32_t v1, v2; BOOL atom1_is_integer, atom2_is_integer; atom1_is_integer = JS_AtomIsArrayIndex(ctx, &v1, atom1); atom2_is_integer = JS_AtomIsArrayIndex(ctx, &v2, atom2); assert(atom1_is_integer && atom2_is_integer); if (v1 < v2) return -1; else if (v1 == v2) return 0; else return 1; } static void js_free_prop_enum(JSContext *ctx, JSPropertyEnum *tab, uint32_t len) { uint32_t i; if (tab) { for(i = 0; i < len; i++) JS_FreeAtom(ctx, tab[i].atom); js_free(ctx, tab); } } /* return < 0 in case if exception, 0 if OK. ptab and its atoms must be freed by the user. */ static int __exception JS_GetOwnPropertyNamesInternal(JSContext *ctx, JSPropertyEnum **ptab, uint32_t *plen, JSObject *p, int flags) { int i, j; JSShape *sh; JSShapeProperty *prs; JSPropertyEnum *tab_atom, *tab_exotic; JSAtom atom; uint32_t num_keys_count, str_keys_count, sym_keys_count, atom_count; uint32_t num_index, str_index, sym_index, exotic_count, exotic_keys_count; BOOL is_enumerable, num_sorted; uint32_t num_key; JSAtomKindEnum kind; /* clear pointer for consistency in case of failure */ *ptab = NULL; *plen = 0; /* compute the number of returned properties */ num_keys_count = 0; str_keys_count = 0; sym_keys_count = 0; exotic_keys_count = 0; exotic_count = 0; tab_exotic = NULL; sh = p->shape; for(i = 0, prs = get_shape_prop(sh); i < sh->prop_count; i++, prs++) { atom = prs->atom; if (atom != JS_ATOM_NULL) { is_enumerable = ((prs->flags & JS_PROP_ENUMERABLE) != 0); kind = JS_AtomGetKind(ctx, atom); if ((!(flags & JS_GPN_ENUM_ONLY) || is_enumerable) && ((flags >> kind) & 1) != 0) { /* need to raise an exception in case of the module name space (implicit GetOwnProperty) */ if (unlikely((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) && (flags & (JS_GPN_SET_ENUM | JS_GPN_ENUM_ONLY))) { JSVarRef *var_ref = p->prop[i].u.var_ref; if (unlikely(JS_IsUninitialized(*var_ref->pvalue))) { JS_ThrowReferenceErrorUninitialized(ctx, prs->atom); return -1; } } if (JS_AtomIsArrayIndex(ctx, &num_key, atom)) { num_keys_count++; } else if (kind == JS_ATOM_KIND_STRING) { str_keys_count++; } else { sym_keys_count++; } } } } if (p->is_exotic) { if (p->fast_array) { if (flags & JS_GPN_STRING_MASK) { num_keys_count += p->u.array.count; } } else if (p->class_id == JS_CLASS_STRING) { if (flags & JS_GPN_STRING_MASK) { num_keys_count += js_string_obj_get_length(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); } } else { const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic; if (em && em->get_own_property_names) { if (em->get_own_property_names(ctx, &tab_exotic, &exotic_count, JS_MKPTR(JS_TAG_OBJECT, p))) return -1; for(i = 0; i < exotic_count; i++) { atom = tab_exotic[i].atom; kind = JS_AtomGetKind(ctx, atom); if (((flags >> kind) & 1) != 0) { is_enumerable = FALSE; if (flags & (JS_GPN_SET_ENUM | JS_GPN_ENUM_ONLY)) { JSPropertyDescriptor desc; int res; /* set the "is_enumerable" field if necessary */ res = JS_GetOwnPropertyInternal(ctx, &desc, p, atom); if (res < 0) { js_free_prop_enum(ctx, tab_exotic, exotic_count); return -1; } if (res) { is_enumerable = ((desc.flags & JS_PROP_ENUMERABLE) != 0); js_free_desc(ctx, &desc); } tab_exotic[i].is_enumerable = is_enumerable; } if (!(flags & JS_GPN_ENUM_ONLY) || is_enumerable) { exotic_keys_count++; } } } } } } /* fill them */ atom_count = num_keys_count + str_keys_count + sym_keys_count + exotic_keys_count; /* avoid allocating 0 bytes */ tab_atom = js_malloc(ctx, sizeof(tab_atom[0]) * max_int(atom_count, 1)); if (!tab_atom) { js_free_prop_enum(ctx, tab_exotic, exotic_count); 
return -1; } num_index = 0; str_index = num_keys_count; sym_index = str_index + str_keys_count; num_sorted = TRUE; sh = p->shape; for(i = 0, prs = get_shape_prop(sh); i < sh->prop_count; i++, prs++) { atom = prs->atom; if (atom != JS_ATOM_NULL) { is_enumerable = ((prs->flags & JS_PROP_ENUMERABLE) != 0); kind = JS_AtomGetKind(ctx, atom); if ((!(flags & JS_GPN_ENUM_ONLY) || is_enumerable) && ((flags >> kind) & 1) != 0) { if (JS_AtomIsArrayIndex(ctx, &num_key, atom)) { j = num_index++; num_sorted = FALSE; } else if (kind == JS_ATOM_KIND_STRING) { j = str_index++; } else { j = sym_index++; } tab_atom[j].atom = JS_DupAtom(ctx, atom); tab_atom[j].is_enumerable = is_enumerable; } } } if (p->is_exotic) { int len; if (p->fast_array) { if (flags & JS_GPN_STRING_MASK) { len = p->u.array.count; goto add_array_keys; } } else if (p->class_id == JS_CLASS_STRING) { if (flags & JS_GPN_STRING_MASK) { len = js_string_obj_get_length(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); add_array_keys: for(i = 0; i < len; i++) { tab_atom[num_index].atom = __JS_AtomFromUInt32(i); if (tab_atom[num_index].atom == JS_ATOM_NULL) { js_free_prop_enum(ctx, tab_atom, num_index); return -1; } tab_atom[num_index].is_enumerable = TRUE; num_index++; } } } else { /* Note: exotic keys are not reordered and comes after the object own properties. */ for(i = 0; i < exotic_count; i++) { atom = tab_exotic[i].atom; is_enumerable = tab_exotic[i].is_enumerable; kind = JS_AtomGetKind(ctx, atom); if ((!(flags & JS_GPN_ENUM_ONLY) || is_enumerable) && ((flags >> kind) & 1) != 0) { tab_atom[sym_index].atom = atom; tab_atom[sym_index].is_enumerable = is_enumerable; sym_index++; } else { JS_FreeAtom(ctx, atom); } } js_free(ctx, tab_exotic); } } assert(num_index == num_keys_count); assert(str_index == num_keys_count + str_keys_count); assert(sym_index == atom_count); if (num_keys_count != 0 && !num_sorted) { rqsort(tab_atom, num_keys_count, sizeof(tab_atom[0]), num_keys_cmp, ctx); } *ptab = tab_atom; *plen = atom_count; return 0; } int JS_GetOwnPropertyNames(JSContext *ctx, JSPropertyEnum **ptab, uint32_t *plen, JSValueConst obj, int flags) { if (JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) { JS_ThrowTypeErrorNotAnObject(ctx); return -1; } return JS_GetOwnPropertyNamesInternal(ctx, ptab, plen, JS_VALUE_GET_OBJ(obj), flags); } /* Return -1 if exception, FALSE if the property does not exist, TRUE if it exists. If TRUE is returned, the property descriptor 'desc' is filled present. 
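   ('desc' may be NULL when only an existence test is needed; when it is filled, the caller releases the duplicated values with js_free_desc().)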
*/ static int JS_GetOwnPropertyInternal(JSContext *ctx, JSPropertyDescriptor *desc, JSObject *p, JSAtom prop) { JSShapeProperty *prs; JSProperty *pr; retry: prs = find_own_property(&pr, p, prop); if (prs) { if (desc) { desc->flags = prs->flags & JS_PROP_C_W_E; desc->getter = JS_UNDEFINED; desc->setter = JS_UNDEFINED; desc->value = JS_UNDEFINED; if (unlikely(prs->flags & JS_PROP_TMASK)) { if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) { desc->flags |= JS_PROP_GETSET; if (pr->u.getset.getter) desc->getter = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.getter)); if (pr->u.getset.setter) desc->setter = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.setter)); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { JSValue val = *pr->u.var_ref->pvalue; if (unlikely(JS_IsUninitialized(val))) { JS_ThrowReferenceErrorUninitialized(ctx, prs->atom); return -1; } desc->value = JS_DupValue(ctx, val); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { /* Instantiate property and retry */ if (JS_AutoInitProperty(ctx, p, prop, pr, prs)) return -1; goto retry; } } else { desc->value = JS_DupValue(ctx, pr->u.value); } } else { /* for consistency, send the exception even if desc is NULL */ if (unlikely((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF)) { if (unlikely(JS_IsUninitialized(*pr->u.var_ref->pvalue))) { JS_ThrowReferenceErrorUninitialized(ctx, prs->atom); return -1; } } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { /* nothing to do: delay instantiation until actual value and/or attributes are read */ } } return TRUE; } if (p->is_exotic) { if (p->fast_array) { /* specific case for fast arrays */ if (__JS_AtomIsTaggedInt(prop)) { uint32_t idx; idx = __JS_AtomToUInt32(prop); if (idx < p->u.array.count) { if (desc) { desc->flags = JS_PROP_WRITABLE | JS_PROP_ENUMERABLE | JS_PROP_CONFIGURABLE; desc->getter = JS_UNDEFINED; desc->setter = JS_UNDEFINED; desc->value = JS_GetPropertyUint32(ctx, JS_MKPTR(JS_TAG_OBJECT, p), idx); } return TRUE; } } } else { const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic; if (em && em->get_own_property) { return em->get_own_property(ctx, desc, JS_MKPTR(JS_TAG_OBJECT, p), prop); } } } return FALSE; } int JS_GetOwnProperty(JSContext *ctx, JSPropertyDescriptor *desc, JSValueConst obj, JSAtom prop) { if (JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) { JS_ThrowTypeErrorNotAnObject(ctx); return -1; } return JS_GetOwnPropertyInternal(ctx, desc, JS_VALUE_GET_OBJ(obj), prop); } /* return -1 if exception (Proxy object only) or TRUE/FALSE */ int JS_IsExtensible(JSContext *ctx, JSValueConst obj) { JSObject *p; if (unlikely(JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT)) return FALSE; p = JS_VALUE_GET_OBJ(obj); if (unlikely(p->class_id == JS_CLASS_PROXY)) return js_proxy_isExtensible(ctx, obj); else return p->extensible; } /* return -1 if exception (Proxy object only) or TRUE/FALSE */ int JS_PreventExtensions(JSContext *ctx, JSValueConst obj) { JSObject *p; if (unlikely(JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT)) return FALSE; p = JS_VALUE_GET_OBJ(obj); if (unlikely(p->class_id == JS_CLASS_PROXY)) return js_proxy_preventExtensions(ctx, obj); p->extensible = FALSE; return TRUE; } /* return -1 if exception otherwise TRUE or FALSE */ int JS_HasProperty(JSContext *ctx, JSValueConst obj, JSAtom prop) { JSObject *p; int ret; JSValue obj1; if (unlikely(JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT)) return FALSE; p = JS_VALUE_GET_OBJ(obj); for(;;) { if (p->is_exotic) { const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic; 
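        /* an exotic class may implement the [[HasProperty]] check itself via its has_property hook */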
if (em && em->has_property) { /* has_property can free the prototype */ obj1 = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); ret = em->has_property(ctx, obj1, prop); JS_FreeValue(ctx, obj1); return ret; } } /* JS_GetOwnPropertyInternal can free the prototype */ JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); ret = JS_GetOwnPropertyInternal(ctx, NULL, p, prop); JS_FreeValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); if (ret != 0) return ret; if (p->class_id >= JS_CLASS_UINT8C_ARRAY && p->class_id <= JS_CLASS_FLOAT64_ARRAY) { ret = JS_AtomIsNumericIndex(ctx, prop); if (ret != 0) { if (ret < 0) return -1; return FALSE; } } p = p->shape->proto; if (!p) break; } return FALSE; } /* val must be a symbol */ static JSAtom js_symbol_to_atom(JSContext *ctx, JSValue val) { JSAtomStruct *p = JS_VALUE_GET_PTR(val); return js_get_atom_index(ctx->rt, p); } /* return JS_ATOM_NULL in case of exception */ JSAtom JS_ValueToAtom(JSContext *ctx, JSValueConst val) { JSAtom atom; uint32_t tag; tag = JS_VALUE_GET_TAG(val); if (tag == JS_TAG_INT && (uint32_t)JS_VALUE_GET_INT(val) <= JS_ATOM_MAX_INT) { /* fast path for integer values */ atom = __JS_AtomFromUInt32(JS_VALUE_GET_INT(val)); } else if (tag == JS_TAG_SYMBOL) { JSAtomStruct *p = JS_VALUE_GET_PTR(val); atom = JS_DupAtom(ctx, js_get_atom_index(ctx->rt, p)); } else { JSValue str; str = JS_ToPropertyKey(ctx, val); if (JS_IsException(str)) return JS_ATOM_NULL; if (JS_VALUE_GET_TAG(str) == JS_TAG_SYMBOL) { atom = js_symbol_to_atom(ctx, str); } else { atom = JS_NewAtomStr(ctx, JS_VALUE_GET_STRING(str)); } } return atom; } static JSValue JS_GetPropertyValue(JSContext *ctx, JSValueConst this_obj, JSValue prop) { JSAtom atom; JSValue ret; if (likely(JS_VALUE_GET_TAG(this_obj) == JS_TAG_OBJECT && JS_VALUE_GET_TAG(prop) == JS_TAG_INT)) { JSObject *p; uint32_t idx; /* fast path for array access */ p = JS_VALUE_GET_OBJ(this_obj); idx = JS_VALUE_GET_INT(prop); switch(p->class_id) { case JS_CLASS_ARRAY: case JS_CLASS_ARGUMENTS: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_DupValue(ctx, p->u.array.u.values[idx]); case JS_CLASS_INT8_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewInt32(ctx, p->u.array.u.int8_ptr[idx]); case JS_CLASS_UINT8C_ARRAY: case JS_CLASS_UINT8_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewInt32(ctx, p->u.array.u.uint8_ptr[idx]); case JS_CLASS_INT16_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewInt32(ctx, p->u.array.u.int16_ptr[idx]); case JS_CLASS_UINT16_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewInt32(ctx, p->u.array.u.uint16_ptr[idx]); case JS_CLASS_INT32_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewInt32(ctx, p->u.array.u.int32_ptr[idx]); case JS_CLASS_UINT32_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewUint32(ctx, p->u.array.u.uint32_ptr[idx]); case JS_CLASS_BIG_INT64_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewBigInt64(ctx, p->u.array.u.int64_ptr[idx]); case JS_CLASS_BIG_UINT64_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return JS_NewBigUint64(ctx, p->u.array.u.uint64_ptr[idx]); case JS_CLASS_FLOAT32_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return __JS_NewFloat64(ctx, p->u.array.u.float_ptr[idx]); case JS_CLASS_FLOAT64_ARRAY: if (unlikely(idx >= p->u.array.count)) goto slow_path; return __JS_NewFloat64(ctx, p->u.array.u.double_ptr[idx]); default: goto slow_path; } } else { slow_path: atom = 
JS_ValueToAtom(ctx, prop); JS_FreeValue(ctx, prop); if (unlikely(atom == JS_ATOM_NULL)) return JS_EXCEPTION; ret = JS_GetProperty(ctx, this_obj, atom); JS_FreeAtom(ctx, atom); return ret; } } JSValue JS_GetPropertyUint32(JSContext *ctx, JSValueConst this_obj, uint32_t idx) { return JS_GetPropertyValue(ctx, this_obj, JS_NewUint32(ctx, idx)); } /* Check if an object has a generalized numeric property. Return value: -1 for exception, TRUE if property exists, stored into *pval, FALSE if proprty does not exist. */ static int JS_TryGetPropertyInt64(JSContext *ctx, JSValueConst obj, int64_t idx, JSValue *pval) { JSValue val = JS_UNDEFINED; JSAtom prop; int present; if (likely((uint64_t)idx <= JS_ATOM_MAX_INT)) { /* fast path */ present = JS_HasProperty(ctx, obj, __JS_AtomFromUInt32(idx)); if (present > 0) { val = JS_GetPropertyValue(ctx, obj, JS_NewInt32(ctx, idx)); if (unlikely(JS_IsException(val))) present = -1; } } else { prop = JS_NewAtomInt64(ctx, idx); present = -1; if (likely(prop != JS_ATOM_NULL)) { present = JS_HasProperty(ctx, obj, prop); if (present > 0) { val = JS_GetProperty(ctx, obj, prop); if (unlikely(JS_IsException(val))) present = -1; } JS_FreeAtom(ctx, prop); } } *pval = val; return present; } static JSValue JS_GetPropertyInt64(JSContext *ctx, JSValueConst obj, int64_t idx) { JSAtom prop; JSValue val; if ((uint64_t)idx <= INT32_MAX) { /* fast path for fast arrays */ return JS_GetPropertyValue(ctx, obj, JS_NewInt32(ctx, idx)); } prop = JS_NewAtomInt64(ctx, idx); if (prop == JS_ATOM_NULL) return JS_EXCEPTION; val = JS_GetProperty(ctx, obj, prop); JS_FreeAtom(ctx, prop); return val; } JSValue JS_GetPropertyStr(JSContext *ctx, JSValueConst this_obj, const char *prop) { JSAtom atom; JSValue ret; atom = JS_NewAtom(ctx, prop); ret = JS_GetProperty(ctx, this_obj, atom); JS_FreeAtom(ctx, atom); return ret; } /* Note: the property value is not initialized. Return NULL if memory error. */ static JSProperty *add_property(JSContext *ctx, JSObject *p, JSAtom prop, int prop_flags) { JSShape *sh, *new_sh; sh = p->shape; if (sh->is_hashed) { /* try to find an existing shape */ new_sh = find_hashed_shape_prop(ctx->rt, sh, prop, prop_flags); if (new_sh) { /* matching shape found: use it */ /* the property array may need to be resized */ if (new_sh->prop_size != sh->prop_size) { JSProperty *new_prop; new_prop = js_realloc(ctx, p->prop, sizeof(p->prop[0]) * new_sh->prop_size); if (!new_prop) return NULL; p->prop = new_prop; } p->shape = js_dup_shape(new_sh); js_free_shape(ctx->rt, sh); return &p->prop[new_sh->prop_count - 1]; } else if (sh->header.ref_count != 1) { /* if the shape is shared, clone it */ new_sh = js_clone_shape(ctx, sh); if (!new_sh) return NULL; /* hash the cloned shape */ new_sh->is_hashed = TRUE; js_shape_hash_link(ctx->rt, new_sh); js_free_shape(ctx->rt, p->shape); p->shape = new_sh; } } assert(p->shape->header.ref_count == 1); if (add_shape_property(ctx, &p->shape, p, prop, prop_flags)) return NULL; return &p->prop[p->shape->prop_count - 1]; } /* can be called on Array or Arguments objects. return < 0 if memory alloc error. 
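   Each element of the fast array is re-added as an ordinary numbered property with default C_W_E flags; the fast array storage is then freed and p->fast_array is cleared.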
*/ static no_inline __exception int convert_fast_array_to_array(JSContext *ctx, JSObject *p) { JSProperty *pr; JSShape *sh; JSValue *tab; uint32_t i, len, new_count; if (js_shape_prepare_update(ctx, p, NULL)) return -1; len = p->u.array.count; /* resize the properties once to simplify the error handling */ sh = p->shape; new_count = sh->prop_count + len; if (new_count > sh->prop_size) { if (resize_properties(ctx, &p->shape, p, new_count)) return -1; } tab = p->u.array.u.values; for(i = 0; i < len; i++) { /* add_property cannot fail here but __JS_AtomFromUInt32(i) fails for i > INT32_MAX */ pr = add_property(ctx, p, __JS_AtomFromUInt32(i), JS_PROP_C_W_E); pr->u.value = *tab++; } js_free(ctx, p->u.array.u.values); p->u.array.count = 0; p->u.array.u.values = NULL; /* fail safe */ p->u.array.u1.size = 0; p->fast_array = 0; return 0; } static int delete_property(JSContext *ctx, JSObject *p, JSAtom atom) { JSShape *sh; JSShapeProperty *pr, *lpr, *prop; JSProperty *pr1; uint32_t lpr_idx; intptr_t h, h1; redo: sh = p->shape; h1 = atom & sh->prop_hash_mask; h = prop_hash_end(sh)[-h1 - 1]; prop = get_shape_prop(sh); lpr = NULL; lpr_idx = 0; /* prevent warning */ while (h != 0) { pr = &prop[h - 1]; if (likely(pr->atom == atom)) { /* found ! */ if (!(pr->flags & JS_PROP_CONFIGURABLE)) return FALSE; /* realloc the shape if needed */ if (lpr) lpr_idx = lpr - get_shape_prop(sh); if (js_shape_prepare_update(ctx, p, &pr)) return -1; sh = p->shape; /* remove property */ if (lpr) { lpr = get_shape_prop(sh) + lpr_idx; lpr->hash_next = pr->hash_next; } else { prop_hash_end(sh)[-h1 - 1] = pr->hash_next; } sh->deleted_prop_count++; /* free the entry */ pr1 = &p->prop[h - 1]; free_property(ctx->rt, pr1, pr->flags); JS_FreeAtom(ctx, pr->atom); /* put default values */ pr->flags = 0; pr->atom = JS_ATOM_NULL; pr1->u.value = JS_UNDEFINED; /* compact the properties if too many deleted properties */ if (sh->deleted_prop_count >= 8 && sh->deleted_prop_count >= ((unsigned)sh->prop_count / 2)) { compact_properties(ctx, p); } return TRUE; } lpr = pr; h = pr->hash_next; } if (p->is_exotic) { if (p->fast_array) { uint32_t idx; if (JS_AtomIsArrayIndex(ctx, &idx, atom) && idx < p->u.array.count) { if (p->class_id == JS_CLASS_ARRAY || p->class_id == JS_CLASS_ARGUMENTS) { /* Special case deleting the last element of a fast Array */ if (idx == p->u.array.count - 1) { JS_FreeValue(ctx, p->u.array.u.values[idx]); p->u.array.count = idx; return TRUE; } if (convert_fast_array_to_array(ctx, p)) return -1; goto redo; } else { return FALSE; } } } else { const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic; if (em && em->delete_property) { return em->delete_property(ctx, JS_MKPTR(JS_TAG_OBJECT, p), atom); } } } /* not found */ return TRUE; } static int call_setter(JSContext *ctx, JSObject *setter, JSValueConst this_obj, JSValue val, int flags) { JSValue ret, func; if (likely(setter)) { func = JS_MKPTR(JS_TAG_OBJECT, setter); /* Note: the field could be removed in the setter */ func = JS_DupValue(ctx, func); ret = JS_CallFree(ctx, func, this_obj, 1, (JSValueConst *)&val); JS_FreeValue(ctx, val); if (JS_IsException(ret)) return -1; JS_FreeValue(ctx, ret); return TRUE; } else { JS_FreeValue(ctx, val); if ((flags & JS_PROP_THROW) || ((flags & JS_PROP_THROW_STRICT) && is_strict_mode(ctx))) { JS_ThrowTypeError(ctx, "no setter for property"); return -1; } return FALSE; } } /* set the array length and remove the array elements if necessary. 
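   For a fast array, shrinking the length frees the values beyond the new length (e.g. setting 'length' to 2 on a 4-element fast array frees indices 2 and 3). For a generic array, every index property >= the new length is deleted; a non-configurable element stops the truncation and the failure is reported through JS_ThrowTypeErrorOrFalse().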
*/ static int set_array_length(JSContext *ctx, JSObject *p, JSValue val, int flags) { uint32_t len, idx, cur_len; int i, ret; /* Note: this call can reallocate the properties of 'p' */ ret = JS_ToArrayLengthFree(ctx, &len, val, FALSE); if (ret) return -1; /* JS_ToArrayLengthFree() must be done before the read-only test */ if (unlikely(!(p->shape->prop[0].flags & JS_PROP_WRITABLE))) return JS_ThrowTypeErrorReadOnly(ctx, flags, JS_ATOM_length); if (likely(p->fast_array)) { uint32_t old_len = p->u.array.count; if (len < old_len) { for(i = len; i < old_len; i++) { JS_FreeValue(ctx, p->u.array.u.values[i]); } p->u.array.count = len; } p->prop[0].u.value = JS_NewUint32(ctx, len); } else { /* Note: length is always a uint32 because the object is an array */ JS_ToUint32(ctx, &cur_len, p->prop[0].u.value); if (len < cur_len) { uint32_t d; JSShape *sh; JSShapeProperty *pr; d = cur_len - len; sh = p->shape; if (d <= sh->prop_count) { JSAtom atom; /* faster to iterate */ while (cur_len > len) { atom = JS_NewAtomUInt32(ctx, cur_len - 1); ret = delete_property(ctx, p, atom); JS_FreeAtom(ctx, atom); if (unlikely(!ret)) { /* unlikely case: property is not configurable */ break; } cur_len--; } } else { /* faster to iterate thru all the properties. Need two passes in case one of the property is not configurable */ cur_len = len; for(i = 0, pr = get_shape_prop(sh); i < sh->prop_count; i++, pr++) { if (pr->atom != JS_ATOM_NULL && JS_AtomIsArrayIndex(ctx, &idx, pr->atom)) { if (idx >= cur_len && !(pr->flags & JS_PROP_CONFIGURABLE)) { cur_len = idx + 1; } } } for(i = 0, pr = get_shape_prop(sh); i < sh->prop_count; i++, pr++) { if (pr->atom != JS_ATOM_NULL && JS_AtomIsArrayIndex(ctx, &idx, pr->atom)) { if (idx >= cur_len) { /* remove the property */ delete_property(ctx, p, pr->atom); /* WARNING: the shape may have been modified */ sh = p->shape; pr = get_shape_prop(sh) + i; } } } } } else { cur_len = len; } set_value(ctx, &p->prop[0].u.value, JS_NewUint32(ctx, cur_len)); if (unlikely(cur_len > len)) { return JS_ThrowTypeErrorOrFalse(ctx, flags, "not configurable"); } } return TRUE; } /* return -1 if exception */ static int expand_fast_array(JSContext *ctx, JSObject *p, uint32_t new_len) { uint32_t new_size; size_t slack; JSValue *new_array_prop; /* XXX: potential arithmetic overflow */ new_size = max_int(new_len, p->u.array.u1.size * 3 / 2); new_array_prop = js_realloc2(ctx, p->u.array.u.values, sizeof(JSValue) * new_size, &slack); if (!new_array_prop) return -1; new_size += slack / sizeof(*new_array_prop); p->u.array.u.values = new_array_prop; p->u.array.u1.size = new_size; return 0; } /* Preconditions: 'p' must be of class JS_CLASS_ARRAY, p->fast_array = TRUE and p->extensible = TRUE */ static int add_fast_array_element(JSContext *ctx, JSObject *p, JSValue val, int flags) { uint32_t new_len, array_len; /* extend the array by one */ /* XXX: convert to slow array if new_len > 2^31-1 elements */ new_len = p->u.array.count + 1; /* update the length if necessary. We assume that if the length is not an integer, then if it >= 2^31. 
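   (i.e. if 'length' is not stored with the JS_TAG_INT tag it is at least 2^31, which can never be smaller than the new element count, so no update is needed)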
*/ if (likely(JS_VALUE_GET_TAG(p->prop[0].u.value) == JS_TAG_INT)) { array_len = JS_VALUE_GET_INT(p->prop[0].u.value); if (new_len > array_len) { if (unlikely(!(get_shape_prop(p->shape)->flags & JS_PROP_WRITABLE))) { JS_FreeValue(ctx, val); return JS_ThrowTypeErrorReadOnly(ctx, flags, JS_ATOM_length); } p->prop[0].u.value = JS_NewInt32(ctx, new_len); } } if (unlikely(new_len > p->u.array.u1.size)) { if (expand_fast_array(ctx, p, new_len)) { JS_FreeValue(ctx, val); return -1; } } p->u.array.u.values[new_len - 1] = val; p->u.array.count = new_len; return TRUE; } /* Allocate a new fast array. Its 'length' property is set to zero. It maximum size is 2^31-1 elements. For convenience, 'len' is a 64 bit integer. WARNING: the content of the array is not initialized. */ static JSValue js_allocate_fast_array(JSContext *ctx, int64_t len) { JSValue arr; JSObject *p; if (len > INT32_MAX) return JS_ThrowRangeError(ctx, "invalid array length"); arr = JS_NewArray(ctx); if (JS_IsException(arr)) return arr; if (len > 0) { p = JS_VALUE_GET_OBJ(arr); if (expand_fast_array(ctx, p, len) < 0) { JS_FreeValue(ctx, arr); return JS_EXCEPTION; } p->u.array.count = len; } return arr; } static void js_free_desc(JSContext *ctx, JSPropertyDescriptor *desc) { JS_FreeValue(ctx, desc->getter); JS_FreeValue(ctx, desc->setter); JS_FreeValue(ctx, desc->value); } /* return -1 in case of exception or TRUE or FALSE. Warning: 'val' is freed by the function. 'flags' is a bitmask of JS_PROP_NO_ADD, JS_PROP_THROW or JS_PROP_THROW_STRICT. If JS_PROP_NO_ADD is set, the new property is not added and an error is raised. 'this_obj' is the receiver. If obj != this_obj, then obj must be an object (Reflect.set case). */ int JS_SetPropertyInternal(JSContext *ctx, JSValueConst obj, JSAtom prop, JSValue val, JSValueConst this_obj, int flags) { JSObject *p, *p1; JSShapeProperty *prs; JSProperty *pr; uint32_t tag; JSPropertyDescriptor desc; int ret; #if 0 printf("JS_SetPropertyInternal: "); print_atom(ctx, prop); printf("\n"); #endif tag = JS_VALUE_GET_TAG(this_obj); if (unlikely(tag != JS_TAG_OBJECT)) { if (JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT) { p = NULL; p1 = JS_VALUE_GET_OBJ(obj); goto prototype_lookup; } else { switch(tag) { case JS_TAG_NULL: JS_FreeValue(ctx, val); JS_ThrowTypeErrorAtom(ctx, "cannot set property '%s' of null", prop); return -1; case JS_TAG_UNDEFINED: JS_FreeValue(ctx, val); JS_ThrowTypeErrorAtom(ctx, "cannot set property '%s' of undefined", prop); return -1; default: /* even on a primitive type we can have setters on the prototype */ p = NULL; p1 = JS_VALUE_GET_OBJ(JS_GetPrototypePrimitive(ctx, obj)); goto prototype_lookup; } } } else { p = JS_VALUE_GET_OBJ(this_obj); p1 = JS_VALUE_GET_OBJ(obj); if (unlikely(p != p1)) goto retry2; } /* fast path if obj == this_obj */ retry: prs = find_own_property(&pr, p1, prop); if (prs) { if (likely((prs->flags & (JS_PROP_TMASK | JS_PROP_WRITABLE | JS_PROP_LENGTH)) == JS_PROP_WRITABLE)) { /* fast case */ set_value(ctx, &pr->u.value, val); return TRUE; } else if (prs->flags & JS_PROP_LENGTH) { assert(p->class_id == JS_CLASS_ARRAY); assert(prop == JS_ATOM_length); return set_array_length(ctx, p, val, flags); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) { return call_setter(ctx, pr->u.getset.setter, this_obj, val, flags); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { /* JS_PROP_WRITABLE is always true for variable references, but they are write protected in module name spaces. 
*/ if (p->class_id == JS_CLASS_MODULE_NS) goto read_only_prop; set_value(ctx, pr->u.var_ref->pvalue, val); return TRUE; } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { /* Instantiate property and retry (potentially useless) */ if (JS_AutoInitProperty(ctx, p, prop, pr, prs)) { JS_FreeValue(ctx, val); return -1; } goto retry; } else { goto read_only_prop; } } for(;;) { if (p1->is_exotic) { if (p1->fast_array) { if (__JS_AtomIsTaggedInt(prop)) { uint32_t idx = __JS_AtomToUInt32(prop); if (idx < p1->u.array.count) { if (unlikely(p == p1)) return JS_SetPropertyValue(ctx, this_obj, JS_NewInt32(ctx, idx), val, flags); else break; } else if (p1->class_id >= JS_CLASS_UINT8C_ARRAY && p1->class_id <= JS_CLASS_FLOAT64_ARRAY) { goto typed_array_oob; } } else if (p1->class_id >= JS_CLASS_UINT8C_ARRAY && p1->class_id <= JS_CLASS_FLOAT64_ARRAY) { ret = JS_AtomIsNumericIndex(ctx, prop); if (ret != 0) { if (ret < 0) { JS_FreeValue(ctx, val); return -1; } typed_array_oob: /* must convert the argument even if out of bound access */ if (p1->class_id == JS_CLASS_BIG_INT64_ARRAY || p1->class_id == JS_CLASS_BIG_UINT64_ARRAY) { int64_t v; if (JS_ToBigInt64Free(ctx, &v, val)) return -1; } else { val = JS_ToNumberFree(ctx, val); JS_FreeValue(ctx, val); if (JS_IsException(val)) return -1; } return TRUE; } } } else { const JSClassExoticMethods *em = ctx->rt->class_array[p1->class_id].exotic; if (em) { JSValue obj1; if (em->set_property) { /* set_property can free the prototype */ obj1 = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p1)); ret = em->set_property(ctx, obj1, prop, val, this_obj, flags); JS_FreeValue(ctx, obj1); JS_FreeValue(ctx, val); return ret; } if (em->get_own_property) { /* get_own_property can free the prototype */ obj1 = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p1)); ret = em->get_own_property(ctx, &desc, obj1, prop); JS_FreeValue(ctx, obj1); if (ret < 0) { JS_FreeValue(ctx, val); return ret; } if (ret) { if (desc.flags & JS_PROP_GETSET) { JSObject *setter; if (JS_IsUndefined(desc.setter)) setter = NULL; else setter = JS_VALUE_GET_OBJ(desc.setter); ret = call_setter(ctx, setter, this_obj, val, flags); JS_FreeValue(ctx, desc.getter); JS_FreeValue(ctx, desc.setter); return ret; } else { JS_FreeValue(ctx, desc.value); if (!(desc.flags & JS_PROP_WRITABLE)) goto read_only_prop; if (likely(p == p1)) { ret = JS_DefineProperty(ctx, this_obj, prop, val, JS_UNDEFINED, JS_UNDEFINED, JS_PROP_HAS_VALUE); JS_FreeValue(ctx, val); return ret; } else { break; } } } } } } } p1 = p1->shape->proto; prototype_lookup: if (!p1) break; retry2: prs = find_own_property(&pr, p1, prop); if (prs) { if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) { return call_setter(ctx, pr->u.getset.setter, this_obj, val, flags); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { /* Instantiate property and retry (potentially useless) */ if (JS_AutoInitProperty(ctx, p1, prop, pr, prs)) return -1; goto retry2; } else if (!(prs->flags & JS_PROP_WRITABLE)) { goto read_only_prop; } } } if (unlikely(flags & JS_PROP_NO_ADD)) { JS_FreeValue(ctx, val); JS_ThrowReferenceErrorNotDefined(ctx, prop); return -1; } if (unlikely(!p)) { JS_FreeValue(ctx, val); return JS_ThrowTypeErrorOrFalse(ctx, flags, "not an object"); } if (unlikely(!p->extensible)) { JS_FreeValue(ctx, val); return JS_ThrowTypeErrorOrFalse(ctx, flags, "object is not extensible"); } if (likely(p == JS_VALUE_GET_OBJ(obj))) { if (p->is_exotic) { if (p->class_id == JS_CLASS_ARRAY && p->fast_array && __JS_AtomIsTaggedInt(prop)) { uint32_t idx = 
__JS_AtomToUInt32(prop); if (idx == p->u.array.count) { /* fast case */ return add_fast_array_element(ctx, p, val, flags); } else { goto generic_create_prop; } } else { goto generic_create_prop; } } else { pr = add_property(ctx, p, prop, JS_PROP_C_W_E); if (unlikely(!pr)) { JS_FreeValue(ctx, val); return -1; } pr->u.value = val; return TRUE; } } else { /* generic case: modify the property in this_obj if it already exists */ ret = JS_GetOwnPropertyInternal(ctx, &desc, p, prop); if (ret < 0) { JS_FreeValue(ctx, val); return ret; } if (ret) { if (desc.flags & JS_PROP_GETSET) { JS_FreeValue(ctx, desc.getter); JS_FreeValue(ctx, desc.setter); JS_FreeValue(ctx, val); return JS_ThrowTypeErrorOrFalse(ctx, flags, "setter is forbidden"); } else { JS_FreeValue(ctx, desc.value); if (!(desc.flags & JS_PROP_WRITABLE) || p->class_id == JS_CLASS_MODULE_NS) { read_only_prop: JS_FreeValue(ctx, val); return JS_ThrowTypeErrorReadOnly(ctx, flags, prop); } } ret = JS_DefineProperty(ctx, this_obj, prop, val, JS_UNDEFINED, JS_UNDEFINED, JS_PROP_HAS_VALUE); JS_FreeValue(ctx, val); return ret; } else { generic_create_prop: ret = JS_CreateProperty(ctx, p, prop, val, JS_UNDEFINED, JS_UNDEFINED, flags | JS_PROP_HAS_VALUE | JS_PROP_HAS_ENUMERABLE | JS_PROP_HAS_WRITABLE | JS_PROP_HAS_CONFIGURABLE | JS_PROP_C_W_E); JS_FreeValue(ctx, val); return ret; } } } /* flags can be JS_PROP_THROW or JS_PROP_THROW_STRICT */ static int JS_SetPropertyValue(JSContext *ctx, JSValueConst this_obj, JSValue prop, JSValue val, int flags) { if (likely(JS_VALUE_GET_TAG(this_obj) == JS_TAG_OBJECT && JS_VALUE_GET_TAG(prop) == JS_TAG_INT)) { JSObject *p; uint32_t idx; double d; int32_t v; /* fast path for array access */ p = JS_VALUE_GET_OBJ(this_obj); idx = JS_VALUE_GET_INT(prop); switch(p->class_id) { case JS_CLASS_ARRAY: if (unlikely(idx >= (uint32_t)p->u.array.count)) { JSObject *p1; JSShape *sh1; /* fast path to add an element to the array */ if (idx != (uint32_t)p->u.array.count || !p->fast_array || !p->extensible) goto slow_path; /* check if prototype chain has a numeric property */ p1 = p->shape->proto; while (p1 != NULL) { sh1 = p1->shape; if (p1->class_id == JS_CLASS_ARRAY) { if (unlikely(!p1->fast_array)) goto slow_path; } else if (p1->class_id == JS_CLASS_OBJECT) { if (unlikely(sh1->has_small_array_index)) goto slow_path; } else { goto slow_path; } p1 = sh1->proto; } /* add element */ return add_fast_array_element(ctx, p, val, flags); } set_value(ctx, &p->u.array.u.values[idx], val); break; case JS_CLASS_ARGUMENTS: if (unlikely(idx >= (uint32_t)p->u.array.count)) goto slow_path; set_value(ctx, &p->u.array.u.values[idx], val); break; case JS_CLASS_UINT8C_ARRAY: if (JS_ToUint8ClampFree(ctx, &v, val)) return -1; /* Note: the conversion can detach the typed array, so the array bound check must be done after */ if (unlikely(idx >= (uint32_t)p->u.array.count)) goto ta_out_of_bound; p->u.array.u.uint8_ptr[idx] = v; break; case JS_CLASS_INT8_ARRAY: case JS_CLASS_UINT8_ARRAY: if (JS_ToInt32Free(ctx, &v, val)) return -1; if (unlikely(idx >= (uint32_t)p->u.array.count)) goto ta_out_of_bound; p->u.array.u.uint8_ptr[idx] = v; break; case JS_CLASS_INT16_ARRAY: case JS_CLASS_UINT16_ARRAY: if (JS_ToInt32Free(ctx, &v, val)) return -1; if (unlikely(idx >= (uint32_t)p->u.array.count)) goto ta_out_of_bound; p->u.array.u.uint16_ptr[idx] = v; break; case JS_CLASS_INT32_ARRAY: case JS_CLASS_UINT32_ARRAY: if (JS_ToInt32Free(ctx, &v, val)) return -1; if (unlikely(idx >= (uint32_t)p->u.array.count)) goto ta_out_of_bound; p->u.array.u.uint32_ptr[idx] = v; 
break; case JS_CLASS_BIG_INT64_ARRAY: case JS_CLASS_BIG_UINT64_ARRAY: /* XXX: need specific conversion function */ { int64_t v; if (JS_ToBigInt64Free(ctx, &v, val)) return -1; if (unlikely(idx >= (uint32_t)p->u.array.count)) goto ta_out_of_bound; p->u.array.u.uint64_ptr[idx] = v; } break; case JS_CLASS_FLOAT32_ARRAY: if (JS_ToFloat64Free(ctx, &d, val)) return -1; if (unlikely(idx >= (uint32_t)p->u.array.count)) goto ta_out_of_bound; p->u.array.u.float_ptr[idx] = d; break; case JS_CLASS_FLOAT64_ARRAY: if (JS_ToFloat64Free(ctx, &d, val)) return -1; if (unlikely(idx >= (uint32_t)p->u.array.count)) { ta_out_of_bound: return TRUE; } p->u.array.u.double_ptr[idx] = d; break; default: goto slow_path; } return TRUE; } else { JSAtom atom; int ret; slow_path: atom = JS_ValueToAtom(ctx, prop); JS_FreeValue(ctx, prop); if (unlikely(atom == JS_ATOM_NULL)) { JS_FreeValue(ctx, val); return -1; } ret = JS_SetPropertyInternal(ctx, this_obj, atom, val, this_obj, flags); JS_FreeAtom(ctx, atom); return ret; } } int JS_SetPropertyUint32(JSContext *ctx, JSValueConst this_obj, uint32_t idx, JSValue val) { return JS_SetPropertyValue(ctx, this_obj, JS_NewUint32(ctx, idx), val, JS_PROP_THROW); } int JS_SetPropertyInt64(JSContext *ctx, JSValueConst this_obj, int64_t idx, JSValue val) { JSAtom prop; int res; if ((uint64_t)idx <= INT32_MAX) { /* fast path for fast arrays */ return JS_SetPropertyValue(ctx, this_obj, JS_NewInt32(ctx, idx), val, JS_PROP_THROW); } prop = JS_NewAtomInt64(ctx, idx); if (prop == JS_ATOM_NULL) { JS_FreeValue(ctx, val); return -1; } res = JS_SetProperty(ctx, this_obj, prop, val); JS_FreeAtom(ctx, prop); return res; } int JS_SetPropertyStr(JSContext *ctx, JSValueConst this_obj, const char *prop, JSValue val) { JSAtom atom; int ret; atom = JS_NewAtom(ctx, prop); ret = JS_SetPropertyInternal(ctx, this_obj, atom, val, this_obj, JS_PROP_THROW); JS_FreeAtom(ctx, atom); return ret; } /* compute the property flags. 
For each flag: (JS_PROP_HAS_x forces it, otherwise def_flags is used) Note: makes assumption about the bit pattern of the flags */ static int get_prop_flags(int flags, int def_flags) { int mask; mask = (flags >> JS_PROP_HAS_SHIFT) & JS_PROP_C_W_E; return (flags & mask) | (def_flags & ~mask); } static int JS_CreateProperty(JSContext *ctx, JSObject *p, JSAtom prop, JSValueConst val, JSValueConst getter, JSValueConst setter, int flags) { JSProperty *pr; int ret, prop_flags; /* add a new property or modify an existing exotic one */ if (p->is_exotic) { if (p->class_id == JS_CLASS_ARRAY) { uint32_t idx, len; if (p->fast_array) { if (__JS_AtomIsTaggedInt(prop)) { idx = __JS_AtomToUInt32(prop); if (idx == p->u.array.count) { if (!p->extensible) goto not_extensible; if (flags & (JS_PROP_HAS_GET | JS_PROP_HAS_SET)) goto convert_to_array; prop_flags = get_prop_flags(flags, 0); if (prop_flags != JS_PROP_C_W_E) goto convert_to_array; return add_fast_array_element(ctx, p, JS_DupValue(ctx, val), flags); } else { goto convert_to_array; } } else if (JS_AtomIsArrayIndex(ctx, &idx, prop)) { /* convert the fast array to normal array */ convert_to_array: if (convert_fast_array_to_array(ctx, p)) return -1; goto generic_array; } } else if (JS_AtomIsArrayIndex(ctx, &idx, prop)) { JSProperty *plen; JSShapeProperty *pslen; generic_array: /* update the length field */ plen = &p->prop[0]; JS_ToUint32(ctx, &len, plen->u.value); if ((idx + 1) > len) { pslen = get_shape_prop(p->shape); if (unlikely(!(pslen->flags & JS_PROP_WRITABLE))) return JS_ThrowTypeErrorReadOnly(ctx, flags, JS_ATOM_length); /* XXX: should update the length after defining the property */ len = idx + 1; set_value(ctx, &plen->u.value, JS_NewUint32(ctx, len)); } } } else if (p->class_id >= JS_CLASS_UINT8C_ARRAY && p->class_id <= JS_CLASS_FLOAT64_ARRAY) { ret = JS_AtomIsNumericIndex(ctx, prop); if (ret != 0) { if (ret < 0) return -1; return JS_ThrowTypeErrorOrFalse(ctx, flags, "cannot create numeric index in typed array"); } } else if (!(flags & JS_PROP_NO_EXOTIC)) { const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic; if (em) { if (em->define_own_property) { return em->define_own_property(ctx, JS_MKPTR(JS_TAG_OBJECT, p), prop, val, getter, setter, flags); } ret = JS_IsExtensible(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); if (ret < 0) return -1; if (!ret) goto not_extensible; } } } if (!p->extensible) { not_extensible: return JS_ThrowTypeErrorOrFalse(ctx, flags, "object is not extensible"); } if (flags & (JS_PROP_HAS_GET | JS_PROP_HAS_SET)) { prop_flags = (flags & (JS_PROP_CONFIGURABLE | JS_PROP_ENUMERABLE)) | JS_PROP_GETSET; } else { prop_flags = flags & JS_PROP_C_W_E; } pr = add_property(ctx, p, prop, prop_flags); if (unlikely(!pr)) return -1; if (flags & (JS_PROP_HAS_GET | JS_PROP_HAS_SET)) { pr->u.getset.getter = NULL; if ((flags & JS_PROP_HAS_GET) && JS_IsFunction(ctx, getter)) { pr->u.getset.getter = JS_VALUE_GET_OBJ(JS_DupValue(ctx, getter)); } pr->u.getset.setter = NULL; if ((flags & JS_PROP_HAS_SET) && JS_IsFunction(ctx, setter)) { pr->u.getset.setter = JS_VALUE_GET_OBJ(JS_DupValue(ctx, setter)); } } else { if (flags & JS_PROP_HAS_VALUE) { pr->u.value = JS_DupValue(ctx, val); } else { pr->u.value = JS_UNDEFINED; } } return TRUE; } /* return FALSE if not OK */ static BOOL check_define_prop_flags(int prop_flags, int flags) { BOOL has_accessor, is_getset; if (!(prop_flags & JS_PROP_CONFIGURABLE)) { if ((flags & (JS_PROP_HAS_CONFIGURABLE | JS_PROP_CONFIGURABLE)) == (JS_PROP_HAS_CONFIGURABLE | JS_PROP_CONFIGURABLE)) { return FALSE; } if 
((flags & JS_PROP_HAS_ENUMERABLE) && (flags & JS_PROP_ENUMERABLE) != (prop_flags & JS_PROP_ENUMERABLE)) return FALSE; } if (flags & (JS_PROP_HAS_VALUE | JS_PROP_HAS_WRITABLE | JS_PROP_HAS_GET | JS_PROP_HAS_SET)) { if (!(prop_flags & JS_PROP_CONFIGURABLE)) { has_accessor = ((flags & (JS_PROP_HAS_GET | JS_PROP_HAS_SET)) != 0); is_getset = ((prop_flags & JS_PROP_TMASK) == JS_PROP_GETSET); if (has_accessor != is_getset) return FALSE; if (!has_accessor && !is_getset && !(prop_flags & JS_PROP_WRITABLE)) { /* not writable: cannot set the writable bit */ if ((flags & (JS_PROP_HAS_WRITABLE | JS_PROP_WRITABLE)) == (JS_PROP_HAS_WRITABLE | JS_PROP_WRITABLE)) return FALSE; } } } return TRUE; } /* ensure that the shape can be safely modified */ static int js_shape_prepare_update(JSContext *ctx, JSObject *p, JSShapeProperty **pprs) { JSShape *sh; uint32_t idx = 0; /* prevent warning */ sh = p->shape; if (sh->is_hashed) { if (sh->header.ref_count != 1) { if (pprs) idx = *pprs - get_shape_prop(sh); /* clone the shape (the resulting one is no longer hashed) */ sh = js_clone_shape(ctx, sh); if (!sh) return -1; js_free_shape(ctx->rt, p->shape); p->shape = sh; if (pprs) *pprs = get_shape_prop(sh) + idx; } else { js_shape_hash_unlink(ctx->rt, sh); sh->is_hashed = FALSE; } } return 0; } static int js_update_property_flags(JSContext *ctx, JSObject *p, JSShapeProperty **pprs, int flags) { if (flags != (*pprs)->flags) { if (js_shape_prepare_update(ctx, p, pprs)) return -1; (*pprs)->flags = flags; } return 0; } /* allowed flags: JS_PROP_CONFIGURABLE, JS_PROP_WRITABLE, JS_PROP_ENUMERABLE JS_PROP_HAS_GET, JS_PROP_HAS_SET, JS_PROP_HAS_VALUE, JS_PROP_HAS_CONFIGURABLE, JS_PROP_HAS_WRITABLE, JS_PROP_HAS_ENUMERABLE, JS_PROP_THROW, JS_PROP_NO_EXOTIC. If JS_PROP_THROW is set, return an exception instead of FALSE. if JS_PROP_NO_EXOTIC is set, do not call the exotic define_own_property callback. return -1 (exception), FALSE or TRUE. 
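   An illustrative call (editorial note; 'obj', 'prop' and 'val' are assumed names,
   not part of this file): to define a new writable, configurable (and by default
   non-enumerable) data property, raising an exception instead of returning FALSE
   on failure:
       JS_DefineProperty(ctx, obj, prop, val, JS_UNDEFINED, JS_UNDEFINED,
                         JS_PROP_HAS_VALUE | JS_PROP_HAS_WRITABLE | JS_PROP_WRITABLE |
                         JS_PROP_HAS_CONFIGURABLE | JS_PROP_CONFIGURABLE | JS_PROP_THROW);
   Unlike the JS_DefinePropertyValue() shortcut below, this call does not free 'val'.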
*/ int JS_DefineProperty(JSContext *ctx, JSValueConst this_obj, JSAtom prop, JSValueConst val, JSValueConst getter, JSValueConst setter, int flags) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; int mask, res; if (JS_VALUE_GET_TAG(this_obj) != JS_TAG_OBJECT) { JS_ThrowTypeErrorNotAnObject(ctx); return -1; } p = JS_VALUE_GET_OBJ(this_obj); redo_prop_update: prs = find_own_property(&pr, p, prop); if (prs) { /* the range of the Array length property is always tested before */ if ((prs->flags & JS_PROP_LENGTH) && (flags & JS_PROP_HAS_VALUE)) { uint32_t array_length; if (JS_ToArrayLengthFree(ctx, &array_length, JS_DupValue(ctx, val), FALSE)) { return -1; } /* this code relies on the fact that Uint32 are never allocated */ #ifdef STRICT_R_HEADERS val = JS_NewUint32(ctx, array_length); #else val = (JSValueConst)JS_NewUint32(ctx, array_length); #endif /* prs may have been modified */ prs = find_own_property(&pr, p, prop); assert(prs != NULL); } /* property already exists */ if (!check_define_prop_flags(prs->flags, flags)) { not_configurable: return JS_ThrowTypeErrorOrFalse(ctx, flags, "property is not configurable"); } if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { /* Instantiate property and retry */ if (JS_AutoInitProperty(ctx, p, prop, pr, prs)) return -1; goto redo_prop_update; } if (flags & (JS_PROP_HAS_VALUE | JS_PROP_HAS_WRITABLE | JS_PROP_HAS_GET | JS_PROP_HAS_SET)) { if (flags & (JS_PROP_HAS_GET | JS_PROP_HAS_SET)) { JSObject *new_getter, *new_setter; if (JS_IsFunction(ctx, getter)) { new_getter = JS_VALUE_GET_OBJ(getter); } else { new_getter = NULL; } if (JS_IsFunction(ctx, setter)) { new_setter = JS_VALUE_GET_OBJ(setter); } else { new_setter = NULL; } if ((prs->flags & JS_PROP_TMASK) != JS_PROP_GETSET) { if (js_shape_prepare_update(ctx, p, &prs)) return -1; /* convert to getset */ if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { free_var_ref(ctx->rt, pr->u.var_ref); } else { JS_FreeValue(ctx, pr->u.value); } prs->flags = (prs->flags & (JS_PROP_CONFIGURABLE | JS_PROP_ENUMERABLE)) | JS_PROP_GETSET; pr->u.getset.getter = NULL; pr->u.getset.setter = NULL; } else { if (!(prs->flags & JS_PROP_CONFIGURABLE)) { if ((flags & JS_PROP_HAS_GET) && new_getter != pr->u.getset.getter) { goto not_configurable; } if ((flags & JS_PROP_HAS_SET) && new_setter != pr->u.getset.setter) { goto not_configurable; } } } if (flags & JS_PROP_HAS_GET) { if (pr->u.getset.getter) JS_FreeValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.getter)); if (new_getter) JS_DupValue(ctx, getter); pr->u.getset.getter = new_getter; } if (flags & JS_PROP_HAS_SET) { if (pr->u.getset.setter) JS_FreeValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.setter)); if (new_setter) JS_DupValue(ctx, setter); pr->u.getset.setter = new_setter; } } else { if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) { /* convert to data descriptor */ if (js_shape_prepare_update(ctx, p, &prs)) return -1; if (pr->u.getset.getter) JS_FreeValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.getter)); if (pr->u.getset.setter) JS_FreeValue(ctx, JS_MKPTR(JS_TAG_OBJECT, pr->u.getset.setter)); prs->flags &= ~(JS_PROP_TMASK | JS_PROP_WRITABLE); pr->u.value = JS_UNDEFINED; } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { /* Note: JS_PROP_VARREF is always writable */ } else { if ((prs->flags & (JS_PROP_CONFIGURABLE | JS_PROP_WRITABLE)) == 0 && (flags & JS_PROP_HAS_VALUE)) { if (!js_same_value(ctx, val, pr->u.value)) { goto not_configurable; } else { return TRUE; } } } if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { if (flags & 
JS_PROP_HAS_VALUE) { if (p->class_id == JS_CLASS_MODULE_NS) { /* JS_PROP_WRITABLE is always true for variable references, but they are write protected in module name spaces. */ if (!js_same_value(ctx, val, *pr->u.var_ref->pvalue)) goto not_configurable; } else { /* update the reference */ set_value(ctx, pr->u.var_ref->pvalue, JS_DupValue(ctx, val)); } } /* if writable is set to false, no longer a reference (for mapped arguments) */ if ((flags & (JS_PROP_HAS_WRITABLE | JS_PROP_WRITABLE)) == JS_PROP_HAS_WRITABLE) { JSValue val1; if (p->class_id == JS_CLASS_MODULE_NS) { return JS_ThrowTypeErrorOrFalse(ctx, flags, "module namespace properties have writable = false"); } if (js_shape_prepare_update(ctx, p, &prs)) return -1; val1 = JS_DupValue(ctx, *pr->u.var_ref->pvalue); free_var_ref(ctx->rt, pr->u.var_ref); pr->u.value = val1; prs->flags &= ~(JS_PROP_TMASK | JS_PROP_WRITABLE); } } else if (prs->flags & JS_PROP_LENGTH) { if (flags & JS_PROP_HAS_VALUE) { /* Note: no JS code is executable because 'val' is guaranted to be a Uint32 */ res = set_array_length(ctx, p, JS_DupValue(ctx, val), flags); } else { res = TRUE; } /* still need to reset the writable flag if needed. The JS_PROP_LENGTH is kept because the Uint32 test is still done if the length property is read-only. */ if ((flags & (JS_PROP_HAS_WRITABLE | JS_PROP_WRITABLE)) == JS_PROP_HAS_WRITABLE) { prs = get_shape_prop(p->shape); if (js_update_property_flags(ctx, p, &prs, prs->flags & ~JS_PROP_WRITABLE)) return -1; } return res; } else { if (flags & JS_PROP_HAS_VALUE) { JS_FreeValue(ctx, pr->u.value); pr->u.value = JS_DupValue(ctx, val); } if (flags & JS_PROP_HAS_WRITABLE) { if (js_update_property_flags(ctx, p, &prs, (prs->flags & ~JS_PROP_WRITABLE) | (flags & JS_PROP_WRITABLE))) return -1; } } } } mask = 0; if (flags & JS_PROP_HAS_CONFIGURABLE) mask |= JS_PROP_CONFIGURABLE; if (flags & JS_PROP_HAS_ENUMERABLE) mask |= JS_PROP_ENUMERABLE; if (js_update_property_flags(ctx, p, &prs, (prs->flags & ~mask) | (flags & mask))) return -1; return TRUE; } /* handle modification of fast array elements */ if (p->fast_array) { uint32_t idx; uint32_t prop_flags; if (p->class_id == JS_CLASS_ARRAY) { if (__JS_AtomIsTaggedInt(prop)) { idx = __JS_AtomToUInt32(prop); if (idx < p->u.array.count) { prop_flags = get_prop_flags(flags, JS_PROP_C_W_E); if (prop_flags != JS_PROP_C_W_E) goto convert_to_slow_array; if (flags & (JS_PROP_HAS_GET | JS_PROP_HAS_SET)) { convert_to_slow_array: if (convert_fast_array_to_array(ctx, p)) return -1; else goto redo_prop_update; } if (flags & JS_PROP_HAS_VALUE) { set_value(ctx, &p->u.array.u.values[idx], JS_DupValue(ctx, val)); } return TRUE; } } } else if (p->class_id >= JS_CLASS_UINT8C_ARRAY && p->class_id <= JS_CLASS_FLOAT64_ARRAY) { JSValue num; int ret; if (!__JS_AtomIsTaggedInt(prop)) { /* slow path with to handle all numeric indexes */ num = JS_AtomIsNumericIndex1(ctx, prop); if (JS_IsUndefined(num)) goto typed_array_done; if (JS_IsException(num)) return -1; ret = JS_NumberIsInteger(ctx, num); if (ret < 0) { JS_FreeValue(ctx, num); return -1; } if (!ret) { JS_FreeValue(ctx, num); return JS_ThrowTypeErrorOrFalse(ctx, flags, "non integer index in typed array"); } ret = JS_NumberIsNegativeOrMinusZero(ctx, num); JS_FreeValue(ctx, num); if (ret) { return JS_ThrowTypeErrorOrFalse(ctx, flags, "negative index in typed array"); } if (!__JS_AtomIsTaggedInt(prop)) goto typed_array_oob; } idx = __JS_AtomToUInt32(prop); /* if the typed array is detached, p->u.array.count = 0 */ if (idx >= p->u.array.count) { typed_array_oob: return 
JS_ThrowTypeErrorOrFalse(ctx, flags, "out-of-bound index in typed array"); } prop_flags = get_prop_flags(flags, JS_PROP_ENUMERABLE | JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); if (flags & (JS_PROP_HAS_GET | JS_PROP_HAS_SET) || prop_flags != (JS_PROP_ENUMERABLE | JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE)) { return JS_ThrowTypeErrorOrFalse(ctx, flags, "invalid descriptor flags"); } if (flags & JS_PROP_HAS_VALUE) { return JS_SetPropertyValue(ctx, this_obj, JS_NewInt32(ctx, idx), JS_DupValue(ctx, val), flags); } return TRUE; typed_array_done: ; } } return JS_CreateProperty(ctx, p, prop, val, getter, setter, flags); } static int JS_DefineAutoInitProperty(JSContext *ctx, JSValueConst this_obj, JSAtom prop, JSAutoInitIDEnum id, void *opaque, int flags) { JSObject *p; JSProperty *pr; if (JS_VALUE_GET_TAG(this_obj) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(this_obj); if (find_own_property(&pr, p, prop)) { /* property already exists */ abort(); return FALSE; } /* Specialized CreateProperty */ pr = add_property(ctx, p, prop, (flags & JS_PROP_C_W_E) | JS_PROP_AUTOINIT); if (unlikely(!pr)) return -1; pr->u.init.realm_and_id = (uintptr_t)JS_DupContext(ctx); assert((pr->u.init.realm_and_id & 3) == 0); assert(id <= 3); pr->u.init.realm_and_id |= id; pr->u.init.opaque = opaque; return TRUE; } /* shortcut to add or redefine a new property value */ int JS_DefinePropertyValue(JSContext *ctx, JSValueConst this_obj, JSAtom prop, JSValue val, int flags) { int ret; ret = JS_DefineProperty(ctx, this_obj, prop, val, JS_UNDEFINED, JS_UNDEFINED, flags | JS_PROP_HAS_VALUE | JS_PROP_HAS_CONFIGURABLE | JS_PROP_HAS_WRITABLE | JS_PROP_HAS_ENUMERABLE); JS_FreeValue(ctx, val); return ret; } int JS_DefinePropertyValueValue(JSContext *ctx, JSValueConst this_obj, JSValue prop, JSValue val, int flags) { JSAtom atom; int ret; atom = JS_ValueToAtom(ctx, prop); JS_FreeValue(ctx, prop); if (unlikely(atom == JS_ATOM_NULL)) { JS_FreeValue(ctx, val); return -1; } ret = JS_DefinePropertyValue(ctx, this_obj, atom, val, flags); JS_FreeAtom(ctx, atom); return ret; } int JS_DefinePropertyValueUint32(JSContext *ctx, JSValueConst this_obj, uint32_t idx, JSValue val, int flags) { return JS_DefinePropertyValueValue(ctx, this_obj, JS_NewUint32(ctx, idx), val, flags); } int JS_DefinePropertyValueInt64(JSContext *ctx, JSValueConst this_obj, int64_t idx, JSValue val, int flags) { return JS_DefinePropertyValueValue(ctx, this_obj, JS_NewInt64(ctx, idx), val, flags); } int JS_DefinePropertyValueStr(JSContext *ctx, JSValueConst this_obj, const char *prop, JSValue val, int flags) { JSAtom atom; int ret; atom = JS_NewAtom(ctx, prop); ret = JS_DefinePropertyValue(ctx, this_obj, atom, val, flags); JS_FreeAtom(ctx, atom); return ret; } /* shortcut to add getter & setter */ int JS_DefinePropertyGetSet(JSContext *ctx, JSValueConst this_obj, JSAtom prop, JSValue getter, JSValue setter, int flags) { int ret; ret = JS_DefineProperty(ctx, this_obj, prop, JS_UNDEFINED, getter, setter, flags | JS_PROP_HAS_GET | JS_PROP_HAS_SET | JS_PROP_HAS_CONFIGURABLE | JS_PROP_HAS_ENUMERABLE); JS_FreeValue(ctx, getter); JS_FreeValue(ctx, setter); return ret; } static int JS_CreateDataPropertyUint32(JSContext *ctx, JSValueConst this_obj, int64_t idx, JSValue val, int flags) { return JS_DefinePropertyValueValue(ctx, this_obj, JS_NewInt64(ctx, idx), val, flags | JS_PROP_CONFIGURABLE | JS_PROP_ENUMERABLE | JS_PROP_WRITABLE); } /* return TRUE if 'obj' has a non empty 'name' string */ static BOOL js_object_has_name(JSContext *ctx, JSValueConst obj) { JSProperty *pr; 
JSShapeProperty *prs; JSValueConst val; JSString *p; prs = find_own_property(&pr, JS_VALUE_GET_OBJ(obj), JS_ATOM_name); if (!prs) return FALSE; if ((prs->flags & JS_PROP_TMASK) != JS_PROP_NORMAL) return TRUE; val = pr->u.value; if (JS_VALUE_GET_TAG(val) != JS_TAG_STRING) return TRUE; p = JS_VALUE_GET_STRING(val); return (p->len != 0); } static int JS_DefineObjectName(JSContext *ctx, JSValueConst obj, JSAtom name, int flags) { if (name != JS_ATOM_NULL && JS_IsObject(obj) && !js_object_has_name(ctx, obj) && JS_DefinePropertyValue(ctx, obj, JS_ATOM_name, JS_AtomToString(ctx, name), flags) < 0) { return -1; } return 0; } static int JS_DefineObjectNameComputed(JSContext *ctx, JSValueConst obj, JSValueConst str, int flags) { if (JS_IsObject(obj) && !js_object_has_name(ctx, obj)) { JSAtom prop; JSValue name_str; prop = JS_ValueToAtom(ctx, str); if (prop == JS_ATOM_NULL) return -1; name_str = js_get_function_name(ctx, prop); JS_FreeAtom(ctx, prop); if (JS_IsException(name_str)) return -1; if (JS_DefinePropertyValue(ctx, obj, JS_ATOM_name, name_str, flags) < 0) return -1; } return 0; } #define DEFINE_GLOBAL_LEX_VAR (1 << 7) #define DEFINE_GLOBAL_FUNC_VAR (1 << 6) static JSValue JS_ThrowSyntaxErrorVarRedeclaration(JSContext *ctx, JSAtom prop) { return JS_ThrowSyntaxErrorAtom(ctx, "redeclaration of '%s'", prop); } /* flags is 0, DEFINE_GLOBAL_LEX_VAR or DEFINE_GLOBAL_FUNC_VAR */ /* XXX: could support exotic global object. */ static int JS_CheckDefineGlobalVar(JSContext *ctx, JSAtom prop, int flags) { JSObject *p; JSShapeProperty *prs; p = JS_VALUE_GET_OBJ(ctx->global_obj); prs = find_own_property1(p, prop); /* XXX: should handle JS_PROP_AUTOINIT */ if (flags & DEFINE_GLOBAL_LEX_VAR) { if (prs && !(prs->flags & JS_PROP_CONFIGURABLE)) goto fail_redeclaration; } else { if (!prs && !p->extensible) goto define_error; if (flags & DEFINE_GLOBAL_FUNC_VAR) { if (prs) { if (!(prs->flags & JS_PROP_CONFIGURABLE) && ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET || ((prs->flags & (JS_PROP_WRITABLE | JS_PROP_ENUMERABLE)) != (JS_PROP_WRITABLE | JS_PROP_ENUMERABLE)))) { define_error: JS_ThrowTypeErrorAtom(ctx, "cannot define variable '%s'", prop); return -1; } } } } /* check if there already is a lexical declaration */ p = JS_VALUE_GET_OBJ(ctx->global_var_obj); prs = find_own_property1(p, prop); if (prs) { fail_redeclaration: JS_ThrowSyntaxErrorVarRedeclaration(ctx, prop); return -1; } return 0; } /* def_flags is (0, DEFINE_GLOBAL_LEX_VAR) | JS_PROP_CONFIGURABLE | JS_PROP_WRITABLE */ /* XXX: could support exotic global object. */ static int JS_DefineGlobalVar(JSContext *ctx, JSAtom prop, int def_flags) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; JSValue val; int flags; if (def_flags & DEFINE_GLOBAL_LEX_VAR) { p = JS_VALUE_GET_OBJ(ctx->global_var_obj); flags = JS_PROP_ENUMERABLE | (def_flags & JS_PROP_WRITABLE) | JS_PROP_CONFIGURABLE; val = JS_UNINITIALIZED; } else { p = JS_VALUE_GET_OBJ(ctx->global_obj); flags = JS_PROP_ENUMERABLE | JS_PROP_WRITABLE | (def_flags & JS_PROP_CONFIGURABLE); val = JS_UNDEFINED; } prs = find_own_property1(p, prop); if (prs) return 0; if (!p->extensible) return 0; pr = add_property(ctx, p, prop, flags); if (unlikely(!pr)) return -1; pr->u.value = val; return 0; } /* 'def_flags' is 0 or JS_PROP_CONFIGURABLE. */ /* XXX: could support exotic global object. 
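   (Editorial note: this backs global function declarations such as a top-level
   'function f() {}'. If the binding is absent or configurable it is (re)defined
   as writable and enumerable together with def_flags; otherwise only its value
   is replaced, subject to the usual descriptor checks, and JS_PROP_THROW turns
   failures into exceptions.)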
*/ static int JS_DefineGlobalFunction(JSContext *ctx, JSAtom prop, JSValueConst func, int def_flags) { JSObject *p; JSShapeProperty *prs; int flags; p = JS_VALUE_GET_OBJ(ctx->global_obj); prs = find_own_property1(p, prop); flags = JS_PROP_HAS_VALUE | JS_PROP_THROW; if (!prs || (prs->flags & JS_PROP_CONFIGURABLE)) { flags |= JS_PROP_ENUMERABLE | JS_PROP_WRITABLE | def_flags | JS_PROP_HAS_CONFIGURABLE | JS_PROP_HAS_WRITABLE | JS_PROP_HAS_ENUMERABLE; } if (JS_DefineProperty(ctx, ctx->global_obj, prop, func, JS_UNDEFINED, JS_UNDEFINED, flags) < 0) return -1; return 0; } static JSValue JS_GetGlobalVar(JSContext *ctx, JSAtom prop, BOOL throw_ref_error) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; /* no exotic behavior is possible in global_var_obj */ p = JS_VALUE_GET_OBJ(ctx->global_var_obj); prs = find_own_property(&pr, p, prop); if (prs) { /* XXX: should handle JS_PROP_TMASK properties */ if (unlikely(JS_IsUninitialized(pr->u.value))) return JS_ThrowReferenceErrorUninitialized(ctx, prs->atom); return JS_DupValue(ctx, pr->u.value); } return JS_GetPropertyInternal(ctx, ctx->global_obj, prop, ctx->global_obj, throw_ref_error); } /* construct a reference to a global variable */ static int JS_GetGlobalVarRef(JSContext *ctx, JSAtom prop, JSValue *sp) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; /* no exotic behavior is possible in global_var_obj */ p = JS_VALUE_GET_OBJ(ctx->global_var_obj); prs = find_own_property(&pr, p, prop); if (prs) { /* XXX: should handle JS_PROP_AUTOINIT properties? */ /* XXX: conformance: do these tests in OP_put_var_ref/OP_get_var_ref ? */ if (unlikely(JS_IsUninitialized(pr->u.value))) { JS_ThrowReferenceErrorUninitialized(ctx, prs->atom); return -1; } if (unlikely(!(prs->flags & JS_PROP_WRITABLE))) { return JS_ThrowTypeErrorReadOnly(ctx, JS_PROP_THROW, prop); } sp[0] = JS_DupValue(ctx, ctx->global_var_obj); } else { int ret; ret = JS_HasProperty(ctx, ctx->global_obj, prop); if (ret < 0) return -1; if (ret) { sp[0] = JS_DupValue(ctx, ctx->global_obj); } else { sp[0] = JS_UNDEFINED; } } sp[1] = JS_AtomToValue(ctx, prop); return 0; } /* use for strict variable access: test if the variable exists */ static int JS_CheckGlobalVar(JSContext *ctx, JSAtom prop) { JSObject *p; JSShapeProperty *prs; int ret; /* no exotic behavior is possible in global_var_obj */ p = JS_VALUE_GET_OBJ(ctx->global_var_obj); prs = find_own_property1(p, prop); if (prs) { ret = TRUE; } else { ret = JS_HasProperty(ctx, ctx->global_obj, prop); if (ret < 0) return -1; } return ret; } /* flag = 0: normal variable write flag = 1: initialize lexical variable flag = 2: normal variable write, strict check was done before */ static int JS_SetGlobalVar(JSContext *ctx, JSAtom prop, JSValue val, int flag) { JSObject *p; JSShapeProperty *prs; JSProperty *pr; int flags; /* no exotic behavior is possible in global_var_obj */ p = JS_VALUE_GET_OBJ(ctx->global_var_obj); prs = find_own_property(&pr, p, prop); if (prs) { /* XXX: should handle JS_PROP_AUTOINIT properties? */ if (flag != 1) { if (unlikely(JS_IsUninitialized(pr->u.value))) { JS_FreeValue(ctx, val); JS_ThrowReferenceErrorUninitialized(ctx, prs->atom); return -1; } if (unlikely(!(prs->flags & JS_PROP_WRITABLE))) { JS_FreeValue(ctx, val); return JS_ThrowTypeErrorReadOnly(ctx, JS_PROP_THROW, prop); } } set_value(ctx, &pr->u.value, val); return 0; } flags = JS_PROP_THROW_STRICT; if (is_strict_mode(ctx)) flags |= JS_PROP_NO_ADD; return JS_SetPropertyInternal(ctx, ctx->global_obj, prop, val, ctx->global_obj, flags); } /* return -1, FALSE or TRUE. 
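   (Illustrative, with 'obj' and 'prop' assumed:
       res = JS_DeleteProperty(ctx, obj, prop, JS_PROP_THROW);
   removes obj[prop] and, because of JS_PROP_THROW, reports a non-configurable
   property as a pending TypeError instead of returning FALSE.)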
return FALSE if not configurable or invalid object. return -1 in case of exception. flags can be 0, JS_PROP_THROW or JS_PROP_THROW_STRICT */ int JS_DeleteProperty(JSContext *ctx, JSValueConst obj, JSAtom prop, int flags) { JSValue obj1; JSObject *p; int res; obj1 = JS_ToObject(ctx, obj); if (JS_IsException(obj1)) return -1; p = JS_VALUE_GET_OBJ(obj1); res = delete_property(ctx, p, prop); JS_FreeValue(ctx, obj1); if (res != FALSE) return res; if ((flags & JS_PROP_THROW) || ((flags & JS_PROP_THROW_STRICT) && is_strict_mode(ctx))) { JS_ThrowTypeError(ctx, "could not delete property"); return -1; } return FALSE; } int JS_DeletePropertyInt64(JSContext *ctx, JSValueConst obj, int64_t idx, int flags) { JSAtom prop; int res; if ((uint64_t)idx <= JS_ATOM_MAX_INT) { /* fast path for fast arrays */ return JS_DeleteProperty(ctx, obj, __JS_AtomFromUInt32(idx), flags); } prop = JS_NewAtomInt64(ctx, idx); if (prop == JS_ATOM_NULL) return -1; res = JS_DeleteProperty(ctx, obj, prop, flags); JS_FreeAtom(ctx, prop); return res; } BOOL JS_IsFunction(JSContext *ctx, JSValueConst val) { JSObject *p; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(val); switch(p->class_id) { case JS_CLASS_BYTECODE_FUNCTION: return TRUE; case JS_CLASS_PROXY: return p->u.proxy_data->is_func; default: return (ctx->rt->class_array[p->class_id].call != NULL); } } BOOL JS_IsCFunction(JSContext *ctx, JSValueConst val, JSCFunction *func, int magic) { JSObject *p; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(val); if (p->class_id == JS_CLASS_C_FUNCTION) return (p->u.cfunc.c_function.generic == func && p->u.cfunc.magic == magic); else return FALSE; } BOOL JS_IsConstructor(JSContext *ctx, JSValueConst val) { JSObject *p; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(val); return p->is_constructor; } BOOL JS_SetConstructorBit(JSContext *ctx, JSValueConst func_obj, BOOL val) { JSObject *p; if (JS_VALUE_GET_TAG(func_obj) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(func_obj); p->is_constructor = val; return TRUE; } BOOL JS_IsError(JSContext *ctx, JSValueConst val) { JSObject *p; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(val); return (p->class_id == JS_CLASS_ERROR); } /* used to avoid catching interrupt exceptions */ BOOL JS_IsUncatchableError(JSContext *ctx, JSValueConst val) { JSObject *p; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(val); return p->class_id == JS_CLASS_ERROR && p->is_uncatchable_error; } void JS_SetUncatchableError(JSContext *ctx, JSValueConst val, BOOL flag) { JSObject *p; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return; p = JS_VALUE_GET_OBJ(val); if (p->class_id == JS_CLASS_ERROR) p->is_uncatchable_error = flag; } void JS_ResetUncatchableError(JSContext *ctx) { JS_SetUncatchableError(ctx, ctx->rt->current_exception, FALSE); } void JS_SetOpaque(JSValue obj, void *opaque) { JSObject *p; if (JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT) { p = JS_VALUE_GET_OBJ(obj); p->u.opaque = opaque; } } /* return NULL if not an object of class class_id */ void *JS_GetOpaque(JSValueConst obj, JSClassID class_id) { JSObject *p; if (JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) return NULL; p = JS_VALUE_GET_OBJ(obj); if (p->class_id != class_id) return NULL; return p->u.opaque; } void *JS_GetOpaque2(JSContext *ctx, JSValueConst obj, JSClassID class_id) { void *p = JS_GetOpaque(obj, class_id); if (unlikely(!p)) { JS_ThrowTypeErrorInvalidClass(ctx, class_id); } return p; } 
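/*
 * Editorial sketch (not part of the upstream sources; 'my_class_id' and
 * 'MyState' are assumed names): host objects usually attach their C state with
 * JS_SetOpaque() when they are created and fetch it back with JS_GetOpaque2(),
 * which returns NULL and leaves a pending TypeError when the object is not an
 * instance of the expected class:
 *
 *     MyState *st = js_mallocz(ctx, sizeof(*st));
 *     JSValue obj = JS_NewObjectClass(ctx, my_class_id);
 *     JS_SetOpaque(obj, st);
 *     ...
 *     MyState *st2 = JS_GetOpaque2(ctx, this_val, my_class_id);
 *     if (!st2)
 *         return JS_EXCEPTION;
 */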
#define HINT_STRING 0 #define HINT_NUMBER 1 #define HINT_NONE 2 /* don't try Symbol.toPrimitive */ #define HINT_FORCE_ORDINARY (1 << 4) static JSValue JS_ToPrimitiveFree(JSContext *ctx, JSValue val, int hint) { int i; BOOL force_ordinary; JSAtom method_name; JSValue method, ret; if (JS_VALUE_GET_TAG(val) != JS_TAG_OBJECT) return val; force_ordinary = hint & HINT_FORCE_ORDINARY; hint &= ~HINT_FORCE_ORDINARY; if (!force_ordinary) { method = JS_GetProperty(ctx, val, JS_ATOM_Symbol_toPrimitive); if (JS_IsException(method)) goto exception; /* ECMA says *If exoticToPrim is not undefined* but tests in test262 use null as a non callable converter */ if (!JS_IsUndefined(method) && !JS_IsNull(method)) { JSAtom atom; JSValue arg; switch(hint) { case HINT_STRING: atom = JS_ATOM_string; break; case HINT_NUMBER: atom = JS_ATOM_number; break; default: case HINT_NONE: atom = JS_ATOM_default; break; } arg = JS_AtomToString(ctx, atom); ret = JS_CallFree(ctx, method, val, 1, (JSValueConst *)&arg); JS_FreeValue(ctx, arg); if (JS_IsException(ret)) goto exception; JS_FreeValue(ctx, val); if (JS_VALUE_GET_TAG(ret) != JS_TAG_OBJECT) return ret; JS_FreeValue(ctx, ret); return JS_ThrowTypeError(ctx, "toPrimitive"); } } if (hint != HINT_STRING) hint = HINT_NUMBER; for(i = 0; i < 2; i++) { if ((i ^ hint) == 0) { method_name = JS_ATOM_toString; } else { method_name = JS_ATOM_valueOf; } method = JS_GetProperty(ctx, val, method_name); if (JS_IsException(method)) goto exception; if (JS_IsFunction(ctx, method)) { ret = JS_CallFree(ctx, method, val, 0, NULL); if (JS_IsException(ret)) goto exception; if (JS_VALUE_GET_TAG(ret) != JS_TAG_OBJECT) { JS_FreeValue(ctx, val); return ret; } JS_FreeValue(ctx, ret); } else { JS_FreeValue(ctx, method); } } JS_ThrowTypeError(ctx, "toPrimitive"); exception: JS_FreeValue(ctx, val); return JS_EXCEPTION; } static JSValue JS_ToPrimitive(JSContext *ctx, JSValueConst val, int hint) { return JS_ToPrimitiveFree(ctx, JS_DupValue(ctx, val), hint); } void JS_SetIsHTMLDDA(JSContext *ctx, JSValueConst obj) { JSObject *p; if (JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) return; p = JS_VALUE_GET_OBJ(obj); p->is_HTMLDDA = TRUE; } static inline BOOL JS_IsHTMLDDA(JSContext *ctx, JSValueConst obj) { JSObject *p; if (JS_VALUE_GET_TAG(obj) != JS_TAG_OBJECT) return FALSE; p = JS_VALUE_GET_OBJ(obj); return p->is_HTMLDDA; } static int JS_ToBoolFree(JSContext *ctx, JSValue val) { uint32_t tag = JS_VALUE_GET_TAG(val); switch(tag) { case JS_TAG_INT: return JS_VALUE_GET_INT(val) != 0; case JS_TAG_BOOL: case JS_TAG_NULL: case JS_TAG_UNDEFINED: return JS_VALUE_GET_INT(val); case JS_TAG_EXCEPTION: return -1; case JS_TAG_STRING: { BOOL ret = JS_VALUE_GET_STRING(val)->len != 0; JS_FreeValue(ctx, val); return ret; } case JS_TAG_BIG_INT: #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: #endif { JSBigFloat *p = JS_VALUE_GET_PTR(val); BOOL ret; ret = p->num.expn != BF_EXP_ZERO && p->num.expn != BF_EXP_NAN; JS_FreeValue(ctx, val); return ret; } #ifdef CONFIG_BIGNUM case JS_TAG_BIG_DECIMAL: { JSBigDecimal *p = JS_VALUE_GET_PTR(val); BOOL ret; ret = p->num.expn != BF_EXP_ZERO && p->num.expn != BF_EXP_NAN; JS_FreeValue(ctx, val); return ret; } #endif case JS_TAG_OBJECT: { JSObject *p = JS_VALUE_GET_OBJ(val); BOOL ret; ret = !p->is_HTMLDDA; JS_FreeValue(ctx, val); return ret; } break; default: if (JS_TAG_IS_FLOAT64(tag)) { double d = JS_VALUE_GET_FLOAT64(val); return !isnan(d) && d != 0; } else { JS_FreeValue(ctx, val); return TRUE; } } } int JS_ToBool(JSContext *ctx, JSValueConst val) { return JS_ToBoolFree(ctx, JS_DupValue(ctx, val)); 
} static int skip_spaces(const char *pc) { const uint8_t *p, *p_next, *p_start; uint32_t c; p = p_start = (const uint8_t *)pc; for (;;) { c = *p; if (c < 128) { if (!((c >= 0x09 && c <= 0x0d) || (c == 0x20))) break; p++; } else { c = unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p_next); if (!lre_is_space(c)) break; p = p_next; } } return p - p_start; } static inline int to_digit(int c) { if (c >= '0' && c <= '9') return c - '0'; else if (c >= 'A' && c <= 'Z') return c - 'A' + 10; else if (c >= 'a' && c <= 'z') return c - 'a' + 10; else return 36; } /* XXX: remove */ static double js_strtod(const char *str, int radix, BOOL is_float) { double d; int c; if (!is_float || radix != 10) { const char *p = str; uint64_t n_max, n; int int_exp, is_neg; is_neg = 0; if (*p == '-') { is_neg = 1; p++; } /* skip leading zeros */ while (*p == '0') p++; n = 0; if (radix == 10) n_max = ((uint64_t)-1 - 9) / 10; /* most common case */ else n_max = ((uint64_t)-1 - (radix - 1)) / radix; /* XXX: could be more precise */ int_exp = 0; while (*p != '\0') { c = to_digit((uint8_t)*p); if (c >= radix) break; if (n <= n_max) { n = n * radix + c; } else { if (radix == 10) goto strtod_case; int_exp++; } p++; } d = n; if (int_exp != 0) { d *= pow(radix, int_exp); } if (is_neg) d = -d; } else { strtod_case: d = strtod(str, NULL); } return d; } #define ATOD_INT_ONLY (1 << 0) /* accept Oo and Ob prefixes in addition to 0x prefix if radix = 0 */ #define ATOD_ACCEPT_BIN_OCT (1 << 2) /* accept O prefix as octal if radix == 0 and properly formed (Annex B) */ #define ATOD_ACCEPT_LEGACY_OCTAL (1 << 4) /* accept _ between digits as a digit separator */ #define ATOD_ACCEPT_UNDERSCORES (1 << 5) /* allow a suffix to override the type */ #define ATOD_ACCEPT_SUFFIX (1 << 6) /* default type */ #define ATOD_TYPE_MASK (3 << 7) #define ATOD_TYPE_FLOAT64 (0 << 7) #define ATOD_TYPE_BIG_INT (1 << 7) #ifdef CONFIG_BIGNUM #define ATOD_TYPE_BIG_FLOAT (2 << 7) #define ATOD_TYPE_BIG_DECIMAL (3 << 7) /* assume bigint mode: floats are parsed as integers if no decimal point nor exponent */ #define ATOD_MODE_BIGINT (1 << 9) #endif /* accept -0x1 */ #define ATOD_ACCEPT_PREFIX_AFTER_SIGN (1 << 10) static JSValue js_string_to_bigint(JSContext *ctx, const char *buf, int radix, int flags, slimb_t *pexponent) { bf_t a_s, *a = &a_s; int ret; JSValue val; val = JS_NewBigInt(ctx); if (JS_IsException(val)) return val; a = JS_GetBigInt(val); ret = bf_atof(a, buf, NULL, radix, BF_PREC_INF, BF_RNDZ); if (ret & BF_ST_MEM_ERROR) { JS_FreeValue(ctx, val); return JS_ThrowOutOfMemory(ctx); } #ifdef CONFIG_BIGNUM val = JS_CompactBigInt1(ctx, val, (flags & ATOD_MODE_BIGINT) != 0); #else val = JS_CompactBigInt1(ctx, val, FALSE); #endif return val; } #ifdef CONFIG_BIGNUM static JSValue js_string_to_bigfloat(JSContext *ctx, const char *buf, int radix, int flags, slimb_t *pexponent) { bf_t *a; int ret; JSValue val; val = JS_NewBigFloat(ctx); if (JS_IsException(val)) return val; a = JS_GetBigFloat(val); if (flags & ATOD_ACCEPT_SUFFIX) { /* return the exponent to get infinite precision */ ret = bf_atof2(a, pexponent, buf, NULL, radix, BF_PREC_INF, BF_RNDZ | BF_ATOF_EXPONENT); } else { ret = bf_atof(a, buf, NULL, radix, ctx->fp_env.prec, ctx->fp_env.flags); } if (ret & BF_ST_MEM_ERROR) { JS_FreeValue(ctx, val); return JS_ThrowOutOfMemory(ctx); } return val; } static JSValue js_string_to_bigdecimal(JSContext *ctx, const char *buf, int radix, int flags, slimb_t *pexponent) { bfdec_t *a; int ret; JSValue val; val = JS_NewBigDecimal(ctx); if (JS_IsException(val)) return val; a = 
JS_GetBigDecimal(val); ret = bfdec_atof(a, buf, NULL, BF_PREC_INF, BF_RNDZ | BF_ATOF_NO_NAN_INF); if (ret & BF_ST_MEM_ERROR) { JS_FreeValue(ctx, val); return JS_ThrowOutOfMemory(ctx); } return val; } #endif /* return an exception in case of memory error. Return JS_NAN if invalid syntax */ #ifdef CONFIG_BIGNUM static JSValue js_atof2(JSContext *ctx, const char *str, const char **pp, int radix, int flags, slimb_t *pexponent) #else static JSValue js_atof(JSContext *ctx, const char *str, const char **pp, int radix, int flags) #endif { const char *p, *p_start; int sep, is_neg; BOOL is_float, has_legacy_octal; int atod_type = flags & ATOD_TYPE_MASK; char buf1[64], *buf; int i, j, len; BOOL buf_allocated = FALSE; JSValue val; /* optional separator between digits */ sep = (flags & ATOD_ACCEPT_UNDERSCORES) ? '_' : 256; has_legacy_octal = FALSE; p = str; p_start = p; is_neg = 0; if (p[0] == '+') { p++; p_start++; if (!(flags & ATOD_ACCEPT_PREFIX_AFTER_SIGN)) goto no_radix_prefix; } else if (p[0] == '-') { p++; p_start++; is_neg = 1; if (!(flags & ATOD_ACCEPT_PREFIX_AFTER_SIGN)) goto no_radix_prefix; } if (p[0] == '0') { if ((p[1] == 'x' || p[1] == 'X') && (radix == 0 || radix == 16)) { p += 2; radix = 16; } else if ((p[1] == 'o' || p[1] == 'O') && radix == 0 && (flags & ATOD_ACCEPT_BIN_OCT)) { p += 2; radix = 8; } else if ((p[1] == 'b' || p[1] == 'B') && radix == 0 && (flags & ATOD_ACCEPT_BIN_OCT)) { p += 2; radix = 2; } else if ((p[1] >= '0' && p[1] <= '9') && radix == 0 && (flags & ATOD_ACCEPT_LEGACY_OCTAL)) { int i; has_legacy_octal = TRUE; sep = 256; for (i = 1; (p[i] >= '0' && p[i] <= '7'); i++) continue; if (p[i] == '8' || p[i] == '9') goto no_prefix; p += 1; radix = 8; } else { goto no_prefix; } /* there must be a digit after the prefix */ if (to_digit((uint8_t)*p) >= radix) goto fail; no_prefix: ; } else { no_radix_prefix: if (!(flags & ATOD_INT_ONLY) && (atod_type == ATOD_TYPE_FLOAT64 #ifdef CONFIG_BIGNUM || atod_type == ATOD_TYPE_BIG_FLOAT #endif ) && strstart(p, "Infinity", &p)) { #ifdef CONFIG_BIGNUM if (atod_type == ATOD_TYPE_BIG_FLOAT) { bf_t *a; val = JS_NewBigFloat(ctx); if (JS_IsException(val)) goto done; a = JS_GetBigFloat(val); bf_set_inf(a, is_neg); } else #endif { double d = 1.0 / 0.0; if (is_neg) d = -d; val = JS_NewFloat64(ctx, d); } goto done; } } if (radix == 0) radix = 10; is_float = FALSE; p_start = p; while (to_digit((uint8_t)*p) < radix || (*p == sep && (radix != 10 || p != p_start + 1 || p[-1] != '0') && to_digit((uint8_t)p[1]) < radix)) { p++; } if (!(flags & ATOD_INT_ONLY)) { if (*p == '.' 
&& (p > p_start || to_digit((uint8_t)p[1]) < radix)) { is_float = TRUE; p++; if (*p == sep) goto fail; while (to_digit((uint8_t)*p) < radix || (*p == sep && to_digit((uint8_t)p[1]) < radix)) p++; } if (p > p_start && (((*p == 'e' || *p == 'E') && radix == 10) || ((*p == 'p' || *p == 'P') && (radix == 2 || radix == 8 || radix == 16)))) { const char *p1 = p + 1; is_float = TRUE; if (*p1 == '+') { p1++; } else if (*p1 == '-') { p1++; } if (is_digit((uint8_t)*p1)) { p = p1 + 1; while (is_digit((uint8_t)*p) || (*p == sep && is_digit((uint8_t)p[1]))) p++; } } } if (p == p_start) goto fail; buf = buf1; buf_allocated = FALSE; len = p - p_start; if (unlikely((len + 2) > sizeof(buf1))) { buf = js_malloc_rt(ctx->rt, len + 2); /* no exception raised */ if (!buf) goto mem_error; buf_allocated = TRUE; } /* remove the separators and the radix prefixes */ j = 0; if (is_neg) buf[j++] = '-'; for (i = 0; i < len; i++) { if (p_start[i] != '_') buf[j++] = p_start[i]; } buf[j] = '\0'; if (flags & ATOD_ACCEPT_SUFFIX) { if (*p == 'n') { p++; atod_type = ATOD_TYPE_BIG_INT; } else #ifdef CONFIG_BIGNUM if (*p == 'l') { p++; atod_type = ATOD_TYPE_BIG_FLOAT; } else if (*p == 'm') { p++; atod_type = ATOD_TYPE_BIG_DECIMAL; } else if (flags & ATOD_MODE_BIGINT) { if (!is_float) atod_type = ATOD_TYPE_BIG_INT; if (has_legacy_octal) goto fail; } else #endif { if (is_float && radix != 10) goto fail; } } else { if (atod_type == ATOD_TYPE_FLOAT64) { #ifdef CONFIG_BIGNUM if (flags & ATOD_MODE_BIGINT) { if (!is_float) atod_type = ATOD_TYPE_BIG_INT; if (has_legacy_octal) goto fail; } else #endif { if (is_float && radix != 10) goto fail; } } } switch(atod_type) { case ATOD_TYPE_FLOAT64: { double d; d = js_strtod(buf, radix, is_float); /* return int or float64 */ val = JS_NewFloat64(ctx, d); } break; case ATOD_TYPE_BIG_INT: if (has_legacy_octal || is_float) goto fail; val = ctx->rt->bigint_ops.from_string(ctx, buf, radix, flags, NULL); break; #ifdef CONFIG_BIGNUM case ATOD_TYPE_BIG_FLOAT: if (has_legacy_octal) goto fail; val = ctx->rt->bigfloat_ops.from_string(ctx, buf, radix, flags, pexponent); break; case ATOD_TYPE_BIG_DECIMAL: if (radix != 10) goto fail; val = ctx->rt->bigdecimal_ops.from_string(ctx, buf, radix, flags, NULL); break; #endif default: abort(); } done: if (buf_allocated) js_free_rt(ctx->rt, buf); if (pp) *pp = p; return val; fail: val = JS_NAN; goto done; mem_error: val = JS_ThrowOutOfMemory(ctx); goto done; } #ifdef CONFIG_BIGNUM static JSValue js_atof(JSContext *ctx, const char *str, const char **pp, int radix, int flags) { return js_atof2(ctx, str, pp, radix, flags, NULL); } #endif typedef enum JSToNumberHintEnum { TON_FLAG_NUMBER, TON_FLAG_NUMERIC, } JSToNumberHintEnum; static JSValue JS_ToNumberHintFree(JSContext *ctx, JSValue val, JSToNumberHintEnum flag) { uint32_t tag; JSValue ret; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_BIG_INT: if (flag != TON_FLAG_NUMERIC) { JS_FreeValue(ctx, val); return JS_ThrowTypeError(ctx, "cannot convert bigint to number"); } ret = val; break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_DECIMAL: if (flag != TON_FLAG_NUMERIC) { JS_FreeValue(ctx, val); return JS_ThrowTypeError(ctx, "cannot convert bigdecimal to number"); } ret = val; break; case JS_TAG_BIG_FLOAT: if (flag != TON_FLAG_NUMERIC) { JS_FreeValue(ctx, val); return JS_ThrowTypeError(ctx, "cannot convert bigfloat to number"); } ret = val; break; #endif case JS_TAG_FLOAT64: case JS_TAG_INT: case JS_TAG_EXCEPTION: ret = val; break; case JS_TAG_BOOL: case JS_TAG_NULL: ret = JS_NewInt32(ctx, 
JS_VALUE_GET_INT(val)); break; case JS_TAG_UNDEFINED: ret = JS_NAN; break; case JS_TAG_OBJECT: val = JS_ToPrimitiveFree(ctx, val, HINT_NUMBER); if (JS_IsException(val)) return JS_EXCEPTION; goto redo; case JS_TAG_STRING: { const char *str; const char *p; size_t len; str = JS_ToCStringLen(ctx, &len, val); JS_FreeValue(ctx, val); if (!str) return JS_EXCEPTION; p = str; p += skip_spaces(p); if ((p - str) == len) { ret = JS_NewInt32(ctx, 0); } else { int flags = ATOD_ACCEPT_BIN_OCT; ret = js_atof(ctx, p, &p, 0, flags); if (!JS_IsException(ret)) { p += skip_spaces(p); if ((p - str) != len) { JS_FreeValue(ctx, ret); ret = JS_NAN; } } } JS_FreeCString(ctx, str); } break; case JS_TAG_SYMBOL: JS_FreeValue(ctx, val); return JS_ThrowTypeError(ctx, "cannot convert symbol to number"); default: JS_FreeValue(ctx, val); ret = JS_NAN; break; } return ret; } static JSValue JS_ToNumberFree(JSContext *ctx, JSValue val) { return JS_ToNumberHintFree(ctx, val, TON_FLAG_NUMBER); } static JSValue JS_ToNumericFree(JSContext *ctx, JSValue val) { return JS_ToNumberHintFree(ctx, val, TON_FLAG_NUMERIC); } static JSValue JS_ToNumeric(JSContext *ctx, JSValueConst val) { return JS_ToNumericFree(ctx, JS_DupValue(ctx, val)); } static __exception int __JS_ToFloat64Free(JSContext *ctx, double *pres, JSValue val) { double d; uint32_t tag; val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) { *pres = JS_FLOAT64_NAN; return -1; } tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: d = JS_VALUE_GET_INT(val); break; case JS_TAG_FLOAT64: d = JS_VALUE_GET_FLOAT64(val); break; case JS_TAG_BIG_INT: #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: #endif { JSBigFloat *p = JS_VALUE_GET_PTR(val); /* XXX: there can be a double rounding issue with some primitives (such as JS_ToUint8ClampFree()), but it is not critical to fix it. 
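   (That is, the value is rounded once here when converted to float64 and can be
   rounded a second time by a narrower conversion built on top of it, which may
   occasionally differ from a single direct rounding.)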
*/ bf_get_float64(&p->num, &d, BF_RNDN); JS_FreeValue(ctx, val); } break; default: abort(); } *pres = d; return 0; } static inline int JS_ToFloat64Free(JSContext *ctx, double *pres, JSValue val) { uint32_t tag; tag = JS_VALUE_GET_TAG(val); if (tag <= JS_TAG_NULL) { *pres = JS_VALUE_GET_INT(val); return 0; } else if (JS_TAG_IS_FLOAT64(tag)) { *pres = JS_VALUE_GET_FLOAT64(val); return 0; } else { return __JS_ToFloat64Free(ctx, pres, val); } } int JS_ToFloat64(JSContext *ctx, double *pres, JSValueConst val) { return JS_ToFloat64Free(ctx, pres, JS_DupValue(ctx, val)); } static JSValue JS_ToNumber(JSContext *ctx, JSValueConst val) { return JS_ToNumberFree(ctx, JS_DupValue(ctx, val)); } /* same as JS_ToNumber() but return 0 in case of NaN/Undefined */ static __maybe_unused JSValue JS_ToIntegerFree(JSContext *ctx, JSValue val) { uint32_t tag; JSValue ret; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: case JS_TAG_UNDEFINED: ret = JS_NewInt32(ctx, JS_VALUE_GET_INT(val)); break; case JS_TAG_FLOAT64: { double d = JS_VALUE_GET_FLOAT64(val); if (isnan(d)) { ret = JS_NewInt32(ctx, 0); } else { /* convert -0 to +0 */ d = trunc(d) + 0.0; ret = JS_NewFloat64(ctx, d); } } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { bf_t a_s, *a, r_s, *r = &r_s; BOOL is_nan; a = JS_ToBigFloat(ctx, &a_s, val); if (!a) { JS_FreeValue(ctx, val); return JS_EXCEPTION; } if (!bf_is_finite(a)) { is_nan = bf_is_nan(a); if (is_nan) ret = JS_NewInt32(ctx, 0); else ret = JS_DupValue(ctx, val); } else { ret = JS_NewBigInt(ctx); if (!JS_IsException(ret)) { r = JS_GetBigInt(ret); bf_set(r, a); bf_rint(r, BF_RNDZ); ret = JS_CompactBigInt(ctx, ret); } } if (a == &a_s) bf_delete(a); JS_FreeValue(ctx, val); } break; #endif default: val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) return val; goto redo; } return ret; } /* Note: the integer value is satured to 32 bits */ static int JS_ToInt32SatFree(JSContext *ctx, int *pres, JSValue val) { uint32_t tag; int ret; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: case JS_TAG_UNDEFINED: ret = JS_VALUE_GET_INT(val); break; case JS_TAG_EXCEPTION: *pres = 0; return -1; case JS_TAG_FLOAT64: { double d = JS_VALUE_GET_FLOAT64(val); if (isnan(d)) { ret = 0; } else { if (d < INT32_MIN) ret = INT32_MIN; else if (d > INT32_MAX) ret = INT32_MAX; else ret = (int)d; } } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); bf_get_int32(&ret, &p->num, 0); JS_FreeValue(ctx, val); } break; #endif default: val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) { *pres = 0; return -1; } goto redo; } *pres = ret; return 0; } int JS_ToInt32Sat(JSContext *ctx, int *pres, JSValueConst val) { return JS_ToInt32SatFree(ctx, pres, JS_DupValue(ctx, val)); } int JS_ToInt32Clamp(JSContext *ctx, int *pres, JSValueConst val, int min, int max, int min_offset) { int res = JS_ToInt32SatFree(ctx, pres, JS_DupValue(ctx, val)); if (res == 0) { if (*pres < min) { *pres += min_offset; if (*pres < min) *pres = min; } else { if (*pres > max) *pres = max; } } return res; } static int JS_ToInt64SatFree(JSContext *ctx, int64_t *pres, JSValue val) { uint32_t tag; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: case JS_TAG_UNDEFINED: *pres = JS_VALUE_GET_INT(val); return 0; case JS_TAG_EXCEPTION: *pres = 0; return -1; case JS_TAG_FLOAT64: { double d = JS_VALUE_GET_FLOAT64(val); if (isnan(d)) { *pres = 0; } 
else { if (d < INT64_MIN) *pres = INT64_MIN; else if (d > INT64_MAX) *pres = INT64_MAX; else *pres = (int64_t)d; } } return 0; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); bf_get_int64(pres, &p->num, 0); JS_FreeValue(ctx, val); } return 0; #endif default: val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) { *pres = 0; return -1; } goto redo; } } int JS_ToInt64Sat(JSContext *ctx, int64_t *pres, JSValueConst val) { return JS_ToInt64SatFree(ctx, pres, JS_DupValue(ctx, val)); } int JS_ToInt64Clamp(JSContext *ctx, int64_t *pres, JSValueConst val, int64_t min, int64_t max, int64_t neg_offset) { int res = JS_ToInt64SatFree(ctx, pres, JS_DupValue(ctx, val)); if (res == 0) { if (*pres < 0) *pres += neg_offset; if (*pres < min) *pres = min; else if (*pres > max) *pres = max; } return res; } /* Same as JS_ToInt32Free() but with a 64 bit result. Return (<0, 0) in case of exception */ static int JS_ToInt64Free(JSContext *ctx, int64_t *pres, JSValue val) { uint32_t tag; int64_t ret; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: case JS_TAG_UNDEFINED: ret = JS_VALUE_GET_INT(val); break; case JS_TAG_FLOAT64: { JSFloat64Union u; double d; int e; d = JS_VALUE_GET_FLOAT64(val); u.d = d; /* we avoid doing fmod(x, 2^64) */ e = (u.u64 >> 52) & 0x7ff; if (likely(e <= (1023 + 62))) { /* fast case */ ret = (int64_t)d; } else if (e <= (1023 + 62 + 53)) { uint64_t v; /* remainder modulo 2^64 */ v = (u.u64 & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52); ret = v << ((e - 1023) - 52); /* take the sign into account */ if (u.u64 >> 63) ret = -ret; } else { ret = 0; /* also handles NaN and +inf */ } } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); bf_get_int64(&ret, &p->num, BF_GET_INT_MOD); JS_FreeValue(ctx, val); } break; #endif default: val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) { *pres = 0; return -1; } goto redo; } *pres = ret; return 0; } int JS_ToInt64(JSContext *ctx, int64_t *pres, JSValueConst val) { return JS_ToInt64Free(ctx, pres, JS_DupValue(ctx, val)); } int JS_ToInt64Ext(JSContext *ctx, int64_t *pres, JSValueConst val) { if (JS_IsBigInt(ctx, val)) return JS_ToBigInt64(ctx, pres, val); else return JS_ToInt64(ctx, pres, val); } /* return (<0, 0) in case of exception */ static int JS_ToInt32Free(JSContext *ctx, int32_t *pres, JSValue val) { uint32_t tag; int32_t ret; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: case JS_TAG_UNDEFINED: ret = JS_VALUE_GET_INT(val); break; case JS_TAG_FLOAT64: { JSFloat64Union u; double d; int e; d = JS_VALUE_GET_FLOAT64(val); u.d = d; /* we avoid doing fmod(x, 2^32) */ e = (u.u64 >> 52) & 0x7ff; if (likely(e <= (1023 + 30))) { /* fast case */ ret = (int32_t)d; } else if (e <= (1023 + 30 + 53)) { uint64_t v; /* remainder modulo 2^32 */ v = (u.u64 & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52); v = v << ((e - 1023) - 52 + 32); ret = v >> 32; /* take the sign into account */ if (u.u64 >> 63) ret = -ret; } else { ret = 0; /* also handles NaN and +inf */ } } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); bf_get_int32(&ret, &p->num, BF_GET_INT_MOD); JS_FreeValue(ctx, val); } break; #endif default: val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) { *pres = 0; return -1; } goto redo; } *pres = ret; return 0; } int JS_ToInt32(JSContext *ctx, int32_t *pres, JSValueConst val) { return JS_ToInt32Free(ctx, 
pres, JS_DupValue(ctx, val)); } static inline int JS_ToUint32Free(JSContext *ctx, uint32_t *pres, JSValue val) { return JS_ToInt32Free(ctx, (int32_t *)pres, val); } static int JS_ToUint8ClampFree(JSContext *ctx, int32_t *pres, JSValue val) { uint32_t tag; int res; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: case JS_TAG_UNDEFINED: res = JS_VALUE_GET_INT(val); #ifdef CONFIG_BIGNUM int_clamp: #endif res = max_int(0, min_int(255, res)); break; case JS_TAG_FLOAT64: { double d = JS_VALUE_GET_FLOAT64(val); if (isnan(d)) { res = 0; } else { if (d < 0) res = 0; else if (d > 255) res = 255; else res = lrint(d); } } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); bf_t r_s, *r = &r_s; bf_init(ctx->bf_ctx, r); bf_set(r, &p->num); bf_rint(r, BF_RNDN); bf_get_int32(&res, r, 0); bf_delete(r); JS_FreeValue(ctx, val); } goto int_clamp; #endif default: val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) { *pres = 0; return -1; } goto redo; } *pres = res; return 0; } static __exception int JS_ToArrayLengthFree(JSContext *ctx, uint32_t *plen, JSValue val, BOOL is_array_ctor) { uint32_t tag, len; tag = JS_VALUE_GET_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: { int v; v = JS_VALUE_GET_INT(val); if (v < 0) goto fail; len = v; } break; case JS_TAG_BIG_INT: #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: #endif { JSBigFloat *p = JS_VALUE_GET_PTR(val); bf_t a; BOOL res; bf_get_int32((int32_t *)&len, &p->num, BF_GET_INT_MOD); bf_init(ctx->bf_ctx, &a); bf_set_ui(&a, len); res = bf_cmp_eq(&a, &p->num); bf_delete(&a); JS_FreeValue(ctx, val); if (!res) goto fail; } break; default: if (JS_TAG_IS_FLOAT64(tag)) { double d; d = JS_VALUE_GET_FLOAT64(val); len = (uint32_t)d; if (len != d) goto fail; } else { uint32_t len1; if (is_array_ctor) { val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) return -1; /* cannot recurse because val is a number */ if (JS_ToArrayLengthFree(ctx, &len, val, TRUE)) return -1; } else { /* legacy behavior: must do the conversion twice and compare */ if (JS_ToUint32(ctx, &len, val)) { JS_FreeValue(ctx, val); return -1; } val = JS_ToNumberFree(ctx, val); if (JS_IsException(val)) return -1; /* cannot recurse because val is a number */ if (JS_ToArrayLengthFree(ctx, &len1, val, FALSE)) return -1; if (len1 != len) { fail: JS_ThrowRangeError(ctx, "invalid array length"); return -1; } } } break; } *plen = len; return 0; } #define MAX_SAFE_INTEGER (((int64_t)1 << 53) - 1) static BOOL is_safe_integer(double d) { return isfinite(d) && floor(d) == d && fabs(d) <= (double)MAX_SAFE_INTEGER; } int JS_ToIndex(JSContext *ctx, uint64_t *plen, JSValueConst val) { int64_t v; if (JS_ToInt64Sat(ctx, &v, val)) return -1; if (v < 0 || v > MAX_SAFE_INTEGER) { JS_ThrowRangeError(ctx, "invalid array index"); *plen = 0; return -1; } *plen = v; return 0; } /* convert a value to a length between 0 and MAX_SAFE_INTEGER. 
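   For example, a negative length such as -5 clamps to 0, while 2^60 or +Infinity
   clamp to 2^53 - 1 = 9007199254740991, matching the ECMAScript ToLength operation.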
return -1 for exception */ static __exception int JS_ToLengthFree(JSContext *ctx, int64_t *plen, JSValue val) { int res = JS_ToInt64Clamp(ctx, plen, val, 0, MAX_SAFE_INTEGER, 0); JS_FreeValue(ctx, val); return res; } /* Note: can return an exception */ static int JS_NumberIsInteger(JSContext *ctx, JSValueConst val) { double d; if (!JS_IsNumber(val)) return FALSE; if (unlikely(JS_ToFloat64(ctx, &d, val))) return -1; return isfinite(d) && floor(d) == d; } static BOOL JS_NumberIsNegativeOrMinusZero(JSContext *ctx, JSValueConst val) { uint32_t tag; tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: { int v; v = JS_VALUE_GET_INT(val); return (v < 0); } case JS_TAG_FLOAT64: { JSFloat64Union u; u.d = JS_VALUE_GET_FLOAT64(val); return (u.u64 >> 63); } case JS_TAG_BIG_INT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); /* Note: integer zeros are not necessarily positive */ return p->num.sign && !bf_is_zero(&p->num); } #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); return p->num.sign; } break; case JS_TAG_BIG_DECIMAL: { JSBigDecimal *p = JS_VALUE_GET_PTR(val); return p->num.sign; } break; #endif default: return FALSE; } } static JSValue js_bigint_to_string1(JSContext *ctx, JSValueConst val, int radix) { JSValue ret; bf_t a_s, *a; char *str; int saved_sign; a = JS_ToBigInt(ctx, &a_s, val); if (!a) return JS_EXCEPTION; saved_sign = a->sign; if (a->expn == BF_EXP_ZERO) a->sign = 0; str = bf_ftoa(NULL, a, radix, 0, BF_RNDZ | BF_FTOA_FORMAT_FRAC | BF_FTOA_JS_QUIRKS); a->sign = saved_sign; JS_FreeBigInt(ctx, a, &a_s); if (!str) return JS_ThrowOutOfMemory(ctx); ret = JS_NewString(ctx, str); bf_free(ctx->bf_ctx, str); return ret; } static JSValue js_bigint_to_string(JSContext *ctx, JSValueConst val) { return js_bigint_to_string1(ctx, val, 10); } #ifdef CONFIG_BIGNUM static JSValue js_ftoa(JSContext *ctx, JSValueConst val1, int radix, limb_t prec, bf_flags_t flags) { JSValue val, ret; bf_t a_s, *a; char *str; int saved_sign; val = JS_ToNumeric(ctx, val1); if (JS_IsException(val)) return val; a = JS_ToBigFloat(ctx, &a_s, val); if (!a) { JS_FreeValue(ctx, val); return JS_EXCEPTION; } saved_sign = a->sign; if (a->expn == BF_EXP_ZERO) a->sign = 0; flags |= BF_FTOA_JS_QUIRKS; if ((flags & BF_FTOA_FORMAT_MASK) == BF_FTOA_FORMAT_FREE_MIN) { /* Note: for floating point numbers with a radix which is not a power of two, the current precision is used to compute the number of digits. 
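   For a regular float64 this means rounding to 53 bits (with the float64 exponent
   range) first and then letting bf_ftoa pick the minimal digit string at that
   precision; big floats use the current ctx->fp_env precision and flags instead.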
*/ if ((radix & (radix - 1)) != 0) { bf_t r_s, *r = &r_s; int prec, flags1; /* must round first */ if (JS_VALUE_GET_TAG(val) == JS_TAG_BIG_FLOAT) { prec = ctx->fp_env.prec; flags1 = ctx->fp_env.flags & (BF_FLAG_SUBNORMAL | (BF_EXP_BITS_MASK << BF_EXP_BITS_SHIFT)); } else { prec = 53; flags1 = bf_set_exp_bits(11) | BF_FLAG_SUBNORMAL; } bf_init(ctx->bf_ctx, r); bf_set(r, a); bf_round(r, prec, flags1 | BF_RNDN); str = bf_ftoa(NULL, r, radix, prec, flags1 | flags); bf_delete(r); } else { str = bf_ftoa(NULL, a, radix, BF_PREC_INF, flags); } } else { str = bf_ftoa(NULL, a, radix, prec, flags); } a->sign = saved_sign; if (a == &a_s) bf_delete(a); JS_FreeValue(ctx, val); if (!str) return JS_ThrowOutOfMemory(ctx); ret = JS_NewString(ctx, str); bf_free(ctx->bf_ctx, str); return ret; } static JSValue js_bigfloat_to_string(JSContext *ctx, JSValueConst val) { return js_ftoa(ctx, val, 10, 0, BF_RNDN | BF_FTOA_FORMAT_FREE_MIN); } static JSValue js_bigdecimal_to_string1(JSContext *ctx, JSValueConst val, limb_t prec, int flags) { JSValue ret; bfdec_t *a; char *str; int saved_sign; a = JS_ToBigDecimal(ctx, val); if (!a) return JS_EXCEPTION; saved_sign = a->sign; if (a->expn == BF_EXP_ZERO) a->sign = 0; str = bfdec_ftoa(NULL, a, prec, flags | BF_FTOA_JS_QUIRKS); a->sign = saved_sign; if (!str) return JS_ThrowOutOfMemory(ctx); ret = JS_NewString(ctx, str); bf_free(ctx->bf_ctx, str); return ret; } static JSValue js_bigdecimal_to_string(JSContext *ctx, JSValueConst val) { return js_bigdecimal_to_string1(ctx, val, 0, BF_RNDZ | BF_FTOA_FORMAT_FREE); } #endif /* CONFIG_BIGNUM */ /* 2 <= base <= 36 */ static char *i64toa(char *buf_end, int64_t n, unsigned int base) { char *q = buf_end; int digit, is_neg; is_neg = 0; if (n < 0) { is_neg = 1; n = -n; } *--q = '\0'; do { digit = (uint64_t)n % base; n = (uint64_t)n / base; if (digit < 10) digit += '0'; else digit += 'a' - 10; *--q = digit; } while (n != 0); if (is_neg) *--q = '-'; return q; } /* buf1 contains the printf result */ static void js_ecvt1(double d, int n_digits, int *decpt, int *sign, char *buf, int rounding_mode, char *buf1, int buf1_size) { if (rounding_mode != FE_TONEAREST) fesetround(rounding_mode); snprintf(buf1, buf1_size, "%+.*e", n_digits - 1, d); if (rounding_mode != FE_TONEAREST) fesetround(FE_TONEAREST); *sign = (buf1[0] == '-'); /* mantissa */ buf[0] = buf1[1]; if (n_digits > 1) memcpy(buf + 1, buf1 + 3, n_digits - 1); buf[n_digits] = '\0'; /* exponent */ *decpt = atoi(buf1 + n_digits + 2 + (n_digits > 1)) + 1; } /* maximum buffer size for js_dtoa */ #define JS_DTOA_BUF_SIZE 128 /* needed because ecvt usually limits the number of digits to 17. Return the number of digits. 
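   In the non-fixed case the code below binary-searches the smallest digit count
   in [1, 17] whose printed form converts back (via strtod) to exactly d, so for
   example 0.1 ends up printed as "0.1" rather than with all 17 digits.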
*/ static int js_ecvt(double d, int n_digits, int *decpt, int *sign, char *buf, BOOL is_fixed) { int rounding_mode; char buf_tmp[JS_DTOA_BUF_SIZE]; if (!is_fixed) { unsigned int n_digits_min, n_digits_max; /* find the minimum amount of digits (XXX: inefficient but simple) */ n_digits_min = 1; n_digits_max = 17; while (n_digits_min < n_digits_max) { n_digits = (n_digits_min + n_digits_max) / 2; js_ecvt1(d, n_digits, decpt, sign, buf, FE_TONEAREST, buf_tmp, sizeof(buf_tmp)); if (strtod(buf_tmp, NULL) == d) { /* no need to keep the trailing zeros */ while (n_digits >= 2 && buf[n_digits - 1] == '0') n_digits--; n_digits_max = n_digits; } else { n_digits_min = n_digits + 1; } } n_digits = n_digits_max; rounding_mode = FE_TONEAREST; } else { rounding_mode = FE_TONEAREST; #ifdef CONFIG_PRINTF_RNDN { char buf1[JS_DTOA_BUF_SIZE], buf2[JS_DTOA_BUF_SIZE]; int decpt1, sign1, decpt2, sign2; /* The JS rounding is specified as round to nearest ties away from zero (RNDNA), but in printf the "ties" case is not specified (for example it is RNDN for glibc, RNDNA for Windows), so we must round manually. */ js_ecvt1(d, n_digits + 1, &decpt1, &sign1, buf1, FE_TONEAREST, buf_tmp, sizeof(buf_tmp)); /* XXX: could use 2 digits to reduce the average running time */ if (buf1[n_digits] == '5') { js_ecvt1(d, n_digits + 1, &decpt1, &sign1, buf1, FE_DOWNWARD, buf_tmp, sizeof(buf_tmp)); js_ecvt1(d, n_digits + 1, &decpt2, &sign2, buf2, FE_UPWARD, buf_tmp, sizeof(buf_tmp)); if (memcmp(buf1, buf2, n_digits + 1) == 0 && decpt1 == decpt2) { /* exact result: round away from zero */ if (sign1) rounding_mode = FE_DOWNWARD; else rounding_mode = FE_UPWARD; } } } #endif /* CONFIG_PRINTF_RNDN */ } js_ecvt1(d, n_digits, decpt, sign, buf, rounding_mode, buf_tmp, sizeof(buf_tmp)); return n_digits; } static int js_fcvt1(char *buf, int buf_size, double d, int n_digits, int rounding_mode) { int n; if (rounding_mode != FE_TONEAREST) fesetround(rounding_mode); n = snprintf(buf, buf_size, "%.*f", n_digits, d); if (rounding_mode != FE_TONEAREST) fesetround(FE_TONEAREST); assert(n < buf_size); return n; } static void js_fcvt(char *buf, int buf_size, double d, int n_digits) { int rounding_mode; rounding_mode = FE_TONEAREST; #ifdef CONFIG_PRINTF_RNDN { int n1, n2; char buf1[JS_DTOA_BUF_SIZE]; char buf2[JS_DTOA_BUF_SIZE]; /* The JS rounding is specified as round to nearest ties away from zero (RNDNA), but in printf the "ties" case is not specified (for example it is RNDN for glibc, RNDNA for Windows), so we must round manually. 
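   For instance (0.5).toFixed(0) must produce "1" (ties rounded away from zero),
   whereas a round-to-even printf yields "0"; the extra guard digit requested
   below detects such exact ties and re-runs the conversion with the rounding
   direction forced away from zero.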
*/ n1 = js_fcvt1(buf1, sizeof(buf1), d, n_digits + 1, FE_TONEAREST); rounding_mode = FE_TONEAREST; /* XXX: could use 2 digits to reduce the average running time */ if (buf1[n1 - 1] == '5') { n1 = js_fcvt1(buf1, sizeof(buf1), d, n_digits + 1, FE_DOWNWARD); n2 = js_fcvt1(buf2, sizeof(buf2), d, n_digits + 1, FE_UPWARD); if (n1 == n2 && memcmp(buf1, buf2, n1) == 0) { /* exact result: round away from zero */ if (buf1[0] == '-') rounding_mode = FE_DOWNWARD; else rounding_mode = FE_UPWARD; } } } #endif /* CONFIG_PRINTF_RNDN */ js_fcvt1(buf, buf_size, d, n_digits, rounding_mode); } /* radix != 10 is only supported with flags = JS_DTOA_VAR_FORMAT */ /* use as many digits as necessary */ #define JS_DTOA_VAR_FORMAT (0 << 0) /* use n_digits significant digits (1 <= n_digits <= 101) */ #define JS_DTOA_FIXED_FORMAT (1 << 0) /* force fractional format: [-]dd.dd with n_digits fractional digits */ #define JS_DTOA_FRAC_FORMAT (2 << 0) /* force exponential notation either in fixed or variable format */ #define JS_DTOA_FORCE_EXP (1 << 2) /* XXX: slow and maybe not fully correct. Use libbf when it is fast enough. XXX: radix != 10 is only supported for small integers */ static void js_dtoa1(char *buf, double d, int radix, int n_digits, int flags) { char *q; if (!isfinite(d)) { if (isnan(d)) { strcpy(buf, "NaN"); } else { q = buf; if (d < 0) *q++ = '-'; strcpy(q, "Infinity"); } } else if (flags == JS_DTOA_VAR_FORMAT) { int64_t i64; char buf1[70], *ptr; i64 = (int64_t)d; if (d != i64 || i64 > MAX_SAFE_INTEGER || i64 < -MAX_SAFE_INTEGER) goto generic_conv; /* fast path for integers */ ptr = i64toa(buf1 + sizeof(buf1), i64, radix); strcpy(buf, ptr); } else { if (d == 0.0) d = 0.0; /* convert -0 to 0 */ if (flags == JS_DTOA_FRAC_FORMAT) { js_fcvt(buf, JS_DTOA_BUF_SIZE, d, n_digits); } else { char buf1[JS_DTOA_BUF_SIZE]; int sign, decpt, k, n, i, p, n_max; BOOL is_fixed; generic_conv: is_fixed = ((flags & 3) == JS_DTOA_FIXED_FORMAT); if (is_fixed) { n_max = n_digits; } else { n_max = 21; } /* the number has k digits (k >= 1) */ k = js_ecvt(d, n_digits, &decpt, &sign, buf1, is_fixed); n = decpt; /* d=10^(n-k)*(buf1) i.e. d= < x.yyyy 10^(n-1) */ q = buf; if (sign) *q++ = '-'; if (flags & JS_DTOA_FORCE_EXP) goto force_exp; if (n >= 1 && n <= n_max) { if (k <= n) { memcpy(q, buf1, k); q += k; for(i = 0; i < (n - k); i++) *q++ = '0'; *q = '\0'; } else { /* k > n */ memcpy(q, buf1, n); q += n; *q++ = '.'; for(i = 0; i < (k - n); i++) *q++ = buf1[n + i]; *q = '\0'; } } else if (n >= -5 && n <= 0) { *q++ = '0'; *q++ = '.'; for(i = 0; i < -n; i++) *q++ = '0'; memcpy(q, buf1, k); q += k; *q = '\0'; } else { force_exp: /* exponential notation */ *q++ = buf1[0]; if (k > 1) { *q++ = '.'; for(i = 1; i < k; i++) *q++ = buf1[i]; } *q++ = 'e'; p = n - 1; if (p >= 0) *q++ = '+'; sprintf(q, "%d", p); } } } } static JSValue js_dtoa(JSContext *ctx, double d, int radix, int n_digits, int flags) { char buf[JS_DTOA_BUF_SIZE]; js_dtoa1(buf, d, radix, n_digits, flags); return JS_NewString(ctx, buf); } JSValue JS_ToStringInternal(JSContext *ctx, JSValueConst val, BOOL is_ToPropertyKey) { uint32_t tag; const char *str; char buf[32]; tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_STRING: return JS_DupValue(ctx, val); case JS_TAG_INT: snprintf(buf, sizeof(buf), "%d", JS_VALUE_GET_INT(val)); str = buf; goto new_string; case JS_TAG_BOOL: return JS_AtomToString(ctx, JS_VALUE_GET_BOOL(val) ? 
JS_ATOM_true : JS_ATOM_false); case JS_TAG_NULL: return JS_AtomToString(ctx, JS_ATOM_null); case JS_TAG_UNDEFINED: return JS_AtomToString(ctx, JS_ATOM_undefined); case JS_TAG_EXCEPTION: return JS_EXCEPTION; case JS_TAG_OBJECT: { JSValue val1, ret; val1 = JS_ToPrimitive(ctx, val, HINT_STRING); if (JS_IsException(val1)) return val1; ret = JS_ToStringInternal(ctx, val1, is_ToPropertyKey); JS_FreeValue(ctx, val1); return ret; } break; case JS_TAG_FUNCTION_BYTECODE: str = "[function bytecode]"; goto new_string; case JS_TAG_SYMBOL: if (is_ToPropertyKey) { return JS_DupValue(ctx, val); } else { return JS_ThrowTypeError(ctx, "cannot convert symbol to string"); } case JS_TAG_FLOAT64: return js_dtoa(ctx, JS_VALUE_GET_FLOAT64(val), 10, 0, JS_DTOA_VAR_FORMAT); case JS_TAG_BIG_INT: return ctx->rt->bigint_ops.to_string(ctx, val); #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: return ctx->rt->bigfloat_ops.to_string(ctx, val); case JS_TAG_BIG_DECIMAL: return ctx->rt->bigdecimal_ops.to_string(ctx, val); #endif default: str = "[unsupported type]"; new_string: return JS_NewString(ctx, str); } } JSValue JS_ToString(JSContext *ctx, JSValueConst val) { return JS_ToStringInternal(ctx, val, FALSE); } static JSValue JS_ToStringFree(JSContext *ctx, JSValue val) { JSValue ret; ret = JS_ToString(ctx, val); JS_FreeValue(ctx, val); return ret; } static JSValue JS_ToLocaleStringFree(JSContext *ctx, JSValue val) { if (JS_IsUndefined(val) || JS_IsNull(val)) return JS_ToStringFree(ctx, val); return JS_InvokeFree(ctx, val, JS_ATOM_toLocaleString, 0, NULL); } JSValue JS_ToPropertyKey(JSContext *ctx, JSValueConst val) { return JS_ToStringInternal(ctx, val, TRUE); } static JSValue JS_ToStringCheckObject(JSContext *ctx, JSValueConst val) { uint32_t tag = JS_VALUE_GET_TAG(val); if (tag == JS_TAG_NULL || tag == JS_TAG_UNDEFINED) return JS_ThrowTypeError(ctx, "null or undefined are forbidden"); return JS_ToString(ctx, val); } static JSValue JS_ToQuotedString(JSContext *ctx, JSValueConst val1) { JSValue val; JSString *p; int i; uint32_t c; StringBuffer b_s, *b = &b_s; char buf[16]; val = JS_ToStringCheckObject(ctx, val1); if (JS_IsException(val)) return val; p = JS_VALUE_GET_STRING(val); if (string_buffer_init(ctx, b, p->len + 2)) goto fail; if (string_buffer_putc8(b, '\"')) goto fail; for(i = 0; i < p->len; ) { c = string_getc(p, &i); switch(c) { case '\t': c = 't'; goto quote; case '\r': c = 'r'; goto quote; case '\n': c = 'n'; goto quote; case '\b': c = 'b'; goto quote; case '\f': c = 'f'; goto quote; case '\"': case '\\': quote: if (string_buffer_putc8(b, '\\')) goto fail; if (string_buffer_putc8(b, c)) goto fail; break; default: if (c < 32 || (c >= 0xd800 && c < 0xe000)) { snprintf(buf, sizeof(buf), "\\u%04x", c); if (string_buffer_puts8(b, buf)) goto fail; } else { if (string_buffer_putc(b, c)) goto fail; } break; } } if (string_buffer_putc8(b, '\"')) goto fail; JS_FreeValue(ctx, val); return string_buffer_end(b); fail: JS_FreeValue(ctx, val); string_buffer_free(b); return JS_EXCEPTION; } static __maybe_unused void JS_DumpObjectHeader(JSRuntime *rt) { printf("%14s %4s %4s %14s %10s %s\n", "ADDRESS", "REFS", "SHRF", "PROTO", "CLASS", "PROPS"); } /* for debug only: dump an object without side effect */ static __maybe_unused void JS_DumpObject(JSRuntime *rt, JSObject *p) { uint32_t i; char atom_buf[ATOM_GET_STR_BUF_SIZE]; JSShape *sh; JSShapeProperty *prs; JSProperty *pr; BOOL is_first = TRUE; /* XXX: should encode atoms with special characters */ sh = p->shape; /* the shape can be NULL while freeing an object */ printf("%14p %4d 
", (void *)p, p->header.ref_count); if (sh) { printf("%3d%c %14p ", sh->header.ref_count, " *"[sh->is_hashed], (void *)sh->proto); } else { printf("%3s %14s ", "-", "-"); } printf("%10s ", JS_AtomGetStrRT(rt, atom_buf, sizeof(atom_buf), rt->class_array[p->class_id].class_name)); if (p->is_exotic && p->fast_array) { printf("[ "); for(i = 0; i < p->u.array.count; i++) { if (i != 0) printf(", "); switch (p->class_id) { case JS_CLASS_ARRAY: case JS_CLASS_ARGUMENTS: JS_DumpValueShort(rt, p->u.array.u.values[i]); break; case JS_CLASS_UINT8C_ARRAY: case JS_CLASS_INT8_ARRAY: case JS_CLASS_UINT8_ARRAY: case JS_CLASS_INT16_ARRAY: case JS_CLASS_UINT16_ARRAY: case JS_CLASS_INT32_ARRAY: case JS_CLASS_UINT32_ARRAY: case JS_CLASS_BIG_INT64_ARRAY: case JS_CLASS_BIG_UINT64_ARRAY: case JS_CLASS_FLOAT32_ARRAY: case JS_CLASS_FLOAT64_ARRAY: { int size = 1 << typed_array_size_log2(p->class_id); const uint8_t *b = p->u.array.u.uint8_ptr + i * size; while (size-- > 0) printf("%02X", *b++); } break; } } printf(" ] "); } if (sh) { printf("{ "); for(i = 0, prs = get_shape_prop(sh); i < sh->prop_count; i++, prs++) { if (prs->atom != JS_ATOM_NULL) { pr = &p->prop[i]; if (!is_first) printf(", "); printf("%s: ", JS_AtomGetStrRT(rt, atom_buf, sizeof(atom_buf), prs->atom)); if ((prs->flags & JS_PROP_TMASK) == JS_PROP_GETSET) { printf("[getset %p %p]", (void *)pr->u.getset.getter, (void *)pr->u.getset.setter); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_VARREF) { printf("[varref %p]", (void *)pr->u.var_ref); } else if ((prs->flags & JS_PROP_TMASK) == JS_PROP_AUTOINIT) { printf("[autoinit %p %d %p]", (void *)js_autoinit_get_realm(pr), js_autoinit_get_id(pr), (void *)pr->u.init.opaque); } else { JS_DumpValueShort(rt, pr->u.value); } is_first = FALSE; } } printf(" }"); } if (js_class_has_bytecode(p->class_id)) { JSFunctionBytecode *b = p->u.func.function_bytecode; JSVarRef **var_refs; if (b->closure_var_count) { var_refs = p->u.func.var_refs; printf(" Closure:"); for(i = 0; i < b->closure_var_count; i++) { printf(" "); JS_DumpValueShort(rt, var_refs[i]->value); } if (p->u.func.home_object) { printf(" HomeObject: "); JS_DumpValueShort(rt, JS_MKPTR(JS_TAG_OBJECT, p->u.func.home_object)); } } } printf("\n"); } static __maybe_unused void JS_DumpGCObject(JSRuntime *rt, JSGCObjectHeader *p) { if (p->gc_obj_type == JS_GC_OBJ_TYPE_JS_OBJECT) { JS_DumpObject(rt, (JSObject *)p); } else { printf("%14p %4d ", (void *)p, p->ref_count); switch(p->gc_obj_type) { case JS_GC_OBJ_TYPE_FUNCTION_BYTECODE: printf("[function bytecode]"); break; case JS_GC_OBJ_TYPE_SHAPE: printf("[shape]"); break; case JS_GC_OBJ_TYPE_VAR_REF: printf("[var_ref]"); break; case JS_GC_OBJ_TYPE_ASYNC_FUNCTION: printf("[async_function]"); break; case JS_GC_OBJ_TYPE_JS_CONTEXT: printf("[js_context]"); break; default: printf("[unknown %d]", p->gc_obj_type); break; } printf("\n"); } } static __maybe_unused void JS_DumpValueShort(JSRuntime *rt, JSValueConst val) { uint32_t tag = JS_VALUE_GET_NORM_TAG(val); const char *str; switch(tag) { case JS_TAG_INT: printf("%d", JS_VALUE_GET_INT(val)); break; case JS_TAG_BOOL: if (JS_VALUE_GET_BOOL(val)) str = "true"; else str = "false"; goto print_str; case JS_TAG_NULL: str = "null"; goto print_str; case JS_TAG_EXCEPTION: str = "exception"; goto print_str; case JS_TAG_UNINITIALIZED: str = "uninitialized"; goto print_str; case JS_TAG_UNDEFINED: str = "undefined"; print_str: printf("%s", str); break; case JS_TAG_FLOAT64: printf("%.14g", JS_VALUE_GET_FLOAT64(val)); break; case JS_TAG_BIG_INT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); 
char *str; str = bf_ftoa(NULL, &p->num, 10, 0, BF_RNDZ | BF_FTOA_FORMAT_FRAC); printf("%sn", str); bf_realloc(&rt->bf_ctx, str, 0); } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p = JS_VALUE_GET_PTR(val); char *str; str = bf_ftoa(NULL, &p->num, 16, BF_PREC_INF, BF_RNDZ | BF_FTOA_FORMAT_FREE | BF_FTOA_ADD_PREFIX); printf("%sl", str); bf_free(&rt->bf_ctx, str); } break; case JS_TAG_BIG_DECIMAL: { JSBigDecimal *p = JS_VALUE_GET_PTR(val); char *str; str = bfdec_ftoa(NULL, &p->num, BF_PREC_INF, BF_RNDZ | BF_FTOA_FORMAT_FREE); printf("%sm", str); bf_free(&rt->bf_ctx, str); } break; #endif case JS_TAG_STRING: { JSString *p; p = JS_VALUE_GET_STRING(val); JS_DumpString(rt, p); } break; case JS_TAG_FUNCTION_BYTECODE: { JSFunctionBytecode *b = JS_VALUE_GET_PTR(val); char buf[ATOM_GET_STR_BUF_SIZE]; printf("[bytecode %s]", JS_AtomGetStrRT(rt, buf, sizeof(buf), b->func_name)); } break; case JS_TAG_OBJECT: { JSObject *p = JS_VALUE_GET_OBJ(val); JSAtom atom = rt->class_array[p->class_id].class_name; char atom_buf[ATOM_GET_STR_BUF_SIZE]; printf("[%s %p]", JS_AtomGetStrRT(rt, atom_buf, sizeof(atom_buf), atom), (void *)p); } break; case JS_TAG_SYMBOL: { JSAtomStruct *p = JS_VALUE_GET_PTR(val); char atom_buf[ATOM_GET_STR_BUF_SIZE]; printf("Symbol(%s)", JS_AtomGetStrRT(rt, atom_buf, sizeof(atom_buf), js_get_atom_index(rt, p))); } break; case JS_TAG_MODULE: printf("[module]"); break; default: printf("[unknown tag %d]", tag); break; } } static __maybe_unused void JS_DumpValue(JSContext *ctx, JSValueConst val) { JS_DumpValueShort(ctx->rt, val); } static __maybe_unused void JS_PrintValue(JSContext *ctx, const char *str, JSValueConst val) { printf("%s=", str); JS_DumpValueShort(ctx->rt, val); printf("\n"); } /* return -1 if exception (proxy case) or TRUE/FALSE */ int JS_IsArray(JSContext *ctx, JSValueConst val) { JSObject *p; if (JS_VALUE_GET_TAG(val) == JS_TAG_OBJECT) { p = JS_VALUE_GET_OBJ(val); if (unlikely(p->class_id == JS_CLASS_PROXY)) return js_proxy_isArray(ctx, val); else return p->class_id == JS_CLASS_ARRAY; } else { return FALSE; } } static double js_pow(double a, double b) { if (unlikely(!isfinite(b)) && fabs(a) == 1) { /* not compatible with IEEE 754 */ return JS_FLOAT64_NAN; } else { return pow(a, b); } } JSValue JS_NewBigInt64_1(JSContext *ctx, int64_t v) { JSValue val; bf_t *a; val = JS_NewBigInt(ctx); if (JS_IsException(val)) return val; a = JS_GetBigInt(val); if (bf_set_si(a, v)) { JS_FreeValue(ctx, val); return JS_ThrowOutOfMemory(ctx); } return val; } JSValue JS_NewBigInt64(JSContext *ctx, int64_t v) { if (is_math_mode(ctx) && v >= -MAX_SAFE_INTEGER && v <= MAX_SAFE_INTEGER) { return JS_NewInt64(ctx, v); } else { return JS_NewBigInt64_1(ctx, v); } } JSValue JS_NewBigUint64(JSContext *ctx, uint64_t v) { JSValue val; if (is_math_mode(ctx) && v <= MAX_SAFE_INTEGER) { val = JS_NewInt64(ctx, v); } else { bf_t *a; val = JS_NewBigInt(ctx); if (JS_IsException(val)) return val; a = JS_GetBigInt(val); if (bf_set_ui(a, v)) { JS_FreeValue(ctx, val); return JS_ThrowOutOfMemory(ctx); } } return val; } /* return NaN if bad bigint literal */ static JSValue JS_StringToBigInt(JSContext *ctx, JSValue val) { const char *str, *p; size_t len; int flags; str = JS_ToCStringLen(ctx, &len, val); JS_FreeValue(ctx, val); if (!str) return JS_EXCEPTION; p = str; p += skip_spaces(p); if ((p - str) == len) { val = JS_NewBigInt64(ctx, 0); } else { flags = ATOD_INT_ONLY | ATOD_ACCEPT_BIN_OCT | ATOD_TYPE_BIG_INT; #ifdef CONFIG_BIGNUM if (is_math_mode(ctx)) flags |= ATOD_MODE_BIGINT; #endif val = js_atof(ctx, 
p, &p, 0, flags); p += skip_spaces(p); if (!JS_IsException(val)) { if ((p - str) != len) { JS_FreeValue(ctx, val); val = JS_NAN; } } } JS_FreeCString(ctx, str); return val; } static JSValue JS_StringToBigIntErr(JSContext *ctx, JSValue val) { val = JS_StringToBigInt(ctx, val); if (JS_VALUE_IS_NAN(val)) return JS_ThrowSyntaxError(ctx, "invalid bigint literal"); return val; } /* if the returned bigfloat is allocated it is equal to 'buf'. Otherwise it is a pointer to the bigfloat in 'val'. */ static bf_t *JS_ToBigIntFree(JSContext *ctx, bf_t *buf, JSValue val) { uint32_t tag; bf_t *r; JSBigFloat *p; redo: tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_NULL: case JS_TAG_UNDEFINED: if (!is_math_mode(ctx)) goto fail; /* fall tru */ case JS_TAG_BOOL: r = buf; bf_init(ctx->bf_ctx, r); bf_set_si(r, JS_VALUE_GET_INT(val)); break; case JS_TAG_FLOAT64: { double d = JS_VALUE_GET_FLOAT64(val); if (!is_math_mode(ctx)) goto fail; if (!isfinite(d)) goto fail; r = buf; bf_init(ctx->bf_ctx, r); d = trunc(d); bf_set_float64(r, d); } break; case JS_TAG_BIG_INT: p = JS_VALUE_GET_PTR(val); r = &p->num; break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: if (!is_math_mode(ctx)) goto fail; p = JS_VALUE_GET_PTR(val); if (!bf_is_finite(&p->num)) goto fail; r = buf; bf_init(ctx->bf_ctx, r); bf_set(r, &p->num); bf_rint(r, BF_RNDZ); JS_FreeValue(ctx, val); break; #endif case JS_TAG_STRING: val = JS_StringToBigIntErr(ctx, val); if (JS_IsException(val)) return NULL; goto redo; case JS_TAG_OBJECT: val = JS_ToPrimitiveFree(ctx, val, HINT_NUMBER); if (JS_IsException(val)) return NULL; goto redo; default: fail: JS_FreeValue(ctx, val); JS_ThrowTypeError(ctx, "cannot convert to bigint"); return NULL; } return r; } static bf_t *JS_ToBigInt(JSContext *ctx, bf_t *buf, JSValueConst val) { return JS_ToBigIntFree(ctx, buf, JS_DupValue(ctx, val)); } static __maybe_unused JSValue JS_ToBigIntValueFree(JSContext *ctx, JSValue val) { if (JS_VALUE_GET_TAG(val) == JS_TAG_BIG_INT) { return val; } else { bf_t a_s, *a, *r; int ret; JSValue res; res = JS_NewBigInt(ctx); if (JS_IsException(res)) return JS_EXCEPTION; a = JS_ToBigIntFree(ctx, &a_s, val); if (!a) { JS_FreeValue(ctx, res); return JS_EXCEPTION; } r = JS_GetBigInt(res); ret = bf_set(r, a); JS_FreeBigInt(ctx, a, &a_s); if (ret) { JS_FreeValue(ctx, res); return JS_ThrowOutOfMemory(ctx); } return JS_CompactBigInt(ctx, res); } } /* free the bf_t allocated by JS_ToBigInt */ static void JS_FreeBigInt(JSContext *ctx, bf_t *a, bf_t *buf) { if (a == buf) { bf_delete(a); } else { JSBigFloat *p = (JSBigFloat *)((uint8_t *)a - offsetof(JSBigFloat, num)); JS_FreeValue(ctx, JS_MKPTR(JS_TAG_BIG_INT, p)); } } /* XXX: merge with JS_ToInt64Free with a specific flag */ static int JS_ToBigInt64Free(JSContext *ctx, int64_t *pres, JSValue val) { bf_t a_s, *a; a = JS_ToBigIntFree(ctx, &a_s, val); if (!a) { *pres = 0; return -1; } bf_get_int64(pres, a, BF_GET_INT_MOD); JS_FreeBigInt(ctx, a, &a_s); return 0; } int JS_ToBigInt64(JSContext *ctx, int64_t *pres, JSValueConst val) { return JS_ToBigInt64Free(ctx, pres, JS_DupValue(ctx, val)); } static JSBigFloat *js_new_bf(JSContext *ctx) { JSBigFloat *p; p = js_malloc(ctx, sizeof(*p)); if (!p) return NULL; p->header.ref_count = 1; bf_init(ctx->bf_ctx, &p->num); return p; } static JSValue JS_NewBigInt(JSContext *ctx) { JSBigFloat *p; p = js_malloc(ctx, sizeof(*p)); if (!p) return JS_EXCEPTION; p->header.ref_count = 1; bf_init(ctx->bf_ctx, &p->num); return JS_MKPTR(JS_TAG_BIG_INT, p); } static JSValue JS_CompactBigInt1(JSContext *ctx, 
JSValue val, BOOL convert_to_safe_integer) { int64_t v; bf_t *a; if (JS_VALUE_GET_TAG(val) != JS_TAG_BIG_INT) return val; /* fail safe */ a = JS_GetBigInt(val); if (convert_to_safe_integer && bf_get_int64(&v, a, 0) == 0 && v >= -MAX_SAFE_INTEGER && v <= MAX_SAFE_INTEGER) { JS_FreeValue(ctx, val); return JS_NewInt64(ctx, v); } else if (a->expn == BF_EXP_ZERO && a->sign) { JSBigFloat *p = JS_VALUE_GET_PTR(val); assert(p->header.ref_count == 1); a->sign = 0; } return val; } /* Convert the big int to a safe integer if in math mode. normalize the zero representation. Could also be used to convert the bigint to a short bigint value. The reference count of the value must be 1. Cannot fail */ static JSValue JS_CompactBigInt(JSContext *ctx, JSValue val) { return JS_CompactBigInt1(ctx, val, is_math_mode(ctx)); } static JSValue throw_bf_exception(JSContext *ctx, int status) { const char *str; if (status & BF_ST_MEM_ERROR) return JS_ThrowOutOfMemory(ctx); if (status & BF_ST_DIVIDE_ZERO) { str = "division by zero"; } else if (status & BF_ST_INVALID_OP) { str = "invalid operation"; } else { str = "integer overflow"; } return JS_ThrowRangeError(ctx, "%s", str); } /* if the returned bigfloat is allocated it is equal to 'buf'. Otherwise it is a pointer to the bigfloat in 'val'. Return NULL in case of error. */ static bf_t *JS_ToBigFloat(JSContext *ctx, bf_t *buf, JSValueConst val) { uint32_t tag; bf_t *r; JSBigFloat *p; tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_INT: case JS_TAG_BOOL: case JS_TAG_NULL: r = buf; bf_init(ctx->bf_ctx, r); if (bf_set_si(r, JS_VALUE_GET_INT(val))) goto fail; break; case JS_TAG_FLOAT64: r = buf; bf_init(ctx->bf_ctx, r); if (bf_set_float64(r, JS_VALUE_GET_FLOAT64(val))) { fail: bf_delete(r); return NULL; } break; case JS_TAG_BIG_INT: #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: #endif p = JS_VALUE_GET_PTR(val); r = &p->num; break; case JS_TAG_UNDEFINED: default: r = buf; bf_init(ctx->bf_ctx, r); bf_set_nan(r); break; } return r; } #ifdef CONFIG_BIGNUM /* return NULL if invalid type */ static bfdec_t *JS_ToBigDecimal(JSContext *ctx, JSValueConst val) { uint32_t tag; JSBigDecimal *p; bfdec_t *r; tag = JS_VALUE_GET_NORM_TAG(val); switch(tag) { case JS_TAG_BIG_DECIMAL: p = JS_VALUE_GET_PTR(val); r = &p->num; break; default: JS_ThrowTypeError(ctx, "bigdecimal expected"); r = NULL; break; } return r; } static JSValue JS_NewBigFloat(JSContext *ctx) { JSBigFloat *p; p = js_malloc(ctx, sizeof(*p)); if (!p) return JS_EXCEPTION; p->header.ref_count = 1; bf_init(ctx->bf_ctx, &p->num); return JS_MKPTR(JS_TAG_BIG_FLOAT, p); } static JSValue JS_NewBigDecimal(JSContext *ctx) { JSBigDecimal *p; p = js_malloc(ctx, sizeof(*p)); if (!p) return JS_EXCEPTION; p->header.ref_count = 1; bfdec_init(ctx->bf_ctx, &p->num); return JS_MKPTR(JS_TAG_BIG_DECIMAL, p); } /* must be kept in sync with JSOverloadableOperatorEnum */ /* XXX: use atoms ? 
*/ static const char js_overloadable_operator_names[JS_OVOP_COUNT][4] = { "+", "-", "*", "/", "%", "**", "|", "&", "^", "<<", ">>", ">>>", "==", "<", "pos", "neg", "++", "--", "~", }; static int get_ovop_from_opcode(OPCodeEnum op) { switch(op) { case OP_add: return JS_OVOP_ADD; case OP_sub: return JS_OVOP_SUB; case OP_mul: return JS_OVOP_MUL; case OP_div: return JS_OVOP_DIV; case OP_mod: case OP_math_mod: return JS_OVOP_MOD; case OP_pow: return JS_OVOP_POW; case OP_or: return JS_OVOP_OR; case OP_and: return JS_OVOP_AND; case OP_xor: return JS_OVOP_XOR; case OP_shl: return JS_OVOP_SHL; case OP_sar: return JS_OVOP_SAR; case OP_shr: return JS_OVOP_SHR; case OP_eq: case OP_neq: return JS_OVOP_EQ; case OP_lt: case OP_lte: case OP_gt: case OP_gte: return JS_OVOP_LESS; case OP_plus: return JS_OVOP_POS; case OP_neg: return JS_OVOP_NEG; case OP_inc: return JS_OVOP_INC; case OP_dec: return JS_OVOP_DEC; default: abort(); } } /* return NULL if not present */ static JSObject *find_binary_op(JSBinaryOperatorDef *def, uint32_t operator_index, JSOverloadableOperatorEnum op) { JSBinaryOperatorDefEntry *ent; int i; for(i = 0; i < def->count; i++) { ent = &def->tab[i]; if (ent->operator_index == operator_index) return ent->ops[op]; } return NULL; } /* return -1 if exception, 0 if no operator overloading, 1 if overloaded operator called */ static __exception int js_call_binary_op_fallback(JSContext *ctx, JSValue *pret, JSValueConst op1, JSValueConst op2, OPCodeEnum op, BOOL is_numeric, int hint) { JSValue opset1_obj, opset2_obj, method, ret, new_op1, new_op2; JSOperatorSetData *opset1, *opset2; JSOverloadableOperatorEnum ovop; JSObject *p; JSValueConst args[2]; if (!ctx->allow_operator_overloading) return 0; opset2_obj = JS_UNDEFINED; opset1_obj = JS_GetProperty(ctx, op1, JS_ATOM_Symbol_operatorSet); if (JS_IsException(opset1_obj)) goto exception; if (JS_IsUndefined(opset1_obj)) return 0; opset1 = JS_GetOpaque2(ctx, opset1_obj, JS_CLASS_OPERATOR_SET); if (!opset1) goto exception; opset2_obj = JS_GetProperty(ctx, op2, JS_ATOM_Symbol_operatorSet); if (JS_IsException(opset2_obj)) goto exception; if (JS_IsUndefined(opset2_obj)) { JS_FreeValue(ctx, opset1_obj); return 0; } opset2 = JS_GetOpaque2(ctx, opset2_obj, JS_CLASS_OPERATOR_SET); if (!opset2) goto exception; if (opset1->is_primitive && opset2->is_primitive) { JS_FreeValue(ctx, opset1_obj); JS_FreeValue(ctx, opset2_obj); return 0; } ovop = get_ovop_from_opcode(op); if (opset1->operator_counter == opset2->operator_counter) { p = opset1->self_ops[ovop]; } else if (opset1->operator_counter > opset2->operator_counter) { p = find_binary_op(&opset1->left, opset2->operator_counter, ovop); } else { p = find_binary_op(&opset2->right, opset1->operator_counter, ovop); } if (!p) { JS_ThrowTypeError(ctx, "operator %s: no function defined", js_overloadable_operator_names[ovop]); goto exception; } if (opset1->is_primitive) { if (is_numeric) { new_op1 = JS_ToNumeric(ctx, op1); } else { new_op1 = JS_ToPrimitive(ctx, op1, hint); } if (JS_IsException(new_op1)) goto exception; } else { new_op1 = JS_DupValue(ctx, op1); } if (opset2->is_primitive) { if (is_numeric) { new_op2 = JS_ToNumeric(ctx, op2); } else { new_op2 = JS_ToPrimitive(ctx, op2, hint); } if (JS_IsException(new_op2)) { JS_FreeValue(ctx, new_op1); goto exception; } } else { new_op2 = JS_DupValue(ctx, op2); } /* XXX: could apply JS_ToPrimitive() if primitive type so that the operator function does not get a value object */ method = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); if (ovop == JS_OVOP_LESS && (op == OP_lte 
|| op == OP_gt)) { args[0] = new_op2; args[1] = new_op1; } else { args[0] = new_op1; args[1] = new_op2; } ret = JS_CallFree(ctx, method, JS_UNDEFINED, 2, args); JS_FreeValue(ctx, new_op1); JS_FreeValue(ctx, new_op2); if (JS_IsException(ret)) goto exception; if (ovop == JS_OVOP_EQ) { BOOL res = JS_ToBoolFree(ctx, ret); if (op == OP_neq) res ^= 1; ret = JS_NewBool(ctx, res); } else if (ovop == JS_OVOP_LESS) { if (JS_IsUndefined(ret)) { ret = JS_FALSE; } else { BOOL res = JS_ToBoolFree(ctx, ret); if (op == OP_lte || op == OP_gte) res ^= 1; ret = JS_NewBool(ctx, res); } } JS_FreeValue(ctx, opset1_obj); JS_FreeValue(ctx, opset2_obj); *pret = ret; return 1; exception: JS_FreeValue(ctx, opset1_obj); JS_FreeValue(ctx, opset2_obj); *pret = JS_UNDEFINED; return -1; } /* try to call the operation on the operatorSet field of 'obj'. Only used for "/" and "**" on the BigInt prototype in math mode */ static __exception int js_call_binary_op_simple(JSContext *ctx, JSValue *pret, JSValueConst obj, JSValueConst op1, JSValueConst op2, OPCodeEnum op) { JSValue opset1_obj, method, ret, new_op1, new_op2; JSOperatorSetData *opset1; JSOverloadableOperatorEnum ovop; JSObject *p; JSValueConst args[2]; opset1_obj = JS_GetProperty(ctx, obj, JS_ATOM_Symbol_operatorSet); if (JS_IsException(opset1_obj)) goto exception; if (JS_IsUndefined(opset1_obj)) return 0; opset1 = JS_GetOpaque2(ctx, opset1_obj, JS_CLASS_OPERATOR_SET); if (!opset1) goto exception; ovop = get_ovop_from_opcode(op); p = opset1->self_ops[ovop]; if (!p) { JS_FreeValue(ctx, opset1_obj); return 0; } new_op1 = JS_ToNumeric(ctx, op1); if (JS_IsException(new_op1)) goto exception; new_op2 = JS_ToNumeric(ctx, op2); if (JS_IsException(new_op2)) { JS_FreeValue(ctx, new_op1); goto exception; } method = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); args[0] = new_op1; args[1] = new_op2; ret = JS_CallFree(ctx, method, JS_UNDEFINED, 2, args); JS_FreeValue(ctx, new_op1); JS_FreeValue(ctx, new_op2); if (JS_IsException(ret)) goto exception; JS_FreeValue(ctx, opset1_obj); *pret = ret; return 1; exception: JS_FreeValue(ctx, opset1_obj); *pret = JS_UNDEFINED; return -1; } /* return -1 if exception, 0 if no operator overloading, 1 if overloaded operator called */ static __exception int js_call_unary_op_fallback(JSContext *ctx, JSValue *pret, JSValueConst op1, OPCodeEnum op) { JSValue opset1_obj, method, ret; JSOperatorSetData *opset1; JSOverloadableOperatorEnum ovop; JSObject *p; if (!ctx->allow_operator_overloading) return 0; opset1_obj = JS_GetProperty(ctx, op1, JS_ATOM_Symbol_operatorSet); if (JS_IsException(opset1_obj)) goto exception; if (JS_IsUndefined(opset1_obj)) return 0; opset1 = JS_GetOpaque2(ctx, opset1_obj, JS_CLASS_OPERATOR_SET); if (!opset1) goto exception; if (opset1->is_primitive) { JS_FreeValue(ctx, opset1_obj); return 0; } ovop = get_ovop_from_opcode(op); p = opset1->self_ops[ovop]; if (!p) { JS_ThrowTypeError(ctx, "no overloaded operator %s", js_overloadable_operator_names[ovop]); goto exception; } method = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p)); ret = JS_CallFree(ctx, method, JS_UNDEFINED, 1, &op1); if (JS_IsException(ret)) goto exception; JS_FreeValue(ctx, opset1_obj); *pret = ret; return 1; exception: JS_FreeValue(ctx, opset1_obj); *pret = JS_UNDEFINED; return -1; } static int js_unary_arith_bigfloat(JSContext *ctx, JSValue *pres, OPCodeEnum op, JSValue op1) { bf_t a_s, *r, *a; int ret, v; JSValue res; if (op == OP_plus && !is_math_mode(ctx)) { JS_ThrowTypeError(ctx, "bigfloat argument with unary +"); JS_FreeValue(ctx, op1); return -1; } res = 
JS_NewBigFloat(ctx); if (JS_IsException(res)) { JS_FreeValue(ctx, op1); return -1; } r = JS_GetBigFloat(res); a = JS_ToBigFloat(ctx, &a_s, op1); if (!a) { JS_FreeValue(ctx, res); JS_FreeValue(ctx, op1); return -1; } ret = 0; switch(op) { case OP_inc: case OP_dec: v = 2 * (op - OP_dec) - 1; ret = bf_add_si(r, a, v, ctx->fp_env.prec, ctx->fp_env.flags); break; case OP_plus: ret = bf_set(r, a); break; case OP_neg: ret = bf_set(r, a); bf_neg(r); break; default: abort(); } if (a == &a_s) bf_delete(a); JS_FreeValue(ctx, op1); if (unlikely(ret & BF_ST_MEM_ERROR)) { JS_FreeValue(ctx, res); throw_bf_exception(ctx, ret); return -1; } *pres = res; return 0; } static int js_unary_arith_bigdecimal(JSContext *ctx, JSValue *pres, OPCodeEnum op, JSValue op1) { bfdec_t *r, *a; int ret, v; JSValue res; if (op == OP_plus && !is_math_mode(ctx)) { JS_ThrowTypeError(ctx, "bigdecimal argument with unary +"); JS_FreeValue(ctx, op1); return -1; } res = JS_NewBigDecimal(ctx); if (JS_IsException(res)) { JS_FreeValue(ctx, op1); return -1; } r = JS_GetBigDecimal(res); a = JS_ToBigDecimal(ctx, op1); if (!a) { JS_FreeValue(ctx, res); JS_FreeValue(ctx, op1); return -1; } ret = 0; switch(op) { case OP_inc: case OP_dec: v = 2 * (op - OP_dec) - 1; ret = bfdec_add_si(r, a, v, BF_PREC_INF, BF_RNDZ); break; case OP_plus: ret = bfdec_set(r, a); break; case OP_neg: ret = bfdec_set(r, a); bfdec_neg(r); break; default: abort(); } JS_FreeValue(ctx, op1); if (unlikely(ret)) { JS_FreeValue(ctx, res); throw_bf_exception(ctx, ret); return -1; } *pres = res; return 0; } #endif /* CONFIG_BIGNUM */ static int js_unary_arith_bigint(JSContext *ctx, JSValue *pres, OPCodeEnum op, JSValue op1) { bf_t a_s, *r, *a; int ret, v; JSValue res; if (op == OP_plus && !is_math_mode(ctx)) { JS_ThrowTypeError(ctx, "bigint argument with unary +"); JS_FreeValue(ctx, op1); return -1; } res = JS_NewBigInt(ctx); if (JS_IsException(res)) { JS_FreeValue(ctx, op1); return -1; } r = JS_GetBigInt(res); a = JS_ToBigInt(ctx, &a_s, op1); if (!a) { JS_FreeValue(ctx, res); JS_FreeValue(ctx, op1); return -1; } ret = 0; switch(op) { case OP_inc: case OP_dec: v = 2 * (op - OP_dec) - 1; ret = bf_add_si(r, a, v, BF_PREC_INF, BF_RNDZ); break; case OP_plus: ret = bf_set(r, a); break; case OP_neg: ret = bf_set(r, a); bf_neg(r); break; case OP_not: ret = bf_add_si(r, a, 1, BF_PREC_INF, BF_RNDZ); bf_neg(r); break; default: abort(); } JS_FreeBigInt(ctx, a, &a_s); JS_FreeValue(ctx, op1); if (unlikely(ret)) { JS_FreeValue(ctx, res); throw_bf_exception(ctx, ret); return -1; } res = JS_CompactBigInt(ctx, res); *pres = res; return 0; } static no_inline __exception int js_unary_arith_slow(JSContext *ctx, JSValue *sp, OPCodeEnum op) { JSValue op1; int v; uint32_t tag; op1 = sp[-1]; /* fast path for float64 */ if (JS_TAG_IS_FLOAT64(JS_VALUE_GET_TAG(op1))) goto handle_float64; #ifdef CONFIG_BIGNUM if (JS_IsObject(op1)) { JSValue val; int ret = js_call_unary_op_fallback(ctx, &val, op1, op); if (ret < 0) return -1; if (ret) { JS_FreeValue(ctx, op1); sp[-1] = val; return 0; } } #endif op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) goto exception; tag = JS_VALUE_GET_TAG(op1); switch(tag) { case JS_TAG_INT: { int64_t v64; v64 = JS_VALUE_GET_INT(op1); switch(op) { case OP_inc: case OP_dec: v = 2 * (op - OP_dec) - 1; v64 += v; break; case OP_plus: break; case OP_neg: if (v64 == 0) { sp[-1] = __JS_NewFloat64(ctx, -0.0); return 0; } else { v64 = -v64; } break; default: abort(); } sp[-1] = JS_NewInt64(ctx, v64); } break; case JS_TAG_BIG_INT: handle_bigint: if 
(ctx->rt->bigint_ops.unary_arith(ctx, sp - 1, op, op1)) goto exception; break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: if (ctx->rt->bigfloat_ops.unary_arith(ctx, sp - 1, op, op1)) goto exception; break; case JS_TAG_BIG_DECIMAL: if (ctx->rt->bigdecimal_ops.unary_arith(ctx, sp - 1, op, op1)) goto exception; break; #endif default: handle_float64: { double d; if (is_math_mode(ctx)) goto handle_bigint; d = JS_VALUE_GET_FLOAT64(op1); switch(op) { case OP_inc: case OP_dec: v = 2 * (op - OP_dec) - 1; d += v; break; case OP_plus: break; case OP_neg: d = -d; break; default: abort(); } sp[-1] = __JS_NewFloat64(ctx, d); } break; } return 0; exception: sp[-1] = JS_UNDEFINED; return -1; } static __exception int js_post_inc_slow(JSContext *ctx, JSValue *sp, OPCodeEnum op) { JSValue op1; /* XXX: allow custom operators */ op1 = sp[-1]; op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) { sp[-1] = JS_UNDEFINED; return -1; } sp[-1] = op1; sp[0] = JS_DupValue(ctx, op1); return js_unary_arith_slow(ctx, sp + 1, op - OP_post_dec + OP_dec); } static no_inline int js_not_slow(JSContext *ctx, JSValue *sp) { JSValue op1; op1 = sp[-1]; #ifdef CONFIG_BIGNUM if (JS_IsObject(op1)) { JSValue val; int ret = js_call_unary_op_fallback(ctx, &val, op1, OP_not); if (ret < 0) return -1; if (ret) { JS_FreeValue(ctx, op1); sp[-1] = val; return 0; } } #endif op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) goto exception; if (is_math_mode(ctx) || JS_VALUE_GET_TAG(op1) == JS_TAG_BIG_INT) { if (ctx->rt->bigint_ops.unary_arith(ctx, sp - 1, OP_not, op1)) goto exception; } else { int32_t v1; if (unlikely(JS_ToInt32Free(ctx, &v1, op1))) goto exception; sp[-1] = JS_NewInt32(ctx, ~v1); } return 0; exception: sp[-1] = JS_UNDEFINED; return -1; } static int js_binary_arith_bigint(JSContext *ctx, OPCodeEnum op, JSValue *pres, JSValue op1, JSValue op2) { bf_t a_s, b_s, *r, *a, *b; int ret; JSValue res; res = JS_NewBigInt(ctx); if (JS_IsException(res)) goto fail; a = JS_ToBigInt(ctx, &a_s, op1); if (!a) goto fail; b = JS_ToBigInt(ctx, &b_s, op2); if (!b) { JS_FreeBigInt(ctx, a, &a_s); goto fail; } r = JS_GetBigInt(res); ret = 0; switch(op) { case OP_add: ret = bf_add(r, a, b, BF_PREC_INF, BF_RNDZ); break; case OP_sub: ret = bf_sub(r, a, b, BF_PREC_INF, BF_RNDZ); break; case OP_mul: ret = bf_mul(r, a, b, BF_PREC_INF, BF_RNDZ); break; case OP_div: if (!is_math_mode(ctx)) { bf_t rem_s, *rem = &rem_s; bf_init(ctx->bf_ctx, rem); ret = bf_divrem(r, rem, a, b, BF_PREC_INF, BF_RNDZ, BF_RNDZ); bf_delete(rem); } else { goto math_mode_div_pow; } break; #ifdef CONFIG_BIGNUM case OP_math_mod: /* Euclidian remainder */ ret = bf_rem(r, a, b, BF_PREC_INF, BF_RNDZ, BF_DIVREM_EUCLIDIAN) & BF_ST_INVALID_OP; break; #endif case OP_mod: ret = bf_rem(r, a, b, BF_PREC_INF, BF_RNDZ, BF_RNDZ) & BF_ST_INVALID_OP; break; case OP_pow: if (b->sign) { if (!is_math_mode(ctx)) { ret = BF_ST_INVALID_OP; } else { math_mode_div_pow: #ifdef CONFIG_BIGNUM JS_FreeValue(ctx, res); ret = js_call_binary_op_simple(ctx, &res, ctx->class_proto[JS_CLASS_BIG_INT], op1, op2, op); if (ret != 0) { JS_FreeBigInt(ctx, a, &a_s); JS_FreeBigInt(ctx, b, &b_s); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (ret < 0) { return -1; } else { *pres = res; return 0; } } /* if no BigInt power operator defined, return a bigfloat */ res = JS_NewBigFloat(ctx); if (JS_IsException(res)) { JS_FreeBigInt(ctx, a, &a_s); JS_FreeBigInt(ctx, b, &b_s); goto fail; } r = JS_GetBigFloat(res); if (op == OP_div) { ret = bf_div(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags) & BF_ST_MEM_ERROR; } else 
{ ret = bf_pow(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags | BF_POW_JS_QUIRKS) & BF_ST_MEM_ERROR; } JS_FreeBigInt(ctx, a, &a_s); JS_FreeBigInt(ctx, b, &b_s); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (unlikely(ret)) { JS_FreeValue(ctx, res); throw_bf_exception(ctx, ret); return -1; } *pres = res; return 0; #else abort(); #endif } } else { ret = bf_pow(r, a, b, BF_PREC_INF, BF_RNDZ | BF_POW_JS_QUIRKS); } break; /* logical operations */ case OP_shl: case OP_sar: { slimb_t v2; #if LIMB_BITS == 32 bf_get_int32(&v2, b, 0); if (v2 == INT32_MIN) v2 = INT32_MIN + 1; #else bf_get_int64(&v2, b, 0); if (v2 == INT64_MIN) v2 = INT64_MIN + 1; #endif if (op == OP_sar) v2 = -v2; ret = bf_set(r, a); ret |= bf_mul_2exp(r, v2, BF_PREC_INF, BF_RNDZ); if (v2 < 0) { ret |= bf_rint(r, BF_RNDD) & (BF_ST_OVERFLOW | BF_ST_MEM_ERROR); } } break; case OP_and: ret = bf_logic_and(r, a, b); break; case OP_or: ret = bf_logic_or(r, a, b); break; case OP_xor: ret = bf_logic_xor(r, a, b); break; default: abort(); } JS_FreeBigInt(ctx, a, &a_s); JS_FreeBigInt(ctx, b, &b_s); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (unlikely(ret)) { JS_FreeValue(ctx, res); throw_bf_exception(ctx, ret); return -1; } *pres = JS_CompactBigInt(ctx, res); return 0; fail: JS_FreeValue(ctx, res); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); return -1; } #ifdef CONFIG_BIGNUM static int js_binary_arith_bigfloat(JSContext *ctx, OPCodeEnum op, JSValue *pres, JSValue op1, JSValue op2) { bf_t a_s, b_s, *r, *a, *b; int ret; JSValue res; res = JS_NewBigFloat(ctx); if (JS_IsException(res)) goto fail; r = JS_GetBigFloat(res); a = JS_ToBigFloat(ctx, &a_s, op1); if (!a) { JS_FreeValue(ctx, res); goto fail; } b = JS_ToBigFloat(ctx, &b_s, op2); if (!b) { if (a == &a_s) bf_delete(a); JS_FreeValue(ctx, res); goto fail; } bf_init(ctx->bf_ctx, r); switch(op) { case OP_add: ret = bf_add(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags); break; case OP_sub: ret = bf_sub(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags); break; case OP_mul: ret = bf_mul(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags); break; case OP_div: ret = bf_div(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags); break; case OP_math_mod: /* Euclidian remainder */ ret = bf_rem(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags, BF_DIVREM_EUCLIDIAN); break; case OP_mod: ret = bf_rem(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags, BF_RNDZ); break; case OP_pow: ret = bf_pow(r, a, b, ctx->fp_env.prec, ctx->fp_env.flags | BF_POW_JS_QUIRKS); break; default: abort(); } if (a == &a_s) bf_delete(a); if (b == &b_s) bf_delete(b); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (unlikely(ret & BF_ST_MEM_ERROR)) { JS_FreeValue(ctx, res); throw_bf_exception(ctx, ret); return -1; } *pres = res; return 0; fail: JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); return -1; } /* b must be a positive integer */ static int js_bfdec_pow(bfdec_t *r, const bfdec_t *a, const bfdec_t *b) { bfdec_t b1; int32_t b2; int ret; bfdec_init(b->ctx, &b1); ret = bfdec_set(&b1, b); if (ret) { bfdec_delete(&b1); return ret; } ret = bfdec_rint(&b1, BF_RNDZ); if (ret) { bfdec_delete(&b1); return BF_ST_INVALID_OP; /* must be an integer */ } ret = bfdec_get_int32(&b2, &b1); bfdec_delete(&b1); if (ret) return ret; /* overflow */ if (b2 < 0) return BF_ST_INVALID_OP; /* must be positive */ return bfdec_pow_ui(r, a, b2); } static int js_binary_arith_bigdecimal(JSContext *ctx, OPCodeEnum op, JSValue *pres, JSValue op1, JSValue op2) { bfdec_t *r, *a, *b; int ret; JSValue res; res = JS_NewBigDecimal(ctx); if (JS_IsException(res)) goto fail; r = 
JS_GetBigDecimal(res); a = JS_ToBigDecimal(ctx, op1); if (!a) goto fail; b = JS_ToBigDecimal(ctx, op2); if (!b) goto fail; switch(op) { case OP_add: ret = bfdec_add(r, a, b, BF_PREC_INF, BF_RNDZ); break; case OP_sub: ret = bfdec_sub(r, a, b, BF_PREC_INF, BF_RNDZ); break; case OP_mul: ret = bfdec_mul(r, a, b, BF_PREC_INF, BF_RNDZ); break; case OP_div: ret = bfdec_div(r, a, b, BF_PREC_INF, BF_RNDZ); break; case OP_math_mod: /* Euclidian remainder */ ret = bfdec_rem(r, a, b, BF_PREC_INF, BF_RNDZ, BF_DIVREM_EUCLIDIAN); break; case OP_mod: ret = bfdec_rem(r, a, b, BF_PREC_INF, BF_RNDZ, BF_RNDZ); break; case OP_pow: ret = js_bfdec_pow(r, a, b); break; default: abort(); } JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (unlikely(ret)) { JS_FreeValue(ctx, res); throw_bf_exception(ctx, ret); return -1; } *pres = res; return 0; fail: JS_FreeValue(ctx, res); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); return -1; } #endif /* CONFIG_BIGNUM */ static no_inline __exception int js_binary_arith_slow(JSContext *ctx, JSValue *sp, OPCodeEnum op) { JSValue op1, op2; uint32_t tag1, tag2; double d1, d2; op1 = sp[-2]; op2 = sp[-1]; tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); /* fast path for float operations */ if (tag1 == JS_TAG_FLOAT64 && tag2 == JS_TAG_FLOAT64) { d1 = JS_VALUE_GET_FLOAT64(op1); d2 = JS_VALUE_GET_FLOAT64(op2); goto handle_float64; } #ifdef CONFIG_BIGNUM /* try to call an overloaded operator */ if ((tag1 == JS_TAG_OBJECT && (tag2 != JS_TAG_NULL && tag2 != JS_TAG_UNDEFINED)) || (tag2 == JS_TAG_OBJECT && (tag1 != JS_TAG_NULL && tag1 != JS_TAG_UNDEFINED))) { JSValue res; int ret = js_call_binary_op_fallback(ctx, &res, op1, op2, op, TRUE, 0); if (ret != 0) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (ret < 0) { goto exception; } else { sp[-2] = res; return 0; } } } #endif op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToNumericFree(ctx, op2); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); if (tag1 == JS_TAG_INT && tag2 == JS_TAG_INT) { int32_t v1, v2; int64_t v; v1 = JS_VALUE_GET_INT(op1); v2 = JS_VALUE_GET_INT(op2); switch(op) { case OP_sub: v = (int64_t)v1 - (int64_t)v2; break; case OP_mul: v = (int64_t)v1 * (int64_t)v2; if (is_math_mode(ctx) && (v < -MAX_SAFE_INTEGER || v > MAX_SAFE_INTEGER)) goto handle_bigint; if (v == 0 && (v1 | v2) < 0) { sp[-2] = __JS_NewFloat64(ctx, -0.0); return 0; } break; case OP_div: if (is_math_mode(ctx)) goto handle_bigint; sp[-2] = __JS_NewFloat64(ctx, (double)v1 / (double)v2); return 0; #ifdef CONFIG_BIGNUM case OP_math_mod: if (unlikely(v2 == 0)) { throw_bf_exception(ctx, BF_ST_DIVIDE_ZERO); goto exception; } v = (int64_t)v1 % (int64_t)v2; if (v < 0) { if (v2 < 0) v -= v2; else v += v2; } break; #endif case OP_mod: if (v1 < 0 || v2 <= 0) { sp[-2] = JS_NewFloat64(ctx, fmod(v1, v2)); return 0; } else { v = (int64_t)v1 % (int64_t)v2; } break; case OP_pow: if (!is_math_mode(ctx)) { sp[-2] = JS_NewFloat64(ctx, js_pow(v1, v2)); return 0; } else { goto handle_bigint; } break; default: abort(); } sp[-2] = JS_NewInt64(ctx, v); } else #ifdef CONFIG_BIGNUM if (tag1 == JS_TAG_BIG_DECIMAL || tag2 == JS_TAG_BIG_DECIMAL) { if (ctx->rt->bigdecimal_ops.binary_arith(ctx, op, sp - 2, op1, op2)) goto exception; } else if (tag1 == JS_TAG_BIG_FLOAT || tag2 == JS_TAG_BIG_FLOAT) { if (ctx->rt->bigfloat_ops.binary_arith(ctx, op, sp - 2, op1, op2)) goto exception; } else #endif if (tag1 == 
JS_TAG_BIG_INT || tag2 == JS_TAG_BIG_INT) { handle_bigint: if (ctx->rt->bigint_ops.binary_arith(ctx, op, sp - 2, op1, op2)) goto exception; } else { double dr; /* float64 result */ if (JS_ToFloat64Free(ctx, &d1, op1)) { JS_FreeValue(ctx, op2); goto exception; } if (JS_ToFloat64Free(ctx, &d2, op2)) goto exception; handle_float64: if (is_math_mode(ctx) && is_safe_integer(d1) && is_safe_integer(d2)) goto handle_bigint; switch(op) { case OP_sub: dr = d1 - d2; break; case OP_mul: dr = d1 * d2; break; case OP_div: dr = d1 / d2; break; case OP_mod: dr = fmod(d1, d2); break; #ifdef CONFIG_BIGNUM case OP_math_mod: d2 = fabs(d2); dr = fmod(d1, d2); /* XXX: loss of accuracy if dr < 0 */ if (dr < 0) dr += d2; break; #endif case OP_pow: dr = js_pow(d1, d2); break; default: abort(); } sp[-2] = __JS_NewFloat64(ctx, dr); } return 0; exception: sp[-2] = JS_UNDEFINED; sp[-1] = JS_UNDEFINED; return -1; } static no_inline __exception int js_add_slow(JSContext *ctx, JSValue *sp) { JSValue op1, op2; uint32_t tag1, tag2; op1 = sp[-2]; op2 = sp[-1]; tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); /* fast path for float64 */ if (tag1 == JS_TAG_FLOAT64 && tag2 == JS_TAG_FLOAT64) { double d1, d2; d1 = JS_VALUE_GET_FLOAT64(op1); d2 = JS_VALUE_GET_FLOAT64(op2); sp[-2] = __JS_NewFloat64(ctx, d1 + d2); return 0; } if (tag1 == JS_TAG_OBJECT || tag2 == JS_TAG_OBJECT) { #ifdef CONFIG_BIGNUM /* try to call an overloaded operator */ if ((tag1 == JS_TAG_OBJECT && (tag2 != JS_TAG_NULL && tag2 != JS_TAG_UNDEFINED && tag2 != JS_TAG_STRING)) || (tag2 == JS_TAG_OBJECT && (tag1 != JS_TAG_NULL && tag1 != JS_TAG_UNDEFINED && tag1 != JS_TAG_STRING))) { JSValue res; int ret = js_call_binary_op_fallback(ctx, &res, op1, op2, OP_add, FALSE, HINT_NONE); if (ret != 0) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (ret < 0) { goto exception; } else { sp[-2] = res; return 0; } } } #endif op1 = JS_ToPrimitiveFree(ctx, op1, HINT_NONE); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToPrimitiveFree(ctx, op2, HINT_NONE); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); } if (tag1 == JS_TAG_STRING || tag2 == JS_TAG_STRING) { sp[-2] = JS_ConcatString(ctx, op1, op2); if (JS_IsException(sp[-2])) goto exception; return 0; } op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToNumericFree(ctx, op2); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); if (tag1 == JS_TAG_INT && tag2 == JS_TAG_INT) { int32_t v1, v2; int64_t v; v1 = JS_VALUE_GET_INT(op1); v2 = JS_VALUE_GET_INT(op2); v = (int64_t)v1 + (int64_t)v2; sp[-2] = JS_NewInt64(ctx, v); } else #ifdef CONFIG_BIGNUM if (tag1 == JS_TAG_BIG_DECIMAL || tag2 == JS_TAG_BIG_DECIMAL) { if (ctx->rt->bigdecimal_ops.binary_arith(ctx, OP_add, sp - 2, op1, op2)) goto exception; } else if (tag1 == JS_TAG_BIG_FLOAT || tag2 == JS_TAG_BIG_FLOAT) { if (ctx->rt->bigfloat_ops.binary_arith(ctx, OP_add, sp - 2, op1, op2)) goto exception; } else #endif if (tag1 == JS_TAG_BIG_INT || tag2 == JS_TAG_BIG_INT) { handle_bigint: if (ctx->rt->bigint_ops.binary_arith(ctx, OP_add, sp - 2, op1, op2)) goto exception; } else { double d1, d2; /* float64 result */ if (JS_ToFloat64Free(ctx, &d1, op1)) { JS_FreeValue(ctx, op2); goto exception; } if (JS_ToFloat64Free(ctx, &d2, op2)) goto exception; if (is_math_mode(ctx) && is_safe_integer(d1) && 
is_safe_integer(d2)) goto handle_bigint; sp[-2] = __JS_NewFloat64(ctx, d1 + d2); } return 0; exception: sp[-2] = JS_UNDEFINED; sp[-1] = JS_UNDEFINED; return -1; } static no_inline __exception int js_binary_logic_slow(JSContext *ctx, JSValue *sp, OPCodeEnum op) { JSValue op1, op2; uint32_t tag1, tag2; uint32_t v1, v2, r; op1 = sp[-2]; op2 = sp[-1]; tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); #ifdef CONFIG_BIGNUM /* try to call an overloaded operator */ if ((tag1 == JS_TAG_OBJECT && (tag2 != JS_TAG_NULL && tag2 != JS_TAG_UNDEFINED)) || (tag2 == JS_TAG_OBJECT && (tag1 != JS_TAG_NULL && tag1 != JS_TAG_UNDEFINED))) { JSValue res; int ret = js_call_binary_op_fallback(ctx, &res, op1, op2, op, TRUE, 0); if (ret != 0) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (ret < 0) { goto exception; } else { sp[-2] = res; return 0; } } } #endif op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToNumericFree(ctx, op2); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } if (is_math_mode(ctx)) goto bigint_op; tag1 = JS_VALUE_GET_TAG(op1); tag2 = JS_VALUE_GET_TAG(op2); if (tag1 == JS_TAG_BIG_INT || tag2 == JS_TAG_BIG_INT) { if (tag1 != tag2) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); JS_ThrowTypeError(ctx, "both operands must be bigint"); goto exception; } else { bigint_op: if (ctx->rt->bigint_ops.binary_arith(ctx, op, sp - 2, op1, op2)) goto exception; } } else { if (unlikely(JS_ToInt32Free(ctx, (int32_t *)&v1, op1))) { JS_FreeValue(ctx, op2); goto exception; } if (unlikely(JS_ToInt32Free(ctx, (int32_t *)&v2, op2))) goto exception; switch(op) { case OP_shl: r = v1 << (v2 & 0x1f); break; case OP_sar: r = (int)v1 >> (v2 & 0x1f); break; case OP_and: r = v1 & v2; break; case OP_or: r = v1 | v2; break; case OP_xor: r = v1 ^ v2; break; default: abort(); } sp[-2] = JS_NewInt32(ctx, r); } return 0; exception: sp[-2] = JS_UNDEFINED; sp[-1] = JS_UNDEFINED; return -1; } /* Note: also used for bigint */ static int js_compare_bigfloat(JSContext *ctx, OPCodeEnum op, JSValue op1, JSValue op2) { bf_t a_s, b_s, *a, *b; int res; a = JS_ToBigFloat(ctx, &a_s, op1); if (!a) { JS_FreeValue(ctx, op2); return -1; } b = JS_ToBigFloat(ctx, &b_s, op2); if (!b) { if (a == &a_s) bf_delete(a); JS_FreeValue(ctx, op1); return -1; } switch(op) { case OP_lt: res = bf_cmp_lt(a, b); /* if NaN return false */ break; case OP_lte: res = bf_cmp_le(a, b); /* if NaN return false */ break; case OP_gt: res = bf_cmp_lt(b, a); /* if NaN return false */ break; case OP_gte: res = bf_cmp_le(b, a); /* if NaN return false */ break; case OP_eq: res = bf_cmp_eq(a, b); /* if NaN return false */ break; default: abort(); } if (a == &a_s) bf_delete(a); if (b == &b_s) bf_delete(b); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); return res; } #ifdef CONFIG_BIGNUM static int js_compare_bigdecimal(JSContext *ctx, OPCodeEnum op, JSValue op1, JSValue op2) { bfdec_t *a, *b; int res; /* Note: binary floats are converted to bigdecimal with toString(). 
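(For example, the double nearest to 0.1 is 0.1000000000000000055511151231257827..., yet toString() yields "0.1", so the comparison effectively uses the exact decimal 0.1.)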
It is not mathematically correct but is consistent with the BigDecimal() constructor behavior */ op1 = JS_ToBigDecimalFree(ctx, op1, TRUE); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); return -1; } op2 = JS_ToBigDecimalFree(ctx, op2, TRUE); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); return -1; } a = JS_ToBigDecimal(ctx, op1); /* cannot fail */ b = JS_ToBigDecimal(ctx, op2); /* cannot fail */ switch(op) { case OP_lt: res = bfdec_cmp_lt(a, b); /* if NaN return false */ break; case OP_lte: res = bfdec_cmp_le(a, b); /* if NaN return false */ break; case OP_gt: res = bfdec_cmp_lt(b, a); /* if NaN return false */ break; case OP_gte: res = bfdec_cmp_le(b, a); /* if NaN return false */ break; case OP_eq: res = bfdec_cmp_eq(a, b); /* if NaN return false */ break; default: abort(); } JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); return res; } #endif /* !CONFIG_BIGNUM */ static no_inline int js_relational_slow(JSContext *ctx, JSValue *sp, OPCodeEnum op) { JSValue op1, op2; int res; uint32_t tag1, tag2; op1 = sp[-2]; op2 = sp[-1]; tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); #ifdef CONFIG_BIGNUM /* try to call an overloaded operator */ if ((tag1 == JS_TAG_OBJECT && (tag2 != JS_TAG_NULL && tag2 != JS_TAG_UNDEFINED)) || (tag2 == JS_TAG_OBJECT && (tag1 != JS_TAG_NULL && tag1 != JS_TAG_UNDEFINED))) { JSValue ret; res = js_call_binary_op_fallback(ctx, &ret, op1, op2, op, FALSE, HINT_NUMBER); if (res != 0) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (res < 0) { goto exception; } else { sp[-2] = ret; return 0; } } } #endif op1 = JS_ToPrimitiveFree(ctx, op1, HINT_NUMBER); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToPrimitiveFree(ctx, op2, HINT_NUMBER); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); if (tag1 == JS_TAG_STRING && tag2 == JS_TAG_STRING) { JSString *p1, *p2; p1 = JS_VALUE_GET_STRING(op1); p2 = JS_VALUE_GET_STRING(op2); res = js_string_compare(ctx, p1, p2); switch(op) { case OP_lt: res = (res < 0); break; case OP_lte: res = (res <= 0); break; case OP_gt: res = (res > 0); break; default: case OP_gte: res = (res >= 0); break; } JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); } else if ((tag1 <= JS_TAG_NULL || tag1 == JS_TAG_FLOAT64) && (tag2 <= JS_TAG_NULL || tag2 == JS_TAG_FLOAT64)) { /* fast path for float64/int */ goto float64_compare; } else { if (((tag1 == JS_TAG_BIG_INT && tag2 == JS_TAG_STRING) || (tag2 == JS_TAG_BIG_INT && tag1 == JS_TAG_STRING)) && !is_math_mode(ctx)) { if (tag1 == JS_TAG_STRING) { op1 = JS_StringToBigInt(ctx, op1); if (JS_VALUE_GET_TAG(op1) != JS_TAG_BIG_INT) goto invalid_bigint_string; } if (tag2 == JS_TAG_STRING) { op2 = JS_StringToBigInt(ctx, op2); if (JS_VALUE_GET_TAG(op2) != JS_TAG_BIG_INT) { invalid_bigint_string: JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); res = FALSE; goto done; } } } else { op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToNumericFree(ctx, op2); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } } tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); #ifdef CONFIG_BIGNUM if (tag1 == JS_TAG_BIG_DECIMAL || tag2 == JS_TAG_BIG_DECIMAL) { res = ctx->rt->bigdecimal_ops.compare(ctx, op, op1, op2); if (res < 0) goto exception; } else if (tag1 == JS_TAG_BIG_FLOAT || tag2 == JS_TAG_BIG_FLOAT) { res = ctx->rt->bigfloat_ops.compare(ctx, op, op1, op2); if (res < 0) goto exception; } 
else #endif if (tag1 == JS_TAG_BIG_INT || tag2 == JS_TAG_BIG_INT) { res = ctx->rt->bigint_ops.compare(ctx, op, op1, op2); if (res < 0) goto exception; } else { double d1, d2; float64_compare: /* can use floating point comparison */ if (tag1 == JS_TAG_FLOAT64) { d1 = JS_VALUE_GET_FLOAT64(op1); } else { d1 = JS_VALUE_GET_INT(op1); } if (tag2 == JS_TAG_FLOAT64) { d2 = JS_VALUE_GET_FLOAT64(op2); } else { d2 = JS_VALUE_GET_INT(op2); } switch(op) { case OP_lt: res = (d1 < d2); /* if NaN return false */ break; case OP_lte: res = (d1 <= d2); /* if NaN return false */ break; case OP_gt: res = (d1 > d2); /* if NaN return false */ break; default: case OP_gte: res = (d1 >= d2); /* if NaN return false */ break; } } } done: sp[-2] = JS_NewBool(ctx, res); return 0; exception: sp[-2] = JS_UNDEFINED; sp[-1] = JS_UNDEFINED; return -1; } static BOOL tag_is_number(uint32_t tag) { return (tag == JS_TAG_INT || tag == JS_TAG_BIG_INT || tag == JS_TAG_FLOAT64 #ifdef CONFIG_BIGNUM || tag == JS_TAG_BIG_FLOAT || tag == JS_TAG_BIG_DECIMAL #endif ); } static no_inline __exception int js_eq_slow(JSContext *ctx, JSValue *sp, BOOL is_neq) { JSValue op1, op2; #ifdef CONFIG_BIGNUM JSValue ret; #endif int res; uint32_t tag1, tag2; op1 = sp[-2]; op2 = sp[-1]; redo: tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); if (tag_is_number(tag1) && tag_is_number(tag2)) { if (tag1 == JS_TAG_INT && tag2 == JS_TAG_INT) { res = JS_VALUE_GET_INT(op1) == JS_VALUE_GET_INT(op2); } else if ((tag1 == JS_TAG_FLOAT64 && (tag2 == JS_TAG_INT || tag2 == JS_TAG_FLOAT64)) || (tag2 == JS_TAG_FLOAT64 && (tag1 == JS_TAG_INT || tag1 == JS_TAG_FLOAT64))) { double d1, d2; if (tag1 == JS_TAG_FLOAT64) { d1 = JS_VALUE_GET_FLOAT64(op1); } else { d1 = JS_VALUE_GET_INT(op1); } if (tag2 == JS_TAG_FLOAT64) { d2 = JS_VALUE_GET_FLOAT64(op2); } else { d2 = JS_VALUE_GET_INT(op2); } res = (d1 == d2); } else #ifdef CONFIG_BIGNUM if (tag1 == JS_TAG_BIG_DECIMAL || tag2 == JS_TAG_BIG_DECIMAL) { res = ctx->rt->bigdecimal_ops.compare(ctx, OP_eq, op1, op2); if (res < 0) goto exception; } else if (tag1 == JS_TAG_BIG_FLOAT || tag2 == JS_TAG_BIG_FLOAT) { res = ctx->rt->bigfloat_ops.compare(ctx, OP_eq, op1, op2); if (res < 0) goto exception; } else #endif { res = ctx->rt->bigint_ops.compare(ctx, OP_eq, op1, op2); if (res < 0) goto exception; } } else if (tag1 == tag2) { #ifdef CONFIG_BIGNUM if (tag1 == JS_TAG_OBJECT) { /* try the fallback operator */ res = js_call_binary_op_fallback(ctx, &ret, op1, op2, is_neq ? 
OP_neq : OP_eq, FALSE, HINT_NONE); if (res != 0) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (res < 0) { goto exception; } else { sp[-2] = ret; return 0; } } } #endif res = js_strict_eq2(ctx, op1, op2, JS_EQ_STRICT); } else if ((tag1 == JS_TAG_NULL && tag2 == JS_TAG_UNDEFINED) || (tag2 == JS_TAG_NULL && tag1 == JS_TAG_UNDEFINED)) { res = TRUE; } else if ((tag1 == JS_TAG_STRING && tag_is_number(tag2)) || (tag2 == JS_TAG_STRING && tag_is_number(tag1))) { if ((tag1 == JS_TAG_BIG_INT || tag2 == JS_TAG_BIG_INT) && !is_math_mode(ctx)) { if (tag1 == JS_TAG_STRING) { op1 = JS_StringToBigInt(ctx, op1); if (JS_VALUE_GET_TAG(op1) != JS_TAG_BIG_INT) goto invalid_bigint_string; } if (tag2 == JS_TAG_STRING) { op2 = JS_StringToBigInt(ctx, op2); if (JS_VALUE_GET_TAG(op2) != JS_TAG_BIG_INT) { invalid_bigint_string: JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); res = FALSE; goto done; } } } else { op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToNumericFree(ctx, op2); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } } res = js_strict_eq(ctx, op1, op2); } else if (tag1 == JS_TAG_BOOL) { op1 = JS_NewInt32(ctx, JS_VALUE_GET_INT(op1)); goto redo; } else if (tag2 == JS_TAG_BOOL) { op2 = JS_NewInt32(ctx, JS_VALUE_GET_INT(op2)); goto redo; } else if ((tag1 == JS_TAG_OBJECT && (tag_is_number(tag2) || tag2 == JS_TAG_STRING || tag2 == JS_TAG_SYMBOL)) || (tag2 == JS_TAG_OBJECT && (tag_is_number(tag1) || tag1 == JS_TAG_STRING || tag1 == JS_TAG_SYMBOL))) { #ifdef CONFIG_BIGNUM /* try the fallback operator */ res = js_call_binary_op_fallback(ctx, &ret, op1, op2, is_neq ? OP_neq : OP_eq, FALSE, HINT_NONE); if (res != 0) { JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); if (res < 0) { goto exception; } else { sp[-2] = ret; return 0; } } #endif op1 = JS_ToPrimitiveFree(ctx, op1, HINT_NONE); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToPrimitiveFree(ctx, op2, HINT_NONE); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } goto redo; } else { /* IsHTMLDDA object is equivalent to undefined for '==' and '!=' */ if ((JS_IsHTMLDDA(ctx, op1) && (tag2 == JS_TAG_NULL || tag2 == JS_TAG_UNDEFINED)) || (JS_IsHTMLDDA(ctx, op2) && (tag1 == JS_TAG_NULL || tag1 == JS_TAG_UNDEFINED))) { res = TRUE; } else { res = FALSE; } JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); } done: sp[-2] = JS_NewBool(ctx, res ^ is_neq); return 0; exception: sp[-2] = JS_UNDEFINED; sp[-1] = JS_UNDEFINED; return -1; } static no_inline int js_shr_slow(JSContext *ctx, JSValue *sp) { JSValue op1, op2; uint32_t v1, v2, r; op1 = sp[-2]; op2 = sp[-1]; op1 = JS_ToNumericFree(ctx, op1); if (JS_IsException(op1)) { JS_FreeValue(ctx, op2); goto exception; } op2 = JS_ToNumericFree(ctx, op2); if (JS_IsException(op2)) { JS_FreeValue(ctx, op1); goto exception; } /* XXX: could forbid >>> in bignum mode */ if (!is_math_mode(ctx) && (JS_VALUE_GET_TAG(op1) == JS_TAG_BIG_INT || JS_VALUE_GET_TAG(op2) == JS_TAG_BIG_INT)) { JS_ThrowTypeError(ctx, "bigint operands are forbidden for >>>"); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); goto exception; } /* cannot give an exception */ JS_ToUint32Free(ctx, &v1, op1); JS_ToUint32Free(ctx, &v2, op2); r = v1 >> (v2 & 0x1f); sp[-2] = JS_NewUint32(ctx, r); return 0; exception: sp[-2] = JS_UNDEFINED; sp[-1] = JS_UNDEFINED; return -1; } #ifdef CONFIG_BIGNUM static JSValue js_mul_pow10_to_float64(JSContext *ctx, const bf_t *a, int64_t exponent) { bf_t r_s, *r = &r_s; double d; int ret; /* always 
convert to Float64 */ bf_init(ctx->bf_ctx, r); ret = bf_mul_pow_radix(r, a, 10, exponent, 53, bf_set_exp_bits(11) | BF_RNDN | BF_FLAG_SUBNORMAL); bf_get_float64(r, &d, BF_RNDN); bf_delete(r); if (ret & BF_ST_MEM_ERROR) return JS_ThrowOutOfMemory(ctx); else return __JS_NewFloat64(ctx, d); } static no_inline int js_mul_pow10(JSContext *ctx, JSValue *sp) { bf_t a_s, *a, *r; JSValue op1, op2, res; int64_t e; int ret; res = JS_NewBigFloat(ctx); if (JS_IsException(res)) return -1; r = JS_GetBigFloat(res); op1 = sp[-2]; op2 = sp[-1]; a = JS_ToBigFloat(ctx, &a_s, op1); if (!a) { JS_FreeValue(ctx, res); return -1; } if (JS_IsBigInt(ctx, op2)) { ret = JS_ToBigInt64(ctx, &e, op2); } else { ret = JS_ToInt64(ctx, &e, op2); } if (ret) { if (a == &a_s) bf_delete(a); JS_FreeValue(ctx, res); return -1; } bf_mul_pow_radix(r, a, 10, e, ctx->fp_env.prec, ctx->fp_env.flags); if (a == &a_s) bf_delete(a); JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); sp[-2] = res; return 0; } #endif /* XXX: Should take JSValueConst arguments */ static BOOL js_strict_eq2(JSContext *ctx, JSValue op1, JSValue op2, JSStrictEqModeEnum eq_mode) { BOOL res; int tag1, tag2; double d1, d2; tag1 = JS_VALUE_GET_NORM_TAG(op1); tag2 = JS_VALUE_GET_NORM_TAG(op2); switch(tag1) { case JS_TAG_BOOL: if (tag1 != tag2) { res = FALSE; } else { res = JS_VALUE_GET_INT(op1) == JS_VALUE_GET_INT(op2); goto done_no_free; } break; case JS_TAG_NULL: case JS_TAG_UNDEFINED: res = (tag1 == tag2); break; case JS_TAG_STRING: { JSString *p1, *p2; if (tag1 != tag2) { res = FALSE; } else { p1 = JS_VALUE_GET_STRING(op1); p2 = JS_VALUE_GET_STRING(op2); res = (js_string_compare(ctx, p1, p2) == 0); } } break; case JS_TAG_SYMBOL: { JSAtomStruct *p1, *p2; if (tag1 != tag2) { res = FALSE; } else { p1 = JS_VALUE_GET_PTR(op1); p2 = JS_VALUE_GET_PTR(op2); res = (p1 == p2); } } break; case JS_TAG_OBJECT: if (tag1 != tag2) res = FALSE; else res = JS_VALUE_GET_OBJ(op1) == JS_VALUE_GET_OBJ(op2); break; case JS_TAG_INT: d1 = JS_VALUE_GET_INT(op1); if (tag2 == JS_TAG_INT) { d2 = JS_VALUE_GET_INT(op2); goto number_test; } else if (tag2 == JS_TAG_FLOAT64) { d2 = JS_VALUE_GET_FLOAT64(op2); goto number_test; } else { res = FALSE; } break; case JS_TAG_FLOAT64: d1 = JS_VALUE_GET_FLOAT64(op1); if (tag2 == JS_TAG_FLOAT64) { d2 = JS_VALUE_GET_FLOAT64(op2); } else if (tag2 == JS_TAG_INT) { d2 = JS_VALUE_GET_INT(op2); } else { res = FALSE; break; } number_test: if (unlikely(eq_mode >= JS_EQ_SAME_VALUE)) { JSFloat64Union u1, u2; /* NaN is not always normalized, so this test is necessary */ if (isnan(d1) || isnan(d2)) { res = isnan(d1) == isnan(d2); } else if (eq_mode == JS_EQ_SAME_VALUE_ZERO) { res = (d1 == d2); /* +0 == -0 */ } else { u1.d = d1; u2.d = d2; res = (u1.u64 == u2.u64); /* +0 != -0 */ } } else { res = (d1 == d2); /* if NaN return false and +0 == -0 */ } goto done_no_free; case JS_TAG_BIG_INT: { bf_t a_s, *a, b_s, *b; if (tag1 != tag2) { res = FALSE; break; } a = JS_ToBigFloat(ctx, &a_s, op1); /* cannot fail */ b = JS_ToBigFloat(ctx, &b_s, op2); /* cannot fail */ res = bf_cmp_eq(a, b); if (a == &a_s) bf_delete(a); if (b == &b_s) bf_delete(b); } break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: { JSBigFloat *p1, *p2; const bf_t *a, *b; if (tag1 != tag2) { res = FALSE; break; } p1 = JS_VALUE_GET_PTR(op1); p2 = JS_VALUE_GET_PTR(op2); a = &p1->num; b = &p2->num; if (unlikely(eq_mode >= JS_EQ_SAME_VALUE)) { if (eq_mode == JS_EQ_SAME_VALUE_ZERO && a->expn == BF_EXP_ZERO && b->expn == BF_EXP_ZERO) { res = TRUE; } else { res = (bf_cmp_full(a, b) == 0); } } else { res = bf_cmp_eq(a, b); 
} } break; case JS_TAG_BIG_DECIMAL: { JSBigDecimal *p1, *p2; const bfdec_t *a, *b; if (tag1 != tag2) { res = FALSE; break; } p1 = JS_VALUE_GET_PTR(op1); p2 = JS_VALUE_GET_PTR(op2); a = &p1->num; b = &p2->num; res = bfdec_cmp_eq(a, b); } break; #endif default: res = FALSE; break; } JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); done_no_free: return res; } static BOOL js_strict_eq(JSContext *ctx, JSValue op1, JSValue op2) { return js_strict_eq2(ctx, op1, op2, JS_EQ_STRICT); } static BOOL js_same_value(JSContext *ctx, JSValueConst op1, JSValueConst op2) { return js_strict_eq2(ctx, JS_DupValue(ctx, op1), JS_DupValue(ctx, op2), JS_EQ_SAME_VALUE); } static BOOL js_same_value_zero(JSContext *ctx, JSValueConst op1, JSValueConst op2) { return js_strict_eq2(ctx, JS_DupValue(ctx, op1), JS_DupValue(ctx, op2), JS_EQ_SAME_VALUE_ZERO); } static no_inline int js_strict_eq_slow(JSContext *ctx, JSValue *sp, BOOL is_neq) { BOOL res; res = js_strict_eq(ctx, sp[-2], sp[-1]); sp[-2] = JS_NewBool(ctx, res ^ is_neq); return 0; } static __exception int js_operator_in(JSContext *ctx, JSValue *sp) { JSValue op1, op2; JSAtom atom; int ret; op1 = sp[-2]; op2 = sp[-1]; if (JS_VALUE_GET_TAG(op2) != JS_TAG_OBJECT) { JS_ThrowTypeError(ctx, "invalid 'in' operand"); return -1; } atom = JS_ValueToAtom(ctx, op1); if (unlikely(atom == JS_ATOM_NULL)) return -1; ret = JS_HasProperty(ctx, op2, atom); JS_FreeAtom(ctx, atom); if (ret < 0) return -1; JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); sp[-2] = JS_NewBool(ctx, ret); return 0; } static __exception int js_operator_private_in(JSContext *ctx, JSValue *sp) { JSValue op1, op2; int ret; op1 = sp[-2]; /* object */ op2 = sp[-1]; /* field name or method function */ if (JS_VALUE_GET_TAG(op1) != JS_TAG_OBJECT) { JS_ThrowTypeError(ctx, "invalid 'in' operand"); return -1; } if (JS_IsObject(op2)) { /* method: use the brand */ ret = JS_CheckBrand(ctx, op1, op2); if (ret < 0) return -1; } else { JSAtom atom; JSObject *p; JSShapeProperty *prs; JSProperty *pr; /* field */ atom = JS_ValueToAtom(ctx, op2); if (unlikely(atom == JS_ATOM_NULL)) return -1; p = JS_VALUE_GET_OBJ(op1); prs = find_own_property(&pr, p, atom); JS_FreeAtom(ctx, atom); ret = (prs != NULL); } JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); sp[-2] = JS_NewBool(ctx, ret); return 0; } static __exception int js_has_unscopable(JSContext *ctx, JSValueConst obj, JSAtom atom) { JSValue arr, val; int ret; arr = JS_GetProperty(ctx, obj, JS_ATOM_Symbol_unscopables); if (JS_IsException(arr)) return -1; ret = 0; if (JS_IsObject(arr)) { val = JS_GetProperty(ctx, arr, atom); ret = JS_ToBoolFree(ctx, val); } JS_FreeValue(ctx, arr); return ret; } static __exception int js_operator_instanceof(JSContext *ctx, JSValue *sp) { JSValue op1, op2; BOOL ret; op1 = sp[-2]; op2 = sp[-1]; ret = JS_IsInstanceOf(ctx, op1, op2); if (ret < 0) return ret; JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); sp[-2] = JS_NewBool(ctx, ret); return 0; } static __exception int js_operator_typeof(JSContext *ctx, JSValueConst op1) { JSAtom atom; uint32_t tag; tag = JS_VALUE_GET_NORM_TAG(op1); switch(tag) { case JS_TAG_BIG_INT: atom = JS_ATOM_bigint; break; #ifdef CONFIG_BIGNUM case JS_TAG_BIG_FLOAT: atom = JS_ATOM_bigfloat; break; case JS_TAG_BIG_DECIMAL: atom = JS_ATOM_bigdecimal; break; #endif case JS_TAG_INT: case JS_TAG_FLOAT64: atom = JS_ATOM_number; break; case JS_TAG_UNDEFINED: atom = JS_ATOM_undefined; break; case JS_TAG_BOOL: atom = JS_ATOM_boolean; break; case JS_TAG_STRING: atom = JS_ATOM_string; break; case JS_TAG_OBJECT: { JSObject *p; p = 
JS_VALUE_GET_OBJ(op1); if (unlikely(p->is_HTMLDDA)) atom = JS_ATOM_undefined; else if (JS_IsFunction(ctx, op1)) atom = JS_ATOM_function; else goto obj_type; } break; case JS_TAG_NULL: obj_type: atom = JS_ATOM_object; break; case JS_TAG_SYMBOL: atom = JS_ATOM_symbol; break; default: atom = JS_ATOM_unknown; break; } return atom; } static __exception int js_operator_delete(JSContext *ctx, JSValue *sp) { JSValue op1, op2; JSAtom atom; int ret; op1 = sp[-2]; op2 = sp[-1]; atom = JS_ValueToAtom(ctx, op2); if (unlikely(atom == JS_ATOM_NULL)) return -1; ret = JS_DeleteProperty(ctx, op1, atom, JS_PROP_THROW_STRICT); JS_FreeAtom(ctx, atom); if (unlikely(ret < 0)) return -1; JS_FreeValue(ctx, op1); JS_FreeValue(ctx, op2); sp[-2] = JS_NewBool(ctx, ret); return 0; } static JSValue js_throw_type_error(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv) { return JS_ThrowTypeError(ctx, "invalid property access"); } /* XXX: not 100% compatible, but mozilla seems to use a similar implementation to ensure that caller in non strict mode does not throw (ES5 compatibility) */ static JSValue js_function_proto_caller(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv) { JSFunctionBytecode *b = JS_GetFunctionBytecode(this_val); if (!b || (b->js_mode & JS_MODE_STRICT) || !b->has_prototype) { return js_throw_type_error(ctx, this_val, 0, NULL); } return JS_UNDEFINED; } static JSValue js_function_proto_fileName(JSContext *ctx, JSValueConst this_val) { JSFunctionBytecode *b = JS_GetFunctionBytecode(this_val); if (b && b->has_debug) { return JS_AtomToString(ctx, b->debug.filename); } return JS_UNDEFINED; } static JSValue js_function_proto_lineNumber(JSContext *ctx, JSValueConst this_val) { JSFunctionBytecode *b = JS_GetFunctionBytecode(this_val); if (b && b->has_debug) { return JS_NewInt32(ctx, b->debug.line_num); } return JS_UNDEFINED; } static int js_arguments_define_own_property(JSContext *ctx, JSValueConst this_obj, JSAtom prop, JSValueConst val, JSValueConst getter, JSValueConst setter, int flags) { JSObject *p; uint32_t idx; p = JS_VALUE_GET_OBJ(this_obj); /* convert to normal array when redefining an existing numeric field */ if (p->fast_array && JS_AtomIsArrayIndex(ctx, &idx, prop) && idx < p->u.array.count) { if (convert_fast_array_to_array(ctx, p)) return -1; } /* run the default define own property */ return JS_DefineProperty(ctx, this_obj, prop, val, getter, setter, flags | JS_PROP_NO_EXOTIC); } static const JSClassExoticMethods js_arguments_exotic_methods = { .define_own_property = js_arguments_define_own_property, }; static JSValue js_build_arguments(JSContext *ctx, int argc, JSValueConst *argv) { JSValue val, *tab; JSProperty *pr; JSObject *p; int i; val = JS_NewObjectProtoClass(ctx, ctx->class_proto[JS_CLASS_OBJECT], JS_CLASS_ARGUMENTS); if (JS_IsException(val)) return val; p = JS_VALUE_GET_OBJ(val); /* add the length field (cannot fail) */ pr = add_property(ctx, p, JS_ATOM_length, JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); pr->u.value = JS_NewInt32(ctx, argc); /* initialize the fast array part */ tab = NULL; if (argc > 0) { tab = js_malloc(ctx, sizeof(tab[0]) * argc); if (!tab) { JS_FreeValue(ctx, val); return JS_EXCEPTION; } for(i = 0; i < argc; i++) { tab[i] = JS_DupValue(ctx, argv[i]); } } p->u.array.u.values = tab; p->u.array.count = argc; JS_DefinePropertyValue(ctx, val, JS_ATOM_Symbol_iterator, JS_DupValue(ctx, ctx->array_proto_values), JS_PROP_CONFIGURABLE | JS_PROP_WRITABLE); /* add callee property to throw a TypeError in strict mode */ JS_DefineProperty(ctx, 
val, JS_ATOM_callee, JS_UNDEFINED, ctx->throw_type_error, ctx->throw_type_error, JS_PROP_HAS_GET | JS_PROP_HAS_SET); return val; } #define GLOBAL_VAR_OFFSET 0x40000000 #define ARGUMENT_VAR_OFFSET 0x20000000 /* legacy arguments object: add references to the function arguments */ static JSValue js_build_mapped_arguments(JSContext *ctx, int argc, JSValueConst *argv, JSStackFrame *sf, int arg_count) { JSValue val; JSProperty *pr; JSObject *p; int i; val = JS_NewObjectProtoClass(ctx, ctx->class_proto[JS_CLASS_OBJECT], JS_CLASS_MAPPED_ARGUMENTS); if (JS_IsException(val)) return val; p = JS_VALUE_GET_OBJ(val); /* add the length field (cannot fail) */ pr = add_property(ctx, p, JS_ATOM_length, JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); pr->u.value = JS_NewInt32(ctx, argc); for(i = 0; i < arg_count; i++) { JSVarRef *var_ref; var_ref = get_var_ref(ctx, sf, i, TRUE); if (!var_ref) goto fail; pr = add_property(ctx, p, __JS_AtomFromUInt32(i), JS_PROP_C_W_E | JS_PROP_VARREF); if (!pr) { free_var_ref(ctx->rt, var_ref); goto fail; } pr->u.var_ref = var_ref; } /* the arguments not mapped to the arguments of the function can be normal properties */ for(i = arg_count; i < argc; i++) { if (JS_DefinePropertyValueUint32(ctx, val, i, JS_DupValue(ctx, argv[i]), JS_PROP_C_W_E) < 0) goto fail; } JS_DefinePropertyValue(ctx, val, JS_ATOM_Symbol_iterator, JS_DupValue(ctx, ctx->array_proto_values), JS_PROP_CONFIGURABLE | JS_PROP_WRITABLE); /* callee returns this function in non strict mode */ JS_DefinePropertyValue(ctx, val, JS_ATOM_callee, JS_DupValue(ctx, ctx->rt->current_stack_frame->cur_func), JS_PROP_CONFIGURABLE | JS_PROP_WRITABLE); return val; fail: JS_FreeValue(ctx, val); return JS_EXCEPTION; } static JSValue js_build_rest(JSContext *ctx, int first, int argc, JSValueConst *argv) { JSValue val; int i, ret; val = JS_NewArray(ctx); if (JS_IsException(val)) return val; for (i = first; i < argc; i++) { ret = JS_DefinePropertyValueUint32(ctx, val, i - first, JS_DupValue(ctx, argv[i]), JS_PROP_C_W_E); if (ret < 0) { JS_FreeValue(ctx, val); return JS_EXCEPTION; } } return val; } static JSValue build_for_in_iterator(JSContext *ctx, JSValue obj) { JSObject *p, *p1; JSPropertyEnum *tab_atom; int i; JSValue enum_obj; JSForInIterator *it; uint32_t tag, tab_atom_count; tag = JS_VALUE_GET_TAG(obj); if (tag != JS_TAG_OBJECT && tag != JS_TAG_NULL && tag != JS_TAG_UNDEFINED) { obj = JS_ToObjectFree(ctx, obj); } it = js_malloc(ctx, sizeof(*it)); if (!it) { JS_FreeValue(ctx, obj); return JS_EXCEPTION; } enum_obj = JS_NewObjectProtoClass(ctx, JS_NULL, JS_CLASS_FOR_IN_ITERATOR); if (JS_IsException(enum_obj)) { js_free(ctx, it); JS_FreeValue(ctx, obj); return JS_EXCEPTION; } it->is_array = FALSE; it->obj = obj; it->idx = 0; it->tab_atom = NULL; it->atom_count = 0; it->in_prototype_chain = FALSE; p1 = JS_VALUE_GET_OBJ(enum_obj); p1->u.for_in_iterator = it; if (tag == JS_TAG_NULL || tag == JS_TAG_UNDEFINED) return enum_obj; p = JS_VALUE_GET_OBJ(obj); if (p->fast_array) { JSShape *sh; JSShapeProperty *prs; /* check that there are no enumerable normal fields */ sh = p->shape; for(i = 0, prs = get_shape_prop(sh); i < sh->prop_count; i++, prs++) { if (prs->flags & JS_PROP_ENUMERABLE) goto normal_case; } /* for fast arrays, we only store the number of elements */ it->is_array = TRUE; it->atom_count = p->u.array.count; } else { normal_case: if (JS_GetOwnPropertyNamesInternal(ctx, &tab_atom, &tab_atom_count, p, JS_GPN_STRING_MASK | JS_GPN_SET_ENUM)) { JS_FreeValue(ctx, enum_obj); return JS_EXCEPTION; } it->tab_atom = tab_atom; it->atom_count = 
tab_atom_count; } return enum_obj; } /* obj -> enum_obj */ static __exception int js_for_in_start(JSContext *ctx, JSValue *sp) { sp[-1] = build_for_in_iterator(ctx, sp[-1]); if (JS_IsException(sp[-1])) return -1; return 0; } /* return -1 if exception, 0 if slow case, 1 if the enumeration is finished */ static __exception int js_for_in_prepare_prototype_chain_enum(JSContext *ctx, JSValueConst enum_obj) { JSObject *p; JSForInIterator *it; JSPropertyEnum *tab_atom; uint32_t tab_atom_count, i; JSValue obj1; p = JS_VALUE_GET_OBJ(enum_obj); it = p->u.for_in_iterator; /* check if there are enumerable properties in the prototype chain (fast path) */ obj1 = JS_DupValue(ctx, it->obj); for(;;) { obj1 = JS_GetPrototypeFree(ctx, obj1); if (JS_IsNull(obj1)) break; if (JS_IsException(obj1)) goto fail; if (JS_GetOwnPropertyNamesInternal(ctx, &tab_atom, &tab_atom_count, JS_VALUE_GET_OBJ(obj1), JS_GPN_STRING_MASK | JS_GPN_ENUM_ONLY)) { JS_FreeValue(ctx, obj1); goto fail; } js_free_prop_enum(ctx, tab_atom, tab_atom_count); if (tab_atom_count != 0) { JS_FreeValue(ctx, obj1); goto slow_path; } /* must check for timeout to avoid infinite loop */ if (js_poll_interrupts(ctx)) { JS_FreeValue(ctx, obj1); goto fail; } } JS_FreeValue(ctx, obj1); return 1; slow_path: /* add the visited properties, even if they are not enumerable */ if (it->is_array) { if (JS_GetOwnPropertyNamesInternal(ctx, &tab_atom, &tab_atom_count, JS_VALUE_GET_OBJ(it->obj), JS_GPN_STRING_MASK | JS_GPN_SET_ENUM)) { goto fail; } it->is_array = FALSE; it->tab_atom = tab_atom; it->atom_count = tab_atom_count; } for(i = 0; i < it->atom_count; i++) { if (JS_DefinePropertyValue(ctx, enum_obj, it->tab_atom[i].atom, JS_NULL, JS_PROP_ENUMERABLE) < 0) goto fail; } return 0; fail: return -1; } /* enum_obj -> enum_obj value done */ static __exception int js_for_in_next(JSContext *ctx, JSValue *sp) { JSValueConst enum_obj; JSObject *p; JSAtom prop; JSForInIterator *it; JSPropertyEnum *tab_atom; uint32_t tab_atom_count; int ret; enum_obj = sp[-1]; /* fail safe */ if (JS_VALUE_GET_TAG(enum_obj) != JS_TAG_OBJECT) goto done; p = JS_VALUE_GET_OBJ(enum_obj); if (p->class_id != JS_CLASS_FOR_IN_ITERATOR) goto done; it = p->u.for_in_iterator; for(;;) { if (it->idx >= it->atom_count) { if (JS_IsNull(it->obj) || JS_IsUndefined(it->obj)) goto done; /* not an object */ /* no more property in the current object: look in the prototype */ if (!it->in_prototype_chain) { ret = js_for_in_prepare_prototype_chain_enum(ctx, enum_obj); if (ret < 0) return -1; if (ret) goto done; it->in_prototype_chain = TRUE; } it->obj = JS_GetPrototypeFree(ctx, it->obj); if (JS_IsException(it->obj)) return -1; if (JS_IsNull(it->obj)) goto done; /* no more prototype */ /* must check for timeout to avoid infinite loop */ if (js_poll_interrupts(ctx)) return -1; if (JS_GetOwnPropertyNamesInternal(ctx, &tab_atom, &tab_atom_count, JS_VALUE_GET_OBJ(it->obj), JS_GPN_STRING_MASK | JS_GPN_SET_ENUM)) { return -1; } js_free_prop_enum(ctx, it->tab_atom, it->atom_count); it->tab_atom = tab_atom; it->atom_count = tab_atom_count; it->idx = 0; } else { if (it->is_array) { prop = __JS_AtomFromUInt32(it->idx); it->idx++; } else { BOOL is_enumerable; prop = it->tab_atom[it->idx].atom; is_enumerable = it->tab_atom[it->idx].is_enumerable; it->idx++; if (it->in_prototype_chain) { /* slow case: we are in the prototype chain */ ret = JS_GetOwnPropertyInternal(ctx, NULL, JS_VALUE_GET_OBJ(enum_obj), prop); if (ret < 0) return ret; if (ret) continue; /* already visited */ /* add to the visited property list */ if 
(JS_DefinePropertyValue(ctx, enum_obj, prop, JS_NULL, JS_PROP_ENUMERABLE) < 0) return -1; } if (!is_enumerable) continue; } /* check if the property was deleted */ ret = JS_GetOwnPropertyInternal(ctx, NULL, JS_VALUE_GET_OBJ(it->obj), prop); if (ret < 0) return ret; if (ret) break; } } /* return the property */ sp[0] = JS_AtomToValue(ctx, prop); sp[1] = JS_FALSE; return 0; done: /* return the end */ sp[0] = JS_UNDEFINED; sp[1] = JS_TRUE; return 0; } static JSValue JS_GetIterator2(JSContext *ctx, JSValueConst obj, JSValueConst method) { JSValue enum_obj; enum_obj = JS_Call(ctx, method, obj, 0, NULL); if (JS_IsException(enum_obj)) return enum_obj; if (!JS_IsObject(enum_obj)) { JS_FreeValue(ctx, enum_obj); return JS_ThrowTypeErrorNotAnObject(ctx); } return enum_obj; } static JSValue JS_GetIterator(JSContext *ctx, JSValueConst obj, BOOL is_async) { JSValue method, ret, sync_iter; if (is_async) { method = JS_GetProperty(ctx, obj, JS_ATOM_Symbol_asyncIterator); if (JS_IsException(method)) return method; if (JS_IsUndefined(method) || JS_IsNull(method)) { method = JS_GetProperty(ctx, obj, JS_ATOM_Symbol_iterator); if (JS_IsException(method)) return method; sync_iter = JS_GetIterator2(ctx, obj, method); JS_FreeValue(ctx, method); if (JS_IsException(sync_iter)) return sync_iter; ret = JS_CreateAsyncFromSyncIterator(ctx, sync_iter); JS_FreeValue(ctx, sync_iter); return ret; } } else { method = JS_GetProperty(ctx, obj, JS_ATOM_Symbol_iterator); if (JS_IsException(method)) return method; } if (!JS_IsFunction(ctx, method)) { JS_FreeValue(ctx, method); return JS_ThrowTypeError(ctx, "value is not iterable"); } ret = JS_GetIterator2(ctx, obj, method); JS_FreeValue(ctx, method); return ret; } /* return *pdone = 2 if the iterator object is not parsed */ static JSValue JS_IteratorNext2(JSContext *ctx, JSValueConst enum_obj, JSValueConst method, int argc, JSValueConst *argv, int *pdone) { JSValue obj; /* fast path for the built-in iterators (avoid creating the intermediate result object) */ if (JS_IsObject(method)) { JSObject *p = JS_VALUE_GET_OBJ(method); if (p->class_id == JS_CLASS_C_FUNCTION && p->u.cfunc.cproto == JS_CFUNC_iterator_next) { JSCFunctionType func; JSValueConst args[1]; /* in case the function expects one argument */ if (argc == 0) { args[0] = JS_UNDEFINED; argv = args; } func = p->u.cfunc.c_function; return func.iterator_next(ctx, enum_obj, argc, argv, pdone, p->u.cfunc.magic); } } obj = JS_Call(ctx, method, enum_obj, argc, argv); if (JS_IsException(obj)) goto fail; if (!JS_IsObject(obj)) { JS_FreeValue(ctx, obj); JS_ThrowTypeError(ctx, "iterator must return an object"); goto fail; } *pdone = 2; return obj; fail: *pdone = FALSE; return JS_EXCEPTION; } static JSValue JS_IteratorNext(JSContext *ctx, JSValueConst enum_obj, JSValueConst method, int argc, JSValueConst *argv, BOOL *pdone) { JSValue obj, value, done_val; int done; obj = JS_IteratorNext2(ctx, enum_obj, method, argc, argv, &done); if (JS_IsException(obj)) goto fail; if (done != 2) { *pdone = done; return obj; } else { done_val = JS_GetProperty(ctx, obj, JS_ATOM_done); if (JS_IsException(done_val)) goto fail; *pdone = JS_ToBoolFree(ctx, done_val); value = JS_UNDEFINED; if (!*pdone) { value = JS_GetProperty(ctx, obj, JS_ATOM_value); } JS_FreeValue(ctx, obj); return value; } fail: JS_FreeValue(ctx, obj); *pdone = FALSE; return JS_EXCEPTION; } /* return < 0 in case of exception */ static int JS_IteratorClose(JSContext *ctx, JSValueConst enum_obj, BOOL is_exception_pending) { JSValue method, ret, ex_obj; int res; if (is_exception_pending) { 
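/* save the pending exception so the iterator's return() method can
           run with a clean exception state; it is re-thrown in the 'done'
           path below */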
ex_obj = ctx->rt->current_exception; ctx->rt->current_exception = JS_NULL; res = -1; } else { ex_obj = JS_UNDEFINED; res = 0; } method = JS_GetProperty(ctx, enum_obj, JS_ATOM_return); if (JS_IsException(method)) { res = -1; goto done; } if (JS_IsUndefined(method) || JS_IsNull(method)) { goto done; } ret = JS_CallFree(ctx, method, enum_obj, 0, NULL); if (!is_exception_pending) { if (JS_IsException(ret)) { res = -1; } else if (!JS_IsObject(ret)) { JS_ThrowTypeErrorNotAnObject(ctx); res = -1; } } JS_FreeValue(ctx, ret); done: if (is_exception_pending) { JS_Throw(ctx, ex_obj); } return res; } /* obj -> enum_rec (3 slots) */ static __exception int js_for_of_start(JSContext *ctx, JSValue *sp, BOOL is_async) { JSValue op1, obj, method; op1 = sp[-1]; obj = JS_GetIterator(ctx, op1, is_async); if (JS_IsException(obj)) return -1; JS_FreeValue(ctx, op1); sp[-1] = obj; method = JS_GetProperty(ctx, obj, JS_ATOM_next); if (JS_IsException(method)) return -1; sp[0] = method; return 0; } /* enum_rec [objs] -> enum_rec [objs] value done. There are 'offset' objs. If 'done' is true or in case of exception, 'enum_rec' is set to undefined. If 'done' is true, 'value' is always set to undefined. */ static __exception int js_for_of_next(JSContext *ctx, JSValue *sp, int offset) { JSValue value = JS_UNDEFINED; int done = 1; if (likely(!JS_IsUndefined(sp[offset]))) { value = JS_IteratorNext(ctx, sp[offset], sp[offset + 1], 0, NULL, &done); if (JS_IsException(value)) done = -1; if (done) { /* value is JS_UNDEFINED or JS_EXCEPTION */ /* replace the iteration object with undefined */ JS_FreeValue(ctx, sp[offset]); sp[offset] = JS_UNDEFINED; if (done < 0) { return -1; } else { JS_FreeValue(ctx, value); value = JS_UNDEFINED; } } } sp[0] = value; sp[1] = JS_NewBool(ctx, done); return 0; } static JSValue JS_IteratorGetCompleteValue(JSContext *ctx, JSValueConst obj, BOOL *pdone) { JSValue done_val, value; BOOL done; done_val = JS_GetProperty(ctx, obj, JS_ATOM_done); if (JS_IsException(done_val)) goto fail; done = JS_ToBoolFree(ctx, done_val); value = JS_GetProperty(ctx, obj, JS_ATOM_value); if (JS_IsException(value)) goto fail; *pdone = done; return value; fail: *pdone = FALSE; return JS_EXCEPTION; } static __exception int js_iterator_get_value_done(JSContext *ctx, JSValue *sp) { JSValue obj, value; BOOL done; obj = sp[-1]; if (!JS_IsObject(obj)) { JS_ThrowTypeError(ctx, "iterator must return an object"); return -1; } value = JS_IteratorGetCompleteValue(ctx, obj, &done); if (JS_IsException(value)) return -1; JS_FreeValue(ctx, obj); sp[-1] = value; sp[0] = JS_NewBool(ctx, done); return 0; } static JSValue js_create_iterator_result(JSContext *ctx, JSValue val, BOOL done) { JSValue obj; obj = JS_NewObject(ctx); if (JS_IsException(obj)) { JS_FreeValue(ctx, val); return obj; } if (JS_DefinePropertyValue(ctx, obj, JS_ATOM_value, val, JS_PROP_C_W_E) < 0) { goto fail; } if (JS_DefinePropertyValue(ctx, obj, JS_ATOM_done, JS_NewBool(ctx, done), JS_PROP_C_W_E) < 0) { fail: JS_FreeValue(ctx, obj); return JS_EXCEPTION; } return obj; } static JSValue js_array_iterator_next(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, BOOL *pdone, int magic); static JSValue js_create_array_iterator(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int magic); static BOOL js_is_fast_array(JSContext *ctx, JSValueConst obj) { /* Try and handle fast arrays explicitly */ if (JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT) { JSObject *p = JS_VALUE_GET_OBJ(obj); if (p->class_id == JS_CLASS_ARRAY && p->fast_array) { return TRUE; } 
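/* note: "fast array" means the elements live in a contiguous JSValue
           vector (p->u.array.u.values / p->u.array.count); js_get_fast_array()
           below exposes that vector directly to callers */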
} return FALSE; } /* Access an Array's internal JSValue array if available */ static BOOL js_get_fast_array(JSContext *ctx, JSValueConst obj, JSValue **arrpp, uint32_t *countp) { /* Try and handle fast arrays explicitly */ if (JS_VALUE_GET_TAG(obj) == JS_TAG_OBJECT) { JSObject *p = JS_VALUE_GET_OBJ(obj); if (p->class_id == JS_CLASS_ARRAY && p->fast_array) { *countp = p->u.array.count; *arrpp = p->u.array.u.values; return TRUE; } } return FALSE; } static __exception int js_append_enumerate(JSContext *ctx, JSValue *sp) { JSValue iterator, enumobj, method, value; int is_array_iterator; JSValue *arrp; uint32_t i, count32, pos; if (JS_VALUE_GET_TAG(sp[-2]) != JS_TAG_INT) { JS_ThrowInternalError(ctx, "invalid index for append"); return -1; } pos = JS_VALUE_GET_INT(sp[-2]); /* XXX: further optimisations: - use ctx->array_proto_values? - check if array_iterator_prototype next method is built-in and avoid constructing actual iterator object? - build this into js_for_of_start and use in all `for (x of o)` loops */ iterator = JS_GetProperty(ctx, sp[-1], JS_ATOM_Symbol_iterator); if (JS_IsException(iterator)) return -1; is_array_iterator = JS_IsCFunction(ctx, iterator, (JSCFunction *)js_create_array_iterator, JS_ITERATOR_KIND_VALUE); JS_FreeValue(ctx, iterator); enumobj = JS_GetIterator(ctx, sp[-1], FALSE); if (JS_IsException(enumobj)) return -1; method = JS_GetProperty(ctx, enumobj, JS_ATOM_next); if (JS_IsException(method)) { JS_FreeValue(ctx, enumobj); return -1; } if (is_array_iterator && JS_IsCFunction(ctx, method, (JSCFunction *)js_array_iterator_next, 0) && js_get_fast_array(ctx, sp[-1], &arrp, &count32)) { uint32_t len; if (js_get_length32(ctx, &len, sp[-1])) goto exception; /* if len > count32, the elements >= count32 might be read in the prototypes and might have side effects */ if (len != count32) goto general_case; /* Handle fast arrays explicitly */ for (i = 0; i < count32; i++) { if (JS_DefinePropertyValueUint32(ctx, sp[-3], pos++, JS_DupValue(ctx, arrp[i]), JS_PROP_C_W_E) < 0) goto exception; } } else { general_case: for (;;) { BOOL done; value = JS_IteratorNext(ctx, enumobj, method, 0, NULL, &done); if (JS_IsException(value)) goto exception; if (done) { /* value is JS_UNDEFINED */ break; } if (JS_DefinePropertyValueUint32(ctx, sp[-3], pos++, value, JS_PROP_C_W_E) < 0) goto exception; } } /* Note: could raise an error if too many elements */ sp[-2] = JS_NewInt32(ctx, pos); JS_FreeValue(ctx, enumobj); JS_FreeValue(ctx, method); return 0; exception: JS_IteratorClose(ctx, enumobj, TRUE); JS_FreeValue(ctx, enumobj); JS_FreeValue(ctx, method); return -1; } static __exception int JS_CopyDataProperties(JSContext *ctx, JSValueConst target, JSValueConst source, JSValueConst excluded, BOOL setprop) { JSPropertyEnum *tab_atom; JSValue val; uint32_t i, tab_atom_count; JSObject *p; JSObject *pexcl = NULL; int ret, gpn_flags; JSPropertyDescriptor desc; BOOL is_enumerable; if (JS_VALUE_GET_TAG(source) != JS_TAG_OBJECT) return 0; if (JS_VALUE_GET_TAG(excluded) == JS_TAG_OBJECT) pexcl = JS_VALUE_GET_OBJ(excluded); p = JS_VALUE_GET_OBJ(source); gpn_flags = JS_GPN_STRING_MASK | JS_GPN_SYMBOL_MASK | JS_GPN_ENUM_ONLY; if (p->is_exotic) { const JSClassExoticMethods *em = ctx->rt->class_array[p->class_id].exotic; /* cannot use JS_GPN_ENUM_ONLY with e.g. 
proxies because it introduces a visible change */ if (em && em->get_own_property_names) { gpn_flags &= ~JS_GPN_ENUM_ONLY; } } if (JS_GetOwnPropertyNamesInternal(ctx, &tab_atom, &tab_atom_count, p, gpn_flags)) return -1; for (i = 0; i < tab_atom_count; i++) { if (pexcl) { ret = JS_GetOwnPropertyInternal(ctx, NULL, pexcl, tab_atom[i].atom); if (ret) { if (ret < 0) goto exception; continue; } } if (!(gpn_flags & JS_GPN_ENUM_ONLY)) { /* test if the property is enumerable */ ret = JS_GetOwnPropertyInternal(ctx, &desc, p, tab_atom[i].atom); if (ret < 0) goto exception; if (!ret) continue; is_enumerable = (desc.flags & JS_PROP_ENUMERABLE) != 0; js_free_desc(ctx, &desc); if (!is_enumerable) continue; } val = JS_GetProperty(ctx, source, tab_atom[i].atom); if (JS_IsException(val)) goto exception; if (setprop) ret = JS_SetProperty(ctx, target, tab_atom[i].atom, val); else ret = JS_DefinePropertyValue(ctx, target, tab_atom[i].atom, val, JS_PROP_C_W_E); if (ret < 0) goto exception; } js_free_prop_enum(ctx, tab_atom, tab_atom_count); return 0; exception: js_free_prop_enum(ctx, tab_atom, tab_atom_count); return -1; } /* only valid inside C functions */ static JSValueConst JS_GetActiveFunction(JSContext *ctx) { return ctx->rt->current_stack_frame->cur_func; } static JSVarRef *get_var_ref(JSContext *ctx, JSStackFrame *sf, int var_idx, BOOL is_arg) { JSVarRef *var_ref; struct list_head *el; list_for_each(el, &sf->var_ref_list) { var_ref = list_entry(el, JSVarRef, var_ref_link); if (var_ref->var_idx == var_idx && var_ref->is_arg == is_arg) { var_ref->header.ref_count++; return var_ref; } } /* create a new one */ var_ref = js_malloc(ctx, sizeof(JSVarRef)); if (!var_ref) return NULL; var_ref->header.ref_count = 1; add_gc_object(ctx->rt, &var_ref->header, JS_GC_OBJ_TYPE_VAR_REF); var_ref->is_detached = FALSE; var_ref->is_arg = is_arg; var_ref->var_idx = var_idx; list_add_tail(&var_ref->var_ref_link, &sf->var_ref_list); if (sf->js_mode & JS_MODE_ASYNC) { /* The stack frame is detached and may be destroyed at any time so its reference count must be increased. Calling close_var_refs() when destroying the stack frame is not possible because it would change the graph between the GC objects. Another solution could be to temporarily detach the JSVarRef of async functions during the GC. It would have the advantage of allowing the release of unused stack frames in a cycle. 
*/ var_ref->async_func = container_of(sf, JSAsyncFunctionState, frame); var_ref->async_func->header.ref_count++; } else { var_ref->async_func = NULL; } if (is_arg) var_ref->pvalue = &sf->arg_buf[var_idx]; else var_ref->pvalue = &sf->var_buf[var_idx]; return var_ref; } static JSValue js_closure2(JSContext *ctx, JSValue func_obj, JSFunctionBytecode *b, JSVarRef **cur_var_refs, JSStackFrame *sf) { JSObject *p; JSVarRef **var_refs; int i; p = JS_VALUE_GET_OBJ(func_obj); p->u.func.function_bytecode = b; p->u.func.home_object = NULL; p->u.func.var_refs = NULL; if (b->closure_var_count) { var_refs = js_mallocz(ctx, sizeof(var_refs[0]) * b->closure_var_count); if (!var_refs) goto fail; p->u.func.var_refs = var_refs; for(i = 0; i < b->closure_var_count; i++) { JSClosureVar *cv = &b->closure_var[i]; JSVarRef *var_ref; if (cv->is_local) { /* reuse the existing variable reference if it already exists */ var_ref = get_var_ref(ctx, sf, cv->var_idx, cv->is_arg); if (!var_ref) goto fail; } else { var_ref = cur_var_refs[cv->var_idx]; var_ref->header.ref_count++; } var_refs[i] = var_ref; } } return func_obj; fail: /* bfunc is freed when func_obj is freed */ JS_FreeValue(ctx, func_obj); return JS_EXCEPTION; } static JSValue js_instantiate_prototype(JSContext *ctx, JSObject *p, JSAtom atom, void *opaque) { JSValue obj, this_val; int ret; this_val = JS_MKPTR(JS_TAG_OBJECT, p); obj = JS_NewObject(ctx); if (JS_IsException(obj)) return JS_EXCEPTION; set_cycle_flag(ctx, obj); set_cycle_flag(ctx, this_val); ret = JS_DefinePropertyValue(ctx, obj, JS_ATOM_constructor, JS_DupValue(ctx, this_val), JS_PROP_WRITABLE | JS_PROP_CONFIGURABLE); if (ret < 0) { JS_FreeValue(ctx, obj); return JS_EXCEPTION; } return obj; } static const uint16_t func_kind_to_class_id[] = { [JS_FUNC_NORMAL] = JS_CLASS_BYTECODE_FUNCTION, [JS_FUNC_GENERATOR] = JS_CLASS_GENERATOR_FUNCTION, [JS_FUNC_ASYNC] = JS_CLASS_ASYNC_FUNCTION, [JS_FUNC_ASYNC_GENERATOR] = JS_CLASS_ASYNC_GENERATOR_FUNCTION, }; static JSValue js_closure(JSContext *ctx, JSValue bfunc, JSVarRef **cur_var_refs, JSStackFrame *sf) { JSFunctionBytecode *b; JSValue func_obj; JSAtom name_atom; b = JS_VALUE_GET_PTR(bfunc); func_obj = JS_NewObjectClass(ctx, func_kind_to_class_id[b->func_kind]); if (JS_IsException(func_obj)) { JS_FreeValue(ctx, bfunc); return JS_EXCEPTION; } func_obj = js_closure2(ctx, func_obj, b, cur_var_refs, sf); if (JS_IsException(func_obj)) { /* bfunc has been freed */ goto fail; } name_atom = b->func_name; if (name_atom == JS_ATOM_NULL) name_atom = JS_ATOM_empty_string; js_function_set_properties(ctx, func_obj, name_atom, b->defined_arg_count); if (b->func_kind & JS_FUNC_GENERATOR) { JSValue proto; int proto_class_id; /* generators have a prototype field which is used as prototype for the generator object */ if (b->func_kind == JS_FUNC_ASYNC_GENERATOR) proto_class_id = JS_CLASS_ASYNC_GENERATOR; else proto_class_id = JS_CLASS_GENERATOR; proto = JS_NewObjectProto(ctx, ctx->class_proto[proto_class_id]); if (JS_IsException(proto)) goto fail; JS_DefinePropertyValue(ctx, func_obj, JS_ATOM_prototype, proto, JS_PROP_WRITABLE); } else if (b->has_prototype) { /* add the 'prototype' property: delay instantiation to avoid creating cycles for every javascript function. 
The prototype object is created on the fly when first accessed */ JS_SetConstructorBit(ctx, func_obj, TRUE); JS_DefineAutoInitProperty(ctx, func_obj, JS_ATOM_prototype, JS_AUTOINIT_ID_PROTOTYPE, NULL, JS_PROP_WRITABLE); } return func_obj; fail: /* bfunc is freed when func_obj is freed */ JS_FreeValue(ctx, func_obj); return JS_EXCEPTION; } #define JS_DEFINE_CLASS_HAS_HERITAGE (1 << 0) static int js_op_define_class(JSContext *ctx, JSValue *sp, JSAtom class_name, int class_flags, JSVarRef **cur_var_refs, JSStackFrame *sf, BOOL is_computed_name) { JSValue bfunc, parent_class, proto = JS_UNDEFINED; JSValue ctor = JS_UNDEFINED, parent_proto = JS_UNDEFINED; JSFunctionBytecode *b; parent_class = sp[-2]; bfunc = sp[-1]; if (class_flags & JS_DEFINE_CLASS_HAS_HERITAGE) { if (JS_IsNull(parent_class)) { parent_proto = JS_NULL; parent_class = JS_DupValue(ctx, ctx->function_proto); } else { if (!JS_IsConstructor(ctx, parent_class)) { JS_ThrowTypeError(ctx, "parent class must be constructor"); goto fail; } parent_proto = JS_GetProperty(ctx, parent_class, JS_ATOM_prototype); if (JS_IsException(parent_proto)) goto fail; if (!JS_IsNull(parent_proto) && !JS_IsObject(parent_proto)) { JS_ThrowTypeError(ctx, "parent prototype must be an object or null"); goto fail; } } } else { /* parent_class is JS_UNDEFINED in this case */ parent_proto = JS_DupValue(ctx, ctx->class_proto[JS_CLASS_OBJECT]); parent_class = JS_DupValue(ctx, ctx->function_proto); } proto = JS_NewObjectProto(ctx, parent_proto); if (JS_IsException(proto)) goto fail; b = JS_VALUE_GET_PTR(bfunc); assert(b->func_kind == JS_FUNC_NORMAL); ctor = JS_NewObjectProtoClass(ctx, parent_class, JS_CLASS_BYTECODE_FUNCTION); if (JS_IsException(ctor)) goto fail; ctor = js_closure2(ctx, ctor, b, cur_var_refs, sf); bfunc = JS_UNDEFINED; if (JS_IsException(ctor)) goto fail; js_method_set_home_object(ctx, ctor, proto); JS_SetConstructorBit(ctx, ctor, TRUE); JS_DefinePropertyValue(ctx, ctor, JS_ATOM_length, JS_NewInt32(ctx, b->defined_arg_count), JS_PROP_CONFIGURABLE); if (is_computed_name) { if (JS_DefineObjectNameComputed(ctx, ctor, sp[-3], JS_PROP_CONFIGURABLE) < 0) goto fail; } else { if (JS_DefineObjectName(ctx, ctor, class_name, JS_PROP_CONFIGURABLE) < 0) goto fail; } /* the constructor property must be first. 
It can be overriden by computed property names */ if (JS_DefinePropertyValue(ctx, proto, JS_ATOM_constructor, JS_DupValue(ctx, ctor), JS_PROP_CONFIGURABLE | JS_PROP_WRITABLE | JS_PROP_THROW) < 0) goto fail; /* set the prototype property */ if (JS_DefinePropertyValue(ctx, ctor, JS_ATOM_prototype, JS_DupValue(ctx, proto), JS_PROP_THROW) < 0) goto fail; set_cycle_flag(ctx, ctor); set_cycle_flag(ctx, proto); JS_FreeValue(ctx, parent_proto); JS_FreeValue(ctx, parent_class); sp[-2] = ctor; sp[-1] = proto; return 0; fail: JS_FreeValue(ctx, parent_class); JS_FreeValue(ctx, parent_proto); JS_FreeValue(ctx, bfunc); JS_FreeValue(ctx, proto); JS_FreeValue(ctx, ctor); sp[-2] = JS_UNDEFINED; sp[-1] = JS_UNDEFINED; return -1; } static void close_var_refs(JSRuntime *rt, JSStackFrame *sf) { struct list_head *el, *el1; JSVarRef *var_ref; int var_idx; list_for_each_safe(el, el1, &sf->var_ref_list) { var_ref = list_entry(el, JSVarRef, var_ref_link); /* no need to unlink var_ref->var_ref_link as the list is never used afterwards */ if (var_ref->async_func) async_func_free(rt, var_ref->async_func); var_idx = var_ref->var_idx; if (var_ref->is_arg) var_ref->value = JS_DupValueRT(rt, sf->arg_buf[var_idx]); else var_ref->value = JS_DupValueRT(rt, sf->var_buf[var_idx]); var_ref->pvalue = &var_ref->value; /* the reference is no longer to a local variable */ var_ref->is_detached = TRUE; } } static void close_lexical_var(JSContext *ctx, JSStackFrame *sf, int idx, int is_arg) { struct list_head *el, *el1; JSVarRef *var_ref; int var_idx = idx; list_for_each_safe(el, el1, &sf->var_ref_list) { var_ref = list_entry(el, JSVarRef, var_ref_link); if (var_idx == var_ref->var_idx && var_ref->is_arg == is_arg) { list_del(&var_ref->var_ref_link); if (var_ref->async_func) async_func_free(ctx->rt, var_ref->async_func); var_ref->value = JS_DupValue(ctx, sf->var_buf[var_idx]); var_ref->pvalue = &var_ref->value; /* the reference is no longer to a local variable */ var_ref->is_detached = TRUE; } } } #define JS_CALL_FLAG_COPY_ARGV (1 << 1) #define JS_CALL_FLAG_GENERATOR (1 << 2) static JSValue js_call_c_function(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags) { JSRuntime *rt = ctx->rt; JSCFunctionType func; JSObject *p; JSStackFrame sf_s, *sf = &sf_s, *prev_sf; JSValue ret_val; JSValueConst *arg_buf; int arg_count, i; JSCFunctionEnum cproto; p = JS_VALUE_GET_OBJ(func_obj); cproto = p->u.cfunc.cproto; arg_count = p->u.cfunc.length; /* better to always check stack overflow */ if (js_check_stack_overflow(rt, sizeof(arg_buf[0]) * arg_count)) return JS_ThrowStackOverflow(ctx); prev_sf = rt->current_stack_frame; sf->prev_frame = prev_sf; rt->current_stack_frame = sf; ctx = p->u.cfunc.realm; /* change the current realm */ #ifdef CONFIG_BIGNUM /* we only propagate the bignum mode as some runtime functions test it */ if (prev_sf) sf->js_mode = prev_sf->js_mode & JS_MODE_MATH; else sf->js_mode = 0; #else sf->js_mode = 0; #endif #ifdef STRICT_R_HEADERS sf->cur_func = func_obj; #else sf->cur_func = (JSValue)func_obj; #endif sf->arg_count = argc; arg_buf = argv; if (unlikely(argc < arg_count)) { /* ensure that at least argc_count arguments are readable */ arg_buf = alloca(sizeof(arg_buf[0]) * arg_count); for(i = 0; i < argc; i++) arg_buf[i] = argv[i]; for(i = argc; i < arg_count; i++) arg_buf[i] = JS_UNDEFINED; sf->arg_count = arg_count; } sf->arg_buf = (JSValue*)arg_buf; func = p->u.cfunc.c_function; switch(cproto) { case JS_CFUNC_constructor: case JS_CFUNC_constructor_or_func: if (!(flags & 
JS_CALL_FLAG_CONSTRUCTOR)) { if (cproto == JS_CFUNC_constructor) { not_a_constructor: ret_val = JS_ThrowTypeError(ctx, "must be called with new"); break; } else { this_obj = JS_UNDEFINED; } } /* here this_obj is new_target */ /* fall thru */ case JS_CFUNC_generic: ret_val = func.generic(ctx, this_obj, argc, arg_buf); break; case JS_CFUNC_constructor_magic: case JS_CFUNC_constructor_or_func_magic: if (!(flags & JS_CALL_FLAG_CONSTRUCTOR)) { if (cproto == JS_CFUNC_constructor_magic) { goto not_a_constructor; } else { this_obj = JS_UNDEFINED; } } /* fall thru */ case JS_CFUNC_generic_magic: ret_val = func.generic_magic(ctx, this_obj, argc, arg_buf, p->u.cfunc.magic); break; case JS_CFUNC_getter: ret_val = func.getter(ctx, this_obj); break; case JS_CFUNC_setter: ret_val = func.setter(ctx, this_obj, arg_buf[0]); break; case JS_CFUNC_getter_magic: ret_val = func.getter_magic(ctx, this_obj, p->u.cfunc.magic); break; case JS_CFUNC_setter_magic: ret_val = func.setter_magic(ctx, this_obj, arg_buf[0], p->u.cfunc.magic); break; case JS_CFUNC_f_f: { double d1; if (unlikely(JS_ToFloat64(ctx, &d1, arg_buf[0]))) { ret_val = JS_EXCEPTION; break; } ret_val = JS_NewFloat64(ctx, func.f_f(d1)); } break; case JS_CFUNC_f_f_f: { double d1, d2; if (unlikely(JS_ToFloat64(ctx, &d1, arg_buf[0]))) { ret_val = JS_EXCEPTION; break; } if (unlikely(JS_ToFloat64(ctx, &d2, arg_buf[1]))) { ret_val = JS_EXCEPTION; break; } ret_val = JS_NewFloat64(ctx, func.f_f_f(d1, d2)); } break; case JS_CFUNC_iterator_next: { int done; ret_val = func.iterator_next(ctx, this_obj, argc, arg_buf, &done, p->u.cfunc.magic); if (!JS_IsException(ret_val) && done != 2) { ret_val = js_create_iterator_result(ctx, ret_val, done); } } break; default: abort(); } rt->current_stack_frame = sf->prev_frame; return ret_val; } static JSValue js_call_bound_function(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags) { JSObject *p; JSBoundFunction *bf; JSValueConst *arg_buf, new_target; int arg_count, i; p = JS_VALUE_GET_OBJ(func_obj); bf = p->u.bound_function; arg_count = bf->argc + argc; if (js_check_stack_overflow(ctx->rt, sizeof(JSValue) * arg_count)) return JS_ThrowStackOverflow(ctx); arg_buf = alloca(sizeof(JSValue) * arg_count); for(i = 0; i < bf->argc; i++) { arg_buf[i] = bf->argv[i]; } for(i = 0; i < argc; i++) { arg_buf[bf->argc + i] = argv[i]; } if (flags & JS_CALL_FLAG_CONSTRUCTOR) { new_target = this_obj; if (js_same_value(ctx, func_obj, new_target)) new_target = bf->func_obj; return JS_CallConstructor2(ctx, bf->func_obj, new_target, arg_count, arg_buf); } else { return JS_Call(ctx, bf->func_obj, bf->this_val, arg_count, arg_buf); } } /* argument of OP_special_object */ typedef enum { OP_SPECIAL_OBJECT_ARGUMENTS, OP_SPECIAL_OBJECT_MAPPED_ARGUMENTS, OP_SPECIAL_OBJECT_THIS_FUNC, OP_SPECIAL_OBJECT_NEW_TARGET, OP_SPECIAL_OBJECT_HOME_OBJECT, OP_SPECIAL_OBJECT_VAR_OBJECT, OP_SPECIAL_OBJECT_IMPORT_META, } OPSpecialObjectEnum; #define FUNC_RET_AWAIT 0 #define FUNC_RET_YIELD 1 #define FUNC_RET_YIELD_STAR 2 #define FUNC_RET_INITIAL_YIELD 3 /* argv[] is modified if (flags & JS_CALL_FLAG_COPY_ARGV) = 0. 
*/ static JSValue JS_CallInternal(JSContext *caller_ctx, JSValueConst func_obj, JSValueConst this_obj, JSValueConst new_target, int argc, JSValue *argv, int flags) { JSRuntime *rt = caller_ctx->rt; JSContext *ctx; JSObject *p; JSFunctionBytecode *b; JSStackFrame sf_s, *sf = &sf_s; const uint8_t *pc; int opcode, arg_allocated_size, i; JSValue *local_buf, *stack_buf, *var_buf, *arg_buf, *sp, ret_val, *pval; JSVarRef **var_refs; size_t alloca_size; #if !DIRECT_DISPATCH || defined(STRICT_R_HEADERS) #define SWITCH(pc) switch (opcode = *pc++) #define CASE(op) case op #define DEFAULT default #define BREAK break #else static const void * const dispatch_table[256] = { #define DEF(id, size, n_pop, n_push, f) && case_OP_ ## id, #if SHORT_OPCODES #define def(id, size, n_pop, n_push, f) #else #define def(id, size, n_pop, n_push, f) && case_default, #endif #include "quickjs-opcode.h" [ OP_COUNT ... 255 ] = &&case_default }; #define SWITCH(pc) goto *dispatch_table[opcode = *pc++]; #define CASE(op) case_ ## op #define DEFAULT case_default #define BREAK SWITCH(pc) #endif if (js_poll_interrupts(caller_ctx)) return JS_EXCEPTION; if (unlikely(JS_VALUE_GET_TAG(func_obj) != JS_TAG_OBJECT)) { if (flags & JS_CALL_FLAG_GENERATOR) { JSAsyncFunctionState *s = JS_VALUE_GET_PTR(func_obj); /* func_obj get contains a pointer to JSFuncAsyncState */ /* the stack frame is already allocated */ sf = &s->frame; p = JS_VALUE_GET_OBJ(sf->cur_func); b = p->u.func.function_bytecode; ctx = b->realm; var_refs = p->u.func.var_refs; local_buf = arg_buf = sf->arg_buf; var_buf = sf->var_buf; stack_buf = sf->var_buf + b->var_count; sp = sf->cur_sp; sf->cur_sp = NULL; /* cur_sp is NULL if the function is running */ pc = sf->cur_pc; sf->prev_frame = rt->current_stack_frame; rt->current_stack_frame = sf; if (s->throw_flag) goto exception; else goto restart; } else { goto not_a_function; } } p = JS_VALUE_GET_OBJ(func_obj); if (unlikely(p->class_id != JS_CLASS_BYTECODE_FUNCTION)) { JSClassCall *call_func; call_func = rt->class_array[p->class_id].call; if (!call_func) { not_a_function: return JS_ThrowTypeError(caller_ctx, "not a function"); } return call_func(caller_ctx, func_obj, this_obj, argc, (JSValueConst *)argv, flags); } b = p->u.func.function_bytecode; if (unlikely(argc < b->arg_count || (flags & JS_CALL_FLAG_COPY_ARGV))) { arg_allocated_size = b->arg_count; } else { arg_allocated_size = 0; } alloca_size = sizeof(JSValue) * (arg_allocated_size + b->var_count + b->stack_size); if (js_check_stack_overflow(rt, alloca_size)) return JS_ThrowStackOverflow(caller_ctx); sf->js_mode = b->js_mode; arg_buf = argv; sf->arg_count = argc; #ifdef STRICT_R_HEADERS sf->cur_func = func_obj; #else sf->cur_func = (JSValue)func_obj; #endif init_list_head(&sf->var_ref_list); var_refs = p->u.func.var_refs; local_buf = alloca(alloca_size); if (unlikely(arg_allocated_size)) { int n = min_int(argc, b->arg_count); arg_buf = local_buf; for(i = 0; i < n; i++) arg_buf[i] = JS_DupValue(caller_ctx, argv[i]); for(; i < b->arg_count; i++) arg_buf[i] = JS_UNDEFINED; sf->arg_count = b->arg_count; } var_buf = local_buf + arg_allocated_size; sf->var_buf = var_buf; sf->arg_buf = arg_buf; for(i = 0; i < b->var_count; i++) var_buf[i] = JS_UNDEFINED; stack_buf = var_buf + b->var_count; sp = stack_buf; pc = b->byte_code_buf; sf->prev_frame = rt->current_stack_frame; rt->current_stack_frame = sf; ctx = b->realm; /* set the current realm */ restart: for(;;) { int call_argc; JSValue *call_argv; SWITCH(pc) { CASE(OP_push_i32): *sp++ = JS_NewInt32(ctx, get_u32(pc)); pc += 4; BREAK; 
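/* the OP_push_* opcodes push immediate values, constant-pool entries
           (b->cpool) and interned atoms onto the value stack */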
CASE(OP_push_const): *sp++ = JS_DupValue(ctx, b->cpool[get_u32(pc)]); pc += 4; BREAK; #if SHORT_OPCODES CASE(OP_push_minus1): CASE(OP_push_0): CASE(OP_push_1): CASE(OP_push_2): CASE(OP_push_3): CASE(OP_push_4): CASE(OP_push_5): CASE(OP_push_6): CASE(OP_push_7): *sp++ = JS_NewInt32(ctx, opcode - OP_push_0); BREAK; CASE(OP_push_i8): *sp++ = JS_NewInt32(ctx, get_i8(pc)); pc += 1; BREAK; CASE(OP_push_i16): *sp++ = JS_NewInt32(ctx, get_i16(pc)); pc += 2; BREAK; CASE(OP_push_const8): *sp++ = JS_DupValue(ctx, b->cpool[*pc++]); BREAK; CASE(OP_fclosure8): *sp++ = js_closure(ctx, JS_DupValue(ctx, b->cpool[*pc++]), var_refs, sf); if (unlikely(JS_IsException(sp[-1]))) goto exception; BREAK; CASE(OP_push_empty_string): *sp++ = JS_AtomToString(ctx, JS_ATOM_empty_string); BREAK; CASE(OP_get_length): { JSValue val; val = JS_GetProperty(ctx, sp[-1], JS_ATOM_length); if (unlikely(JS_IsException(val))) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = val; } BREAK; #endif CASE(OP_push_atom_value): *sp++ = JS_AtomToValue(ctx, get_u32(pc)); pc += 4; BREAK; CASE(OP_undefined): *sp++ = JS_UNDEFINED; BREAK; CASE(OP_null): *sp++ = JS_NULL; BREAK; CASE(OP_push_this): /* OP_push_this is only called at the start of a function */ { JSValue val; if (!(b->js_mode & JS_MODE_STRICT)) { uint32_t tag = JS_VALUE_GET_TAG(this_obj); if (likely(tag == JS_TAG_OBJECT)) goto normal_this; if (tag == JS_TAG_NULL || tag == JS_TAG_UNDEFINED) { val = JS_DupValue(ctx, ctx->global_obj); } else { val = JS_ToObject(ctx, this_obj); if (JS_IsException(val)) goto exception; } } else { normal_this: val = JS_DupValue(ctx, this_obj); } *sp++ = val; } BREAK; CASE(OP_push_false): *sp++ = JS_FALSE; BREAK; CASE(OP_push_true): *sp++ = JS_TRUE; BREAK; CASE(OP_object): *sp++ = JS_NewObject(ctx); if (unlikely(JS_IsException(sp[-1]))) goto exception; BREAK; CASE(OP_special_object): { int arg = *pc++; switch(arg) { case OP_SPECIAL_OBJECT_ARGUMENTS: *sp++ = js_build_arguments(ctx, argc, (JSValueConst *)argv); if (unlikely(JS_IsException(sp[-1]))) goto exception; break; case OP_SPECIAL_OBJECT_MAPPED_ARGUMENTS: *sp++ = js_build_mapped_arguments(ctx, argc, (JSValueConst *)argv, sf, min_int(argc, b->arg_count)); if (unlikely(JS_IsException(sp[-1]))) goto exception; break; case OP_SPECIAL_OBJECT_THIS_FUNC: *sp++ = JS_DupValue(ctx, sf->cur_func); break; case OP_SPECIAL_OBJECT_NEW_TARGET: *sp++ = JS_DupValue(ctx, new_target); break; case OP_SPECIAL_OBJECT_HOME_OBJECT: { JSObject *p1; p1 = p->u.func.home_object; if (unlikely(!p1)) *sp++ = JS_UNDEFINED; else *sp++ = JS_DupValue(ctx, JS_MKPTR(JS_TAG_OBJECT, p1)); } break; case OP_SPECIAL_OBJECT_VAR_OBJECT: *sp++ = JS_NewObjectProto(ctx, JS_NULL); if (unlikely(JS_IsException(sp[-1]))) goto exception; break; case OP_SPECIAL_OBJECT_IMPORT_META: *sp++ = js_import_meta(ctx); if (unlikely(JS_IsException(sp[-1]))) goto exception; break; default: abort(); } } BREAK; CASE(OP_rest): { int first = get_u16(pc); pc += 2; *sp++ = js_build_rest(ctx, first, argc, (JSValueConst *)argv); if (unlikely(JS_IsException(sp[-1]))) goto exception; } BREAK; CASE(OP_drop): JS_FreeValue(ctx, sp[-1]); sp--; BREAK; CASE(OP_nip): JS_FreeValue(ctx, sp[-2]); sp[-2] = sp[-1]; sp--; BREAK; CASE(OP_nip1): /* a b c -> b c */ JS_FreeValue(ctx, sp[-3]); sp[-3] = sp[-2]; sp[-2] = sp[-1]; sp--; BREAK; CASE(OP_dup): sp[0] = JS_DupValue(ctx, sp[-1]); sp++; BREAK; CASE(OP_dup2): /* a b -> a b a b */ sp[0] = JS_DupValue(ctx, sp[-2]); sp[1] = JS_DupValue(ctx, sp[-1]); sp += 2; BREAK; CASE(OP_dup3): /* a b c -> a b c a b c */ sp[0] = JS_DupValue(ctx, sp[-3]); 
sp[1] = JS_DupValue(ctx, sp[-2]); sp[2] = JS_DupValue(ctx, sp[-1]); sp += 3; BREAK; CASE(OP_dup1): /* a b -> a a b */ sp[0] = sp[-1]; sp[-1] = JS_DupValue(ctx, sp[-2]); sp++; BREAK; CASE(OP_insert2): /* obj a -> a obj a (dup_x1) */ sp[0] = sp[-1]; sp[-1] = sp[-2]; sp[-2] = JS_DupValue(ctx, sp[0]); sp++; BREAK; CASE(OP_insert3): /* obj prop a -> a obj prop a (dup_x2) */ sp[0] = sp[-1]; sp[-1] = sp[-2]; sp[-2] = sp[-3]; sp[-3] = JS_DupValue(ctx, sp[0]); sp++; BREAK; CASE(OP_insert4): /* this obj prop a -> a this obj prop a */ sp[0] = sp[-1]; sp[-1] = sp[-2]; sp[-2] = sp[-3]; sp[-3] = sp[-4]; sp[-4] = JS_DupValue(ctx, sp[0]); sp++; BREAK; CASE(OP_perm3): /* obj a b -> a obj b (213) */ { JSValue tmp; tmp = sp[-2]; sp[-2] = sp[-3]; sp[-3] = tmp; } BREAK; CASE(OP_rot3l): /* x a b -> a b x (231) */ { JSValue tmp; tmp = sp[-3]; sp[-3] = sp[-2]; sp[-2] = sp[-1]; sp[-1] = tmp; } BREAK; CASE(OP_rot4l): /* x a b c -> a b c x */ { JSValue tmp; tmp = sp[-4]; sp[-4] = sp[-3]; sp[-3] = sp[-2]; sp[-2] = sp[-1]; sp[-1] = tmp; } BREAK; CASE(OP_rot5l): /* x a b c d -> a b c d x */ { JSValue tmp; tmp = sp[-5]; sp[-5] = sp[-4]; sp[-4] = sp[-3]; sp[-3] = sp[-2]; sp[-2] = sp[-1]; sp[-1] = tmp; } BREAK; CASE(OP_rot3r): /* a b x -> x a b (312) */ { JSValue tmp; tmp = sp[-1]; sp[-1] = sp[-2]; sp[-2] = sp[-3]; sp[-3] = tmp; } BREAK; CASE(OP_perm4): /* obj prop a b -> a obj prop b */ { JSValue tmp; tmp = sp[-2]; sp[-2] = sp[-3]; sp[-3] = sp[-4]; sp[-4] = tmp; } BREAK; CASE(OP_perm5): /* this obj prop a b -> a this obj prop b */ { JSValue tmp; tmp = sp[-2]; sp[-2] = sp[-3]; sp[-3] = sp[-4]; sp[-4] = sp[-5]; sp[-5] = tmp; } BREAK; CASE(OP_swap): /* a b -> b a */ { JSValue tmp; tmp = sp[-2]; sp[-2] = sp[-1]; sp[-1] = tmp; } BREAK; CASE(OP_swap2): /* a b c d -> c d a b */ { JSValue tmp1, tmp2; tmp1 = sp[-4]; tmp2 = sp[-3]; sp[-4] = sp[-2]; sp[-3] = sp[-1]; sp[-2] = tmp1; sp[-1] = tmp2; } BREAK; CASE(OP_fclosure): { JSValue bfunc = JS_DupValue(ctx, b->cpool[get_u32(pc)]); pc += 4; *sp++ = js_closure(ctx, bfunc, var_refs, sf); if (unlikely(JS_IsException(sp[-1]))) goto exception; } BREAK; #if SHORT_OPCODES CASE(OP_call0): CASE(OP_call1): CASE(OP_call2): CASE(OP_call3): call_argc = opcode - OP_call0; goto has_call_argc; #endif CASE(OP_call): CASE(OP_tail_call): { call_argc = get_u16(pc); pc += 2; goto has_call_argc; has_call_argc: call_argv = sp - call_argc; sf->cur_pc = pc; ret_val = JS_CallInternal(ctx, call_argv[-1], JS_UNDEFINED, JS_UNDEFINED, call_argc, call_argv, 0); if (unlikely(JS_IsException(ret_val))) goto exception; if (opcode == OP_tail_call) goto done; for(i = -1; i < call_argc; i++) JS_FreeValue(ctx, call_argv[i]); sp -= call_argc + 1; *sp++ = ret_val; } BREAK; CASE(OP_call_constructor): { call_argc = get_u16(pc); pc += 2; call_argv = sp - call_argc; sf->cur_pc = pc; ret_val = JS_CallConstructorInternal(ctx, call_argv[-2], call_argv[-1], call_argc, call_argv, 0); if (unlikely(JS_IsException(ret_val))) goto exception; for(i = -2; i < call_argc; i++) JS_FreeValue(ctx, call_argv[i]); sp -= call_argc + 2; *sp++ = ret_val; } BREAK; CASE(OP_call_method): CASE(OP_tail_call_method): { call_argc = get_u16(pc); pc += 2; call_argv = sp - call_argc; sf->cur_pc = pc; ret_val = JS_CallInternal(ctx, call_argv[-1], call_argv[-2], JS_UNDEFINED, call_argc, call_argv, 0); if (unlikely(JS_IsException(ret_val))) goto exception; if (opcode == OP_tail_call_method) goto done; for(i = -2; i < call_argc; i++) JS_FreeValue(ctx, call_argv[i]); sp -= call_argc + 2; *sp++ = ret_val; } BREAK; CASE(OP_array_from): { int i, ret; call_argc = 
get_u16(pc); pc += 2; ret_val = JS_NewArray(ctx); if (unlikely(JS_IsException(ret_val))) goto exception; call_argv = sp - call_argc; for(i = 0; i < call_argc; i++) { ret = JS_DefinePropertyValue(ctx, ret_val, __JS_AtomFromUInt32(i), call_argv[i], JS_PROP_C_W_E | JS_PROP_THROW); call_argv[i] = JS_UNDEFINED; if (ret < 0) { JS_FreeValue(ctx, ret_val); goto exception; } } sp -= call_argc; *sp++ = ret_val; } BREAK; CASE(OP_apply): { int magic; magic = get_u16(pc); pc += 2; ret_val = js_function_apply(ctx, sp[-3], 2, (JSValueConst *)&sp[-2], magic); if (unlikely(JS_IsException(ret_val))) goto exception; JS_FreeValue(ctx, sp[-3]); JS_FreeValue(ctx, sp[-2]); JS_FreeValue(ctx, sp[-1]); sp -= 3; *sp++ = ret_val; } BREAK; CASE(OP_return): ret_val = *--sp; goto done; CASE(OP_return_undef): ret_val = JS_UNDEFINED; goto done; CASE(OP_check_ctor_return): /* return TRUE if 'this' should be returned */ if (!JS_IsObject(sp[-1])) { if (!JS_IsUndefined(sp[-1])) { JS_ThrowTypeError(caller_ctx, "derived class constructor must return an object or undefined"); goto exception; } sp[0] = JS_TRUE; } else { sp[0] = JS_FALSE; } sp++; BREAK; CASE(OP_check_ctor): if (JS_IsUndefined(new_target)) { JS_ThrowTypeError(ctx, "class constructors must be invoked with 'new'"); goto exception; } BREAK; CASE(OP_check_brand): { int ret = JS_CheckBrand(ctx, sp[-2], sp[-1]); if (ret < 0) goto exception; if (!ret) { JS_ThrowTypeError(ctx, "invalid brand on object"); goto exception; } } BREAK; CASE(OP_add_brand): if (JS_AddBrand(ctx, sp[-2], sp[-1]) < 0) goto exception; JS_FreeValue(ctx, sp[-2]); JS_FreeValue(ctx, sp[-1]); sp -= 2; BREAK; CASE(OP_throw): JS_Throw(ctx, *--sp); goto exception; CASE(OP_throw_error): #define JS_THROW_VAR_RO 0 #define JS_THROW_VAR_REDECL 1 #define JS_THROW_VAR_UNINITIALIZED 2 #define JS_THROW_ERROR_DELETE_SUPER 3 #define JS_THROW_ERROR_ITERATOR_THROW 4 { JSAtom atom; int type; atom = get_u32(pc); type = pc[4]; pc += 5; if (type == JS_THROW_VAR_RO) JS_ThrowTypeErrorReadOnly(ctx, JS_PROP_THROW, atom); else if (type == JS_THROW_VAR_REDECL) JS_ThrowSyntaxErrorVarRedeclaration(ctx, atom); else if (type == JS_THROW_VAR_UNINITIALIZED) JS_ThrowReferenceErrorUninitialized(ctx, atom); else if (type == JS_THROW_ERROR_DELETE_SUPER) JS_ThrowReferenceError(ctx, "unsupported reference to 'super'"); else if (type == JS_THROW_ERROR_ITERATOR_THROW) JS_ThrowTypeError(ctx, "iterator does not have a throw method"); else JS_ThrowInternalError(ctx, "invalid throw var type %d", type); } goto exception; CASE(OP_eval): { JSValueConst obj; int scope_idx; call_argc = get_u16(pc); scope_idx = get_u16(pc + 2) - 1; pc += 4; call_argv = sp - call_argc; sf->cur_pc = pc; if (js_same_value(ctx, call_argv[-1], ctx->eval_obj)) { if (call_argc >= 1) obj = call_argv[0]; else obj = JS_UNDEFINED; ret_val = JS_EvalObject(ctx, JS_UNDEFINED, obj, JS_EVAL_TYPE_DIRECT, scope_idx); } else { ret_val = JS_CallInternal(ctx, call_argv[-1], JS_UNDEFINED, JS_UNDEFINED, call_argc, call_argv, 0); } if (unlikely(JS_IsException(ret_val))) goto exception; for(i = -1; i < call_argc; i++) JS_FreeValue(ctx, call_argv[i]); sp -= call_argc + 1; *sp++ = ret_val; } BREAK; /* could merge with OP_apply */ CASE(OP_apply_eval): { int scope_idx; uint32_t len; JSValue *tab; JSValueConst obj; scope_idx = get_u16(pc) - 1; pc += 2; tab = build_arg_list(ctx, &len, sp[-1]); if (!tab) goto exception; if (js_same_value(ctx, sp[-2], ctx->eval_obj)) { if (len >= 1) obj = tab[0]; else obj = JS_UNDEFINED; ret_val = JS_EvalObject(ctx, JS_UNDEFINED, obj, JS_EVAL_TYPE_DIRECT, scope_idx); } 
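/* direct eval: the callee was the built-in eval object, so the argument
           is evaluated in the caller's scope (scope_idx); any other callee
           falls through to an ordinary call below */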
else { ret_val = JS_Call(ctx, sp[-2], JS_UNDEFINED, len, (JSValueConst *)tab); } free_arg_list(ctx, tab, len); if (unlikely(JS_IsException(ret_val))) goto exception; JS_FreeValue(ctx, sp[-2]); JS_FreeValue(ctx, sp[-1]); sp -= 2; *sp++ = ret_val; } BREAK; CASE(OP_regexp): { sp[-2] = js_regexp_constructor_internal(ctx, JS_UNDEFINED, sp[-2], sp[-1]); sp--; } BREAK; CASE(OP_get_super): { JSValue proto; proto = JS_GetPrototype(ctx, sp[-1]); if (JS_IsException(proto)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = proto; } BREAK; CASE(OP_import): { JSValue val; val = js_dynamic_import(ctx, sp[-1]); if (JS_IsException(val)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = val; } BREAK; CASE(OP_check_var): { int ret; JSAtom atom; atom = get_u32(pc); pc += 4; ret = JS_CheckGlobalVar(ctx, atom); if (ret < 0) goto exception; *sp++ = JS_NewBool(ctx, ret); } BREAK; CASE(OP_get_var_undef): CASE(OP_get_var): { JSValue val; JSAtom atom; atom = get_u32(pc); pc += 4; val = JS_GetGlobalVar(ctx, atom, opcode - OP_get_var_undef); if (unlikely(JS_IsException(val))) goto exception; *sp++ = val; } BREAK; CASE(OP_put_var): CASE(OP_put_var_init): { int ret; JSAtom atom; atom = get_u32(pc); pc += 4; ret = JS_SetGlobalVar(ctx, atom, sp[-1], opcode - OP_put_var); sp--; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_put_var_strict): { int ret; JSAtom atom; atom = get_u32(pc); pc += 4; /* sp[-2] is JS_TRUE or JS_FALSE */ if (unlikely(!JS_VALUE_GET_INT(sp[-2]))) { JS_ThrowReferenceErrorNotDefined(ctx, atom); goto exception; } ret = JS_SetGlobalVar(ctx, atom, sp[-1], 2); sp -= 2; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_check_define_var): { JSAtom atom; int flags; atom = get_u32(pc); flags = pc[4]; pc += 5; if (JS_CheckDefineGlobalVar(ctx, atom, flags)) goto exception; } BREAK; CASE(OP_define_var): { JSAtom atom; int flags; atom = get_u32(pc); flags = pc[4]; pc += 5; if (JS_DefineGlobalVar(ctx, atom, flags)) goto exception; } BREAK; CASE(OP_define_func): { JSAtom atom; int flags; atom = get_u32(pc); flags = pc[4]; pc += 5; if (JS_DefineGlobalFunction(ctx, atom, sp[-1], flags)) goto exception; JS_FreeValue(ctx, sp[-1]); sp--; } BREAK; CASE(OP_get_loc): { int idx; idx = get_u16(pc); pc += 2; sp[0] = JS_DupValue(ctx, var_buf[idx]); sp++; } BREAK; CASE(OP_put_loc): { int idx; idx = get_u16(pc); pc += 2; set_value(ctx, &var_buf[idx], sp[-1]); sp--; } BREAK; CASE(OP_set_loc): { int idx; idx = get_u16(pc); pc += 2; set_value(ctx, &var_buf[idx], JS_DupValue(ctx, sp[-1])); } BREAK; CASE(OP_get_arg): { int idx; idx = get_u16(pc); pc += 2; sp[0] = JS_DupValue(ctx, arg_buf[idx]); sp++; } BREAK; CASE(OP_put_arg): { int idx; idx = get_u16(pc); pc += 2; set_value(ctx, &arg_buf[idx], sp[-1]); sp--; } BREAK; CASE(OP_set_arg): { int idx; idx = get_u16(pc); pc += 2; set_value(ctx, &arg_buf[idx], JS_DupValue(ctx, sp[-1])); } BREAK; #if SHORT_OPCODES CASE(OP_get_loc8): *sp++ = JS_DupValue(ctx, var_buf[*pc++]); BREAK; CASE(OP_put_loc8): set_value(ctx, &var_buf[*pc++], *--sp); BREAK; CASE(OP_set_loc8): set_value(ctx, &var_buf[*pc++], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_get_loc0): *sp++ = JS_DupValue(ctx, var_buf[0]); BREAK; CASE(OP_get_loc1): *sp++ = JS_DupValue(ctx, var_buf[1]); BREAK; CASE(OP_get_loc2): *sp++ = JS_DupValue(ctx, var_buf[2]); BREAK; CASE(OP_get_loc3): *sp++ = JS_DupValue(ctx, var_buf[3]); BREAK; CASE(OP_put_loc0): set_value(ctx, &var_buf[0], *--sp); BREAK; CASE(OP_put_loc1): set_value(ctx, &var_buf[1], *--sp); BREAK; CASE(OP_put_loc2): set_value(ctx, &var_buf[2], *--sp); BREAK; 
CASE(OP_put_loc3): set_value(ctx, &var_buf[3], *--sp); BREAK; CASE(OP_set_loc0): set_value(ctx, &var_buf[0], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_loc1): set_value(ctx, &var_buf[1], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_loc2): set_value(ctx, &var_buf[2], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_loc3): set_value(ctx, &var_buf[3], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_get_arg0): *sp++ = JS_DupValue(ctx, arg_buf[0]); BREAK; CASE(OP_get_arg1): *sp++ = JS_DupValue(ctx, arg_buf[1]); BREAK; CASE(OP_get_arg2): *sp++ = JS_DupValue(ctx, arg_buf[2]); BREAK; CASE(OP_get_arg3): *sp++ = JS_DupValue(ctx, arg_buf[3]); BREAK; CASE(OP_put_arg0): set_value(ctx, &arg_buf[0], *--sp); BREAK; CASE(OP_put_arg1): set_value(ctx, &arg_buf[1], *--sp); BREAK; CASE(OP_put_arg2): set_value(ctx, &arg_buf[2], *--sp); BREAK; CASE(OP_put_arg3): set_value(ctx, &arg_buf[3], *--sp); BREAK; CASE(OP_set_arg0): set_value(ctx, &arg_buf[0], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_arg1): set_value(ctx, &arg_buf[1], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_arg2): set_value(ctx, &arg_buf[2], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_arg3): set_value(ctx, &arg_buf[3], JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_get_var_ref0): *sp++ = JS_DupValue(ctx, *var_refs[0]->pvalue); BREAK; CASE(OP_get_var_ref1): *sp++ = JS_DupValue(ctx, *var_refs[1]->pvalue); BREAK; CASE(OP_get_var_ref2): *sp++ = JS_DupValue(ctx, *var_refs[2]->pvalue); BREAK; CASE(OP_get_var_ref3): *sp++ = JS_DupValue(ctx, *var_refs[3]->pvalue); BREAK; CASE(OP_put_var_ref0): set_value(ctx, var_refs[0]->pvalue, *--sp); BREAK; CASE(OP_put_var_ref1): set_value(ctx, var_refs[1]->pvalue, *--sp); BREAK; CASE(OP_put_var_ref2): set_value(ctx, var_refs[2]->pvalue, *--sp); BREAK; CASE(OP_put_var_ref3): set_value(ctx, var_refs[3]->pvalue, *--sp); BREAK; CASE(OP_set_var_ref0): set_value(ctx, var_refs[0]->pvalue, JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_var_ref1): set_value(ctx, var_refs[1]->pvalue, JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_var_ref2): set_value(ctx, var_refs[2]->pvalue, JS_DupValue(ctx, sp[-1])); BREAK; CASE(OP_set_var_ref3): set_value(ctx, var_refs[3]->pvalue, JS_DupValue(ctx, sp[-1])); BREAK; #endif CASE(OP_get_var_ref): { int idx; JSValue val; idx = get_u16(pc); pc += 2; val = *var_refs[idx]->pvalue; sp[0] = JS_DupValue(ctx, val); sp++; } BREAK; CASE(OP_put_var_ref): { int idx; idx = get_u16(pc); pc += 2; set_value(ctx, var_refs[idx]->pvalue, sp[-1]); sp--; } BREAK; CASE(OP_set_var_ref): { int idx; idx = get_u16(pc); pc += 2; set_value(ctx, var_refs[idx]->pvalue, JS_DupValue(ctx, sp[-1])); } BREAK; CASE(OP_get_var_ref_check): { int idx; JSValue val; idx = get_u16(pc); pc += 2; val = *var_refs[idx]->pvalue; if (unlikely(JS_IsUninitialized(val))) { JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, TRUE); goto exception; } sp[0] = JS_DupValue(ctx, val); sp++; } BREAK; CASE(OP_put_var_ref_check): { int idx; idx = get_u16(pc); pc += 2; if (unlikely(JS_IsUninitialized(*var_refs[idx]->pvalue))) { JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, TRUE); goto exception; } set_value(ctx, var_refs[idx]->pvalue, sp[-1]); sp--; } BREAK; CASE(OP_put_var_ref_check_init): { int idx; idx = get_u16(pc); pc += 2; if (unlikely(!JS_IsUninitialized(*var_refs[idx]->pvalue))) { JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, TRUE); goto exception; } set_value(ctx, var_refs[idx]->pvalue, sp[-1]); sp--; } BREAK; CASE(OP_set_loc_uninitialized): { int idx; idx = get_u16(pc); pc += 2; set_value(ctx, &var_buf[idx], JS_UNINITIALIZED); } BREAK; 
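/* Note: the *_check opcode variants below differ from the plain
   OP_get_loc/OP_put_loc handlers above in that they guard lexically
   scoped bindings (let/const, and 'this' in derived constructors) that
   may still be in the temporal dead zone: when the slot still holds
   JS_UNINITIALIZED they raise a ReferenceError instead of reading or
   overwriting the placeholder value. */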
CASE(OP_get_loc_check): { int idx; idx = get_u16(pc); pc += 2; if (unlikely(JS_IsUninitialized(var_buf[idx]))) { JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, FALSE); goto exception; } sp[0] = JS_DupValue(ctx, var_buf[idx]); sp++; } BREAK; CASE(OP_get_loc_checkthis): { int idx; idx = get_u16(pc); pc += 2; if (unlikely(JS_IsUninitialized(var_buf[idx]))) { JS_ThrowReferenceErrorUninitialized2(caller_ctx, b, idx, FALSE); goto exception; } sp[0] = JS_DupValue(ctx, var_buf[idx]); sp++; } BREAK; CASE(OP_put_loc_check): { int idx; idx = get_u16(pc); pc += 2; if (unlikely(JS_IsUninitialized(var_buf[idx]))) { JS_ThrowReferenceErrorUninitialized2(ctx, b, idx, FALSE); goto exception; } set_value(ctx, &var_buf[idx], sp[-1]); sp--; } BREAK; CASE(OP_put_loc_check_init): { int idx; idx = get_u16(pc); pc += 2; if (unlikely(!JS_IsUninitialized(var_buf[idx]))) { JS_ThrowReferenceError(ctx, "'this' can be initialized only once"); goto exception; } set_value(ctx, &var_buf[idx], sp[-1]); sp--; } BREAK; CASE(OP_close_loc): { int idx; idx = get_u16(pc); pc += 2; close_lexical_var(ctx, sf, idx, FALSE); } BREAK; CASE(OP_make_loc_ref): CASE(OP_make_arg_ref): CASE(OP_make_var_ref_ref): { JSVarRef *var_ref; JSProperty *pr; JSAtom atom; int idx; atom = get_u32(pc); idx = get_u16(pc + 4); pc += 6; *sp++ = JS_NewObjectProto(ctx, JS_NULL); if (unlikely(JS_IsException(sp[-1]))) goto exception; if (opcode == OP_make_var_ref_ref) { var_ref = var_refs[idx]; var_ref->header.ref_count++; } else { var_ref = get_var_ref(ctx, sf, idx, opcode == OP_make_arg_ref); if (!var_ref) goto exception; } pr = add_property(ctx, JS_VALUE_GET_OBJ(sp[-1]), atom, JS_PROP_WRITABLE | JS_PROP_VARREF); if (!pr) { free_var_ref(rt, var_ref); goto exception; } pr->u.var_ref = var_ref; *sp++ = JS_AtomToValue(ctx, atom); } BREAK; CASE(OP_make_var_ref): { JSAtom atom; atom = get_u32(pc); pc += 4; if (JS_GetGlobalVarRef(ctx, atom, sp)) goto exception; sp += 2; } BREAK; CASE(OP_goto): pc += (int32_t)get_u32(pc); if (unlikely(js_poll_interrupts(ctx))) goto exception; BREAK; #if SHORT_OPCODES CASE(OP_goto16): pc += (int16_t)get_u16(pc); if (unlikely(js_poll_interrupts(ctx))) goto exception; BREAK; CASE(OP_goto8): pc += (int8_t)pc[0]; if (unlikely(js_poll_interrupts(ctx))) goto exception; BREAK; #endif CASE(OP_if_true): { int res; JSValue op1; op1 = sp[-1]; pc += 4; if ((uint32_t)JS_VALUE_GET_TAG(op1) <= JS_TAG_UNDEFINED) { res = JS_VALUE_GET_INT(op1); } else { res = JS_ToBoolFree(ctx, op1); } sp--; if (res) { pc += (int32_t)get_u32(pc - 4) - 4; } if (unlikely(js_poll_interrupts(ctx))) goto exception; } BREAK; CASE(OP_if_false): { int res; JSValue op1; op1 = sp[-1]; pc += 4; if ((uint32_t)JS_VALUE_GET_TAG(op1) <= JS_TAG_UNDEFINED) { res = JS_VALUE_GET_INT(op1); } else { res = JS_ToBoolFree(ctx, op1); } sp--; if (!res) { pc += (int32_t)get_u32(pc - 4) - 4; } if (unlikely(js_poll_interrupts(ctx))) goto exception; } BREAK; #if SHORT_OPCODES CASE(OP_if_true8): { int res; JSValue op1; op1 = sp[-1]; pc += 1; if ((uint32_t)JS_VALUE_GET_TAG(op1) <= JS_TAG_UNDEFINED) { res = JS_VALUE_GET_INT(op1); } else { res = JS_ToBoolFree(ctx, op1); } sp--; if (res) { pc += (int8_t)pc[-1] - 1; } if (unlikely(js_poll_interrupts(ctx))) goto exception; } BREAK; CASE(OP_if_false8): { int res; JSValue op1; op1 = sp[-1]; pc += 1; if ((uint32_t)JS_VALUE_GET_TAG(op1) <= JS_TAG_UNDEFINED) { res = JS_VALUE_GET_INT(op1); } else { res = JS_ToBoolFree(ctx, op1); } sp--; if (!res) { pc += (int8_t)pc[-1] - 1; } if (unlikely(js_poll_interrupts(ctx))) goto exception; } BREAK; #endif 
CASE(OP_catch): { int32_t diff; diff = get_u32(pc); sp[0] = JS_NewCatchOffset(ctx, pc + diff - b->byte_code_buf); sp++; pc += 4; } BREAK; CASE(OP_gosub): { int32_t diff; diff = get_u32(pc); /* XXX: should have a different tag to avoid security flaw */ sp[0] = JS_NewInt32(ctx, pc + 4 - b->byte_code_buf); sp++; pc += diff; } BREAK; CASE(OP_ret): { JSValue op1; uint32_t pos; op1 = sp[-1]; if (unlikely(JS_VALUE_GET_TAG(op1) != JS_TAG_INT)) goto ret_fail; pos = JS_VALUE_GET_INT(op1); if (unlikely(pos >= b->byte_code_len)) { ret_fail: JS_ThrowInternalError(ctx, "invalid ret value"); goto exception; } sp--; pc = b->byte_code_buf + pos; } BREAK; CASE(OP_for_in_start): if (js_for_in_start(ctx, sp)) goto exception; BREAK; CASE(OP_for_in_next): if (js_for_in_next(ctx, sp)) goto exception; sp += 2; BREAK; CASE(OP_for_of_start): if (js_for_of_start(ctx, sp, FALSE)) goto exception; sp += 1; *sp++ = JS_NewCatchOffset(ctx, 0); BREAK; CASE(OP_for_of_next): { int offset = -3 - pc[0]; pc += 1; if (js_for_of_next(ctx, sp, offset)) goto exception; sp += 2; } BREAK; CASE(OP_for_await_of_start): if (js_for_of_start(ctx, sp, TRUE)) goto exception; sp += 1; *sp++ = JS_NewCatchOffset(ctx, 0); BREAK; CASE(OP_iterator_get_value_done): if (js_iterator_get_value_done(ctx, sp)) goto exception; sp += 1; BREAK; CASE(OP_iterator_check_object): if (unlikely(!JS_IsObject(sp[-1]))) { JS_ThrowTypeError(ctx, "iterator must return an object"); goto exception; } BREAK; CASE(OP_iterator_close): /* iter_obj next catch_offset -> */ sp--; /* drop the catch offset to avoid getting caught by exception */ JS_FreeValue(ctx, sp[-1]); /* drop the next method */ sp--; if (!JS_IsUndefined(sp[-1])) { if (JS_IteratorClose(ctx, sp[-1], FALSE)) goto exception; JS_FreeValue(ctx, sp[-1]); } sp--; BREAK; CASE(OP_nip_catch): { JSValue ret_val; /* catch_offset ... ret_val -> ret_eval */ ret_val = *--sp; while (sp > stack_buf && JS_VALUE_GET_TAG(sp[-1]) != JS_TAG_CATCH_OFFSET) { JS_FreeValue(ctx, *--sp); } if (unlikely(sp == stack_buf)) { JS_ThrowInternalError(ctx, "nip_catch"); JS_FreeValue(ctx, ret_val); goto exception; } sp[-1] = ret_val; } BREAK; CASE(OP_iterator_next): /* stack: iter_obj next catch_offset val */ { JSValue ret; ret = JS_Call(ctx, sp[-3], sp[-4], 1, (JSValueConst *)(sp - 1)); if (JS_IsException(ret)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = ret; } BREAK; CASE(OP_iterator_call): /* stack: iter_obj next catch_offset val */ { JSValue method, ret; BOOL ret_flag; int flags; flags = *pc++; method = JS_GetProperty(ctx, sp[-4], (flags & 1) ? 
JS_ATOM_throw : JS_ATOM_return); if (JS_IsException(method)) goto exception; if (JS_IsUndefined(method) || JS_IsNull(method)) { ret_flag = TRUE; } else { if (flags & 2) { /* no argument */ ret = JS_CallFree(ctx, method, sp[-4], 0, NULL); } else { ret = JS_CallFree(ctx, method, sp[-4], 1, (JSValueConst *)(sp - 1)); } if (JS_IsException(ret)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = ret; ret_flag = FALSE; } sp[0] = JS_NewBool(ctx, ret_flag); sp += 1; } BREAK; CASE(OP_lnot): { int res; JSValue op1; op1 = sp[-1]; if ((uint32_t)JS_VALUE_GET_TAG(op1) <= JS_TAG_UNDEFINED) { res = JS_VALUE_GET_INT(op1) != 0; } else { res = JS_ToBoolFree(ctx, op1); } sp[-1] = JS_NewBool(ctx, !res); } BREAK; CASE(OP_get_field): { JSValue val; JSAtom atom; atom = get_u32(pc); pc += 4; val = JS_GetProperty(ctx, sp[-1], atom); if (unlikely(JS_IsException(val))) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = val; } BREAK; CASE(OP_get_field2): { JSValue val; JSAtom atom; atom = get_u32(pc); pc += 4; val = JS_GetProperty(ctx, sp[-1], atom); if (unlikely(JS_IsException(val))) goto exception; *sp++ = val; } BREAK; CASE(OP_put_field): { int ret; JSAtom atom; atom = get_u32(pc); pc += 4; ret = JS_SetPropertyInternal(ctx, sp[-2], atom, sp[-1], sp[-2], JS_PROP_THROW_STRICT); JS_FreeValue(ctx, sp[-2]); sp -= 2; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_private_symbol): { JSAtom atom; JSValue val; atom = get_u32(pc); pc += 4; val = JS_NewSymbolFromAtom(ctx, atom, JS_ATOM_TYPE_PRIVATE); if (JS_IsException(val)) goto exception; *sp++ = val; } BREAK; CASE(OP_get_private_field): { JSValue val; val = JS_GetPrivateField(ctx, sp[-2], sp[-1]); JS_FreeValue(ctx, sp[-1]); JS_FreeValue(ctx, sp[-2]); sp[-2] = val; sp--; if (unlikely(JS_IsException(val))) goto exception; } BREAK; CASE(OP_put_private_field): { int ret; ret = JS_SetPrivateField(ctx, sp[-3], sp[-1], sp[-2]); JS_FreeValue(ctx, sp[-3]); JS_FreeValue(ctx, sp[-1]); sp -= 3; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_define_private_field): { int ret; ret = JS_DefinePrivateField(ctx, sp[-3], sp[-2], sp[-1]); JS_FreeValue(ctx, sp[-2]); sp -= 2; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_define_field): { int ret; JSAtom atom; atom = get_u32(pc); pc += 4; ret = JS_DefinePropertyValue(ctx, sp[-2], atom, sp[-1], JS_PROP_C_W_E | JS_PROP_THROW); sp--; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_set_name): { int ret; JSAtom atom; atom = get_u32(pc); pc += 4; ret = JS_DefineObjectName(ctx, sp[-1], atom, JS_PROP_CONFIGURABLE); if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_set_name_computed): { int ret; ret = JS_DefineObjectNameComputed(ctx, sp[-1], sp[-2], JS_PROP_CONFIGURABLE); if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_set_proto): { JSValue proto; proto = sp[-1]; if (JS_IsObject(proto) || JS_IsNull(proto)) { if (JS_SetPrototypeInternal(ctx, sp[-2], proto, TRUE) < 0) goto exception; } JS_FreeValue(ctx, proto); sp--; } BREAK; CASE(OP_set_home_object): js_method_set_home_object(ctx, sp[-1], sp[-2]); BREAK; CASE(OP_define_method): CASE(OP_define_method_computed): { JSValue getter, setter, value; JSValueConst obj; JSAtom atom; int flags, ret, op_flags; BOOL is_computed; #define OP_DEFINE_METHOD_METHOD 0 #define OP_DEFINE_METHOD_GETTER 1 #define OP_DEFINE_METHOD_SETTER 2 #define OP_DEFINE_METHOD_ENUMERABLE 4 is_computed = (opcode == OP_define_method_computed); if (is_computed) { atom = JS_ValueToAtom(ctx, sp[-2]); if (unlikely(atom == JS_ATOM_NULL)) goto exception; opcode += OP_define_method - 
OP_define_method_computed; } else { atom = get_u32(pc); pc += 4; } op_flags = *pc++; obj = sp[-2 - is_computed]; flags = JS_PROP_HAS_CONFIGURABLE | JS_PROP_CONFIGURABLE | JS_PROP_HAS_ENUMERABLE | JS_PROP_THROW; if (op_flags & OP_DEFINE_METHOD_ENUMERABLE) flags |= JS_PROP_ENUMERABLE; op_flags &= 3; value = JS_UNDEFINED; getter = JS_UNDEFINED; setter = JS_UNDEFINED; if (op_flags == OP_DEFINE_METHOD_METHOD) { value = sp[-1]; flags |= JS_PROP_HAS_VALUE | JS_PROP_HAS_WRITABLE | JS_PROP_WRITABLE; } else if (op_flags == OP_DEFINE_METHOD_GETTER) { getter = sp[-1]; flags |= JS_PROP_HAS_GET; } else { setter = sp[-1]; flags |= JS_PROP_HAS_SET; } ret = js_method_set_properties(ctx, sp[-1], atom, flags, obj); if (ret >= 0) { ret = JS_DefineProperty(ctx, obj, atom, value, getter, setter, flags); } JS_FreeValue(ctx, sp[-1]); if (is_computed) { JS_FreeAtom(ctx, atom); JS_FreeValue(ctx, sp[-2]); } sp -= 1 + is_computed; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_define_class): CASE(OP_define_class_computed): { int class_flags; JSAtom atom; atom = get_u32(pc); class_flags = pc[4]; pc += 5; if (js_op_define_class(ctx, sp, atom, class_flags, var_refs, sf, (opcode == OP_define_class_computed)) < 0) goto exception; } BREAK; CASE(OP_get_array_el): { JSValue val; val = JS_GetPropertyValue(ctx, sp[-2], sp[-1]); JS_FreeValue(ctx, sp[-2]); sp[-2] = val; sp--; if (unlikely(JS_IsException(val))) goto exception; } BREAK; CASE(OP_get_array_el2): { JSValue val; val = JS_GetPropertyValue(ctx, sp[-2], sp[-1]); sp[-1] = val; if (unlikely(JS_IsException(val))) goto exception; } BREAK; CASE(OP_get_ref_value): { JSValue val; if (unlikely(JS_IsUndefined(sp[-2]))) { JSAtom atom = JS_ValueToAtom(ctx, sp[-1]); if (atom != JS_ATOM_NULL) { JS_ThrowReferenceErrorNotDefined(ctx, atom); JS_FreeAtom(ctx, atom); } goto exception; } val = JS_GetPropertyValue(ctx, sp[-2], JS_DupValue(ctx, sp[-1])); if (unlikely(JS_IsException(val))) goto exception; sp[0] = val; sp++; } BREAK; CASE(OP_get_super_value): { JSValue val; JSAtom atom; atom = JS_ValueToAtom(ctx, sp[-1]); if (unlikely(atom == JS_ATOM_NULL)) goto exception; val = JS_GetPropertyInternal(ctx, sp[-2], atom, sp[-3], FALSE); JS_FreeAtom(ctx, atom); if (unlikely(JS_IsException(val))) goto exception; JS_FreeValue(ctx, sp[-1]); JS_FreeValue(ctx, sp[-2]); JS_FreeValue(ctx, sp[-3]); sp[-3] = val; sp -= 2; } BREAK; CASE(OP_put_array_el): { int ret; ret = JS_SetPropertyValue(ctx, sp[-3], sp[-2], sp[-1], JS_PROP_THROW_STRICT); JS_FreeValue(ctx, sp[-3]); sp -= 3; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_put_ref_value): { int ret, flags; flags = JS_PROP_THROW_STRICT; if (unlikely(JS_IsUndefined(sp[-3]))) { if (is_strict_mode(ctx)) { JSAtom atom = JS_ValueToAtom(ctx, sp[-2]); if (atom != JS_ATOM_NULL) { JS_ThrowReferenceErrorNotDefined(ctx, atom); JS_FreeAtom(ctx, atom); } goto exception; } else { sp[-3] = JS_DupValue(ctx, ctx->global_obj); } } else { if (is_strict_mode(ctx)) flags |= JS_PROP_NO_ADD; } ret = JS_SetPropertyValue(ctx, sp[-3], sp[-2], sp[-1], flags); JS_FreeValue(ctx, sp[-3]); sp -= 3; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_put_super_value): { int ret; JSAtom atom; if (JS_VALUE_GET_TAG(sp[-3]) != JS_TAG_OBJECT) { JS_ThrowTypeErrorNotAnObject(ctx); goto exception; } atom = JS_ValueToAtom(ctx, sp[-2]); if (unlikely(atom == JS_ATOM_NULL)) goto exception; ret = JS_SetPropertyInternal(ctx, sp[-3], atom, sp[-1], sp[-4], JS_PROP_THROW_STRICT); JS_FreeAtom(ctx, atom); JS_FreeValue(ctx, sp[-4]); JS_FreeValue(ctx, sp[-3]); JS_FreeValue(ctx, sp[-2]); 
sp -= 4; if (ret < 0) goto exception; } BREAK; CASE(OP_define_array_el): { int ret; ret = JS_DefinePropertyValueValue(ctx, sp[-3], JS_DupValue(ctx, sp[-2]), sp[-1], JS_PROP_C_W_E | JS_PROP_THROW); sp -= 1; if (unlikely(ret < 0)) goto exception; } BREAK; CASE(OP_append): /* array pos enumobj -- array pos */ { if (js_append_enumerate(ctx, sp)) goto exception; JS_FreeValue(ctx, *--sp); } BREAK; CASE(OP_copy_data_properties): /* target source excludeList */ { /* stack offsets (-1 based): 2 bits for target, 3 bits for source, 2 bits for exclusionList */ int mask; mask = *pc++; if (JS_CopyDataProperties(ctx, sp[-1 - (mask & 3)], sp[-1 - ((mask >> 2) & 7)], sp[-1 - ((mask >> 5) & 7)], 0)) goto exception; } BREAK; CASE(OP_add): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { int64_t r; r = (int64_t)JS_VALUE_GET_INT(op1) + JS_VALUE_GET_INT(op2); if (unlikely((int)r != r)) goto add_slow; sp[-2] = JS_NewInt32(ctx, r); sp--; } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { sp[-2] = __JS_NewFloat64(ctx, JS_VALUE_GET_FLOAT64(op1) + JS_VALUE_GET_FLOAT64(op2)); sp--; } else { add_slow: if (js_add_slow(ctx, sp)) goto exception; sp--; } } BREAK; CASE(OP_add_loc): { JSValue *pv; int idx; idx = *pc; pc += 1; pv = &var_buf[idx]; if (likely(JS_VALUE_IS_BOTH_INT(*pv, sp[-1]))) { int64_t r; r = (int64_t)JS_VALUE_GET_INT(*pv) + JS_VALUE_GET_INT(sp[-1]); if (unlikely((int)r != r)) goto add_loc_slow; *pv = JS_NewInt32(ctx, r); sp--; } else if (JS_VALUE_GET_TAG(*pv) == JS_TAG_STRING) { JSValue op1; op1 = sp[-1]; sp--; op1 = JS_ToPrimitiveFree(ctx, op1, HINT_NONE); if (JS_IsException(op1)) goto exception; op1 = JS_ConcatString(ctx, JS_DupValue(ctx, *pv), op1); if (JS_IsException(op1)) goto exception; set_value(ctx, pv, op1); } else { JSValue ops[2]; add_loc_slow: /* In case of exception, js_add_slow frees ops[0] and ops[1], so we must duplicate *pv */ ops[0] = JS_DupValue(ctx, *pv); ops[1] = sp[-1]; sp--; if (js_add_slow(ctx, ops + 2)) goto exception; set_value(ctx, pv, ops[0]); } } BREAK; CASE(OP_sub): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { int64_t r; r = (int64_t)JS_VALUE_GET_INT(op1) - JS_VALUE_GET_INT(op2); if (unlikely((int)r != r)) goto binary_arith_slow; sp[-2] = JS_NewInt32(ctx, r); sp--; } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { sp[-2] = __JS_NewFloat64(ctx, JS_VALUE_GET_FLOAT64(op1) - JS_VALUE_GET_FLOAT64(op2)); sp--; } else { goto binary_arith_slow; } } BREAK; CASE(OP_mul): { JSValue op1, op2; double d; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { int32_t v1, v2; int64_t r; v1 = JS_VALUE_GET_INT(op1); v2 = JS_VALUE_GET_INT(op2); r = (int64_t)v1 * v2; if (unlikely((int)r != r)) { #ifdef CONFIG_BIGNUM if (unlikely(sf->js_mode & JS_MODE_MATH) && (r < -MAX_SAFE_INTEGER || r > MAX_SAFE_INTEGER)) goto binary_arith_slow; #endif d = (double)r; goto mul_fp_res; } /* need to test zero case for -0 result */ if (unlikely(r == 0 && (v1 | v2) < 0)) { d = -0.0; goto mul_fp_res; } sp[-2] = JS_NewInt32(ctx, r); sp--; } else if (JS_VALUE_IS_BOTH_FLOAT(op1, op2)) { #ifdef CONFIG_BIGNUM if (unlikely(sf->js_mode & JS_MODE_MATH)) goto binary_arith_slow; #endif d = JS_VALUE_GET_FLOAT64(op1) * JS_VALUE_GET_FLOAT64(op2); mul_fp_res: sp[-2] = __JS_NewFloat64(ctx, d); sp--; } else { goto binary_arith_slow; } } BREAK; CASE(OP_div): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { int v1, v2; if (unlikely(sf->js_mode & JS_MODE_MATH)) goto binary_arith_slow; v1 
= JS_VALUE_GET_INT(op1); v2 = JS_VALUE_GET_INT(op2); sp[-2] = JS_NewFloat64(ctx, (double)v1 / (double)v2); sp--; } else { goto binary_arith_slow; } } BREAK; CASE(OP_mod): #ifdef CONFIG_BIGNUM CASE(OP_math_mod): #endif { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { int v1, v2, r; v1 = JS_VALUE_GET_INT(op1); v2 = JS_VALUE_GET_INT(op2); /* We must avoid v2 = 0, v1 = INT32_MIN and v2 = -1 and the cases where the result is -0. */ if (unlikely(v1 < 0 || v2 <= 0)) goto binary_arith_slow; r = v1 % v2; sp[-2] = JS_NewInt32(ctx, r); sp--; } else { goto binary_arith_slow; } } BREAK; CASE(OP_pow): binary_arith_slow: if (js_binary_arith_slow(ctx, sp, opcode)) goto exception; sp--; BREAK; CASE(OP_plus): { JSValue op1; uint32_t tag; op1 = sp[-1]; tag = JS_VALUE_GET_TAG(op1); if (tag == JS_TAG_INT || JS_TAG_IS_FLOAT64(tag)) { } else { if (js_unary_arith_slow(ctx, sp, opcode)) goto exception; } } BREAK; CASE(OP_neg): { JSValue op1; uint32_t tag; int val; double d; op1 = sp[-1]; tag = JS_VALUE_GET_TAG(op1); if (tag == JS_TAG_INT) { val = JS_VALUE_GET_INT(op1); /* Note: -0 cannot be expressed as integer */ if (unlikely(val == 0)) { d = -0.0; goto neg_fp_res; } if (unlikely(val == INT32_MIN)) { d = -(double)val; goto neg_fp_res; } sp[-1] = JS_NewInt32(ctx, -val); } else if (JS_TAG_IS_FLOAT64(tag)) { d = -JS_VALUE_GET_FLOAT64(op1); neg_fp_res: sp[-1] = __JS_NewFloat64(ctx, d); } else { if (js_unary_arith_slow(ctx, sp, opcode)) goto exception; } } BREAK; CASE(OP_inc): { JSValue op1; int val; op1 = sp[-1]; if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { val = JS_VALUE_GET_INT(op1); if (unlikely(val == INT32_MAX)) goto inc_slow; sp[-1] = JS_NewInt32(ctx, val + 1); } else { inc_slow: if (js_unary_arith_slow(ctx, sp, opcode)) goto exception; } } BREAK; CASE(OP_dec): { JSValue op1; int val; op1 = sp[-1]; if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { val = JS_VALUE_GET_INT(op1); if (unlikely(val == INT32_MIN)) goto dec_slow; sp[-1] = JS_NewInt32(ctx, val - 1); } else { dec_slow: if (js_unary_arith_slow(ctx, sp, opcode)) goto exception; } } BREAK; CASE(OP_post_inc): CASE(OP_post_dec): if (js_post_inc_slow(ctx, sp, opcode)) goto exception; sp++; BREAK; CASE(OP_inc_loc): { JSValue op1; int val; int idx; idx = *pc; pc += 1; op1 = var_buf[idx]; if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { val = JS_VALUE_GET_INT(op1); if (unlikely(val == INT32_MAX)) goto inc_loc_slow; var_buf[idx] = JS_NewInt32(ctx, val + 1); } else { inc_loc_slow: /* must duplicate otherwise the variable value may be destroyed before JS code accesses it */ op1 = JS_DupValue(ctx, op1); if (js_unary_arith_slow(ctx, &op1 + 1, OP_inc)) goto exception; set_value(ctx, &var_buf[idx], op1); } } BREAK; CASE(OP_dec_loc): { JSValue op1; int val; int idx; idx = *pc; pc += 1; op1 = var_buf[idx]; if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { val = JS_VALUE_GET_INT(op1); if (unlikely(val == INT32_MIN)) goto dec_loc_slow; var_buf[idx] = JS_NewInt32(ctx, val - 1); } else { dec_loc_slow: /* must duplicate otherwise the variable value may be destroyed before JS code accesses it */ op1 = JS_DupValue(ctx, op1); if (js_unary_arith_slow(ctx, &op1 + 1, OP_dec)) goto exception; set_value(ctx, &var_buf[idx], op1); } } BREAK; CASE(OP_not): { JSValue op1; op1 = sp[-1]; if (JS_VALUE_GET_TAG(op1) == JS_TAG_INT) { sp[-1] = JS_NewInt32(ctx, ~JS_VALUE_GET_INT(op1)); } else { if (js_not_slow(ctx, sp)) goto exception; } } BREAK; CASE(OP_shl): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { uint32_t v1, v2; v1 = 
JS_VALUE_GET_INT(op1); v2 = JS_VALUE_GET_INT(op2); #ifdef CONFIG_BIGNUM { int64_t r; if (unlikely(sf->js_mode & JS_MODE_MATH)) { if (v2 > 0x1f) goto shl_slow; r = (int64_t)v1 << v2; if ((int)r != r) goto shl_slow; } else { v2 &= 0x1f; } } #else v2 &= 0x1f; #endif sp[-2] = JS_NewInt32(ctx, v1 << v2); sp--; } else { #ifdef CONFIG_BIGNUM shl_slow: #endif if (js_binary_logic_slow(ctx, sp, opcode)) goto exception; sp--; } } BREAK; CASE(OP_shr): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { uint32_t v2; v2 = JS_VALUE_GET_INT(op2); /* v1 >>> v2 retains its JS semantics if CONFIG_BIGNUM */ v2 &= 0x1f; sp[-2] = JS_NewUint32(ctx, (uint32_t)JS_VALUE_GET_INT(op1) >> v2); sp--; } else { if (js_shr_slow(ctx, sp)) goto exception; sp--; } } BREAK; CASE(OP_sar): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { uint32_t v2; v2 = JS_VALUE_GET_INT(op2); #ifdef CONFIG_BIGNUM if (unlikely(v2 > 0x1f)) { if (unlikely(sf->js_mode & JS_MODE_MATH)) goto sar_slow; else v2 &= 0x1f; } #else v2 &= 0x1f; #endif sp[-2] = JS_NewInt32(ctx, (int)JS_VALUE_GET_INT(op1) >> v2); sp--; } else { #ifdef CONFIG_BIGNUM sar_slow: #endif if (js_binary_logic_slow(ctx, sp, opcode)) goto exception; sp--; } } BREAK; CASE(OP_and): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { sp[-2] = JS_NewInt32(ctx, JS_VALUE_GET_INT(op1) & JS_VALUE_GET_INT(op2)); sp--; } else { if (js_binary_logic_slow(ctx, sp, opcode)) goto exception; sp--; } } BREAK; CASE(OP_or): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { sp[-2] = JS_NewInt32(ctx, JS_VALUE_GET_INT(op1) | JS_VALUE_GET_INT(op2)); sp--; } else { if (js_binary_logic_slow(ctx, sp, opcode)) goto exception; sp--; } } BREAK; CASE(OP_xor): { JSValue op1, op2; op1 = sp[-2]; op2 = sp[-1]; if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { sp[-2] = JS_NewInt32(ctx, JS_VALUE_GET_INT(op1) ^ JS_VALUE_GET_INT(op2)); sp--; } else { if (js_binary_logic_slow(ctx, sp, opcode)) goto exception; sp--; } } BREAK; #define OP_CMP(opcode, binary_op, slow_call) \ CASE(opcode): \ { \ JSValue op1, op2; \ op1 = sp[-2]; \ op2 = sp[-1]; \ if (likely(JS_VALUE_IS_BOTH_INT(op1, op2))) { \ sp[-2] = JS_NewBool(ctx, JS_VALUE_GET_INT(op1) binary_op JS_VALUE_GET_INT(op2)); \ sp--; \ } else { \ if (slow_call) \ goto exception; \ sp--; \ } \ } \ BREAK OP_CMP(OP_lt, <, js_relational_slow(ctx, sp, opcode)); OP_CMP(OP_lte, <=, js_relational_slow(ctx, sp, opcode)); OP_CMP(OP_gt, >, js_relational_slow(ctx, sp, opcode)); OP_CMP(OP_gte, >=, js_relational_slow(ctx, sp, opcode)); OP_CMP(OP_eq, ==, js_eq_slow(ctx, sp, 0)); OP_CMP(OP_neq, !=, js_eq_slow(ctx, sp, 1)); OP_CMP(OP_strict_eq, ==, js_strict_eq_slow(ctx, sp, 0)); OP_CMP(OP_strict_neq, !=, js_strict_eq_slow(ctx, sp, 1)); #ifdef CONFIG_BIGNUM CASE(OP_mul_pow10): if (rt->bigfloat_ops.mul_pow10(ctx, sp)) goto exception; sp--; BREAK; #endif CASE(OP_in): if (js_operator_in(ctx, sp)) goto exception; sp--; BREAK; CASE(OP_private_in): if (js_operator_private_in(ctx, sp)) goto exception; sp--; BREAK; CASE(OP_instanceof): if (js_operator_instanceof(ctx, sp)) goto exception; sp--; BREAK; CASE(OP_typeof): { JSValue op1; JSAtom atom; op1 = sp[-1]; atom = js_operator_typeof(ctx, op1); JS_FreeValue(ctx, op1); sp[-1] = JS_AtomToString(ctx, atom); } BREAK; CASE(OP_delete): if (js_operator_delete(ctx, sp)) goto exception; sp--; BREAK; CASE(OP_delete_var): { JSAtom atom; int ret; atom = get_u32(pc); pc += 4; ret = 
JS_DeleteProperty(ctx, ctx->global_obj, atom, 0); if (unlikely(ret < 0)) goto exception; *sp++ = JS_NewBool(ctx, ret); } BREAK; CASE(OP_to_object): if (JS_VALUE_GET_TAG(sp[-1]) != JS_TAG_OBJECT) { ret_val = JS_ToObject(ctx, sp[-1]); if (JS_IsException(ret_val)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = ret_val; } BREAK; CASE(OP_to_propkey): switch (JS_VALUE_GET_TAG(sp[-1])) { case JS_TAG_INT: case JS_TAG_STRING: case JS_TAG_SYMBOL: break; default: ret_val = JS_ToPropertyKey(ctx, sp[-1]); if (JS_IsException(ret_val)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = ret_val; break; } BREAK; CASE(OP_to_propkey2): /* must be tested first */ if (unlikely(JS_IsUndefined(sp[-2]) || JS_IsNull(sp[-2]))) { JS_ThrowTypeError(ctx, "value has no property"); goto exception; } switch (JS_VALUE_GET_TAG(sp[-1])) { case JS_TAG_INT: case JS_TAG_STRING: case JS_TAG_SYMBOL: break; default: ret_val = JS_ToPropertyKey(ctx, sp[-1]); if (JS_IsException(ret_val)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = ret_val; break; } BREAK; #if 0 CASE(OP_to_string): if (JS_VALUE_GET_TAG(sp[-1]) != JS_TAG_STRING) { ret_val = JS_ToString(ctx, sp[-1]); if (JS_IsException(ret_val)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = ret_val; } BREAK; #endif CASE(OP_with_get_var): CASE(OP_with_put_var): CASE(OP_with_delete_var): CASE(OP_with_make_ref): CASE(OP_with_get_ref): CASE(OP_with_get_ref_undef): { JSAtom atom; int32_t diff; JSValue obj, val; int ret, is_with; atom = get_u32(pc); diff = get_u32(pc + 4); is_with = pc[8]; pc += 9; obj = sp[-1]; ret = JS_HasProperty(ctx, obj, atom); if (unlikely(ret < 0)) goto exception; if (ret) { if (is_with) { ret = js_has_unscopable(ctx, obj, atom); if (unlikely(ret < 0)) goto exception; if (ret) goto no_with; } switch (opcode) { case OP_with_get_var: val = JS_GetProperty(ctx, obj, atom); if (unlikely(JS_IsException(val))) goto exception; set_value(ctx, &sp[-1], val); break; case OP_with_put_var: /* XXX: check if strict mode */ ret = JS_SetPropertyInternal(ctx, obj, atom, sp[-2], obj, JS_PROP_THROW_STRICT); JS_FreeValue(ctx, sp[-1]); sp -= 2; if (unlikely(ret < 0)) goto exception; break; case OP_with_delete_var: ret = JS_DeleteProperty(ctx, obj, atom, 0); if (unlikely(ret < 0)) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = JS_NewBool(ctx, ret); break; case OP_with_make_ref: /* produce a pair object/propname on the stack */ *sp++ = JS_AtomToValue(ctx, atom); break; case OP_with_get_ref: /* produce a pair object/method on the stack */ val = JS_GetProperty(ctx, obj, atom); if (unlikely(JS_IsException(val))) goto exception; *sp++ = val; break; case OP_with_get_ref_undef: /* produce a pair undefined/function on the stack */ val = JS_GetProperty(ctx, obj, atom); if (unlikely(JS_IsException(val))) goto exception; JS_FreeValue(ctx, sp[-1]); sp[-1] = JS_UNDEFINED; *sp++ = val; break; } pc += diff - 5; } else { no_with: /* if not jumping, drop the object argument */ JS_FreeValue(ctx, sp[-1]); sp--; } } BREAK; CASE(OP_await): ret_val = JS_NewInt32(ctx, FUNC_RET_AWAIT); goto done_generator; CASE(OP_yield): ret_val = JS_NewInt32(ctx, FUNC_RET_YIELD); goto done_generator; CASE(OP_yield_star): CASE(OP_async_yield_star): ret_val = JS_NewInt32(ctx, FUNC_RET_YIELD_STAR); goto done_generator; CASE(OP_return_async): ret_val = JS_UNDEFINED; goto done_generator; CASE(OP_initial_yield): ret_val = JS_NewInt32(ctx, FUNC_RET_INITIAL_YIELD); goto done_generator; CASE(OP_nop): BREAK; CASE(OP_is_undefined_or_null): if (JS_VALUE_GET_TAG(sp[-1]) == JS_TAG_UNDEFINED || JS_VALUE_GET_TAG(sp[-1]) 
== JS_TAG_NULL) { goto set_true; } else { goto free_and_set_false; } #if SHORT_OPCODES CASE(OP_is_undefined): if (JS_VALUE_GET_TAG(sp[-1]) == JS_TAG_UNDEFINED) { goto set_true; } else { goto free_and_set_false; } CASE(OP_is_null): if (JS_VALUE_GET_TAG(sp[-1]) == JS_TAG_NULL) { goto set_true; } else { goto free_and_set_false; } /* XXX: could merge to a single opcode */ CASE(OP_typeof_is_undefined): /* different from OP_is_undefined because of isHTMLDDA */ if (js_operator_typeof(ctx, sp[-1]) == JS_ATOM_undefined) { goto free_and_set_true; } else { goto free_and_set_false; } CASE(OP_typeof_is_function): if (js_operator_typeof(ctx, sp[-1]) == JS_ATOM_function) { goto free_and_set_true; } else { goto free_and_set_false; } free_and_set_true: JS_FreeValue(ctx, sp[-1]); #endif set_true: sp[-1] = JS_TRUE; BREAK; free_and_set_false: JS_FreeValue(ctx, sp[-1]); sp[-1] = JS_FALSE; BREAK; CASE(OP_invalid): DEFAULT: JS_ThrowInternalError(ctx, "invalid opcode: pc=%u opcode=0x%02x", (int)(pc - b->byte_code_buf - 1), opcode); goto exception; } } exception: if (is_backtrace_needed(ctx, rt->current_exception)) { /* add the backtrace information now (it is not done before if the exception happens in a bytecode operation */ sf->cur_pc = pc; build_backtrace(ctx, rt->current_exception, NULL, 0, 0); } if (!JS_IsUncatchableError(ctx, rt->current_exception)) { while (sp > stack_buf) { JSValue val = *--sp; JS_FreeValue(ctx, val); if (JS_VALUE_GET_TAG(val) == JS_TAG_CATCH_OFFSET) { int pos = JS_VALUE_GET_INT(val); if (pos == 0) { /* enumerator: close it with a throw */ JS_FreeValue(ctx, sp[-1]); /* drop the next method */ sp--; JS_IteratorClose(ctx, sp[-1], TRUE); } else { *sp++ = rt->current_exception; rt->current_exception = JS_NULL; pc = b->byte_code_buf + pos; goto restart; } } } } ret_val = JS_EXCEPTION; /* the local variables are freed by the caller in the generator case. Hence the label 'done' should never be reached in a generator function. */ if (b->func_kind != JS_FUNC_NORMAL) { done_generator: sf->cur_pc = pc; sf->cur_sp = sp; } else { done: if (unlikely(!list_empty(&sf->var_ref_list))) { /* variable references reference the stack: must close them */ close_var_refs(rt, sf); } /* free the local variables and stack */ for(pval = local_buf; pval < sp; pval++) { JS_FreeValue(ctx, *pval); } } rt->current_stack_frame = sf->prev_frame; return ret_val; } JSValue JS_Call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv) { return JS_CallInternal(ctx, func_obj, this_obj, JS_UNDEFINED, argc, (JSValue *)argv, JS_CALL_FLAG_COPY_ARGV); } static JSValue JS_CallFree(JSContext *ctx, JSValue func_obj, JSValueConst this_obj, int argc, JSValueConst *argv) { JSValue res = JS_CallInternal(ctx, func_obj, this_obj, JS_UNDEFINED, argc, (JSValue *)argv, JS_CALL_FLAG_COPY_ARGV); JS_FreeValue(ctx, func_obj); return res; } /* warning: the refcount of the context is not incremented. 
Return NULL in case of exception (case of revoked proxy only) */ static JSContext *JS_GetFunctionRealm(JSContext *ctx, JSValueConst func_obj) { JSObject *p; JSContext *realm; if (JS_VALUE_GET_TAG(func_obj) != JS_TAG_OBJECT) return ctx; p = JS_VALUE_GET_OBJ(func_obj); switch(p->class_id) { case JS_CLASS_C_FUNCTION: realm = p->u.cfunc.realm; break; case JS_CLASS_BYTECODE_FUNCTION: case JS_CLASS_GENERATOR_FUNCTION: case JS_CLASS_ASYNC_FUNCTION: case JS_CLASS_ASYNC_GENERATOR_FUNCTION: { JSFunctionBytecode *b; b = p->u.func.function_bytecode; realm = b->realm; } break; case JS_CLASS_PROXY: { JSProxyData *s = p->u.opaque; if (!s) return ctx; if (s->is_revoked) { JS_ThrowTypeErrorRevokedProxy(ctx); return NULL; } else { realm = JS_GetFunctionRealm(ctx, s->target); } } break; case JS_CLASS_BOUND_FUNCTION: { JSBoundFunction *bf = p->u.bound_function; realm = JS_GetFunctionRealm(ctx, bf->func_obj); } break; default: realm = ctx; break; } return realm; } static JSValue js_create_from_ctor(JSContext *ctx, JSValueConst ctor, int class_id) { JSValue proto, obj; JSContext *realm; if (JS_IsUndefined(ctor)) { proto = JS_DupValue(ctx, ctx->class_proto[class_id]); } else { proto = JS_GetProperty(ctx, ctor, JS_ATOM_prototype); if (JS_IsException(proto)) return proto; if (!JS_IsObject(proto)) { JS_FreeValue(ctx, proto); realm = JS_GetFunctionRealm(ctx, ctor); if (!realm) return JS_EXCEPTION; proto = JS_DupValue(ctx, realm->class_proto[class_id]); } } obj = JS_NewObjectProtoClass(ctx, proto, class_id); JS_FreeValue(ctx, proto); return obj; } /* argv[] is modified if (flags & JS_CALL_FLAG_COPY_ARGV) = 0. */ static JSValue JS_CallConstructorInternal(JSContext *ctx, JSValueConst func_obj, JSValueConst new_target, int argc, JSValue *argv, int flags) { JSObject *p; JSFunctionBytecode *b; if (js_poll_interrupts(ctx)) return JS_EXCEPTION; flags |= JS_CALL_FLAG_CONSTRUCTOR; if (unlikely(JS_VALUE_GET_TAG(func_obj) != JS_TAG_OBJECT)) goto not_a_function; p = JS_VALUE_GET_OBJ(func_obj); if (unlikely(!p->is_constructor)) return JS_ThrowTypeError(ctx, "not a constructor"); if (unlikely(p->class_id != JS_CLASS_BYTECODE_FUNCTION)) { JSClassCall *call_func; call_func = ctx->rt->class_array[p->class_id].call; if (!call_func) { not_a_function: return JS_ThrowTypeError(ctx, "not a function"); } return call_func(ctx, func_obj, new_target, argc, (JSValueConst *)argv, flags); } b = p->u.func.function_bytecode; if (b->is_derived_class_constructor) { return JS_CallInternal(ctx, func_obj, JS_UNDEFINED, new_target, argc, argv, flags); } else { JSValue obj, ret; /* legacy constructor behavior */ obj = js_create_from_ctor(ctx, new_target, JS_CLASS_OBJECT); if (JS_IsException(obj)) return JS_EXCEPTION; ret = JS_CallInternal(ctx, func_obj, obj, new_target, argc, argv, flags); if (JS_VALUE_GET_TAG(ret) == JS_TAG_OBJECT || JS_IsException(ret)) { JS_FreeValue(ctx, obj); return ret; } else { JS_FreeValue(ctx, ret); return obj; } } } JSValue JS_CallConstructor2(JSContext *ctx, JSValueConst func_obj, JSValueConst new_target, int argc, JSValueConst *argv) { return JS_CallConstructorInternal(ctx, func_obj, new_target, argc, (JSValue *)argv, JS_CALL_FLAG_COPY_ARGV); } JSValue JS_CallConstructor(JSContext *ctx, JSValueConst func_obj, int argc, JSValueConst *argv) { return JS_CallConstructorInternal(ctx, func_obj, func_obj, argc, (JSValue *)argv, JS_CALL_FLAG_COPY_ARGV); } JSValue JS_Invoke(JSContext *ctx, JSValueConst this_val, JSAtom atom, int argc, JSValueConst *argv) { JSValue func_obj; func_obj = JS_GetProperty(ctx, this_val, atom); if 
(JS_IsException(func_obj)) return func_obj; return JS_CallFree(ctx, func_obj, this_val, argc, argv); } static JSValue JS_InvokeFree(JSContext *ctx, JSValue this_val, JSAtom atom, int argc, JSValueConst *argv) { JSValue res = JS_Invoke(ctx, this_val, atom, argc, argv); JS_FreeValue(ctx, this_val); return res; } /* JSAsyncFunctionState (used by generator and async functions) */ static JSAsyncFunctionState *async_func_init(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv) { JSAsyncFunctionState *s; JSObject *p; JSFunctionBytecode *b; JSStackFrame *sf; int local_count, i, arg_buf_len, n; s = js_mallocz(ctx, sizeof(*s)); if (!s) return NULL; s->header.ref_count = 1; add_gc_object(ctx->rt, &s->header, JS_GC_OBJ_TYPE_ASYNC_FUNCTION); sf = &s->frame; init_list_head(&sf->var_ref_list); p = JS_VALUE_GET_OBJ(func_obj); b = p->u.func.function_bytecode; sf->js_mode = b->js_mode | JS_MODE_ASYNC; sf->cur_pc = b->byte_code_buf; arg_buf_len = max_int(b->arg_count, argc); local_count = arg_buf_len + b->var_count + b->stack_size; sf->arg_buf = js_malloc(ctx, sizeof(JSValue) * max_int(local_count, 1)); if (!sf->arg_buf) { js_free(ctx, s); return NULL; } sf->cur_func = JS_DupValue(ctx, func_obj); s->this_val = JS_DupValue(ctx, this_obj); s->argc = argc; sf->arg_count = arg_buf_len; sf->var_buf = sf->arg_buf + arg_buf_len; sf->cur_sp = sf->var_buf + b->var_count; for(i = 0; i < argc; i++) sf->arg_buf[i] = JS_DupValue(ctx, argv[i]); n = arg_buf_len + b->var_count; for(i = argc; i < n; i++) sf->arg_buf[i] = JS_UNDEFINED; s->resolving_funcs[0] = JS_UNDEFINED; s->resolving_funcs[1] = JS_UNDEFINED; s->is_completed = FALSE; return s; } static void async_func_free_frame(JSRuntime *rt, JSAsyncFunctionState *s) { JSStackFrame *sf = &s->frame; JSValue *sp; if (sf->arg_buf) { /* cannot free the function if it is running */ assert(sf->cur_sp != NULL); for(sp = sf->arg_buf; sp < sf->cur_sp; sp++) { JS_FreeValueRT(rt, *sp); } js_free_rt(rt, sf->arg_buf); sf->arg_buf = NULL; } JS_FreeValueRT(rt, sf->cur_func); JS_FreeValueRT(rt, s->this_val); } static JSValue async_func_resume(JSContext *ctx, JSAsyncFunctionState *s) { JSRuntime *rt = ctx->rt; JSStackFrame *sf = &s->frame; JSValue func_obj, ret; assert(!s->is_completed); if (js_check_stack_overflow(ctx->rt, 0)) { ret = JS_ThrowStackOverflow(ctx); } else { /* the tag does not matter provided it is not an object */ func_obj = JS_MKPTR(JS_TAG_INT, s); ret = JS_CallInternal(ctx, func_obj, s->this_val, JS_UNDEFINED, s->argc, sf->arg_buf, JS_CALL_FLAG_GENERATOR); } if (JS_IsException(ret) || JS_IsUndefined(ret)) { if (JS_IsUndefined(ret)) { ret = sf->cur_sp[-1]; sf->cur_sp[-1] = JS_UNDEFINED; } /* end of execution */ s->is_completed = TRUE; /* close the closure variables. 
*/ close_var_refs(rt, sf); async_func_free_frame(rt, s); } return ret; } static void __async_func_free(JSRuntime *rt, JSAsyncFunctionState *s) { /* cannot close the closure variables here because it would potentially modify the object graph */ if (!s->is_completed) { async_func_free_frame(rt, s); } JS_FreeValueRT(rt, s->resolving_funcs[0]); JS_FreeValueRT(rt, s->resolving_funcs[1]); remove_gc_object(&s->header); if (rt->gc_phase == JS_GC_PHASE_REMOVE_CYCLES && s->header.ref_count != 0) { list_add_tail(&s->header.link, &rt->gc_zero_ref_count_list); } else { js_free_rt(rt, s); } } static void async_func_free(JSRuntime *rt, JSAsyncFunctionState *s) { if (--s->header.ref_count == 0) { if (rt->gc_phase != JS_GC_PHASE_REMOVE_CYCLES) { list_del(&s->header.link); list_add(&s->header.link, &rt->gc_zero_ref_count_list); if (rt->gc_phase == JS_GC_PHASE_NONE) { free_zero_refcount(rt); } } } } /* Generators */ typedef enum JSGeneratorStateEnum { JS_GENERATOR_STATE_SUSPENDED_START, JS_GENERATOR_STATE_SUSPENDED_YIELD, JS_GENERATOR_STATE_SUSPENDED_YIELD_STAR, JS_GENERATOR_STATE_EXECUTING, JS_GENERATOR_STATE_COMPLETED, } JSGeneratorStateEnum; typedef struct JSGeneratorData { JSGeneratorStateEnum state; JSAsyncFunctionState *func_state; } JSGeneratorData; static void free_generator_stack_rt(JSRuntime *rt, JSGeneratorData *s) { if (s->state == JS_GENERATOR_STATE_COMPLETED) return; if (s->func_state) { async_func_free(rt, s->func_state); s->func_state = NULL; } s->state = JS_GENERATOR_STATE_COMPLETED; } static void js_generator_finalizer(JSRuntime *rt, JSValue obj) { JSGeneratorData *s = JS_GetOpaque(obj, JS_CLASS_GENERATOR); if (s) { free_generator_stack_rt(rt, s); js_free_rt(rt, s); } } static void free_generator_stack(JSContext *ctx, JSGeneratorData *s) { free_generator_stack_rt(ctx->rt, s); } static void js_generator_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); JSGeneratorData *s = p->u.generator_data; if (!s || !s->func_state) return; mark_func(rt, &s->func_state->header); } /* XXX: use enum */ #define GEN_MAGIC_NEXT 0 #define GEN_MAGIC_RETURN 1 #define GEN_MAGIC_THROW 2 static JSValue js_generator_next(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, BOOL *pdone, int magic) { JSGeneratorData *s = JS_GetOpaque(this_val, JS_CLASS_GENERATOR); JSStackFrame *sf; JSValue ret, func_ret; *pdone = TRUE; if (!s) return JS_ThrowTypeError(ctx, "not a generator"); sf = &s->func_state->frame; switch(s->state) { default: case JS_GENERATOR_STATE_SUSPENDED_START: if (magic == GEN_MAGIC_NEXT) { goto exec_no_arg; } else { free_generator_stack(ctx, s); goto done; } break; case JS_GENERATOR_STATE_SUSPENDED_YIELD_STAR: case JS_GENERATOR_STATE_SUSPENDED_YIELD: /* cur_sp[-1] was set to JS_UNDEFINED in the previous call */ ret = JS_DupValue(ctx, argv[0]); if (magic == GEN_MAGIC_THROW && s->state == JS_GENERATOR_STATE_SUSPENDED_YIELD) { JS_Throw(ctx, ret); s->func_state->throw_flag = TRUE; } else { sf->cur_sp[-1] = ret; sf->cur_sp[0] = JS_NewInt32(ctx, magic); sf->cur_sp++; exec_no_arg: s->func_state->throw_flag = FALSE; } s->state = JS_GENERATOR_STATE_EXECUTING; func_ret = async_func_resume(ctx, s->func_state); s->state = JS_GENERATOR_STATE_SUSPENDED_YIELD; if (s->func_state->is_completed) { /* finalize the execution in case of exception or normal return */ free_generator_stack(ctx, s); return func_ret; } else { assert(JS_VALUE_GET_TAG(func_ret) == JS_TAG_INT); /* get the returned yield value at the top of the stack */ ret = sf->cur_sp[-1]; 
sf->cur_sp[-1] = JS_UNDEFINED; if (JS_VALUE_GET_INT(func_ret) == FUNC_RET_YIELD_STAR) { s->state = JS_GENERATOR_STATE_SUSPENDED_YIELD_STAR; /* return (value, done) object */ *pdone = 2; } else { *pdone = FALSE; } } break; case JS_GENERATOR_STATE_COMPLETED: done: /* execution is finished */ switch(magic) { default: case GEN_MAGIC_NEXT: ret = JS_UNDEFINED; break; case GEN_MAGIC_RETURN: ret = JS_DupValue(ctx, argv[0]); break; case GEN_MAGIC_THROW: ret = JS_Throw(ctx, JS_DupValue(ctx, argv[0])); break; } break; case JS_GENERATOR_STATE_EXECUTING: ret = JS_ThrowTypeError(ctx, "cannot invoke a running generator"); break; } return ret; } static JSValue js_generator_function_call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags) { JSValue obj, func_ret; JSGeneratorData *s; s = js_mallocz(ctx, sizeof(*s)); if (!s) return JS_EXCEPTION; s->state = JS_GENERATOR_STATE_SUSPENDED_START; s->func_state = async_func_init(ctx, func_obj, this_obj, argc, argv); if (!s->func_state) { s->state = JS_GENERATOR_STATE_COMPLETED; goto fail; } /* execute the function up to 'OP_initial_yield' */ func_ret = async_func_resume(ctx, s->func_state); if (JS_IsException(func_ret)) goto fail; JS_FreeValue(ctx, func_ret); obj = js_create_from_ctor(ctx, func_obj, JS_CLASS_GENERATOR); if (JS_IsException(obj)) goto fail; JS_SetOpaque(obj, s); return obj; fail: free_generator_stack_rt(ctx->rt, s); js_free(ctx, s); return JS_EXCEPTION; } /* AsyncFunction */ static void js_async_function_resolve_finalizer(JSRuntime *rt, JSValue val) { JSObject *p = JS_VALUE_GET_OBJ(val); JSAsyncFunctionState *s = p->u.async_function_data; if (s) { async_func_free(rt, s); } } static void js_async_function_resolve_mark(JSRuntime *rt, JSValueConst val, JS_MarkFunc *mark_func) { JSObject *p = JS_VALUE_GET_OBJ(val); JSAsyncFunctionState *s = p->u.async_function_data; if (s) { mark_func(rt, &s->header); } } static int js_async_function_resolve_create(JSContext *ctx, JSAsyncFunctionState *s, JSValue *resolving_funcs) { int i; JSObject *p; for(i = 0; i < 2; i++) { resolving_funcs[i] = JS_NewObjectProtoClass(ctx, ctx->function_proto, JS_CLASS_ASYNC_FUNCTION_RESOLVE + i); if (JS_IsException(resolving_funcs[i])) { if (i == 1) JS_FreeValue(ctx, resolving_funcs[0]); return -1; } p = JS_VALUE_GET_OBJ(resolving_funcs[i]); s->header.ref_count++; p->u.async_function_data = s; } return 0; } static void js_async_function_resume(JSContext *ctx, JSAsyncFunctionState *s) { JSValue func_ret, ret2; func_ret = async_func_resume(ctx, s); if (s->is_completed) { if (JS_IsException(func_ret)) { JSValue error; fail: error = JS_GetException(ctx); ret2 = JS_Call(ctx, s->resolving_funcs[1], JS_UNDEFINED, 1, (JSValueConst *)&error); JS_FreeValue(ctx, error); JS_FreeValue(ctx, ret2); /* XXX: what to do if exception ? */ } else { /* normal return */ ret2 = JS_Call(ctx, s->resolving_funcs[0], JS_UNDEFINED, 1, (JSValueConst *)&func_ret); JS_FreeValue(ctx, func_ret); JS_FreeValue(ctx, ret2); /* XXX: what to do if exception ? 
*/ } } else { JSValue value, promise, resolving_funcs[2], resolving_funcs1[2]; int i, res; value = s->frame.cur_sp[-1]; s->frame.cur_sp[-1] = JS_UNDEFINED; /* await */ JS_FreeValue(ctx, func_ret); /* not used */ promise = js_promise_resolve(ctx, ctx->promise_ctor, 1, (JSValueConst *)&value, 0); JS_FreeValue(ctx, value); if (JS_IsException(promise)) goto fail; if (js_async_function_resolve_create(ctx, s, resolving_funcs)) { JS_FreeValue(ctx, promise); goto fail; } /* Note: no need to create 'thrownawayCapability' as in the spec */ for(i = 0; i < 2; i++) resolving_funcs1[i] = JS_UNDEFINED; res = perform_promise_then(ctx, promise, (JSValueConst *)resolving_funcs, (JSValueConst *)resolving_funcs1); JS_FreeValue(ctx, promise); for(i = 0; i < 2; i++) JS_FreeValue(ctx, resolving_funcs[i]); if (res) goto fail; } } static JSValue js_async_function_resolve_call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags) { JSObject *p = JS_VALUE_GET_OBJ(func_obj); JSAsyncFunctionState *s = p->u.async_function_data; BOOL is_reject = p->class_id - JS_CLASS_ASYNC_FUNCTION_RESOLVE; JSValueConst arg; if (argc > 0) arg = argv[0]; else arg = JS_UNDEFINED; s->throw_flag = is_reject; if (is_reject) { JS_Throw(ctx, JS_DupValue(ctx, arg)); } else { /* return value of await */ s->frame.cur_sp[-1] = JS_DupValue(ctx, arg); } js_async_function_resume(ctx, s); return JS_UNDEFINED; } static JSValue js_async_function_call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags) { JSValue promise; JSAsyncFunctionState *s; s = async_func_init(ctx, func_obj, this_obj, argc, argv); if (!s) return JS_EXCEPTION; promise = JS_NewPromiseCapability(ctx, s->resolving_funcs); if (JS_IsException(promise)) { async_func_free(ctx->rt, s); return JS_EXCEPTION; } js_async_function_resume(ctx, s); async_func_free(ctx->rt, s); return promise; } /* AsyncGenerator */ typedef enum JSAsyncGeneratorStateEnum { JS_ASYNC_GENERATOR_STATE_SUSPENDED_START, JS_ASYNC_GENERATOR_STATE_SUSPENDED_YIELD, JS_ASYNC_GENERATOR_STATE_SUSPENDED_YIELD_STAR, JS_ASYNC_GENERATOR_STATE_EXECUTING, JS_ASYNC_GENERATOR_STATE_AWAITING_RETURN, JS_ASYNC_GENERATOR_STATE_COMPLETED, } JSAsyncGeneratorStateEnum; typedef struct JSAsyncGeneratorRequest { struct list_head link; /* completion */ int completion_type; /* GEN_MAGIC_x */ JSValue result; /* promise capability */ JSValue promise; JSValue resolving_funcs[2]; } JSAsyncGeneratorRequest; typedef struct JSAsyncGeneratorData { JSObject *generator; /* back pointer to the object (const) */ JSAsyncGeneratorStateEnum state; /* func_state is NULL is state AWAITING_RETURN and COMPLETED */ JSAsyncFunctionState *func_state; struct list_head queue; /* list of JSAsyncGeneratorRequest.link */ } JSAsyncGeneratorData; static void js_async_generator_free(JSRuntime *rt, JSAsyncGeneratorData *s) { struct list_head *el, *el1; JSAsyncGeneratorRequest *req; list_for_each_safe(el, el1, &s->queue) { req = list_entry(el, JSAsyncGeneratorRequest, link); JS_FreeValueRT(rt, req->result); JS_FreeValueRT(rt, req->promise); JS_FreeValueRT(rt, req->resolving_funcs[0]); JS_FreeValueRT(rt, req->resolving_funcs[1]); js_free_rt(rt, req); } if (s->func_state) async_func_free(rt, s->func_state); js_free_rt(rt, s); } static void js_async_generator_finalizer(JSRuntime *rt, JSValue obj) { JSAsyncGeneratorData *s = JS_GetOpaque(obj, JS_CLASS_ASYNC_GENERATOR); if (s) { js_async_generator_free(rt, s); } } static void js_async_generator_mark(JSRuntime *rt, JSValueConst val, 
JS_MarkFunc *mark_func) { JSAsyncGeneratorData *s = JS_GetOpaque(val, JS_CLASS_ASYNC_GENERATOR); struct list_head *el; JSAsyncGeneratorRequest *req; if (s) { list_for_each(el, &s->queue) { req = list_entry(el, JSAsyncGeneratorRequest, link); JS_MarkValue(rt, req->result, mark_func); JS_MarkValue(rt, req->promise, mark_func); JS_MarkValue(rt, req->resolving_funcs[0], mark_func); JS_MarkValue(rt, req->resolving_funcs[1], mark_func); } if (s->func_state) { mark_func(rt, &s->func_state->header); } } } static JSValue js_async_generator_resolve_function(JSContext *ctx, JSValueConst this_obj, int argc, JSValueConst *argv, int magic, JSValue *func_data); static int js_async_generator_resolve_function_create(JSContext *ctx, JSValueConst generator, JSValue *resolving_funcs, BOOL is_resume_next) { int i; JSValue func; for(i = 0; i < 2; i++) { func = JS_NewCFunctionData(ctx, js_async_generator_resolve_function, 1, i + is_resume_next * 2, 1, &generator); if (JS_IsException(func)) { if (i == 1) JS_FreeValue(ctx, resolving_funcs[0]); return -1; } resolving_funcs[i] = func; } return 0; } static int js_async_generator_await(JSContext *ctx, JSAsyncGeneratorData *s, JSValueConst value) { JSValue promise, resolving_funcs[2], resolving_funcs1[2]; int i, res; promise = js_promise_resolve(ctx, ctx->promise_ctor, 1, &value, 0); if (JS_IsException(promise)) goto fail; if (js_async_generator_resolve_function_create(ctx, JS_MKPTR(JS_TAG_OBJECT, s->generator), resolving_funcs, FALSE)) { JS_FreeValue(ctx, promise); goto fail; } /* Note: no need to create 'thrownawayCapability' as in the spec */ for(i = 0; i < 2; i++) resolving_funcs1[i] = JS_UNDEFINED; res = perform_promise_then(ctx, promise, (JSValueConst *)resolving_funcs, (JSValueConst *)resolving_funcs1); JS_FreeValue(ctx, promise); for(i = 0; i < 2; i++) JS_FreeValue(ctx, resolving_funcs[i]); if (res) goto fail; return 0; fail: return -1; } static void js_async_generator_resolve_or_reject(JSContext *ctx, JSAsyncGeneratorData *s, JSValueConst result, int is_reject) { JSAsyncGeneratorRequest *next; JSValue ret; next = list_entry(s->queue.next, JSAsyncGeneratorRequest, link); list_del(&next->link); ret = JS_Call(ctx, next->resolving_funcs[is_reject], JS_UNDEFINED, 1, &result); JS_FreeValue(ctx, ret); JS_FreeValue(ctx, next->result); JS_FreeValue(ctx, next->promise); JS_FreeValue(ctx, next->resolving_funcs[0]); JS_FreeValue(ctx, next->resolving_funcs[1]); js_free(ctx, next); } static void js_async_generator_resolve(JSContext *ctx, JSAsyncGeneratorData *s, JSValueConst value, BOOL done) { JSValue result; result = js_create_iterator_result(ctx, JS_DupValue(ctx, value), done); /* XXX: better exception handling ? */ js_async_generator_resolve_or_reject(ctx, s, result, 0); JS_FreeValue(ctx, result); } static void js_async_generator_reject(JSContext *ctx, JSAsyncGeneratorData *s, JSValueConst exception) { js_async_generator_resolve_or_reject(ctx, s, exception, 1); } static void js_async_generator_complete(JSContext *ctx, JSAsyncGeneratorData *s) { if (s->state != JS_ASYNC_GENERATOR_STATE_COMPLETED) { s->state = JS_ASYNC_GENERATOR_STATE_COMPLETED; async_func_free(ctx->rt, s->func_state); s->func_state = NULL; } } static int js_async_generator_completed_return(JSContext *ctx, JSAsyncGeneratorData *s, JSValueConst value) { JSValue promise, resolving_funcs[2], resolving_funcs1[2]; int res; // Can fail looking up JS_ATOM_constructor when is_reject==0. 
promise = js_promise_resolve(ctx, ctx->promise_ctor, 1, &value, /*is_reject*/0); // A poisoned .constructor property is observable and the resulting // exception should be delivered to the catch handler. if (JS_IsException(promise)) { JSValue err = JS_GetException(ctx); promise = js_promise_resolve(ctx, ctx->promise_ctor, 1, (JSValueConst *)&err, /*is_reject*/1); JS_FreeValue(ctx, err); if (JS_IsException(promise)) return -1; } if (js_async_generator_resolve_function_create(ctx, JS_MKPTR(JS_TAG_OBJECT, s->generator), resolving_funcs1, TRUE)) { JS_FreeValue(ctx, promise); return -1; } resolving_funcs[0] = JS_UNDEFINED; resolving_funcs[1] = JS_UNDEFINED; res = perform_promise_then(ctx, promise, (JSValueConst *)resolving_funcs1, (JSValueConst *)resolving_funcs); JS_FreeValue(ctx, resolving_funcs1[0]); JS_FreeValue(ctx, resolving_funcs1[1]); JS_FreeValue(ctx, promise); return res; } static void js_async_generator_resume_next(JSContext *ctx, JSAsyncGeneratorData *s) { JSAsyncGeneratorRequest *next; JSValue func_ret, value; for(;;) { if (list_empty(&s->queue)) break; next = list_entry(s->queue.next, JSAsyncGeneratorRequest, link); switch(s->state) { case JS_ASYNC_GENERATOR_STATE_EXECUTING: /* only happens when restarting execution after await() */ goto resume_exec; case JS_ASYNC_GENERATOR_STATE_AWAITING_RETURN: goto done; case JS_ASYNC_GENERATOR_STATE_SUSPENDED_START: if (next->completion_type == GEN_MAGIC_NEXT) { goto exec_no_arg; } else { js_async_generator_complete(ctx, s); } break; case JS_ASYNC_GENERATOR_STATE_COMPLETED: if (next->completion_type == GEN_MAGIC_NEXT) { js_async_generator_resolve(ctx, s, JS_UNDEFINED, TRUE); } else if (next->completion_type == GEN_MAGIC_RETURN) { s->state = JS_ASYNC_GENERATOR_STATE_AWAITING_RETURN; js_async_generator_completed_return(ctx, s, next->result); } else { js_async_generator_reject(ctx, s, next->result); } goto done; case JS_ASYNC_GENERATOR_STATE_SUSPENDED_YIELD: case JS_ASYNC_GENERATOR_STATE_SUSPENDED_YIELD_STAR: value = JS_DupValue(ctx, next->result); if (next->completion_type == GEN_MAGIC_THROW && s->state == JS_ASYNC_GENERATOR_STATE_SUSPENDED_YIELD) { JS_Throw(ctx, value); s->func_state->throw_flag = TRUE; } else { /* 'yield' returns a value. 
'yield *' also returns a value in case the 'throw' method is called */ s->func_state->frame.cur_sp[-1] = value; s->func_state->frame.cur_sp[0] = JS_NewInt32(ctx, next->completion_type); s->func_state->frame.cur_sp++; exec_no_arg: s->func_state->throw_flag = FALSE; } s->state = JS_ASYNC_GENERATOR_STATE_EXECUTING; resume_exec: func_ret = async_func_resume(ctx, s->func_state); if (s->func_state->is_completed) { if (JS_IsException(func_ret)) { value = JS_GetException(ctx); js_async_generator_complete(ctx, s); js_async_generator_reject(ctx, s, value); JS_FreeValue(ctx, value); } else { /* end of function */ js_async_generator_complete(ctx, s); js_async_generator_resolve(ctx, s, func_ret, TRUE); JS_FreeValue(ctx, func_ret); } } else { int func_ret_code, ret; assert(JS_VALUE_GET_TAG(func_ret) == JS_TAG_INT); func_ret_code = JS_VALUE_GET_INT(func_ret); value = s->func_state->frame.cur_sp[-1]; s->func_state->frame.cur_sp[-1] = JS_UNDEFINED; switch(func_ret_code) { case FUNC_RET_YIELD: case FUNC_RET_YIELD_STAR: if (func_ret_code == FUNC_RET_YIELD_STAR) s->state = JS_ASYNC_GENERATOR_STATE_SUSPENDED_YIELD_STAR; else s->state = JS_ASYNC_GENERATOR_STATE_SUSPENDED_YIELD; js_async_generator_resolve(ctx, s, value, FALSE); JS_FreeValue(ctx, value); break; case FUNC_RET_AWAIT: ret = js_async_generator_await(ctx, s, value); JS_FreeValue(ctx, value); if (ret < 0) { /* exception: throw it */ s->func_state->throw_flag = TRUE; goto resume_exec; } goto done; default: abort(); } } break; default: abort(); } } done: ; } static JSValue js_async_generator_resolve_function(JSContext *ctx, JSValueConst this_obj, int argc, JSValueConst *argv, int magic, JSValue *func_data) { BOOL is_reject = magic & 1; JSAsyncGeneratorData *s = JS_GetOpaque(func_data[0], JS_CLASS_ASYNC_GENERATOR); JSValueConst arg = argv[0]; /* XXX: what if s == NULL */ if (magic >= 2) { /* resume next case in AWAITING_RETURN state */ assert(s->state == JS_ASYNC_GENERATOR_STATE_AWAITING_RETURN || s->state == JS_ASYNC_GENERATOR_STATE_COMPLETED); s->state = JS_ASYNC_GENERATOR_STATE_COMPLETED; if (is_reject) { js_async_generator_reject(ctx, s, arg); } else { js_async_generator_resolve(ctx, s, arg, TRUE); } } else { /* restart function execution after await() */ assert(s->state == JS_ASYNC_GENERATOR_STATE_EXECUTING); s->func_state->throw_flag = is_reject; if (is_reject) { JS_Throw(ctx, JS_DupValue(ctx, arg)); } else { /* return value of await */ s->func_state->frame.cur_sp[-1] = JS_DupValue(ctx, arg); } js_async_generator_resume_next(ctx, s); } return JS_UNDEFINED; } /* magic = GEN_MAGIC_x */ static JSValue js_async_generator_next(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int magic) { JSAsyncGeneratorData *s = JS_GetOpaque(this_val, JS_CLASS_ASYNC_GENERATOR); JSValue promise, resolving_funcs[2]; JSAsyncGeneratorRequest *req; promise = JS_NewPromiseCapability(ctx, resolving_funcs); if (JS_IsException(promise)) return JS_EXCEPTION; if (!s) { JSValue err, res2; JS_ThrowTypeError(ctx, "not an AsyncGenerator object"); err = JS_GetException(ctx); res2 = JS_Call(ctx, resolving_funcs[1], JS_UNDEFINED, 1, (JSValueConst *)&err); JS_FreeValue(ctx, err); JS_FreeValue(ctx, res2); JS_FreeValue(ctx, resolving_funcs[0]); JS_FreeValue(ctx, resolving_funcs[1]); return promise; } req = js_mallocz(ctx, sizeof(*req)); if (!req) goto fail; req->completion_type = magic; req->result = JS_DupValue(ctx, argv[0]); req->promise = JS_DupValue(ctx, promise); req->resolving_funcs[0] = resolving_funcs[0]; req->resolving_funcs[1] = resolving_funcs[1]; 
list_add_tail(&req->link, &s->queue); if (s->state != JS_ASYNC_GENERATOR_STATE_EXECUTING) { js_async_generator_resume_next(ctx, s); } return promise; fail: JS_FreeValue(ctx, resolving_funcs[0]); JS_FreeValue(ctx, resolving_funcs[1]); JS_FreeValue(ctx, promise); return JS_EXCEPTION; } static JSValue js_async_generator_function_call(JSContext *ctx, JSValueConst func_obj, JSValueConst this_obj, int argc, JSValueConst *argv, int flags) { JSValue obj, func_ret; JSAsyncGeneratorData *s; s = js_mallocz(ctx, sizeof(*s)); if (!s) return JS_EXCEPTION; s->state = JS_ASYNC_GENERATOR_STATE_SUSPENDED_START; init_list_head(&s->queue); s->func_state = async_func_init(ctx, func_obj, this_obj, argc, argv); if (!s->func_state) goto fail; /* execute the function up to 'OP_initial_yield' (no yield nor await are possible) */ func_ret = async_func_resume(ctx, s->func_state); if (JS_IsException(func_ret)) goto fail; JS_FreeValue(ctx, func_ret); obj = js_create_from_ctor(ctx, func_obj, JS_CLASS_ASYNC_GENERATOR); if (JS_IsException(obj)) goto fail; s->generator = JS_VALUE_GET_OBJ(obj); JS_SetOpaque(obj, s); return obj; fail: js_async_generator_free(ctx->rt, s); return JS_EXCEPTION; } /* JS parser */ enum { TOK_NUMBER = -128, TOK_STRING, TOK_TEMPLATE, TOK_IDENT, TOK_REGEXP, /* warning: order matters (see js_parse_assign_expr) */ TOK_MUL_ASSIGN, TOK_DIV_ASSIGN, TOK_MOD_ASSIGN, TOK_PLUS_ASSIGN, TOK_MINUS_ASSIGN, TOK_SHL_ASSIGN, TOK_SAR_ASSIGN, TOK_SHR_ASSIGN, TOK_AND_ASSIGN, TOK_XOR_ASSIGN, TOK_OR_ASSIGN, #ifdef CONFIG_BIGNUM TOK_MATH_POW_ASSIGN, #endif TOK_POW_ASSIGN, TOK_LAND_ASSIGN, TOK_LOR_ASSIGN, TOK_DOUBLE_QUESTION_MARK_ASSIGN, TOK_DEC, TOK_INC, TOK_SHL, TOK_SAR, TOK_SHR, TOK_LT, TOK_LTE, TOK_GT, TOK_GTE, TOK_EQ, TOK_STRICT_EQ, TOK_NEQ, TOK_STRICT_NEQ, TOK_LAND, TOK_LOR, #ifdef CONFIG_BIGNUM TOK_MATH_POW, #endif TOK_POW, TOK_ARROW, TOK_ELLIPSIS, TOK_DOUBLE_QUESTION_MARK, TOK_QUESTION_MARK_DOT, TOK_ERROR, TOK_PRIVATE_NAME, TOK_EOF, /* keywords: WARNING: same order as atoms */ TOK_NULL, /* must be first */ TOK_FALSE, TOK_TRUE, TOK_IF, TOK_ELSE, TOK_RETURN, TOK_VAR, TOK_THIS, TOK_DELETE, TOK_VOID, TOK_TYPEOF, TOK_NEW, TOK_IN, TOK_INSTANCEOF, TOK_DO, TOK_WHILE, TOK_FOR, TOK_BREAK, TOK_CONTINUE, TOK_SWITCH, TOK_CASE, TOK_DEFAULT, TOK_THROW, TOK_TRY, TOK_CATCH, TOK_FINALLY, TOK_FUNCTION, TOK_DEBUGGER, TOK_WITH, /* FutureReservedWord */ TOK_CLASS, TOK_CONST, TOK_ENUM, TOK_EXPORT, TOK_EXTENDS, TOK_IMPORT, TOK_SUPER, /* FutureReservedWords when parsing strict mode code */ TOK_IMPLEMENTS, TOK_INTERFACE, TOK_LET, TOK_PACKAGE, TOK_PRIVATE, TOK_PROTECTED, TOK_PUBLIC, TOK_STATIC, TOK_YIELD, TOK_AWAIT, /* must be last */ TOK_OF, /* only used for js_parse_skip_parens_token() */ }; #define TOK_FIRST_KEYWORD TOK_NULL #define TOK_LAST_KEYWORD TOK_AWAIT /* unicode code points */ #define CP_NBSP 0x00a0 #define CP_BOM 0xfeff #define CP_LS 0x2028 #define CP_PS 0x2029 typedef struct BlockEnv { struct BlockEnv *prev; JSAtom label_name; /* JS_ATOM_NULL if none */ int label_break; /* -1 if none */ int label_cont; /* -1 if none */ int drop_count; /* number of stack elements to drop */ int label_finally; /* -1 if none */ int scope_level; int has_iterator; } BlockEnv; typedef struct JSGlobalVar { int cpool_idx; /* if >= 0, index in the constant pool for hoisted function defintion*/ uint8_t force_init : 1; /* force initialization to undefined */ uint8_t is_lexical : 1; /* global let/const definition */ uint8_t is_const : 1; /* const definition */ int scope_level; /* scope of definition */ JSAtom var_name; /* variable name */ } JSGlobalVar; 
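/* Illustrative worked example of the keyword mapping (an added sketch, not a
   change to the parser): the keyword members of the token enum above,
   TOK_NULL through TOK_AWAIT, must stay in the same order as the
   pre-allocated keyword atoms, because update_token_ident() below turns an
   identifier atom into its keyword token by plain arithmetic:

       s->token.val = s->token.u.ident.atom - 1 + TOK_FIRST_KEYWORD;

   Assuming, as the "- 1" offset suggests, that JS_ATOM_null is atom index 1,
   the first few keywords map as:

       JS_ATOM_null  (1)  ->  1 - 1 + TOK_FIRST_KEYWORD  ==  TOK_NULL
       JS_ATOM_false (2)  ->  2 - 1 + TOK_FIRST_KEYWORD  ==  TOK_FALSE
       JS_ATOM_true  (3)  ->  3 - 1 + TOK_FIRST_KEYWORD  ==  TOK_TRUE

   Adding or reordering an entry in only one of the two lists therefore
   silently shifts the mapping for every keyword that follows it. */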
typedef struct RelocEntry { struct RelocEntry *next; uint32_t addr; /* address to patch */ int size; /* address size: 1, 2 or 4 bytes */ } RelocEntry; typedef struct JumpSlot { int op; int size; int pos; int label; } JumpSlot; typedef struct LabelSlot { int ref_count; int pos; /* phase 1 address, -1 means not resolved yet */ int pos2; /* phase 2 address, -1 means not resolved yet */ int addr; /* phase 3 address, -1 means not resolved yet */ RelocEntry *first_reloc; } LabelSlot; typedef struct LineNumberSlot { uint32_t pc; int line_num; } LineNumberSlot; typedef enum JSParseFunctionEnum { JS_PARSE_FUNC_STATEMENT, JS_PARSE_FUNC_VAR, JS_PARSE_FUNC_EXPR, JS_PARSE_FUNC_ARROW, JS_PARSE_FUNC_GETTER, JS_PARSE_FUNC_SETTER, JS_PARSE_FUNC_METHOD, JS_PARSE_FUNC_CLASS_STATIC_INIT, JS_PARSE_FUNC_CLASS_CONSTRUCTOR, JS_PARSE_FUNC_DERIVED_CLASS_CONSTRUCTOR, } JSParseFunctionEnum; typedef enum JSParseExportEnum { JS_PARSE_EXPORT_NONE, JS_PARSE_EXPORT_NAMED, JS_PARSE_EXPORT_DEFAULT, } JSParseExportEnum; typedef struct JSFunctionDef { JSContext *ctx; struct JSFunctionDef *parent; int parent_cpool_idx; /* index in the constant pool of the parent or -1 if none */ int parent_scope_level; /* scope level in parent at point of definition */ struct list_head child_list; /* list of JSFunctionDef.link */ struct list_head link; BOOL is_eval; /* TRUE if eval code */ int eval_type; /* only valid if is_eval = TRUE */ BOOL is_global_var; /* TRUE if variables are not defined locally: eval global, eval module or non strict eval */ BOOL is_func_expr; /* TRUE if function expression */ BOOL has_home_object; /* TRUE if the home object is available */ BOOL has_prototype; /* true if a prototype field is necessary */ BOOL has_simple_parameter_list; BOOL has_parameter_expressions; /* if true, an argument scope is created */ BOOL has_use_strict; /* to reject directive in special cases */ BOOL has_eval_call; /* true if the function contains a call to eval() */ BOOL has_arguments_binding; /* true if the 'arguments' binding is available in the function */ BOOL has_this_binding; /* true if the 'this' and new.target binding are available in the function */ BOOL new_target_allowed; /* true if the 'new.target' does not throw a syntax error */ BOOL super_call_allowed; /* true if super() is allowed */ BOOL super_allowed; /* true if super. 
or super[] is allowed */ BOOL arguments_allowed; /* true if the 'arguments' identifier is allowed */ BOOL is_derived_class_constructor; BOOL in_function_body; BOOL backtrace_barrier; JSFunctionKindEnum func_kind : 8; JSParseFunctionEnum func_type : 8; uint8_t js_mode; /* bitmap of JS_MODE_x */ JSAtom func_name; /* JS_ATOM_NULL if no name */ JSVarDef *vars; int var_size; /* allocated size for vars[] */ int var_count; JSVarDef *args; int arg_size; /* allocated size for args[] */ int arg_count; /* number of arguments */ int defined_arg_count; int var_object_idx; /* -1 if none */ int arg_var_object_idx; /* -1 if none (var object for the argument scope) */ int arguments_var_idx; /* -1 if none */ int arguments_arg_idx; /* argument variable definition in argument scope, -1 if none */ int func_var_idx; /* variable containing the current function (-1 if none, only used if is_func_expr is true) */ int eval_ret_idx; /* variable containing the return value of the eval, -1 if none */ int this_var_idx; /* variable containg the 'this' value, -1 if none */ int new_target_var_idx; /* variable containg the 'new.target' value, -1 if none */ int this_active_func_var_idx; /* variable containg the 'this.active_func' value, -1 if none */ int home_object_var_idx; BOOL need_home_object; int scope_level; /* index into fd->scopes if the current lexical scope */ int scope_first; /* index into vd->vars of first lexically scoped variable */ int scope_size; /* allocated size of fd->scopes array */ int scope_count; /* number of entries used in the fd->scopes array */ JSVarScope *scopes; JSVarScope def_scope_array[4]; int body_scope; /* scope of the body of the function or eval */ int global_var_count; int global_var_size; JSGlobalVar *global_vars; DynBuf byte_code; int last_opcode_pos; /* -1 if no last opcode */ int last_opcode_line_num; BOOL use_short_opcodes; /* true if short opcodes are used in byte_code */ LabelSlot *label_slots; int label_size; /* allocated size for label_slots[] */ int label_count; BlockEnv *top_break; /* break/continue label stack */ /* constant pool (strings, functions, numbers) */ JSValue *cpool; int cpool_count; int cpool_size; /* list of variables in the closure */ int closure_var_count; int closure_var_size; JSClosureVar *closure_var; JumpSlot *jump_slots; int jump_size; int jump_count; LineNumberSlot *line_number_slots; int line_number_size; int line_number_count; int line_number_last; int line_number_last_pc; /* pc2line table */ JSAtom filename; int line_num; DynBuf pc2line; char *source; /* raw source, utf-8 encoded */ int source_len; JSModuleDef *module; /* != NULL when parsing a module */ BOOL has_await; /* TRUE if await is used (used in module eval) */ } JSFunctionDef; typedef struct JSToken { int val; int line_num; /* line number of token start */ const uint8_t *ptr; union { struct { JSValue str; int sep; } str; struct { JSValue val; #ifdef CONFIG_BIGNUM slimb_t exponent; /* may be != 0 only if val is a float */ #endif } num; struct { JSAtom atom; BOOL has_escape; BOOL is_reserved; } ident; struct { JSValue body; JSValue flags; } regexp; } u; } JSToken; typedef struct JSParseState { JSContext *ctx; int last_line_num; /* line number of last token */ int line_num; /* line number of current offset */ const char *filename; JSToken token; BOOL got_lf; /* true if got line feed before the current token */ const uint8_t *last_ptr; const uint8_t *buf_ptr; const uint8_t *buf_end; /* current function code */ JSFunctionDef *cur_func; BOOL is_module; /* parsing a module */ BOOL allow_html_comments; 
BOOL ext_json; /* true if accepting JSON superset */ } JSParseState; typedef struct JSOpCode { #ifdef DUMP_BYTECODE const char *name; #endif uint8_t size; /* in bytes */ /* the opcodes remove n_pop items from the top of the stack, then pushes n_push items */ uint8_t n_pop; uint8_t n_push; uint8_t fmt; } JSOpCode; static const JSOpCode opcode_info[OP_COUNT + (OP_TEMP_END - OP_TEMP_START)] = { #define FMT(f) #ifdef DUMP_BYTECODE #define DEF(id, size, n_pop, n_push, f) { #id, size, n_pop, n_push, OP_FMT_ ## f }, #else #define DEF(id, size, n_pop, n_push, f) { size, n_pop, n_push, OP_FMT_ ## f }, #endif #include "quickjs-opcode.h" #undef DEF #undef FMT }; #if SHORT_OPCODES /* After the final compilation pass, short opcodes are used. Their opcodes overlap with the temporary opcodes which cannot appear in the final bytecode. Their description is after the temporary opcodes in opcode_info[]. */ #define short_opcode_info(op) \ opcode_info[(op) >= OP_TEMP_START ? \ (op) + (OP_TEMP_END - OP_TEMP_START) : (op)] #else #define short_opcode_info(op) opcode_info[op] #endif static __exception int next_token(JSParseState *s); static void free_token(JSParseState *s, JSToken *token) { switch(token->val) { case TOK_NUMBER: JS_FreeValue(s->ctx, token->u.num.val); break; case TOK_STRING: case TOK_TEMPLATE: JS_FreeValue(s->ctx, token->u.str.str); break; case TOK_REGEXP: JS_FreeValue(s->ctx, token->u.regexp.body); JS_FreeValue(s->ctx, token->u.regexp.flags); break; case TOK_IDENT: case TOK_PRIVATE_NAME: JS_FreeAtom(s->ctx, token->u.ident.atom); break; default: if (token->val >= TOK_FIRST_KEYWORD && token->val <= TOK_LAST_KEYWORD) { JS_FreeAtom(s->ctx, token->u.ident.atom); } break; } } static void __attribute((unused)) dump_token(JSParseState *s, const JSToken *token) { switch(token->val) { case TOK_NUMBER: { double d; JS_ToFloat64(s->ctx, &d, token->u.num.val); /* no exception possible */ printf("number: %.14g\n", d); } break; case TOK_IDENT: dump_atom: { char buf[ATOM_GET_STR_BUF_SIZE]; printf("ident: '%s'\n", JS_AtomGetStr(s->ctx, buf, sizeof(buf), token->u.ident.atom)); } break; case TOK_STRING: { const char *str; /* XXX: quote the string */ str = JS_ToCString(s->ctx, token->u.str.str); printf("string: '%s'\n", str); JS_FreeCString(s->ctx, str); } break; case TOK_TEMPLATE: { const char *str; str = JS_ToCString(s->ctx, token->u.str.str); printf("template: `%s`\n", str); JS_FreeCString(s->ctx, str); } break; case TOK_REGEXP: { const char *str, *str2; str = JS_ToCString(s->ctx, token->u.regexp.body); str2 = JS_ToCString(s->ctx, token->u.regexp.flags); printf("regexp: '%s' '%s'\n", str, str2); JS_FreeCString(s->ctx, str); JS_FreeCString(s->ctx, str2); } break; case TOK_EOF: printf("eof\n"); break; default: if (s->token.val >= TOK_NULL && s->token.val <= TOK_LAST_KEYWORD) { goto dump_atom; } else if (s->token.val >= 256) { printf("token: %d\n", token->val); } else { printf("token: '%c'\n", token->val); } break; } } int __attribute__((format(printf, 2, 3))) js_parse_error(JSParseState *s, const char *fmt, ...) 
{ JSContext *ctx = s->ctx; va_list ap; int backtrace_flags; va_start(ap, fmt); JS_ThrowError2(ctx, JS_SYNTAX_ERROR, fmt, ap, FALSE); va_end(ap); backtrace_flags = 0; if (s->cur_func && s->cur_func->backtrace_barrier) backtrace_flags = JS_BACKTRACE_FLAG_SINGLE_LEVEL; build_backtrace(ctx, ctx->rt->current_exception, s->filename, s->line_num, backtrace_flags); return -1; } static int js_parse_expect(JSParseState *s, int tok) { if (s->token.val != tok) { /* XXX: dump token correctly in all cases */ return js_parse_error(s, "expecting '%c'", tok); } return next_token(s); } static int js_parse_expect_semi(JSParseState *s) { if (s->token.val != ';') { /* automatic insertion of ';' */ if (s->token.val == TOK_EOF || s->token.val == '}' || s->got_lf) { return 0; } return js_parse_error(s, "expecting '%c'", ';'); } return next_token(s); } static int js_parse_error_reserved_identifier(JSParseState *s) { char buf1[ATOM_GET_STR_BUF_SIZE]; return js_parse_error(s, "'%s' is a reserved identifier", JS_AtomGetStr(s->ctx, buf1, sizeof(buf1), s->token.u.ident.atom)); } static __exception int js_parse_template_part(JSParseState *s, const uint8_t *p) { uint32_t c; StringBuffer b_s, *b = &b_s; /* p points to the first byte of the template part */ if (string_buffer_init(s->ctx, b, 32)) goto fail; for(;;) { if (p >= s->buf_end) goto unexpected_eof; c = *p++; if (c == '`') { /* template end part */ break; } if (c == '$' && *p == '{') { /* template start or middle part */ p++; break; } if (c == '\\') { if (string_buffer_putc8(b, c)) goto fail; if (p >= s->buf_end) goto unexpected_eof; c = *p++; } /* newline sequences are normalized as single '\n' bytes */ if (c == '\r') { if (*p == '\n') p++; c = '\n'; } if (c == '\n') { s->line_num++; } else if (c >= 0x80) { const uint8_t *p_next; c = unicode_from_utf8(p - 1, UTF8_CHAR_LEN_MAX, &p_next); if (c > 0x10FFFF) { js_parse_error(s, "invalid UTF-8 sequence"); goto fail; } p = p_next; } if (string_buffer_putc(b, c)) goto fail; } s->token.val = TOK_TEMPLATE; s->token.u.str.sep = c; s->token.u.str.str = string_buffer_end(b); s->buf_ptr = p; return 0; unexpected_eof: js_parse_error(s, "unexpected end of string"); fail: string_buffer_free(b); return -1; } static __exception int js_parse_string(JSParseState *s, int sep, BOOL do_throw, const uint8_t *p, JSToken *token, const uint8_t **pp) { int ret; uint32_t c; StringBuffer b_s, *b = &b_s; /* string */ if (string_buffer_init(s->ctx, b, 32)) goto fail; for(;;) { if (p >= s->buf_end) goto invalid_char; c = *p; if (c < 0x20) { if (!s->cur_func) { if (do_throw) js_parse_error(s, "invalid character in a JSON string"); goto fail; } if (sep == '`') { if (c == '\r') { if (p[1] == '\n') p++; c = '\n'; } /* do not update s->line_num */ } else if (c == '\n' || c == '\r') goto invalid_char; } p++; if (c == sep) break; if (c == '$' && *p == '{' && sep == '`') { /* template start or middle part */ p++; break; } if (c == '\\') { c = *p; /* XXX: need a specific JSON case to avoid accepting invalid escapes */ switch(c) { case '\0': if (p >= s->buf_end) goto invalid_char; p++; break; case '\'': case '\"': case '\\': p++; break; case '\r': /* accept DOS and MAC newline sequences */ if (p[1] == '\n') { p++; } /* fall thru */ case '\n': /* ignore escaped newline sequence */ p++; if (sep != '`') s->line_num++; continue; default: if (c >= '0' && c <= '9') { if (!s->cur_func) goto invalid_escape; /* JSON case */ if (!(s->cur_func->js_mode & JS_MODE_STRICT) && sep != '`') goto parse_escape; if (c == '0' && !(p[1] >= '0' && p[1] <= '9')) { p++; c = '\0'; 
} else { if (c >= '8' || sep == '`') { /* Note: according to ES2021, \8 and \9 are not accepted in strict mode or in templates. */ goto invalid_escape; } else { if (do_throw) js_parse_error(s, "octal escape sequences are not allowed in strict mode"); } goto fail; } } else if (c >= 0x80) { const uint8_t *p_next; c = unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p_next); if (c > 0x10FFFF) { goto invalid_utf8; } p = p_next; /* LS or PS are skipped */ if (c == CP_LS || c == CP_PS) continue; } else { parse_escape: ret = lre_parse_escape(&p, TRUE); if (ret == -1) { invalid_escape: if (do_throw) js_parse_error(s, "malformed escape sequence in string literal"); goto fail; } else if (ret < 0) { /* ignore the '\' (could output a warning) */ p++; } else { c = ret; } } break; } } else if (c >= 0x80) { const uint8_t *p_next; c = unicode_from_utf8(p - 1, UTF8_CHAR_LEN_MAX, &p_next); if (c > 0x10FFFF) goto invalid_utf8; p = p_next; } if (string_buffer_putc(b, c)) goto fail; } token->val = TOK_STRING; token->u.str.sep = c; token->u.str.str = string_buffer_end(b); *pp = p; return 0; invalid_utf8: if (do_throw) js_parse_error(s, "invalid UTF-8 sequence"); goto fail; invalid_char: if (do_throw) js_parse_error(s, "unexpected end of string"); fail: string_buffer_free(b); return -1; } static inline BOOL token_is_pseudo_keyword(JSParseState *s, JSAtom atom) { return s->token.val == TOK_IDENT && s->token.u.ident.atom == atom && !s->token.u.ident.has_escape; } static __exception int js_parse_regexp(JSParseState *s) { const uint8_t *p; BOOL in_class; StringBuffer b_s, *b = &b_s; StringBuffer b2_s, *b2 = &b2_s; uint32_t c; p = s->buf_ptr; p++; in_class = FALSE; if (string_buffer_init(s->ctx, b, 32)) return -1; if (string_buffer_init(s->ctx, b2, 1)) goto fail; for(;;) { if (p >= s->buf_end) { eof_error: js_parse_error(s, "unexpected end of regexp"); goto fail; } c = *p++; if (c == '\n' || c == '\r') { goto eol_error; } else if (c == '/') { if (!in_class) break; } else if (c == '[') { in_class = TRUE; } else if (c == ']') { /* XXX: incorrect as the first character in a class */ in_class = FALSE; } else if (c == '\\') { if (string_buffer_putc8(b, c)) goto fail; c = *p++; if (c == '\n' || c == '\r') goto eol_error; else if (c == '\0' && p >= s->buf_end) goto eof_error; else if (c >= 0x80) { const uint8_t *p_next; c = unicode_from_utf8(p - 1, UTF8_CHAR_LEN_MAX, &p_next); if (c > 0x10FFFF) { goto invalid_utf8; } p = p_next; if (c == CP_LS || c == CP_PS) goto eol_error; } } else if (c >= 0x80) { const uint8_t *p_next; c = unicode_from_utf8(p - 1, UTF8_CHAR_LEN_MAX, &p_next); if (c > 0x10FFFF) { invalid_utf8: js_parse_error(s, "invalid UTF-8 sequence"); goto fail; } p = p_next; /* LS or PS are considered as line terminator */ if (c == CP_LS || c == CP_PS) { eol_error: js_parse_error(s, "unexpected line terminator in regexp"); goto fail; } } if (string_buffer_putc(b, c)) goto fail; } /* flags */ for(;;) { const uint8_t *p_next = p; c = *p_next++; if (c >= 0x80) { c = unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p_next); if (c > 0x10FFFF) { goto invalid_utf8; } } if (!lre_js_is_ident_next(c)) break; if (string_buffer_putc(b2, c)) goto fail; p = p_next; } s->token.val = TOK_REGEXP; s->token.u.regexp.body = string_buffer_end(b); s->token.u.regexp.flags = string_buffer_end(b2); s->buf_ptr = p; return 0; fail: string_buffer_free(b); string_buffer_free(b2); return -1; } static __exception int ident_realloc(JSContext *ctx, char **pbuf, size_t *psize, char *static_buf) { char *buf, *new_buf; size_t size, new_size; buf = *pbuf; size = *psize; 
if (size >= (SIZE_MAX / 3) * 2) new_size = SIZE_MAX; else new_size = size + (size >> 1); if (buf == static_buf) { new_buf = js_malloc(ctx, new_size); if (!new_buf) return -1; memcpy(new_buf, buf, size); } else { new_buf = js_realloc(ctx, buf, new_size); if (!new_buf) return -1; } *pbuf = new_buf; *psize = new_size; return 0; } /* convert a TOK_IDENT to a keyword when needed */ static void update_token_ident(JSParseState *s) { if (s->token.u.ident.atom <= JS_ATOM_LAST_KEYWORD || (s->token.u.ident.atom <= JS_ATOM_LAST_STRICT_KEYWORD && (s->cur_func->js_mode & JS_MODE_STRICT)) || (s->token.u.ident.atom == JS_ATOM_yield && ((s->cur_func->func_kind & JS_FUNC_GENERATOR) || (s->cur_func->func_type == JS_PARSE_FUNC_ARROW && !s->cur_func->in_function_body && s->cur_func->parent && (s->cur_func->parent->func_kind & JS_FUNC_GENERATOR)))) || (s->token.u.ident.atom == JS_ATOM_await && (s->is_module || (s->cur_func->func_kind & JS_FUNC_ASYNC) || s->cur_func->func_type == JS_PARSE_FUNC_CLASS_STATIC_INIT || (s->cur_func->func_type == JS_PARSE_FUNC_ARROW && !s->cur_func->in_function_body && s->cur_func->parent && ((s->cur_func->parent->func_kind & JS_FUNC_ASYNC) || s->cur_func->parent->func_type == JS_PARSE_FUNC_CLASS_STATIC_INIT))))) { if (s->token.u.ident.has_escape) { s->token.u.ident.is_reserved = TRUE; s->token.val = TOK_IDENT; } else { /* The keywords atoms are pre allocated */ s->token.val = s->token.u.ident.atom - 1 + TOK_FIRST_KEYWORD; } } } /* if the current token is an identifier or keyword, reparse it according to the current function type */ static void reparse_ident_token(JSParseState *s) { if (s->token.val == TOK_IDENT || (s->token.val >= TOK_FIRST_KEYWORD && s->token.val <= TOK_LAST_KEYWORD)) { s->token.val = TOK_IDENT; s->token.u.ident.is_reserved = FALSE; update_token_ident(s); } } /* 'c' is the first character. 
Return JS_ATOM_NULL in case of error */ static JSAtom parse_ident(JSParseState *s, const uint8_t **pp, BOOL *pident_has_escape, int c, BOOL is_private) { const uint8_t *p, *p1; char ident_buf[128], *buf; size_t ident_size, ident_pos; JSAtom atom; p = *pp; buf = ident_buf; ident_size = sizeof(ident_buf); ident_pos = 0; if (is_private) buf[ident_pos++] = '#'; for(;;) { p1 = p; if (c < 128) { buf[ident_pos++] = c; } else { ident_pos += unicode_to_utf8((uint8_t*)buf + ident_pos, c); } c = *p1++; if (c == '\\' && *p1 == 'u') { c = lre_parse_escape(&p1, TRUE); *pident_has_escape = TRUE; } else if (c >= 128) { c = unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p1); } if (!lre_js_is_ident_next(c)) break; p = p1; if (unlikely(ident_pos >= ident_size - UTF8_CHAR_LEN_MAX)) { if (ident_realloc(s->ctx, &buf, &ident_size, ident_buf)) { atom = JS_ATOM_NULL; goto done; } } } atom = JS_NewAtomLen(s->ctx, buf, ident_pos); done: if (unlikely(buf != ident_buf)) js_free(s->ctx, buf); *pp = p; return atom; } static __exception int next_token(JSParseState *s) { const uint8_t *p; int c; BOOL ident_has_escape; JSAtom atom; if (js_check_stack_overflow(s->ctx->rt, 0)) { return js_parse_error(s, "stack overflow"); } free_token(s, &s->token); p = s->last_ptr = s->buf_ptr; s->got_lf = FALSE; s->last_line_num = s->token.line_num; redo: s->token.line_num = s->line_num; s->token.ptr = p; c = *p; switch(c) { case 0: if (p >= s->buf_end) { s->token.val = TOK_EOF; } else { goto def_token; } break; case '`': if (js_parse_template_part(s, p + 1)) goto fail; p = s->buf_ptr; break; case '\'': case '\"': if (js_parse_string(s, c, TRUE, p + 1, &s->token, &p)) goto fail; break; case '\r': /* accept DOS and MAC newline sequences */ if (p[1] == '\n') { p++; } /* fall thru */ case '\n': p++; line_terminator: s->got_lf = TRUE; s->line_num++; goto redo; case '\f': case '\v': case ' ': case '\t': p++; goto redo; case '/': if (p[1] == '*') { /* comment */ p += 2; for(;;) { if (*p == '\0' && p >= s->buf_end) { js_parse_error(s, "unexpected end of comment"); goto fail; } if (p[0] == '*' && p[1] == '/') { p += 2; break; } if (*p == '\n') { s->line_num++; s->got_lf = TRUE; /* considered as LF for ASI */ p++; } else if (*p == '\r') { s->got_lf = TRUE; /* considered as LF for ASI */ p++; } else if (*p >= 0x80) { c = unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p); if (c == CP_LS || c == CP_PS) { s->got_lf = TRUE; /* considered as LF for ASI */ } else if (c == -1) { p++; /* skip invalid UTF-8 */ } } else { p++; } } goto redo; } else if (p[1] == '/') { /* line comment */ p += 2; skip_line_comment: for(;;) { if (*p == '\0' && p >= s->buf_end) break; if (*p == '\r' || *p == '\n') break; if (*p >= 0x80) { c = unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p); /* LS or PS are considered as line terminator */ if (c == CP_LS || c == CP_PS) { break; } else if (c == -1) { p++; /* skip invalid UTF-8 */ } } else { p++; } } goto redo; } else if (p[1] == '=') { p += 2; s->token.val = TOK_DIV_ASSIGN; } else { p++; s->token.val = c; } break; case '\\': if (p[1] == 'u') { const uint8_t *p1 = p + 1; int c1 = lre_parse_escape(&p1, TRUE); if (c1 >= 0 && lre_js_is_ident_first(c1)) { c = c1; p = p1; ident_has_escape = TRUE; goto has_ident; } else { /* XXX: syntax error? 
*/ } } goto def_token; case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case '$': /* identifier */ p++; ident_has_escape = FALSE; has_ident: atom = parse_ident(s, &p, &ident_has_escape, c, FALSE); if (atom == JS_ATOM_NULL) goto fail; s->token.u.ident.atom = atom; s->token.u.ident.has_escape = ident_has_escape; s->token.u.ident.is_reserved = FALSE; s->token.val = TOK_IDENT; update_token_ident(s); break; case '#': /* private name */ { const uint8_t *p1; p++; p1 = p; c = *p1++; if (c == '\\' && *p1 == 'u') { c = lre_parse_escape(&p1, TRUE); } else if (c >= 128) { c = unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p1); } if (!lre_js_is_ident_first(c)) { js_parse_error(s, "invalid first character of private name"); goto fail; } p = p1; ident_has_escape = FALSE; /* not used */ atom = parse_ident(s, &p, &ident_has_escape, c, TRUE); if (atom == JS_ATOM_NULL) goto fail; s->token.u.ident.atom = atom; s->token.val = TOK_PRIVATE_NAME; } break; case '.': if (p[1] == '.' && p[2] == '.') { p += 3; s->token.val = TOK_ELLIPSIS; break; } if (p[1] >= '0' && p[1] <= '9') { goto parse_number; } else { goto def_token; } break; case '0': /* in strict mode, octal literals are not accepted */ if (is_digit(p[1]) && (s->cur_func->js_mode & JS_MODE_STRICT)) { js_parse_error(s, "octal literals are deprecated in strict mode"); goto fail; } goto parse_number; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': /* number */ parse_number: { JSValue ret; const uint8_t *p1; int flags, radix; flags = ATOD_ACCEPT_BIN_OCT | ATOD_ACCEPT_LEGACY_OCTAL | ATOD_ACCEPT_UNDERSCORES; flags |= ATOD_ACCEPT_SUFFIX; #ifdef CONFIG_BIGNUM if (s->cur_func->js_mode & JS_MODE_MATH) { flags |= ATOD_MODE_BIGINT; if (s->cur_func->js_mode & JS_MODE_MATH) flags |= ATOD_TYPE_BIG_FLOAT; } #endif radix = 0; #ifdef CONFIG_BIGNUM s->token.u.num.exponent = 0; ret = js_atof2(s->ctx, (const char *)p, (const char **)&p, radix, flags, &s->token.u.num.exponent); #else ret = js_atof(s->ctx, (const char *)p, (const char **)&p, radix, flags); #endif if (JS_IsException(ret)) goto fail; /* reject `10instanceof Number` */ if (JS_VALUE_IS_NAN(ret) || lre_js_is_ident_next(unicode_from_utf8(p, UTF8_CHAR_LEN_MAX, &p1))) { JS_FreeValue(s->ctx, ret); js_parse_error(s, "invalid number literal"); goto fail; } s->token.val = TOK_NUMBER; s->token.u.num.val = ret; } break; case '*': if (p[1] == '=') { p += 2; s->token.val = TOK_MUL_ASSIGN; } else if (p[1] == '*') { if (p[2] == '=') { p += 3; s->token.val = TOK_POW_ASSIGN; } else { p += 2; s->token.val = TOK_POW; } } else { goto def_token; } break; case '%': if (p[1] == '=') { p += 2; s->token.val = TOK_MOD_ASSIGN; } else { goto def_token; } break; case '+': if (p[1] == '=') { p += 2; s->token.val = TOK_PLUS_ASSIGN; } else if (p[1] == '+') { p += 2; s->token.val = TOK_INC; } else { goto def_token; } break; case '-': if (p[1] == '=') { p += 2; s->token.val = TOK_MINUS_ASSIGN; } else if (p[1] == '-') { if (s->allow_html_comments && p[2] == '>' && s->last_line_num != s->line_num) { /* Annex B: `-->` at beginning of line is an 
html comment end. It extends to the end of the line. */ goto skip_line_comment; } p += 2; s->token.val = TOK_DEC; } else { goto def_token; } break; case '<': if (p[1] == '=') { p += 2; s->token.val = TOK_LTE; } else if (p[1] == '<') { if (p[2] == '=') { p += 3; s->token.val = TOK_SHL_ASSIGN; } else { p += 2; s->token.val = TOK_SHL; } } else if (s->allow_html_comments && p[1] == '!' && p[2] == '-' && p[3] == '-') { /* Annex B: handle `