jemalloc-sys-0.3.2/build.rs

// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

extern crate cc;
extern crate fs_extra;

use std::env;
use std::fs;
use std::fs::File;
use std::path::{Path, PathBuf};
use std::process::Command;

// `jemalloc` is known not to work on these targets:
const UNSUPPORTED_TARGETS: &[&str] = &[
    "rumprun",
    "bitrig",
    "emscripten",
    "fuchsia",
    "redox",
    "wasm32",
];

// `jemalloc-sys` is not tested on these targets in CI:
const UNTESTED_TARGETS: &[&str] = &["openbsd", "msvc"];

// `jemalloc`'s background_thread support is known not to work on these targets:
const NO_BG_THREAD_TARGETS: &[&str] = &["musl"];

// targets that don't support unprefixed `malloc`
//
// “it was found that the `realpath` function in libc would allocate with libc malloc
// (not jemalloc malloc), and then the standard library would free with jemalloc free,
// causing a segfault.”
// https://github.com/rust-lang/rust/commit/e3b414d8612314e74e2b0ebde1ed5c6997d28e8d
// https://github.com/rust-lang/rust/commit/536011d929ecbd1170baf34e09580e567c971f95
// https://github.com/rust-lang/rust/commit/9f3de647326fbe50e0e283b9018ab7c41abccde3
// https://github.com/rust-lang/rust/commit/ed015456a114ae907a36af80c06f81ea93182a24
const NO_UNPREFIXED_MALLOC: &[&str] = &["android", "dragonfly", "musl", "darwin"];

macro_rules! info {
    ($($args:tt)*) => { println!($($args)*) }
}

macro_rules! warning {
    ($arg:tt, $($args:tt)*) => {
        println!(concat!(concat!("cargo:warning=\"", $arg), "\""), $($args)*)
    }
}

fn main() {
    let target = env::var("TARGET").expect("TARGET was not set");
    let host = env::var("HOST").expect("HOST was not set");
    let num_jobs = env::var("NUM_JOBS").expect("NUM_JOBS was not set");
    let out_dir = PathBuf::from(env::var_os("OUT_DIR").expect("OUT_DIR was not set"));
    let src_dir = env::current_dir().expect("failed to get current directory");

    info!("TARGET={}", target.clone());
    info!("HOST={}", host.clone());
    info!("NUM_JOBS={}", num_jobs.clone());
    info!("OUT_DIR={:?}", out_dir);
    let build_dir = out_dir.join("build");
    info!("BUILD_DIR={:?}", build_dir);
    info!("SRC_DIR={:?}", src_dir);

    if UNSUPPORTED_TARGETS.iter().any(|i| target.contains(i)) {
        panic!("jemalloc does not support target: {}", target);
    }

    if UNTESTED_TARGETS.iter().any(|i| target.contains(i)) {
        warning!("jemalloc support for `{}` is untested", target);
    }

    if let Some(jemalloc) = env::var_os("JEMALLOC_OVERRIDE") {
        info!("jemalloc override set");
        let jemalloc = PathBuf::from(jemalloc);
        assert!(
            jemalloc.exists(),
            "Path to `jemalloc` in `JEMALLOC_OVERRIDE={}` does not exist",
            jemalloc.display()
        );
        println!(
            "cargo:rustc-link-search=native={}",
            jemalloc.parent().unwrap().display()
        );
        let stem = jemalloc.file_stem().unwrap().to_str().unwrap();
        let name = jemalloc.file_name().unwrap().to_str().unwrap();
        let kind = if name.ends_with(".a") { "static" } else { "dylib" };
        println!("cargo:rustc-link-lib={}={}", kind, &stem[3..]);
        return;
    }

    fs::create_dir_all(&build_dir).unwrap();

    // Disable -Wextra warnings - jemalloc doesn't compile free of warnings with
    // it enabled: https://github.com/jemalloc/jemalloc/issues/1196
    let compiler = cc::Build::new().extra_warnings(false).get_compiler();
    let cflags = compiler
        .args()
        .iter()
        .map(|s| s.to_str().unwrap())
        .collect::<Vec<_>>()
        .join(" ");
    info!("CC={:?}", compiler.path());
    info!("CFLAGS={:?}", cflags);

    assert!(out_dir.exists(), "OUT_DIR does not exist");
    let (jemalloc_repo_dir, run_autoconf) = if env::var("JEMALLOC_SYS_GIT_DEV_BRANCH").is_ok() {
        let jemalloc_repo = out_dir.join("jemalloc_repo");
        if jemalloc_repo.exists() {
            fs::remove_dir_all(jemalloc_repo.clone()).unwrap();
        }
        let mut cmd = Command::new("git");
        cmd.arg("clone")
            .arg("--depth=1")
            .arg("--branch=dev")
            .arg("--")
            .arg("https://github.com/jemalloc/jemalloc")
            .arg(format!("{}", jemalloc_repo.display()));
        run(&mut cmd);
        (jemalloc_repo, true)
    } else {
        (PathBuf::from("jemalloc"), false)
    };
    info!("JEMALLOC_REPO_DIR={:?}", jemalloc_repo_dir);

    let jemalloc_src_dir = out_dir.join("jemalloc");
    info!("JEMALLOC_SRC_DIR={:?}", jemalloc_src_dir);
    if jemalloc_src_dir.exists() {
        fs::remove_dir_all(jemalloc_src_dir.clone()).unwrap();
    }

    // Copy jemalloc submodule to the OUT_DIR
    let mut copy_options = fs_extra::dir::CopyOptions::new();
    copy_options.overwrite = true;
    copy_options.copy_inside = true;
    fs_extra::dir::copy(&jemalloc_repo_dir, &jemalloc_src_dir, &copy_options)
        .expect("failed to copy jemalloc source code to OUT_DIR");
    assert!(jemalloc_src_dir.exists());

    // Configuration files
    let config_files = ["configure" /*"VERSION"*/];

    // Verify that the configuration files are up-to-date
    let verify_configure = env::var("JEMALLOC_SYS_VERIFY_CONFIGURE").is_ok();
    if verify_configure || run_autoconf {
        info!("Verifying that configuration files in `configure/` are up-to-date... ");

        // The configuration files from the `configure/` directory should be used.
        // The jemalloc git submodule shouldn't contain any configuration files.
        assert!(
            !jemalloc_src_dir.join("configure").exists(),
            "the jemalloc submodule contains configuration files"
        );

        // Run autoconf:
        let mut cmd = Command::new("autoconf");
        cmd.current_dir(jemalloc_src_dir.clone());
        run(&mut cmd);

        for f in &config_files {
            use std::io::Read;
            fn read_content(file_path: &Path) -> String {
                assert!(
                    file_path.exists(),
                    "config file path `{}` does not exist",
                    file_path.display()
                );
                let mut file = File::open(file_path).expect("file not found");
                let mut content = String::new();
                file.read_to_string(&mut content)
                    .expect("failed to read file");
                content
            }

            if verify_configure {
                let current = read_content(&jemalloc_src_dir.join(f));
                let reference = read_content(&Path::new("configure").join(f));
                assert_eq!(
                    current, reference,
                    "the current and reference configuration files \"{}\" differ",
                    f
                );
            }
        }
    } else {
        // Copy the configuration files to jemalloc's source directory
        for f in &config_files {
            fs::copy(Path::new("configure").join(f), jemalloc_src_dir.join(f))
                .expect("failed to copy config file to OUT_DIR");
        }
    }

    // Run configure:
    let configure = jemalloc_src_dir.join("configure");
    let mut cmd = Command::new("sh");
    cmd.arg(
        configure
            .to_str()
            .unwrap()
            .replace("C:\\", "/c/")
            .replace("\\", "/"),
    )
    .current_dir(&build_dir)
    .env("CC", compiler.path())
    .env("CFLAGS", cflags.clone())
    .env("LDFLAGS", cflags.clone())
    .env("CPPFLAGS", cflags.clone())
    .arg("--disable-cxx");

    if target.contains("ios") {
        // newer iOS devices have 16kb page sizes:
        // closed: https://github.com/gnzlbg/jemallocator/issues/68
        cmd.arg("--with-lg-page=14");
    }
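
    // Illustrative only (exact flags depend on the host, target, and enabled
    // features): by the time `run(&mut cmd)` is reached below, the assembled
    // command for a default x86_64-unknown-linux-gnu build looks roughly like:
    //
    //   sh $OUT_DIR/jemalloc/configure --disable-cxx \
    //       --with-jemalloc-prefix=_rjem_ --with-private-namespace=_rjem_ \
    //       --host=x86_64-unknown-linux-gnu --build=x86_64-unknown-linux-gnu \
    //       --prefix=$OUT_DIR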
    // collect `malloc_conf` string:
    let mut malloc_conf = String::new();

    if let Some(bg) = BackgroundThreadSupport::new(&target) {
        // `jemalloc` is compiled with background thread run-time support on
        // available platforms by default, so there is nothing to do to enable
        // it.
        if bg.always_enabled {
            // Background thread support does not enable background threads at
            // run-time, just support for enabling them via run-time configuration
            // options (they are disabled by default).
            // The `background_threads` cargo feature forces background
            // threads to be enabled at run-time by default:
            malloc_conf += "background_thread:true";
        }
    } else {
        // Background thread run-time support is disabled by
        // disabling background threads at compile-time:
        malloc_conf += "background_thread:false";
    }

    if let Ok(malloc_conf_opts) = env::var("JEMALLOC_SYS_WITH_MALLOC_CONF") {
        malloc_conf += &format!(
            "{}{}",
            if malloc_conf.is_empty() { "" } else { "," },
            malloc_conf_opts
        );
    }

    if !malloc_conf.is_empty() {
        info!("--with-malloc-conf={}", malloc_conf);
        cmd.arg(format!("--with-malloc-conf={}", malloc_conf));
    }

    if let Ok(lg_page) = env::var("JEMALLOC_SYS_WITH_LG_PAGE") {
        info!("--with-lg-page={}", lg_page);
        cmd.arg(format!("--with-lg-page={}", lg_page));
    }

    if let Ok(lg_hugepage) = env::var("JEMALLOC_SYS_WITH_LG_HUGEPAGE") {
        info!("--with-lg-hugepage={}", lg_hugepage);
        cmd.arg(format!("--with-lg-hugepage={}", lg_hugepage));
    }

    if let Ok(lg_quantum) = env::var("JEMALLOC_SYS_WITH_LG_QUANTUM") {
        info!("--with-lg-quantum={}", lg_quantum);
        cmd.arg(format!("--with-lg-quantum={}", lg_quantum));
    }

    if let Ok(lg_vaddr) = env::var("JEMALLOC_SYS_WITH_LG_VADDR") {
        info!("--with-lg-vaddr={}", lg_vaddr);
        cmd.arg(format!("--with-lg-vaddr={}", lg_vaddr));
    }

    let mut use_prefix =
        env::var("CARGO_FEATURE_UNPREFIXED_MALLOC_ON_SUPPORTED_PLATFORMS").is_err();

    if !use_prefix && NO_UNPREFIXED_MALLOC.iter().any(|i| target.contains(i)) {
        warning!(
            "Unprefixed `malloc` requested on unsupported platform `{}` => using prefixed `malloc`",
            target
        );
        use_prefix = true;
    }

    if use_prefix {
        cmd.arg("--with-jemalloc-prefix=_rjem_");
        println!("cargo:rustc-cfg=prefixed");
        info!("--with-jemalloc-prefix=_rjem_");
    }
    cmd.arg("--with-private-namespace=_rjem_");

    if env::var("CARGO_FEATURE_DEBUG").is_ok() {
        info!("CARGO_FEATURE_DEBUG set");
        cmd.arg("--enable-debug");
    }

    if env::var("CARGO_FEATURE_PROFILING").is_ok() {
        info!("CARGO_FEATURE_PROFILING set");
        cmd.arg("--enable-prof");
    }

    if env::var("CARGO_FEATURE_STATS").is_ok() {
        info!("CARGO_FEATURE_STATS set");
        cmd.arg("--enable-stats");
    }

    if env::var("CARGO_FEATURE_DISABLE_INITIAL_EXEC_TLS").is_ok() {
        info!("CARGO_FEATURE_DISABLE_INITIAL_EXEC_TLS set");
        cmd.arg("--disable-initial-exec-tls");
    }

    cmd.arg(format!("--host={}", gnu_target(&target)));
    cmd.arg(format!("--build={}", gnu_target(&host)));
    cmd.arg(format!("--prefix={}", out_dir.display()));

    run(&mut cmd);

    // Make:
    let make = make_cmd(&host);
    run(Command::new(make)
        .current_dir(&build_dir)
        .arg("srcroot=../jemalloc/")
        .arg("-j")
        .arg(num_jobs.clone()));

    if env::var("JEMALLOC_SYS_RUN_JEMALLOC_TESTS").is_ok() {
        info!("Building and running jemalloc tests...");
        // Make tests:
        run(Command::new(make)
            .current_dir(&build_dir)
            .arg("srcroot=../jemalloc/")
            .arg("-j")
            .arg(num_jobs.clone())
            .arg("tests"));

        // Run tests:
        run(Command::new(make)
            .current_dir(&build_dir)
            .arg("srcroot=../jemalloc/")
            .arg("check"));
    }

    // Make install:
    run(Command::new(make)
        .current_dir(&build_dir)
        .arg("srcroot=../jemalloc/")
        .arg("install_lib_static")
        .arg("install_include")
        .arg("-j")
        .arg(num_jobs.clone()));

    println!("cargo:root={}", out_dir.display());

    // Linkage directives to pull in jemalloc and its dependencies.
    //
    // On some platforms we need to be sure to link in `pthread` which jemalloc
    // depends on, and specifically on android we need to also link to libgcc.
    // Currently jemalloc is compiled with gcc which will generate calls to
    // intrinsics that are libgcc specific (e.g. those intrinsics aren't present in
    // libcompiler-rt), so link that in to get that support.
    if target.contains("windows") {
        println!("cargo:rustc-link-lib=static=jemalloc");
    } else {
        println!("cargo:rustc-link-lib=static=jemalloc_pic");
    }
    println!("cargo:rustc-link-search=native={}/lib", build_dir.display());
    if target.contains("android") {
        println!("cargo:rustc-link-lib=gcc");
    } else if !target.contains("windows") {
        println!("cargo:rustc-link-lib=pthread");
    }
    println!("cargo:rerun-if-changed=jemalloc");
}
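
// Illustrative only (a hypothetical test, not part of the upstream script;
// cargo never compiles build scripts with `--test`, so this is documentation
// in code form): `gnu_target` below rewrites only the four Windows triples
// that autoconf does not recognize and passes every other triple through
// verbatim.
#[cfg(test)]
mod gnu_target_sketch {
    #[test]
    fn maps_msvc_and_mingw_triples() {
        assert_eq!(super::gnu_target("i686-pc-windows-msvc"), "i686-pc-win32");
        assert_eq!(
            super::gnu_target("x86_64-pc-windows-gnu"),
            "x86_64-w64-mingw32"
        );
        // Non-Windows triples are passed through unchanged:
        assert_eq!(
            super::gnu_target("x86_64-unknown-linux-gnu"),
            "x86_64-unknown-linux-gnu"
        );
    }
}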
fn run(cmd: &mut Command) {
    println!("running: {:?}", cmd);
    let status = match cmd.status() {
        Ok(status) => status,
        Err(e) => panic!("failed to execute command: {}", e),
    };
    if !status.success() {
        panic!(
            "command did not execute successfully: {:?}\n\
             expected success, got: {}",
            cmd, status
        );
    }
}

fn gnu_target(target: &str) -> String {
    match target {
        "i686-pc-windows-msvc" => "i686-pc-win32".to_string(),
        "x86_64-pc-windows-msvc" => "x86_64-pc-win32".to_string(),
        "i686-pc-windows-gnu" => "i686-w64-mingw32".to_string(),
        "x86_64-pc-windows-gnu" => "x86_64-w64-mingw32".to_string(),
        s => s.to_string(),
    }
}

fn make_cmd(host: &str) -> &'static str {
    const GMAKE_HOSTS: &[&str] = &["bitrig", "dragonfly", "freebsd", "netbsd", "openbsd"];
    if GMAKE_HOSTS.iter().any(|i| host.contains(i)) {
        "gmake"
    } else if host.contains("windows") {
        "mingw32-make"
    } else {
        "make"
    }
}

struct BackgroundThreadSupport {
    always_enabled: bool,
}

impl BackgroundThreadSupport {
    fn new(target: &str) -> Option<Self> {
        let runtime_support = env::var("CARGO_FEATURE_BACKGROUND_THREADS_RUNTIME_SUPPORT").is_ok();
        let always_enabled = env::var("CARGO_FEATURE_BACKGROUND_THREADS").is_ok();

        if !runtime_support {
            assert!(
                !always_enabled,
                "enabling `background_threads` requires `background_threads_runtime_support`"
            );
            return None;
        }

        if NO_BG_THREAD_TARGETS.iter().any(|i| target.contains(i)) {
            warning!(
                "`background_threads_runtime_support` not supported for `{}`",
                target
            );
        }

        Some(Self { always_enabled })
    }
}
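
// Illustrative only: when the `prefixed` cfg is emitted above, dependent
// crates reach jemalloc through the `_rjem_`-prefixed C symbols produced by
// `--with-jemalloc-prefix=_rjem_`. A hypothetical minimal binding (the real
// declarations live in this crate's `src/lib.rs`) would look like:
//
//     extern "C" {
//         fn _rjem_malloc(size: usize) -> *mut libc::c_void;
//         fn _rjem_free(ptr: *mut libc::c_void);
//     }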
jemalloc-sys-0.3.2/Cargo.toml.orig

[package]
name = "jemalloc-sys"
version = "0.3.2"
authors = [
    "Alex Crichton",
    "Gonzalo Brito Gadeschi",
]
build = "build.rs"
links = "jemalloc"
license = "MIT/Apache-2.0"
readme = "README.md"
repository = "https://github.com/gnzlbg/jemallocator"
homepage = "https://github.com/gnzlbg/jemallocator"
documentation = "https://docs.rs/jemallocator-sys"
keywords = ["allocator", "jemalloc"]
description = """
Rust FFI bindings to jemalloc
"""
edition = "2015"

[badges]
appveyor = { repository = "gnzlbg/jemallocator" }
travis-ci = { repository = "gnzlbg/jemallocator" }
codecov = { repository = "gnzlbg/jemallocator" }
is-it-maintained-issue-resolution = { repository = "gnzlbg/jemallocator" }
is-it-maintained-open-issues = { repository = "gnzlbg/jemallocator" }
maintenance = { status = "actively-developed" }

[lib]
test = false
bench = false

[dependencies]
libc = { version = "^0.2.8", default-features = false }

[build-dependencies]
cc = "^1.0.13"
fs_extra = "^1.1"

[features]
default = ["background_threads_runtime_support"]
profiling = []
debug = []
background_threads_runtime_support = []
background_threads = [ "background_threads_runtime_support" ]
stats = []
unprefixed_malloc_on_supported_platforms = []
disable_initial_exec_tls = []

[package.metadata.docs.rs]
rustdoc-args = [ "--cfg", "jemallocator_docs" ]

jemalloc-sys-0.3.2/Cargo.toml

# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)

[package]
edition = "2015"
name = "jemalloc-sys"
version = "0.3.2"
authors = ["Alex Crichton", "Gonzalo Brito Gadeschi"]
build = "build.rs"
links = "jemalloc"
description = "Rust FFI bindings to jemalloc\n"
homepage = "https://github.com/gnzlbg/jemallocator"
documentation = "https://docs.rs/jemallocator-sys"
readme = "README.md"
keywords = ["allocator", "jemalloc"]
license = "MIT/Apache-2.0"
repository = "https://github.com/gnzlbg/jemallocator"

[package.metadata.docs.rs]
rustdoc-args = ["--cfg", "jemallocator_docs"]

[lib]
test = false
bench = false

[dependencies.libc]
version = "^0.2.8"
default-features = false

[build-dependencies.cc]
version = "^1.0.13"

[build-dependencies.fs_extra]
version = "^1.1"

[features]
background_threads = ["background_threads_runtime_support"]
background_threads_runtime_support = []
debug = []
default = ["background_threads_runtime_support"]
disable_initial_exec_tls = []
profiling = []
stats = []
unprefixed_malloc_on_supported_platforms = []

[badges.appveyor]
repository = "gnzlbg/jemallocator"

[badges.codecov]
repository = "gnzlbg/jemallocator"

[badges.is-it-maintained-issue-resolution]
repository = "gnzlbg/jemallocator"

[badges.is-it-maintained-open-issues]
repository = "gnzlbg/jemallocator"

[badges.maintenance]
status = "actively-developed"

[badges.travis-ci]
repository = "gnzlbg/jemallocator"

jemalloc-sys-0.3.2/configure/configure

#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
## -------------------- ##
## M4sh Initialization. ##
## -------------------- ##

# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
  emulate sh
  NULLCMD=:
  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
  # is contrary to our usage.  Disable this feature.
  alias -g '${1+"$@"}'='"$@"'
  setopt NO_GLOB_SUBST
else
  case `(set -o) 2>/dev/null` in #(
  *posix*) :
    set -o posix ;; #(
  *) :
     ;;
esac
fi


as_nl='
'
export as_nl
# Printing a long string crashes Solaris 7 /usr/bin/printf.
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
# Prefer a ksh shell builtin over an external printf program on Solaris,
# but without wasting forks for bash or zsh.
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. 
{ _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." 
if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. 
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. 
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME= PACKAGE_TARNAME= PACKAGE_VERSION= PACKAGE_STRING= PACKAGE_BUGREPORT= PACKAGE_URL= ac_unique_file="Makefile.in" # Factoring default headers for most tests. ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_subst_vars='LTLIBOBJS LIBOBJS cfgoutputs_out cfgoutputs_in cfghdrs_out cfghdrs_in enable_initial_exec_tls enable_zone_allocator enable_tls enable_lazy_lock jemalloc_version_gid jemalloc_version_nrev jemalloc_version_bugfix jemalloc_version_minor jemalloc_version_major jemalloc_version enable_log enable_cache_oblivious enable_xmalloc enable_utrace enable_fill enable_prof enable_stats enable_debug je_ install_suffix private_namespace JEMALLOC_CPREFIX JEMALLOC_PREFIX AUTOCONF LD RANLIB INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM enable_autogen RPATH_EXTRA LM CC_MM DUMP_SYMS AROUT ARFLAGS MKLIB TEST_LD_MODE LDTARGET CTARGET PIC_CFLAGS SOREV EXTRA_LDFLAGS DSO_LDFLAGS link_whole_archive libprefix exe a o importlib so LD_PRELOAD_VAR RPATH abi AWK NM AR host_os host_vendor host_cpu host build_os build_vendor build_cpu build EGREP GREP EXTRA_CXXFLAGS SPECIFIED_CXXFLAGS CONFIGURE_CXXFLAGS enable_cxx HAVE_CXX14 ac_ct_CXX CXXFLAGS CXX CPP EXTRA_CFLAGS SPECIFIED_CFLAGS CONFIGURE_CFLAGS OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC XSLROOT XSLTPROC MANDIR DATADIR LIBDIR INCLUDEDIR BINDIR PREFIX abs_objroot objroot abs_srcroot srcroot rev CONFIG target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking with_xslroot enable_cxx with_lg_vaddr with_rpath enable_autogen with_mangling with_jemalloc_prefix with_export with_private_namespace with_install_suffix with_malloc_conf enable_debug enable_stats enable_prof enable_prof_libunwind with_static_libunwind enable_prof_libgcc enable_prof_gcc enable_fill enable_utrace enable_xmalloc enable_cache_oblivious enable_log with_lg_quantum with_lg_page with_lg_hugepage with_lg_page_sizes with_version enable_syscall enable_lazy_lock enable_zone_allocator enable_initial_exec_tls ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP CXX CXXFLAGS CCC' # Initialize some variables set by options. 
ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) 
ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; 
-srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? 
"unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures this package to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. 
See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --disable-cxx Disable C++ integration --enable-autogen Automatically regenerate configure output --enable-debug Build debugging code --disable-stats Disable statistics calculation/reporting --enable-prof Enable allocation profiling --enable-prof-libunwind Use libunwind for backtracing --disable-prof-libgcc Do not use libgcc for backtracing --disable-prof-gcc Do not use gcc intrinsics for backtracing --disable-fill Disable support for junk/zero filling --enable-utrace Enable utrace(2)-based tracing --enable-xmalloc Support xmalloc option --disable-cache-oblivious Disable support for cache-oblivious allocation alignment --enable-log Support debug logging --disable-syscall Disable use of syscall(2) --enable-lazy-lock Enable lazy locking (only lock when multi-threaded) --disable-zone-allocator Disable zone allocator for Darwin --disable-initial-exec-tls Disable the initial-exec tls model Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] 
  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
  --with-xslroot=<path>   XSL stylesheet root path
  --with-lg-vaddr=<lg-vaddr>
                          Number of significant virtual address bits
  --with-rpath=<rpath>    Colon-separated rpath (ELF systems only)
  --with-mangling=<map>   Mangle symbols in <map>
  --with-jemalloc-prefix=<prefix>
                          Prefix to prepend to all public APIs
  --without-export        disable exporting jemalloc public APIs
  --with-private-namespace=<prefix>
                          Prefix to prepend to all library-private APIs
  --with-install-suffix=<suffix>
                          Suffix to append to all installed files
  --with-malloc-conf=<malloc_conf>
                          config.malloc_conf options string
  --with-static-libunwind=<libunwind.a>
                          Path to static libunwind library; use rather than
                          dynamically linking
  --with-lg-quantum=<lg-quantum>
                          Base 2 log of minimum allocation alignment
  --with-lg-page=<lg-page>
                          Base 2 log of system page size
  --with-lg-hugepage=<lg-hugepage>
                          Base 2 log of system huge page size
  --with-lg-page-sizes=<lg-page-sizes>
                          Base 2 logs of system page sizes to support
  --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
                          Version string

Some influential environment variables:
  CC          C compiler command
  CFLAGS      C compiler flags
  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
              nonstandard directory <lib dir>
  LIBS        libraries to pass to the linker, e.g. -l<library>
  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
              you have headers in a nonstandard directory <include dir>
  CPP         C preprocessor
  CXX         C++ compiler command
  CXXFLAGS    C++ compiler flags

Use these variables to override the choices made by `configure' or to help
it to find libraries and programs with nonstandard names/locations.

Report bugs to the package provider.
_ACEOF
ac_status=$?
fi

if test "$ac_init_help" = "recursive"; then
  # If there are subdirs, report their specific --help.
  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
    test -d "$ac_dir" ||
      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
      continue
    ac_builddir=.

case "$ac_dir" in
.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
*)
  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
  # A ".." for each directory in $ac_dir_suffix.
  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
  case $ac_top_builddir_sub in
  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
  esac ;;
esac
ac_abs_top_builddir=$ac_pwd
ac_abs_builddir=$ac_pwd$ac_dir_suffix
# for backward compatibility:
ac_top_builddir=$ac_top_build_prefix

case $srcdir in
  .)  # We are building in place.
    ac_srcdir=.
    ac_top_srcdir=$ac_top_builddir_sub
    ac_abs_top_srcdir=$ac_pwd ;;
  [\\/]* | ?:[\\/]* )  # Absolute name.
    ac_srcdir=$srcdir$ac_dir_suffix;
    ac_top_srcdir=$srcdir
    ac_abs_top_srcdir=$srcdir ;;
  *) # Relative name.
    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
    ac_top_srcdir=$ac_top_build_prefix$srcdir
    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
esac
ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix

    cd "$ac_dir" || { ac_status=$?; continue; }
    # Check for guested configure.
    if test -f "$ac_srcdir/configure.gnu"; then
      echo &&
      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
    elif test -f "$ac_srcdir/configure"; then
      echo &&
      $SHELL "$ac_srcdir/configure" --help=recursive
    else
      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
    fi || ac_status=$?
    cd "$ac_pwd" || { ac_status=$?; break; }
  done
fi

test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
  cat <<\_ACEOF
configure
generated by GNU Autoconf 2.69

Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. 
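# Illustrative sketch (not part of the generated autoconf output): every
# ac_fn_c_try_* helper defined in this block is driven the same way. A call
# site writes a probe program to conftest.$ac_ext and branches on the
# helper's exit status, typically to set a cache variable, e.g.:
#
#   cat confdefs.h - <<_SKETCH >conftest.$ac_ext
#   int main () { return 0; }
#   _SKETCH
#   if ac_fn_c_try_compile "$LINENO"; then :
#     my_cv_probe=yes    # hypothetical cache variable, for illustration only
#   else
#     my_cv_probe=no
#   fi
#
# The real call sites later in this script follow the same pattern.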
ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_compute_int LINENO EXPR VAR INCLUDES # -------------------------------------------- # Tries to find the compile-time value of EXPR in a program that includes # INCLUDES, setting VAR accordingly. Returns whether the value could be # computed ac_fn_c_compute_int () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=0 ac_mid=0 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid; break else as_fn_arith $ac_mid + 1 && ac_lo=$as_val if test $ac_lo -le $ac_mid; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) < 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=-1 ac_mid=-1 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=$ac_mid; break else as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val if test $ac_mid -le $ac_hi; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else ac_lo= ac_hi= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid else as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in #(( ?*) eval "$3=\$ac_lo"; ac_retval=0 ;; '') ac_retval=1 ;; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 static long int longval () { return $2; } static unsigned long int ulongval () { return $2; } #include #include int main () { FILE *f = fopen ("conftest.val", "w"); if (! 
f) return 1; if (($2) < 0) { long int i = longval (); if (i != ($2)) return 1; fprintf (f, "%ld", i); } else { unsigned long int i = ulongval (); if (i != ($2)) return 1; fprintf (f, "%lu", i); } /* Do not output a trailing newline, as this causes \r\n confusion on some platforms. */ return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : echo >>conftest.val; read $3 &5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_aux_dir= for ac_dir in build-aux "$srcdir"/build-aux; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. CONFIGURE_CFLAGS= SPECIFIED_CFLAGS="${CFLAGS}" CONFIGURE_CXXFLAGS= SPECIFIED_CXXFLAGS="${CXXFLAGS}" CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` rev=2 srcroot=$srcdir if test "x${srcroot}" = "x." ; then srcroot="" else srcroot="${srcroot}/" fi abs_srcroot="`cd \"${srcdir}\"; pwd`/" objroot="" abs_objroot="`pwd`/" if test "x$prefix" = "xNONE" ; then prefix="/usr/local" fi if test "x$exec_prefix" = "xNONE" ; then exec_prefix=$prefix fi PREFIX=$prefix BINDIR=`eval echo $bindir` BINDIR=`eval echo $BINDIR` INCLUDEDIR=`eval echo $includedir` INCLUDEDIR=`eval echo $INCLUDEDIR` LIBDIR=`eval echo $libdir` LIBDIR=`eval echo $LIBDIR` DATADIR=`eval echo $datadir` DATADIR=`eval echo $DATADIR` MANDIR=`eval echo $mandir` MANDIR=`eval echo $MANDIR` # Extract the first word of "xsltproc", so it can be a program name with args. set dummy xsltproc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XSLTPROC+:} false; then : $as_echo_n "(cached) " >&6 else case $XSLTPROC in [\\/]* | ?:[\\/]*) ac_cv_path_XSLTPROC="$XSLTPROC" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false" ;; esac fi XSLTPROC=$ac_cv_path_XSLTPROC if test -n "$XSLTPROC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLTPROC" >&5 $as_echo "$XSLTPROC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" else DEFAULT_XSLROOT="" fi # Check whether --with-xslroot was given. 
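# Illustrative note (not part of the generated autoconf output): the block
# below is the shape autoconf emits for an AC_ARG_WITH probe. The option
# parser near the top of the script sets with_xslroot (and the sentinel
# tested via ${with_xslroot+set}) when the caller passes, for example:
#
#   ./configure --with-xslroot=/usr/share/xml/docbook/stylesheet/docbook-xsl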
if test "${with_xslroot+set}" = set; then : withval=$with_xslroot; if test "x$with_xslroot" = "xno" ; then XSLROOT="${DEFAULT_XSLROOT}" else XSLROOT="${with_xslroot}" fi else XSLROOT="${DEFAULT_XSLROOT}" fi CFLAGS=$CFLAGS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. 
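# Illustrative example (not part of the generated autoconf output) of the
# suffix extraction performed by the loop below:
#
#   expr "conftest.exe" : '[^.]*\(\..*\)'    # prints ".exe"
#
# i.e. everything from the first dot onward is recorded in ac_cv_exeext.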
for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. 
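# Illustrative note (not part of the generated autoconf output): when this
# script is invoked for a different host, e.g.
#
#   ./configure --host=aarch64-unknown-linux-gnu
#
# and the conftest binary cannot run on the build machine, the check below
# promotes cross_compiling from "maybe" to "yes", and later run tests fall
# back to compile-only probes such as the bounded search in
# ac_fn_c_compute_int.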
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x$GCC" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5 $as_echo_n "checking whether compiler is MSVC... " >&6; } if ${je_cv_msvc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef _MSC_VER int fail-1; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_msvc=yes else je_cv_msvc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5 $as_echo "$je_cv_msvc" >&6; } fi je_cv_cray_prgenv_wrapper="" if test "x${PE_ENV}" != "x" ; then case "${CC}" in CC|cc) je_cv_cray_prgenv_wrapper="yes" ;; *) ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is cray" >&5 $as_echo_n "checking whether compiler is cray... " >&6; } if ${je_cv_cray+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef _CRAYC int fail-1; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cray=yes else je_cv_cray=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray" >&5 $as_echo "$je_cv_cray" >&6; } if test "x${je_cv_cray}" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cray compiler version is 8.4" >&5 $as_echo_n "checking whether cray compiler version is 8.4... " >&6; } if ${je_cv_cray_84+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) int fail-1; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cray_84=yes else je_cv_cray_84=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray_84" >&5 $as_echo "$je_cv_cray_84" >&6; } fi if test "x$GCC" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu11" >&5 $as_echo_n "checking whether compiler supports -std=gnu11... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-std=gnu11 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-std=gnu11 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_HAS_RESTRICT 1 _ACEOF else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5 $as_echo_n "checking whether compiler supports -std=gnu99... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-std=gnu99 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-std=gnu99 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_HAS_RESTRICT 1 _ACEOF fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 $as_echo_n "checking whether compiler supports -Wall... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wall if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wall { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wshorten-64-to-32" >&5 $as_echo_n "checking whether compiler supports -Wshorten-64-to-32... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wshorten-64-to-32 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wshorten-64-to-32 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wsign-compare" >&5 $as_echo_n "checking whether compiler supports -Wsign-compare... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wsign-compare if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wsign-compare { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wundef" >&5 $as_echo_n "checking whether compiler supports -Wundef... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wundef if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wundef { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wno-format-zero-length" >&5 $as_echo_n "checking whether compiler supports -Wno-format-zero-length... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wno-format-zero-length if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wno-format-zero-length { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5 $as_echo_n "checking whether compiler supports -pipe... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-pipe if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-pipe { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 $as_echo_n "checking whether compiler supports -g3... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-g3 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-g3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi elif test "x$je_cv_msvc" = "xyes" ; then CC="$CC -nologo" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Zi" >&5 $as_echo_n "checking whether compiler supports -Zi... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Zi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Zi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -MT" >&5 $as_echo_n "checking whether compiler supports -MT... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-MT if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-MT { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -W3" >&5 $as_echo_n "checking whether compiler supports -W3... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-W3 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-W3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -FS" >&5 $as_echo_n "checking whether compiler supports -FS... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-FS if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-FS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi T_APPEND_V=-I${srcdir}/include/msvc_compat if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi fi if test "x$je_cv_cray" = "xyes" ; then if test "x$je_cv_cray_84" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hipa2" >&5 $as_echo_n "checking whether compiler supports -hipa2... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hipa2 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hipa2 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnognu" >&5 $as_echo_n "checking whether compiler supports -hnognu... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hnognu if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hnognu { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=128" >&5 $as_echo_n "checking whether compiler supports -hnomessage=128... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hnomessage=128 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hnomessage=128 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=1357" >&5 $as_echo_n "checking whether compiler supports -hnomessage=1357... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hnomessage=1357 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hnomessage=1357 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Check whether --enable-cxx was given. if test "${enable_cxx+set}" = set; then : enableval=$enable_cxx; if test "x$enable_cxx" = "xno" ; then enable_cxx="0" else enable_cxx="1" fi else enable_cxx="1" fi if test "x$enable_cxx" = "x1" ; then # =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html # =========================================================================== # # SYNOPSIS # # AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) # # DESCRIPTION # # Check for baseline language coverage in the compiler for the specified # version of the C++ standard. If necessary, add switches to CXX and # CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) # or '14' (for the C++14 standard). # # The second argument, if specified, indicates whether you insist on an # extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. # -std=c++11). If neither is specified, you get whatever works, with # preference for an extended mode. 
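#
#   As a purely illustrative instantiation (the argument values here are
#   an editor's example, not necessarily what generated this script), a
#   configure.ac could request optional C++14 support in extended mode
#   with:
#
#     AX_CXX_COMPILE_STDCXX(14, [ext], [optional])
#
#   The effect of the third argument is described next.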
# # The third argument, if specified 'mandatory' or if left unspecified, # indicates that baseline support for the specified C++ standard is # required and that the macro should error out if no mode with that # support is found. If specified 'optional', then configuration proceeds # regardless, after defining HAVE_CXX${VERSION} if and only if a # supporting mode is found. # # LICENSE # # Copyright (c) 2008 Benjamin Kosnik # Copyright (c) 2012 Zack Weinberg # Copyright (c) 2013 Roy Stogner # Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov # Copyright (c) 2015 Paul Norman # Copyright (c) 2015 Moritz Klammler # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 4 ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
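# (Editor's note: the surrounding loop is autoconf's standard PATH walk,
# trying each candidate C++ compiler name in each PATH entry.  A
# free-standing sketch of the same technique, assuming a POSIX shell;
# the names are illustrative, not autoconf's:
#
#   save_IFS=$IFS; IFS=:
#   for dir in $PATH; do
#     for prog in g++ c++ clang++; do
#       if [ -x "${dir:-.}/$prog" ]; then CXX=$prog; break 2; fi
#     done
#   done
#   IFS=$save_IFS
#
# The real loop also honors $ac_executable_extensions so that, e.g.,
# cl.exe is found on Windows.)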
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ax_cxx_compile_cxx14_required=false ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ac_success=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++14 features by default" >&5 $as_echo_n "checking whether $CXX supports C++14 features by default... " >&6; } if ${ax_cv_cxx_compile_cxx14+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ // If the compiler admits that it is not ready for C++11, why torture it? // Hopefully, this will speed up the test. #ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201103L #error "This is not a C++11 compiler" #else namespace cxx11 { namespace test_static_assert { template struct check { static_assert(sizeof(int) <= sizeof(T), "not big enough"); }; } namespace test_final_override { struct Base { virtual void f() {} }; struct Derived : public Base { virtual void f() override {} }; } namespace test_double_right_angle_brackets { template < typename T > struct check {}; typedef check single_type; typedef check> double_type; typedef check>> triple_type; typedef check>>> quadruple_type; } namespace test_decltype { int f() { int a = 1; decltype(a) b = 2; return a + b; } } namespace test_type_deduction { template < typename T1, typename T2 > struct is_same { static const bool value = false; }; template < typename T > struct is_same { static const bool value = true; }; template < typename T1, typename T2 > auto add(T1 a1, T2 a2) -> decltype(a1 + a2) { return a1 + a2; } int test(const int c, volatile int v) { static_assert(is_same::value == true, ""); static_assert(is_same::value == false, ""); static_assert(is_same::value == false, ""); auto ac = c; auto av = v; auto sumi = ac + av + 'x'; auto sumf = ac + av + 1.0; static_assert(is_same::value == true, ""); static_assert(is_same::value == true, ""); static_assert(is_same::value == true, ""); static_assert(is_same::value == false, ""); static_assert(is_same::value == true, ""); return (sumf > 0.0) ? sumi : add(c, v); } } namespace test_noexcept { int f() { return 0; } int g() noexcept { return 0; } static_assert(noexcept(f()) == false, ""); static_assert(noexcept(g()) == true, ""); } namespace test_constexpr { template < typename CharT > unsigned long constexpr strlen_c_r(const CharT *const s, const unsigned long acc) noexcept { return *s ? 
strlen_c_r(s + 1, acc + 1) : acc; } template < typename CharT > unsigned long constexpr strlen_c(const CharT *const s) noexcept { return strlen_c_r(s, 0UL); } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("1") == 1UL, ""); static_assert(strlen_c("example") == 7UL, ""); static_assert(strlen_c("another\0example") == 7UL, ""); } namespace test_rvalue_references { template < int N > struct answer { static constexpr int value = N; }; answer<1> f(int&) { return answer<1>(); } answer<2> f(const int&) { return answer<2>(); } answer<3> f(int&&) { return answer<3>(); } void test() { int i = 0; const int c = 0; static_assert(decltype(f(i))::value == 1, ""); static_assert(decltype(f(c))::value == 2, ""); static_assert(decltype(f(0))::value == 3, ""); } } namespace test_uniform_initialization { struct test { static const int zero {}; static const int one {1}; }; static_assert(test::zero == 0, ""); static_assert(test::one == 1, ""); } namespace test_lambdas { void test1() { auto lambda1 = [](){}; auto lambda2 = lambda1; lambda1(); lambda2(); } int test2() { auto a = [](int i, int j){ return i + j; }(1, 2); auto b = []() -> int { return '0'; }(); auto c = [=](){ return a + b; }(); auto d = [&](){ return c; }(); auto e = [a, &b](int x) mutable { const auto identity = [](int y){ return y; }; for (auto i = 0; i < a; ++i) a += b--; return x + identity(a + b); }(0); return a + b + c + d + e; } int test3() { const auto nullary = [](){ return 0; }; const auto unary = [](int x){ return x; }; using nullary_t = decltype(nullary); using unary_t = decltype(unary); const auto higher1st = [](nullary_t f){ return f(); }; const auto higher2nd = [unary](nullary_t f1){ return [unary, f1](unary_t f2){ return f2(unary(f1())); }; }; return higher1st(nullary) + higher2nd(nullary)(unary); } } namespace test_variadic_templates { template struct sum; template struct sum { static constexpr auto value = N0 + sum::value; }; template <> struct sum<> { static constexpr auto value = 0; }; static_assert(sum<>::value == 0, ""); static_assert(sum<1>::value == 1, ""); static_assert(sum<23>::value == 23, ""); static_assert(sum<1, 2>::value == 3, ""); static_assert(sum<5, 5, 11>::value == 21, ""); static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); } // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function // because of this. namespace test_template_alias_sfinae { struct foo {}; template using member = typename T::member_type; template void func(...) {} template void func(member*) {} void test(); void test() { func(0); } } } // namespace cxx11 #endif // __cplusplus >= 201103L // If the compiler admits that it is not ready for C++14, why torture it? // Hopefully, this will speed up the test. #ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201402L #error "This is not a C++14 compiler" #else namespace cxx14 { namespace test_polymorphic_lambdas { int test() { const auto lambda = [](auto&&... args){ const auto istiny = [](auto x){ return (sizeof(x) == 1UL) ? 1 : 0; }; const int aretiny[] = { istiny(args)... 
}; return aretiny[0]; }; return lambda(1, 1L, 1.0f, '1'); } } namespace test_binary_literals { constexpr auto ivii = 0b0000000000101010; static_assert(ivii == 42, "wrong value"); } namespace test_generalized_constexpr { template < typename CharT > constexpr unsigned long strlen_c(const CharT *const s) noexcept { auto length = 0UL; for (auto p = s; *p; ++p) ++length; return length; } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("x") == 1UL, ""); static_assert(strlen_c("test") == 4UL, ""); static_assert(strlen_c("another\0test") == 7UL, ""); } namespace test_lambda_init_capture { int test() { auto x = 0; const auto lambda1 = [a = x](int b){ return a + b; }; const auto lambda2 = [a = lambda1(x)](){ return a; }; return lambda2(); } } namespace test_digit_seperators { constexpr auto ten_million = 100'000'000; static_assert(ten_million == 100000000, ""); } namespace test_return_type_deduction { auto f(int& x) { return x; } decltype(auto) g(int& x) { return x; } template < typename T1, typename T2 > struct is_same { static constexpr auto value = false; }; template < typename T > struct is_same { static constexpr auto value = true; }; int test() { auto x = 0; static_assert(is_same::value, ""); static_assert(is_same::value, ""); return x; } } } // namespace cxx14 #endif // __cplusplus >= 201402L _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ax_cv_cxx_compile_cxx14=yes else ax_cv_cxx_compile_cxx14=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_cxx_compile_cxx14" >&5 $as_echo "$ax_cv_cxx_compile_cxx14" >&6; } if test x$ax_cv_cxx_compile_cxx14 = xyes; then ac_success=yes fi if test x$ac_success = xno; then for switch in -std=c++14 -std=c++0x +std=c++14 "-h std=c++14"; do cachevar=`$as_echo "ax_cv_cxx_compile_cxx14_$switch" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++14 features with $switch" >&5 $as_echo_n "checking whether $CXX supports C++14 features with $switch... " >&6; } if eval \${$cachevar+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_CXX="$CXX" CXX="$CXX $switch" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ // If the compiler admits that it is not ready for C++11, why torture it? // Hopefully, this will speed up the test. 
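// (Editor's note: this is the same feature-test program as above,
// recompiled once for each candidate switch in -std=c++14, -std=c++0x,
// +std=c++14, "-h std=c++14"; the first switch that compiles it is
// appended to $CXX and $CXXCPP.  A minimal probe in the same spirit,
// relying only on the standard __cplusplus macro -- an editor's sketch,
// far weaker than the exhaustive checks below:
//
//   #if __cplusplus < 201402L
//   #error "not a C++14 compiler"
//   #endif
//   int main() { auto id = [](auto x) { return x; }; return id(0); }
//
// The generic lambda is a C++14-only construct, so the sketch fails to
// compile under C++11 even when a compiler misreports __cplusplus.)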
#ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201103L #error "This is not a C++11 compiler" #else namespace cxx11 { namespace test_static_assert { template struct check { static_assert(sizeof(int) <= sizeof(T), "not big enough"); }; } namespace test_final_override { struct Base { virtual void f() {} }; struct Derived : public Base { virtual void f() override {} }; } namespace test_double_right_angle_brackets { template < typename T > struct check {}; typedef check single_type; typedef check> double_type; typedef check>> triple_type; typedef check>>> quadruple_type; } namespace test_decltype { int f() { int a = 1; decltype(a) b = 2; return a + b; } } namespace test_type_deduction { template < typename T1, typename T2 > struct is_same { static const bool value = false; }; template < typename T > struct is_same { static const bool value = true; }; template < typename T1, typename T2 > auto add(T1 a1, T2 a2) -> decltype(a1 + a2) { return a1 + a2; } int test(const int c, volatile int v) { static_assert(is_same::value == true, ""); static_assert(is_same::value == false, ""); static_assert(is_same::value == false, ""); auto ac = c; auto av = v; auto sumi = ac + av + 'x'; auto sumf = ac + av + 1.0; static_assert(is_same::value == true, ""); static_assert(is_same::value == true, ""); static_assert(is_same::value == true, ""); static_assert(is_same::value == false, ""); static_assert(is_same::value == true, ""); return (sumf > 0.0) ? sumi : add(c, v); } } namespace test_noexcept { int f() { return 0; } int g() noexcept { return 0; } static_assert(noexcept(f()) == false, ""); static_assert(noexcept(g()) == true, ""); } namespace test_constexpr { template < typename CharT > unsigned long constexpr strlen_c_r(const CharT *const s, const unsigned long acc) noexcept { return *s ? 
strlen_c_r(s + 1, acc + 1) : acc; } template < typename CharT > unsigned long constexpr strlen_c(const CharT *const s) noexcept { return strlen_c_r(s, 0UL); } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("1") == 1UL, ""); static_assert(strlen_c("example") == 7UL, ""); static_assert(strlen_c("another\0example") == 7UL, ""); } namespace test_rvalue_references { template < int N > struct answer { static constexpr int value = N; }; answer<1> f(int&) { return answer<1>(); } answer<2> f(const int&) { return answer<2>(); } answer<3> f(int&&) { return answer<3>(); } void test() { int i = 0; const int c = 0; static_assert(decltype(f(i))::value == 1, ""); static_assert(decltype(f(c))::value == 2, ""); static_assert(decltype(f(0))::value == 3, ""); } } namespace test_uniform_initialization { struct test { static const int zero {}; static const int one {1}; }; static_assert(test::zero == 0, ""); static_assert(test::one == 1, ""); } namespace test_lambdas { void test1() { auto lambda1 = [](){}; auto lambda2 = lambda1; lambda1(); lambda2(); } int test2() { auto a = [](int i, int j){ return i + j; }(1, 2); auto b = []() -> int { return '0'; }(); auto c = [=](){ return a + b; }(); auto d = [&](){ return c; }(); auto e = [a, &b](int x) mutable { const auto identity = [](int y){ return y; }; for (auto i = 0; i < a; ++i) a += b--; return x + identity(a + b); }(0); return a + b + c + d + e; } int test3() { const auto nullary = [](){ return 0; }; const auto unary = [](int x){ return x; }; using nullary_t = decltype(nullary); using unary_t = decltype(unary); const auto higher1st = [](nullary_t f){ return f(); }; const auto higher2nd = [unary](nullary_t f1){ return [unary, f1](unary_t f2){ return f2(unary(f1())); }; }; return higher1st(nullary) + higher2nd(nullary)(unary); } } namespace test_variadic_templates { template struct sum; template struct sum { static constexpr auto value = N0 + sum::value; }; template <> struct sum<> { static constexpr auto value = 0; }; static_assert(sum<>::value == 0, ""); static_assert(sum<1>::value == 1, ""); static_assert(sum<23>::value == 23, ""); static_assert(sum<1, 2>::value == 3, ""); static_assert(sum<5, 5, 11>::value == 21, ""); static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); } // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function // because of this. namespace test_template_alias_sfinae { struct foo {}; template using member = typename T::member_type; template void func(...) {} template void func(member*) {} void test(); void test() { func(0); } } } // namespace cxx11 #endif // __cplusplus >= 201103L // If the compiler admits that it is not ready for C++14, why torture it? // Hopefully, this will speed up the test. #ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201402L #error "This is not a C++14 compiler" #else namespace cxx14 { namespace test_polymorphic_lambdas { int test() { const auto lambda = [](auto&&... args){ const auto istiny = [](auto x){ return (sizeof(x) == 1UL) ? 1 : 0; }; const int aretiny[] = { istiny(args)... 
}; return aretiny[0]; }; return lambda(1, 1L, 1.0f, '1'); } } namespace test_binary_literals { constexpr auto ivii = 0b0000000000101010; static_assert(ivii == 42, "wrong value"); } namespace test_generalized_constexpr { template < typename CharT > constexpr unsigned long strlen_c(const CharT *const s) noexcept { auto length = 0UL; for (auto p = s; *p; ++p) ++length; return length; } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("x") == 1UL, ""); static_assert(strlen_c("test") == 4UL, ""); static_assert(strlen_c("another\0test") == 7UL, ""); } namespace test_lambda_init_capture { int test() { auto x = 0; const auto lambda1 = [a = x](int b){ return a + b; }; const auto lambda2 = [a = lambda1(x)](){ return a; }; return lambda2(); } } namespace test_digit_seperators { constexpr auto ten_million = 100'000'000; static_assert(ten_million == 100000000, ""); } namespace test_return_type_deduction { auto f(int& x) { return x; } decltype(auto) g(int& x) { return x; } template < typename T1, typename T2 > struct is_same { static constexpr auto value = false; }; template < typename T > struct is_same { static constexpr auto value = true; }; int test() { auto x = 0; static_assert(is_same::value, ""); static_assert(is_same::value, ""); return x; } } } // namespace cxx14 #endif // __cplusplus >= 201402L _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval $cachevar=yes else eval $cachevar=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext CXX="$ac_save_CXX" fi eval ac_res=\$$cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test x\$$cachevar = xyes; then CXX="$CXX $switch" if test -n "$CXXCPP" ; then CXXCPP="$CXXCPP $switch" fi ac_success=yes break fi done fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test x$ax_cxx_compile_cxx14_required = xtrue; then if test x$ac_success = xno; then as_fn_error $? "*** A compiler with support for C++14 language features is required." "$LINENO" 5 fi fi if test x$ac_success = xno; then HAVE_CXX14=0 { $as_echo "$as_me:${as_lineno-$LINENO}: No compiler with C++14 support was found" >&5 $as_echo "$as_me: No compiler with C++14 support was found" >&6;} else HAVE_CXX14=1 $as_echo "#define HAVE_CXX14 1" >>confdefs.h fi if test "x${HAVE_CXX14}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 $as_echo_n "checking whether compiler supports -Wall... " >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-Wall if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-Wall { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 $as_echo_n "checking whether compiler supports -g3... " >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-g3 if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-g3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi SAVED_LIBS="${LIBS}" T_APPEND_V=-lstdc++ if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether libstdc++ linkage is compilable" >&5 $as_echo_n "checking whether libstdc++ linkage is compilable... " >&6; } if ${je_cv_libstdcxx+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
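   (Editor's note, kept inside this conftest comment: having decided to
   build the C++ parts, the script appends -lstdc++ to LIBS, test-links
   the small malloc program below with the C compiler, and restores the
   saved LIBS when the link fails.  A condensed sketch under the same
   assumptions, with illustrative names:

     SAVED_LIBS=$LIBS
     LIBS="$LIBS -lstdc++"
     printf 'int main(void) { return 0; }\n' > probe.c
     ${CC:-cc} probe.c $LIBS -o probe 2>/dev/null || LIBS=$SAVED_LIBS
     rm -f probe.c probe

   Keeping -lstdc++ in LIBS only when it actually links mirrors the
   je_cv_libstdcxx handling that follows.)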
*/ #include int main () { int *arr = (int *)malloc(sizeof(int) * 42); if (arr == NULL) return 1; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_libstdcxx=yes else je_cv_libstdcxx=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_libstdcxx" >&5 $as_echo "$je_cv_libstdcxx" >&6; } if test "x${je_cv_libstdcxx}" = "xno" ; then LIBS="${SAVED_LIBS}" fi else enable_cxx="0" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. 
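# (Editor's note: as with GREP above, each candidate egrep is scored by
# repeatedly doubling a single long input line and counting how many
# doublings it survives; the highest-scoring binary wins.  A condensed
# sketch of the scoring loop, with illustrative names:
#
#   printf %s 0123456789 > in; n=0
#   while :; do
#     cat in in > tmp && mv tmp in       # double the line length
#     cp in nl && echo PAT >> nl         # one long line ending in PAT
#     grep 'PAT$' < nl > out 2>/dev/null || break
#     cmp -s out nl || break             # the line must come back intact
#     n=$((n + 1))
#     [ $n -gt 10 ] && break             # 10*(2^10) chars is plenty
#   done
#
# A GNU grep is taken immediately, without scoring, when --version
# reports GNU.)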
# Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 $as_echo_n "checking whether byte ordering is bigendian... " >&6; } if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. ac_arch= ac_prev= for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do if test -n "$ac_prev"; then case $ac_word in i?86 | x86_64 | ppc | ppc64) if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then ac_arch=$ac_word else ac_cv_c_bigendian=universal break fi ;; esac ac_prev= elif test "x$ac_word" = "x-arch"; then ac_prev=arch fi done fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ && LITTLE_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if BYTE_ORDER != BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to _BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
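   (Editor's note, kept inside this conftest comment: endianness is
   resolved by a cascade -- Apple universal-binary detection, the
   BYTE_ORDER macros in sys/param.h, the _BIG_ENDIAN / _LITTLE_ENDIAN
   system-header probe around this comment, grepping magic byte strings
   out of a compiled object when cross-compiling, and finally a runtime
   union test.  That last test, quoted as a stand-alone C sketch (the
   script's own version appears further below):

     #include <stdio.h>
     int main(void) {
         union { long l; char c[sizeof(long)]; } u;
         u.l = 1;
         puts(u.c[sizeof(long) - 1] == 1 ? "big-endian"
                                         : "little-endian");
         return 0;
     }

   The result feeds ac_cv_big_endian and, in turn, the
   JEMALLOC_BIG_ENDIAN define.)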
*/ #include <limits.h> int main () { #ifndef _BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # Compile a test program. if test "$cross_compiling" = yes; then : # Try to guess by grepping values from an object file. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; int main () { return use_ascii (foo) == use_ebcdic (foo); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then ac_cv_c_bigendian=yes fi if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then if test "$ac_cv_c_bigendian" = unknown; then ac_cv_c_bigendian=no else # finding both strings is unlikely to happen, but who knows? ac_cv_c_bigendian=unknown fi fi fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Are we little or big endian? From Harbison&Steele. */ union { long int l; char c[sizeof (long int)]; } u; u.l = 1; return u.c[sizeof (long int) - 1] == 1; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_bigendian=no else ac_cv_c_bigendian=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 $as_echo "$ac_cv_c_bigendian" >&6; } case $ac_cv_c_bigendian in #( yes) ac_cv_big_endian=1;; #( no) ac_cv_big_endian=0 ;; #( universal) $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h ;; #( *) as_fn_error $? "unknown endianness presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; esac if test "x${ac_cv_big_endian}" = "x1" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_BIG_ENDIAN _ACEOF fi if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then T_APPEND_V=-I${srcdir}/include/msvc_compat/C99 if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi fi if test "x${je_cv_msvc}" = "xyes" ; then LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN { $as_echo "$as_me:${as_lineno-$LINENO}: result: Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&5 $as_echo "Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&6; } else # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5 $as_echo_n "checking size of void *... 
" >&6; } if ${ac_cv_sizeof_void_p+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then : else if test "$ac_cv_type_void_p" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (void *) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_void_p=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5 $as_echo "$ac_cv_sizeof_void_p" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_VOID_P $ac_cv_sizeof_void_p _ACEOF if test "x${ac_cv_sizeof_void_p}" = "x8" ; then LG_SIZEOF_PTR=3 elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then LG_SIZEOF_PTR=2 else as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5 fi fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_PTR $LG_SIZEOF_PTR _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 $as_echo_n "checking size of int... " >&6; } if ${ac_cv_sizeof_int+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : else if test "$ac_cv_type_int" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (int) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_int=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 $as_echo "$ac_cv_sizeof_int" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INT $ac_cv_sizeof_int _ACEOF if test "x${ac_cv_sizeof_int}" = "x8" ; then LG_SIZEOF_INT=3 elif test "x${ac_cv_sizeof_int}" = "x4" ; then LG_SIZEOF_INT=2 else as_fn_error $? "Unsupported int size: ${ac_cv_sizeof_int}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_INT $LG_SIZEOF_INT _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5 $as_echo_n "checking size of long... " >&6; } if ${ac_cv_sizeof_long+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then : else if test "$ac_cv_type_long" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (long) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_long=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5 $as_echo "$ac_cv_sizeof_long" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_LONG $ac_cv_sizeof_long _ACEOF if test "x${ac_cv_sizeof_long}" = "x8" ; then LG_SIZEOF_LONG=3 elif test "x${ac_cv_sizeof_long}" = "x4" ; then LG_SIZEOF_LONG=2 else as_fn_error $? 
"Unsupported long size: ${ac_cv_sizeof_long}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_LONG $LG_SIZEOF_LONG _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5 $as_echo_n "checking size of long long... " >&6; } if ${ac_cv_sizeof_long_long+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long" "$ac_includes_default"; then : else if test "$ac_cv_type_long_long" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (long long) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_long_long=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5 $as_echo "$ac_cv_sizeof_long_long" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long _ACEOF if test "x${ac_cv_sizeof_long_long}" = "x8" ; then LG_SIZEOF_LONG_LONG=3 elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then LG_SIZEOF_LONG_LONG=2 else as_fn_error $? "Unsupported long long size: ${ac_cv_sizeof_long_long}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_LONG_LONG $LG_SIZEOF_LONG_LONG _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5 $as_echo_n "checking size of intmax_t... " >&6; } if ${ac_cv_sizeof_intmax_t+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (intmax_t))" "ac_cv_sizeof_intmax_t" "$ac_includes_default"; then : else if test "$ac_cv_type_intmax_t" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (intmax_t) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_intmax_t=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_intmax_t" >&5 $as_echo "$ac_cv_sizeof_intmax_t" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INTMAX_T $ac_cv_sizeof_intmax_t _ACEOF if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then LG_SIZEOF_INTMAX_T=4 elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then LG_SIZEOF_INTMAX_T=3 elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then LG_SIZEOF_INTMAX_T=2 else as_fn_error $? "Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_INTMAX_T $LG_SIZEOF_INTMAX_T _ACEOF # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? 
"cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac CPU_SPINWAIT="" case "${host_cpu}" in i686|x86_64) HAVE_CPU_SPINWAIT=1 if test "x${je_cv_msvc}" = "xyes" ; then if ${je_cv_pause_msvc+:} false; then : $as_echo_n "(cached) " >&6 else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction MSVC is compilable" >&5 $as_echo_n "checking whether pause instruction MSVC is compilable... " >&6; } if ${je_cv_pause_msvc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { _mm_pause(); return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pause_msvc=yes else je_cv_pause_msvc=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause_msvc" >&5 $as_echo "$je_cv_pause_msvc" >&6; } fi if test "x${je_cv_pause_msvc}" = "xyes" ; then CPU_SPINWAIT='_mm_pause()' fi else if ${je_cv_pause+:} false; then : $as_echo_n "(cached) " >&6 else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5 $as_echo_n "checking whether pause instruction is compilable... " >&6; } if ${je_cv_pause+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { __asm__ volatile("pause"); return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pause=yes else je_cv_pause=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5 $as_echo "$je_cv_pause" >&6; } fi if test "x${je_cv_pause}" = "xyes" ; then CPU_SPINWAIT='__asm__ volatile("pause")' fi fi ;; *) HAVE_CPU_SPINWAIT=0 ;; esac cat >>confdefs.h <<_ACEOF #define HAVE_CPU_SPINWAIT $HAVE_CPU_SPINWAIT _ACEOF cat >>confdefs.h <<_ACEOF #define CPU_SPINWAIT $CPU_SPINWAIT _ACEOF # Check whether --with-lg_vaddr was given. 
if test "${with_lg_vaddr+set}" = set; then : withval=$with_lg_vaddr; LG_VADDR="$with_lg_vaddr" else LG_VADDR="detect" fi case "${host_cpu}" in aarch64) if test "x$LG_VADDR" = "xdetect"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 $as_echo_n "checking number of significant virtual address bits... " >&6; } if test "x${LG_SIZEOF_PTR}" = "x2" ; then #aarch64 ILP32 LG_VADDR=32 else #aarch64 LP64 LG_VADDR=48 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5 $as_echo "$LG_VADDR" >&6; } fi ;; x86_64) if test "x$LG_VADDR" = "xdetect"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 $as_echo_n "checking number of significant virtual address bits... " >&6; } if ${je_cv_lg_vaddr+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : je_cv_lg_vaddr=57 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifdef _WIN32 #include #include typedef unsigned __int32 uint32_t; #else #include #endif int main () { uint32_t r[4]; uint32_t eax_in = 0x80000008U; #ifdef _WIN32 __cpuid((int *)r, (int)eax_in); #else asm volatile ("cpuid" : "=a" (r[0]), "=b" (r[1]), "=c" (r[2]), "=d" (r[3]) : "a" (eax_in), "c" (0) ); #endif uint32_t eax_out = r[0]; uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8); FILE *f = fopen("conftest.out", "w"); if (f == NULL) { return 1; } if (vaddr > (sizeof(void *) << 3)) { vaddr = sizeof(void *) << 3; } fprintf(f, "%u", vaddr); fclose(f); return 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : je_cv_lg_vaddr=`cat conftest.out` else je_cv_lg_vaddr=error fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_vaddr" >&5 $as_echo "$je_cv_lg_vaddr" >&6; } if test "x${je_cv_lg_vaddr}" != "x" ; then LG_VADDR="${je_cv_lg_vaddr}" fi if test "x${LG_VADDR}" != "xerror" ; then cat >>confdefs.h <<_ACEOF #define LG_VADDR $LG_VADDR _ACEOF else as_fn_error $? "cannot determine number of significant virtual address bits" "$LINENO" 5 fi fi ;; *) if test "x$LG_VADDR" = "xdetect"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 $as_echo_n "checking number of significant virtual address bits... " >&6; } if test "x${LG_SIZEOF_PTR}" = "x3" ; then LG_VADDR=64 elif test "x${LG_SIZEOF_PTR}" = "x2" ; then LG_VADDR=32 elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))" else as_fn_error $? "Unsupported lg(pointer size): ${LG_SIZEOF_PTR}" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5 $as_echo "$LG_VADDR" >&6; } fi ;; esac cat >>confdefs.h <<_ACEOF #define LG_VADDR $LG_VADDR _ACEOF LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" o="$ac_objext" a="a" exe="$ac_exeext" libprefix="lib" link_whole_archive="0" DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' RPATH='-Wl,-rpath,$(1)' SOREV="${so}.${rev}" PIC_CFLAGS='-fPIC -DPIC' CTARGET='-o $@' LDTARGET='-o $@' TEST_LD_MODE= EXTRA_LDFLAGS= ARFLAGS='crus' AROUT=' $@' CC_MM=1 if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then TEST_LD_MODE='-dynamic' fi if test "x${je_cv_cray}" = "xyes" ; then CC_MM= fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. 
set dummy ${ac_tool_prefix}ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="${ac_tool_prefix}ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_AR"; then ac_ct_AR=$AR # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AR" = x; then AR=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nm", so it can be a program name with args. set dummy ${ac_tool_prefix}nm; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then ac_cv_prog_NM="$NM" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_NM="${ac_tool_prefix}nm" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NM=$ac_cv_prog_NM if test -n "$NM"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NM" >&5 $as_echo "$NM" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NM"; then ac_ct_NM=$NM # Extract the first word of "nm", so it can be a program name with args. set dummy nm; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_ac_ct_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NM"; then ac_cv_prog_ac_ct_NM="$ac_ct_NM" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_NM="nm" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NM=$ac_cv_prog_ac_ct_NM if test -n "$ac_ct_NM"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NM" >&5 $as_echo "$ac_ct_NM" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NM" = x; then NM=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NM=$ac_ct_NM fi else NM="$ac_cv_prog_NM" fi for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done default_retain="0" maps_coalesce="1" DUMP_SYMS="${NM} -a" SYM_PREFIX="" case "${host}" in *-*-darwin* | *-*-ios*) abi="macho" RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" importlib="${so}" force_tls="0" DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" SYM_PREFIX="_" ;; *-*-freebsd*) abi="elf" $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h force_lazy_lock="1" ;; *-*-dragonfly*) abi="elf" ;; *-*-openbsd*) abi="elf" force_tls="0" ;; *-*-bitrig*) abi="elf" ;; *-*-linux-android) T_APPEND_V=-D_GNU_SOURCE if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS " >>confdefs.h $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_C11_ATOMICS 1" >>confdefs.h force_tls="0" if test "${LG_SIZEOF_PTR}" = "3"; then default_retain="1" fi ;; *-*-linux*) T_APPEND_V=-D_GNU_SOURCE if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS " >>confdefs.h $as_echo 
"#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h if test "${LG_SIZEOF_PTR}" = "3"; then default_retain="1" fi ;; *-*-kfreebsd*) T_APPEND_V=-D_GNU_SOURCE if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi abi="elf" $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h ;; *-*-netbsd*) { $as_echo "$as_me:${as_lineno-$LINENO}: checking ABI" >&5 $as_echo_n "checking ABI... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __ELF__ /* ELF */ #else #error aout #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : abi="elf" else abi="aout" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5 $as_echo "$abi" >&6; } ;; *-*-solaris2*) abi="elf" RPATH='-Wl,-R,$(1)' T_APPEND_V=-D_POSIX_PTHREAD_SEMANTICS if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi T_APPEND_V=-lposix4 -lsocket -lnsl if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi ;; *-ibm-aix*) if test "${LG_SIZEOF_PTR}" = "3"; then LD_PRELOAD_VAR="LDR_PRELOAD64" else LD_PRELOAD_VAR="LDR_PRELOAD" fi abi="xcoff" ;; *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" maps_coalesce="0" RPATH="" so="dll" if test "x$je_cv_msvc" = "xyes" ; then importlib="lib" DSO_LDFLAGS="-LD" EXTRA_LDFLAGS="-link -DEBUG" CTARGET='-Fo$@' LDTARGET='-Fe$@' AR='lib' ARFLAGS='-nologo -out:' AROUT='$@' CC_MM= else importlib="${so}" DSO_LDFLAGS="-shared" link_whole_archive="1" fi case "${host}" in *-*-cygwin*) DUMP_SYMS="dumpbin /SYMBOLS" ;; *) ;; esac a="lib" libprefix="" SOREV="${so}" PIC_CFLAGS="" ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5 $as_echo "Unsupported operating system: ${host}" >&6; } abi="elf" ;; esac JEMALLOC_USABLE_SIZE_CONST=const for ac_header in malloc.h do : ac_fn_c_check_header_mongrel "$LINENO" "malloc.h" "ac_cv_header_malloc_h" "$ac_includes_default" if test "x$ac_cv_header_malloc_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_MALLOC_H 1 _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether malloc_usable_size definition can use const argument" >&5 $as_echo_n "checking whether malloc_usable_size definition can use const argument... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include size_t malloc_usable_size(const void *ptr); int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else JEMALLOC_USABLE_SIZE_CONST= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi done cat >>confdefs.h <<_ACEOF #define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5 $as_echo_n "checking for library containing log... 
" >&6; } if ${ac_cv_search_log+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char log (); int main () { return log (); ; return 0; } _ACEOF for ac_lib in '' m; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_log=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_log+:} false; then : break fi done if ${ac_cv_search_log+:} false; then : else ac_cv_search_log=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5 $as_echo "$ac_cv_search_log" >&6; } ac_res=$ac_cv_search_log if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" else as_fn_error $? "Missing math functions" "$LINENO" 5 fi if test "x$ac_cv_search_log" != "xnone required" ; then LM="$ac_cv_search_log" else LM= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5 $as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; } if ${je_cv_attribute+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ static __attribute__((unused)) void foo(void){} int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_attribute=yes else je_cv_attribute=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5 $as_echo "$je_cv_attribute" >&6; } if test "x${je_cv_attribute}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR " >>confdefs.h if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 $as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-fvisibility=hidden if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-fvisibility=hidden { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 $as_echo_n "checking whether compiler supports -fvisibility=hidden... 
" >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-fvisibility=hidden if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-fvisibility=hidden { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi fi fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5 $as_echo_n "checking whether tls_model attribute is compilable... " >&6; } if ${je_cv_tls_model+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { static __thread int __attribute__((tls_model("initial-exec"), unused)) foo; foo = 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_tls_model=yes else je_cv_tls_model=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5 $as_echo "$je_cv_tls_model" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5 $as_echo_n "checking whether alloc_size attribute is compilable... " >&6; } if ${je_cv_alloc_size+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { void *foo(size_t size) __attribute__((alloc_size(1))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_alloc_size=yes else je_cv_alloc_size=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5 $as_echo "$je_cv_alloc_size" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x${je_cv_alloc_size}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE " >>confdefs.h fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_gnu_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> int main () { void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_format_gnu_printf=yes else je_cv_format_gnu_printf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5 $as_echo "$je_cv_format_gnu_printf" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x${je_cv_format_gnu_printf}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF " >>confdefs.h fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { void *foo(const char *format, ...) __attribute__((format(printf, 1, 2))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_format_printf=yes else je_cv_format_printf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5 $as_echo "$je_cv_format_printf" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x${je_cv_format_printf}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF " >>confdefs.h fi # Check whether --with-rpath was given. 
if test "${with_rpath+set}" = set; then : withval=$with_rpath; if test "x$with_rpath" = "xno" ; then RPATH_EXTRA= else RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" fi else RPATH_EXTRA= fi # Check whether --enable-autogen was given. if test "${enable_autogen+set}" = set; then : enableval=$enable_autogen; if test "x$enable_autogen" = "xno" ; then enable_autogen="0" else enable_autogen="1" fi else enable_autogen="0" fi # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. 
test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi # Extract the first word of "ld", so it can be a program name with args. set dummy ld; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else case $LD in [\\/]* | ?:[\\/]*) ac_cv_path_LD="$LD" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_LD" && ac_cv_path_LD="false" ;; esac fi LD=$ac_cv_path_LD if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "autoconf", so it can be a program name with args. set dummy autoconf; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_AUTOCONF+:} false; then : $as_echo_n "(cached) " >&6 else case $AUTOCONF in [\\/]* | ?:[\\/]*) ac_cv_path_AUTOCONF="$AUTOCONF" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false" ;; esac fi AUTOCONF=$ac_cv_path_AUTOCONF if test -n "$AUTOCONF"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AUTOCONF" >&5 $as_echo "$AUTOCONF" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Check whether --with-mangling was given. if test "${with_mangling+set}" = set; then : withval=$with_mangling; mangling_map="$with_mangling" else mangling_map="" fi # Check whether --with-jemalloc_prefix was given. if test "${with_jemalloc_prefix+set}" = set; then : withval=$with_jemalloc_prefix; JEMALLOC_PREFIX="$with_jemalloc_prefix" else if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then JEMALLOC_PREFIX="" else JEMALLOC_PREFIX="je_" fi fi if test "x$JEMALLOC_PREFIX" = "x" ; then $as_echo "#define JEMALLOC_IS_MALLOC 1" >>confdefs.h else JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` cat >>confdefs.h <<_ACEOF #define JEMALLOC_PREFIX "$JEMALLOC_PREFIX" _ACEOF cat >>confdefs.h <<_ACEOF #define JEMALLOC_CPREFIX "$JEMALLOC_CPREFIX" _ACEOF fi # Check whether --with-export was given. 
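# Passing --without-export defines JEMALLOC_EXPORT to nothing, so the public
# entry points listed in public_syms below lose their default export
# annotations; this is presumably most useful when jemalloc is linked
# statically into a larger binary that should not re-export the allocator API.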
if test "${with_export+set}" = set; then : withval=$with_export; if test "x$with_export" = "xno"; then $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h fi fi public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx" ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign" if test "x$ac_cv_func_memalign" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE_MEMALIGN " >>confdefs.h public_syms="${public_syms} memalign" fi ac_fn_c_check_func "$LINENO" "valloc" "ac_cv_func_valloc" if test "x$ac_cv_func_valloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE_VALLOC " >>confdefs.h public_syms="${public_syms} valloc" fi wrap_syms= if test "x${JEMALLOC_PREFIX}" = "x" ; then ac_fn_c_check_func "$LINENO" "__libc_calloc" "ac_cv_func___libc_calloc" if test "x$ac_cv_func___libc_calloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_CALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_calloc" fi ac_fn_c_check_func "$LINENO" "__libc_free" "ac_cv_func___libc_free" if test "x$ac_cv_func___libc_free" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_FREE " >>confdefs.h wrap_syms="${wrap_syms} __libc_free" fi ac_fn_c_check_func "$LINENO" "__libc_malloc" "ac_cv_func___libc_malloc" if test "x$ac_cv_func___libc_malloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_MALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_malloc" fi ac_fn_c_check_func "$LINENO" "__libc_memalign" "ac_cv_func___libc_memalign" if test "x$ac_cv_func___libc_memalign" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN " >>confdefs.h wrap_syms="${wrap_syms} __libc_memalign" fi ac_fn_c_check_func "$LINENO" "__libc_realloc" "ac_cv_func___libc_realloc" if test "x$ac_cv_func___libc_realloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_REALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_realloc" fi ac_fn_c_check_func "$LINENO" "__libc_valloc" "ac_cv_func___libc_valloc" if test "x$ac_cv_func___libc_valloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_VALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_valloc" fi ac_fn_c_check_func "$LINENO" "__posix_memalign" "ac_cv_func___posix_memalign" if test "x$ac_cv_func___posix_memalign" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN " >>confdefs.h wrap_syms="${wrap_syms} __posix_memalign" fi fi case "${host}" in *-*-mingw* | *-*-cygwin*) wrap_syms="${wrap_syms} tls_callback" ;; *) ;; esac # Check whether --with-private_namespace was given. if test "${with_private_namespace+set}" = set; then : withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_" else JEMALLOC_PRIVATE_NAMESPACE="je_" fi cat >>confdefs.h <<_ACEOF #define JEMALLOC_PRIVATE_NAMESPACE $JEMALLOC_PRIVATE_NAMESPACE _ACEOF private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" # Check whether --with-install_suffix was given. if test "${with_install_suffix+set}" = set; then : withval=$with_install_suffix; INSTALL_SUFFIX="$with_install_suffix" else INSTALL_SUFFIX= fi install_suffix="$INSTALL_SUFFIX" # Check whether --with-malloc_conf was given. 
if test "${with_malloc_conf+set}" = set; then : withval=$with_malloc_conf; JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf" else JEMALLOC_CONFIG_MALLOC_CONF="" fi config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF" cat >>confdefs.h <<_ACEOF #define JEMALLOC_CONFIG_MALLOC_CONF "$config_malloc_conf" _ACEOF je_="je_" cfgoutputs_in="Makefile.in" cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in" cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" cfgoutputs_out="Makefile" cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_out="${cfgoutputs_out} test/test.sh" cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" cfgoutputs_tup="Makefile" cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" cfghdrs_out="include/jemalloc/jemalloc_defs.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk" cfghdrs_out="${cfghdrs_out} 
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"

cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"

# Check whether --enable-debug was given.
if test "${enable_debug+set}" = set; then :
  enableval=$enable_debug; if test "x$enable_debug" = "xno" ; then
  enable_debug="0"
else
  enable_debug="1"
fi

else
  enable_debug="0"

fi

if test "x$enable_debug" = "x1" ; then
  $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h

fi
if test "x$enable_debug" = "x1" ; then
  $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h

fi
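# With debugging disabled, probe for optimization flags: -O3 for
# GCC-compatible compilers, -O2 for MSVC, plain -O otherwise.  Each probe
# tentatively appends the flag, compiles an empty program, and rolls the
# flags back if the compiler rejects it.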
" >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-O3 if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-O3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -funroll-loops" >&5 $as_echo_n "checking whether compiler supports -funroll-loops... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-funroll-loops if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-funroll-loops { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi elif test "x$je_cv_msvc" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5 $as_echo_n "checking whether compiler supports -O2... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-O2 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
/* end confdefs.h.  */

int
main ()
{
    return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  je_cv_cflags_added=-O2
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5
$as_echo_n "checking whether compiler supports -O2... " >&6; }
T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
T_APPEND_V=-O2
  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
else
  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
fi

if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
else
  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
fi

ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

int
main ()
{
    return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :
  je_cv_cxxflags_added=-O2
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cxxflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
else
  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
fi

  else
    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5
$as_echo_n "checking whether compiler supports -O... " >&6; }
T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
T_APPEND_V=-O
  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
else
  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
fi

if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

int
main ()
{
    return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  je_cv_cflags_added=-O
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5
$as_echo_n "checking whether compiler supports -O... " >&6; }
T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
T_APPEND_V=-O
  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
else
  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
fi

if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
else
  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
fi

ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

int
main ()
{
    return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :
  je_cv_cxxflags_added=-O
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cxxflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
else
  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
fi

  fi
fi

# Check whether --enable-stats was given.
if test "${enable_stats+set}" = set; then :
  enableval=$enable_stats; if test "x$enable_stats" = "xno" ; then
  enable_stats="0"
else
  enable_stats="1"
fi

else
  enable_stats="1"

fi

if test "x$enable_stats" = "x1" ; then
  $as_echo "#define JEMALLOC_STATS " >>confdefs.h

fi

# Check whether --enable-prof was given.
if test "${enable_prof+set}" = set; then :
  enableval=$enable_prof; if test "x$enable_prof" = "xno" ; then
  enable_prof="0"
else
  enable_prof="1"
fi

else
  enable_prof="0"

fi

if test "x$enable_prof" = "x1" ; then
  backtrace_method=""
else
  backtrace_method="N/A"
fi

# Check whether --enable-prof-libunwind was given.
if test "${enable_prof_libunwind+set}" = set; then :
  enableval=$enable_prof_libunwind; if test "x$enable_prof_libunwind" = "xno" ; then
  enable_prof_libunwind="0"
else
  enable_prof_libunwind="1"
fi

else
  enable_prof_libunwind="0"

fi


# Check whether --with-static_libunwind was given.
if test "${with_static_libunwind+set}" = set; then :
  withval=$with_static_libunwind; if test "x$with_static_libunwind" = "xno" ; then
  LUNWIND="-lunwind"
else
  if test ! -f "$with_static_libunwind" ; then
"Static libunwind not found: $with_static_libunwind" "$LINENO" 5 fi LUNWIND="$with_static_libunwind" fi else LUNWIND="-lunwind" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then for ac_header in libunwind.h do : ac_fn_c_check_header_mongrel "$LINENO" "libunwind.h" "ac_cv_header_libunwind_h" "$ac_includes_default" if test "x$ac_cv_header_libunwind_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBUNWIND_H 1 _ACEOF else enable_prof_libunwind="0" fi done if test "x$LUNWIND" = "x-lunwind" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unw_backtrace in -lunwind" >&5 $as_echo_n "checking for unw_backtrace in -lunwind... " >&6; } if ${ac_cv_lib_unwind_unw_backtrace+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lunwind $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char unw_backtrace (); int main () { return unw_backtrace (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_unwind_unw_backtrace=yes else ac_cv_lib_unwind_unw_backtrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_unw_backtrace" >&5 $as_echo "$ac_cv_lib_unwind_unw_backtrace" >&6; } if test "x$ac_cv_lib_unwind_unw_backtrace" = xyes; then : T_APPEND_V=$LUNWIND if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi else enable_prof_libunwind="0" fi else T_APPEND_V=$LUNWIND if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi fi if test "x${enable_prof_libunwind}" = "x1" ; then backtrace_method="libunwind" $as_echo "#define JEMALLOC_PROF_LIBUNWIND " >>confdefs.h fi fi # Check whether --enable-prof-libgcc was given. if test "${enable_prof_libgcc+set}" = set; then : enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then enable_prof_libgcc="0" else enable_prof_libgcc="1" fi else enable_prof_libgcc="1" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ -a "x$GCC" = "xyes" ; then for ac_header in unwind.h do : ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default" if test "x$ac_cv_header_unwind_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UNWIND_H 1 _ACEOF else enable_prof_libgcc="0" fi done if test "x${enable_prof_libgcc}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5 $as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; } if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgcc $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
# Check whether --enable-prof-libgcc was given.
if test "${enable_prof_libgcc+set}" = set; then :
  enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then
  enable_prof_libgcc="0"
else
  enable_prof_libgcc="1"
fi

else
  enable_prof_libgcc="1"

fi

if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
     -a "x$GCC" = "xyes" ; then
  for ac_header in unwind.h
do :
  ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default"
if test "x$ac_cv_header_unwind_h" = xyes; then :
  cat >>confdefs.h <<_ACEOF
#define HAVE_UNWIND_H 1
_ACEOF

else
  enable_prof_libgcc="0"
fi

done

  if test "x${enable_prof_libgcc}" = "x1" ; then
    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5
$as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; }
if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then :
  $as_echo_n "(cached) " >&6
else
  ac_check_lib_save_LIBS=$LIBS
LIBS="-lgcc $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

/* Override any GCC internal prototype to avoid an error.
   Use char because int might match the return type of a GCC
   builtin and then its argument prototype would still apply.  */
#ifdef __cplusplus
extern "C"
#endif
char _Unwind_Backtrace ();
int
main ()
{
return _Unwind_Backtrace ();
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  ac_cv_lib_gcc__Unwind_Backtrace=yes
else
  ac_cv_lib_gcc__Unwind_Backtrace=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gcc__Unwind_Backtrace" >&5
$as_echo "$ac_cv_lib_gcc__Unwind_Backtrace" >&6; }
if test "x$ac_cv_lib_gcc__Unwind_Backtrace" = xyes; then :
  T_APPEND_V=-lgcc
  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  LIBS="${LIBS}${T_APPEND_V}"
else
  LIBS="${LIBS} ${T_APPEND_V}"
fi

else
  enable_prof_libgcc="0"
fi

  fi
  if test "x${enable_prof_libgcc}" = "x1" ; then
    backtrace_method="libgcc"
    $as_echo "#define JEMALLOC_PROF_LIBGCC " >>confdefs.h

  fi
else
  enable_prof_libgcc="0"
fi

# Check whether --enable-prof-gcc was given.
if test "${enable_prof_gcc+set}" = set; then :
  enableval=$enable_prof_gcc; if test "x$enable_prof_gcc" = "xno" ; then
  enable_prof_gcc="0"
else
  enable_prof_gcc="1"
fi

else
  enable_prof_gcc="1"

fi

if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
     -a "x$GCC" = "xyes" ; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fno-omit-frame-pointer" >&5
$as_echo_n "checking whether compiler supports -fno-omit-frame-pointer... " >&6; }
T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
T_APPEND_V=-fno-omit-frame-pointer
  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
else
  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
fi

if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

int
main ()
{
    return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  je_cv_cflags_added=-fno-omit-frame-pointer
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

  backtrace_method="gcc intrinsics"
  $as_echo "#define JEMALLOC_PROF_GCC " >>confdefs.h

else
  enable_prof_gcc="0"
fi

if test "x$backtrace_method" = "x" ; then
  backtrace_method="none (disabling profiling)"
  enable_prof="0"
fi
" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5 $as_echo "$backtrace_method" >&6; } if test "x$enable_prof" = "x1" ; then T_APPEND_V=$LM if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi $as_echo "#define JEMALLOC_PROF " >>confdefs.h fi if test "x${maps_coalesce}" = "x1" ; then $as_echo "#define JEMALLOC_MAPS_COALESCE " >>confdefs.h fi if test "x$default_retain" = "x1" ; then $as_echo "#define JEMALLOC_RETAIN " >>confdefs.h fi have_dss="1" ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk" if test "x$ac_cv_func_sbrk" = xyes; then : have_sbrk="1" else have_sbrk="0" fi if test "x$have_sbrk" = "x1" ; then if test "x$sbrk_deprecated" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5 $as_echo "Disabling dss allocation because sbrk is deprecated" >&6; } have_dss="0" fi else have_dss="0" fi if test "x$have_dss" = "x1" ; then $as_echo "#define JEMALLOC_DSS " >>confdefs.h fi # Check whether --enable-fill was given. if test "${enable_fill+set}" = set; then : enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then enable_fill="0" else enable_fill="1" fi else enable_fill="1" fi if test "x$enable_fill" = "x1" ; then $as_echo "#define JEMALLOC_FILL " >>confdefs.h fi # Check whether --enable-utrace was given. if test "${enable_utrace+set}" = set; then : enableval=$enable_utrace; if test "x$enable_utrace" = "xno" ; then enable_utrace="0" else enable_utrace="1" fi else enable_utrace="0" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) is compilable" >&5 $as_echo_n "checking whether utrace(2) is compilable... " >&6; } if ${je_cv_utrace+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include #include int main () { utrace((void *)0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_utrace=yes else je_cv_utrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace" >&5 $as_echo "$je_cv_utrace" >&6; } if test "x${je_cv_utrace}" = "xno" ; then enable_utrace="0" fi if test "x$enable_utrace" = "x1" ; then $as_echo "#define JEMALLOC_UTRACE " >>confdefs.h fi # Check whether --enable-xmalloc was given. if test "${enable_xmalloc+set}" = set; then : enableval=$enable_xmalloc; if test "x$enable_xmalloc" = "xno" ; then enable_xmalloc="0" else enable_xmalloc="1" fi else enable_xmalloc="0" fi if test "x$enable_xmalloc" = "x1" ; then $as_echo "#define JEMALLOC_XMALLOC " >>confdefs.h fi # Check whether --enable-cache-oblivious was given. if test "${enable_cache_oblivious+set}" = set; then : enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then enable_cache_oblivious="0" else enable_cache_oblivious="1" fi else enable_cache_oblivious="1" fi if test "x$enable_cache_oblivious" = "x1" ; then $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS " >>confdefs.h fi # Check whether --enable-log was given. 
if test "${enable_log+set}" = set; then : enableval=$enable_log; if test "x$enable_log" = "xno" ; then enable_log="0" else enable_log="1" fi else enable_log="0" fi if test "x$enable_log" = "x1" ; then $as_echo "#define JEMALLOC_LOG " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_unreachable is compilable" >&5 $as_echo_n "checking whether a program using __builtin_unreachable is compilable... " >&6; } if ${je_cv_gcc_builtin_unreachable+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ void foo (void) { __builtin_unreachable(); } int main () { { foo(); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_builtin_unreachable=yes else je_cv_gcc_builtin_unreachable=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_unreachable" >&5 $as_echo "$je_cv_gcc_builtin_unreachable" >&6; } if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable" >>confdefs.h else $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE abort" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5 $as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; } if ${je_cv_gcc_builtin_ffsl+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { { int rv = __builtin_ffsl(0x08); printf("%d\n", rv); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_builtin_ffsl=yes else je_cv_gcc_builtin_ffsl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_ffsl" >&5 $as_echo "$je_cv_gcc_builtin_ffsl" >&6; } if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then $as_echo "#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5 $as_echo_n "checking whether a program using ffsl is compilable... " >&6; } if ${je_cv_function_ffsl+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { { int rv = ffsl(0x08); printf("%d\n", rv); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_function_ffsl=yes else je_cv_function_ffsl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5 $as_echo "$je_cv_function_ffsl" >&6; } if test "x${je_cv_function_ffsl}" = "xyes" ; then $as_echo "#define JEMALLOC_INTERNAL_FFSLL ffsll" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h else as_fn_error $? "Cannot build without ffsl(3) or __builtin_ffsl()" "$LINENO" 5 fi fi # Check whether --with-lg_quantum was given. 
if test "${with_lg_quantum+set}" = set; then : withval=$with_lg_quantum; LG_QUANTA="$with_lg_quantum" else LG_QUANTA="3 4" fi if test "x$with_lg_quantum" != "x" ; then cat >>confdefs.h <<_ACEOF #define LG_QUANTUM $with_lg_quantum _ACEOF fi # Check whether --with-lg_page was given. if test "${with_lg_page+set}" = set; then : withval=$with_lg_page; LG_PAGE="$with_lg_page" else LG_PAGE="detect" fi if test "x$LG_PAGE" = "xdetect"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5 $as_echo_n "checking LG_PAGE... " >&6; } if ${je_cv_lg_page+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : je_cv_lg_page=12 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifdef _WIN32 #include #else #include #endif #include int main () { int result; FILE *f; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwPageSize; #else result = sysconf(_SC_PAGESIZE); #endif if (result == -1) { return 1; } result = JEMALLOC_INTERNAL_FFSL(result) - 1; f = fopen("conftest.out", "w"); if (f == NULL) { return 1; } fprintf(f, "%d", result); fclose(f); return 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : je_cv_lg_page=`cat conftest.out` else je_cv_lg_page=undefined fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5 $as_echo "$je_cv_lg_page" >&6; } fi if test "x${je_cv_lg_page}" != "x" ; then LG_PAGE="${je_cv_lg_page}" fi if test "x${LG_PAGE}" != "xundefined" ; then cat >>confdefs.h <<_ACEOF #define LG_PAGE $LG_PAGE _ACEOF else as_fn_error $? "cannot determine value for LG_PAGE" "$LINENO" 5 fi # Check whether --with-lg_hugepage was given. if test "${with_lg_hugepage+set}" = set; then : withval=$with_lg_hugepage; je_cv_lg_hugepage="${with_lg_hugepage}" else je_cv_lg_hugepage="" fi if test "x${je_cv_lg_hugepage}" = "x" ; then if test -e "/proc/meminfo" ; then hpsk=`cat /proc/meminfo 2>/dev/null | \ grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \ awk '{print $2}'` if test "x${hpsk}" != "x" ; then je_cv_lg_hugepage=10 while test "${hpsk}" -gt 1 ; do hpsk="$((hpsk / 2))" je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))" done fi fi if test "x${je_cv_lg_hugepage}" = "x" ; then je_cv_lg_hugepage=21 fi fi if test "x${LG_PAGE}" != "xundefined" -a \ "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then as_fn_error $? "Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_HUGEPAGE ${je_cv_lg_hugepage} _ACEOF # Check whether --with-lg_page_sizes was given. if test "${with_lg_page_sizes+set}" = set; then : withval=$with_lg_page_sizes; LG_PAGE_SIZES="$with_lg_page_sizes" else LG_PAGE_SIZES="$LG_PAGE" fi # Check whether --with-version was given. if test "${with_version+set}" = set; then : withval=$with_version; echo "${with_version}" | grep '^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$' 2>&1 1>/dev/null if test $? -eq 0 ; then echo "$with_version" > "${objroot}VERSION" else echo "${with_version}" | grep '^VERSION$' 2>&1 1>/dev/null if test $? -ne 0 ; then as_fn_error $? "${with_version} does not match ..--g or VERSION" "$LINENO" 5 fi fi else if test "x`test ! 
\"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ '[0-9][0-9].[0-9][0-9].[0-9]' \ '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null if test $? -eq 0 ; then mv "${objroot}VERSION.tmp" "${objroot}VERSION" break fi done fi rm -f "${objroot}VERSION.tmp" fi if test ! -e "${objroot}VERSION" ; then if test ! -e "${srcroot}VERSION" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5 $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; } echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" else cp ${srcroot}VERSION ${objroot}VERSION fi fi jemalloc_version=`cat "${objroot}VERSION"` jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'` jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'` jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'` jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'` jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'` if test "x$abi" != "xpecoff" ; then $as_echo "#define JEMALLOC_HAVE_PTHREAD " >>confdefs.h for ac_header in pthread.h do : ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" if test "x$ac_cv_header_pthread_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_PTHREAD_H 1 _ACEOF else as_fn_error $? "pthread.h is missing" "$LINENO" 5 fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 $as_echo_n "checking for pthread_create in -lpthread... " >&6; } if ${ac_cv_lib_pthread_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpthread $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pthread_pthread_create=yes else ac_cv_lib_pthread_pthread_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 $as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : T_APPEND_V=-lpthread if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 $as_echo_n "checking for library containing pthread_create... " >&6; } if ${ac_cv_search_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
/* end confdefs.h.  */

/* Override any GCC internal prototype to avoid an error.
   Use char because int might match the return type of a GCC
   builtin and then its argument prototype would still apply.  */
#ifdef __cplusplus
extern "C"
#endif
char pthread_create ();
int
main ()
{
return pthread_create ();
  ;
  return 0;
}
_ACEOF
for ac_lib in '' ; do
  if test -z "$ac_lib"; then
    ac_res="none required"
  else
    ac_res=-l$ac_lib
    LIBS="-l$ac_lib $ac_func_search_save_LIBS"
  fi
  if ac_fn_c_try_link "$LINENO"; then :
  ac_cv_search_pthread_create=$ac_res
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext
  if ${ac_cv_search_pthread_create+:} false; then :
  break
fi
done
if ${ac_cv_search_pthread_create+:} false; then :

else
  ac_cv_search_pthread_create=no
fi
rm conftest.$ac_ext
LIBS=$ac_func_search_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5
$as_echo "$ac_cv_search_pthread_create" >&6; }
ac_res=$ac_cv_search_pthread_create
if test "$ac_res" != no; then :
  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"

else
  as_fn_error $? "libpthread is missing" "$LINENO" 5
fi

fi

  wrap_syms="${wrap_syms} pthread_create"
  have_pthread="1"
  have_dlsym="1"
  for ac_header in dlfcn.h
do :
  ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default"
if test "x$ac_cv_header_dlfcn_h" = xyes; then :
  cat >>confdefs.h <<_ACEOF
#define HAVE_DLFCN_H 1
_ACEOF
 ac_fn_c_check_func "$LINENO" "dlsym" "ac_cv_func_dlsym"
if test "x$ac_cv_func_dlsym" = xyes; then :

else
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlsym in -ldl" >&5
$as_echo_n "checking for dlsym in -ldl... " >&6; }
if ${ac_cv_lib_dl_dlsym+:} false; then :
  $as_echo_n "(cached) " >&6
else
  ac_check_lib_save_LIBS=$LIBS
LIBS="-ldl $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

/* Override any GCC internal prototype to avoid an error.
   Use char because int might match the return type of a GCC
   builtin and then its argument prototype would still apply.  */
#ifdef __cplusplus
extern "C"
#endif
char dlsym ();
int
main ()
{
return dlsym ();
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  ac_cv_lib_dl_dlsym=yes
else
  ac_cv_lib_dl_dlsym=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlsym" >&5
$as_echo "$ac_cv_lib_dl_dlsym" >&6; }
if test "x$ac_cv_lib_dl_dlsym" = xyes; then :
  LIBS="$LIBS -ldl"
else
  have_dlsym="0"
fi

fi

else
  have_dlsym="0"
fi

done

  if test "x$have_dlsym" = "x1" ; then
    $as_echo "#define JEMALLOC_HAVE_DLSYM " >>confdefs.h

  fi
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_atfork(3) is compilable" >&5
$as_echo_n "checking whether pthread_atfork(3) is compilable... " >&6; }
if ${je_cv_pthread_atfork+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <pthread.h>

int
main ()
{

	pthread_atfork((void *)0, (void *)0, (void *)0);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_pthread_atfork=yes
else
  je_cv_pthread_atfork=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_atfork" >&5
$as_echo "$je_cv_pthread_atfork" >&6; }

  if test "x${je_cv_pthread_atfork}" = "xyes" ; then
    $as_echo "#define JEMALLOC_HAVE_PTHREAD_ATFORK " >>confdefs.h

  fi
" >&6; } if ${je_cv_pthread_setname_np+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { pthread_setname_np(pthread_self(), "setname_test"); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pthread_setname_np=yes else je_cv_pthread_setname_np=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_setname_np" >&5 $as_echo "$je_cv_pthread_setname_np" >&6; } if test "x${je_cv_pthread_setname_np}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP " >>confdefs.h fi fi T_APPEND_V=-D_REENTRANT if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 $as_echo_n "checking for library containing clock_gettime... " >&6; } if ${ac_cv_search_clock_gettime+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char clock_gettime (); int main () { return clock_gettime (); ; return 0; } _ACEOF for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_clock_gettime=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_clock_gettime+:} false; then : break fi done if ${ac_cv_search_clock_gettime+:} false; then : else ac_cv_search_clock_gettime=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 $as_echo "$ac_cv_search_clock_gettime" >&6; } ac_res=$ac_cv_search_clock_gettime if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then if test "$ac_cv_search_clock_gettime" != "-lrt"; then SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" unset ac_cv_search_clock_gettime { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -dynamic" >&5 $as_echo_n "checking whether compiler supports -dynamic... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-dynamic if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
/* end confdefs.h.  */

int
main ()
{
    return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  je_cv_cflags_added=-dynamic
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
$as_echo_n "checking for library containing clock_gettime... " >&6; }
if ${ac_cv_search_clock_gettime+:} false; then :
  $as_echo_n "(cached) " >&6
else
  ac_func_search_save_LIBS=$LIBS
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

/* Override any GCC internal prototype to avoid an error.
   Use char because int might match the return type of a GCC
   builtin and then its argument prototype would still apply.  */
#ifdef __cplusplus
extern "C"
#endif
char clock_gettime ();
int
main ()
{
return clock_gettime ();
  ;
  return 0;
}
_ACEOF
for ac_lib in '' rt; do
  if test -z "$ac_lib"; then
    ac_res="none required"
  else
    ac_res=-l$ac_lib
    LIBS="-l$ac_lib $ac_func_search_save_LIBS"
  fi
  if ac_fn_c_try_link "$LINENO"; then :
  ac_cv_search_clock_gettime=$ac_res
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext
  if ${ac_cv_search_clock_gettime+:} false; then :
  break
fi
done
if ${ac_cv_search_clock_gettime+:} false; then :

else
  ac_cv_search_clock_gettime=no
fi
rm conftest.$ac_ext
LIBS=$ac_func_search_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
$as_echo "$ac_cv_search_clock_gettime" >&6; }
ac_res=$ac_cv_search_clock_gettime
if test "$ac_res" != no; then :
  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"

fi

    CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

  fi
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable" >&5
$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable... " >&6; }
if ${je_cv_clock_monotonic_coarse+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <time.h>

int
main ()
{

	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_clock_monotonic_coarse=yes
else
  je_cv_clock_monotonic_coarse=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic_coarse" >&5
$as_echo "$je_cv_clock_monotonic_coarse" >&6; }

if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
  $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1" >>confdefs.h

fi
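# Time-source probes: CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC, and Darwin's
# mach_absolute_time() are each tested for availability.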
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable" >&5
$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable... " >&6; }
if ${je_cv_clock_monotonic+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <unistd.h>
#include <time.h>

int
main ()
{

	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
#  error _POSIX_MONOTONIC_CLOCK missing/invalid
#endif
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_clock_monotonic=yes
else
  je_cv_clock_monotonic=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic" >&5
$as_echo "$je_cv_clock_monotonic" >&6; }

if test "x${je_cv_clock_monotonic}" = "xyes" ; then
  $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1" >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mach_absolute_time() is compilable" >&5
$as_echo_n "checking whether mach_absolute_time() is compilable... " >&6; }
if ${je_cv_mach_absolute_time+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <mach/mach_time.h>

int
main ()
{

	mach_absolute_time();
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_mach_absolute_time=yes
else
  je_cv_mach_absolute_time=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mach_absolute_time" >&5
$as_echo "$je_cv_mach_absolute_time" >&6; }

if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
  $as_echo "#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1" >>confdefs.h

fi

# Check whether --enable-syscall was given.
if test "${enable_syscall+set}" = set; then :
  enableval=$enable_syscall; if test "x$enable_syscall" = "xno" ; then
  enable_syscall="0"
else
  enable_syscall="1"
fi

else
  enable_syscall="1"

fi

if test "x$enable_syscall" = "x1" ; then
  SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"

  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
$as_echo_n "checking whether compiler supports -Werror... " >&6; }
T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
T_APPEND_V=-Werror
  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
else
  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
fi

if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

int
main ()
{
    return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  je_cv_cflags_added=-Werror
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether syscall(2) is compilable" >&5
$as_echo_n "checking whether syscall(2) is compilable... " >&6; }
if ${je_cv_syscall+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <sys/syscall.h>
#include <unistd.h>

int
main ()
{

	syscall(SYS_write, 2, "hello", 5);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_syscall=yes
else
  je_cv_syscall=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5
$as_echo "$je_cv_syscall" >&6; }

  CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

  if test "x$je_cv_syscall" = "xyes" ; then
    $as_echo "#define JEMALLOC_USE_SYSCALL " >>confdefs.h

  fi
fi

ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv"
if test "x$ac_cv_func_secure_getenv" = xyes; then :
  have_secure_getenv="1"
else
  have_secure_getenv="0"

fi

if test "x$have_secure_getenv" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_SECURE_GETENV " >>confdefs.h

fi

ac_fn_c_check_func "$LINENO" "sched_getcpu" "ac_cv_func_sched_getcpu"
if test "x$ac_cv_func_sched_getcpu" = xyes; then :
  have_sched_getcpu="1"
else
  have_sched_getcpu="0"

fi

if test "x$have_sched_getcpu" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_SCHED_GETCPU " >>confdefs.h

fi

ac_fn_c_check_func "$LINENO" "sched_setaffinity" "ac_cv_func_sched_setaffinity"
if test "x$ac_cv_func_sched_setaffinity" = xyes; then :
  have_sched_setaffinity="1"
else
  have_sched_setaffinity="0"

fi

if test "x$have_sched_setaffinity" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_SCHED_SETAFFINITY " >>confdefs.h

fi

ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid"
if test "x$ac_cv_func_issetugid" = xyes; then :
  have_issetugid="1"
else
  have_issetugid="0"

fi

if test "x$have_issetugid" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_ISSETUGID " >>confdefs.h

fi

ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup"
if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then :
  have__malloc_thread_cleanup="1"
else
  have__malloc_thread_cleanup="0"

fi

if test "x$have__malloc_thread_cleanup" = "x1" ; then
  $as_echo "#define JEMALLOC_MALLOC_THREAD_CLEANUP " >>confdefs.h

  wrap_syms="${wrap_syms} _malloc_thread_cleanup"
  force_tls="1"
fi

ac_fn_c_check_func "$LINENO" "_pthread_mutex_init_calloc_cb" "ac_cv_func__pthread_mutex_init_calloc_cb"
if test "x$ac_cv_func__pthread_mutex_init_calloc_cb" = xyes; then :
  have__pthread_mutex_init_calloc_cb="1"
else
  have__pthread_mutex_init_calloc_cb="0"

fi

if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
  $as_echo "#define JEMALLOC_MUTEX_INIT_CB 1" >>confdefs.h

  wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork"
fi

# Check whether --enable-lazy_lock was given.
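# lazy-lock defers real locking until a second thread is created; it needs
# dlsym() so that pthread_create can be interposed, hence the hard error
# below when dlsym support is missing.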
if test "${enable_lazy_lock+set}" = set; then : enableval=$enable_lazy_lock; if test "x$enable_lazy_lock" = "xno" ; then enable_lazy_lock="0" else enable_lazy_lock="1" fi else enable_lazy_lock="" fi if test "x${enable_lazy_lock}" = "x" ; then if test "x${force_lazy_lock}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; } enable_lazy_lock="1" else enable_lazy_lock="0" fi fi if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no lazy-lock because thread creation monitoring is unimplemented" >&5 $as_echo "Forcing no lazy-lock because thread creation monitoring is unimplemented" >&6; } enable_lazy_lock="0" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$have_dlsym" = "x1" ; then $as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h else as_fn_error $? "Missing dlsym support: lazy-lock cannot be enabled." "$LINENO" 5 fi fi if test "x${force_tls}" = "x1" ; then enable_tls="1" elif test "x${force_tls}" = "x0" ; then enable_tls="0" else enable_tls="1" fi if test "x${enable_tls}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5 $as_echo_n "checking for TLS... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ __thread int x; int main () { x = 42; return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } enable_tls="0" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else enable_tls="0" fi if test "x${enable_tls}" = "x1" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_TLS _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5 $as_echo_n "checking whether C11 atomics is compilable... " >&6; } if ${je_cv_c11_atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #include #else #error Atomics not available #endif int main () { uint64_t *p = (uint64_t *)0; uint64_t x = 1; volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; uint64_t r = atomic_fetch_add(a, x) + x; return r == 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_c11_atomics=yes else je_cv_c11_atomics=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11_atomics" >&5 $as_echo "$je_cv_c11_atomics" >&6; } if test "x${je_cv_c11_atomics}" = "xyes" ; then $as_echo "#define JEMALLOC_C11_ATOMICS 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __atomic atomics is compilable" >&5 $as_echo_n "checking whether GCC __atomic atomics is compilable... " >&6; } if ${je_cv_gcc_atomic_atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __atomic atomics is compilable" >&5
$as_echo_n "checking whether GCC __atomic atomics is compilable... " >&6; }
if ${je_cv_gcc_atomic_atomics+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

int
main ()
{

    int x = 0;
    int val = 1;
    int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
    int after_add = x;
    return after_add == 1;
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_gcc_atomic_atomics=yes
else
  je_cv_gcc_atomic_atomics=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_atomic_atomics" >&5
$as_echo "$je_cv_gcc_atomic_atomics" >&6; }

if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then
  $as_echo "#define JEMALLOC_GCC_ATOMIC_ATOMICS 1" >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __sync atomics is compilable" >&5
$as_echo_n "checking whether GCC __sync atomics is compilable... " >&6; }
if ${je_cv_gcc_sync_atomics+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

int
main ()
{

    int x = 0;
    int before_add = __sync_fetch_and_add(&x, 1);
    int after_add = x;
    return (before_add == 0) && (after_add == 1);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_gcc_sync_atomics=yes
else
  je_cv_gcc_sync_atomics=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_sync_atomics" >&5
$as_echo "$je_cv_gcc_sync_atomics" >&6; }

if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then
  $as_echo "#define JEMALLOC_GCC_SYNC_ATOMICS 1" >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSAtomic*() is compilable" >&5
$as_echo_n "checking whether Darwin OSAtomic*() is compilable... " >&6; }
if ${je_cv_osatomic+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <libkern/OSAtomic.h>
#include <inttypes.h>

int
main ()
{

	{
		int32_t x32 = 0;
		volatile int32_t *x32p = &x32;
		OSAtomicAdd32(1, x32p);
	}
	{
		int64_t x64 = 0;
		volatile int64_t *x64p = &x64;
		OSAtomicAdd64(1, x64p);
	}
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_osatomic=yes
else
  je_cv_osatomic=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osatomic" >&5
$as_echo "$je_cv_osatomic" >&6; }

if test "x${je_cv_osatomic}" = "xyes" ; then
  $as_echo "#define JEMALLOC_OSATOMIC " >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(2) is compilable" >&5
$as_echo_n "checking whether madvise(2) is compilable... " >&6; }
if ${je_cv_madvise+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <sys/mman.h>

int
main ()
{

	madvise((void *)0, 0, 0);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_madvise=yes
else
  je_cv_madvise=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madvise" >&5
$as_echo "$je_cv_madvise" >&6; }

if test "x${je_cv_madvise}" = "xyes" ; then
  $as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h
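  # Purging probes: MADV_FREE is tested first; on x86/x86_64 Linux it is
  # additionally defined manually for kernels whose headers predate it.
  # MADV_DONTNEED, MADV_DO[NT]DUMP, and MADV_[NO]HUGEPAGE follow.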
 */

#include <sys/mman.h>

int
main ()
{

	madvise((void *)0, 0, MADV_FREE);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_madv_free=yes
else
  je_cv_madv_free=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_free" >&5
$as_echo "$je_cv_madv_free" >&6; }
if test "x${je_cv_madv_free}" = "xyes" ; then
  $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h

elif test "x${je_cv_madvise}" = "xyes" ; then
  case "${host_cpu}" in i686|x86_64)
	case "${host}" in *-*-linux*)
		$as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h

		$as_echo "#define JEMALLOC_DEFINE_MADVISE_FREE " >>confdefs.h

		;;
	esac
	;;
  esac
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DONTNEED) is compilable" >&5
$as_echo_n "checking whether madvise(..., MADV_DONTNEED) is compilable... " >&6; }
if ${je_cv_madv_dontneed+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <sys/mman.h>

int
main ()
{

	madvise((void *)0, 0, MADV_DONTNEED);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_madv_dontneed=yes
else
  je_cv_madv_dontneed=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontneed" >&5
$as_echo "$je_cv_madv_dontneed" >&6; }
if test "x${je_cv_madv_dontneed}" = "xyes" ; then
  $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DO[NT]DUMP) is compilable" >&5
$as_echo_n "checking whether madvise(..., MADV_DO[NT]DUMP) is compilable... " >&6; }
if ${je_cv_madv_dontdump+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <sys/mman.h>

int
main ()
{

	madvise((void *)0, 0, MADV_DONTDUMP);
	madvise((void *)0, 0, MADV_DODUMP);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_madv_dontdump=yes
else
  je_cv_madv_dontdump=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontdump" >&5
$as_echo "$je_cv_madv_dontdump" >&6; }
if test "x${je_cv_madv_dontdump}" = "xyes" ; then
  $as_echo "#define JEMALLOC_MADVISE_DONTDUMP " >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable" >&5
$as_echo_n "checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable... " >&6; }
if ${je_cv_thp+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <sys/mman.h>

int
main ()
{

	madvise((void *)0, 0, MADV_HUGEPAGE);
	madvise((void *)0, 0, MADV_NOHUGEPAGE);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_thp=yes
else
  je_cv_thp=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_thp" >&5
$as_echo "$je_cv_thp" >&6; }
case "${host_cpu}" in
  arm*)
	;;
  *)
  if test "x${je_cv_thp}" = "xyes" ; then
    $as_echo "#define JEMALLOC_HAVE_MADVISE_HUGE " >>confdefs.h

  fi
	;;
esac
fi

if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 32-bit __sync_{add,sub}_and_fetch()" >&5
$as_echo_n "checking whether to force 32-bit __sync_{add,sub}_and_fetch()... " >&6; }
" >&6; } if ${je_cv_sync_compare_and_swap_4+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 { uint32_t x32 = 0; __sync_add_and_fetch(&x32, 42); __sync_sub_and_fetch(&x32, 1); } #else #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 is defined, no need to force #endif ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_sync_compare_and_swap_4=yes else je_cv_sync_compare_and_swap_4=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_4" >&5 $as_echo "$je_cv_sync_compare_and_swap_4" >&6; } if test "x${je_cv_sync_compare_and_swap_4}" = "xyes" ; then $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_4 " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 64-bit __sync_{add,sub}_and_fetch()" >&5 $as_echo_n "checking whether to force 64-bit __sync_{add,sub}_and_fetch()... " >&6; } if ${je_cv_sync_compare_and_swap_8+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 { uint64_t x64 = 0; __sync_add_and_fetch(&x64, 42); __sync_sub_and_fetch(&x64, 1); } #else #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 is defined, no need to force #endif ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_sync_compare_and_swap_8=yes else je_cv_sync_compare_and_swap_8=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_8" >&5 $as_echo "$je_cv_sync_compare_and_swap_8" >&6; } if test "x${je_cv_sync_compare_and_swap_8}" = "xyes" ; then $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_8 " >>confdefs.h fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5 $as_echo_n "checking for __builtin_clz... " >&6; } if ${je_cv_builtin_clz+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { { unsigned x = 0; int y = __builtin_clz(x); } { unsigned long x = 0; int y = __builtin_clzl(x); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_builtin_clz=yes else je_cv_builtin_clz=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_builtin_clz" >&5 $as_echo "$je_cv_builtin_clz" >&6; } if test "x${je_cv_builtin_clz}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin os_unfair_lock_*() is compilable" >&5 $as_echo_n "checking whether Darwin os_unfair_lock_*() is compilable... " >&6; } if ${je_cv_os_unfair_lock+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
 */

#include <os/lock.h>
#include <AvailabilityMacros.h>

int
main ()
{

	#if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
	#error "os_unfair_lock is not supported"
	#else
	os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
	os_unfair_lock_lock(&lock);
	os_unfair_lock_unlock(&lock);
	#endif

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_os_unfair_lock=yes
else
  je_cv_os_unfair_lock=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_os_unfair_lock" >&5
$as_echo "$je_cv_os_unfair_lock" >&6; }
if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
  $as_echo "#define JEMALLOC_OS_UNFAIR_LOCK " >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSSpin*() is compilable" >&5
$as_echo_n "checking whether Darwin OSSpin*() is compilable... " >&6; }
if ${je_cv_osspin+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <libkern/OSAtomic.h>
#include <inttypes.h>

int
main ()
{

	OSSpinLock lock = 0;
	OSSpinLockLock(&lock);
	OSSpinLockUnlock(&lock);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_osspin=yes
else
  je_cv_osspin=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osspin" >&5
$as_echo "$je_cv_osspin" >&6; }
if test "x${je_cv_osspin}" = "xyes" ; then
  $as_echo "#define JEMALLOC_OSSPIN " >>confdefs.h

fi

# Check whether --enable-zone-allocator was given.
if test "${enable_zone_allocator+set}" = set; then :
  enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then
  enable_zone_allocator="0"
else
  enable_zone_allocator="1"
fi

else
  if test "x${abi}" = "xmacho"; then
  enable_zone_allocator="1"
fi

fi

if test "x${enable_zone_allocator}" = "x1" ; then
  if test "x${abi}" != "xmacho"; then
    as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5
  fi
  $as_echo "#define JEMALLOC_ZONE " >>confdefs.h

fi

# Check whether --enable-initial-exec-tls was given.
if test "${enable_initial_exec_tls+set}" = set; then :
  enableval=$enable_initial_exec_tls; if test "x$enable_initial_exec_tls" = "xno" ; then
  enable_initial_exec_tls="0"
else
  enable_initial_exec_tls="1"
fi

else
  enable_initial_exec_tls="1"

fi

if test "x${je_cv_tls_model}" = "xyes" -a \
       "x${enable_initial_exec_tls}" = "x1" ; then
  $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h

else
  $as_echo "#define JEMALLOC_TLS_MODEL " >>confdefs.h

fi

if test "x${have_pthread}" = "x1" -a "x${have_dlsym}" = "x1" \
     -a "x${je_cv_os_unfair_lock}" != "xyes" \
     -a "x${je_cv_osspin}" != "xyes" ; then
  $as_echo "#define JEMALLOC_BACKGROUND_THREAD 1" >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc malloc hook is compilable" >&5
$as_echo_n "checking whether glibc malloc hook is compilable... " >&6; }
if ${je_cv_glibc_malloc_hook+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.
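   Exercises the (long-deprecated) glibc __malloc_hook, __realloc_hook and
   __free_hook interposition points. When the test links and no
   JEMALLOC_PREFIX is in use, the hook symbols are appended to wrap_syms
   so that jemalloc can take them over.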
 */

#include <stddef.h>

extern void (* __free_hook)(void *ptr);
extern void *(* __malloc_hook)(size_t size);
extern void *(* __realloc_hook)(void *ptr, size_t size);

int
main ()
{

  void *ptr = 0L;
  if (__malloc_hook) ptr = __malloc_hook(1);
  if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
  if (__free_hook && ptr) __free_hook(ptr);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_glibc_malloc_hook=yes
else
  je_cv_glibc_malloc_hook=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_malloc_hook" >&5
$as_echo "$je_cv_glibc_malloc_hook" >&6; }
if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
  if test "x${JEMALLOC_PREFIX}" = "x" ; then
    $as_echo "#define JEMALLOC_GLIBC_MALLOC_HOOK " >>confdefs.h

    wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook"
  fi
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc memalign hook is compilable" >&5
$as_echo_n "checking whether glibc memalign hook is compilable... " >&6; }
if ${je_cv_glibc_memalign_hook+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <stddef.h>

extern void *(* __memalign_hook)(size_t alignment, size_t size);

int
main ()
{

  void *ptr = 0L;
  if (__memalign_hook) ptr = __memalign_hook(16, 7);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_glibc_memalign_hook=yes
else
  je_cv_glibc_memalign_hook=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_memalign_hook" >&5
$as_echo "$je_cv_glibc_memalign_hook" >&6; }
if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
  if test "x${JEMALLOC_PREFIX}" = "x" ; then
    $as_echo "#define JEMALLOC_GLIBC_MEMALIGN_HOOK " >>confdefs.h

    wrap_syms="${wrap_syms} __memalign_hook"
  fi
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads adaptive mutexes is compilable" >&5
$as_echo_n "checking whether pthreads adaptive mutexes is compilable... " >&6; }
if ${je_cv_pthread_mutex_adaptive_np+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <pthread.h>

int
main ()
{

  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
  pthread_mutexattr_destroy(&attr);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_pthread_mutex_adaptive_np=yes
else
  je_cv_pthread_mutex_adaptive_np=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_mutex_adaptive_np" >&5
$as_echo "$je_cv_pthread_mutex_adaptive_np" >&6; }
if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
  $as_echo "#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP " >>confdefs.h

fi

SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -D_GNU_SOURCE" >&5
$as_echo_n "checking whether compiler supports -D_GNU_SOURCE... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-D_GNU_SOURCE if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-D_GNU_SOURCE { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
 */

int
main ()
{

    return 0;

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  je_cv_cflags_added=-herror_on_warning
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
      CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"

fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns char with gnu source is compilable" >&5
$as_echo_n "checking whether strerror_r returns char with gnu source is compilable... " >&6; }
if ${je_cv_strerror_r_returns_char_with_gnu_source+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int
main ()
{

  char *buffer = (char *) malloc(100);
  char *error = strerror_r(EINVAL, buffer, 100);
  printf("%s\n", error);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_strerror_r_returns_char_with_gnu_source=yes
else
  je_cv_strerror_r_returns_char_with_gnu_source=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_strerror_r_returns_char_with_gnu_source" >&5
$as_echo "$je_cv_strerror_r_returns_char_with_gnu_source" >&6; }
CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then
  $as_echo "#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE " >>confdefs.h

fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
if ${ac_cv_header_stdbool_h+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

#include <stdbool.h>
#ifndef bool
 "error: bool is not defined"
#endif
#ifndef false
 "error: false is not defined"
#endif
#if false
 "error: false is not 0"
#endif
#ifndef true
 "error: true is not defined"
#endif
#if true != 1
 "error: true is not 1"
#endif
#ifndef __bool_true_false_are_defined
 "error: __bool_true_false_are_defined is not defined"
#endif

        struct s { _Bool s: 1; _Bool t; } s;

        char a[true == 1 ? 1 : -1];
        char b[false == 0 ? 1 : -1];
        char c[__bool_true_false_are_defined == 1 ? 1 : -1];
        char d[(bool) 0.5 == true ? 1 : -1];
        /* See body of main program for 'e'.  */
        char f[(_Bool) 0.0 == false ? 1 : -1];
        char g[true];
        char h[sizeof (_Bool)];
        char i[sizeof s.t];
        enum { j = false, k = true, l = false * true, m = true * 256 };
        /* The following fails for
           HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
        _Bool n[m];
        char o[sizeof n == m * sizeof n[0] ? 1 : -1];
        char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
        /* Catch a bug in an HP-UX C compiler.  See
           http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
           http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
         */
        _Bool q = true;
        _Bool *pq = &q;

int
main ()
{

        bool e = &s;
        *pq |= q;
        *pq |= ! q;
        /* Refer to every declared value, to avoid compiler optimizations.
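           This is Autoconf's stock stdbool.h conformance probe: each
           declaration above targets a historical compiler bug, and the
           return expression below references every declared value so
           that no declaration can be optimized away before it is
           type-checked.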
*/ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdbool_h=yes else ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h fi ac_config_commands="$ac_config_commands include/jemalloc/internal/public_symbols.txt" ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols.awk" ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols_jet.awk" ac_config_commands="$ac_config_commands include/jemalloc/internal/public_namespace.h" ac_config_commands="$ac_config_commands include/jemalloc/internal/public_unnamespace.h" ac_config_commands="$ac_config_commands include/jemalloc/internal/size_classes.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_protos_jet.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_rename.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle_jet.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc.h" ac_config_headers="$ac_config_headers $cfghdrs_tup" ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. 
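# The sed pipeline below re-quotes every cache variable so that the
# resulting confcache can be sourced verbatim by a later configure run
# (see the --config-cache option).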
sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
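# The cascade below picks the cheapest usable echo replacement: ksh's
# `print' builtin, then `printf', then /usr/ucb/echo, and finally an
# expr-based loop that emits the string in newline-delimited chunks.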
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. 
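# (Also exposed through the `as_unset' alias used elsewhere in this
# script.)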
as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ config.status configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." 
ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # srcdir="${srcdir}" objroot="${objroot}" mangling_map="${mangling_map}" public_syms="${public_syms}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" SHELL="${SHELL}" srcdir="${srcdir}" objroot="${objroot}" LG_QUANTA="${LG_QUANTA}" LG_PAGE_SIZES="${LG_PAGE_SIZES}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" install_suffix="${install_suffix}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. 
for ac_config_target in $ac_config_targets do case $ac_config_target in "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;; "include/jemalloc/internal/private_symbols.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols.awk" ;; "include/jemalloc/internal/private_symbols_jet.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols_jet.awk" ;; "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/internal/size_classes.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/size_classes.h" ;; "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;; "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;; "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;; "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;; "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;; "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;; "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. 
# But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. 
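# The loop below retries with a progressively longer delimiter a bounded
# number of times and aborts if confdefs.h keeps colliding with it.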
ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. 
$configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? 
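# ac_sed_extra, assembled below, chains the VPATH cleanup, any extrasub
# set by configure.ac, and the @srcdir@/@builddir@/@INSTALL@ substitutions
# that run ahead of the awk substitution pass.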
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? 
"could not create -" "$LINENO" 5 fi ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "include/jemalloc/internal/public_symbols.txt":C) f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" cp /dev/null "${f}" for nm in `echo ${mangling_map} |tr ',' ' '` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` echo "${n}:${m}" >> "${f}" public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` done for sym in ${public_syms} ; do n="${sym}" m="${JEMALLOC_PREFIX}${sym}" echo "${n}:${m}" >> "${f}" done ;; "include/jemalloc/internal/private_symbols.awk":C) f="${objroot}include/jemalloc/internal/private_symbols.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" ;; "include/jemalloc/internal/private_symbols_jet.awk":C) f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" ;; "include/jemalloc/internal/public_namespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/internal/size_classes.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" 3 "${LG_PAGE_SIZES}" 2 > "${objroot}include/jemalloc/internal/size_classes.h" ;; "include/jemalloc/jemalloc_protos_jet.h":C) mkdir -p "${objroot}include/jemalloc" cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > 
"${objroot}include/jemalloc/jemalloc${install_suffix}.h" ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 $as_echo "===============================================================================" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc version : ${jemalloc_version}" >&5 $as_echo "jemalloc version : ${jemalloc_version}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: library revision : ${rev}" >&5 $as_echo "library revision : ${rev}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG : ${CONFIG}" >&5 $as_echo "CONFIG : ${CONFIG}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5 $as_echo "CC : ${CC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}" >&5 $as_echo "CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}" >&5 $as_echo "SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&5 $as_echo "EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5 $as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CXX : ${CXX}" >&5 $as_echo "CXX : ${CXX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&5 $as_echo "CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&5 $as_echo "SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}" >&5 $as_echo "EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5 $as_echo "LDFLAGS : ${LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5 $as_echo "EXTRA_LDFLAGS : 
${EXTRA_LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: DSO_LDFLAGS : ${DSO_LDFLAGS}" >&5 $as_echo "DSO_LDFLAGS : ${DSO_LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5 $as_echo "LIBS : ${LIBS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5 $as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC : ${XSLTPROC}" >&5 $as_echo "XSLTPROC : ${XSLTPROC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT : ${XSLROOT}" >&5 $as_echo "XSLROOT : ${XSLROOT}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: PREFIX : ${PREFIX}" >&5 $as_echo "PREFIX : ${PREFIX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR : ${BINDIR}" >&5 $as_echo "BINDIR : ${BINDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5 $as_echo "DATADIR : ${DATADIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR : ${INCLUDEDIR}" >&5 $as_echo "INCLUDEDIR : ${INCLUDEDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR : ${LIBDIR}" >&5 $as_echo "LIBDIR : ${LIBDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR : ${MANDIR}" >&5 $as_echo "MANDIR : ${MANDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: srcroot : ${srcroot}" >&5 $as_echo "srcroot : ${srcroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_srcroot : ${abs_srcroot}" >&5 $as_echo "abs_srcroot : ${abs_srcroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: objroot : ${objroot}" >&5 $as_echo "objroot : ${objroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_objroot : ${abs_objroot}" >&5 $as_echo "abs_objroot : ${abs_objroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&5 $as_echo "JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PRIVATE_NAMESPACE" >&5 $as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: : ${JEMALLOC_PRIVATE_NAMESPACE}" >&5 $as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix : ${install_suffix}" >&5 $as_echo "install_suffix : ${install_suffix}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: malloc_conf : ${config_malloc_conf}" >&5 $as_echo "malloc_conf : ${config_malloc_conf}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5 $as_echo "autogen : ${enable_autogen}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5 $as_echo "debug : ${enable_debug}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: stats : ${enable_stats}" >&5 $as_echo "stats : ${enable_stats}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof : ${enable_prof}" >&5 $as_echo "prof : ${enable_prof}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libunwind : ${enable_prof_libunwind}" >&5 $as_echo "prof-libunwind : ${enable_prof_libunwind}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libgcc : ${enable_prof_libgcc}" >&5 $as_echo "prof-libgcc : ${enable_prof_libgcc}" 
>&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-gcc : ${enable_prof_gcc}" >&5 $as_echo "prof-gcc : ${enable_prof_gcc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: fill : ${enable_fill}" >&5 $as_echo "fill : ${enable_fill}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: utrace : ${enable_utrace}" >&5 $as_echo "utrace : ${enable_utrace}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc : ${enable_xmalloc}" >&5 $as_echo "xmalloc : ${enable_xmalloc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: log : ${enable_log}" >&5 $as_echo "log : ${enable_log}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock : ${enable_lazy_lock}" >&5 $as_echo "lazy_lock : ${enable_lazy_lock}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious : ${enable_cache_oblivious}" >&5 $as_echo "cache-oblivious : ${enable_cache_oblivious}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: cxx : ${enable_cxx}" >&5 $as_echo "cxx : ${enable_cxx}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 $as_echo "===============================================================================" >&6; } jemalloc-sys-0.3.2/configure/VERSION010064400007650000024000000000621336403166300153710ustar00000000000000005.1.0-0-g61efbda7098de6fe64c362d309824864308c36d4 jemalloc-sys-0.3.2/jemalloc/.appveyor.yml010064400007650000024000000017311340421340100165630ustar0000000000000000version: '{build}' environment: matrix: - MSYSTEM: MINGW64 CPU: x86_64 MSVC: amd64 - MSYSTEM: MINGW32 CPU: i686 MSVC: x86 - MSYSTEM: MINGW64 CPU: x86_64 - MSYSTEM: MINGW32 CPU: i686 - MSYSTEM: MINGW64 CPU: x86_64 MSVC: amd64 CONFIG_FLAGS: --enable-debug - MSYSTEM: MINGW32 CPU: i686 MSVC: x86 CONFIG_FLAGS: --enable-debug - MSYSTEM: MINGW64 CPU: x86_64 CONFIG_FLAGS: --enable-debug - MSYSTEM: MINGW32 CPU: i686 CONFIG_FLAGS: --enable-debug install: - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC% - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc - pacman --noconfirm -Suy mingw-w64-%CPU%-make build_script: - bash -c "autoconf" - bash -c "./configure $CONFIG_FLAGS" - mingw32-make - file lib/jemalloc.dll - mingw32-make tests - mingw32-make -k check jemalloc-sys-0.3.2/jemalloc/.autom4te.cfg010064400007650000024000000001531340421340100164130ustar0000000000000000begin-language: "Autoconf-without-aclocal-m4" args: --no-cache end-language: "Autoconf-without-aclocal-m4" jemalloc-sys-0.3.2/jemalloc/.gitattributes010064400007650000024000000000231340421340100170010ustar0000000000000000* text=auto eol=lf jemalloc-sys-0.3.2/jemalloc/.gitignore010064400007650000024000000035711340421341300161140ustar0000000000000000/bin/jemalloc-config /bin/jemalloc.sh /bin/jeprof /config.stamp /config.log /config.status /configure /doc/html.xsl /doc/manpages.xsl /doc/jemalloc.xml /doc/jemalloc.html /doc/jemalloc.3 /jemalloc.pc /lib/ /Makefile /include/jemalloc/internal/jemalloc_preamble.h /include/jemalloc/internal/jemalloc_internal_defs.h /include/jemalloc/internal/private_namespace.gen.h /include/jemalloc/internal/private_namespace.h /include/jemalloc/internal/private_namespace_jet.gen.h /include/jemalloc/internal/private_namespace_jet.h /include/jemalloc/internal/private_symbols.awk /include/jemalloc/internal/private_symbols_jet.awk /include/jemalloc/internal/public_namespace.h 
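# The generated-header entries here are outputs of the bundled configure
# script: its config.status :C commands (shown earlier in this archive) emit
# public_symbols.txt, the private_symbols*.awk scripts, the namespace
# headers, and size_classes.h into the object tree, which is why they are
# ignored rather than committed.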
/include/jemalloc/internal/public_symbols.txt /include/jemalloc/internal/public_unnamespace.h /include/jemalloc/internal/size_classes.h /include/jemalloc/jemalloc.h /include/jemalloc/jemalloc_defs.h /include/jemalloc/jemalloc_macros.h /include/jemalloc/jemalloc_mangle.h /include/jemalloc/jemalloc_mangle_jet.h /include/jemalloc/jemalloc_protos.h /include/jemalloc/jemalloc_protos_jet.h /include/jemalloc/jemalloc_rename.h /include/jemalloc/jemalloc_typedefs.h /src/*.[od] /src/*.sym /run_tests.out/ /test/test.sh test/include/test/jemalloc_test.h test/include/test/jemalloc_test_defs.h /test/integration/[A-Za-z]* !/test/integration/[A-Za-z]*.* /test/integration/*.[od] /test/integration/*.out /test/integration/cpp/[A-Za-z]* !/test/integration/cpp/[A-Za-z]*.* /test/integration/cpp/*.[od] /test/integration/cpp/*.out /test/src/*.[od] /test/stress/[A-Za-z]* !/test/stress/[A-Za-z]*.* /test/stress/*.[od] /test/stress/*.out /test/unit/[A-Za-z]* !/test/unit/[A-Za-z]*.* /test/unit/*.[od] /test/unit/*.out /VERSION *.pdb *.sdf *.opendb *.VC.db *.opensdf *.cachefile *.suo *.user *.sln.docstates *.tmp .vs/ /msvc/Win32/ /msvc/x64/ /msvc/projects/*/*/Debug*/ /msvc/projects/*/*/Release*/ /msvc/projects/*/*/Win32/ /msvc/projects/*/*/x64/ jemalloc-sys-0.3.2/jemalloc/.travis.yml010064400007650000024000000212361340421341300162330ustar0000000000000000language: generic dist: precise matrix: include: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: osx env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: osx env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: osx env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: osx env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: osx env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: osx env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror 
-Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: apt: packages: - gcc-multilib - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ 
COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" before_script: - autoconf - ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS - make -j3 - make -j3 tests script: - make check jemalloc-sys-0.3.2/jemalloc/autogen.sh010075500007650000024000000004121340421340100161110ustar0000000000000000#!/bin/sh for i in autoconf; do echo "$i" $i if [ $? -ne 0 ]; then echo "Error $? in $i" exit 1 fi done echo "./configure --enable-autogen $@" ./configure --enable-autogen $@ if [ $? -ne 0 ]; then echo "Error $? in ./configure" exit 1 fi jemalloc-sys-0.3.2/jemalloc/bin/jemalloc-config.in010064400007650000024000000030761340421340100202520ustar0000000000000000#!/bin/sh usage() { cat < Options: --help | -h : Print usage. --version : Print jemalloc version. --revision : Print shared library revision number. --config : Print configure options used to build jemalloc. --prefix : Print installation directory prefix. --bindir : Print binary installation directory. --datadir : Print data installation directory. --includedir : Print include installation directory. --libdir : Print library installation directory. --mandir : Print manual page installation directory. --cc : Print compiler used to build jemalloc. --cflags : Print compiler flags used to build jemalloc. --cppflags : Print preprocessor flags used to build jemalloc. --cxxflags : Print C++ compiler flags used to build jemalloc. --ldflags : Print library flags used to build jemalloc. 
--libs : Print libraries jemalloc was linked against. EOF } prefix="@prefix@" exec_prefix="@exec_prefix@" case "$1" in --help | -h) usage exit 0 ;; --version) echo "@jemalloc_version@" ;; --revision) echo "@rev@" ;; --config) echo "@CONFIG@" ;; --prefix) echo "@PREFIX@" ;; --bindir) echo "@BINDIR@" ;; --datadir) echo "@DATADIR@" ;; --includedir) echo "@INCLUDEDIR@" ;; --libdir) echo "@LIBDIR@" ;; --mandir) echo "@MANDIR@" ;; --cc) echo "@CC@" ;; --cflags) echo "@CFLAGS@" ;; --cppflags) echo "@CPPFLAGS@" ;; --cxxflags) echo "@CXXFLAGS@" ;; --ldflags) echo "@LDFLAGS@ @EXTRA_LDFLAGS@" ;; --libs) echo "@LIBS@" ;; *) usage exit 1 esac jemalloc-sys-0.3.2/jemalloc/bin/jemalloc.sh.in010064400007650000024000000002271340421340100174130ustar0000000000000000#!/bin/sh prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ @LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ export @LD_PRELOAD_VAR@ exec "$@" jemalloc-sys-0.3.2/jemalloc/bin/jeprof.in010064400007650000024000005362031340421340100165110ustar0000000000000000#! /usr/bin/env perl # Copyright (c) 1998-2007, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # --- # Program for printing the profile generated by common/profiler.cc, # or by the heap profiler (common/debugallocation.cc) # # The profile contains a sequence of entries of the form: # # This program parses the profile, and generates user-readable # output. # # Examples: # # % tools/jeprof "program" "profile" # Enters "interactive" mode # # % tools/jeprof --text "program" "profile" # Generates one line per procedure # # % tools/jeprof --gv "program" "profile" # Generates annotated call-graph and displays via "gv" # # % tools/jeprof --gv --focus=Mutex "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # # % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # and does not match "string" # # % tools/jeprof --list=IBF_CheckDocid "program" "profile" # Generates disassembly listing of all routines with at least one # sample that match the --list= pattern. 
The listing is
# annotated with the flat and cumulative sample counts at each line.
#
# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
#                Generates disassembly listing of all routines with at least one
#                sample that match the --disasm= pattern.  The listing is
#                annotated with the flat and cumulative sample counts at each PC value.
#
# TODO: Use color to indicate files?

use strict;
use warnings;
use Getopt::Long;
use Cwd;

my $JEPROF_VERSION = "@jemalloc_version@";
my $PPROF_VERSION = "2.0";

# These are the object tools we use which can come from a
# user-specified location using --tools, from the JEPROF_TOOLS
# environment variable, or from the environment.
my %obj_tool_map = (
  "objdump" => "objdump",
  "nm" => "nm",
  "addr2line" => "addr2line",
  "c++filt" => "c++filt",
  ## ConfigureObjTools may add architecture-specific entries:
  #"nm_pdb" => "nm-pdb",              # for reading windows (PDB-format) executables
  #"addr2line_pdb" => "addr2line-pdb",  # ditto
  #"otool" => "otool",                # equivalent of objdump on OS X
);
# NOTE: these are lists, so you can put in commandline flags if you want.
my @DOT = ("dot");       # leave non-absolute, since it may be in /usr/local
my @GV = ("gv");
my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread
my @KCACHEGRIND = ("kcachegrind");
my @PS2PDF = ("ps2pdf");
# These are used for dynamic profiles
my @URL_FETCHER = ("curl", "-s", "--fail");

# These are the web pages that servers need to support for dynamic profiles
my $HEAP_PAGE = "/pprof/heap";
my $PROFILE_PAGE = "/pprof/profile";   # must support cgi-param "?seconds=#"
my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
                                                     # ?seconds=#&event=x&period=n
my $GROWTH_PAGE = "/pprof/growth";
my $CONTENTION_PAGE = "/pprof/contention";
my $WALL_PAGE = "/pprof/wall(?:\\?.*)?";  # accepts options like namefilter
my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
                                                           # "?seconds=#",
                                                           # "?tags_regexp=#" and
                                                           # "?type=#".
my $SYMBOL_PAGE = "/pprof/symbol";     # must support symbol lookup via POST
my $PROGRAM_NAME_PAGE = "/pprof/cmdline";

# These are the web pages that can be named on the command line.
# All the alternatives must begin with /.
my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
               "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
               "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";

# default binary name
my $UNKNOWN_BINARY = "(unknown)";

# There is a pervasive dependency on the length (in hex characters,
# i.e., nibbles) of an address, distinguishing between 32-bit and
# 64-bit profiles. To err on the safe side, default to 64-bit here:
my $address_length = 16;

my $dev_null = "/dev/null";
if (! -e $dev_null && $^O =~ /MSWin/) {  # $^O is the OS perl was built for
  $dev_null = "nul";
}

# A list of paths to search for shared object files
my @prefix_list = ();

# Special routine name that should not have any symbols.
# Used as separator to parse "addr2line -i" output.
my $sep_symbol = '_fini';
my $sep_address = undef;

##### Argument parsing #####

sub usage_string {
  return <<EOF;
Usage:
jeprof [options] <program> <profiles>
   <profiles> is a space separated list of profile names.
jeprof [options] <symbolized-profiles>
   <symbolized-profiles> is a list of profile files where each file contains
   the necessary symbol mappings as well as profile data (likely generated
   with --raw).
jeprof [options] <profile>
   <profile> is a remote form.  Symbols are obtained from host:port$SYMBOL_PAGE

   Each name can be:
   /path/to/profile        - a path to a profile file
   host:port[/<service>]   - a location of a service to get profile from

   The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
                         $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
                         $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
   For instance:
     jeprof http://myserver.com:80$HEAP_PAGE
   If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
jeprof --symbols <program>
   Maps addresses to symbol names.  In this mode, stdin should be a list
   of library mappings, in the same format as is found in the heap- and
   cpu-profile files (this loosely matches that of /proc/self/maps on
   linux), followed by a list of hex addresses to map, one per line.

   For more help with querying remote servers, including how to add the
   necessary server-side support code, see this filename (or one like it):

   /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html

Options:
   --cum               Sort by cumulative data
   --base=<base>       Subtract <base> from <profile> before display
   --interactive       Run in interactive mode (interactive "help" gives help) [default]
   --seconds=<n>       Length of time for dynamic profiles [default=30 secs]
   --add_lib=<file>    Read additional symbols and line info from the given library
   --lib_prefix=<dir>  Comma separated list of library path prefixes

Reporting Granularity:
   --addresses         Report at address level
   --lines             Report at source line level
   --functions         Report at function level [default]
   --files             Report at source file level

Output type:
   --text              Generate text report
   --callgrind         Generate callgrind format to stdout
   --gv                Generate Postscript and display
   --evince            Generate PDF and display
   --web               Generate SVG and display
   --list=<regexp>     Generate source listing of matching routines
   --disasm=<regexp>   Generate disassembly of matching routines
   --symbols           Print demangled symbol names found at given addresses
   --dot               Generate DOT file to stdout
   --ps                Generate Postscript to stdout
   --pdf               Generate PDF to stdout
   --svg               Generate SVG to stdout
   --gif               Generate GIF to stdout
   --raw               Generate symbolized jeprof data (useful with remote fetch)

Heap-Profile Options:
   --inuse_space       Display in-use (mega)bytes [default]
   --inuse_objects     Display in-use objects
   --alloc_space       Display allocated (mega)bytes
   --alloc_objects     Display allocated objects
   --show_bytes        Display space in bytes
   --drop_negative     Ignore negative differences

Contention-profile options:
   --total_delay       Display total delay at each region [default]
   --contentions       Display number of delays at each region
   --mean_delay        Display mean delay at each region

Call-graph Options:
   --nodecount=<n>     Show at most so many nodes [default=80]
   --nodefraction=<f>  Hide nodes below <f>*total [default=.005]
   --edgefraction=<f>  Hide edges below <f>*total [default=.001]
   --maxdegree=<n>     Max incoming/outgoing edges per node [default=8]
   --focus=<regexp>    Focus on backtraces with nodes matching <regexp>
   --thread=<n>        Show profile for thread <n>
   --ignore=<regexp>   Ignore backtraces with nodes matching <regexp>
   --scale=<n>         Set GV scaling [default=0]
   --heapcheck         Make nodes with non-0 object counts
                       (i.e. direct leak generators) more visible
   --retain=<regexp>   Retain only nodes that match <regexp>
   --exclude=<regexp>  Exclude all nodes that match <regexp>

Miscellaneous:
   --tools=<prefix or binary>[,...]  \$PATH for object tool pathnames
   --test              Run unit tests
   --help              This message
   --version           Version information

Environment Variables:
   JEPROF_TMPDIR       Profiles directory.
Defaults to \$HOME/jeprof JEPROF_TOOLS Prefix for object tools pathnames Examples: jeprof /bin/ls ls.prof Enters "interactive" mode jeprof --text /bin/ls ls.prof Outputs one line per procedure jeprof --web /bin/ls ls.prof Displays annotated call-graph in web browser jeprof --gv /bin/ls ls.prof Displays annotated call-graph via 'gv' jeprof --gv --focus=Mutex /bin/ls ls.prof Restricts to code paths including a .*Mutex.* entry jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof Code paths including Mutex but not string jeprof --list=getdir /bin/ls ls.prof (Per-line) annotated source listing for getdir() jeprof --disasm=getdir /bin/ls ls.prof (Per-PC) annotated disassembly for getdir() jeprof http://localhost:1234/ Enters "interactive" mode jeprof --text localhost:1234 Outputs one line per procedure for localhost:1234 jeprof --raw localhost:1234 > ./local.raw jeprof --text ./local.raw Fetches a remote profile for later analysis and then analyzes it in text mode. EOF } sub version_string { return < \$main::opt_help, "version!" => \$main::opt_version, "cum!" => \$main::opt_cum, "base=s" => \$main::opt_base, "seconds=i" => \$main::opt_seconds, "add_lib=s" => \$main::opt_lib, "lib_prefix=s" => \$main::opt_lib_prefix, "functions!" => \$main::opt_functions, "lines!" => \$main::opt_lines, "addresses!" => \$main::opt_addresses, "files!" => \$main::opt_files, "text!" => \$main::opt_text, "callgrind!" => \$main::opt_callgrind, "list=s" => \$main::opt_list, "disasm=s" => \$main::opt_disasm, "symbols!" => \$main::opt_symbols, "gv!" => \$main::opt_gv, "evince!" => \$main::opt_evince, "web!" => \$main::opt_web, "dot!" => \$main::opt_dot, "ps!" => \$main::opt_ps, "pdf!" => \$main::opt_pdf, "svg!" => \$main::opt_svg, "gif!" => \$main::opt_gif, "raw!" => \$main::opt_raw, "interactive!" => \$main::opt_interactive, "nodecount=i" => \$main::opt_nodecount, "nodefraction=f" => \$main::opt_nodefraction, "edgefraction=f" => \$main::opt_edgefraction, "maxdegree=i" => \$main::opt_maxdegree, "focus=s" => \$main::opt_focus, "thread=s" => \$main::opt_thread, "ignore=s" => \$main::opt_ignore, "scale=i" => \$main::opt_scale, "heapcheck" => \$main::opt_heapcheck, "retain=s" => \$main::opt_retain, "exclude=s" => \$main::opt_exclude, "inuse_space!" => \$main::opt_inuse_space, "inuse_objects!" => \$main::opt_inuse_objects, "alloc_space!" => \$main::opt_alloc_space, "alloc_objects!" => \$main::opt_alloc_objects, "show_bytes!" => \$main::opt_show_bytes, "drop_negative!" => \$main::opt_drop_negative, "total_delay!" => \$main::opt_total_delay, "contentions!" => \$main::opt_contentions, "mean_delay!" => \$main::opt_mean_delay, "tools=s" => \$main::opt_tools, "test!" => \$main::opt_test, "debug!" 
=> \$main::opt_debug,
             # Undocumented flags used only by unittests:
             "test_stride=i" => \$main::opt_test_stride,
             ) || usage("Invalid option(s)");

  # Deal with the standard --help and --version
  if ($main::opt_help) {
    print usage_string();
    exit(0);
  }
  if ($main::opt_version) {
    print version_string();
    exit(0);
  }

  # Disassembly/listing/symbols mode requires address-level info
  if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
    $main::opt_functions = 0;
    $main::opt_lines = 0;
    $main::opt_addresses = 1;
    $main::opt_files = 0;
  }

  # Check heap-profiling flags
  if ($main::opt_inuse_space +
      $main::opt_inuse_objects +
      $main::opt_alloc_space +
      $main::opt_alloc_objects > 1) {
    usage("Specify at most one of --inuse/--alloc options");
  }

  # Check output granularities
  my $grains =
      $main::opt_functions +
      $main::opt_lines +
      $main::opt_addresses +
      $main::opt_files +
      0;
  if ($grains > 1) {
    usage("Only specify one output granularity option");
  }
  if ($grains == 0) {
    $main::opt_functions = 1;
  }

  # Check output modes
  my $modes =
      $main::opt_text +
      $main::opt_callgrind +
      ($main::opt_list eq '' ? 0 : 1) +
      ($main::opt_disasm eq '' ? 0 : 1) +
      ($main::opt_symbols == 0 ? 0 : 1) +
      $main::opt_gv +
      $main::opt_evince +
      $main::opt_web +
      $main::opt_dot +
      $main::opt_ps +
      $main::opt_pdf +
      $main::opt_svg +
      $main::opt_gif +
      $main::opt_raw +
      $main::opt_interactive +
      0;
  if ($modes > 1) {
    usage("Only specify one output mode");
  }
  if ($modes == 0) {
    if (-t STDOUT) {  # If STDOUT is a tty, activate interactive mode
      $main::opt_interactive = 1;
    } else {
      $main::opt_text = 1;
    }
  }

  if ($main::opt_test) {
    RunUnitTests();
    # Should not return
    exit(1);
  }

  # Binary name and profile arguments list
  $main::prog = "";
  @main::pfile_args = ();

  # Remote profiling without a binary (using $SYMBOL_PAGE instead)
  if (@ARGV > 0) {
    if (IsProfileURL($ARGV[0])) {
      $main::use_symbol_page = 1;
    } elsif (IsSymbolizedProfileFile($ARGV[0])) {
      $main::use_symbolized_profile = 1;
      $main::prog = $UNKNOWN_BINARY;  # will be set later from the profile file
    }
  }

  if ($main::use_symbol_page || $main::use_symbolized_profile) {
    # We don't need a binary!
    my %disabled = ('--lines' => $main::opt_lines,
                    '--disasm' => $main::opt_disasm);
    for my $option (keys %disabled) {
      usage("$option cannot be used without a binary") if $disabled{$option};
    }
    # Set $main::prog later...
    scalar(@ARGV) || usage("Did not specify profile file");
  } elsif ($main::opt_symbols) {
    # --symbols needs a binary-name (to run nm on, etc) but not profiles
    $main::prog = shift(@ARGV) || usage("Did not specify program");
  } else {
    $main::prog = shift(@ARGV) || usage("Did not specify program");
    scalar(@ARGV) || usage("Did not specify profile file");
  }

  # Parse profile file/location arguments
  foreach my $farg (@ARGV) {
    if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
      my $machine = $1;
      my $num_machines = $2;
      my $path = $3;
      for (my $i = 0; $i < $num_machines; $i++) {
        unshift(@main::pfile_args, "$i.$machine$path");
      }
    } else {
      unshift(@main::pfile_args, $farg);
    }
  }

  if ($main::use_symbol_page) {
    unless (IsProfileURL($main::pfile_args[0])) {
      error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
    }
    CheckSymbolPage();
    $main::prog = FetchProgramName();
  } elsif (!$main::use_symbolized_profile) {  # may not need objtools!
    ConfigureObjTools($main::prog)
  }
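  # For illustration (hypothetical host name): with the expansion above, a
  # profile argument of the form "myhost@3/pprof/heap" turns into the three
  # fetch targets
  #
  #   0.myhost/pprof/heap  1.myhost/pprof/heap  2.myhost/pprof/heap
  #
  # so a single command line such as
  #
  #   jeprof --text ./myprog 'myhost@3/pprof/heap'
  #
  # aggregates the profiles of three numbered replicas of myhost.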
  # Break the opt_lib_prefix into the prefix_list array
  @prefix_list = split (',', $main::opt_lib_prefix);

  # Remove trailing / from the prefixes in the list to prevent
  # searching things like /my/path//lib/mylib.so
  foreach (@prefix_list) {
    s|/+$||;
  }
}

sub FilterAndPrint {
  my ($profile, $symbols, $libs, $thread) = @_;

  # Get total data in profile
  my $total = TotalProfile($profile);

  # Remove uninteresting stack items
  $profile = RemoveUninterestingFrames($symbols, $profile);

  # Focus?
  if ($main::opt_focus ne '') {
    $profile = FocusProfile($symbols, $profile, $main::opt_focus);
  }

  # Ignore?
  if ($main::opt_ignore ne '') {
    $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
  }

  my $calls = ExtractCalls($symbols, $profile);

  # Reduce profiles to required output granularity, and also clean
  # each stack trace so a given entry exists at most once.
  my $reduced = ReduceProfile($symbols, $profile);

  # Get derived profiles
  my $flat = FlatProfile($reduced);
  my $cumulative = CumulativeProfile($reduced);

  # Print
  if (!$main::opt_interactive) {
    if ($main::opt_disasm) {
      PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
    } elsif ($main::opt_list) {
      PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
    } elsif ($main::opt_text) {
      # Make sure the output is empty when we have nothing to report
      # (only matters when --heapcheck is given but we must be
      # compatible with old branches that did not pass --heapcheck always):
      if ($total != 0) {
        printf("Total%s: %s %s\n",
               (defined($thread) ? " (t$thread)" : ""),
               Unparse($total), Units());
      }
      PrintText($symbols, $flat, $cumulative, -1);
    } elsif ($main::opt_raw) {
      PrintSymbolizedProfile($symbols, $profile, $main::prog);
    } elsif ($main::opt_callgrind) {
      PrintCallgrind($calls);
    } else {
      if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
        if ($main::opt_gv) {
          RunGV(TempName($main::next_tmpfile, "ps"), "");
        } elsif ($main::opt_evince) {
          RunEvince(TempName($main::next_tmpfile, "pdf"), "");
        } elsif ($main::opt_web) {
          my $tmp = TempName($main::next_tmpfile, "svg");
          RunWeb($tmp);
          # The command we run might hand the file name off
          # to an already running browser instance and then exit.
          # Normally, we'd remove $tmp on exit (right now),
          # but fork a child to remove $tmp a little later, so that the
          # browser has time to load it first.
          delete $main::tempnames{$tmp};
          if (fork() == 0) {
            sleep 5;
            unlink($tmp);
            exit(0);
          }
        }
      } else {
        cleanup();
        exit(1);
      }
    }
  } else {
    InteractiveMode($profile, $symbols, $libs, $total);
  }
}

sub Main() {
  Init();
  $main::collected_profile = undef;
  @main::profile_files = ();
  $main::op_time = time();

  # Printing symbols is special and requires a lot less info than most.
  if ($main::opt_symbols) {
    PrintSymbols(*STDIN);   # Get /proc/maps and symbols output from stdin
    return;
  }

  # Fetch all profile data
  FetchDynamicProfiles();

  # this will hold symbols that we read from the profile files
  my $symbol_map = {};

  # Read one profile, pick the last item on the list
  my $data = ReadProfile($main::prog, pop(@main::profile_files));
  my $profile = $data->{profile};
  my $pcs = $data->{pcs};
  my $libs = $data->{libs};   # Info about main program and shared libraries
  $symbol_map = MergeSymbols($symbol_map, $data->{symbols});

  # Add additional profiles, if available.
if (scalar(@main::profile_files) > 0) { foreach my $pname (@main::profile_files) { my $data2 = ReadProfile($main::prog, $pname); $profile = AddProfile($profile, $data2->{profile}); $pcs = AddPcs($pcs, $data2->{pcs}); $symbol_map = MergeSymbols($symbol_map, $data2->{symbols}); } } # Subtract base from profile, if specified if ($main::opt_base ne '') { my $base = ReadProfile($main::prog, $main::opt_base); $profile = SubtractProfile($profile, $base->{profile}); $pcs = AddPcs($pcs, $base->{pcs}); $symbol_map = MergeSymbols($symbol_map, $base->{symbols}); } # Collect symbols my $symbols; if ($main::use_symbolized_profile) { $symbols = FetchSymbols($pcs, $symbol_map); } elsif ($main::use_symbol_page) { $symbols = FetchSymbols($pcs); } else { # TODO(csilvers): $libs uses the /proc/self/maps data from profile1, # which may differ from the data from subsequent profiles, especially # if they were run on different machines. Use appropriate libs for # each pc somehow. $symbols = ExtractSymbols($libs, $pcs); } if (!defined($main::opt_thread)) { FilterAndPrint($profile, $symbols, $libs); } if (defined($data->{threads})) { foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) { if (defined($main::opt_thread) && ($main::opt_thread eq '*' || $main::opt_thread == $thread)) { my $thread_profile = $data->{threads}{$thread}; FilterAndPrint($thread_profile, $symbols, $libs, $thread); } } } cleanup(); exit(0); } ##### Entry Point ##### Main(); # Temporary code to detect if we're running on a Goobuntu system. # These systems don't have the right stuff installed for the special # Readline libraries to work, so as a temporary workaround, we default # to using the normal stdio code, rather than the fancier readline-based # code sub ReadlineMightFail { if (-e '/lib/libtermcap.so.2') { return 0; # libtermcap exists, so readline should be okay } else { return 1; } } sub RunGV { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) { # Options using double dash are supported by this gv version. # Also, turn on noantialias to better handle bug in gv for # postscript files with large dimensions. # TODO: Maybe we should not pass the --noantialias flag # if the gv version is known to work properly without the flag. system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname) . $bg); } else { # Old gv version - only supports options that use single dash. print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n"; system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg); } } sub RunEvince { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background system(ShellEscape(@EVINCE, $fname) . $bg); } sub RunWeb { my $fname = shift; print STDERR "Loading web page file:///$fname\n"; if (`uname` =~ /Darwin/) { # OS X: open will use standard preference for SVG files. system("/usr/bin/open", $fname); return; } # Some kind of Unix; try generic symlinks, then specific browsers. # (Stop once we find one.) # Works best if the browser is already running. my @alt = ( "/etc/alternatives/gnome-www-browser", "/etc/alternatives/x-www-browser", "google-chrome", "firefox", ); foreach my $b (@alt) { if (system($b, $fname) == 0) { return; } } print STDERR "Could not load web browser.\n"; } sub RunKcachegrind { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n"; system(ShellEscape(@KCACHEGRIND, $fname) . 
$bg);
}

##### Interactive helper routines #####
sub InteractiveMode {
  $| = 1;  # Make output unbuffered for interactive mode
  my ($orig_profile, $symbols, $libs, $total) = @_;

  print STDERR "Welcome to jeprof! For help, type 'help'.\n";

  # Use ReadLine if it's installed and input comes from a console.
  if ( -t STDIN &&
       !ReadlineMightFail() &&
       defined(eval {require Term::ReadLine}) ) {
    my $term = new Term::ReadLine 'jeprof';
    while ( defined ($_ = $term->readline('(jeprof) '))) {
      $term->addhistory($_) if /\S/;
      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
        last;    # exit when we get an interactive command to quit
      }
    }
  } else {       # don't have readline
    while (1) {
      print STDERR "(jeprof) ";
      $_ = <STDIN>;
      last if ! defined $_ ;
      s/\r//g;   # turn windows-looking lines into unix-looking lines

      # Save some flags that might be reset by InteractiveCommand()
      my $save_opt_lines = $main::opt_lines;

      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
        last;    # exit when we get an interactive command to quit
      }

      # Restore flags
      $main::opt_lines = $save_opt_lines;
    }
  }
}

# Takes two args: orig profile, and command to run.
# Returns 1 if we should keep going, or 0 if we were asked to quit
sub InteractiveCommand {
  my($orig_profile, $symbols, $libs, $total, $command) = @_;
  $_ = $command;   # just to make future m//'s easier
  if (!defined($_)) {
    print STDERR "\n";
    return 0;
  }
  if (m/^\s*quit/) {
    return 0;
  }
  if (m/^\s*help/) {
    InteractiveHelpMessage();
    return 1;
  }
  # Clear all the mode options -- mode is controlled by "$command"
  $main::opt_text = 0;
  $main::opt_callgrind = 0;
  $main::opt_disasm = 0;
  $main::opt_list = 0;
  $main::opt_gv = 0;
  $main::opt_evince = 0;
  $main::opt_cum = 0;

  if (m/^\s*(text|top)(\d*)\s*(.*)/) {
    $main::opt_text = 1;

    my $line_limit = ($2 ne "") ?
int($2) : 10; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($3); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintText($symbols, $flat, $cumulative, $line_limit); return 1; } if (m/^\s*callgrind\s*([^ \n]*)/) { $main::opt_callgrind = 1; # Get derived profiles my $calls = ExtractCalls($symbols, $orig_profile); my $filename = $1; if ( $1 eq '' ) { $filename = TempName($main::next_tmpfile, "callgrind"); } PrintCallgrind($calls, $filename); if ( $1 eq '' ) { RunKcachegrind($filename, " & "); $main::next_tmpfile++; } return 1; } if (m/^\s*(web)?list\s*(.+)/) { my $html = (defined($1) && ($1 eq "web")); $main::opt_list = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($2); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintListing($total, $libs, $flat, $cumulative, $routine, $html); return 1; } if (m/^\s*disasm\s*(.+)/) { $main::opt_disasm = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($1); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintDisassembly($libs, $flat, $cumulative, $routine); return 1; } if (m/^\s*(gv|web|evince)\s*(.*)/) { $main::opt_gv = 0; $main::opt_evince = 0; $main::opt_web = 0; if ($1 eq "gv") { $main::opt_gv = 1; } elsif ($1 eq "evince") { $main::opt_evince = 1; } elsif ($1 eq "web") { $main::opt_web = 1; } my $focus; my $ignore; ($focus, $ignore) = ParseInteractiveArgs($2); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, $focus, $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { if ($main::opt_gv) { RunGV(TempName($main::next_tmpfile, "ps"), " &"); } elsif ($main::opt_evince) { RunEvince(TempName($main::next_tmpfile, "pdf"), " &"); } elsif ($main::opt_web) { RunWeb(TempName($main::next_tmpfile, "svg")); } $main::next_tmpfile++; } return 1; } if (m/^\s*$/) { return 1; } print STDERR "Unknown command: try 'help'.\n"; return 1; } sub ProcessProfile { my $total_count = shift; my $orig_profile = shift; my $symbols = shift; my $focus = shift; my $ignore = shift; # Process current profile to account for various settings my $profile = $orig_profile; printf("Total: %s %s\n", Unparse($total_count), Units()); if ($focus ne '') { $profile = FocusProfile($symbols, $profile, $focus); my $focus_count = TotalProfile($profile); printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n", $focus, Unparse($focus_count), Units(), Unparse($total_count), ($focus_count*100.0) / $total_count); } if ($ignore ne '') { $profile = IgnoreProfile($symbols, $profile, $ignore); my $ignore_count = TotalProfile($profile); printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n", $ignore, Unparse($ignore_count), Units(), Unparse($total_count), ($ignore_count*100.0) / $total_count); } return 
$profile;
}

sub InteractiveHelpMessage {
  print STDERR <<ENDOFHELP;
Interactive jeprof mode
ENDOFHELP
}

# Print profile data
sub PrintProfileData {
  my $profile = shift;

  # print header (64-bit style)
  # (zero) (header-size) (version) (sample-period) (zero)
  print pack('L*', 0, 0, 0, 3, 0, 0, 0, 1, 0, 0);

  foreach my $k (keys(%{$profile})) {
    my $count = $profile->{$k};
    my @addrs = split(/\n/, $k);
    if ($#addrs >= 0) {
      my $depth = $#addrs + 1;
      # int(foo / 2**32) is the only reliable way to get rid of bottom
      # 32 bits on both 32- and 64-bit systems.
      print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
      print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
      foreach my $full_addr (@addrs) {
        my $addr = $full_addr;
        $addr =~ s/0x0*//;  # strip off leading 0x, zeroes
        if (length($addr) > 16) {
          print STDERR "Invalid address in profile: $full_addr\n";
          next;
        }
        my $low_addr = substr($addr, -8);       # get last 8 hex chars
        my $high_addr = substr($addr, -16, 8);  # get up to 8 more hex chars
        print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
      }
    }
  }
}

# Print symbols and profile data
sub PrintSymbolizedProfile {
  my $symbols = shift;
  my $profile = shift;
  my $prog = shift;

  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $symbol_marker = $&;

  print '--- ', $symbol_marker, "\n";
  if (defined($prog)) {
    print 'binary=', $prog, "\n";
  }
  while (my ($pc, $name) = each(%{$symbols})) {
    my $sep = ' ';
    print '0x', $pc;
    # We have a list of function names, which include the inlined
    # calls.  They are separated (and terminated) by --, which is
    # illegal in function names.
    for (my $j = 2; $j <= $#{$name}; $j += 3) {
      print $sep, $name->[$j];
      $sep = '--';
    }
    print "\n";
  }
  print '---', "\n";

  my $profile_marker;
  if ($main::profile_type eq 'heap') {
    $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
    $profile_marker = $&;
  } elsif ($main::profile_type eq 'growth') {
    $GROWTH_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
    $profile_marker = $&;
  } elsif ($main::profile_type eq 'contention') {
    $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
    $profile_marker = $&;
  } else { # elsif ($main::profile_type eq 'cpu')
    $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
    $profile_marker = $&;
  }

  print '--- ', $profile_marker, "\n";
  if (defined($main::collected_profile)) {
    # if used with remote fetch, simply dump the collected profile to output.
    open(SRC, "<$main::collected_profile");
    while (<SRC>) {
      print $_;
    }
    close(SRC);
  } else {
    # --raw/http: For everything to work correctly for non-remote profiles, we
    # would need to extend PrintProfileData() to handle all possible profile
    # types, re-enable the code that is currently disabled in ReadCPUProfile()
    # and FixCallerAddresses(), and remove the remote profile dumping code in
    # the block above.
    die "--raw/http: jeprof can only dump remote profiles for --raw\n";
    # dump a cpu-format profile to standard out
    PrintProfileData($profile);
  }
}

# Print text output
sub PrintText {
  my $symbols = shift;
  my $flat = shift;
  my $cumulative = shift;
  my $line_limit = shift;

  my $total = TotalProfile($flat);

  # Which profile to sort by?
  my $s = $main::opt_cum ? $cumulative : $flat;

  my $running_sum = 0;
  my $lines = 0;
  foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
                 keys(%{$cumulative})) {
    my $f = GetEntry($flat, $k);
    my $c = GetEntry($cumulative, $k);
    $running_sum += $f;

    my $sym = $k;
    if (exists($symbols->{$k})) {
      $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
      if ($main::opt_addresses) {
        $sym = $k . " " . $sym;
      }
    }

    if ($f != 0 || $c != 0) {
      printf("%8s %6s %6s %8s %6s %s\n",
             Unparse($f),
             Percent($f, $total),
             Percent($running_sum, $total),
             Unparse($c),
             Percent($c, $total),
             $sym);
    }
    $lines++;
    last if ($line_limit >= 0 && $lines >= $line_limit);
  }
}
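# A sketch of how the text report above is read (program and profile
# names hypothetical):
#
#   jeprof --text ./myprog ./myprog.heap
#
# Each row printed by PrintText is: flat count, flat percentage, running
# cumulative percentage, cumulative count, cumulative percentage, and
# finally the symbol name (prefixed with its address when --addresses is
# in effect).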
# Callgrind format has a compression for repeated function and file
# names.  You show the name the first time, and just use its number
# subsequently.  This can cut down the file to about a third or a
# quarter of its uncompressed size.  $key and $val are the key/value
# pair that would normally be printed by callgrind; $map is a map from
# value to number.
sub CompressedCGName {
  my($key, $val, $map) = @_;
  my $idx = $map->{$val};
  # For very short keys, providing an index hurts rather than helps.
  if (length($val) <= 3) {
    return "$key=$val\n";
  } elsif (defined($idx)) {
    return "$key=($idx)\n";
  } else {
    # scalar(keys $map) gives the number of items in the map.
    $idx = scalar(keys(%{$map})) + 1;
    $map->{$val} = $idx;
    return "$key=($idx) $val\n";
  }
}

# Print the call graph in a way that's suitable for callgrind.
sub PrintCallgrind {
  my $calls = shift;
  my $filename;
  my %filename_to_index_map;
  my %fnname_to_index_map;

  if ($main::opt_interactive) {
    $filename = shift;
    print STDERR "Writing callgrind file to '$filename'.\n"
  } else {
    $filename = "&STDOUT";
  }
  open(CG, ">$filename");
  printf CG ("events: Hits\n\n");
  foreach my $call ( map { $_->[0] }
                     sort { $a->[1] cmp $b->[1] ||
                            $a->[2] <=> $b->[2] }
                     map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
                           [$_, $1, $2] }
                     keys %$calls ) {
    my $count = int($calls->{$call});
    $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
    my ( $caller_file, $caller_line, $caller_function,
         $callee_file, $callee_line, $callee_function ) =
       ( $1, $2, $3, $5, $6, $7 );

    # TODO(csilvers): for better compression, collect all the
    # caller/callee_files and functions first, before printing
    # anything, and only compress those referenced more than once.
    printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
    printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
    if (defined $6) {
      printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
      printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
      printf CG ("calls=$count $callee_line\n");
    }
    printf CG ("$caller_line $count\n\n");
  }
}
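# Example use of the callgrind output (paths hypothetical, and assuming
# KCachegrind is installed, as RunKcachegrind above expects):
#
#   jeprof --callgrind ./myprog ./myprog.heap > myprog.callgrind
#   kcachegrind myprog.callgrind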
# Print disassembly for all routines that match $main::opt_disasm
sub PrintDisassembly {
  my $libs = shift;
  my $flat = shift;
  my $cumulative = shift;
  my $disasm_opts = shift;

  my $total = TotalProfile($flat);

  foreach my $lib (@{$libs}) {
    my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
    my $offset = AddressSub($lib->[1], $lib->[3]);
    foreach my $routine (sort ByName keys(%{$symbol_table})) {
      my $start_addr = $symbol_table->{$routine}->[0];
      my $end_addr = $symbol_table->{$routine}->[1];
      # See if there are any samples in this routine
      my $length = hex(AddressSub($end_addr, $start_addr));
      my $addr = AddressAdd($start_addr, $offset);
      for (my $i = 0; $i < $length; $i++) {
        if (defined($cumulative->{$addr})) {
          PrintDisassembledFunction($lib->[0], $offset,
                                    $routine, $flat, $cumulative,
                                    $start_addr, $end_addr, $total);
          last;
        }
        $addr = AddressInc($addr);
      }
    }
  }
}

# Return reference to array of tuples of the form:
#       [start_address, filename, linenumber, instruction, limit_address]
# E.g.,
#       ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
sub Disassemble {
  my $prog = shift;
  my $offset = shift;
  my $start_addr = shift;
  my $end_addr = shift;

  my $objdump = $obj_tool_map{"objdump"};
  my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
                        "--start-address=0x$start_addr",
                        "--stop-address=0x$end_addr", $prog);
  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
  my @result = ();
  my $filename = "";
  my $linenumber = -1;
  my $last = ["", "", "", ""];
  while (<OBJDUMP>) {
    s/\r//g;  # turn windows-looking lines into unix-looking lines
    chop;
    if (m|\s*([^:\s]+):(\d+)\s*$|) {
      # Location line of the form:
      #   <filename>:<linenumber>
      $filename = $1;
      $linenumber = $2;
    } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
      # Disassembly line -- zero-extend address to full length
      my $addr = HexExtend($1);
      my $k = AddressAdd($addr, $offset);
      $last->[4] = $k;   # Store ending address for previous instruction
      $last = [$k, $filename, $linenumber, $2, $end_addr];
      push(@result, $last);
    }
  }
  close(OBJDUMP);
  return @result;
}

# The input file should contain lines of the form /proc/maps-like
# output (same format as expected from the profiles) or that looks
# like hex addresses (like "0xDEADBEEF").  We will parse all
# /proc/maps output, and for all the hex addresses, we will output
# "short" symbol names, one per line, in the same order as the input.
sub PrintSymbols {
  my $maps_and_symbols_file = shift;

  # ParseLibraries expects pcs to be in a set.  Fine by us...
  my @pclist = ();   # pcs in sorted order
  my $pcs = {};
  my $map = "";
  foreach my $line (<$maps_and_symbols_file>) {
    $line =~ s/\r//g;  # turn windows-looking lines into unix-looking lines
    if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
      push(@pclist, HexExtend($1));
      $pcs->{$pclist[-1]} = 1;
    } else {
      $map .= $line;
    }
  }

  my $libs = ParseLibraries($main::prog, $map, $pcs);
  my $symbols = ExtractSymbols($libs, $pcs);

  foreach my $pc (@pclist) {
    # ->[0] is the shortname, ->[2] is the full name
    print(($symbols->{$pc}->[0] || "??") . "\n");
  }
}
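# Sketch of the --symbols protocol that PrintSymbols implements (binary
# name, maps file, and address all hypothetical): stdin carries
# /proc/self/maps-style mapping lines followed by hex addresses, and one
# short symbol name is printed per address:
#
#   { cat maps.txt; echo 0x400f2e; } | jeprof --symbols ./myprog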
"\n"); } } # For sorting functions by name sub ByName { return ShortFunctionName($a) cmp ShortFunctionName($b); } # Print source-listing for all all routines that match $list_opts sub PrintListing { my $total = shift; my $libs = shift; my $flat = shift; my $cumulative = shift; my $list_opts = shift; my $html = shift; my $output = \*STDOUT; my $fname = ""; if ($html) { # Arrange to write the output to a temporary file $fname = TempName($main::next_tmpfile, "html"); $main::next_tmpfile++; if (!open(TEMP, ">$fname")) { print STDERR "$fname: $!\n"; return; } $output = \*TEMP; print $output HtmlListingHeader(); printf $output ("
%s
Total: %s %s
\n", $main::prog, Unparse($total), Units()); } my $listed = 0; foreach my $lib (@{$libs}) { my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts); my $offset = AddressSub($lib->[1], $lib->[3]); foreach my $routine (sort ByName keys(%{$symbol_table})) { # Print if there are any samples in this routine my $start_addr = $symbol_table->{$routine}->[0]; my $end_addr = $symbol_table->{$routine}->[1]; my $length = hex(AddressSub($end_addr, $start_addr)); my $addr = AddressAdd($start_addr, $offset); for (my $i = 0; $i < $length; $i++) { if (defined($cumulative->{$addr})) { $listed += PrintSource( $lib->[0], $offset, $routine, $flat, $cumulative, $start_addr, $end_addr, $html, $output); last; } $addr = AddressInc($addr); } } } if ($html) { if ($listed > 0) { print $output HtmlListingFooter(); close($output); RunWeb($fname); } else { close($output); unlink($fname); } } } sub HtmlListingHeader { return <<'EOF'; Pprof listing EOF } sub HtmlListingFooter { return <<'EOF'; EOF } sub HtmlEscape { my $text = shift; $text =~ s/&/&/g; $text =~ s//>/g; return $text; } # Returns the indentation of the line, if it has any non-whitespace # characters. Otherwise, returns -1. sub Indentation { my $line = shift; if (m/^(\s*)\S/) { return length($1); } else { return -1; } } # If the symbol table contains inlining info, Disassemble() may tag an # instruction with a location inside an inlined function. But for # source listings, we prefer to use the location in the function we # are listing. So use MapToSymbols() to fetch full location # information for each instruction and then pick out the first # location from a location list (location list contains callers before # callees in case of inlining). # # After this routine has run, each entry in $instructions contains: # [0] start address # [1] filename for function we are listing # [2] line number for function we are listing # [3] disassembly # [4] limit address # [5] most specific filename (may be different from [1] due to inlining) # [6] most specific line number (may be different from [2] due to inlining) sub GetTopLevelLineNumbers { my ($lib, $offset, $instructions) = @_; my $pcs = []; for (my $i = 0; $i <= $#{$instructions}; $i++) { push(@{$pcs}, $instructions->[$i]->[0]); } my $symbols = {}; MapToSymbols($lib, $offset, $pcs, $symbols); for (my $i = 0; $i <= $#{$instructions}; $i++) { my $e = $instructions->[$i]; push(@{$e}, $e->[1]); push(@{$e}, $e->[2]); my $addr = $e->[0]; my $sym = $symbols->{$addr}; if (defined($sym)) { if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { $e->[1] = $1; # File name $e->[2] = $2; # Line number } } } } # Print source-listing for one routine sub PrintSource { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $html = shift; my $output = shift; # Disassemble all instructions (just to get line numbers) my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); GetTopLevelLineNumbers($prog, $offset, \@instructions); # Hack 1: assume that the first source file encountered in the # disassembly contains the routine my $filename = undef; for (my $i = 0; $i <= $#instructions; $i++) { if ($instructions[$i]->[2] >= 0) { $filename = $instructions[$i]->[1]; last; } } if (!defined($filename)) { print STDERR "no filename found in $routine\n"; return 0; } # Hack 2: assume that the largest line number from $filename is the # end of the procedure. 
# This is typically safe since if P1 contains an inlined call to P2,
  # then P2 usually occurs earlier in the source file.  If this does not
  # work, we might have to compute a density profile or just print all
  # regions we find.
  my $lastline = 0;
  for (my $i = 0; $i <= $#instructions; $i++) {
    my $f = $instructions[$i]->[1];
    my $l = $instructions[$i]->[2];
    if (($f eq $filename) && ($l > $lastline)) {
      $lastline = $l;
    }
  }

  # Hack 3: assume the first source location from "filename" is the start of
  # the source code.
  my $firstline = 1;
  for (my $i = 0; $i <= $#instructions; $i++) {
    if ($instructions[$i]->[1] eq $filename) {
      $firstline = $instructions[$i]->[2];
      last;
    }
  }

  # Hack 4: Extend last line forward until its indentation is less than
  # the indentation we saw on $firstline
  my $oldlastline = $lastline;
  {
    if (!open(FILE, "<$filename")) {
      print STDERR "$filename: $!\n";
      return 0;
    }
    my $l = 0;
    my $first_indentation = -1;
    while (<FILE>) {
      s/\r//g;         # turn windows-looking lines into unix-looking lines
      $l++;
      my $indent = Indentation($_);
      if ($l >= $firstline) {
        if ($first_indentation < 0 && $indent >= 0) {
          $first_indentation = $indent;
          last if ($first_indentation == 0);
        }
      }
      if ($l >= $lastline && $indent >= 0) {
        if ($indent >= $first_indentation) {
          $lastline = $l+1;
        } else {
          last;
        }
      }
    }
    close(FILE);
  }

  # Assign all samples to the range $firstline,$lastline,
  # Hack 5: If an instruction does not occur in the range, its samples
  # are moved to the next instruction that occurs in the range.
  my $samples1 = {};        # Map from line number to flat count
  my $samples2 = {};        # Map from line number to cumulative count
  my $running1 = 0;         # Unassigned flat counts
  my $running2 = 0;         # Unassigned cumulative counts
  my $total1 = 0;           # Total flat counts
  my $total2 = 0;           # Total cumulative counts
  my %disasm = ();          # Map from line number to disassembly
  my $running_disasm = "";  # Unassigned disassembly
  my $skip_marker = "---\n";
  if ($html) {
    $skip_marker = "";
    for (my $l = $firstline; $l <= $lastline; $l++) {
      $disasm{$l} = "";
    }
  }
  my $last_dis_filename = '';
  my $last_dis_linenum = -1;
  my $last_touched_line = -1;  # To detect gaps in disassembly for a line
  foreach my $e (@instructions) {
    # Add up counts for all addresses that fall inside this instruction
    my $c1 = 0;
    my $c2 = 0;
    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
      $c1 += GetEntry($flat, $a);
      $c2 += GetEntry($cumulative, $a);
    }

    if ($html) {
      my $dis = sprintf("      %6s %6s \t\t%8s: %s ",
                        HtmlPrintNumber($c1),
                        HtmlPrintNumber($c2),
                        UnparseAddress($offset, $e->[0]),
                        CleanDisassembly($e->[3]));

      # Append the most specific source line associated with this instruction
      if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
      $dis = HtmlEscape($dis);
      my $f = $e->[5];
      my $l = $e->[6];
      if ($f ne $last_dis_filename) {
        $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
                        HtmlEscape(CleanFileName($f)), $l);
      } elsif ($l ne $last_dis_linenum) {
        # De-emphasize the unchanged file name portion
        $dis .= sprintf("<span class=unimportant>%s</span>" .
                        "<span class=disasmloc>:%d</span>",
                        HtmlEscape(CleanFileName($f)), $l);
      } else {
        # De-emphasize the entire location
        $dis .= sprintf("<span class=unimportant>%s:%d</span>",
                        HtmlEscape(CleanFileName($f)), $l);
      }
      $last_dis_filename = $f;
      $last_dis_linenum = $l;
      $running_disasm .= $dis;
      $running_disasm .= "\n";
    }

    $running1 += $c1;
    $running2 += $c2;
    $total1 += $c1;
    $total2 += $c2;
    my $file = $e->[1];
    my $line = $e->[2];
    if (($file eq $filename) &&
        ($line >= $firstline) &&
        ($line <= $lastline)) {
      # Assign all accumulated samples to this line
      AddEntry($samples1, $line, $running1);
      AddEntry($samples2, $line, $running2);
      $running1 = 0;
      $running2 = 0;
      if ($html) {
        if ($line != $last_touched_line && $disasm{$line} ne '') {
          $disasm{$line} .= "\n";
        }
        $disasm{$line} .= $running_disasm;
        $running_disasm = '';
        $last_touched_line = $line;
      }
    }
  }

  # Assign any leftover samples to $lastline
  AddEntry($samples1, $lastline, $running1);
  AddEntry($samples2, $lastline, $running2);
  if ($html) {
    if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
      $disasm{$lastline} .= "\n";
    }
    $disasm{$lastline} .= $running_disasm;
  }

  if ($html) {
    printf $output (
      "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
      "Total:%6s %6s (flat / cumulative %s)\n",
      HtmlEscape(ShortFunctionName($routine)),
      HtmlEscape(CleanFileName($filename)),
      Unparse($total1),
      Unparse($total2),
      Units());
  } else {
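    # In plain-text mode the routine header printed below looks like,
    # e.g. (illustrative values):
    #   ROUTINE ====================== MyFunc in /src/foo.cc
    #     1234   5678 Total samples (flat / cumulative)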
    printf $output (
      "ROUTINE ====================== %s in %s\n" .
      "%6s %6s Total %s (flat / cumulative)\n",
      ShortFunctionName($routine),
      CleanFileName($filename),
      Unparse($total1),
      Unparse($total2),
      Units());
  }
  if (!open(FILE, "<$filename")) {
    print STDERR "$filename: $!\n";
    return 0;
  }
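  # The loop below prints a window of the source file: five lines of
  # context before $firstline and after $oldlastline, with $skip_marker
  # ("---" in text mode, empty in HTML mode) emitted when the listing
  # reaches $firstline and again after $lastline.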
  my $l = 0;
  while (<FILE>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    $l++;
    if ($l >= $firstline - 5 &&
        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
      chop;
      my $text = $_;
      if ($l == $firstline) { print $output $skip_marker; }
      my $n1 = GetEntry($samples1, $l);
      my $n2 = GetEntry($samples2, $l);
      if ($html) {
        # Emit a span that has one of the following classes:
        #    livesrc -- has samples
        #    deadsrc -- has disassembly, but with no samples
        #    nop     -- has no matching disassembly
        # Also emit an optional span containing disassembly.
        my $dis = $disasm{$l};
        my $asm = "";
        if (defined($dis) && $dis ne '') {
          $asm = "" . $dis . "";
        }
        my $source_class = (($n1 + $n2 > 0)
                            ? "livesrc"
                            : (($asm ne "") ? "deadsrc" : "nop"));
        printf $output (
          "%5d " .
          "%6s %6s %s%s\n",
          $l, $source_class,
          HtmlPrintNumber($n1),
          HtmlPrintNumber($n2),
          HtmlEscape($text),
          $asm);
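        # A single emitted HTML row then looks like, e.g.
        # (illustrative values):
        #   <span class="line">  137</span> <span class="livesrc">    10     12 x += 1;</span>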
      } else {
        printf $output(
          "%6s %6s %4d: %s\n",
          UnparseAlt($n1),
          UnparseAlt($n2),
          $l,
          $text);
      }
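      # e.g. one plain-text listing line per source line, with "." standing
      # for a zero count (see UnparseAlt); illustrative values:
      #       10     12  137: x += 1;
      #        .      .  138: return x;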
      if ($l == $lastline)  { print $output $skip_marker; }
    };
  }
  close(FILE);
  if ($html) {
    print $output "
\n"; } return 1; } # Return the source line for the specified file/linenumber. # Returns undef if not found. sub SourceLine { my $file = shift; my $line = shift; # Look in cache if (!defined($main::source_cache{$file})) { if (100 < scalar keys(%main::source_cache)) { # Clear the cache when it gets too big $main::source_cache = (); } # Read all lines from the file if (!open(FILE, "<$file")) { print STDERR "$file: $!\n"; $main::source_cache{$file} = []; # Cache the negative result return undef; } my $lines = []; push(@{$lines}, ""); # So we can use 1-based line numbers as indices while () { push(@{$lines}, $_); } close(FILE); # Save the lines in the cache $main::source_cache{$file} = $lines; } my $lines = $main::source_cache{$file}; if (($line < 0) || ($line > $#{$lines})) { return undef; } else { return $lines->[$line]; } } # Print disassembly for one routine with interspersed source if available sub PrintDisassembledFunction { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $total = shift; # Disassemble all instructions my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); # Make array of counts per instruction my @flat_count = (); my @cum_count = (); my $flat_total = 0; my $cum_total = 0; foreach my $e (@instructions) { # Add up counts for all address that fall inside this instruction my $c1 = 0; my $c2 = 0; for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { $c1 += GetEntry($flat, $a); $c2 += GetEntry($cumulative, $a); } push(@flat_count, $c1); push(@cum_count, $c2); $flat_total += $c1; $cum_total += $c2; } # Print header with total counts printf("ROUTINE ====================== %s\n" . "%6s %6s %s (flat, cumulative) %.1f%% of total\n", ShortFunctionName($routine), Unparse($flat_total), Unparse($cum_total), Units(), ($cum_total * 100.0) / $total); # Process instructions in order my $current_file = ""; for (my $i = 0; $i <= $#instructions; ) { my $e = $instructions[$i]; # Print the new file name whenever we switch files if ($e->[1] ne $current_file) { $current_file = $e->[1]; my $fname = $current_file; $fname =~ s|^\./||; # Trim leading "./" # Shorten long file names if (length($fname) >= 58) { $fname = "..." . substr($fname, -55); } printf("-------------------- %s\n", $fname); } # TODO: Compute range of lines to print together to deal with # small reorderings. 
my $first_line = $e->[2]; my $last_line = $first_line; my %flat_sum = (); my %cum_sum = (); for (my $l = $first_line; $l <= $last_line; $l++) { $flat_sum{$l} = 0; $cum_sum{$l} = 0; } # Find run of instructions for this range of source lines my $first_inst = $i; while (($i <= $#instructions) && ($instructions[$i]->[2] >= $first_line) && ($instructions[$i]->[2] <= $last_line)) { $e = $instructions[$i]; $flat_sum{$e->[2]} += $flat_count[$i]; $cum_sum{$e->[2]} += $cum_count[$i]; $i++; } my $last_inst = $i - 1; # Print source lines for (my $l = $first_line; $l <= $last_line; $l++) { my $line = SourceLine($current_file, $l); if (!defined($line)) { $line = "?\n"; next; } else { $line =~ s/^\s+//; } printf("%6s %6s %5d: %s", UnparseAlt($flat_sum{$l}), UnparseAlt($cum_sum{$l}), $l, $line); } # Print disassembly for (my $x = $first_inst; $x <= $last_inst; $x++) { my $e = $instructions[$x]; printf("%6s %6s %8s: %6s\n", UnparseAlt($flat_count[$x]), UnparseAlt($cum_count[$x]), UnparseAddress($offset, $e->[0]), CleanDisassembly($e->[3])); } } } # Print DOT graph sub PrintDot { my $prog = shift; my $symbols = shift; my $raw = shift; my $flat = shift; my $cumulative = shift; my $overall_total = shift; # Get total my $local_total = TotalProfile($flat); my $nodelimit = int($main::opt_nodefraction * $local_total); my $edgelimit = int($main::opt_edgefraction * $local_total); my $nodecount = $main::opt_nodecount; # Find nodes to include my @list = (sort { abs(GetEntry($cumulative, $b)) <=> abs(GetEntry($cumulative, $a)) || $a cmp $b } keys(%{$cumulative})); my $last = $nodecount - 1; if ($last > $#list) { $last = $#list; } while (($last >= 0) && (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { $last--; } if ($last < 0) { print STDERR "No nodes to print\n"; return 0; } if ($nodelimit > 0 || $edgelimit > 0) { printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", Unparse($nodelimit), Units(), Unparse($edgelimit), Units()); } # Open DOT output file my $output; my $escaped_dot = ShellEscape(@DOT); my $escaped_ps2pdf = ShellEscape(@PS2PDF); if ($main::opt_gv) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); $output = "| $escaped_dot -Tps2 >$escaped_outfile"; } elsif ($main::opt_evince) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; } elsif ($main::opt_ps) { $output = "| $escaped_dot -Tps2"; } elsif ($main::opt_pdf) { $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; } elsif ($main::opt_web || $main::opt_svg) { # We need to post-process the SVG, so write to a temporary file always. my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); $output = "| $escaped_dot -Tsvg >$escaped_outfile"; } elsif ($main::opt_gif) { $output = "| $escaped_dot -Tgif"; } else { $output = ">&STDOUT"; } open(DOT, $output) || error("$output: $!\n"); # Title printf DOT ("digraph \"%s; %s %s\" {\n", $prog, Unparse($overall_total), Units()); if ($main::opt_pdf) { # The output is more printable if we set the page size for dot. printf DOT ("size=\"8,11\"\n"); } printf DOT ("node [width=0.375,height=0.25];\n"); # Print legend printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . 
"label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", $prog, sprintf("Total %s: %s", Units(), Unparse($overall_total)), sprintf("Focusing on: %s", Unparse($local_total)), sprintf("Dropped nodes with <= %s abs(%s)", Unparse($nodelimit), Units()), sprintf("Dropped edges with <= %s %s", Unparse($edgelimit), Units()) ); # Print nodes my %node = (); my $nextnode = 1; foreach my $a (@list[0..$last]) { # Pick font size my $f = GetEntry($flat, $a); my $c = GetEntry($cumulative, $a); my $fs = 8; if ($local_total > 0) { $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); } $node{$a} = $nextnode++; my $sym = $a; $sym =~ s/\s+/\\n/g; $sym =~ s/::/\\n/g; # Extra cumulative info to print for non-leaves my $extra = ""; if ($f != $c) { $extra = sprintf("\\rof %s (%s)", Unparse($c), Percent($c, $local_total)); } my $style = ""; if ($main::opt_heapcheck) { if ($f > 0) { # make leak-causing nodes more visible (add a background) $style = ",style=filled,fillcolor=gray" } elsif ($f < 0) { # make anti-leak-causing nodes (which almost never occur) # stand out as well (triple border) $style = ",peripheries=3" } } printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . "\",shape=box,fontsize=%.1f%s];\n", $node{$a}, $sym, Unparse($f), Percent($f, $local_total), $extra, $fs, $style, ); } # Get edges and counts per edge my %edge = (); my $n; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$raw})) { # TODO: omit low %age edges $n = $raw->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); for (my $i = 1; $i <= $#translated; $i++) { my $src = $translated[$i]; my $dst = $translated[$i-1]; #next if ($src eq $dst); # Avoid self-edges? if (exists($node{$src}) && exists($node{$dst})) { my $edge_label = "$src\001$dst"; if (!exists($edge{$edge_label})) { $edge{$edge_label} = 0; } $edge{$edge_label} += $n; } } } # Print edges (process in order of decreasing counts) my %indegree = (); # Number of incoming edges added per node so far my %outdegree = (); # Number of outgoing edges added per node so far foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { my @x = split(/\001/, $e); $n = $edge{$e}; # Initialize degree of kept incoming and outgoing edges if necessary my $src = $x[0]; my $dst = $x[1]; if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } my $keep; if ($indegree{$dst} == 0) { # Keep edge if needed for reachability $keep = 1; } elsif (abs($n) <= $edgelimit) { # Drop if we are below --edgefraction $keep = 0; } elsif ($outdegree{$src} >= $main::opt_maxdegree || $indegree{$dst} >= $main::opt_maxdegree) { # Keep limited number of in/out edges per node $keep = 0; } else { $keep = 1; } if ($keep) { $outdegree{$src}++; $indegree{$dst}++; # Compute line width based on edge count my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); if ($fraction > 1) { $fraction = 1; } my $w = $fraction * 2; if ($w < 1 && ($main::opt_web || $main::opt_svg)) { # SVG output treats line widths < 1 poorly. 
$w = 1;
      }
      # Dot sometimes segfaults if given edge weights that are too large, so
      # we cap the weights at a large value
      my $edgeweight = abs($n) ** 0.7;
      if ($edgeweight > 100000) { $edgeweight = 100000; }
      $edgeweight = int($edgeweight);

      my $style = sprintf("setlinewidth(%f)", $w);
      if ($x[1] =~ m/\(inline\)/) { $style .= ",dashed"; }

      # Use a slightly squashed function of the edge count as the weight
      printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
                  $node{$x[0]},
                  $node{$x[1]},
                  Unparse($n),
                  $edgeweight,
                  $style);
    }
  }

  print DOT ("}\n");
  close(DOT);

  if ($main::opt_web || $main::opt_svg) {
    # Rewrite SVG to be more usable inside web browser.
    RewriteSvg(TempName($main::next_tmpfile, "svg"));
  }

  return 1;
}

sub RewriteSvg {
  my $svgfile = shift;

  open(SVG, $svgfile) || die "open temp svg: $!";
  my @svg = <SVG>;
  close(SVG);
  unlink $svgfile;
  my $svg = join('', @svg);

  # Dot's SVG output is
  #
  #    <svg width="___" height="___"
  #     viewBox="___" xmlns=...>
  #    <g id="graph0" transform="...">
  #    ...
  #    </g>
  #    </svg>
  #
  # Change it to
  #
  #    <svg width="100%" height="100%"
  #     xmlns=...>
  #    $svg_javascript
  #    <g id="viewport" transform="translate(0,0)">
  #    <g id="graph0" transform="...">
  #    ...
  #    </g>
  #    </g>
  #    </svg>

  # Fix width, height; drop viewBox.
  $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;

  # Insert script, viewport <g> above first <g>
  my $svg_javascript = SvgJavascript();
  my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
  $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;

  # Insert </g> (for viewport) just before </svg>.
  $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;

  # Write back out.
  open(SVG, ">$svgfile") || die "open $svgfile: $!";
  print SVG $svg;
  close(SVG);
}

sub SvgJavascript {
  return <<'EOF';
EOF
}

# Provides a map from fullname to shortname for cases where the
# shortname is ambiguous.  The symlist has both the fullname and
# shortname for all symbols, which is usually fine, but sometimes --
# such as overloaded functions -- two different fullnames can map to
# the same shortname.  In that case, we use the address of the
# function to disambiguate the two.  This function fills in a map that
# maps fullnames to modified shortnames in such cases.  If a fullname
# is not present in the map, the 'normal' shortname provided by the
# symlist is the appropriate one to use.
sub FillFullnameToShortnameMap {
  my $symbols = shift;
  my $fullname_to_shortname_map = shift;
  my $shortnames_seen_once = {};
  my $shortnames_seen_more_than_once = {};

  foreach my $symlist (values(%{$symbols})) {
    # TODO(csilvers): deal with inlined symbols too.
    my $shortname = $symlist->[0];
    my $fullname = $symlist->[2];
    if ($fullname !~ /<[0-9a-fA-F]+>$/) {  # fullname doesn't end in an address
      next;    # the only collisions we care about are when addresses differ
    }
    if (defined($shortnames_seen_once->{$shortname}) &&
        $shortnames_seen_once->{$shortname} ne $fullname) {
      $shortnames_seen_more_than_once->{$shortname} = 1;
    } else {
      $shortnames_seen_once->{$shortname} = $fullname;
    }
  }

  foreach my $symlist (values(%{$symbols})) {
    my $shortname = $symlist->[0];
    my $fullname = $symlist->[2];
    # TODO(csilvers): take in a list of addresses we care about, and only
    # store in the map if $symlist->[1] is in that list.  Saves space.
    next if defined($fullname_to_shortname_map->{$fullname});
    if (defined($shortnames_seen_more_than_once->{$shortname})) {
      if ($fullname =~ /<0*([^>]*)>$/) {   # fullname has address at end of it
        $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
      }
    }
  }
}

# Return a small number that identifies the argument.
# Multiple calls with the same argument will return the same number.
# Calls with different arguments will return different numbers.
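# For example (illustrative addresses), starting from an empty
# %main::uniqueid, ShortIdFor("0x4006cd") returns 1, ShortIdFor("0x4007a2")
# returns 2, and a repeated ShortIdFor("0x4006cd") returns 1 again.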
sub ShortIdFor { my $key = shift; my $id = $main::uniqueid{$key}; if (!defined($id)) { $id = keys(%main::uniqueid) + 1; $main::uniqueid{$key} = $id; } return $id; } # Translate a stack of addresses into a stack of symbols sub TranslateStack { my $symbols = shift; my $fullname_to_shortname_map = shift; my $k = shift; my @addrs = split(/\n/, $k); my @result = (); for (my $i = 0; $i <= $#addrs; $i++) { my $a = $addrs[$i]; # Skip large addresses since they sometimes show up as fake entries on RH9 if (length($a) > 8 && $a gt "7fffffffffffffff") { next; } if ($main::opt_disasm || $main::opt_list) { # We want just the address for the key push(@result, $a); next; } my $symlist = $symbols->{$a}; if (!defined($symlist)) { $symlist = [$a, "", $a]; } # We can have a sequence of symbols for a particular entry # (more than one symbol in the case of inlining). Callers # come before callees in symlist, so walk backwards since # the translated stack should contain callees before callers. for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { my $func = $symlist->[$j-2]; my $fileline = $symlist->[$j-1]; my $fullfunc = $symlist->[$j]; if (defined($fullname_to_shortname_map->{$fullfunc})) { $func = $fullname_to_shortname_map->{$fullfunc}; } if ($j > 2) { $func = "$func (inline)"; } # Do not merge nodes corresponding to Callback::Run since that # causes confusing cycles in dot display. Instead, we synthesize # a unique name for this frame per caller. if ($func =~ m/Callback.*::Run$/) { my $caller = ($i > 0) ? $addrs[$i-1] : 0; $func = "Run#" . ShortIdFor($caller); } if ($main::opt_addresses) { push(@result, "$a $func $fileline"); } elsif ($main::opt_lines) { if ($func eq '??' && $fileline eq '??:0') { push(@result, "$a"); } else { push(@result, "$func $fileline"); } } elsif ($main::opt_functions) { if ($func eq '??') { push(@result, "$a"); } else { push(@result, $func); } } elsif ($main::opt_files) { if ($fileline eq '??:0' || $fileline eq '') { push(@result, "$a"); } else { my $f = $fileline; $f =~ s/:\d+$//; push(@result, $f); } } else { push(@result, $a); last; # Do not print inlined info } } } # print join(",", @addrs), " => ", join(",", @result), "\n"; return @result; } # Generate percent string for a number and a total sub Percent { my $num = shift; my $tot = shift; if ($tot != 0) { return sprintf("%.1f%%", $num * 100.0 / $tot); } else { return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); } } # Generate pretty-printed form of number sub Unparse { my $num = shift; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return sprintf("%d", $num); } else { if ($main::opt_show_bytes) { return sprintf("%d", $num); } else { return sprintf("%.1f", $num / 1048576.0); } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds } else { return sprintf("%d", $num); } } # Alternate pretty-printed form: 0 maps to "." 
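# e.g. for a CPU profile, UnparseAlt(0) is "." while UnparseAlt(42) is "42".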
sub UnparseAlt { my $num = shift; if ($num == 0) { return "."; } else { return Unparse($num); } } # Alternate pretty-printed form: 0 maps to "" sub HtmlPrintNumber { my $num = shift; if ($num == 0) { return ""; } else { return Unparse($num); } } # Return output units sub Units { if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return "objects"; } else { if ($main::opt_show_bytes) { return "B"; } else { return "MB"; } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return "seconds"; } else { return "samples"; } } ##### Profile manipulation code ##### # Generate flattened profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a] sub FlatProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); if ($#addrs >= 0) { AddEntry($result, $addrs[0], $count); } } return $result; } # Generate cumulative profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a], [b], [c], [d] sub CumulativeProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { AddEntry($result, $a, $count); } } return $result; } # If the second-youngest PC on the stack is always the same, returns # that pc. Otherwise, returns undef. sub IsSecondPcAlwaysTheSame { my $profile = shift; my $second_pc = undef; foreach my $k (keys(%{$profile})) { my @addrs = split(/\n/, $k); if ($#addrs < 1) { return undef; } if (not defined $second_pc) { $second_pc = $addrs[1]; } else { if ($second_pc ne $addrs[1]) { return undef; } } } return $second_pc; } sub ExtractSymbolLocation { my $symbols = shift; my $address = shift; # 'addr2line' outputs "??:0" for unknown locations; we do the # same to be consistent. my $location = "??:0:unknown"; if (exists $symbols->{$address}) { my $file = $symbols->{$address}->[1]; if ($file eq "?") { $file = "??:0" } $location = $file . ":" . $symbols->{$address}->[0]; } return $location; } # Extracts a graph of calls. 
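# Keys of the returned map are either "file:line:function" for a leaf
# location, or "caller -> callee" pairs of such locations (see
# ExtractSymbolLocation above); values are the counts charged to them.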
sub ExtractCalls { my $symbols = shift; my $profile = shift; my $calls = {}; while( my ($stack_trace, $count) = each %$profile ) { my @address = split(/\n/, $stack_trace); my $destination = ExtractSymbolLocation($symbols, $address[0]); AddEntry($calls, $destination, $count); for (my $i = 1; $i <= $#address; $i++) { my $source = ExtractSymbolLocation($symbols, $address[$i]); my $call = "$source -> $destination"; AddEntry($calls, $call, $count); $destination = $source; } } return $calls; } sub FilterFrames { my $symbols = shift; my $profile = shift; if ($main::opt_retain eq '' && $main::opt_exclude eq '') { return $profile; } my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my @path = (); foreach my $a (@addrs) { my $sym; if (exists($symbols->{$a})) { $sym = $symbols->{$a}->[0]; } else { $sym = $a; } if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) { next; } if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) { next; } push(@path, $a); } if (scalar(@path) > 0) { my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } } return $result; } sub RemoveUninterestingFrames { my $symbols = shift; my $profile = shift; # List of function names to skip my %skip = (); my $skip_regexp = 'NOMATCH'; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { foreach my $name ('@JEMALLOC_PREFIX@calloc', 'cfree', '@JEMALLOC_PREFIX@malloc', 'newImpl', 'void* newImpl', '@JEMALLOC_PREFIX@free', '@JEMALLOC_PREFIX@memalign', '@JEMALLOC_PREFIX@posix_memalign', '@JEMALLOC_PREFIX@aligned_alloc', 'pvalloc', '@JEMALLOC_PREFIX@valloc', '@JEMALLOC_PREFIX@realloc', '@JEMALLOC_PREFIX@mallocx', '@JEMALLOC_PREFIX@rallocx', '@JEMALLOC_PREFIX@xallocx', '@JEMALLOC_PREFIX@dallocx', '@JEMALLOC_PREFIX@sdallocx', 'tc_calloc', 'tc_cfree', 'tc_malloc', 'tc_free', 'tc_memalign', 'tc_posix_memalign', 'tc_pvalloc', 'tc_valloc', 'tc_realloc', 'tc_new', 'tc_delete', 'tc_newarray', 'tc_deletearray', 'tc_new_nothrow', 'tc_newarray_nothrow', 'do_malloc', '::do_malloc', # new name -- got moved to an unnamed ns '::do_malloc_or_cpp_alloc', 'DoSampledAllocation', 'simple_alloc::allocate', '__malloc_alloc_template::allocate', '__builtin_delete', '__builtin_new', '__builtin_vec_delete', '__builtin_vec_new', 'operator new', 'operator new[]', # The entry to our memory-allocation routines on OS X 'malloc_zone_malloc', 'malloc_zone_calloc', 'malloc_zone_valloc', 'malloc_zone_realloc', 'malloc_zone_memalign', 'malloc_zone_free', # These mark the beginning/end of our custom sections '__start_google_malloc', '__stop_google_malloc', '__start_malloc_hook', '__stop_malloc_hook') { $skip{$name} = 1; $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything } # TODO: Remove TCMalloc once everything has been # moved into the tcmalloc:: namespace and we have flushed # old code out of the system. $skip_regexp = "TCMalloc|^tcmalloc::"; } elsif ($main::profile_type eq 'contention') { foreach my $vname ('base::RecordLockProfileData', 'base::SubmitMutexProfileData', 'base::SubmitSpinLockProfileData', 'Mutex::Unlock', 'Mutex::UnlockSlow', 'Mutex::ReaderUnlock', 'MutexLock::~MutexLock', 'SpinLock::Unlock', 'SpinLock::SlowUnlock', 'SpinLockHolder::~SpinLockHolder') { $skip{$vname} = 1; } } elsif ($main::profile_type eq 'cpu') { # Drop signal handlers used for CPU profile collection # TODO(dpeng): this should not be necessary; it's taken # care of by the general 2nd-pc mechanism below. 
foreach my $name ('ProfileData::Add', # historical 'ProfileData::prof_handler', # historical 'CpuProfiler::prof_handler', '__FRAME_END__', '__pthread_sighandler', '__restore') { $skip{$name} = 1; } } else { # Nothing skipped for unknown types } if ($main::profile_type eq 'cpu') { # If all the second-youngest program counters are the same, # this STRONGLY suggests that it is an artifact of measurement, # i.e., stack frames pushed by the CPU profiler signal handler. # Hence, we delete them. # (The topmost PC is read from the signal structure, not from # the stack, so it does not get involved.) while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { my $result = {}; my $func = ''; if (exists($symbols->{$second_pc})) { $second_pc = $symbols->{$second_pc}->[0]; } print STDERR "Removing $second_pc from all stack traces.\n"; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); splice @addrs, 1, 1; my $reduced_path = join("\n", @addrs); AddEntry($result, $reduced_path, $count); } $profile = $result; } } my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my @path = (); foreach my $a (@addrs) { if (exists($symbols->{$a})) { my $func = $symbols->{$a}->[0]; if ($skip{$func} || ($func =~ m/$skip_regexp/)) { # Throw away the portion of the backtrace seen so far, under the # assumption that previous frames were for functions internal to the # allocator. @path = (); next; } } push(@path, $a); } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } $result = FilterFrames($symbols, $result); return $result; } # Reduce profile to granularity given by user sub ReduceProfile { my $symbols = shift; my $profile = shift; my $result = {}; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); my @path = (); my %seen = (); $seen{''} = 1; # So that empty keys are skipped foreach my $e (@translated) { # To avoid double-counting due to recursion, skip a stack-trace # entry if it has already been seen if (!$seen{$e}) { $seen{$e} = 1; push(@path, $e); } } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } return $result; } # Does the specified symbol array match the regexp? 
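# (A symbol array holds (shortname, fileline, fullname) triples; the
# shortname and fileline fields of each triple are tested below.)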
sub SymbolMatches { my $sym = shift; my $re = shift; if (defined($sym)) { for (my $i = 0; $i < $#{$sym}; $i += 3) { if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { return 1; } } } return 0; } # Focus only on paths involving specified regexps sub FocusProfile { my $symbols = shift; my $profile = shift; my $focus = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { AddEntry($result, $k, $count); last; } } } return $result; } # Focus only on paths not involving specified regexps sub IgnoreProfile { my $symbols = shift; my $profile = shift; my $ignore = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my $matched = 0; foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { $matched = 1; last; } } if (!$matched) { AddEntry($result, $k, $count); } } return $result; } # Get total count in profile sub TotalProfile { my $profile = shift; my $result = 0; foreach my $k (keys(%{$profile})) { $result += $profile->{$k}; } return $result; } # Add A to B sub AddProfile { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { my $v = $A->{$k}; AddEntry($R, $k, $v); } # add all keys in B foreach my $k (keys(%{$B})) { my $v = $B->{$k}; AddEntry($R, $k, $v); } return $R; } # Merges symbol maps sub MergeSymbols { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { $R->{$k} = $A->{$k}; } if (defined($B)) { foreach my $k (keys(%{$B})) { $R->{$k} = $B->{$k}; } } return $R; } # Add A to B sub AddPcs { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { $R->{$k} = 1 } # add all keys in B foreach my $k (keys(%{$B})) { $R->{$k} = 1 } return $R; } # Subtract B from A sub SubtractProfile { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { my $v = $A->{$k} - GetEntry($B, $k); if ($v < 0 && $main::opt_drop_negative) { $v = 0; } AddEntry($R, $k, $v); } if (!$main::opt_drop_negative) { # Take care of when subtracted profile has more entries foreach my $k (keys(%{$B})) { if (!exists($A->{$k})) { AddEntry($R, $k, 0 - $B->{$k}); } } } return $R; } # Get entry from profile; zero if not present sub GetEntry { my $profile = shift; my $k = shift; if (exists($profile->{$k})) { return $profile->{$k}; } else { return 0; } } # Add entry to specified profile sub AddEntry { my $profile = shift; my $k = shift; my $n = shift; if (!exists($profile->{$k})) { $profile->{$k} = 0; } $profile->{$k} += $n; } # Add a stack of entries to specified profile, and add them to the $pcs # list. 
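# For example (illustrative addresses), AddEntries($profile, $pcs,
# "4006cd 4007a2", 5) hex-extends both PCs, marks each in $pcs, and
# charges a count of 5 to the joined "pc1\npc2" stack key.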
sub AddEntries {
  my $profile = shift;
  my $pcs = shift;
  my $stack = shift;
  my $count = shift;
  my @k = ();

  foreach my $e (split(/\s+/, $stack)) {
    my $pc = HexExtend($e);
    $pcs->{$pc} = 1;
    push @k, $pc;
  }
  AddEntry($profile, (join "\n", @k), $count);
}

##### Code to profile a server dynamically #####

sub CheckSymbolPage {
  my $url = SymbolPageURL();
  my $command = ShellEscape(@URL_FETCHER, $url);
  open(SYMBOL, "$command |") or error($command);
  my $line = <SYMBOL>;
  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
  close(SYMBOL);
  unless (defined($line)) {
    error("$url doesn't exist\n");
  }

  if ($line =~ /^num_symbols:\s+(\d+)$/) {
    if ($1 == 0) {
      error("Stripped binary. No symbols available.\n");
    }
  } else {
    error("Failed to get the number of symbols from $url\n");
  }
}

sub IsProfileURL {
  my $profile_name = shift;
  if (-f $profile_name) {
    printf STDERR "Using local file $profile_name.\n";
    return 0;
  }
  return 1;
}

sub ParseProfileURL {
  my $profile_name = shift;

  if (!defined($profile_name) || $profile_name eq "") {
    return ();
  }

  # Split profile URL - matches all non-empty strings, so no test.
  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;

  my $proto = $1 || "http://";
  my $hostport = $2;
  my $prefix = $3;
  my $profile = $4 || "/";

  my $host = $hostport;
  $host =~ s/:.*//;

  my $baseurl = "$proto$hostport$prefix";
  return ($host, $baseurl, $profile);
}

# We fetch symbols from the first profile argument.
sub SymbolPageURL {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  return "$baseURL$SYMBOL_PAGE";
}

sub FetchProgramName() {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  my $url = "$baseURL$PROGRAM_NAME_PAGE";
  my $command_line = ShellEscape(@URL_FETCHER, $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  my $cmdline = <CMDLINE>;
  $cmdline =~ s/\r//g;     # turn windows-looking lines into unix-looking lines
  close(CMDLINE);
  error("Failed to get program name from $url\n") unless defined($cmdline);
  $cmdline =~ s/\x00.+//;  # Remove argv[1] and later args.
  $cmdline =~ s!\n!!g;     # Remove LFs.
  return $cmdline;
}

# Gee, curl's -L (--location) option isn't reliable at least
# with its 7.12.3 version.  Curl will forget to post data if
# there is a redirection.  This function is a workaround for
# curl.  Redirection happens on borg hosts.
sub ResolveRedirectionForCurl {
  my $url = shift;
  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  while (<CMDLINE>) {
    s/\r//g;               # turn windows-looking lines into unix-looking lines
    if (/^Location: (.*)/) {
      $url = $1;
    }
  }
  close(CMDLINE);
  return $url;
}

# Add a timeout flag to URL_FETCHER.  Returns a new list.
sub AddFetchTimeout {
  my $timeout = shift;
  my @fetcher = @_;
  if (defined($timeout)) {
    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
      push(@fetcher, "--max-time", sprintf("%d", $timeout));
    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
      push(@fetcher, sprintf("--deadline=%d", $timeout));
    }
  }
  return @fetcher;
}

# Reads a symbol map from the file handle name given as $1, returning
# the resulting symbol map.  Also processes variables relating to symbols.
# Currently, the only variable processed is 'binary=' which updates
# $main::prog to have the correct program name.
sub ReadSymbols {
  my $in = shift;
  my $map = {};
  while (<$in>) {
    s/\r//g;               # turn windows-looking lines into unix-looking lines
    # Removes all the leading zeroes from the symbols, see comment below.
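    # e.g. a line "0x00000000004006cd  foo::bar()" is stored under the
    # stripped key "4006cd" (illustrative symbol).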
if (m/^0x0*([0-9a-f]+)\s+(.+)/) { $map->{$1} = $2; } elsif (m/^---/) { last; } elsif (m/^([a-z][^=]*)=(.*)$/ ) { my ($variable, $value) = ($1, $2); for ($variable, $value) { s/^\s+//; s/\s+$//; } if ($variable eq "binary") { if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", $main::prog, $value); } $main::prog = $value; } else { printf STDERR ("Ignoring unknown variable in symbols list: " . "'%s' = '%s'\n", $variable, $value); } } } return $map; } sub URLEncode { my $str = shift; $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg; return $str; } sub AppendSymbolFilterParams { my $url = shift; my @params = (); if ($main::opt_retain ne '') { push(@params, sprintf("retain=%s", URLEncode($main::opt_retain))); } if ($main::opt_exclude ne '') { push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude))); } if (scalar @params > 0) { $url = sprintf("%s?%s", $url, join("&", @params)); } return $url; } # Fetches and processes symbols to prepare them for use in the profile output # code. If the optional 'symbol_map' arg is not given, fetches symbols from # $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols # are assumed to have already been fetched into 'symbol_map' and are simply # extracted and processed. sub FetchSymbols { my $pcset = shift; my $symbol_map = shift; my %seen = (); my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq if (!defined($symbol_map)) { my $post_data = join("+", sort((map {"0x" . "$_"} @pcs))); open(POSTFILE, ">$main::tmpfile_sym"); print POSTFILE $post_data; close(POSTFILE); my $url = SymbolPageURL(); my $command_line; if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { $url = ResolveRedirectionForCurl($url); $url = AppendSymbolFilterParams($url); $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", $url); } else { $url = AppendSymbolFilterParams($url); $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) . " < " . ShellEscape($main::tmpfile_sym)); } # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); $symbol_map = ReadSymbols(*SYMBOL{IO}); close(SYMBOL); } my $symbols = {}; foreach my $pc (@pcs) { my $fullname; # For 64 bits binaries, symbols are extracted with 8 leading zeroes. # Then /symbol reads the long symbols in as uint64, and outputs # the result with a "0x%08llx" format which get rid of the zeroes. # By removing all the leading zeroes in both $pc and the symbols from # /symbol, the symbols match and are retrievable from the map. my $shortpc = $pc; $shortpc =~ s/^0*//; # Each line may have a list of names, which includes the function # and also other functions it has inlined. They are separated (in # PrintSymbolizedProfile), by --, which is illegal in function names. my $fullnames; if (defined($symbol_map->{$shortpc})) { $fullnames = $symbol_map->{$shortpc}; } else { $fullnames = "0x" . 
$pc; # Just use addresses } my $sym = []; $symbols->{$pc} = $sym; foreach my $fullname (split("--", $fullnames)) { my $name = ShortFunctionName($fullname); push(@{$sym}, $name, "?", $fullname); } } return $symbols; } sub BaseName { my $file_name = shift; $file_name =~ s!^.*/!!; # Remove directory name return $file_name; } sub MakeProfileBaseName { my ($binary_name, $profile_name) = @_; my ($host, $baseURL, $path) = ParseProfileURL($profile_name); my $binary_shortname = BaseName($binary_name); return sprintf("%s.%s.%s", $binary_shortname, $main::op_time, $host); } sub FetchDynamicProfile { my $binary_name = shift; my $profile_name = shift; my $fetch_name_only = shift; my $encourage_patience = shift; if (!IsProfileURL($profile_name)) { return $profile_name; } else { my ($host, $baseURL, $path) = ParseProfileURL($profile_name); if ($path eq "" || $path eq "/") { # Missing type specifier defaults to cpu-profile $path = $PROFILE_PAGE; } my $profile_file = MakeProfileBaseName($binary_name, $profile_name); my $url = "$baseURL$path"; my $fetch_timeout = undef; if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { if ($path =~ m/[?]/) { $url .= "&"; } else { $url .= "?"; } $url .= sprintf("seconds=%d", $main::opt_seconds); $fetch_timeout = $main::opt_seconds * 1.01 + 60; # Set $profile_type for consumption by PrintSymbolizedProfile. $main::profile_type = 'cpu'; } else { # For non-CPU profiles, we add a type-extension to # the target profile file name. my $suffix = $path; $suffix =~ s,/,.,g; $profile_file .= $suffix; # Set $profile_type for consumption by PrintSymbolizedProfile. if ($path =~ m/$HEAP_PAGE/) { $main::profile_type = 'heap'; } elsif ($path =~ m/$GROWTH_PAGE/) { $main::profile_type = 'growth'; } elsif ($path =~ m/$CONTENTION_PAGE/) { $main::profile_type = 'contention'; } } my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof"); if (! -d $profile_dir) { mkdir($profile_dir) || die("Unable to create profile directory $profile_dir: $!\n"); } my $tmp_profile = "$profile_dir/.tmp.$profile_file"; my $real_profile = "$profile_dir/$profile_file"; if ($fetch_name_only > 0) { return $real_profile; } my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; if ($encourage_patience) { print STDERR "Be patient...\n"; } } else { print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; } (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); print STDERR "Wrote profile to $real_profile\n"; $main::collected_profile = $real_profile; return $main::collected_profile; } } # Collect profiles in parallel sub FetchDynamicProfiles { my $items = scalar(@main::pfile_args); my $levels = log($items) / log(2); if ($items == 1) { $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); } else { # math rounding issues if ((2 ** $levels) < $items) { $levels++; } my $count = scalar(@main::pfile_args); for (my $i = 0; $i < $count; $i++) { $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); } print STDERR "Fetching $count profiles, Be patient...\n"; FetchDynamicProfilesRecurse($levels, 0, 0); $main::collected_profile = join(" \\\n ", @main::profile_files); } } # Recursively fork a process to get enough processes # collecting profiles sub FetchDynamicProfilesRecurse { my $maxlevel = shift; my $level = shift; my $position = shift; if (my $pid = fork()) { $position = 0 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); wait; } else { $position = 1 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); cleanup(); exit(0); } } # Collect a single profile sub TryCollectProfile { my $maxlevel = shift; my $level = shift; my $position = shift; if ($level >= ($maxlevel - 1)) { if ($position < scalar(@main::pfile_args)) { FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); } } else { FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); } } ##### Parsing code ##### # Provide a small streaming-read module to handle very large # cpu-profile files. Stream in chunks along a sliding window. # Provides an interface to get one 'slot', correctly handling # endian-ness differences. A slot is one 32-bit or 64-bit word # (depending on the input profile). We tell endianness and bit-size # for the profile by looking at the first 8 bytes: in cpu profiles, # the second slot is always 3 (we'll accept anything that's not 0). BEGIN { package CpuProfileStream; sub new { my ($class, $file, $fname) = @_; my $self = { file => $file, base => 0, stride => 512 * 1024, # must be a multiple of bitsize/8 slots => [], unpack_code => "", # N for big-endian, V for little perl_is_64bit => 1, # matters if profile is 64-bit }; bless $self, $class; # Let unittests adjust the stride if ($main::opt_test_stride > 0) { $self->{stride} = $main::opt_test_stride; } # Read the first two slots to figure out bitsize and endianness. my $slots = $self->{slots}; my $str; read($self->{file}, $str, 8); # Set the global $address_length based on what we see here. # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). $address_length = ($str eq (chr(0)x8)) ? 16 : 8; if ($address_length == 8) { if (substr($str, 6, 2) eq chr(0)x2) { $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 4, 2) eq chr(0)x2) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**16\n"); } @$slots = unpack($self->{unpack_code} . 
"*", $str); } else { # If we're a 64-bit profile, check if we're a 64-bit-capable # perl. Otherwise, each slot will be represented as a float # instead of an int64, losing precision and making all the # 64-bit addresses wrong. We won't complain yet, but will # later if we ever see a value that doesn't fit in 32 bits. my $has_q = 0; eval { $has_q = pack("Q", "1") ? 1 : 1; }; if (!$has_q) { $self->{perl_is_64bit} = 0; } read($self->{file}, $str, 8); if (substr($str, 4, 4) eq chr(0)x4) { # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 0, 4) eq chr(0)x4) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**32\n"); } my @pair = unpack($self->{unpack_code} . "*", $str); # Since we know one of the pair is 0, it's fine to just add them. @$slots = (0, $pair[0] + $pair[1]); } return $self; } # Load more data when we access slots->get(X) which is not yet in memory. sub overflow { my ($self) = @_; my $slots = $self->{slots}; $self->{base} += $#$slots + 1; # skip over data we're replacing my $str; read($self->{file}, $str, $self->{stride}); if ($address_length == 8) { # the 32-bit case # This is the easy case: unpack provides 32-bit unpacking primitives. @$slots = unpack($self->{unpack_code} . "*", $str); } else { # We need to unpack 32 bits at a time and combine. my @b32_values = unpack($self->{unpack_code} . "*", $str); my @b64_values = (); for (my $i = 0; $i < $#b32_values; $i += 2) { # TODO(csilvers): if this is a 32-bit perl, the math below # could end up in a too-large int, which perl will promote # to a double, losing necessary precision. Deal with that. # Right now, we just die. my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); if ($self->{unpack_code} eq 'N') { # big-endian ($lo, $hi) = ($hi, $lo); } my $value = $lo + $hi * (2**32); if (!$self->{perl_is_64bit} && # check value is exactly represented (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { ::error("Need a 64-bit perl to process this 64-bit profile.\n"); } push(@b64_values, $value); } @$slots = @b64_values; } } # Access the i-th long in the file (logically), or -1 at EOF. sub get { my ($self, $idx) = @_; my $slots = $self->{slots}; while ($#$slots >= 0) { if ($idx < $self->{base}) { # The only time we expect a reference to $slots[$i - something] # after referencing $slots[$i] is reading the very first header. # Since $stride > |header|, that shouldn't cause any lookback # errors. And everything after the header is sequential. print STDERR "Unexpected look-back reading CPU profile"; return -1; # shrug, don't know what better to return } elsif ($idx > $self->{base} + $#$slots) { $self->overflow(); } else { return $slots->[$idx - $self->{base}]; } } # If we get here, $slots is [], which means we've reached EOF return -1; # unique since slots is supposed to hold unsigned numbers } } # Reads the top, 'header' section of a profile, and returns the last # line of the header, commonly called a 'header line'. The header # section of a profile consists of zero or more 'command' lines that # are instructions to jeprof, which jeprof executes when reading the # header. All 'command' lines start with a %. After the command # lines is the 'header line', which is a profile-specific line that # indicates what type of profile it is, and perhaps other global # information about the profile. 
# For instance, here's a header line
# for a heap profile:
#   heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile
# For historical reasons, the CPU profile does not contain a text-
# readable header line.  If the profile looks like a CPU profile,
# this function returns "".  If no header line could be found, this
# function returns undef.
#
# The following commands are recognized:
#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
#
# The input file should be in binmode.
sub ReadProfileHeader {
  local *PROFILE = shift;
  my $firstchar = "";
  my $line = "";
  read(PROFILE, $firstchar, 1);
  seek(PROFILE, -1, 1);                    # unread the firstchar
  if ($firstchar !~ /[[:print:]]/) {       # is not a text character
    return "";
  }
  while (defined($line = <PROFILE>)) {
    $line =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
    if ($line =~ /^%warn\s+(.*)/) {        # 'warn' command
      # Note this matches both '%warn blah\n' and '%warn\n'.
      print STDERR "WARNING: $1\n";        # print the rest of the line
    } elsif ($line =~ /^%/) {
      print STDERR "Ignoring unknown command from profile header: $line";
    } else {
      # End of commands, must be the header line.
      return $line;
    }
  }
  return undef;     # got to EOF without seeing a header line
}

sub IsSymbolizedProfileFile {
  my $file_name = shift;
  if (!(-e $file_name) || !(-r $file_name)) {
    return 0;
  }
  # Check if the file contains a symbol-section marker.
  open(TFILE, "<$file_name");
  binmode TFILE;
  my $firstline = ReadProfileHeader(*TFILE);
  close(TFILE);
  if (!$firstline) {
    return 0;
  }
  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $symbol_marker = $&;
  return $firstline =~ /^--- *$symbol_marker/;
}

# Parse profile generated by common/profiler.cc and return a reference
# to a map:
#      $result->{version}     Version number of profile file
#      $result->{period}      Sampling period (in microseconds)
#      $result->{profile}     Profile object
#      $result->{threads}     Map of thread IDs to profile objects
#      $result->{map}         Memory map info from profile
#      $result->{pcs}         Hash of all PC values seen, key is hex address
sub ReadProfile {
  my $prog = shift;
  my $fname = shift;
  my $result;            # return value

  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $contention_marker = $&;
  $GROWTH_PAGE  =~ m,[^/]+$,;       # matches everything after the last slash
  my $growth_marker = $&;
  $SYMBOL_PAGE =~ m,[^/]+$,;        # matches everything after the last slash
  my $symbol_marker = $&;
  $PROFILE_PAGE =~ m,[^/]+$,;       # matches everything after the last slash
  my $profile_marker = $&;
  $HEAP_PAGE =~ m,[^/]+$,;          # matches everything after the last slash
  my $heap_marker = $&;

  # Look at first line to see if it is a heap or a CPU profile.
  # CPU profile may start with no header at all, and just binary data
  # (starting with \0\0\0\0) -- in that case, don't try to read the
  # whole firstline, since it may be gigabytes(!) of data.
  open(PROFILE, "<$fname") || error("$fname: $!\n");
  binmode PROFILE;      # New perls do UTF-8 processing
  my $header = ReadProfileHeader(*PROFILE);
  if (!defined($header)) {   # means "at EOF"
    error("Profile is empty.\n");
  }

  my $symbols;
  if ($header =~ m/^--- *$symbol_marker/o) {
    # Verify that the user asked for a symbolized profile
    if (!$main::use_symbolized_profile) {
      # we have both a binary and symbolized profiles, abort
      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
            "a binary arg. Try again without passing\n   $prog\n");
    }
    # Read the symbol section of the symbolized profile file.
    $symbols = ReadSymbols(*PROFILE{IO});
    # Read the next line to get the header for the remaining profile.
$header = ReadProfileHeader(*PROFILE) || ""; } if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) { # Skip "--- ..." line for profile types that have their own headers. $header = ReadProfileHeader(*PROFILE) || ""; } $main::profile_type = ''; if ($header =~ m/^heap profile:.*$growth_marker/o) { $main::profile_type = 'growth'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap profile:/) { $main::profile_type = 'heap'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap/) { $main::profile_type = 'heap'; $result = ReadThreadedHeapProfile($prog, $fname, $header); } elsif ($header =~ m/^--- *$contention_marker/o) { $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *Stacks:/) { print STDERR "Old format contention profile: mistakenly reports " . "condition variable signals as lock contentions.\n"; $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *$profile_marker/) { # the binary cpu profile data starts immediately after this line $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } else { if (defined($symbols)) { # a symbolized profile contains a format we don't recognize, bail out error("$fname: Cannot recognize profile section after symbols.\n"); } # no ascii header present -- must be a CPU profile $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } close(PROFILE); # if we got symbols along with the profile, return those as well if (defined($symbols)) { $result->{symbols} = $symbols; } return $result; } # Subtract one from caller pc so we map back to call instr. # However, don't do this if we're reading a symbolized profile # file, in which case the subtract-one was done when the file # was written. # # We apply the same logic to all readers, though ReadCPUProfile uses an # independent implementation. sub FixCallerAddresses { my $stack = shift; # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile() # dumps unadjusted profiles. { $stack =~ /(\s)/; my $delimiter = $1; my @addrs = split(' ', $stack); my @fixedaddrs; $#fixedaddrs = $#addrs; if ($#addrs >= 0) { $fixedaddrs[0] = $addrs[0]; } for (my $i = 1; $i <= $#addrs; $i++) { $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); } return join $delimiter, @fixedaddrs; } } # CPU profile reader sub ReadCPUProfile { my $prog = shift; my $fname = shift; # just used for logging local *PROFILE = shift; my $version; my $period; my $i; my $profile = {}; my $pcs = {}; # Parse string into array of slots. my $slots = CpuProfileStream->new(*PROFILE, $fname); # Read header. The current header version is a 5-element structure # containing: # 0: header count (always 0) # 1: header "words" (after this one: 3) # 2: format version (0) # 3: sampling period (usec) # 4: unused padding (always 0) if ($slots->get(0) != 0 ) { error("$fname: not a profile file, or old format profile file\n"); } $i = 2 + $slots->get(1); $version = $slots->get(2); $period = $slots->get(3); # Do some sanity checking on these header values. if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { error("$fname: not a profile file, or corrupted profile file\n"); } # Parse profile while ($slots->get($i) != -1) { my $n = $slots->get($i++); my $d = $slots->get($i++); if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? my $addr = sprintf("0%o", $i * ($address_length == 8 ? 
4 : 8)); print STDERR "At index $i (address $addr):\n"; error("$fname: stack trace depth >= 2**16\n"); } if ($slots->get($i) == 0) { # End of profile data marker $i += $d; last; } # Make key out of the stack entries my @k = (); for (my $j = 0; $j < $d; $j++) { my $pc = $slots->get($i+$j); # Subtract one from caller pc so we map back to call instr. $pc--; $pc = sprintf("%0*x", $address_length, $pc); $pcs->{$pc} = 1; push @k, $pc; } AddEntry($profile, (join "\n", @k), $n); $i += $d; } # Parse map my $map = ''; seek(PROFILE, $i * 4, 0); read(PROFILE, $map, (stat PROFILE)[7]); my $r = {}; $r->{version} = $version; $r->{period} = $period; $r->{profile} = $profile; $r->{libs} = ParseLibraries($prog, $map, $pcs); $r->{pcs} = $pcs; return $r; } sub HeapProfileIndex { my $index = 1; if ($main::opt_inuse_space) { $index = 1; } elsif ($main::opt_inuse_objects) { $index = 0; } elsif ($main::opt_alloc_space) { $index = 3; } elsif ($main::opt_alloc_objects) { $index = 2; } return $index; } sub ReadMappedLibraries { my $fh = shift; my $map = ""; # Read the /proc/self/maps data while (<$fh>) { s/\r//g; # turn windows-looking lines into unix-looking lines $map .= $_; } return $map; } sub ReadMemoryMap { my $fh = shift; my $map = ""; # Read /proc/self/maps data as formatted by DumpAddressMap() my $buildvar = ""; while (<$fh>) { s/\r//g; # turn windows-looking lines into unix-looking lines # Parse "build=" specification if supplied if (m/^\s*build=(.*)\n/) { $buildvar = $1; } # Expand "$build" variable if available $_ =~ s/\$build\b/$buildvar/g; $map .= $_; } return $map; } sub AdjustSamples { my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_; if ($sample_adjustment) { if ($sampling_algorithm == 2) { # Remote-heap version 2 # The sampling frequency is the rate of a Poisson process. # This means that the probability of sampling an allocation of # size X with sampling rate Y is 1 - exp(-X/Y) if ($n1 != 0) { my $ratio = (($s1*1.0)/$n1)/($sample_adjustment); my $scale_factor = 1/(1 - exp(-$ratio)); $n1 *= $scale_factor; $s1 *= $scale_factor; } if ($n2 != 0) { my $ratio = (($s2*1.0)/$n2)/($sample_adjustment); my $scale_factor = 1/(1 - exp(-$ratio)); $n2 *= $scale_factor; $s2 *= $scale_factor; } } else { # Remote-heap version 1 my $ratio; $ratio = (($s1*1.0)/$n1)/($sample_adjustment); if ($ratio < 1) { $n1 /= $ratio; $s1 /= $ratio; } $ratio = (($s2*1.0)/$n2)/($sample_adjustment); if ($ratio < 1) { $n2 /= $ratio; $s2 /= $ratio; } } } return ($n1, $s1, $n2, $s2); } sub ReadHeapProfile { my $prog = shift; local *PROFILE = shift; my $header = shift; my $index = HeapProfileIndex(); # Find the type of this profile. The header line looks like: # heap profile: 1246: 8800744 [ 1246: 8800744] @ <heap-url>/266053 # There are two pairs <count: size>, the first inuse objects/space, and the # second allocated objects/space. This is followed optionally by a profile # type, and if that is present, optionally by a sampling frequency. # For remote heap profiles (v1): # The interpretation of the sampling frequency is that the profiler, for # each sample, calculates a uniformly distributed random integer less than # the given value, and records the next sample after that many bytes have # been allocated. Therefore, the expected sample interval is half of the # given frequency. By default, if not specified, the expected sample # interval is 128KB. Only remote-heap-page profiles are adjusted for # sample size. # For remote heap profiles (v2): # The sampling frequency is the rate of a Poisson process. This means that # the probability of sampling an allocation of size X with sampling rate Y # is 1 - exp(-X/Y)
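# (Worked example of the v2 correction: with the default rate Y = 524288 bytes and an average object size s/n = 4096 bytes, AdjustSamples scales each sample by 1/(1 - exp(-4096/524288)), roughly 128.5 -- close to the naive 524288/4096 = 128. Illustrative numbers only.)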
# For version 2, a typical header line might look like this: # heap profile: 1922: 127792360 [ 1922: 127792360] @ _v2/524288 # the trailing number (524288) is the sampling rate. (Version 1 showed # double the 'rate' here) my $sampling_algorithm = 0; my $sample_adjustment = 0; chomp($header); my $type = "unknown"; if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") { if (defined($6) && ($6 ne '')) { $type = $6; my $sample_period = $8; # $type is "heapprofile" for profiles generated by the # heap-profiler, and either "heap" or "heap_v2" for profiles # generated by sampling directly within tcmalloc. It can also # be "growth" for heap-growth profiles. The first is typically # found for profiles generated locally, and the others for # remote profiles. if (($type eq "heapprofile") || ($type !~ /heap/) ) { # No need to adjust for the sampling rate with heap-profiler-derived data $sampling_algorithm = 0; } elsif ($type =~ /_v2/) { $sampling_algorithm = 2; # version 2 sampling if (defined($sample_period) && ($sample_period ne '')) { $sample_adjustment = int($sample_period); } } else { $sampling_algorithm = 1; # version 1 sampling if (defined($sample_period) && ($sample_period ne '')) { $sample_adjustment = int($sample_period)/2; } } } else { # We detect whether or not this is a remote-heap profile by checking # that the total-allocated stats ($n2,$s2) are exactly the # same as the in-use stats ($n1,$s1). It is remotely conceivable # that a non-remote-heap profile may pass this check, but it is hard # to imagine how that could happen. # In this case it's so old it's guaranteed to be remote-heap version 1. my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); if (($n1 == $n2) && ($s1 == $s2)) { # This is likely to be a remote-heap based sample profile $sampling_algorithm = 1; } } } if ($sampling_algorithm > 0) { # For remote-heap generated profiles, adjust the counts and sizes to # account for the sample rate (we sample once every 128KB by default). if ($sample_adjustment == 0) { # Turn on profile adjustment. $sample_adjustment = 128*1024; print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n"; } else { printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n", $sample_adjustment); } if ($sampling_algorithm > 1) { # We don't bother printing anything for the original version (version 1) printf STDERR "Heap version $sampling_algorithm\n"; } } my $profile = {}; my $pcs = {}; my $map = ""; while (<PROFILE>) { s/\r//g; # turn windows-looking lines into unix-looking lines if (/^MAPPED_LIBRARIES:/) { $map .= ReadMappedLibraries(*PROFILE); last; } if (/^--- Memory map:/) { $map .= ReadMemoryMap(*PROFILE); last; } # Read entry of the form: # <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
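# (Illustrative entry, not from a real dump: "12: 49152 [ 64: 262144] @ 0x4005e2 0x4007f1 0x7f3a0d12", i.e. 12 live objects holding 49152 bytes at that call stack, 64 objects / 262144 bytes ever allocated there.)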
s/^\s*//; s/\s*$//; if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) { my $stack = $5; my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2); AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); } } my $r = {}; $r->{version} = "heap"; $r->{period} = 1; $r->{profile} = $profile; $r->{libs} = ParseLibraries($prog, $map, $pcs); $r->{pcs} = $pcs; return $r; } sub ReadThreadedHeapProfile { my ($prog, $fname, $header) = @_; my $index = HeapProfileIndex(); my $sampling_algorithm = 0; my $sample_adjustment = 0; chomp($header); my $type = "unknown"; # Assuming a very specific type of header for now. if ($header =~ m"^heap_v2/(\d+)") { $type = "_v2"; $sampling_algorithm = 2; $sample_adjustment = int($1); } if ($type ne "_v2" || !defined($sample_adjustment)) { die "Threaded heap profiles require v2 sampling with a sample rate\n"; } my $profile = {}; my $thread_profiles = {}; my $pcs = {}; my $map = ""; my $stack = ""; while (<PROFILE>) { s/\r//g; if (/^MAPPED_LIBRARIES:/) { $map .= ReadMappedLibraries(*PROFILE); last; } if (/^--- Memory map:/) { $map .= ReadMemoryMap(*PROFILE); last; } # Read entry of the form: # @ a1 a2 ... an # t*: <count1>: <bytes1> [<count2>: <bytes2>] # t1: <count1>: <bytes1> [<count2>: <bytes2>] # ... # tn: <count1>: <bytes1> [<count2>: <bytes2>] s/^\s*//; s/\s*$//; if (m/^@\s+(.*)$/) { $stack = $1; } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) { if ($stack eq "") { # Still in the header, so this is just a per-thread summary. next; } my $thread = $2; my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6); my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2); if ($thread eq "*") { AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); } else { if (!exists($thread_profiles->{$thread})) { $thread_profiles->{$thread} = {}; } AddEntries($thread_profiles->{$thread}, $pcs, FixCallerAddresses($stack), $counts[$index]); } } } my $r = {}; $r->{version} = "heap"; $r->{period} = 1; $r->{profile} = $profile; $r->{threads} = $thread_profiles; $r->{libs} = ParseLibraries($prog, $map, $pcs); $r->{pcs} = $pcs; return $r; } sub ReadSynchProfile { my $prog = shift; local *PROFILE = shift; my $header = shift; my $map = ''; my $profile = {}; my $pcs = {}; my $sampling_period = 1; my $cyclespernanosec = 2.8; # Default assumption for old binaries my $seen_clockrate = 0; my $line; my $index = 0; if ($main::opt_total_delay) { $index = 0; } elsif ($main::opt_contentions) { $index = 1; } elsif ($main::opt_mean_delay) { $index = 2; } while ( $line = <PROFILE> ) { $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) { my ($cycles, $count, $stack) = ($1, $2, $3); # Convert cycles to nanoseconds $cycles /= $cyclespernanosec; # Adjust for sampling done by application $cycles *= $sampling_period; $count *= $sampling_period; my @values = ($cycles, $count, $cycles / $count); AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]); } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ || $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) { my ($cycles, $stack) = ($1, $2); if ($cycles !~ /^\d+$/) { next; } # Convert cycles to nanoseconds $cycles /= $cyclespernanosec; # Adjust for sampling done by application $cycles *= $sampling_period; AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles); } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) { my ($variable, $value) = ($1,$2); for ($variable, $value) { s/^\s+//; s/\s+$//; } if ($variable eq "cycles/second") {
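# e.g. "cycles/second = 2800000000" (a 2.8 GHz clock) gives $cyclespernanosec = 2.8, which the branches above use to convert cycle counts into nanoseconds.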
$cyclespernanosec = $value / 1e9; $seen_clockrate = 1; } elsif ($variable eq "sampling period") { $sampling_period = $value; } elsif ($variable eq "ms since reset") { # Currently nothing is done with this value in jeprof # So we just silently ignore it for now } elsif ($variable eq "discarded samples") { # Currently nothing is done with this value in jeprof # So we just silently ignore it for now } else { printf STDERR ("Ignoring unknown variable in /contention output: " . "'%s' = '%s'\n",$variable,$value); } } else { # Memory map entry $map .= $line; } } if (!$seen_clockrate) { printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n", $cyclespernanosec); } my $r = {}; $r->{version} = 0; $r->{period} = $sampling_period; $r->{profile} = $profile; $r->{libs} = ParseLibraries($prog, $map, $pcs); $r->{pcs} = $pcs; return $r; } # Given a hex value in the form "0x1abcd" or "1abcd", return either # "0001abcd" or "000000000001abcd", depending on the current (global) # address length. sub HexExtend { my $addr = shift; $addr =~ s/^(0x)?0*//; my $zeros_needed = $address_length - length($addr); if ($zeros_needed < 0) { printf STDERR "Warning: address $addr is longer than address length $address_length\n"; return $addr; } return ("0" x $zeros_needed) . $addr; } ##### Symbol extraction ##### # Aggressively search the lib_prefix values for the given library # If all else fails, just return the name of the library unmodified. # If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so" # it will search the following locations in this order, until it finds a file: # /my/path/lib/dir/mylib.so # /other/path/lib/dir/mylib.so # /my/path/dir/mylib.so # /other/path/dir/mylib.so # /my/path/mylib.so # /other/path/mylib.so # /lib/dir/mylib.so (returned as last resort) sub FindLibrary { my $file = shift; my $suffix = $file; # Search for the library as described above do { foreach my $prefix (@prefix_list) { my $fullpath = $prefix . $suffix; if (-e $fullpath) { return $fullpath; } } } while ($suffix =~ s|^/[^/]+/|/|); return $file; } # Return path to library with debugging symbols. # For libc libraries, the copy in /usr/lib/debug contains debugging symbols sub DebuggingLibrary { my $file = shift; if ($file =~ m|^/|) { if (-f "/usr/lib/debug$file") { return "/usr/lib/debug$file"; } elsif (-f "/usr/lib/debug$file.debug") { return "/usr/lib/debug$file.debug"; } } return undef; } # Parse text section header of a library using objdump sub ParseTextSectionHeaderFromObjdump { my $lib = shift; my $size = undef; my $vma; my $file_offset; # Get objdump output from the library file to figure out how to # map between mapped addresses and addresses in the library. my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib); open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); while (<OBJDUMP>) { s/\r//g; # turn windows-looking lines into unix-looking lines # Idx Name Size VMA LMA File off Algn # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4 # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file # offset may still be 8. But AddressSub below will still handle that.
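# (From the sample line above, split yields $x[1] = ".text", $x[2] = size 00104b2c, $x[3] = VMA 420156f0 and $x[5] = file offset 000156f0 -- the columns picked out below.)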
my @x = split; if (($#x >= 6) && ($x[1] eq '.text')) { $size = $x[2]; $vma = $x[3]; $file_offset = $x[5]; last; } } close(OBJDUMP); if (!defined($size)) { return undef; } my $r = {}; $r->{size} = $size; $r->{vma} = $vma; $r->{file_offset} = $file_offset; return $r; } # Parse text section header of a library using otool (on OS X) sub ParseTextSectionHeaderFromOtool { my $lib = shift; my $size = undef; my $vma = undef; my $file_offset = undef; # Get otool output from the library file to figure out how to # map between mapped addresses and addresses in the library. my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib); open(OTOOL, "$command |") || error("$command: $!\n"); my $cmd = ""; my $sectname = ""; my $segname = ""; foreach my $line (<OTOOL>) { $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines # Load command <#> # cmd LC_SEGMENT # [...] # Section # sectname __text # segname __TEXT # addr 0x000009f8 # size 0x00018b9e # offset 2552 # align 2^2 (4) # We will need to strip off the leading 0x from the hex addresses, # and convert the offset into hex. if ($line =~ /Load command/) { $cmd = ""; $sectname = ""; $segname = ""; } elsif ($line =~ /Section/) { $sectname = ""; $segname = ""; } elsif ($line =~ /cmd (\w+)/) { $cmd = $1; } elsif ($line =~ /sectname (\w+)/) { $sectname = $1; } elsif ($line =~ /segname (\w+)/) { $segname = $1; } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") && $sectname eq "__text" && $segname eq "__TEXT")) { next; } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) { $vma = $1; } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) { $size = $1; } elsif ($line =~ /\boffset ([0-9]+)/) { $file_offset = sprintf("%016x", $1); } if (defined($vma) && defined($size) && defined($file_offset)) { last; } } close(OTOOL); if (!defined($vma) || !defined($size) || !defined($file_offset)) { return undef; } my $r = {}; $r->{size} = $size; $r->{vma} = $vma; $r->{file_offset} = $file_offset; return $r; } sub ParseTextSectionHeader { # obj_tool_map("otool") is only defined if we're in a Mach-O environment if (defined($obj_tool_map{"otool"})) { my $r = ParseTextSectionHeaderFromOtool(@_); if (defined($r)){ return $r; } } # If otool doesn't work, or we don't have it, fall back to objdump return ParseTextSectionHeaderFromObjdump(@_); } # Split /proc/pid/maps dump into a list of libraries sub ParseLibraries { return if $main::use_symbol_page; # We don't need libraries info. my $prog = Cwd::abs_path(shift); my $map = shift; my $pcs = shift; my $result = []; my $h = "[a-f0-9]+"; my $zero_offset = HexExtend("0"); my $buildvar = ""; foreach my $l (split("\n", $map)) { if ($l =~ m/^\s*build=(.*)$/) { $buildvar = $1; } my $start; my $finish; my $offset; my $lib; if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) { # Full line from /proc/self/maps. Example: # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so $start = HexExtend($1); $finish = HexExtend($2); $offset = HexExtend($3); $lib = $4; $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) { # Cooked line from DumpAddressMap. Example: # 40000000-40015000: /lib/ld-2.3.2.so $start = HexExtend($1); $finish = HexExtend($2); $offset = $zero_offset; $lib = $3; } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) { # PIEs and address space randomization do not play well with our # default assumption that main executable is at lowest # addresses.
So we're detecting main executable in # /proc/self/maps as well. $start = HexExtend($1); $finish = HexExtend($2); $offset = HexExtend($3); $lib = $4; $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths } # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in # function procfs_doprocmap (sys/fs/procfs/procfs_map.c) # # Example: # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s # o.1 NCH -1 elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) { $start = HexExtend($1); $finish = HexExtend($2); $offset = $zero_offset; $lib = FindLibrary($5); } else { next; } # Expand "$build" variable if available $lib =~ s/\$build\b/$buildvar/g; $lib = FindLibrary($lib); # Check for pre-relocated libraries, which use pre-relocated symbol tables # and thus require adjusting the offset that we'll use to translate # VM addresses into symbol table addresses. # Only do this if we're not going to fetch the symbol table from a # debugging copy of the library. if (!DebuggingLibrary($lib)) { my $text = ParseTextSectionHeader($lib); if (defined($text)) { my $vma_offset = AddressSub($text->{vma}, $text->{file_offset}); $offset = AddressAdd($offset, $vma_offset); } } if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; } push(@{$result}, [$lib, $start, $finish, $offset]); } # Append special entry for additional library (not relocated) if ($main::opt_lib ne "") { my $text = ParseTextSectionHeader($main::opt_lib); if (defined($text)) { my $start = $text->{vma}; my $finish = AddressAdd($start, $text->{size}); push(@{$result}, [$main::opt_lib, $start, $finish, $start]); } } # Append special entry for the main program. This covers # 0..max_pc_value_seen, so that we assume pc values not found in one # of the library ranges will be treated as coming from the main # program binary. my $min_pc = HexExtend("0"); my $max_pc = $min_pc; # find the maximal PC value in any sample foreach my $pc (keys(%{$pcs})) { if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); } } push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]); return $result; } # Add two hex addresses of length $address_length. # Run jeprof --test for unit test if this is changed. sub AddressAdd { my $addr1 = shift; my $addr2 = shift; my $sum; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16); return sprintf("%08x", $sum); } else { # Do the addition in 7-nibble chunks to trivialize carry handling. if ($main::opt_debug and $main::opt_test) { print STDERR "AddressAdd $addr1 + $addr2 = "; } my $a1 = substr($addr1,-7); $addr1 = substr($addr1,0,-7); my $a2 = substr($addr2,-7); $addr2 = substr($addr2,0,-7); $sum = hex($a1) + hex($a2); my $c = 0; if ($sum > 0xfffffff) { $c = 1; $sum -= 0x10000000; } my $r = sprintf("%07x", $sum); $a1 = substr($addr1,-7); $addr1 = substr($addr1,0,-7); $a2 = substr($addr2,-7); $addr2 = substr($addr2,0,-7); $sum = hex($a1) + hex($a2) + $c; $c = 0; if ($sum > 0xfffffff) { $c = 1; $sum -= 0x10000000; } $r = sprintf("%07x", $sum) . $r; $sum = hex($addr1) + hex($addr2) + $c; if ($sum > 0xff) { $sum -= 0x100; } $r = sprintf("%02x", $sum) . $r; if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; } return $r; } } # Subtract two hex addresses of length $address_length. # Run jeprof --test for unit test if this is changed. 
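# (e.g., with $address_length == 16, AddressSub("00000001000000a0", "0000000000000010") eq "0000000100000090"; the borrow propagates through the 7-nibble chunks.)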
sub AddressSub { my $addr1 = shift; my $addr2 = shift; my $diff; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); return sprintf("%08x", $diff); } else { # Do the addition in 7-nibble chunks to trivialize borrow handling. # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } my $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); my $a2 = hex(substr($addr2,-7)); $addr2 = substr($addr2,0,-7); my $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; my $r = sprintf("%07x", $diff); $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); $a2 = hex(substr($addr2,-7)) + $b; $addr2 = substr($addr2,0,-7); $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; $r = sprintf("%07x", $diff) . $r; $a1 = hex($addr1); $a2 = hex($addr2) + $b; if ($a2 > $a1) { $a1 += 0x100; } $diff = $a1 - $a2; $r = sprintf("%02x", $diff) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Increment a hex addresses of length $address_length. # Run jeprof --test for unit test if this is changed. sub AddressInc { my $addr = shift; my $sum; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $sum = (hex($addr)+1) % (0x10000000 * 16); return sprintf("%08x", $sum); } else { # Do the addition in 7-nibble chunks to trivialize carry handling. # We are always doing this to step through the addresses in a function, # and will almost never overflow the first chunk, so we check for this # case and exit early. # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; } my $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; my $r = sprintf("%07x", $sum); if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "0000000"; } $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; $r = sprintf("%07x", $sum) . $r; if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "00000000000000"; } $sum = hex($addr) + 1; if ($sum > 0xff) { $sum -= 0x100; } $r = sprintf("%02x", $sum) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Extract symbols for all PC values found in profile sub ExtractSymbols { my $libs = shift; my $pcset = shift; my $symbols = {}; # Map each PC value to the containing library. To make this faster, # we sort libraries by their starting pc value (highest first), and # advance through the libraries as we advance the pc. Sometimes the # addresses of libraries may overlap with the addresses of the main # binary, so to make sure the libraries 'win', we iterate over the # libraries in reverse order (which assumes the binary doesn't start # in the middle of a library, which seems a fair assumption). my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { my $libname = $lib->[0]; my $start = $lib->[1]; my $finish = $lib->[2]; my $offset = $lib->[3]; # Use debug library if it exists my $debug_libname = DebuggingLibrary($libname); if ($debug_libname) { $libname = $debug_libname; } # Get list of pcs that belong in this library. my $contained = []; my ($start_pc_index, $finish_pc_index); # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index]. 
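# (@pcs is sorted ascending, so the two backward scans below bracket exactly the PCs that fall within [$start, $finish] for this library.)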
for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; $finish_pc_index--) { last if $pcs[$finish_pc_index - 1] le $finish; } # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; $start_pc_index--) { last if $pcs[$start_pc_index - 1] lt $start; } # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, # in case there are overlaps in libraries and the main binary. @{$contained} = splice(@pcs, $start_pc_index, $finish_pc_index - $start_pc_index); # Map to symbols MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); } return $symbols; } # Map list of PC values to symbols for a given image sub MapToSymbols { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; my $debug = 0; # Ignore empty binaries if ($#{$pclist} < 0) { return; } # Figure out the addr2line command to use my $addr2line = $obj_tool_map{"addr2line"}; my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); if (exists $obj_tool_map{"addr2line_pdb"}) { $addr2line = $obj_tool_map{"addr2line_pdb"}; $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); } # If "addr2line" isn't installed on the system at all, just use # nm to get what info we can (function names, but not line numbers). if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { MapSymbolsWithNM($image, $offset, $pclist, $symbols); return; } # "addr2line -i" can produce a variable number of lines per input # address, with no separator that allows us to tell when data for # the next address starts. So we find the address for a special # symbol (_fini) and interleave this address between all real # addresses passed to addr2line. The name of this special symbol # can then be used as a separator. $sep_address = undef; # May be filled in by MapSymbolsWithNM() my $nm_symbols = {}; MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); if (defined($sep_address)) { # Only add " -i" to addr2line if the binary supports it. # addr2line --help returns 0, but not if it sees an unknown flag first. if (system("$cmd -i --help >$dev_null 2>&1") == 0) { $cmd .= " -i"; } else { $sep_address = undef; # no need for sep_address if we don't support -i } } # Make file with all PC values with intervening 'sep_address' so # that we can reliably detect the end of inlined function list open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); if ($debug) { print("---- $image ---\n"); } for (my $i = 0; $i <= $#{$pclist}; $i++) { # addr2line always reads hex addresses, and does not need '0x' prefix. if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); if (defined($sep_address)) { printf ADDRESSES ("%s\n", $sep_address); } } close(ADDRESSES); if ($debug) { print("----\n"); system("cat", $main::tmpfile_sym); print("----\n"); system("$cmd < " . ShellEscape($main::tmpfile_sym)); print("----\n"); } open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . 
" |") || error("$cmd: $!\n"); my $count = 0; # Index in pclist while () { # Read fullfunction and filelineinfo from next pair of lines s/\r?\n$//g; my $fullfunction = $_; $_ = ; s/\r?\n$//g; my $filelinenum = $_; if (defined($sep_address) && $fullfunction eq $sep_symbol) { # Terminating marker for data for this address $count++; next; } $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths my $pcstr = $pclist->[$count]; my $function = ShortFunctionName($fullfunction); my $nms = $nm_symbols->{$pcstr}; if (defined($nms)) { if ($fullfunction eq '??') { # nm found a symbol for us. $function = $nms->[0]; $fullfunction = $nms->[2]; } else { # MapSymbolsWithNM tags each routine with its starting address, # useful in case the image has multiple occurrences of this # routine. (It uses a syntax that resembles template paramters, # that are automatically stripped out by ShortFunctionName().) # addr2line does not provide the same information. So we check # if nm disambiguated our symbol, and if so take the annotated # (nm) version of the routine-name. TODO(csilvers): this won't # catch overloaded, inlined symbols, which nm doesn't see. # Better would be to do a check similar to nm's, in this fn. if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn $function = $nms->[0]; $fullfunction = $nms->[2]; } } } # Prepend to accumulated symbols for pcstr # (so that caller comes before callee) my $sym = $symbols->{$pcstr}; if (!defined($sym)) { $sym = []; $symbols->{$pcstr} = $sym; } unshift(@{$sym}, $function, $filelinenum, $fullfunction); if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } if (!defined($sep_address)) { # Inlining is off, so this entry ends immediately $count++; } } close(SYMBOLS); } # Use nm to map the list of referenced PCs to symbols. Return true iff we # are able to read procedure information via nm. sub MapSymbolsWithNM { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; # Get nm output sorted by increasing address my $symbol_table = GetProcedureBoundaries($image, "."); if (!%{$symbol_table}) { return 0; } # Start addresses are already the right length (8 or 16 hex digits). my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } keys(%{$symbol_table}); if ($#names < 0) { # No symbols: just use addresses foreach my $pc (@{$pclist}) { my $pcstr = "0x" . $pc; $symbols->{$pc} = [$pcstr, "?", $pcstr]; } return 0; } # Sort addresses so we can do a join against nm output my $index = 0; my $fullname = $names[0]; my $name = ShortFunctionName($fullname); foreach my $pc (sort { $a cmp $b } @{$pclist}) { # Adjust for mapped offset my $mpc = AddressSub($pc, $offset); while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ $index++; $fullname = $names[$index]; $name = ShortFunctionName($fullname); } if ($mpc lt $symbol_table->{$fullname}->[1]) { $symbols->{$pc} = [$name, "?", $fullname]; } else { my $pcstr = "0x" . 
sub ShortFunctionName { my $function = shift; while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type return $function; } # Trim overly long symbols found in disassembler output sub CleanDisassembly { my $d = shift; while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax) while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments return $d; } # Clean file name for display sub CleanFileName { my ($f) = @_; $f =~ s|^/proc/self/cwd/||; $f =~ s|^\./||; return $f; } # Make address relative to section and clean up for display sub UnparseAddress { my ($offset, $address) = @_; $address = AddressSub($address, $offset); $address =~ s/^0x//; $address =~ s/^0*//; return $address; } ##### Miscellaneous ##### # Find the right versions of the above object tools to use. The # argument is the program file being analyzed, and should be an ELF # 32-bit or ELF 64-bit executable file. The location of the tools # is determined by considering the following options in this order: # 1) --tools option, if set # 2) JEPROF_TOOLS environment variable, if set # 3) the environment sub ConfigureObjTools { my $prog_file = shift; # Check for the existence of $prog_file because /usr/bin/file does not # predictably return error status in prod. (-e $prog_file) || error("$prog_file does not exist.\n"); my $file_type = undef; if (-e "/usr/bin/file") { # Follow symlinks (at least for systems where "file" supports that). my $escaped_prog_file = ShellEscape($prog_file); $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || /usr/bin/file $escaped_prog_file`; } elsif ($^O eq "MSWin32") { $file_type = "MS Windows"; } else { print STDERR "WARNING: Can't determine the file type of $prog_file\n"; } if ($file_type =~ /64-bit/) { # Change $address_length to 16 if the program file is ELF 64-bit. # We can't detect this from many (most?) heap or lock contention # profiles, since the actual addresses referenced are generally in low # memory even for 64-bit programs. $address_length = 16; } if ($file_type =~ /MS Windows/) { # For windows, we provide a version of nm and addr2line as part of # the opensource release, which is capable of parsing # Windows-style PDB executables. It should live in the path, or # in the same directory as jeprof. $obj_tool_map{"nm_pdb"} = "nm-pdb"; $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb"; } if ($file_type =~ /Mach-O/) { # OS X uses otool to examine Mach-O files, rather than objdump. $obj_tool_map{"otool"} = "otool"; $obj_tool_map{"addr2line"} = "false"; # no addr2line $obj_tool_map{"objdump"} = "false"; # no objdump } # Go fill in %obj_tool_map with the pathnames to use: foreach my $tool (keys %obj_tool_map) { $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool}); } } # Returns the path of a caller-specified object tool. If --tools or # JEPROF_TOOLS are specified, then returns the full path to the tool # with that prefix. Otherwise, returns the path unmodified (which # means we will look for it on PATH). sub ConfigureTool { my $tool = shift; my $path; # --tools (or $JEPROF_TOOLS) is a comma separated list, where each # item is either a) a pathname prefix, or b) a map of the form # <tool>:<fullpath>. First we look for an entry of type (b) for our # tool. If one is found, we use it. Otherwise, we consider all the # pathname prefixes in turn, until one yields an existing file. If # none does, we use a default path.
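# (e.g. --tools=/opt/cross/bin/arm-none-eabi-,nm:/usr/local/bin/gnm uses /usr/local/bin/gnm for nm but tries the /opt/cross/bin/arm-none-eabi- prefix for every other tool. Hypothetical paths.)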
my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || ""; if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) { $path = $2; # TODO(csilvers): sanity-check that $path exists? Hard if it's relative. } elsif ($tools ne '') { foreach my $prefix (split(',', $tools)) { next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list if (-x $prefix . $tool) { $path = $prefix . $tool; last; } } if (!$path) { error("No '$tool' found with prefix specified by " . "--tools (or \$JEPROF_TOOLS) '$tools'\n"); } } else { # ... otherwise use the version that exists in the same directory as # jeprof. If there's nothing there, use $PATH. $0 =~ m,[^/]*$,; # this is everything after the last slash my $dirname = $`; # this is everything up to and including the last slash if (-x "$dirname$tool") { $path = "$dirname$tool"; } else { $path = $tool; } } if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; } return $path; } sub ShellEscape { my @escaped_words = (); foreach my $word (@_) { my $escaped_word = $word; if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist $escaped_word =~ s/'/'\\''/g; $escaped_word = "'$escaped_word'"; } push(@escaped_words, $escaped_word); } return join(" ", @escaped_words); } sub cleanup { unlink($main::tmpfile_sym); unlink(keys %main::tempnames); # We leave any collected profiles in $HOME/jeprof in case the user wants # to look at them later. We print a message informing them of this. if ((scalar(@main::profile_files) > 0) && defined($main::collected_profile)) { if (scalar(@main::profile_files) == 1) { print STDERR "Dynamically gathered profile is in $main::collected_profile\n"; } print STDERR "If you want to investigate this profile further, you can do:\n"; print STDERR "\n"; print STDERR " jeprof \\\n"; print STDERR " $main::prog \\\n"; print STDERR " $main::collected_profile\n"; print STDERR "\n"; } } sub sighandler { cleanup(); exit(1); } sub error { my $msg = shift; print STDERR $msg; cleanup(); exit(1); } # Run $nm_command and get all the resulting procedure boundaries whose # names match "$regexp" and returns them in a hashtable mapping from # procedure name to a two-element vector of [start address, end address] sub GetProcedureBoundariesViaNm { my $escaped_nm_command = shift; # shell-escaped my $regexp = shift; my $symbol_table = {}; open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n"); my $last_start = "0"; my $routine = ""; while (<NM>) { s/\r//g; # turn windows-looking lines into unix-looking lines if (m/^\s*([0-9a-f]+) (.) (..*)/) { my $start_val = $1; my $type = $2; my $this_routine = $3; # It's possible for two symbols to share the same address, if # one is a zero-length variable (like __start_google_malloc) or # one symbol is a weak alias to another (like __libc_malloc). # In such cases, we want to ignore all values except for the # actual symbol, which in nm-speak has type "T". The logic # below does this, though it's a bit tricky: what happens when # we have a series of lines with the same address, is the first # one gets queued up to be processed. However, it won't # *actually* be processed until later, when we read a line with # a different address. That means that as long as we're reading # lines with the same address, we have a chance to replace that # item in the queue, which we do whenever we see a 'T' entry -- # that is, a line with type 'T'.
If we never see a 'T' entry, # we'll just go ahead and process the first entry (which never # got touched in the queue), and ignore the others. if ($start_val eq $last_start && $type =~ /t/i) { # We are the 'T' symbol at this address, replace previous symbol. $routine = $this_routine; next; } elsif ($start_val eq $last_start) { # We're not the 'T' symbol at this address, so ignore us. next; } if ($this_routine eq $sep_symbol) { $sep_address = HexExtend($start_val); } # Tag this routine with the starting address in case the image # has multiple occurrences of this routine. We use a syntax # that resembles template parameters that are automatically # stripped out by ShortFunctionName() $this_routine .= "<$start_val>"; if (defined($routine) && $routine =~ m/$regexp/) { $symbol_table->{$routine} = [HexExtend($last_start), HexExtend($start_val)]; } $last_start = $start_val; $routine = $this_routine; } elsif (m/^Loaded image name: (.+)/) { # The win32 nm workalike emits information about the binary it is using. if ($main::opt_debug) { print STDERR "Using Image $1\n"; } } elsif (m/^PDB file name: (.+)/) { # The win32 nm workalike emits information about the pdb it is using. if ($main::opt_debug) { print STDERR "Using PDB $1\n"; } } } close(NM); # Handle the last line in the nm output. Unfortunately, we don't know # how big this last symbol is, because we don't know how big the file # is. For now, we just give it a size of 0. # TODO(csilvers): do better here. if (defined($routine) && $routine =~ m/$regexp/) { $symbol_table->{$routine} = [HexExtend($last_start), HexExtend($last_start)]; } return $symbol_table; } # Gets the procedure boundaries for all routines in "$image" whose names # match "$regexp" and returns them in a hashtable mapping from procedure # name to a two-element vector of [start address, end address]. # Will return an empty map if nm is not installed or not working properly. sub GetProcedureBoundaries { my $image = shift; my $regexp = shift; # If $image doesn't start with /, then put ./ in front of it. This works # around an obnoxious bug in our probing of nm -f behavior. # "nm -f $image" is supposed to fail on GNU nm, but if: # # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND # b. you have a.out in your current directory (a not uncommon occurence) # # then "nm -f $image" succeeds because -f only looks at the first letter of # the argument, which looks valid because it's [BbSsPp], and then since # there's no image provided, it looks for a.out and finds it. # # This regex makes sure that $image starts with . or /, forcing the -f # parsing to fail since . and / are not valid formats. $image =~ s#^[^/]#./$&#; # For libc libraries, the copy in /usr/lib/debug contains debugging symbols my $debugging = DebuggingLibrary($image); if ($debugging) { $image = $debugging; } my $nm = $obj_tool_map{"nm"}; my $cppfilt = $obj_tool_map{"c++filt"}; # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm # binary doesn't support --demangle. In addition, for OS X we need # to use the -f flag to get 'flat' nm output (otherwise we don't sort # properly and get incorrect results). Unfortunately, GNU nm uses -f # in an incompatible way. So first we test whether our nm supports # --demangle and -f. my $demangle_flag = ""; my $cppfilt_flag = ""; my $to_devnull = ">$dev_null 2>&1"; if (system(ShellEscape($nm, "--demangle", "image") . 
$to_devnull) == 0) { # In this mode, we do "nm --demangle <image>" $demangle_flag = "--demangle"; $cppfilt_flag = ""; } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) { # In this mode, we do "nm <image> | c++filt" $cppfilt_flag = " | " . ShellEscape($cppfilt); }; my $flatten_flag = ""; if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) { $flatten_flag = "-f"; } # Finally, in the case $image isn't a debug library, we try again with # -D to at least get *exported* symbols. If we can't use --demangle, # we use c++filt instead, if it exists on this system. my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag, $image) . " 2>$dev_null $cppfilt_flag", ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag, $image) . " 2>$dev_null $cppfilt_flag", # 6nm is for Go binaries ShellEscape("6nm", "$image") . " 2>$dev_null | sort", ); # If the executable is an MS Windows PDB-format executable, we'll # have set up obj_tool_map("nm_pdb"). In this case, we actually # want to use both unix nm and windows-specific nm_pdb, since # PDB-format executables can apparently include dwarf .o files. if (exists $obj_tool_map{"nm_pdb"}) { push(@nm_commands, ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) . " 2>$dev_null"); } foreach my $nm_command (@nm_commands) { my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp); return $symbol_table if (%{$symbol_table}); } my $symbol_table = {}; return $symbol_table; } # The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings. # To make them more readable, we add underscores at interesting places. # This routine removes the underscores, producing the canonical representation # used by jeprof to represent addresses, particularly in the tested routines. sub CanonicalHex { my $arg = shift; return join '', (split '_',$arg); } # Unit test for AddressAdd: sub AddressAddUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressAdd ($row->[0], $row->[1]); if ($sum ne $row->[2]) { printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, $row->[0], $row->[1], $row->[2]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses.
$address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); my $expected = join '', (split '_',$row->[2]); if ($sum ne CanonicalHex($row->[2])) { printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, $row->[0], $row->[1], $row->[2]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressSub: sub AddressSubUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub ($row->[0], $row->[1]); if ($sum ne $row->[3]) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); if ($sum ne CanonicalHex($row->[3])) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressInc: sub AddressIncUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc ($row->[0]); if ($sum ne $row->[4]) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc (CanonicalHex($row->[0])); if ($sum ne CanonicalHex($row->[4])) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Driver for unit tests. # Currently just the address add/subtract/increment routines for 64-bit. 
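# Invoked via "jeprof --test"; exits non-zero if any add/sub/inc vector fails.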
sub RunUnitTests { my $error_count = 0; # This is a list of tuples [a, b, a+b, a-b, a+1] my $unit_test_data_8 = [ [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)], [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)], [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)], [qw(00000001 ffffffff 00000000 00000002 00000002)], [qw(00000001 fffffff0 fffffff1 00000011 00000002)], ]; my $unit_test_data_16 = [ # The implementation handles data in 7-nibble chunks, so those are the # interesting boundaries. [qw(aaaaaaaa 50505050 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)], [qw(50505050 aaaaaaaa 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)], [qw(ffffffff aaaaaaaa 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)], [qw(00000001 ffffffff 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)], [qw(00000001 fffffff0 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)], [qw(00_a00000a_aaaaaaa 50505050 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)], [qw(0f_fff0005_0505050 aaaaaaaa 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)], [qw(00_000000f_fffffff 01_800000a_aaaaaaa 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)], [qw(00_0000000_0000001 ff_fffffff_fffffff 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)], [qw(00_0000000_0000001 ff_fffffff_ffffff0 ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)], ]; $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16); $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16); $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16); if ($error_count > 0) { print STDERR $error_count, " errors: FAILED\n"; } else { print STDERR "PASS\n"; } exit ($error_count); } jemalloc-sys-0.3.2/jemalloc/build-aux/config.guess #! /bin/sh # Attempt to guess a canonical system name. # Copyright 1992-2016 Free Software Foundation, Inc. timestamp='2016-10-02' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>.
# # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess # # Please send patches to . me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright 1992-2016 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. 
# (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown case "${UNAME_SYSTEM}" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu eval $set_cc_for_build cat <<-EOF > $dummy.c #include <features.h> #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #else LIBC=gnu #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` ;; esac # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ /sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || \ echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; earmv*) arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` machine=${arch}${endian}-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. case "${UNAME_MACHINE_ARCH}" in earm*) os=netbsdelf ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # Determine ABI tags. case "${UNAME_MACHINE_ARCH}" in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
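# (e.g. machine=x86_64-unknown, os=netbsd, release=8.0 and an empty abi print "x86_64-unknown-netbsd8.0". Illustrative values.)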
echo "${machine}-${os}${release}${abi}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; *:Sortix:*:*) echo ${UNAME_MACHINE}-unknown-sortix exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") UNAME_MACHINE=alpha ;; "EV5 (21164)") UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH=x86_64 fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. 
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __cplusplus #include <stdio.h> /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include <sys/systemcfg.h> main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/lslpp ] ; then IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? ) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include <stdlib.h> #include <unistd.h> int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = hppa2.0w ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code.
GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH=hppa2.0w else HP_ARCH=hppa64 fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include <unistd.h> int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. */ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo
${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case ${UNAME_PROCESSOR} in amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW64*:*) echo ${UNAME_MACHINE}-pc-mingw64 exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; *:MSYS*:*) echo ${UNAME_MACHINE}-pc-msys exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; aarch64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-${LIBC} else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi else echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf fi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; cris:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; crisv32:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; e2k:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; frv:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; hexagon:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; k1om:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; mips64el:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-${LIBC} exit ;; or32:Linux:*:* | or1k*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; padre:Linux:*:*) echo sparc-unknown-linux-${LIBC} exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-${LIBC} exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; *) echo hppa-unknown-linux-${LIBC} ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-${LIBC} exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-${LIBC} exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-${LIBC} exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-${LIBC} exit ;; riscv32:Linux:*:* | riscv64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; tile*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-${LIBC} exit ;; x86_64:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. 
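# Aside on the mips/mips64 branch above: the CPU= lines are not shell # code; they survive $CC_FOR_BUILD -E only inside the #if arm matching # the target's endianness macros, and the surviving line is eval'ed, so # the CPU name is determined without running any target code. A minimal # stand-alone sketch of the same trick (hypothetical file name, any # compiler accepting -E should do): # $ printf '#ifdef __MIPSEL__\nCPU=mipsel\n#endif\n' > conftest.c # $ eval `$CC_FOR_BUILD -E conftest.c 2>/dev/null | grep '^CPU'`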
echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. echo ${UNAME_MACHINE}-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo ${UNAME_MACHINE}-unknown-stop exit ;; i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit ;; i*86:syllable:*:*) echo ${UNAME_MACHINE}-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit ;; i*86:*DOS:*:*) echo ${UNAME_MACHINE}-pc-msdosdjgpp exit ;; i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} else echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` echo ${UNAME_MACHINE}-pc-isc$UNAME_REL elif /bin/uname -X 2>/dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo ${UNAME_MACHINE}-pc-sco$UNAME_REL else echo ${UNAME_MACHINE}-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered.
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; SX-ACE:SUPER-UX:*:*) echo sxace-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown eval $set_cc_for_build if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi fi elif test "$UNAME_PROCESSOR" = i386 ; then # Avoid executing cc on OS X 10.9, as it ships with a stub # that puts up a graphical alert prompting to install # developer tools. Any system running Mac OS X 10.7 or # later (Darwin 11 and later) is required to have a 64-bit # processor. This is not true of the ARM version of Darwin # that Apple uses in portable devices. UNAME_PROCESSOR=x86_64 fi echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-?:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk${UNAME_RELEASE} exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
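# Aside on the Darwin branch above: bitness is probed without executing # target code by preprocessing a three-line program and grepping for a # marker that survives only when __LP64__ is defined (illustrative # transcript, hypothetical host): # $ (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | cc -E - | grep IS_64BIT_ARCH # IS_64BIT_ARCH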
if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; x86_64:VMkernel:*:*) echo ${UNAME_MACHINE}-unknown-esx exit ;; amd64:Isilon\ OneFS:*:*) echo x86_64-unknown-onefs exit ;; esac cat >&2 </dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: ��������������������������������������������������������������������������������������������jemalloc-sys-0.3.2/jemalloc/build-aux/config.sub����������������������������������������������������0100755�0000765�0000024�00000106763�13404213401�0020005�0����������������������������������������������������������������������������������������������������ustar�00����������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#! /bin/sh # Configuration validation subroutine script. # Copyright 1992-2016 Free Software Foundation, Inc. timestamp='2016-11-04' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . 
# # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright 1992-2016 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. echo $1 exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. 
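# Worked example (hypothetical invocation): for "x86_64-linux-gnu" the # sed below extracts maybe_os=linux-gnu, which matches linux-gnu*, so # os=-linux-gnu and basic_machine=x86_64; the CPU table further down # then supplies the default vendor, giving x86_64-pc-linux-gnu.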
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ kopensolaris*-gnu* | cloudabi*-eabi* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; android-linux) os=-linux-android basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown ;; *) basic_machine=`echo $1 | sed 's/-[^-]*$//'` if [ $basic_machine != $1 ] then os=`echo $1 | sed 's/.*-/-/'` else os=; fi ;; esac ### Let's recognize common machines as not being operating systems so ### that things like config.sub decstation-3100 work. We also ### recognize some manufacturers as not being operating systems, so we ### can provide default operating systems below. case $os in -sun*os*) # Prevent following clause from handling this invalid input. ;; -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -apple | -axis | -knuth | -cray | -microblaze*) os= basic_machine=$1 ;; -bluegene*) os=-cnk ;; -sim | -cisco | -oki | -wec | -winbond) os= basic_machine=$1 ;; -scout) ;; -wrs) os=-vxworks basic_machine=$1 ;; -chorusos*) os=-chorusos basic_machine=$1 ;; -chorusrdb) os=-chorusrdb basic_machine=$1 ;; -hiux*) os=-hiuxwe2 ;; -sco6) os=-sco5v6 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -udk*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -lynx*178) os=-lynxos178 ;; -lynx*5) os=-lynxos5 ;; -lynx*) os=-lynxos ;; -ptx*) basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` ;; -windowsnt*) os=`echo $os | sed -e 's/windowsnt/winnt/'` ;; -psos*) os=-psos ;; -mint | -mint[0-9]*) basic_machine=m68k-atari os=-mint ;; esac # Decode aliases for certain CPU-COMPANY combinations. case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 
1750a | 580 \ | a29k \ | aarch64 | aarch64_be \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arceb \ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ | avr | avr32 \ | ba \ | be32 | be64 \ | bfin \ | c4x | c8051 | clipper \ | d10v | d30v | dlx | dsp16xx \ | e2k | epiphany \ | fido | fr30 | frv | ft32 \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ | mips64octeon | mips64octeonel \ | mips64orion | mips64orionel \ | mips64r5900 | mips64r5900el \ | mips64vr | mips64vrel \ | mips64vr4100 | mips64vr4100el \ | mips64vr4300 | mips64vr4300el \ | mips64vr5000 | mips64vr5000el \ | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa32r6 | mipsisa32r6el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ | mipsisa64r6 | mipsisa64r6el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ | open8 | or1k | or1knd | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pru \ | pyramid \ | riscv32 | riscv64 \ | rl78 | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ | spu \ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ | visium \ | we32k \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; c54x) basic_machine=tic54x-unknown ;; c55x) basic_machine=tic55x-unknown ;; c6x) basic_machine=tic6x-unknown ;; leon|leon[3-9]) basic_machine=sparc-$basic_machine ;; m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown os=-none ;; m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) ;; ms1) basic_machine=mt-unknown ;; strongarm | thumb | xscale) basic_machine=arm-unknown ;; xgate) basic_machine=$basic_machine-unknown os=-none ;; xscaleeb) basic_machine=armeb-unknown ;; xscaleel) basic_machine=armel-unknown ;; # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) basic_machine=$basic_machine-pc ;; # Object if more than one company name word. *-*-*) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. 
580-* \ | a29k-* \ | aarch64-* | aarch64_be-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | ba-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ | c8051-* | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | e2k-* | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | hexagon-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ | mips64octeon-* | mips64octeonel-* \ | mips64orion-* | mips64orionel-* \ | mips64r5900-* | mips64r5900el-* \ | mips64vr-* | mips64vrel-* \ | mips64vr4100-* | mips64vr4100el-* \ | mips64vr4300-* | mips64vr4300el-* \ | mips64vr5000-* | mips64vr5000el-* \ | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa32r6-* | mipsisa32r6el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64r6-* | mipsisa64r6el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ | or1k*-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ | pru-* \ | pyramid-* \ | riscv32-* | riscv64-* \ | rl78-* | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ | tile*-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ | visium-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) ;; # Recognize the basic CPU types without company name, with glob match. xtensa*) basic_machine=$basic_machine-unknown ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
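# Example of such an alias (hypothetical invocation): "sun4sol2" below # decodes to basic_machine=sparc-sun with os=-solaris2, so # $ sh config.sub sun4sol2 # prints sparc-sun-solaris2.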
386bsd) basic_machine=i386-unknown os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; a29khif) basic_machine=a29k-amd os=-udi ;; abacus) basic_machine=abacus-unknown ;; adobe68k) basic_machine=m68010-adobe os=-scout ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; am29k) basic_machine=a29k-none os=-bsd ;; amd64) basic_machine=x86_64-pc ;; amd64-*) basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl os=-sysv ;; amiga | amiga-*) basic_machine=m68k-unknown ;; amigaos | amigados) basic_machine=m68k-unknown os=-amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=-sysv4 ;; apollo68) basic_machine=m68k-apollo os=-sysv ;; apollo68bsd) basic_machine=m68k-apollo os=-bsd ;; aros) basic_machine=i386-pc os=-aros ;; asmjs) basic_machine=asmjs-unknown ;; aux) basic_machine=m68k-apple os=-aux ;; balance) basic_machine=ns32k-sequent os=-dynix ;; blackfin) basic_machine=bfin-unknown os=-linux ;; blackfin-*) basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) basic_machine=powerpc-ibm os=-cnk ;; c54x-*) basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c55x-*) basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c6x-*) basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray os=-unicos ;; cegcc) basic_machine=arm-unknown os=-cegcc ;; convex-c1) basic_machine=c1-convex os=-bsd ;; convex-c2) basic_machine=c2-convex os=-bsd ;; convex-c32) basic_machine=c32-convex os=-bsd ;; convex-c34) basic_machine=c34-convex os=-bsd ;; convex-c38) basic_machine=c38-convex os=-bsd ;; cray | j90) basic_machine=j90-cray os=-unicos ;; craynv) basic_machine=craynv-cray os=-unicosmp ;; cr16 | cr16-*) basic_machine=cr16-unknown os=-elf ;; crds | unos) basic_machine=m68k-crds ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; crx) basic_machine=crx-unknown os=-elf ;; da30 | da30-*) basic_machine=m68k-da30 ;; decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) basic_machine=mips-dec ;; decsystem10* | dec10*) basic_machine=pdp10-dec os=-tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec os=-tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; delta88) basic_machine=m88k-motorola os=-sysv3 ;; dicos) basic_machine=i686-pc os=-dicos ;; djgpp) basic_machine=i586-pc os=-msdosdjgpp ;; dpx20 | dpx20-*) basic_machine=rs6000-bull os=-bosx ;; dpx2* | dpx2*-bull) basic_machine=m68k-bull os=-sysv3 ;; e500v[12]) basic_machine=powerpc-unknown os=$os"spe" ;; e500v[12]-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` os=$os"spe" ;; ebmon29k) basic_machine=a29k-amd os=-ebmon ;; elxsi) basic_machine=elxsi-elxsi os=-bsd ;; encore | umax | mmax) basic_machine=ns32k-encore ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=-ose ;; fx2800) basic_machine=i860-alliant ;; genix) basic_machine=ns32k-ns ;; gmicro) basic_machine=tron-gmicro os=-sysv ;; go32) basic_machine=i386-pc os=-go32 ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; h8300hms) basic_machine=h8300-hitachi os=-hms ;; h8300xray) basic_machine=h8300-hitachi os=-xray ;; h8500hms) basic_machine=h8500-hitachi os=-hms ;; harris) basic_machine=m88k-harris os=-sysv3 ;; hp300-*) basic_machine=m68k-hp ;; hp300bsd) 
basic_machine=m68k-hp os=-bsd ;; hp300hpux) basic_machine=m68k-hp os=-hpux ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) basic_machine=m68000-hp ;; hp9k3[2-9][0-9]) basic_machine=m68k-hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) basic_machine=hppa1.1-hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) basic_machine=hppa1.1-hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; hppa-next) os=-nextstep3 ;; hppaosf) basic_machine=hppa1.1-hp os=-osf ;; hppro) basic_machine=hppa1.1-hp os=-proelf ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; i*86v32) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv32 ;; i*86v4*) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; i386-vsta | vsta) basic_machine=i386-unknown os=-vsta ;; iris | iris4d) basic_machine=mips-sgi case $os in -irix*) ;; *) os=-irix4 ;; esac ;; isi68 | isi) basic_machine=m68k-isi os=-sysv ;; leon-*|leon[3-9]-*) basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; m88k-omron*) basic_machine=m88k-omron ;; magnum | m3230) basic_machine=mips-mips os=-sysv ;; merlin) basic_machine=ns32k-utek os=-sysv ;; microblaze*) basic_machine=microblaze-xilinx ;; mingw64) basic_machine=x86_64-pc os=-mingw64 ;; mingw32) basic_machine=i686-pc os=-mingw32 ;; mingw32ce) basic_machine=arm-unknown os=-mingw32ce ;; miniframe) basic_machine=m68000-convergent ;; *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari os=-mint ;; mips3*-*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` ;; mips3*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k os=-coff ;; morphos) basic_machine=powerpc-unknown os=-morphos ;; moxiebox) basic_machine=moxie-unknown os=-moxiebox ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; msys) basic_machine=i686-pc os=-msys ;; mvs) basic_machine=i370-ibm os=-mvs ;; nacl) basic_machine=le32-unknown os=-nacl ;; ncr3000) basic_machine=i486-ncr os=-sysv4 ;; netbsd386) basic_machine=i386-unknown os=-netbsd ;; netwinder) basic_machine=armv4l-rebel os=-linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=-newsos ;; news1000) basic_machine=m68030-sony os=-newsos ;; news-3600 | risc-news) basic_machine=mips-sony os=-newsos ;; necv70) basic_machine=v70-nec os=-sysv ;; next | m*-next ) basic_machine=m68k-next case $os in -nextstep* ) ;; -ns2*) os=-nextstep2 ;; *) os=-nextstep3 ;; esac ;; nh3000) basic_machine=m68k-harris os=-cxux ;; nh[45]000) basic_machine=m88k-harris os=-cxux ;; nindy960) basic_machine=i960-intel os=-nindy ;; mon960) basic_machine=i960-intel os=-mon960 ;; nonstopux) basic_machine=mips-compaq os=-nonstopux ;; np1) basic_machine=np1-gould ;; neo-tandem) basic_machine=neo-tandem ;; nse-tandem) basic_machine=nse-tandem ;; nsr-tandem) basic_machine=nsr-tandem ;; op50n-* | op60c-*) 
basic_machine=hppa1.1-oki os=-proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; os400) basic_machine=powerpc-ibm os=-os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose ;; os68k) basic_machine=m68k-none os=-os68k ;; pa-hitachi) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; paragon) basic_machine=i860-intel os=-osf ;; parisc) basic_machine=hppa-unknown os=-linux ;; parisc-*) basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; pbd) basic_machine=sparc-tti ;; pbb) basic_machine=m68k-tti ;; pc532 | pc532-*) basic_machine=ns32k-pc532 ;; pc98) basic_machine=i386-pc ;; pc98-*) basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc ;; pentiumpro | p6 | 6x86 | athlon | athlon_*) basic_machine=i686-pc ;; pentiumii | pentium2 | pentiumiii | pentium3) basic_machine=i686-pc ;; pentium4) basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium4-*) basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould ;; power) basic_machine=power-ibm ;; ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; pw32) basic_machine=i586-unknown os=-pw32 ;; rdos | rdos64) basic_machine=x86_64-pc os=-rdos ;; rdos32) basic_machine=i386-pc os=-rdos ;; rom68k) basic_machine=m68k-rom68k os=-coff ;; rm[46]00) basic_machine=mips-siemens ;; rtpc | rtpc-*) basic_machine=romp-ibm ;; s390 | s390-*) basic_machine=s390-ibm ;; s390x | s390x-*) basic_machine=s390x-ibm ;; sa29200) basic_machine=a29k-amd os=-udi ;; sb1) basic_machine=mipsisa64sb1-unknown ;; sb1el) basic_machine=mipsisa64sb1el-unknown ;; sde) basic_machine=mipsisa32-sde os=-elf ;; sei) basic_machine=mips-sei os=-seiux ;; sequent) basic_machine=i386-sequent ;; sh) basic_machine=sh-hitachi os=-hms ;; sh5el) basic_machine=sh5le-unknown ;; sh64) basic_machine=sh64-unknown ;; sparclite-wrs | simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; sps7) basic_machine=m68k-bull os=-sysv2 ;; spur) basic_machine=spur-unknown ;; st2000) basic_machine=m68k-tandem ;; stratus) basic_machine=i860-stratus os=-sysv4 ;; strongarm-* | thumb-*) basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` ;; sun2) basic_machine=m68000-sun ;; sun2os3) basic_machine=m68000-sun os=-sunos3 ;; sun2os4) basic_machine=m68000-sun os=-sunos4 ;; sun3os3) basic_machine=m68k-sun os=-sunos3 ;; sun3os4) basic_machine=m68k-sun os=-sunos4 ;; sun4os3) basic_machine=sparc-sun os=-sunos3 ;; sun4os4) basic_machine=sparc-sun os=-sunos4 ;; sun4sol2) basic_machine=sparc-sun os=-solaris2 ;; sun3 | sun3-*) basic_machine=m68k-sun ;; sun4) basic_machine=sparc-sun ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; 
sv1) basic_machine=sv1-cray os=-unicos ;; symmetry) basic_machine=i386-sequent os=-dynix ;; t3e) basic_machine=alphaev5-cray os=-unicos ;; t90) basic_machine=t90-cray os=-unicos ;; tile*) basic_machine=$basic_machine-unknown os=-linux-gnu ;; tx39) basic_machine=mipstx39-unknown ;; tx39el) basic_machine=mipstx39el-unknown ;; toad1) basic_machine=pdp10-xkl os=-tops20 ;; tower | tower-32) basic_machine=m68k-ncr ;; tpf) basic_machine=s390x-ibm os=-tpf ;; udi29k) basic_machine=a29k-amd os=-udi ;; ultra3) basic_machine=a29k-nyu os=-sym1 ;; v810 | necv810) basic_machine=v810-nec os=-none ;; vaxv) basic_machine=vax-dec os=-sysv ;; vms) basic_machine=vax-dec os=-vms ;; vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; vxworks960) basic_machine=i960-wrs os=-vxworks ;; vxworks68) basic_machine=m68k-wrs os=-vxworks ;; vxworks29k) basic_machine=a29k-wrs os=-vxworks ;; w65*) basic_machine=w65-wdc os=-none ;; w89k-*) basic_machine=hppa1.1-winbond os=-proelf ;; xbox) basic_machine=i686-pc os=-mingw32 ;; xps | xps100) basic_machine=xps100-honeywell ;; xscale-* | xscalee[bl]-*) basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` ;; ymp) basic_machine=ymp-cray os=-unicos ;; z8k-*-coff) basic_machine=z8k-unknown os=-sim ;; z80-*-coff) basic_machine=z80-unknown os=-sim ;; none) basic_machine=none-none os=-none ;; # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) basic_machine=hppa1.1-winbond ;; op50n) basic_machine=hppa1.1-oki ;; op60c) basic_machine=hppa1.1-oki ;; romp) basic_machine=romp-ibm ;; mmix) basic_machine=mmix-knuth ;; rs6000) basic_machine=rs6000-ibm ;; vax) basic_machine=vax-dec ;; pdp10) # there are many clones, so DEC is not a safe bet basic_machine=pdp10-unknown ;; pdp11) basic_machine=pdp11-dec ;; we32k) basic_machine=we32k-att ;; sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) basic_machine=sparc-sun ;; cydra) basic_machine=cydra-cydrome ;; orion) basic_machine=orion-highlevel ;; orion105) basic_machine=clipper-highlevel ;; mac | mpw | mac-mpw) basic_machine=m68k-apple ;; pmac | pmac-mpw) basic_machine=powerpc-apple ;; *-unknown) # Make sure to match an already-canonicalized machine name. ;; *) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` ;; *-commodore*) basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if [ x"$os" != x"" ] then case $os in # First match some system type aliases # that might get confused with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux ;; -solaris1 | -solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; -solaris) os=-solaris2 ;; -svr4*) os=-sysv4 ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # First accept the basic system types. # The portable systems comes first. # Each alternative MUST END IN A *, to match a version number. # -sysv* is not here because it comes later, after sysvr4. 
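# Example (hypothetical input): because every alternative below ends in # a *, a versioned name such as -freebsd12.1 lands in the same branch as # plain -freebsd*; dropping the trailing * would silently reject any # input that carries a version number.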
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* | -cloudabi* | -sortix* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ | -onefs* | -tirtos* | -phoenix* | -fuchsia*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) case $basic_machine in x86-* | i*86-*) ;; *) os=-nto$os ;; esac ;; -nto-qnx*) ;; -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) os=`echo $os | sed -e 's|mac|macos|'` ;; -linux-dietlibc) os=-linux-dietlibc ;; -linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) os=`echo $os | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) os=`echo $os | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition ;; -os400*) os=-os400 ;; -wince*) os=-wince ;; -osfrose*) os=-osfrose ;; -osf*) os=-osf ;; -utek*) os=-bsd ;; -dynix*) os=-bsd ;; -acis*) os=-aos ;; -atheos*) os=-atheos ;; -syllable*) os=-syllable ;; -386bsd) os=-bsd ;; -ctix* | -uts*) os=-sysv ;; -nova*) os=-rtmk-nova ;; -ns2 ) os=-nextstep2 ;; -nsk*) os=-nsk ;; # Preserve the version number of sinix5. -sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; -sinix*) os=-sysv4 ;; -tpf*) os=-tpf ;; -triton*) os=-sysv3 ;; -oss*) os=-sysv3 ;; -svr4) os=-sysv4 ;; -svr3) os=-sysv3 ;; -sysvr4) os=-sysv4 ;; # This must come after -sysvr4. -sysv*) ;; -ose*) os=-ose ;; -es1800*) os=-ose ;; -xenix) os=-xenix ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; -aros*) os=-aros ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; -nacl*) ;; -ios) ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. 
# Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $basic_machine in score-*) os=-elf ;; spu-*) os=-elf ;; *-acorn) os=-riscix1.2 ;; arm*-rebel) os=-linux ;; arm*-semi) os=-aout ;; c4x-* | tic4x-*) os=-coff ;; c8051-*) os=-elf ;; hexagon-*) os=-elf ;; tic54x-*) os=-coff ;; tic55x-*) os=-coff ;; tic6x-*) os=-coff ;; # This must come before the *-dec entry. pdp10-*) os=-tops20 ;; pdp11-*) os=-none ;; *-dec | vax-*) os=-ultrix4.2 ;; m68*-apollo) os=-domain ;; i386-sun) os=-sunos4.0.2 ;; m68000-sun) os=-sunos3 ;; m68*-cisco) os=-aout ;; mep-*) os=-elf ;; mips*-cisco) os=-elf ;; mips*-*) os=-elf ;; or32-*) os=-coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=-sysv3 ;; sparc-* | *-sun) os=-sunos4.1.1 ;; *-be) os=-beos ;; *-haiku) os=-haiku ;; *-ibm) os=-aix ;; *-knuth) os=-mmixware ;; *-wec) os=-proelf ;; *-winbond) os=-proelf ;; *-oki) os=-proelf ;; *-hp) os=-hpux ;; *-hitachi) os=-hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=-sysv ;; *-cbm) os=-amigaos ;; *-dg) os=-dgux ;; *-dolphin) os=-sysv3 ;; m68k-ccur) os=-rtu ;; m88k-omron*) os=-luna ;; *-next ) os=-nextstep ;; *-sequent) os=-ptx ;; *-crds) os=-unos ;; *-ns) os=-genix ;; i370-*) os=-mvs ;; *-next) os=-nextstep3 ;; *-gould) os=-sysv ;; *-highlevel) os=-bsd ;; *-encore) os=-bsd ;; *-sgi) os=-irix ;; *-siemens) os=-sysv4 ;; *-masscomp) os=-rtu ;; f30[01]-fujitsu | f700-fujitsu) os=-uxpv ;; *-rom68k) os=-coff ;; *-*bug) os=-coff ;; *-apple) os=-macos ;; *-atari*) os=-mint ;; *) os=-none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. vendor=unknown case $basic_machine in *-unknown) case $os in -riscix*) vendor=acorn ;; -sunos*) vendor=sun ;; -cnk*|-aix*) vendor=ibm ;; -beos*) vendor=be ;; -hpux*) vendor=hp ;; -mpeix*) vendor=hp ;; -hiux*) vendor=hitachi ;; -unos*) vendor=crds ;; -dgux*) vendor=dg ;; -luna*) vendor=omron ;; -genix*) vendor=ns ;; -mvs* | -opened*) vendor=ibm ;; -os400*) vendor=ibm ;; -ptx*) vendor=sequent ;; -tpf*) vendor=ibm ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; -aux*) vendor=apple ;; -hms*) vendor=hitachi ;; -mpw* | -macos*) vendor=apple ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) vendor=atari ;; -vos*) vendor=stratus ;; esac basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` ;; esac echo $basic_machine$os exit # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End:
jemalloc-sys-0.3.2/jemalloc/build-aux/install-sh
#! /bin/sh # # install - install a program, script, or datafile # This comes from X11R5 (mit/util/scripts/install.sh).
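#
# Illustrative usage (editor's sketch, not part of the original script;
# flags as parsed in the option loop below):
#   ./install-sh -c -m 0644 include/jemalloc.h /usr/local/include/jemalloc.h
#   ./install-sh -d /usr/local/lib
# -c copies instead of moving, -m sets the file mode, -d creates a directory.
#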
# # Copyright 1991 by the Massachusetts Institute of Technology # # Permission to use, copy, modify, distribute, and sell this software and its # documentation for any purpose is hereby granted without fee, provided that # the above copyright notice appear in all copies and that both that # copyright notice and this permission notice appear in supporting # documentation, and that the name of M.I.T. not be used in advertising or # publicity pertaining to distribution of the software without specific, # written prior permission. M.I.T. makes no representations about the # suitability of this software for any purpose. It is provided "as is" # without express or implied warranty. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. It can only install one file at a time, a restriction # shared with many OS's install programs. # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit="${DOITPROG-}" # put in absolute paths if you don't have them in your path; or use env. vars. mvprog="${MVPROG-mv}" cpprog="${CPPROG-cp}" chmodprog="${CHMODPROG-chmod}" chownprog="${CHOWNPROG-chown}" chgrpprog="${CHGRPPROG-chgrp}" stripprog="${STRIPPROG-strip}" rmprog="${RMPROG-rm}" mkdirprog="${MKDIRPROG-mkdir}" transformbasename="" transform_arg="" instcmd="$mvprog" chmodcmd="$chmodprog 0755" chowncmd="" chgrpcmd="" stripcmd="" rmcmd="$rmprog -f" mvcmd="$mvprog" src="" dst="" dir_arg="" while [ x"$1" != x ]; do case $1 in -c) instcmd="$cpprog" shift continue;; -d) dir_arg=true shift continue;; -m) chmodcmd="$chmodprog $2" shift shift continue;; -o) chowncmd="$chownprog $2" shift shift continue;; -g) chgrpcmd="$chgrpprog $2" shift shift continue;; -s) stripcmd="$stripprog" shift continue;; -t=*) transformarg=`echo $1 | sed 's/-t=//'` shift continue;; -b=*) transformbasename=`echo $1 | sed 's/-b=//'` shift continue;; *) if [ x"$src" = x ] then src=$1 else # this colon is to work around a 386BSD /bin/sh bug : dst=$1 fi shift continue;; esac done if [ x"$src" = x ] then echo "install: no input file specified" exit 1 else true fi if [ x"$dir_arg" != x ]; then dst=$src src="" if [ -d $dst ]; then instcmd=: else instcmd=mkdir fi else # Waiting for this to be detected by the "$instcmd $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if [ -f $src -o -d $src ] then true else echo "install: $src does not exist" exit 1 fi if [ x"$dst" = x ] then echo "install: no destination specified" exit 1 else true fi # If destination is a directory, append the input filename; if your system # does not like double slashes in filenames, you may need to add some logic if [ -d $dst ] then dst="$dst"/`basename $src` else true fi fi ## this sed command emulates the dirname command dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` # Make sure that the destination directory exists. # this part is taken from Noah Friedman's mkinstalldirs script # Skip lots of stat calls in the usual case. if [ ! -d "$dstdir" ]; then defaultIFS=' ' IFS="${IFS-${defaultIFS}}" oIFS="${IFS}" # Some sh's can't handle IFS=/ for some reason. IFS='%' set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` IFS="${oIFS}" pathcomp='' while [ $# -ne 0 ] ; do pathcomp="${pathcomp}${1}" shift if [ ! 
-d "${pathcomp}" ] ; then $mkdirprog "${pathcomp}" else true fi pathcomp="${pathcomp}/" done fi if [ x"$dir_arg" != x ] then $doit $instcmd $dst && if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi else # If we're going to rename the final executable, determine the name now. if [ x"$transformarg" = x ] then dstfile=`basename $dst` else dstfile=`basename $dst $transformbasename | sed $transformarg`$transformbasename fi # don't allow the sed command to completely eliminate the filename if [ x"$dstfile" = x ] then dstfile=`basename $dst` else true fi # Make a temp file name in the proper directory. dsttmp=$dstdir/#inst.$$# # Move or copy the file name to the temp name $doit $instcmd $src $dsttmp && trap "rm -f ${dsttmp}" 0 && # and set any options; do chmod last to preserve setuid bits # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $instcmd $src $dsttmp" command. if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && # Now rename the file to the real destination. $doit $rmcmd -f $dstdir/$dstfile && $doit $mvcmd $dsttmp $dstdir/$dstfile fi && exit 0 �����������������������������������������������jemalloc-sys-0.3.2/jemalloc/ChangeLog���������������������������������������������������������������0100644�0000765�0000024�00000203706�13404213401�0015675�0����������������������������������������������������������������������������������������������������ustar�00����������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Following are change highlights associated with official releases. Important bug fixes are all mentioned, but some internal enhancements are omitted here for brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc * 5.1.0 (May 4th, 2018) This release is primarily about fine-tuning, ranging from several new features to numerous notable performance and portability enhancements. The release and prior dev versions have been running in multiple large scale applications for months, and the cumulative improvements are substantial in many cases. Given the long and successful production runs, this release is likely a good candidate for applications to upgrade, from both jemalloc 5.0 and before. For performance-critical applications, the newly added TUNING.md provides guidelines on jemalloc tuning. New features: - Implement transparent huge page support for internal metadata. (@interwq) - Add opt.thp to allow enabling / disabling transparent huge pages for all mappings. (@interwq) - Add maximum background thread count option. (@djwatson) - Allow prof_active to control opt.lg_prof_interval and prof.gdump. (@interwq) - Allow arena index lookup based on allocation addresses via mallctl. (@lionkov) - Allow disabling initial-exec TLS model. 
(@davidtgoldblatt, @KenMacD) - Add opt.lg_extent_max_active_fit to set the max ratio between the size of the active extent selected (to split off from) and the size of the requested allocation. (@interwq, @davidtgoldblatt) - Add retain_grow_limit to set the max size when growing virtual address space. (@interwq) - Add mallctl interfaces: + arena..retain_grow_limit (@interwq) + arenas.lookup (@lionkov) + max_background_threads (@djwatson) + opt.lg_extent_max_active_fit (@interwq) + opt.max_background_threads (@djwatson) + opt.metadata_thp (@interwq) + opt.thp (@interwq) + stats.metadata_thp (@interwq) Portability improvements: - Support GNU/kFreeBSD configuration. (@paravoid) - Support m68k, nios2 and SH3 architectures. (@paravoid) - Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo) - Fix symbol listing for cross-compiling. (@tamird) - Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid) - Disable the CPU_SPINWAIT macro for Power. (@davidtgoldblatt, @marxin) - Fix MSVC 2015 & 2017 builds. (@rustyx) - Improve RISC-V support. (@EdSchouten) - Set name mangling script in strict mode. (@nicolov) - Avoid MADV_HUGEPAGE on ARM. (@marxin) - Modify configure to determine return value of strerror_r. (@davidtgoldblatt, @cferris1000) - Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani) - Fix 32-bit build on MSVC. (@rustyx) - Fix external symbol on MSVC. (@maksqwe) - Avoid a printf format specifier warning. (@jasone) - Add configure option --disable-initial-exec-tls which can allow jemalloc to be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD) - AArch64: Add ILP32 support. (@cmuellner) - Add --with-lg-vaddr configure option to support cross compiling. (@cmuellner, @davidtgoldblatt) Optimizations and refactors: - Improve active extent fit with extent_max_active_fit. This considerably reduces fragmentation over time and improves virtual memory and metadata usage. (@davidtgoldblatt, @interwq) - Eagerly coalesce large extents to reduce fragmentation. (@interwq) - sdallocx: only read size info when page aligned (i.e. possibly sampled), which speeds up the sized deallocation path significantly. (@interwq) - Avoid attempting new mappings for in place expansion with retain, since it rarely succeeds in practice and causes high overhead. (@interwq) - Refactor OOM handling in newImpl. (@wqfish) - Add internal fine-grained logging functionality for debugging use. (@davidtgoldblatt) - Refactor arena / tcache interactions. (@davidtgoldblatt) - Refactor extent management with dumpable flag. (@davidtgoldblatt) - Add runtime detection of lazy purging. (@interwq) - Use pairing heap instead of red-black tree for extents_avail. (@djwatson) - Use sysctl on startup in FreeBSD. (@trasz) - Use thread local prng state instead of atomic. (@djwatson) - Make decay to always purge one more extent than before, because in practice large extents are usually the ones that cross the decay threshold. Purging the additional extent helps save memory as well as reduce VM fragmentation. (@interwq) - Fast division by dynamic values. (@davidtgoldblatt) - Improve the fit for aligned allocation. (@interwq, @edwinsmith) - Refactor extent_t bitpacking. (@rkmisra) - Optimize the generated assembly for ticker operations. (@davidtgoldblatt) - Convert stats printing to use a structured text emitter. (@davidtgoldblatt) - Remove preserve_lru feature for extents management. (@djwatson) - Consolidate two memory loads into one on the fast deallocation path. 
(@davidtgoldblatt, @interwq) Bug fixes (most of the issues are only relevant to jemalloc 5.0): - Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt) - Validate returned file descriptor before use. (@zonyitoo) - Fix a few background thread initialization and shutdown issues. (@interwq) - Fix an extent coalesce + decay race by taking both coalescing extents off the LRU list. (@interwq) - Fix potentially unbound increase during decay, caused by one thread keep stashing memory to purge while other threads generating new pages. The number of pages to purge is checked to prevent this. (@interwq) - Fix a FreeBSD bootstrap assertion. (@strejda, @interwq) - Handle 32 bit mutex counters. (@rkmisra) - Fix a indexing bug when creating background threads. (@davidtgoldblatt, @binliu19) - Fix arguments passed to extent_init. (@yuleniwo, @interwq) - Fix addresses used for ordering mutexes. (@rkmisra) - Fix abort_conf processing during bootstrap. (@interwq) - Fix include path order for out-of-tree builds. (@cmuellner) Incompatible changes: - Remove --disable-thp. (@interwq) - Remove mallctl interfaces: + config.thp (@interwq) Documentation: - Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson) * 5.0.1 (July 1, 2017) This bugfix release fixes several issues, most of which are obscure enough that typical applications are not impacted. Bug fixes: - Update decay->nunpurged before purging, in order to avoid potential update races and subsequent incorrect purging volume. (@interwq) - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy locking and/or background threads). This mitigates an initialization failure bug for which we still do not have a clear reproduction test case. (@interwq) - Modify tsd management so that it neither crashes nor leaks if a thread's only allocation activity is to call free() after TLS destructors have been executed. This behavior was observed when operating with GNU libc, and is unlikely to be an issue with other libc implementations. (@interwq) - Mask signals during background thread creation. This prevents signals from being inadvertently delivered to background threads. (@jasone, @davidtgoldblatt, @interwq) - Avoid inactivity checks within background threads, in order to prevent recursive mutex acquisition. (@interwq) - Fix extent_grow_retained() to use the specified hooks when the arena..extent_hooks mallctl is used to override the default hooks. (@interwq) - Add missing reentrancy support for custom extent hooks which allocate. (@interwq) - Post-fork(2), re-initialize the list of tcaches associated with each arena to contain no tcaches except the forking thread's. (@interwq) - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This fixes potential deadlocks after fork(2). (@interwq) - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to generate corrupt configure scripts. (@jasone) - Ensure that the configured page size (--with-lg-page) is no larger than the configured huge page size (--with-lg-hugepage). (@jasone) * 5.0.0 (June 13, 2017) Unlike all previous jemalloc releases, this release does not use naturally aligned "chunks" for virtual memory management, and instead uses page-aligned "extents". This change has few externally visible effects, but the internal impacts are... extensive. Many other internal changes combine to make this the most cohesively designed version of jemalloc so far, with ample opportunity for further enhancements. 
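Many of the features below are surfaced through the mallctl() name-based control interface. As a rough, non-authoritative sketch (assuming an unprefixed jemalloc build and the documented C API; control names are taken from the lists below), enabling the new background threads and reading a basic statistic might look like:

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* Enable the background_thread feature introduced in this release
         * (sketch; mallctl() returns nonzero if the control is unavailable). */
        bool enable = true;
        if (mallctl("background_thread", NULL, NULL, &enable,
                    sizeof(enable)) != 0) {
            fprintf(stderr, "background_thread control unavailable\n");
        }
        /* Read a statistic by name. */
        size_t allocated, len = sizeof(allocated);
        if (mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0) {
            printf("allocated: %zu bytes\n", allocated);
        }
        return 0;
    }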
Continuous integration is now an integral aspect of development thanks to the efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a side effect the official release frequency may decrease over time. New features: - Implement optional per-CPU arena support; threads choose which arena to use based on current CPU rather than on fixed thread-->arena associations. (@interwq) - Implement two-phase decay of unused dirty pages. Pages transition from dirty-->muzzy-->clean, where the first phase transition relies on madvise(... MADV_FREE) semantics, and the second phase transition discards pages such that they are replaced with demand-zeroed pages on next access. (@jasone) - Increase decay time resolution from seconds to milliseconds. (@jasone) - Implement opt-in per CPU background threads, and use them for asynchronous decay-driven unused dirty page purging. (@interwq) - Add mutex profiling, which collects a variety of statistics useful for diagnosing overhead/contention issues. (@interwq) - Add C++ new/delete operator bindings. (@djwatson) - Support manually created arena destruction, such that all data and metadata are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats associated with destroyed arenas. (@jasone) - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing merged/destroyed arena statistics via mallctl. (@jasone) - Add opt.abort_conf to optionally abort if invalid configuration options are detected during initialization. (@interwq) - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the stats dumped during exit if opt.stats_print is true. (@jasone) - Add --with-version=VERSION for use when embedding jemalloc into another project's git repository. (@jasone) - Add --disable-thp to support cross compiling. (@jasone) - Add --with-lg-hugepage to support cross compiling. (@jasone) - Add mallctl interfaces (various authors): + background_thread + opt.abort_conf + opt.retain + opt.percpu_arena + opt.background_thread + opt.{dirty,muzzy}_decay_ms + opt.stats_print_opts + arena..initialized + arena..destroy + arena..{dirty,muzzy}_decay_ms + arena..extent_hooks + arenas.{dirty,muzzy}_decay_ms + arenas.bin..slab_size + arenas.nlextents + arenas.lextent..size + arenas.create + stats.background_thread.{num_threads,num_runs,run_interval} + stats.mutexes.{ctl,background_thread,prof,reset}. {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, num_owner_switch} + stats.arenas..{dirty,muzzy}_decay_ms + stats.arenas..uptime + stats.arenas..{pmuzzy,base,internal,resident} + stats.arenas..{dirty,muzzy}_{npurge,nmadvise,purged} + stats.arenas..bins..{nslabs,reslabs,curslabs} + stats.arenas..bins..mutex. {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, num_owner_switch} + stats.arenas..lextents..{nmalloc,ndalloc,nrequests,curlextents} + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy, extents_retained,decay_dirty,decay_muzzy,base,tcache_list}. {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, num_owner_switch} Portability improvements: - Improve reentrant allocation support, such that deadlock is less likely if e.g. a system library call in turn allocates memory. (@davidtgoldblatt, @interwq) - Support static linking of jemalloc with glibc. 
(@djwatson) Optimizations and refactors: - Organize virtual memory as "extents" of virtual memory pages, rather than as naturally aligned "chunks", and store all metadata in arbitrarily distant locations. This reduces virtual memory external fragmentation, and will interact better with huge pages (not yet explicitly supported). (@jasone) - Fold large and huge size classes together; only small and large size classes remain. (@jasone) - Unify the allocation paths, and merge most fast-path branching decisions. (@davidtgoldblatt, @interwq) - Embed per thread automatic tcache into thread-specific data, which reduces conditional branches and dereferences. Also reorganize tcache to increase fast-path data locality. (@interwq) - Rewrite atomics to closely model the C11 API, convert various synchronization from mutex-based to atomic, and use the explicit memory ordering control to resolve various hypothetical races without increasing synchronization overhead. (@davidtgoldblatt) - Extensively optimize rtree via various methods: + Add multiple layers of rtree lookup caching, since rtree lookups are now part of fast-path deallocation. (@interwq) + Determine rtree layout at compile time. (@jasone) + Make the tree shallower for common configurations. (@jasone) + Embed the root node in the top-level rtree data structure, thus avoiding one level of indirection. (@jasone) + Further specialize leaf elements as compared to internal node elements, and directly embed extent metadata needed for fast-path deallocation. (@jasone) + Ignore leading always-zero address bits (architecture-specific). (@jasone) - Reorganize headers (ongoing work) to make them hermetic, and disentangle various module dependencies. (@davidtgoldblatt) - Convert various internal data structures such as size class metadata from boot-time-initialized to compile-time-initialized. Propagate resulting data structure simplifications, such as making arena metadata fixed-size. (@jasone) - Simplify size class lookups when constrained to size classes that are multiples of the page size. This speeds lookups, but the primary benefit is complexity reduction in code that was the source of numerous regressions. (@jasone) - Lock individual extents when possible for localized extent operations, rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone) - Use first fit layout policy instead of best fit, in order to improve packing. (@jasone) - If munmap(2) is not in use, use an exponential series to grow each arena's virtual memory, so that the number of disjoint virtual memory mappings remains low. (@jasone) - Implement per arena base allocators, so that arenas never share any virtual memory pages. (@jasone) - Automatically generate private symbol name mangling macros. (@jasone) Incompatible changes: - Replace chunk hooks with an expanded/normalized set of extent hooks. (@jasone) - Remove ratio-based purging. (@jasone) - Remove --disable-tcache. (@jasone) - Remove --disable-tls. (@jasone) - Remove --enable-ivsalloc. (@jasone) - Remove --with-lg-size-class-group. (@jasone) - Remove --with-lg-tiny-min. (@jasone) - Remove --disable-cc-silence. (@jasone) - Remove --enable-code-coverage. (@jasone) - Remove --disable-munmap (replaced by opt.retain). (@jasone) - Remove Valgrind support. (@jasone) - Remove quarantine support. (@jasone) - Remove redzone support. 
(@jasone) - Remove mallctl interfaces (various authors): + config.munmap + config.tcache + config.tls + config.valgrind + opt.lg_chunk + opt.purge + opt.lg_dirty_mult + opt.decay_time + opt.quarantine + opt.redzone + opt.thp + arena..lg_dirty_mult + arena..decay_time + arena..chunk_hooks + arenas.initialized + arenas.lg_dirty_mult + arenas.decay_time + arenas.bin..run_size + arenas.nlruns + arenas.lrun..size + arenas.nhchunks + arenas.hchunk..size + arenas.extend + stats.cactive + stats.arenas..lg_dirty_mult + stats.arenas..decay_time + stats.arenas..metadata.{mapped,allocated} + stats.arenas..{npurge,nmadvise,purged} + stats.arenas..huge.{allocated,nmalloc,ndalloc,nrequests} + stats.arenas..bins..{nruns,reruns,curruns} + stats.arenas..lruns..{nmalloc,ndalloc,nrequests,curruns} + stats.arenas..hchunks..{nmalloc,ndalloc,nrequests,curhchunks} Bug fixes: - Improve interval-based profile dump triggering to dump only one profile when a single allocation's size exceeds the interval. (@jasone) - Use prefixed function names (as controlled by --with-jemalloc-prefix) when pruning backtrace frames in jeprof. (@jasone) * 4.5.0 (February 28, 2017) This is the first release to benefit from much broader continuous integration testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure in place for prior releases, it would have caught all of the most serious regressions fixed by this release. New features: - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for transparent huge page integration. (@jasone) - Update zone allocator integration to work with macOS 10.12. (@glandium) - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not during configuration. (@jasone, @ronawho) Bug fixes: - Fix DSS (sbrk(2)-based) allocation. This regression was first released in 4.3.0. (@jasone) - Handle race in per size class utilization computation. This functionality was first released in 4.0.0. (@interwq) - Fix lock order reversal during gdump. (@jasone) - Fix/refactor tcache synchronization. This regression was first released in 4.0.0. (@jasone) - Fix various JSON-formatted malloc_stats_print() bugs. This functionality was first released in 4.3.0. (@jasone) - Fix huge-aligned allocation. This regression was first released in 4.4.0. (@jasone) - When transparent huge page integration is enabled, detect what state pages start in according to the kernel's current operating mode, and only convert arena chunks to non-huge during purging if that is not their initial state. This functionality was first released in 4.4.0. (@jasone) - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case. This regression was first released in 4.0.0. (@jasone, @428desmo) - Properly detect sparc64 when building for Linux. (@glaubitz) * 4.4.0 (December 3, 2016) New features: - Add configure support for *-*-linux-android. (@cferris1000, @jasone) - Add the --disable-syscall configure option, for use on systems that place security-motivated limitations on syscall(2). (@jasone) - Add support for Debian GNU/kFreeBSD. (@thesam) Optimizations: - Add extent serial numbers and use them where appropriate as a sort key that is higher priority than address, so that the allocation policy prefers older extents. This tends to improve locality (decrease fragmentation) when memory grows downward. (@jasone) - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized on Linux 4.5 and newer. 
(@jasone) - Mark partially purged arena chunks as non-huge-page. This improves interaction with Linux's transparent huge page functionality. (@jasone) Bug fixes: - Fix size class computations for edge conditions involving extremely large allocations. This regression was first released in 4.0.0. (@jasone, @ingvarha) - Remove overly restrictive assertions related to the cactive statistic. This regression was first released in 4.1.0. (@jasone) - Implement a more reliable detection scheme for os_unfair_lock on macOS. (@jszakmeister) * 4.3.1 (November 7, 2016) Bug fixes: - Fix a severe virtual memory leak. This regression was first released in 4.3.0. (@interwq, @jasone) - Refactor atomic and prng APIs to restore support for 32-bit platforms that use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) * 4.3.0 (November 4, 2016) This is the first release that passes the test suite for multiple Windows configurations, thanks in large part to @glandium setting up continuous integration via AppVeyor (and Travis CI for Linux and OS X). New features: - Add "J" (JSON) support to malloc_stats_print(). (@jasone) - Add Cray compiler support. (@ronawho) Optimizations: - Add/use adaptive spinning for bootstrapping and radix tree node initialization. (@jasone) Bug fixes: - Fix large allocation to search starting in the optimal size class heap, which can substantially reduce virtual memory churn and fragmentation. This regression was first released in 4.0.0. (@mjp41, @jasone) - Fix stats.arenas..nthreads accounting. (@interwq) - Fix and simplify decay-based purging. (@jasone) - Make DSS (sbrk(2)-related) operations lockless, which resolves potential deadlocks during thread exit. (@jasone) - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, @jasone) - Fix over-sized allocation of arena_t (plus associated stats) data structures. (@jasone, @interwq) - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) - Fix a Valgrind integration bug. (@ronawho) - Disallow 0x5a junk filling when running in Valgrind. (@jasone) - Fix a file descriptor leak on Linux. This regression was first released in 4.2.0. (@vsarunas, @jasone) - Fix static linking of jemalloc with glibc. (@djwatson) - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This works around other libraries' system call wrappers performing reentrant allocation. (@kspinka, @Whissi, @jasone) - Fix OS X default zone replacement to work with OS X 10.12. (@glandium, @jasone) - Fix cached memory management to avoid needless commit/decommit operations during purging, which resolves permanent virtual memory map fragmentation issues on Windows. (@mjp41, @jasone) - Fix TSD fetches to avoid (recursive) allocation. This is relevant to non-TLS and Windows configurations. (@jasone) - Fix malloc_conf overriding to work on Windows. (@jasone) - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) * 4.2.1 (June 8, 2016) Bug fixes: - Fix bootstrapping issues for configurations that require allocation during tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) - Fix gettimeofday() version of nstime_update(). (@ronawho) - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) - Fix potential VM map fragmentation regression. (@jasone) - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) - Fix heap profiling context leaks in reallocation edge cases. 
(@jasone) * 4.2.0 (May 12, 2016) New features: - Add the arena..reset mallctl, which makes it possible to discard all of an arena's allocations in a single operation. (@jasone) - Add the stats.retained and stats.arenas..retained statistics. (@jasone) - Add the --with-version configure option. (@jasone) - Support --with-lg-page values larger than actual page size. (@jasone) Optimizations: - Use pairing heaps rather than red-black trees for various hot data structures. (@djwatson, @jasone) - Streamline fast paths of rtree operations. (@jasone) - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) - Decommit unused virtual memory if the OS does not overcommit. (@jasone) - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order to avoid unfortunate interactions during fork(2). (@jasone) Bug fixes: - Fix chunk accounting related to triggering gdump profiles. (@jasone) - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) - Scale leak report summary according to sampling probability. (@jasone) * 4.1.1 (May 3, 2016) This bugfix release resolves a variety of mostly minor issues, though the bitmap fix is critical for 64-bit Windows. Bug fixes: - Fix the linear scan version of bitmap_sfu() to shift by the proper amount even when sizeof(long) is not the same as sizeof(void *), as on 64-bit Windows. (@jasone) - Fix hashing functions to avoid unaligned memory accesses (and resulting crashes). This is relevant at least to some ARM-based platforms. (@rkmisra) - Fix fork()-related lock rank ordering reversals. These reversals were unlikely to cause deadlocks in practice except when heap profiling was enabled and active. (@jasone) - Fix various chunk leaks in OOM code paths. (@jasone) - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) - Fix a variety of test failures that were due to test fragility rather than core bugs. (@jasone) * 4.1.0 (February 28, 2016) This release is primarily about optimizations, but it also incorporates a lot of portability-motivated refactoring and enhancements. Many people worked on this release, to an extent that even with the omission here of minor changes (see git revision history), and of the people who reported and diagnosed issues, so much of the work was contributed that starting with this release, changes are annotated with author credits to help reflect the collaborative effort involved. New features: - Implement decay-based unused dirty page purging, a major optimization with mallctl API impact. This is an alternative to the existing ratio-based unused dirty page purging, and is intended to eventually become the sole purging mechanism. New mallctls: + opt.purge + opt.decay_time + arena..decay + arena..decay_time + arenas.decay_time + stats.arenas..decay_time (@jasone, @cevans87) - Add --with-malloc-conf, which makes it possible to embed a default options string during configuration. This was motivated by the desire to specify --with-malloc-conf=purge:decay , since the default must remain purge:ratio until the 5.0.0 release. (@jasone) - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) - Make *allocx() size class overflow behavior defined. The maximum size class is now less than PTRDIFF_MAX to protect applications against numerical overflow, and all allocation functions are guaranteed to indicate errors rather than potentially crashing if the request size exceeds the maximum size class. 
(@jasone) - jeprof: + Add raw heap profile support. (@jasone) + Add --retain and --exclude for backtrace symbol filtering. (@jasone) Optimizations: - Optimize the fast path to combine various bootstrapping and configuration checks and execute more streamlined code in the common case. (@interwq) - Use linear scan for small bitmaps (used for small object tracking). In addition to speeding up bitmap operations on 64-bit systems, this reduces allocator metadata overhead by approximately 0.2%. (@djwatson) - Separate arena_avail trees, which substantially speeds up run tree operations. (@djwatson) - Use memoization (boot-time-computed table) for run quantization. Separate arena_avail trees reduced the importance of this optimization. (@jasone) - Attempt mmap-based in-place huge reallocation. This can dramatically speed up incremental huge reallocation. (@jasone) Incompatible changes: - Make opt.narenas unsigned rather than size_t. (@jasone) Bug fixes: - Fix stats.cactive accounting regression. (@rustyx, @jasone) - Handle unaligned keys in hash(). This caused problems for some ARM systems. (@jasone, @cferris1000) - Refactor arenas array. In addition to fixing a fork-related deadlock, this makes arena lookups faster and simpler. (@jasone) - Move retained memory allocation out of the default chunk allocation function, to a location that gets executed even if the application installs a custom chunk allocation function. This resolves a virtual memory leak. (@buchgr) - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) - Fix run quantization. In practice this bug had no impact unless applications requested memory with alignment exceeding one page. (@jasone, @djwatson) - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) - jeprof: + Don't discard curl options if timeout is not defined. (@djwatson) + Detect failed profile fetches. (@djwatson) - Fix stats.arenas..{dss,lg_dirty_mult,decay_time,pactive,pdirty} for --disable-stats case. (@jasone) * 4.0.4 (October 24, 2015) This bugfix release fixes another xallocx() regression. No other regressions have come to light in over a month, so this is likely a good starting point for people who prefer to wait for "dot one" releases with all the major issues shaken out. Bug fixes: - Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large allocations that have been randomly assigned an offset of 0 when --enable-cache-oblivious configure option is enabled. * 4.0.3 (September 24, 2015) This bugfix release continues the trend of xallocx() and heap profiling fixes. Bug fixes: - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large allocations when --enable-cache-oblivious configure option is enabled. - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations when resizing from/to a size class that is not a multiple of the chunk size. - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap profile dumping started. - Work around a potentially bad thread-specific data initialization interaction with NPTL (glibc's pthreads implementation). * 4.0.2 (September 21, 2015) This bugfix release addresses a few bugs specific to heap profiling. Bug fixes: - Fix ixallocx_prof_sample() to never modify nor create sampled small allocations. xallocx() is in general incapable of moving small allocations, so this fix removes buggy code without loss of generality. - Fix irallocx_prof_sample() to always allocate large regions, even when alignment is non-zero. 
- Fix prof_alloc_rollback() to read tdata from thread-specific data rather than dereferencing a potentially invalid tctx. * 4.0.1 (September 15, 2015) This is a bugfix release that is somewhat high risk due to the amount of refactoring required to address deep xallocx() problems. As a side effect of these fixes, xallocx() now tries harder to partially fulfill requests for optional extra space. Note that a couple of minor heap profiling optimizations are included, but these are better thought of as performance fixes that were integral to discovering most of the other bugs. Optimizations: - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the fast path when heap profiling is enabled. Additionally, split a special case out into arena_prof_tctx_reset(), which also avoids chunk metadata reads. - Optimize irallocx_prof() to optimistically update the sampler state. The prior implementation appears to have been a holdover from when rallocx()/xallocx() functionality was combined as rallocm(). Bug fixes: - Fix TLS configuration such that it is enabled by default for platforms on which it works correctly. - Fix arenas_cache_cleanup() and arena_get_hard() to handle allocation/deallocation within the application's thread-specific data cleanup functions even after arenas_cache is torn down. - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS. - Fix chunk purge hook calls for in-place huge shrinking reallocation to specify the old chunk size rather than the new chunk size. This bug caused no correctness issues for the default chunk purge function, but was visible to custom functions set via the "arena..chunk_hooks" mallctl. - Fix heap profiling bugs: + Fix heap profiling to distinguish among otherwise identical sample sites with interposed resets (triggered via the "prof.reset" mallctl). This bug could cause data structure corruption that would most likely result in a segfault. + Fix irealloc_prof() to prof_alloc_rollback() on OOM. + Make one call to prof_active_get_unlocked() per allocation event, and use the result throughout the relevant functions that handle an allocation event. Also add a missing check in prof_realloc(). These fixes protect allocation events against concurrent prof_active changes. + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample() in the correct order. + Fix prof_realloc() to call prof_free_sampled_object() after calling prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were the same, the tctx could have been prematurely destroyed. - Fix portability bugs: + Don't bitshift by negative amounts when encoding/decoding run sizes in chunk header maps. This affected systems with page sizes greater than 8 KiB. + Rename index_t to szind_t to avoid an existing type on Solaris. + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to match glibc and avoid compilation errors when including both jemalloc/jemalloc.h and malloc.h in C++ code. + Don't assume that /bin/sh is appropriate when running size_classes.sh during configuration. + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM. + Link tests to librt if it contains clock_gettime(2). * 4.0.0 (August 17, 2015) This version contains many speed and space optimizations, both minor and major. The major themes are generalization, unification, and simplification. Although many of these optimizations cause no visible behavior change, their cumulative effect is substantial. 
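Among the user-visible additions listed below is sized deallocation via sdallocx(). A minimal, hedged sketch of how it pairs with the *allocx() calls (jemalloc's documented C API; error handling abbreviated):

    #include <jemalloc/jemalloc.h>

    void sized_roundtrip(void) {
        size_t sz = 4096;
        void *p = mallocx(sz, 0);   /* size must be nonzero */
        if (p != NULL) {
            /* Passing the size back lets jemalloc skip a metadata read on
             * free, which is the optimization described below. */
            sdallocx(p, sz, 0);
        }
    }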
New features: - Normalize size class spacing to be consistent across the complete size range. By default there are four size classes per size doubling, but this is now configurable via the --with-lg-size-class-group option. Also add the --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and --with-lg-tiny-min options, which can be used to tweak page and size class settings. Impacts: + Worst case performance for incrementally growing/shrinking reallocation is improved because there are far fewer size classes, and therefore copying happens less often. + Internal fragmentation is limited to 20% for all but the smallest size classes (those less than four times the quantum). (1B + 4 KiB) and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation. + Chunk fragmentation tends to be lower because there are fewer distinct run sizes to pack. - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and "tcache.destroy" mallctls control tcache lifetime and flushing, and the MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API control which tcache is used for each operation. - Implement per thread heap profiling, as well as the ability to enable/disable heap profiling on a per thread basis. Add the "prof.reset", "prof.lg_sample", "thread.prof.name", "thread.prof.active", "opt.prof_thread_active_init", "prof.thread_active_init", and "thread.prof.active" mallctls. - Add support for per arena application-specified chunk allocators, configured via the "arena..chunk_hooks" mallctl. - Refactor huge allocation to be managed by arenas, so that arenas now function as general purpose independent allocators. This is important in the context of user-specified chunk allocators, aside from the scalability benefits. Related new statistics: + The "stats.arenas..huge.allocated", "stats.arenas..huge.nmalloc", "stats.arenas..huge.ndalloc", and "stats.arenas..huge.nrequests" mallctls provide high level per arena huge allocation statistics. + The "arenas.nhchunks", "arenas.hchunk..size", "stats.arenas..hchunks..nmalloc", "stats.arenas..hchunks..ndalloc", "stats.arenas..hchunks..nrequests", and "stats.arenas..hchunks..curhchunks" mallctls provide per size class statistics. - Add the 'util' column to malloc_stats_print() output, which reports the proportion of available regions that are currently in use for each small size class. - Add "alloc" and "free" modes for for junk filling (see the "opt.junk" mallctl), so that it is possible to separately enable junk filling for allocation versus deallocation. - Add the jemalloc-config script, which provides information about how jemalloc was configured, and how to integrate it into application builds. - Add metadata statistics, which are accessible via the "stats.metadata", "stats.arenas..metadata.mapped", and "stats.arenas..metadata.allocated" mallctls. - Add the "stats.resident" mallctl, which reports the upper limit of physically resident memory mapped by the allocator. - Add per arena control over unused dirty page purging, via the "arenas.lg_dirty_mult", "arena..lg_dirty_mult", and "stats.arenas..lg_dirty_mult" mallctls. - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump feature on/off during program execution. - Add sdallocx(), which implements sized deallocation. The primary optimization over dallocx() is the removal of a metadata read, which often suffers an L1 cache miss. - Add missing header includes in jemalloc/jemalloc.h, so that applications only have to #include . 
- Add support for additional platforms: + Bitrig + Cygwin + DragonFlyBSD + iOS + OpenBSD + OpenRISC/or1k Optimizations: - Maintain dirty runs in per arena LRUs rather than in per arena trees of dirty-run-containing chunks. In practice this change significantly reduces dirty page purging volume. - Integrate whole chunks into the unused dirty page purging machinery. This reduces the cost of repeated huge allocation/deallocation, because it effectively introduces a cache of chunks. - Split the arena chunk map into two separate arrays, in order to increase cache locality for the frequently accessed bits. - Move small run metadata out of runs, into arena chunk headers. This reduces run fragmentation, smaller runs reduce external fragmentation for small size classes, and packed (less uniformly aligned) metadata layout improves CPU cache set distribution. - Randomly distribute large allocation base pointer alignment relative to page boundaries in order to more uniformly utilize CPU cache sets. This can be disabled via the --disable-cache-oblivious configure option, and queried via the "config.cache_oblivious" mallctl. - Micro-optimize the fast paths for the public API functions. - Refactor thread-specific data to reside in a single structure. This assures that only a single TLS read is necessary per call into the public API. - Implement in-place huge allocation growing and shrinking. - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make additional optimizations that reduce maximum lookup depth to one or two levels. This resolves what was a concurrency bottleneck for per arena huge allocation, because a global data structure is critical for determining which arenas own which huge allocations. Incompatible changes: - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious warnings by default. - Assure that the constness of malloc_usable_size()'s return type matches that of the system implementation. - Change the heap profile dump format to support per thread heap profiling, rename pprof to jeprof, and enhance it with the --thread= option. As a result, the bundled jeprof must now be used rather than the upstream (gperftools) pprof. - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can internally deadlock on some platforms. - Change the "arenas.nlruns" mallctl type from size_t to unsigned. - Replace the "stats.arenas..bins..allocated" mallctl with "stats.arenas..bins..curregs". - Ignore MALLOC_CONF in set{uid,gid,cap} binaries. - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage. Removed features: - Remove the *allocm() API, which is superseded by the *allocx() API. - Remove the --enable-dss options, and make dss non-optional on all platforms which support sbrk(2). - Remove the "arenas.purge" mallctl, which was obsoleted by the "arena..purge" mallctl in 3.1.0. - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically detects whether it is running inside Valgrind. - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and "stats.huge.ndalloc" mallctls. - Remove the --enable-mremap option. - Remove the "stats.chunks.current", "stats.chunks.total", and "stats.chunks.high" mallctls. Bug fixes: - Fix the cactive statistic to decrease (rather than increase) when active memory decreases. This regression was first released in 3.5.0. - Fix OOM handling in memalign() and valloc(). 
A variant of this bug existed in all releases since 2.0.0, which introduced these functions. - Fix an OOM-related regression in arena_tcache_fill_small(), which could cause cache corruption on OOM. This regression was present in all releases from 2.2.0 through 3.6.0. - Fix size class overflow handling for malloc(), posix_memalign(), memalign(), calloc(), and realloc() when profiling is enabled. - Fix the "arena..dss" mallctl to return an error if "primary" or "secondary" precedence is specified, but sbrk(2) is not supported. - Fix fallback lg_floor() implementations to handle extremely large inputs. - Ensure the default purgeable zone is after the default zone on OS X. - Fix latent bugs in atomic_*(). - Fix the "arena..dss" mallctl to handle read-only calls. - Fix tls_model configuration to enable the initial-exec model when possible. - Mark malloc_conf as a weak symbol so that the application can override it. - Correctly detect glibc's adaptive pthread mutexes. - Fix the --without-export configure option. * 3.6.0 (March 31, 2014) This version contains a critical bug fix for a regression present in 3.5.0 and 3.5.1. Bug fixes: - Fix a regression in arena_chunk_alloc() that caused crashes during small/large allocation if chunk allocation failed. In the absence of this bug, chunk allocation failure would result in allocation failure, e.g. NULL return from malloc(). This regression was introduced in 3.5.0. - Fix backtracing for gcc intrinsics-based backtracing by specifying -fno-omit-frame-pointer to gcc. Note that the application (and all the libraries it links to) must also be compiled with this option for backtracing to be reliable. - Use dss allocation precedence for huge allocations as well as small/large allocations. - Fix test assertion failure message formatting. This bug did not manifest on x86_64 systems because of implementation subtleties in va_list. - Fix inconsequential test failures for hash and SFMT code. New features: - Support heap profiling on FreeBSD. This feature depends on the proc filesystem being mounted during heap profile dumping. * 3.5.1 (February 25, 2014) This version primarily addresses minor bugs in test code. Bug fixes: - Configure Solaris/Illumos to use MADV_FREE. - Fix junk filling for mremap(2)-based huge reallocation. This is only relevant if configuring with the --enable-mremap option specified. - Avoid compilation failure if 'restrict' C99 keyword is not supported by the compiler. - Add a configure test for SSE2 rather than assuming it is usable on i686 systems. This fixes test compilation errors, especially on 32-bit Linux systems. - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit test. - Fix/remove flawed alignment-related overflow tests. - Prevent compiler optimizations that could change backtraces in the prof_accum unit test. * 3.5.0 (January 22, 2014) This version focuses on refactoring and automated testing, though it also includes some non-trivial heap profiling optimizations not mentioned below. New features: - Add the *allocx() API, which is a successor to the experimental *allocm() API. The *allocx() functions are slightly simpler to use because they have fewer parameters, they directly return the results of primary interest, and mallocx()/rallocx() avoid the strict aliasing pitfall that allocm()/rallocm() share with posix_memalign(). Note that *allocm() is slated for removal in the next non-bugfix release. - Add support for LinuxThreads. 
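The *allocx() API introduced above returns its results directly instead of through out-parameters. A hedged sketch of the calling convention (flags and functions as documented for jemalloc; error handling abbreviated):

    #include <jemalloc/jemalloc.h>

    void allocx_example(void) {
        /* 64-byte aligned, zero-filled allocation. */
        void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL) return;
        /* rallocx() may move the allocation; on failure the original
         * pointer remains valid, so keep it. */
        void *q = rallocx(p, 4096, MALLOCX_ALIGN(64));
        if (q != NULL) p = q;
        dallocx(p, 0);
    }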
Bug fixes: - Unless heap profiling is enabled, disable floating point code and don't link with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 systems, makes it possible to completely disable floating point register use. Some versions of glibc neglect to save/restore caller-saved floating point registers during dynamic lazy symbol loading, and the symbol loading code uses whatever malloc the application happens to have linked/loaded with, the result being potential floating point register corruption. - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling backtrace creation in imemalign(). This bug impacted posix_memalign() and aligned_alloc(). - Fix a file descriptor leak in a prof_dump_maps() error path. - Fix prof_dump() to close the dump file descriptor for all relevant error paths. - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for allocation, not just deallocation. - Fix a data race for large allocation stats counters. - Fix a potential infinite loop during thread exit. This bug occurred on Solaris, and could affect other platforms with similar pthreads TSD implementations. - Don't junk-fill reallocations unless usable size changes. This fixes a violation of the *allocx()/*allocm() semantics. - Fix growing large reallocation to junk fill new space. - Fix huge deallocation to junk fill when munmap is disabled. - Change the default private namespace prefix from empty to je_, and change --with-private-namespace-prefix so that it prepends an additional prefix rather than replacing je_. This reduces the likelihood of applications which statically link jemalloc experiencing symbol name collisions. - Add missing private namespace mangling (relevant when --with-private-namespace is specified). - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as static even for debug builds. - Add a missing mutex unlock in a malloc_init_hard() error path. In practice this error path is never executed. - Fix numerous bugs in malloc_strotumax() error handling/reporting. These bugs had no impact except for malformed inputs. - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by existing calls, so they had no impact. * 3.4.1 (October 20, 2013) Bug fixes: - Fix a race in the "arenas.extend" mallctl that could cause memory corruption of internal data structures and subsequent crashes. - Fix Valgrind integration flaws that caused Valgrind warnings about reads of uninitialized memory in: + arena chunk headers + internal zero-initialized data structures (relevant to tcache and prof code) - Preserve errno during the first allocation. A readlink(2) call during initialization fails unless /etc/malloc.conf exists, so errno was typically set during the first allocation prior to this fix. - Fix compilation warnings reported by gcc 4.8.1. * 3.4.0 (June 2, 2013) This version is essentially a small bugfix release, but the addition of aarch64 support requires that the minor version be incremented. Bug fixes: - Fix race-triggered deadlocks in chunk_record(). These deadlocks were typically triggered by multiple threads concurrently deallocating huge objects. New features: - Add support for the aarch64 architecture. * 3.3.1 (March 6, 2013) This version fixes bugs that are typically encountered only when utilizing custom run-time options. Bug fixes: - Fix a locking order bug that could cause deadlock during fork if heap profiling were enabled. 
- Fix a chunk recycling bug that could cause the allocator to lose track of whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause corruption if allocating via sbrk(2) (unlikely unless running with the "dss:primary" option specified). This was completely harmless on Linux unless using mlockall(2) (and unlikely even then, unless the --disable-munmap configure option or the "dss:primary" option was specified). This regression was introduced in 3.1.0 by the mlockall(2)/madvise(2) interaction fix. - Fix TLS-related memory corruption that could occur during thread exit if the thread never allocated memory. Only the quarantine and prof facilities were susceptible. - Fix two quarantine bugs: + Internal reallocation of the quarantined object array leaked the old array. + Reallocation failure for internal reallocation of the quarantined object array (very unlikely) resulted in memory corruption. - Fix Valgrind integration to annotate all internally allocated memory in a way that keeps Valgrind happy about internal data structure access. - Fix building for s390 systems. * 3.3.0 (January 23, 2013) This version includes a few minor performance improvements in addition to the listed new features and bug fixes. New features: - Add clipping support to lg_chunk option processing. - Add the --enable-ivsalloc option. - Add the --without-export option. - Add the --disable-zone-allocator option. Bug fixes: - Fix "arenas.extend" mallctl to output the number of arenas. - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory is undefined. - Fix build break on FreeBSD related to alloca.h. * 3.2.0 (November 9, 2012) In addition to a couple of bug fixes, this version modifies page run allocation and dirty page purging algorithms in order to better control page-level virtual memory fragmentation. Incompatible changes: - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). Bug fixes: - Fix dss/mmap allocation precedence code to use recyclable mmap memory only after primary dss allocation fails. - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced in 3.1.0 by the addition of the "arena..purge" mallctl. * 3.1.0 (October 16, 2012) New features: - Auto-detect whether running inside Valgrind, thus removing the need to manually specify MALLOC_CONF=valgrind:true. - Add the "arenas.extend" mallctl, which allows applications to create manually managed arenas. - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). - Add the "opt.dss", "arena..dss", and "stats.arenas..dss" mallctls, which provide control over dss/mmap precedence. - Add the "arena..purge" mallctl, which obsoletes "arenas.purge". - Define LG_QUANTUM for hppa. Incompatible changes: - Disable tcache by default if running inside Valgrind, in order to avoid making unallocated objects appear reachable to Valgrind. - Drop const from malloc_usable_size() argument on Linux. Bug fixes: - Fix heap profiling crash if sampled object is freed via realloc(p, 0). - Remove const from __*_hook variable declarations, so that glibc can modify them during process forking. - Fix mlockall(2)/madvise(2) interaction. - Fix fork(2)-related deadlocks. - Fix error return value for "thread.tcache.enabled" mallctl. * 3.0.0 (May 11, 2012) Although this version adds some major new features, the primary focus is on internal code cleanup that facilitates maintainability and portability, most of which is not reflected in the ChangeLog. 
  This is the first release to incorporate substantial contributions from
  numerous other developers, and the result is a more broadly useful
  allocator (see the git revision history for contribution details).

  Note that the license has been unified, thanks to Facebook granting a
  license under the same terms as the other copyright holders (see COPYING).

  New features:
  - Implement Valgrind support, redzones, and quarantine.
  - Add support for additional platforms:
    + FreeBSD
    + Mac OS X Lion
    + MinGW
    + Windows (no support yet for replacing the system malloc)
  - Add support for additional architectures:
    + MIPS
    + SH4
    + Tilera
  - Add support for cross compiling.
  - Add nallocm(), which rounds a request size up to the nearest size class
    without actually allocating.
  - Implement aligned_alloc() (blame C11).
  - Add the "thread.tcache.enabled" mallctl.
  - Add the "opt.prof_final" mallctl.
  - Update pprof (from gperftools 2.0).
  - Add the --with-mangling option.
  - Add the --disable-experimental option.
  - Add the --disable-munmap option, and make it the default on Linux.
  - Add the --enable-mremap option, which disables use of mremap(2) by
    default.

  Incompatible changes:
  - Enable stats by default.
  - Enable fill by default.
  - Disable lazy locking by default.
  - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
  - Rename the "arenas.pagesize" mallctl to "arenas.page".
  - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
  - Change the "opt.prof_accum" default from true to false.

  Removed features:
  - Remove the swap feature, including the "config.swap", "swap.avail",
    "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
  - Remove highruns statistics, including the
    "stats.arenas.<i>.bins.<j>.highruns" and
    "stats.arenas.<i>.lruns.<j>.highruns" mallctls.
  - As part of small size class refactoring, remove the
    "opt.lg_[qc]space_max", "arenas.cacheline", "arenas.subpage",
    "arenas.[tqcs]space_{min,max}", and "arenas.[tqcs]bins" mallctls.
  - Remove the "arenas.chunksize" mallctl.
  - Remove the "opt.lg_prof_tcmax" option.
  - Remove the "opt.lg_prof_bt_max" option.
  - Remove the "opt.lg_tcache_gc_sweep" option.
  - Remove the --disable-tiny option, including the "config.tiny" mallctl.
  - Remove the --enable-dynamic-page-shift configure option.
  - Remove the --enable-sysv configure option.

  Bug fixes:
  - Fix a statistics-related bug in the "thread.arena" mallctl that could
    cause invalid statistics and crashes.
  - Work around TLS deallocation via free() on Linux.  This bug could cause
    write-after-free memory corruption.
  - Fix a potential deadlock that could occur during interval- and
    growth-triggered heap profile dumps.
  - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
  - Fix chunk_alloc_dss() to stop claiming memory is zeroed.  This bug could
    cause memory corruption and crashes with --enable-dss specified.
  - Fix fork-related bugs that could cause deadlock in children between fork
    and exec.
  - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
  - Fix realloc(p, 0) to act like free(p).
  - Do not enforce minimum alignment in memalign().
  - Check for NULL pointer in malloc_usable_size().
  - Fix an off-by-one heap profile statistics bug that could be observed in
    interval- and growth-triggered heap profiles.
  - Fix the "epoch" mallctl to update cached stats even if the passed in
    epoch is 0.
  - Fix bin->runcur management to fix a layout policy bug.  This bug did not
    affect correctness.
  - Fix a bug in choose_arena_hard() that potentially caused more arenas to
    be initialized than necessary.
  - Add missing "opt.lg_tcache_max" mallctl implementation.
  - Use glibc allocator hooks to make mixed allocator usage less likely.
  - Fix build issues for --disable-tcache.
  - Don't mangle pthread_create() when --with-private-namespace is specified.

* 2.2.5 (November 14, 2011)

  Bug fixes:
  - Fix huge_ralloc() race when using mremap(2).  This is a serious bug that
    could cause memory corruption and/or crashes.
  - Fix huge_ralloc() to maintain chunk statistics.
  - Fix malloc_stats_print(..., "a") output.

* 2.2.4 (November 5, 2011)

  Bug fixes:
  - Initialize arenas_tsd before using it.  This bug existed for 2.2.[0-3],
    as well as for --disable-tls builds in earlier releases.
  - Do not assume a 4 KiB page size in test/rallocm.c.

* 2.2.3 (August 31, 2011)

  This version fixes numerous bugs related to heap profiling.

  Bug fixes:
  - Fix a prof-related race condition.  This bug could cause memory
    corruption, but only occurred in non-default configurations
    (prof_accum:false).
  - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is
    excluded from backtraces).
  - Fix a prof-related bug in realloc() (only triggered by OOM errors).
  - Fix prof-related bugs in allocm() and rallocm().
  - Fix prof_tdata_cleanup() for --disable-tls builds.
  - Fix a relative include path, to fix objdir builds.

* 2.2.2 (July 30, 2011)

  Bug fixes:
  - Fix a build error for --disable-tcache.
  - Fix assertions in arena_purge() (for real this time).
  - Add the --with-private-namespace option.  This is a workaround for symbol
    conflicts that can inadvertently arise when using static libraries.

* 2.2.1 (March 30, 2011)

  Bug fixes:
  - Implement atomic operations for x86/x64.  This fixes compilation failures
    for versions of gcc that are still in wide use.
  - Fix an assertion in arena_purge().

* 2.2.0 (March 22, 2011)

  This version incorporates several improvements to algorithms and data
  structures that tend to reduce fragmentation and increase speed.

  New features:
  - Add the "stats.cactive" mallctl.
  - Update pprof (from google-perftools 1.7).
  - Improve backtracing-related configuration logic, and add the
    --disable-prof-libgcc option.

  Bug fixes:
  - Change default symbol visibility from "internal", to "hidden", which
    decreases the overhead of library-internal function calls.
  - Fix symbol visibility so that it is also set on OS X.
  - Fix a build dependency regression caused by the introduction of the
    .pic.o suffix for PIC object files.
  - Add missing checks for mutex initialization failures.
  - Don't use libgcc-based backtracing except on x64, where it is known to
    work.
  - Fix deadlocks on OS X that were due to memory allocation in
    pthread_mutex_lock().
  - Heap profiling-specific fixes:
    + Fix memory corruption due to integer overflow in small region index
      computation, when using a small enough sample interval that profiling
      context pointers are stored in small run headers.
    + Fix a bootstrap ordering bug that only occurred with TLS disabled.
    + Fix a rallocm() rsize bug.
    + Fix error detection bugs for aligned memory allocation.

* 2.1.3 (March 14, 2011)

  Bug fixes:
  - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl
    fix for OS X in 2.1.2).
  - Fix a "thread.arena" mallctl bug.
  - Fix a thread cache stats merging bug.

* 2.1.2 (March 2, 2011)

  Bug fixes:
  - Fix "thread.{de,}allocatedp" mallctl for OS X.
  - Add missing jemalloc.a to build system.

* 2.1.1 (January 31, 2011)

  Bug fixes:
  - Fix aligned huge reallocation (affected allocm()).
  - Fix the ALLOCM_LG_ALIGN macro definition.
  - Fix a heap dumping deadlock.
  - Fix a "thread.arena" mallctl bug.
* 2.1.0 (December 3, 2010)

  This version incorporates some optimizations that can't quite be considered
  bug fixes.

  New features:
  - Use Linux's mremap(2) for huge object reallocation when possible.
  - Avoid locking in mallctl*() when possible.
  - Add the "thread.[de]allocatedp" mallctl's.
  - Convert the manual page source from roff to DocBook, and generate both
    roff and HTML manuals.

  Bug fixes:
  - Fix a crash due to incorrect bootstrap ordering.  This only impacted
    --enable-debug --enable-dss configurations.
  - Fix a minor statistics bug for mallctl("swap.avail", ...).

* 2.0.1 (October 29, 2010)

  Bug fixes:
  - Fix a race condition in heap profiling that could cause undefined
    behavior if "opt.prof_accum" were disabled.
  - Add missing mutex unlocks for some OOM error paths in the heap profiling
    code.
  - Fix a compilation error for non-C99 builds.

* 2.0.0 (October 24, 2010)

  This version focuses on the experimental *allocm() API, and on improved
  run-time configuration/introspection.  Nonetheless, numerous performance
  improvements are also included.

  New features:
  - Implement the experimental {,r,s,d}allocm() API, which provides a
    superset of the functionality available via malloc(), calloc(),
    posix_memalign(), realloc(), malloc_usable_size(), and free().  These
    functions can be used to allocate/reallocate aligned zeroed memory, ask
    for optional extra memory during reallocation, prevent object movement
    during reallocation, etc.
  - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is
    more human-readable, and more flexible.  For example:
      JEMALLOC_OPTIONS=AJP
    is now:
      MALLOC_CONF=abort:true,fill:true,stats_print:true
  - Port to Apple OS X.  Sponsored by Mozilla.
  - Make it possible for the application to control thread<-->arena mappings
    via the "thread.arena" mallctl.
  - Add compile-time support for all TLS-related functionality via pthreads
    TSD.  This is mainly of interest for OS X, which does not support TLS,
    but has a TSD implementation with similar performance.
  - Override memalign() and valloc() if they are provided by the system.
  - Add the "arenas.purge" mallctl, which can be used to synchronously purge
    all dirty unused pages.
  - Make cumulative heap profiling data optional, so that it is possible to
    limit the amount of memory consumed by heap profiling data structures.
  - Add per thread allocation counters that can be accessed via the
    "thread.allocated" and "thread.deallocated" mallctls.

  Incompatible changes:
  - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above).
  - Increase default backtrace depth from 4 to 128 for heap profiling.
  - Disable interval-based profile dumps by default.

  Bug fixes:
  - Remove bad assertions in fork handler functions.  These assertions could
    cause aborts for some combinations of configure settings.
  - Fix strerror_r() usage to deal with non-standard semantics in GNU libc.
  - Fix leak context reporting.  This bug tended to cause the number of
    contexts to be underreported (though the reported number of objects and
    bytes were correct).
  - Fix a realloc() bug for large in-place growing reallocation.  This bug
    could cause memory corruption, but it was hard to trigger.
  - Fix an allocation bug for small allocations that could be triggered if
    multiple threads raced to create a new run of backing pages.
  - Enhance the heap profiler to trigger samples based on usable size,
    rather than request size.
  - Fix a heap profiling bug due to sometimes losing track of requested
    object size for sampled objects.
* 1.0.3 (August 12, 2010)

  Bug fixes:
  - Fix the libunwind-based implementation of stack backtracing (used for
    heap profiling).  This bug could cause zero-length backtraces to be
    reported.
  - Add a missing mutex unlock in library initialization code.  If multiple
    threads raced to initialize malloc, some of them could end up permanently
    blocked.

* 1.0.2 (May 11, 2010)

  Bug fixes:
  - Fix junk filling of large objects, which could cause memory corruption.
  - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual
    memory limits could cause swap file configuration to fail.  Contributed
    by Jordan DeLong.

* 1.0.1 (April 14, 2010)

  Bug fixes:
  - Fix compilation when --enable-fill is specified.
  - Fix threads-related profiling bugs that affected accuracy and caused
    memory to be leaked during thread exit.
  - Fix dirty page purging race conditions that could cause crashes.
  - Fix crash in tcache flushing code during thread destruction.

* 1.0.0 (April 11, 2010)

  This release focuses on speed and run-time introspection.  Numerous
  algorithmic improvements make this release substantially faster than its
  predecessors.

  New features:
  - Implement autoconf-based configuration system.
  - Add mallctl*(), for the purposes of introspection and run-time
    configuration.
  - Make it possible for the application to manually flush a thread's cache,
    via the "tcache.flush" mallctl.
  - Base maximum dirty page count on proportion of active memory.
  - Compute various additional run-time statistics, including per size class
    statistics for large objects.
  - Expose malloc_stats_print(), which can be called repeatedly by the
    application.
  - Simplify the malloc_message() signature to only take one string argument,
    and incorporate an opaque data pointer argument for use by the
    application in combination with malloc_stats_print().
  - Add support for allocation backed by one or more swap files, and allow
    the application to disable over-commit if swap files are in use.
  - Implement allocation profiling and leak checking.

  Removed features:
  - Remove the dynamic arena rebalancing code, since thread-specific caching
    reduces its utility.

  Bug fixes:
  - Modify chunk allocation to work when address space layout randomization
    (ASLR) is in use.
  - Fix thread cleanup bugs related to TLS destruction.
  - Handle 0-size allocation requests in posix_memalign().
  - Fix a chunk leak.  The leaked chunks were never touched, so this impacted
    virtual memory usage, but not physical memory usage.

* linux_2008082[78]a (August 27/28, 2008)

  These snapshot releases are the simple result of incorporating
  Linux-specific support into the FreeBSD malloc sources.

--------------------------------------------------------------------------------
vim:filetype=text:textwidth=80

jemalloc-sys-0.3.2/jemalloc/config.stamp.in
jemalloc-sys-0.3.2/jemalloc/configure.ac

dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.68)
AC_INIT([Makefile.in])

AC_CONFIG_AUX_DIR([build-aux])

dnl ============================================================================
dnl Custom macro definitions.

dnl JE_CONCAT_VVV(r, a, b)
dnl
dnl Set $r to the concatenation of $a and $b, with a space separating them iff
dnl both $a and $b are non-empty.
AC_DEFUN([JE_CONCAT_VVV],
if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
  $1="[$]{$2}[$]{$3}"
else
  $1="[$]{$2} [$]{$3}"
fi
)

dnl JE_APPEND_VS(a, b)
dnl
dnl Set $a to the concatenation of $a and b, with a space separating them iff
dnl both $a and b are non-empty.
AC_DEFUN([JE_APPEND_VS],
  T_APPEND_V=$2
  JE_CONCAT_VVV($1, $1, T_APPEND_V)
)

CONFIGURE_CFLAGS=
SPECIFIED_CFLAGS="${CFLAGS}"
dnl JE_CFLAGS_ADD(cflag)
dnl
dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS
dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests.  This macro
dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS.
AC_DEFUN([JE_CFLAGS_ADD],
[
AC_MSG_CHECKING([whether compiler supports $1])
T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
JE_APPEND_VS(CONFIGURE_CFLAGS, $1)
JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[[
]], [[
    return 0;
]])],
              [je_cv_cflags_added=$1]
              AC_MSG_RESULT([yes]),
              [je_cv_cflags_added=]
              AC_MSG_RESULT([no])
              [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"]
)
JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
])

dnl JE_CFLAGS_SAVE()
dnl JE_CFLAGS_RESTORE()
dnl
dnl Save/restore CFLAGS.  Nesting is not supported.
AC_DEFUN([JE_CFLAGS_SAVE],
SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
)
AC_DEFUN([JE_CFLAGS_RESTORE],
CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
)

CONFIGURE_CXXFLAGS=
SPECIFIED_CXXFLAGS="${CXXFLAGS}"
dnl JE_CXXFLAGS_ADD(cxxflag)
AC_DEFUN([JE_CXXFLAGS_ADD],
[
AC_MSG_CHECKING([whether compiler supports $1])
T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[[
]], [[
    return 0;
]])],
              [je_cv_cxxflags_added=$1]
              AC_MSG_RESULT([yes]),
              [je_cv_cxxflags_added=]
              AC_MSG_RESULT([no])
              [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
)
AC_LANG_POP([C++])
JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
])

dnl JE_COMPILABLE(label, hcode, mcode, rvar)
dnl
dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
dnl cause failure.
AC_DEFUN([JE_COMPILABLE],
[
AC_CACHE_CHECK([whether $1 is compilable],
               [$4],
               [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2],
                                                [$3])],
                               [$4=yes],
                               [$4=no])])
])

dnl ============================================================================

CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
AC_SUBST([CONFIG])

dnl Library revision.
rev=2
AC_SUBST([rev])

srcroot=$srcdir
if test "x${srcroot}" = "x." ; then
  srcroot=""
else
  srcroot="${srcroot}/"
fi
AC_SUBST([srcroot])
abs_srcroot="`cd \"${srcdir}\"; pwd`/"
AC_SUBST([abs_srcroot])

objroot=""
AC_SUBST([objroot])
abs_objroot="`pwd`/"
AC_SUBST([abs_objroot])

dnl Munge install path variables.
if test "x$prefix" = "xNONE" ; then
  prefix="/usr/local"
fi
if test "x$exec_prefix" = "xNONE" ; then
  exec_prefix=$prefix
fi
PREFIX=$prefix
AC_SUBST([PREFIX])
BINDIR=`eval echo $bindir`
BINDIR=`eval echo $BINDIR`
AC_SUBST([BINDIR])
INCLUDEDIR=`eval echo $includedir`
INCLUDEDIR=`eval echo $INCLUDEDIR`
AC_SUBST([INCLUDEDIR])
LIBDIR=`eval echo $libdir`
LIBDIR=`eval echo $LIBDIR`
AC_SUBST([LIBDIR])
DATADIR=`eval echo $datadir`
DATADIR=`eval echo $DATADIR`
AC_SUBST([DATADIR])
MANDIR=`eval echo $mandir`
MANDIR=`eval echo $MANDIR`
AC_SUBST([MANDIR])
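dnl Illustrative sketch (not consumed by the build): the JE_*FLAGS macros
dnl above keep probed flags (CONFIGURE_CFLAGS) separate from user-specified
dnl ones (SPECIFIED_CFLAGS), so a rejected flag can be rolled back without
dnl clobbering the user's CFLAGS.  Assuming a compiler that accepts -pipe but
dnl rejects a hypothetical -bogus flag, the behavior is roughly:
dnl
dnl   CONFIGURE_CFLAGS=""; SPECIFIED_CFLAGS="-O2"   # user's original CFLAGS
dnl   JE_CFLAGS_ADD([-pipe])    # probe succeeds:  CFLAGS="-pipe -O2"
dnl   JE_CFLAGS_ADD([-bogus])   # probe fails: CONFIGURE_CFLAGS is restored,
dnl                             # so CFLAGS is regenerated as "-pipe -O2"
dnl
dnl i.e. CFLAGS is always rebuilt as "${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}".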
dnl Support for building documentation.
AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH])
if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
  DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
  DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
else
  dnl Documentation building will fail if this default gets used.
  DEFAULT_XSLROOT=""
fi
AC_ARG_WITH([xslroot],
  [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [
if test "x$with_xslroot" = "xno" ; then
  XSLROOT="${DEFAULT_XSLROOT}"
else
  XSLROOT="${with_xslroot}"
fi
],
  XSLROOT="${DEFAULT_XSLROOT}"
)
AC_SUBST([XSLROOT])

dnl If CFLAGS isn't defined, set CFLAGS to something reasonable.  Otherwise,
dnl just prevent autoconf from molesting CFLAGS.
CFLAGS=$CFLAGS
AC_PROG_CC

if test "x$GCC" != "xyes" ; then
  AC_CACHE_CHECK([whether compiler is MSVC],
                 [je_cv_msvc],
                 [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
                                                     [
#ifndef _MSC_VER
  int fail[-1];
#endif
])],
                                    [je_cv_msvc=yes],
                                    [je_cv_msvc=no])])
fi

dnl check if a cray prgenv wrapper compiler is being used
je_cv_cray_prgenv_wrapper=""
if test "x${PE_ENV}" != "x" ; then
  case "${CC}" in
    CC|cc)
      je_cv_cray_prgenv_wrapper="yes"
      ;;
    *)
      ;;
  esac
fi

AC_CACHE_CHECK([whether compiler is cray],
               [je_cv_cray],
               [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
                                                   [
#ifndef _CRAYC
  int fail[-1];
#endif
])],
                                  [je_cv_cray=yes],
                                  [je_cv_cray=no])])

if test "x${je_cv_cray}" = "xyes" ; then
  AC_CACHE_CHECK([whether cray compiler version is 8.4],
                 [je_cv_cray_84],
                 [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
                                                     [
#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
  int fail[-1];
#endif
])],
                                    [je_cv_cray_84=yes],
                                    [je_cv_cray_84=no])])
fi

if test "x$GCC" = "xyes" ; then
  JE_CFLAGS_ADD([-std=gnu11])
  if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then
    AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
  else
    JE_CFLAGS_ADD([-std=gnu99])
    if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then
      AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
    fi
  fi
  JE_CFLAGS_ADD([-Wall])
  JE_CFLAGS_ADD([-Wshorten-64-to-32])
  JE_CFLAGS_ADD([-Wsign-compare])
  JE_CFLAGS_ADD([-Wundef])
  JE_CFLAGS_ADD([-Wno-format-zero-length])
  JE_CFLAGS_ADD([-pipe])
  JE_CFLAGS_ADD([-g3])
elif test "x$je_cv_msvc" = "xyes" ; then
  CC="$CC -nologo"
  JE_CFLAGS_ADD([-Zi])
  JE_CFLAGS_ADD([-MT])
  JE_CFLAGS_ADD([-W3])
  JE_CFLAGS_ADD([-FS])
  JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat)
fi
if test "x$je_cv_cray" = "xyes" ; then
  dnl cray compiler 8.4 has an inlining bug
  if test "x$je_cv_cray_84" = "xyes" ; then
    JE_CFLAGS_ADD([-hipa2])
    JE_CFLAGS_ADD([-hnognu])
  fi
  dnl ignore unreachable code warning
  JE_CFLAGS_ADD([-hnomessage=128])
  dnl ignore redefinition of "malloc", "free", etc warning
  JE_CFLAGS_ADD([-hnomessage=1357])
fi
AC_SUBST([CONFIGURE_CFLAGS])
AC_SUBST([SPECIFIED_CFLAGS])
AC_SUBST([EXTRA_CFLAGS])
AC_PROG_CPP

AC_ARG_ENABLE([cxx],
  [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])],
if test "x$enable_cxx" = "xno" ; then
  enable_cxx="0"
else
  enable_cxx="1"
fi
,
enable_cxx="1"
)
if test "x$enable_cxx" = "x1" ; then
  dnl Require at least c++14, which is the first version to support sized
  dnl deallocation.  C++ support is not compiled otherwise.
  m4_include([m4/ax_cxx_compile_stdcxx.m4])
  AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
  if test "x${HAVE_CXX14}" = "x1" ; then
    JE_CXXFLAGS_ADD([-Wall])
    JE_CXXFLAGS_ADD([-g3])

    SAVED_LIBS="${LIBS}"
    JE_APPEND_VS(LIBS, -lstdc++)
    JE_COMPILABLE([libstdc++ linkage], [
#include <stdlib.h>
], [[
    int *arr = (int *)malloc(sizeof(int) * 42);
    if (arr == NULL)
        return 1;
]], [je_cv_libstdcxx])
    if test "x${je_cv_libstdcxx}" = "xno" ; then
      LIBS="${SAVED_LIBS}"
    fi
  else
    enable_cxx="0"
  fi
fi
AC_SUBST([enable_cxx])
AC_SUBST([CONFIGURE_CXXFLAGS])
AC_SUBST([SPECIFIED_CXXFLAGS])
AC_SUBST([EXTRA_CXXFLAGS])

AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
if test "x${ac_cv_big_endian}" = "x1" ; then
  AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
fi

if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
  JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99)
fi

if test "x${je_cv_msvc}" = "xyes" ; then
  LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
  AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
else
  AC_CHECK_SIZEOF([void *])
  if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
    LG_SIZEOF_PTR=3
  elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
    LG_SIZEOF_PTR=2
  else
    AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
  fi
fi
AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR])

AC_CHECK_SIZEOF([int])
if test "x${ac_cv_sizeof_int}" = "x8" ; then
  LG_SIZEOF_INT=3
elif test "x${ac_cv_sizeof_int}" = "x4" ; then
  LG_SIZEOF_INT=2
else
  AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}])
fi
AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT])

AC_CHECK_SIZEOF([long])
if test "x${ac_cv_sizeof_long}" = "x8" ; then
  LG_SIZEOF_LONG=3
elif test "x${ac_cv_sizeof_long}" = "x4" ; then
  LG_SIZEOF_LONG=2
else
  AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}])
fi
AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG])

AC_CHECK_SIZEOF([long long])
if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
  LG_SIZEOF_LONG_LONG=3
elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
  LG_SIZEOF_LONG_LONG=2
else
  AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
fi
AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG])

AC_CHECK_SIZEOF([intmax_t])
if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
  LG_SIZEOF_INTMAX_T=4
elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
  LG_SIZEOF_INTMAX_T=3
elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
  LG_SIZEOF_INTMAX_T=2
else
  AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}])
fi
AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T])

AC_CANONICAL_HOST
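dnl Illustrative sketch (not consumed by the build): the LG_SIZEOF_* values
dnl computed above are base-2 logarithms, so the C sources can recover the
dnl byte size by shifting.  On a typical LP64 target:
dnl
dnl   sizeof(void *) == 8  ->  LG_SIZEOF_PTR == 3   (1 << 3 == 8)
dnl   sizeof(int)    == 4  ->  LG_SIZEOF_INT == 2   (1 << 2 == 4)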
dnl CPU-specific settings.
CPU_SPINWAIT=""
case "${host_cpu}" in
  i686|x86_64)
    HAVE_CPU_SPINWAIT=1
    if test "x${je_cv_msvc}" = "xyes" ; then
      AC_CACHE_VAL([je_cv_pause_msvc],
        [JE_COMPILABLE([pause instruction MSVC], [],
                       [[_mm_pause(); return 0;]],
                       [je_cv_pause_msvc])])
      if test "x${je_cv_pause_msvc}" = "xyes" ; then
        CPU_SPINWAIT='_mm_pause()'
      fi
    else
      AC_CACHE_VAL([je_cv_pause],
        [JE_COMPILABLE([pause instruction], [],
                       [[__asm__ volatile("pause"); return 0;]],
                       [je_cv_pause])])
      if test "x${je_cv_pause}" = "xyes" ; then
        CPU_SPINWAIT='__asm__ volatile("pause")'
      fi
    fi
    ;;
  *)
    HAVE_CPU_SPINWAIT=0
    ;;
esac
AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT])
AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])

AC_ARG_WITH([lg_vaddr],
  [AS_HELP_STRING([--with-lg-vaddr=<lg-vaddr>],
   [Number of significant virtual address bits])],
  [LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"])

case "${host_cpu}" in
  aarch64)
    if test "x$LG_VADDR" = "xdetect"; then
      AC_MSG_CHECKING([number of significant virtual address bits])
      if test "x${LG_SIZEOF_PTR}" = "x2" ; then
        #aarch64 ILP32
        LG_VADDR=32
      else
        #aarch64 LP64
        LG_VADDR=48
      fi
      AC_MSG_RESULT([$LG_VADDR])
    fi
    ;;
  x86_64)
    if test "x$LG_VADDR" = "xdetect"; then
      AC_CACHE_CHECK([number of significant virtual address bits],
                     [je_cv_lg_vaddr],
                     AC_RUN_IFELSE([AC_LANG_PROGRAM(
[[
#include <stdio.h>
#ifdef _WIN32
#include <limits.h>
#include <intrin.h>
typedef unsigned __int32 uint32_t;
#else
#include <stdint.h>
#endif
]], [[
    uint32_t r[[4]];
    uint32_t eax_in = 0x80000008U;
#ifdef _WIN32
    __cpuid((int *)r, (int)eax_in);
#else
    asm volatile ("cpuid"
        : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]])
        : "a" (eax_in), "c" (0)
    );
#endif
    uint32_t eax_out = r[[0]];
    uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8);
    FILE *f = fopen("conftest.out", "w");
    if (f == NULL) {
        return 1;
    }
    if (vaddr > (sizeof(void *) << 3)) {
        vaddr = sizeof(void *) << 3;
    }
    fprintf(f, "%u", vaddr);
    fclose(f);
    return 0;
]])],
                                   [je_cv_lg_vaddr=`cat conftest.out`],
                                   [je_cv_lg_vaddr=error],
                                   [je_cv_lg_vaddr=57]))
      if test "x${je_cv_lg_vaddr}" != "x" ; then
        LG_VADDR="${je_cv_lg_vaddr}"
      fi
      if test "x${LG_VADDR}" != "xerror" ; then
        AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
      else
        AC_MSG_ERROR([cannot determine number of significant virtual address bits])
      fi
    fi
    ;;
  *)
    if test "x$LG_VADDR" = "xdetect"; then
      AC_MSG_CHECKING([number of significant virtual address bits])
      if test "x${LG_SIZEOF_PTR}" = "x3" ; then
        LG_VADDR=64
      elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
        LG_VADDR=32
      elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
        LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
      else
        AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
      fi
      AC_MSG_RESULT([$LG_VADDR])
    fi
    ;;
esac
AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])

LD_PRELOAD_VAR="LD_PRELOAD"
so="so"
importlib="${so}"
o="$ac_objext"
a="a"
exe="$ac_exeext"
libprefix="lib"
link_whole_archive="0"
DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
RPATH='-Wl,-rpath,$(1)'
SOREV="${so}.${rev}"
PIC_CFLAGS='-fPIC -DPIC'
CTARGET='-o $@'
LDTARGET='-o $@'
TEST_LD_MODE=
EXTRA_LDFLAGS=
ARFLAGS='crus'
AROUT=' $@'
CC_MM=1

if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
  TEST_LD_MODE='-dynamic'
fi

if test "x${je_cv_cray}" = "xyes" ; then
  CC_MM=
fi

AN_MAKEVAR([AR], [AC_PROG_AR])
AN_PROGRAM([ar], [AC_PROG_AR])
AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
AC_PROG_AR

AN_MAKEVAR([NM], [AC_PROG_NM])
AN_PROGRAM([nm], [AC_PROG_NM])
AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)])
AC_PROG_NM

AC_PROG_AWK
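dnl Illustrative note (not consumed by the build): CPUID leaf 0x80000008
dnl reports the number of linear (virtual) address bits in EAX bits 15:8,
dnl which is exactly what the x86_64 probe above extracts:
dnl
dnl   vaddr = (eax_out & 0x0000ff00U) >> 8;   /* e.g. 48 on most x86_64,
dnl                                              57 with 5-level paging */
dnl
dnl and the result is clamped to 8*sizeof(void *) so it never exceeds the
dnl pointer width.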
dnl Platform-specific settings.  abi and RPATH can probably be determined
dnl programmatically, but doing so is error-prone, which makes it generally
dnl not worth the trouble.
dnl
dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since
dnl the definitions need to be seen before any headers are included, which is
dnl a pain to make happen otherwise.
default_retain="0"
maps_coalesce="1"
DUMP_SYMS="${NM} -a"
SYM_PREFIX=""
case "${host}" in
  *-*-darwin* | *-*-ios*)
    abi="macho"
    RPATH=""
    LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
    so="dylib"
    importlib="${so}"
    force_tls="0"
    DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
    SOREV="${rev}.${so}"
    sbrk_deprecated="1"
    SYM_PREFIX="_"
    ;;
  *-*-freebsd*)
    abi="elf"
    AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
    force_lazy_lock="1"
    ;;
  *-*-dragonfly*)
    abi="elf"
    ;;
  *-*-openbsd*)
    abi="elf"
    force_tls="0"
    ;;
  *-*-bitrig*)
    abi="elf"
    ;;
  *-*-linux-android)
    dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
    JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
    abi="elf"
    AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
    AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
    AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
    AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
    AC_DEFINE([JEMALLOC_C11_ATOMICS])
    force_tls="0"
    if test "${LG_SIZEOF_PTR}" = "3"; then
      default_retain="1"
    fi
    ;;
  *-*-linux*)
    dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
    JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
    abi="elf"
    AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
    AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
    AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
    AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
    AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
    if test "${LG_SIZEOF_PTR}" = "3"; then
      default_retain="1"
    fi
    ;;
  *-*-kfreebsd*)
    dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
    JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
    abi="elf"
    AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
    AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
    AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
    AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
    ;;
  *-*-netbsd*)
    AC_MSG_CHECKING([ABI])
    AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[[#ifdef __ELF__
/* ELF */
#else
#error aout
#endif
]])],
                      [abi="elf"],
                      [abi="aout"])
    AC_MSG_RESULT([$abi])
    ;;
  *-*-solaris2*)
    abi="elf"
    RPATH='-Wl,-R,$(1)'
    dnl Solaris needs this for sigwait().
    JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS)
    JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl)
    ;;
  *-ibm-aix*)
    if test "${LG_SIZEOF_PTR}" = "3"; then
      dnl 64bit AIX
      LD_PRELOAD_VAR="LDR_PRELOAD64"
    else
      dnl 32bit AIX
      LD_PRELOAD_VAR="LDR_PRELOAD"
    fi
    abi="xcoff"
    ;;
  *-*-mingw* | *-*-cygwin*)
    abi="pecoff"
    force_tls="0"
    maps_coalesce="0"
    RPATH=""
    so="dll"
    if test "x$je_cv_msvc" = "xyes" ; then
      importlib="lib"
      DSO_LDFLAGS="-LD"
      EXTRA_LDFLAGS="-link -DEBUG"
      CTARGET='-Fo$@'
      LDTARGET='-Fe$@'
      AR='lib'
      ARFLAGS='-nologo -out:'
      AROUT='$@'
      CC_MM=
    else
      importlib="${so}"
      DSO_LDFLAGS="-shared"
      link_whole_archive="1"
    fi
    case "${host}" in
      *-*-cygwin*)
        DUMP_SYMS="dumpbin /SYMBOLS"
        ;;
      *)
        ;;
    esac
    a="lib"
    libprefix=""
    SOREV="${so}"
    PIC_CFLAGS=""
    ;;
  *)
    AC_MSG_RESULT([Unsupported operating system: ${host}])
    abi="elf"
    ;;
esac

JEMALLOC_USABLE_SIZE_CONST=const
AC_CHECK_HEADERS([malloc.h], [
  AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument])
  AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
    [#include <malloc.h>
     #include <stddef.h>
     size_t malloc_usable_size(const void *ptr);
    ],
    [])], [
      AC_MSG_RESULT([yes])
    ], [
      JEMALLOC_USABLE_SIZE_CONST=
      AC_MSG_RESULT([no])
    ])
])
AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST])
AC_SUBST([abi])
AC_SUBST([RPATH])
AC_SUBST([LD_PRELOAD_VAR])
AC_SUBST([so])
AC_SUBST([importlib])
AC_SUBST([o])
AC_SUBST([a])
AC_SUBST([exe])
AC_SUBST([libprefix])
AC_SUBST([link_whole_archive])
AC_SUBST([DSO_LDFLAGS])
AC_SUBST([EXTRA_LDFLAGS])
AC_SUBST([SOREV])
AC_SUBST([PIC_CFLAGS])
AC_SUBST([CTARGET])
AC_SUBST([LDTARGET])
AC_SUBST([TEST_LD_MODE])
AC_SUBST([MKLIB])
AC_SUBST([ARFLAGS])
AC_SUBST([AROUT])
AC_SUBST([DUMP_SYMS])
AC_SUBST([CC_MM])

dnl Determine whether libm must be linked to use e.g. log(3).
AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])])
if test "x$ac_cv_search_log" != "xnone required" ; then
  LM="$ac_cv_search_log"
else
  LM=
fi
AC_SUBST(LM)

JE_COMPILABLE([__attribute__ syntax],
              [static __attribute__((unused)) void foo(void){}],
              [],
              [je_cv_attribute])
if test "x${je_cv_attribute}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ])
  if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
    JE_CFLAGS_ADD([-fvisibility=hidden])
    JE_CXXFLAGS_ADD([-fvisibility=hidden])
  fi
fi

dnl Check for tls_model attribute support (clang 3.0 still lacks support).
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([tls_model attribute], [],
              [static __thread int
               __attribute__((tls_model("initial-exec"), unused)) foo;
               foo = 0;],
              [je_cv_tls_model])
JE_CFLAGS_RESTORE()
dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for
dnl --disable-initial-exec-tls)

dnl Check for alloc_size attribute support.
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
              [void *foo(size_t size) __attribute__((alloc_size(1)));],
              [je_cv_alloc_size])
JE_CFLAGS_RESTORE()
if test "x${je_cv_alloc_size}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ])
fi

dnl Check for format(gnu_printf, ...) attribute support.
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
              [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
              [je_cv_format_gnu_printf])
JE_CFLAGS_RESTORE()
if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ])
fi
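dnl Illustrative sketch (not consumed by the build): JEMALLOC_USABLE_SIZE_CONST
dnl as detected above feeds the public prototype, so on systems whose malloc.h
dnl declares a non-const argument it expands to nothing, keeping the two
dnl declarations compatible:
dnl
dnl   size_t malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr);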
dnl Check for format(printf, ...) attribute support.
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
              [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
              [je_cv_format_printf])
JE_CFLAGS_RESTORE()
if test "x${je_cv_format_printf}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ])
fi

dnl Support optional additions to rpath.
AC_ARG_WITH([rpath],
  [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
if test "x$with_rpath" = "xno" ; then
  RPATH_EXTRA=
else
  RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
fi,
  RPATH_EXTRA=
)
AC_SUBST([RPATH_EXTRA])

dnl Disable rules that do automatic regeneration of configure output by
dnl default.
AC_ARG_ENABLE([autogen],
  [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])],
if test "x$enable_autogen" = "xno" ; then
  enable_autogen="0"
else
  enable_autogen="1"
fi
,
enable_autogen="0"
)
AC_SUBST([enable_autogen])

AC_PROG_INSTALL
AC_PROG_RANLIB
AC_PATH_PROG([LD], [ld], [false], [$PATH])
AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])

dnl Perform no name mangling by default.
AC_ARG_WITH([mangling],
  [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
  [mangling_map="$with_mangling"], [mangling_map=""])

dnl Do not prefix public APIs by default.
AC_ARG_WITH([jemalloc_prefix],
  [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])],
  [JEMALLOC_PREFIX="$with_jemalloc_prefix"],
  [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
  JEMALLOC_PREFIX=""
else
  JEMALLOC_PREFIX="je_"
fi]
)
if test "x$JEMALLOC_PREFIX" = "x" ; then
  AC_DEFINE([JEMALLOC_IS_MALLOC])
else
  JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
  AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
  AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
fi
AC_SUBST([JEMALLOC_PREFIX])
AC_SUBST([JEMALLOC_CPREFIX])

AC_ARG_WITH([export],
  [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
  [if test "x$with_export" = "xno"; then
  AC_DEFINE([JEMALLOC_EXPORT],[])
fi]
)

public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx"

dnl Check for additional platform-specific public API functions.
AC_CHECK_FUNC([memalign],
              [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ])
               public_syms="${public_syms} memalign"])
AC_CHECK_FUNC([valloc],
              [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ])
               public_syms="${public_syms} valloc"])
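dnl Illustrative sketch (not consumed by the build): with a non-empty prefix,
dnl e.g. --with-jemalloc-prefix=je_, the entry points in public_syms are
dnl emitted under the prefixed names, so an application calls the renamed
dnl symbols rather than replacing libc's:
dnl
dnl   void *p = je_malloc(42);   /* instead of malloc(42) */
dnl   je_free(p);
dnl
dnl With an empty prefix, JEMALLOC_IS_MALLOC is defined and jemalloc takes
dnl over the standard allocator symbols directly.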
dnl Check for allocator-related functions that should be wrapped.
wrap_syms=
if test "x${JEMALLOC_PREFIX}" = "x" ; then
  AC_CHECK_FUNC([__libc_calloc],
                [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ])
                 wrap_syms="${wrap_syms} __libc_calloc"])
  AC_CHECK_FUNC([__libc_free],
                [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ])
                 wrap_syms="${wrap_syms} __libc_free"])
  AC_CHECK_FUNC([__libc_malloc],
                [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ])
                 wrap_syms="${wrap_syms} __libc_malloc"])
  AC_CHECK_FUNC([__libc_memalign],
                [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ])
                 wrap_syms="${wrap_syms} __libc_memalign"])
  AC_CHECK_FUNC([__libc_realloc],
                [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ])
                 wrap_syms="${wrap_syms} __libc_realloc"])
  AC_CHECK_FUNC([__libc_valloc],
                [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ])
                 wrap_syms="${wrap_syms} __libc_valloc"])
  AC_CHECK_FUNC([__posix_memalign],
                [AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ])
                 wrap_syms="${wrap_syms} __posix_memalign"])
fi

case "${host}" in
  *-*-mingw* | *-*-cygwin*)
    wrap_syms="${wrap_syms} tls_callback"
    ;;
  *)
    ;;
esac

dnl Mangle library-private APIs.
AC_ARG_WITH([private_namespace],
  [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
  [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"],
  [JEMALLOC_PRIVATE_NAMESPACE="je_"]
)
AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE])
private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
AC_SUBST([private_namespace])

dnl Do not add suffix to installed files by default.
AC_ARG_WITH([install_suffix],
  [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])],
  [INSTALL_SUFFIX="$with_install_suffix"],
  [INSTALL_SUFFIX=]
)
install_suffix="$INSTALL_SUFFIX"
AC_SUBST([install_suffix])

dnl Specify default malloc_conf.
AC_ARG_WITH([malloc_conf],
  [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
  [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
  [JEMALLOC_CONFIG_MALLOC_CONF=""]
)
config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"])
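dnl Illustrative sketch (not consumed by the build): --with-malloc-conf bakes
dnl a default option string into the library, e.g.
dnl
dnl   ./configure --with-malloc-conf=narenas:2,tcache:false
dnl
dnl which becomes the compile-time default that later sources, such as the
dnl MALLOC_CONF environment variable, can still override at run time.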
dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
dnl jemalloc_protos_jet.h easy.
je_="je_"
AC_SUBST([je_])

cfgoutputs_in="Makefile.in"
cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in"
cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"

cfgoutputs_out="Makefile"
cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h"
cfgoutputs_out="${cfgoutputs_out} test/test.sh"
cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"

cfgoutputs_tup="Makefile"
cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h"
cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"

cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh"
cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh"
cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"

cfghdrs_out="include/jemalloc/jemalloc_defs.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h"
include/jemalloc/jemalloc_protos_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" dnl Do not compile with debugging by default. AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug], [Build debugging code])], [if test "x$enable_debug" = "xno" ; then enable_debug="0" else enable_debug="1" fi ], [enable_debug="0"] ) if test "x$enable_debug" = "x1" ; then AC_DEFINE([JEMALLOC_DEBUG], [ ]) fi if test "x$enable_debug" = "x1" ; then AC_DEFINE([JEMALLOC_DEBUG], [ ]) fi AC_SUBST([enable_debug]) dnl Only optimize if not debugging. if test "x$enable_debug" = "x0" ; then if test "x$GCC" = "xyes" ; then JE_CFLAGS_ADD([-O3]) JE_CXXFLAGS_ADD([-O3]) JE_CFLAGS_ADD([-funroll-loops]) elif test "x$je_cv_msvc" = "xyes" ; then JE_CFLAGS_ADD([-O2]) JE_CXXFLAGS_ADD([-O2]) else JE_CFLAGS_ADD([-O]) JE_CXXFLAGS_ADD([-O]) fi fi dnl Enable statistics calculation by default. AC_ARG_ENABLE([stats], [AS_HELP_STRING([--disable-stats], [Disable statistics calculation/reporting])], [if test "x$enable_stats" = "xno" ; then enable_stats="0" else enable_stats="1" fi ], [enable_stats="1"] ) if test "x$enable_stats" = "x1" ; then AC_DEFINE([JEMALLOC_STATS], [ ]) fi AC_SUBST([enable_stats]) dnl Do not enable profiling by default. AC_ARG_ENABLE([prof], [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])], [if test "x$enable_prof" = "xno" ; then enable_prof="0" else enable_prof="1" fi ], [enable_prof="0"] ) if test "x$enable_prof" = "x1" ; then backtrace_method="" else backtrace_method="N/A" fi AC_ARG_ENABLE([prof-libunwind], [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])], [if test "x$enable_prof_libunwind" = "xno" ; then enable_prof_libunwind="0" else enable_prof_libunwind="1" fi ], [enable_prof_libunwind="0"] ) AC_ARG_WITH([static_libunwind], [AS_HELP_STRING([--with-static-libunwind=], [Path to static libunwind library; use rather than dynamically linking])], if test "x$with_static_libunwind" = "xno" ; then LUNWIND="-lunwind" else if test ! 
-f "$with_static_libunwind" ; then AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind]) fi LUNWIND="$with_static_libunwind" fi, LUNWIND="-lunwind" ) if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) if test "x$LUNWIND" = "x-lunwind" ; then AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)], [enable_prof_libunwind="0"]) else JE_APPEND_VS(LIBS, $LUNWIND) fi if test "x${enable_prof_libunwind}" = "x1" ; then backtrace_method="libunwind" AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ]) fi fi AC_ARG_ENABLE([prof-libgcc], [AS_HELP_STRING([--disable-prof-libgcc], [Do not use libgcc for backtracing])], [if test "x$enable_prof_libgcc" = "xno" ; then enable_prof_libgcc="0" else enable_prof_libgcc="1" fi ], [enable_prof_libgcc="1"] ) if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ -a "x$GCC" = "xyes" ; then AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) if test "x${enable_prof_libgcc}" = "x1" ; then AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"]) fi if test "x${enable_prof_libgcc}" = "x1" ; then backtrace_method="libgcc" AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) fi else enable_prof_libgcc="0" fi AC_ARG_ENABLE([prof-gcc], [AS_HELP_STRING([--disable-prof-gcc], [Do not use gcc intrinsics for backtracing])], [if test "x$enable_prof_gcc" = "xno" ; then enable_prof_gcc="0" else enable_prof_gcc="1" fi ], [enable_prof_gcc="1"] ) if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ -a "x$GCC" = "xyes" ; then JE_CFLAGS_ADD([-fno-omit-frame-pointer]) backtrace_method="gcc intrinsics" AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) else enable_prof_gcc="0" fi if test "x$backtrace_method" = "x" ; then backtrace_method="none (disabling profiling)" enable_prof="0" fi AC_MSG_CHECKING([configured backtracing method]) AC_MSG_RESULT([$backtrace_method]) if test "x$enable_prof" = "x1" ; then dnl Heap profiling uses the log(3) function. JE_APPEND_VS(LIBS, $LM) AC_DEFINE([JEMALLOC_PROF], [ ]) fi AC_SUBST([enable_prof]) dnl Indicate whether adjacent virtual memory mappings automatically coalesce dnl (and fragment on demand). if test "x${maps_coalesce}" = "x1" ; then AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ]) fi dnl Indicate whether to retain memory (rather than using munmap()) by default. if test "x$default_retain" = "x1" ; then AC_DEFINE([JEMALLOC_RETAIN], [ ]) fi dnl Enable allocation from DSS if supported by the OS. have_dss="1" dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support. AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) if test "x$have_sbrk" = "x1" ; then if test "x$sbrk_deprecated" = "x1" ; then AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated]) have_dss="0" fi else have_dss="0" fi if test "x$have_dss" = "x1" ; then AC_DEFINE([JEMALLOC_DSS], [ ]) fi dnl Support the junk/zero filling option by default. AC_ARG_ENABLE([fill], [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])], [if test "x$enable_fill" = "xno" ; then enable_fill="0" else enable_fill="1" fi ], [enable_fill="1"] ) if test "x$enable_fill" = "x1" ; then AC_DEFINE([JEMALLOC_FILL], [ ]) fi AC_SUBST([enable_fill]) dnl Disable utrace(2)-based tracing by default. 
dnl Disable utrace(2)-based tracing by default.
AC_ARG_ENABLE([utrace],
  [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])],
[if test "x$enable_utrace" = "xno" ; then
  enable_utrace="0"
else
  enable_utrace="1"
fi
],
[enable_utrace="0"]
)
JE_COMPILABLE([utrace(2)], [
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
], [
    utrace((void *)0, 0);
], [je_cv_utrace])
if test "x${je_cv_utrace}" = "xno" ; then
  enable_utrace="0"
fi
if test "x$enable_utrace" = "x1" ; then
  AC_DEFINE([JEMALLOC_UTRACE], [ ])
fi
AC_SUBST([enable_utrace])

dnl Do not support the xmalloc option by default.
AC_ARG_ENABLE([xmalloc],
  [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
[if test "x$enable_xmalloc" = "xno" ; then
  enable_xmalloc="0"
else
  enable_xmalloc="1"
fi
],
[enable_xmalloc="0"]
)
if test "x$enable_xmalloc" = "x1" ; then
  AC_DEFINE([JEMALLOC_XMALLOC], [ ])
fi
AC_SUBST([enable_xmalloc])

dnl Support cache-oblivious allocation alignment by default.
AC_ARG_ENABLE([cache-oblivious],
  [AS_HELP_STRING([--disable-cache-oblivious],
                  [Disable support for cache-oblivious allocation alignment])],
[if test "x$enable_cache_oblivious" = "xno" ; then
  enable_cache_oblivious="0"
else
  enable_cache_oblivious="1"
fi
],
[enable_cache_oblivious="1"]
)
if test "x$enable_cache_oblivious" = "x1" ; then
  AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ])
fi
AC_SUBST([enable_cache_oblivious])

dnl Do not log by default.
AC_ARG_ENABLE([log],
  [AS_HELP_STRING([--enable-log], [Support debug logging])],
[if test "x$enable_log" = "xno" ; then
  enable_log="0"
else
  enable_log="1"
fi
],
[enable_log="0"]
)
if test "x$enable_log" = "x1" ; then
  AC_DEFINE([JEMALLOC_LOG], [ ])
fi
AC_SUBST([enable_log])

JE_COMPILABLE([a program using __builtin_unreachable], [
void foo (void) {
  __builtin_unreachable();
}
], [
    {
        foo();
    }
], [je_cv_gcc_builtin_unreachable])
if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable])
else
  AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort])
fi
dnl ============================================================================
dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found.
dnl One of those two functions should (theoretically) exist on all platforms
dnl that jemalloc currently has a chance of functioning on without modification.
dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
dnl ffsl() or __builtin_ffsl() are defined, respectively.
JE_COMPILABLE([a program using __builtin_ffsl], [
#include <stdio.h>
#include <strings.h>
#include <string.h>
], [
    {
        int rv = __builtin_ffsl(0x08);
        printf("%d\n", rv);
    }
], [je_cv_gcc_builtin_ffsl])
if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll])
  AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl])
  AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs])
else
  JE_COMPILABLE([a program using ffsl], [
#include <stdio.h>
#include <strings.h>
#include <string.h>
], [
    {
        int rv = ffsl(0x08);
        printf("%d\n", rv);
    }
], [je_cv_function_ffsl])
  if test "x${je_cv_function_ffsl}" = "xyes" ; then
    AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll])
    AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl])
    AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs])
  else
    AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()])
  fi
fi

AC_ARG_WITH([lg_quantum],
  [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
   [Base 2 log of minimum allocation alignment])],
  [LG_QUANTA="$with_lg_quantum"],
  [LG_QUANTA="3 4"])
if test "x$with_lg_quantum" != "x" ; then
  AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum])
fi

AC_ARG_WITH([lg_page],
  [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
  [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
if test "x$LG_PAGE" = "xdetect"; then
  AC_CACHE_CHECK([LG_PAGE],
                 [je_cv_lg_page],
                 AC_RUN_IFELSE([AC_LANG_PROGRAM(
[[
#include <strings.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <stdio.h>
]],
[[
    int result;
    FILE *f;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwPageSize;
#else
    result = sysconf(_SC_PAGESIZE);
#endif
    if (result == -1) {
        return 1;
    }
    result = JEMALLOC_INTERNAL_FFSL(result) - 1;

    f = fopen("conftest.out", "w");
    if (f == NULL) {
        return 1;
    }
    fprintf(f, "%d", result);
    fclose(f);

    return 0;
]])],
                               [je_cv_lg_page=`cat conftest.out`],
                               [je_cv_lg_page=undefined],
                               [je_cv_lg_page=12]))
fi
if test "x${je_cv_lg_page}" != "x" ; then
  LG_PAGE="${je_cv_lg_page}"
fi
if test "x${LG_PAGE}" != "xundefined" ; then
  AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE])
else
  AC_MSG_ERROR([cannot determine value for LG_PAGE])
fi

AC_ARG_WITH([lg_hugepage],
  [AS_HELP_STRING([--with-lg-hugepage=<lg-hugepage>],
   [Base 2 log of system huge page size])],
  [je_cv_lg_hugepage="${with_lg_hugepage}"],
  [je_cv_lg_hugepage=""])
if test "x${je_cv_lg_hugepage}" = "x" ; then
  dnl Look in /proc/meminfo (Linux-specific) for information on the default huge
  dnl page size, if any.  The relevant line looks like:
  dnl
  dnl   Hugepagesize:       2048 kB
  if test -e "/proc/meminfo" ; then
    hpsk=[`cat /proc/meminfo 2>/dev/null | \
          grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \
          awk '{print $2}'`]
    if test "x${hpsk}" != "x" ; then
      je_cv_lg_hugepage=10
      while test "${hpsk}" -gt 1 ; do
        hpsk="$((hpsk / 2))"
        je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))"
      done
    fi
  fi

  dnl Set default if unable to automatically configure.
  if test "x${je_cv_lg_hugepage}" = "x" ; then
    je_cv_lg_hugepage=21
  fi
fi
if test "x${LG_PAGE}" != "xundefined" -a \
        "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then
  AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})])
fi
AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}])

AC_ARG_WITH([lg_page_sizes],
  [AS_HELP_STRING([--with-lg-page-sizes=<lg-page-sizes>],
   [Base 2 logs of system page sizes to support])],
  [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"])
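dnl Illustrative note (not consumed by the build): both probes above compute
dnl base-2 logs.  A 4096-byte page gives ffsl(4096) - 1 == 12, so LG_PAGE=12;
dnl a "Hugepagesize: 2048 kB" line halves 2048 down to 1 in 11 steps starting
dnl from 10, so LG_HUGEPAGE=21 (2^21 bytes == 2 MiB).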
dnl ============================================================================
dnl jemalloc configuration.
dnl

AC_ARG_WITH([version],
  [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>],
   [Version string])],
  [
    echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
    if test $? -eq 0 ; then
      echo "$with_version" > "${objroot}VERSION"
    else
      echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null
      if test $? -ne 0 ; then
        AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION])
      fi
    fi
  ], [
    dnl Set VERSION if source directory is inside a git repository.
    if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
      dnl Pattern globs aren't powerful enough to match both single- and
      dnl double-digit version numbers, so iterate over patterns to support up
      dnl to version 99.99.99 without any accidental matches.
      for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
                     '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
                     '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
                     '[0-9][0-9].[0-9][0-9].[0-9]' \
                     '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
        (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
        if test $? -eq 0 ; then
          mv "${objroot}VERSION.tmp" "${objroot}VERSION"
          break
        fi
      done
    fi
    rm -f "${objroot}VERSION.tmp"
  ])

if test ! -e "${objroot}VERSION" ; then
  if test ! -e "${srcroot}VERSION" ; then
    AC_MSG_RESULT(
      [Missing VERSION file, and unable to generate it; creating bogus VERSION])
    echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
  else
    cp ${srcroot}VERSION ${objroot}VERSION
  fi
fi
jemalloc_version=`cat "${objroot}VERSION"`
jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'`
jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'`
AC_SUBST([jemalloc_version])
AC_SUBST([jemalloc_version_major])
AC_SUBST([jemalloc_version_minor])
AC_SUBST([jemalloc_version_bugfix])
AC_SUBST([jemalloc_version_nrev])
AC_SUBST([jemalloc_version_gid])

dnl ============================================================================
dnl Configure pthreads.

if test "x$abi" != "xpecoff" ; then
  AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ])
  AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
  dnl Some systems may embed pthreads functionality in libc; check for
  dnl libpthread first, but try libc too before failing.
  AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -lpthread)],
               [AC_SEARCH_LIBS([pthread_create], , ,
                               AC_MSG_ERROR([libpthread is missing]))])
  wrap_syms="${wrap_syms} pthread_create"
  have_pthread="1"
  dnl Check if we have dlsym support.
  have_dlsym="1"
  AC_CHECK_HEADERS([dlfcn.h],
    AC_CHECK_FUNC([dlsym], [],
      [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]),
    [have_dlsym="0"])
  if test "x$have_dlsym" = "x1" ; then
    AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ])
  fi
  JE_COMPILABLE([pthread_atfork(3)], [
#include <pthread.h>
], [
    pthread_atfork((void *)0, (void *)0, (void *)0);
], [je_cv_pthread_atfork])
  if test "x${je_cv_pthread_atfork}" = "xyes" ; then
    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ])
  fi
  dnl Check if pthread_setname_np is available with the expected API.
  JE_COMPILABLE([pthread_setname_np(3)], [
#include <pthread.h>
], [
    pthread_setname_np(pthread_self(), "setname_test");
], [je_cv_pthread_setname_np])
  if test "x${je_cv_pthread_setname_np}" = "xyes" ; then
    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ])
  fi
fi

JE_APPEND_VS(CPPFLAGS, -D_REENTRANT)

dnl Check whether clock_gettime(2) is in libc or librt.
AC_SEARCH_LIBS([clock_gettime], [rt])

dnl Cray wrapper compiler often adds `-lrt` when using `-static`.  Check with
dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc
if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
  if test "$ac_cv_search_clock_gettime" != "-lrt"; then
    JE_CFLAGS_SAVE()

    unset ac_cv_search_clock_gettime
    JE_CFLAGS_ADD([-dynamic])
    AC_SEARCH_LIBS([clock_gettime], [rt])

    JE_CFLAGS_RESTORE()
  fi
fi

dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific).
JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [
#include <time.h>
], [
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
], [je_cv_clock_monotonic_coarse])
if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE])
fi

dnl check for CLOCK_MONOTONIC.
JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [
#include <unistd.h>
#include <time.h>
], [
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
#  error _POSIX_MONOTONIC_CLOCK missing/invalid
#endif
], [je_cv_clock_monotonic])
if test "x${je_cv_clock_monotonic}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC])
fi

dnl Check for mach_absolute_time().
JE_COMPILABLE([mach_absolute_time()], [
#include <mach/mach_time.h>
], [
    mach_absolute_time();
], [je_cv_mach_absolute_time])
if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
  AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME])
fi

dnl Use syscall(2) (if available) by default.
AC_ARG_ENABLE([syscall],
  [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])],
[if test "x$enable_syscall" = "xno" ; then
  enable_syscall="0"
else
  enable_syscall="1"
fi
],
[enable_syscall="1"]
)
if test "x$enable_syscall" = "x1" ; then
  dnl Check if syscall(2) is usable.  Treat warnings as errors, so that e.g. OS
  dnl X 10.12's deprecation warning prevents use.
  JE_CFLAGS_SAVE()
  JE_CFLAGS_ADD([-Werror])
  JE_COMPILABLE([syscall(2)], [
#include <sys/syscall.h>
#include <unistd.h>
], [
    syscall(SYS_write, 2, "hello", 5);
],
                [je_cv_syscall])
  JE_CFLAGS_RESTORE()
  if test "x$je_cv_syscall" = "xyes" ; then
    AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ])
  fi
fi

dnl Check if the GNU-specific secure_getenv function exists.
AC_CHECK_FUNC([secure_getenv],
              [have_secure_getenv="1"],
              [have_secure_getenv="0"]
             )
if test "x$have_secure_getenv" = "x1" ; then
  AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ])
fi

dnl Check if the GNU-specific sched_getcpu function exists.
AC_CHECK_FUNC([sched_getcpu],
              [have_sched_getcpu="1"],
              [have_sched_getcpu="0"]
             )
if test "x$have_sched_getcpu" = "x1" ; then
  AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ])
fi

dnl Check if the GNU-specific sched_setaffinity function exists.
AC_CHECK_FUNC([sched_setaffinity],
              [have_sched_setaffinity="1"],
              [have_sched_setaffinity="0"]
             )
if test "x$have_sched_setaffinity" = "x1" ; then
  AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ])
fi

dnl Check if the Solaris/BSD issetugid function exists.
AC_CHECK_FUNC([issetugid],
              [have_issetugid="1"],
              [have_issetugid="0"]
             )
if test "x$have_issetugid" = "x1" ; then
  AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ])
fi
If so, use dnl it rather than pthreads TSD cleanup functions to support cleanup during dnl thread exit, in order to avoid pthreads library recursion during dnl bootstrapping. AC_CHECK_FUNC([_malloc_thread_cleanup], [have__malloc_thread_cleanup="1"], [have__malloc_thread_cleanup="0"] ) if test "x$have__malloc_thread_cleanup" = "x1" ; then AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) wrap_syms="${wrap_syms} _malloc_thread_cleanup" force_tls="1" fi dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If dnl so, mutex initialization causes allocation, and we need to implement this dnl callback function in order to prevent recursive allocation. AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], [have__pthread_mutex_init_calloc_cb="1"], [have__pthread_mutex_init_calloc_cb="0"] ) if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork" fi dnl Disable lazy locking by default. AC_ARG_ENABLE([lazy_lock], [AS_HELP_STRING([--enable-lazy-lock], [Enable lazy locking (only lock when multi-threaded)])], [if test "x$enable_lazy_lock" = "xno" ; then enable_lazy_lock="0" else enable_lazy_lock="1" fi ], [enable_lazy_lock=""] ) if test "x${enable_lazy_lock}" = "x" ; then if test "x${force_lazy_lock}" = "x1" ; then AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) enable_lazy_lock="1" else enable_lazy_lock="0" fi fi if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented]) enable_lazy_lock="0" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$have_dlsym" = "x1" ; then AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) else AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.]) fi fi AC_SUBST([enable_lazy_lock]) dnl Automatically configure TLS. if test "x${force_tls}" = "x1" ; then enable_tls="1" elif test "x${force_tls}" = "x0" ; then enable_tls="0" else enable_tls="1" fi if test "x${enable_tls}" = "x1" ; then AC_MSG_CHECKING([for TLS]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[ __thread int x; ]], [[ x = 42; return 0; ]])], AC_MSG_RESULT([yes]), AC_MSG_RESULT([no]) enable_tls="0") else enable_tls="0" fi AC_SUBST([enable_tls]) if test "x${enable_tls}" = "x1" ; then AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) fi dnl ============================================================================ dnl Check for C11 atomics. JE_COMPILABLE([C11 atomics], [ #include #if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #include #else #error Atomics not available #endif ], [ uint64_t *p = (uint64_t *)0; uint64_t x = 1; volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; uint64_t r = atomic_fetch_add(a, x) + x; return r == 0; ], [je_cv_c11_atomics]) if test "x${je_cv_c11_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_C11_ATOMICS]) fi dnl ============================================================================ dnl Check for GCC-style __atomic atomics. JE_COMPILABLE([GCC __atomic atomics], [ ], [ int x = 0; int val = 1; int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); int after_add = x; return after_add == 1; ], [je_cv_gcc_atomic_atomics]) if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS]) fi dnl ============================================================================ dnl Check for GCC-style __sync atomics. 
JE_COMPILABLE([GCC __sync atomics], [ ], [ int x = 0; int before_add = __sync_fetch_and_add(&x, 1); int after_add = x; return (before_add == 0) && (after_add == 1); ], [je_cv_gcc_sync_atomics]) if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS]) fi dnl ============================================================================ dnl Check for atomic(3) operations as provided on Darwin. dnl We need this not for the atomic operations (which are provided above), but dnl rather for the OSSpinLock type it exposes. JE_COMPILABLE([Darwin OSAtomic*()], [ #include #include ], [ { int32_t x32 = 0; volatile int32_t *x32p = &x32; OSAtomicAdd32(1, x32p); } { int64_t x64 = 0; volatile int64_t *x64p = &x64; OSAtomicAdd64(1, x64p); } ], [je_cv_osatomic]) if test "x${je_cv_osatomic}" = "xyes" ; then AC_DEFINE([JEMALLOC_OSATOMIC], [ ]) fi dnl ============================================================================ dnl Check for madvise(2). JE_COMPILABLE([madvise(2)], [ #include ], [ madvise((void *)0, 0, 0); ], [je_cv_madvise]) if test "x${je_cv_madvise}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ]) dnl Check for madvise(..., MADV_FREE). JE_COMPILABLE([madvise(..., MADV_FREE)], [ #include ], [ madvise((void *)0, 0, MADV_FREE); ], [je_cv_madv_free]) if test "x${je_cv_madv_free}" = "xyes" ; then AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) elif test "x${je_cv_madvise}" = "xyes" ; then case "${host_cpu}" in i686|x86_64) case "${host}" in *-*-linux*) AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ]) ;; esac ;; esac fi dnl Check for madvise(..., MADV_DONTNEED). JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [ #include ], [ madvise((void *)0, 0, MADV_DONTNEED); ], [je_cv_madv_dontneed]) if test "x${je_cv_madv_dontneed}" = "xyes" ; then AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) fi dnl Check for madvise(..., MADV_DO[NT]DUMP). JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [ #include ], [ madvise((void *)0, 0, MADV_DONTDUMP); madvise((void *)0, 0, MADV_DODUMP); ], [je_cv_madv_dontdump]) if test "x${je_cv_madv_dontdump}" = "xyes" ; then AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ]) fi dnl Check for madvise(..., MADV_[NO]HUGEPAGE). JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [ #include ], [ madvise((void *)0, 0, MADV_HUGEPAGE); madvise((void *)0, 0, MADV_NOHUGEPAGE); ], [je_cv_thp]) case "${host_cpu}" in arm*) ;; *) if test "x${je_cv_thp}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ]) fi ;; esac fi dnl ============================================================================ dnl Check whether __sync_{add,sub}_and_fetch() are available despite dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined. 
AC_DEFUN([JE_SYNC_COMPARE_AND_SWAP_CHECK],[ AC_CACHE_CHECK([whether to force $1-bit __sync_{add,sub}_and_fetch()], [je_cv_sync_compare_and_swap_$2], [AC_LINK_IFELSE([AC_LANG_PROGRAM([ #include ], [ #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 { uint$1_t x$1 = 0; __sync_add_and_fetch(&x$1, 42); __sync_sub_and_fetch(&x$1, 1); } #else #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 is defined, no need to force #endif ])], [je_cv_sync_compare_and_swap_$2=yes], [je_cv_sync_compare_and_swap_$2=no])]) if test "x${je_cv_sync_compare_and_swap_$2}" = "xyes" ; then AC_DEFINE([JE_FORCE_SYNC_COMPARE_AND_SWAP_$2], [ ]) fi ]) if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then JE_SYNC_COMPARE_AND_SWAP_CHECK(32, 4) JE_SYNC_COMPARE_AND_SWAP_CHECK(64, 8) fi dnl ============================================================================ dnl Check for __builtin_clz() and __builtin_clzl(). AC_CACHE_CHECK([for __builtin_clz], [je_cv_builtin_clz], [AC_LINK_IFELSE([AC_LANG_PROGRAM([], [ { unsigned x = 0; int y = __builtin_clz(x); } { unsigned long x = 0; int y = __builtin_clzl(x); } ])], [je_cv_builtin_clz=yes], [je_cv_builtin_clz=no])]) if test "x${je_cv_builtin_clz}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ]) fi dnl ============================================================================ dnl Check for os_unfair_lock operations as provided on Darwin. JE_COMPILABLE([Darwin os_unfair_lock_*()], [ #include #include ], [ #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 #error "os_unfair_lock is not supported" #else os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; os_unfair_lock_lock(&lock); os_unfair_lock_unlock(&lock); #endif ], [je_cv_os_unfair_lock]) if test "x${je_cv_os_unfair_lock}" = "xyes" ; then AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ]) fi dnl ============================================================================ dnl Check for spinlock(3) operations as provided on Darwin. JE_COMPILABLE([Darwin OSSpin*()], [ #include #include ], [ OSSpinLock lock = 0; OSSpinLockLock(&lock); OSSpinLockUnlock(&lock); ], [je_cv_osspin]) if test "x${je_cv_osspin}" = "xyes" ; then AC_DEFINE([JEMALLOC_OSSPIN], [ ]) fi dnl ============================================================================ dnl Darwin-related configuration. AC_ARG_ENABLE([zone-allocator], [AS_HELP_STRING([--disable-zone-allocator], [Disable zone allocator for Darwin])], [if test "x$enable_zone_allocator" = "xno" ; then enable_zone_allocator="0" else enable_zone_allocator="1" fi ], [if test "x${abi}" = "xmacho"; then enable_zone_allocator="1" fi ] ) AC_SUBST([enable_zone_allocator]) if test "x${enable_zone_allocator}" = "x1" ; then if test "x${abi}" != "xmacho"; then AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) fi AC_DEFINE([JEMALLOC_ZONE], [ ]) fi dnl ============================================================================ dnl Use initial-exec TLS by default. AC_ARG_ENABLE([initial-exec-tls], [AS_HELP_STRING([--disable-initial-exec-tls], [Disable the initial-exec tls model])], [if test "x$enable_initial_exec_tls" = "xno" ; then enable_initial_exec_tls="0" else enable_initial_exec_tls="1" fi ], [enable_initial_exec_tls="1"] ) AC_SUBST([enable_initial_exec_tls]) if test "x${je_cv_tls_model}" = "xyes" -a \ "x${enable_initial_exec_tls}" = "x1" ; then AC_DEFINE([JEMALLOC_TLS_MODEL], [__attribute__((tls_model("initial-exec")))]) else AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) fi dnl ============================================================================ dnl Enable background threads if possible. 
if test "x${have_pthread}" = "x1" -a "x${have_dlsym}" = "x1" \ -a "x${je_cv_os_unfair_lock}" != "xyes" \ -a "x${je_cv_osspin}" != "xyes" ; then AC_DEFINE([JEMALLOC_BACKGROUND_THREAD]) fi dnl ============================================================================ dnl Check for glibc malloc hooks JE_COMPILABLE([glibc malloc hook], [ #include extern void (* __free_hook)(void *ptr); extern void *(* __malloc_hook)(size_t size); extern void *(* __realloc_hook)(void *ptr, size_t size); ], [ void *ptr = 0L; if (__malloc_hook) ptr = __malloc_hook(1); if (__realloc_hook) ptr = __realloc_hook(ptr, 2); if (__free_hook && ptr) __free_hook(ptr); ], [je_cv_glibc_malloc_hook]) if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then if test "x${JEMALLOC_PREFIX}" = "x" ; then AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ]) wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook" fi fi JE_COMPILABLE([glibc memalign hook], [ #include extern void *(* __memalign_hook)(size_t alignment, size_t size); ], [ void *ptr = 0L; if (__memalign_hook) ptr = __memalign_hook(16, 7); ], [je_cv_glibc_memalign_hook]) if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then if test "x${JEMALLOC_PREFIX}" = "x" ; then AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ]) wrap_syms="${wrap_syms} __memalign_hook" fi fi JE_COMPILABLE([pthreads adaptive mutexes], [ #include ], [ pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); pthread_mutexattr_destroy(&attr); ], [je_cv_pthread_mutex_adaptive_np]) if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ]) fi JE_CFLAGS_SAVE() JE_CFLAGS_ADD([-D_GNU_SOURCE]) JE_CFLAGS_ADD([-Werror]) JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([strerror_r returns char with gnu source], [ #include #include #include #include ], [ char *buffer = (char *) malloc(100); char *error = strerror_r(EINVAL, buffer, 100); printf("%s\n", error); ], [je_cv_strerror_r_returns_char_with_gnu_source]) JE_CFLAGS_RESTORE() if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ]) fi dnl ============================================================================ dnl Check for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL dnl ============================================================================ dnl Define commands that generate output files. AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" cp /dev/null "${f}" for nm in `echo ${mangling_map} |tr ',' ' '` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'` m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'` echo "${n}:${m}" >> "${f}" dnl Remove name from public_syms so that it isn't redefined later. 
public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` done for sym in ${public_syms} ; do n="${sym}" m="${JEMALLOC_PREFIX}${sym}" echo "${n}:${m}" >> "${f}" done ], [ srcdir="${srcdir}" objroot="${objroot}" mangling_map="${mangling_map}" public_syms="${public_syms}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [ f="${objroot}include/jemalloc/internal/private_symbols.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" ], [ srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [ f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" ], [ srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" 3 "${LG_PAGE_SIZES}" 2 > "${objroot}include/jemalloc/internal/size_classes.h" ], [ SHELL="${SHELL}" srcdir="${srcdir}" objroot="${objroot}" LG_QUANTA="${LG_QUANTA}" LG_PAGE_SIZES="${LG_PAGE_SIZES}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ mkdir -p "${objroot}include/jemalloc" cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) 
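For illustration only: the je_ argument passed to jemalloc_mangle.sh above is the prefix under which the public API is exported when symbol mangling is in effect. A minimal sketch of what a consumer sees, assuming a build configured with --with-jemalloc-prefix=je_ (the prefix and the exact exported names depend on the build configuration):

/* Sketch: with --with-jemalloc-prefix=je_, the mangle headers generated
 * above map the standard allocator names onto je_-prefixed exports. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	void *p = je_malloc(100);                 /* prefixed malloc() */
	printf("usable: %zu\n", je_malloc_usable_size(p));
	je_free(p);
	return 0;
}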
AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" ], [ srcdir="${srcdir}" objroot="${objroot}" install_suffix="${install_suffix}" ]) dnl Process .in files. AC_SUBST([cfghdrs_in]) AC_SUBST([cfghdrs_out]) AC_CONFIG_HEADERS([$cfghdrs_tup]) dnl ============================================================================ dnl Generate outputs. AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof]) AC_SUBST([cfgoutputs_in]) AC_SUBST([cfgoutputs_out]) AC_OUTPUT dnl ============================================================================ dnl Print out the results of configuration. AC_MSG_RESULT([===============================================================================]) AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) AC_MSG_RESULT([library revision : ${rev}]) AC_MSG_RESULT([]) AC_MSG_RESULT([CONFIG : ${CONFIG}]) AC_MSG_RESULT([CC : ${CC}]) AC_MSG_RESULT([CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}]) AC_MSG_RESULT([SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}]) AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}]) AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) AC_MSG_RESULT([CXX : ${CXX}]) AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}]) AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}]) AC_MSG_RESULT([EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}]) AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) AC_MSG_RESULT([DSO_LDFLAGS : ${DSO_LDFLAGS}]) AC_MSG_RESULT([LIBS : ${LIBS}]) AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) AC_MSG_RESULT([]) AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) AC_MSG_RESULT([]) AC_MSG_RESULT([PREFIX : ${PREFIX}]) AC_MSG_RESULT([BINDIR : ${BINDIR}]) AC_MSG_RESULT([DATADIR : ${DATADIR}]) AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}]) AC_MSG_RESULT([LIBDIR : ${LIBDIR}]) AC_MSG_RESULT([MANDIR : ${MANDIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([srcroot : ${srcroot}]) AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}]) AC_MSG_RESULT([objroot : ${objroot}]) AC_MSG_RESULT([abs_objroot : ${abs_objroot}]) AC_MSG_RESULT([]) AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) AC_MSG_RESULT([install_suffix : ${install_suffix}]) AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}]) AC_MSG_RESULT([autogen : ${enable_autogen}]) AC_MSG_RESULT([debug : ${enable_debug}]) AC_MSG_RESULT([stats : ${enable_stats}]) AC_MSG_RESULT([prof : ${enable_prof}]) AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) AC_MSG_RESULT([fill : ${enable_fill}]) AC_MSG_RESULT([utrace : ${enable_utrace}]) AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) AC_MSG_RESULT([log : ${enable_log}]) AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}]) AC_MSG_RESULT([cxx : ${enable_cxx}]) AC_MSG_RESULT([===============================================================================]) 
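Several of the JE_COMPILABLE listings above lost their #include arguments to markup stripping (they render as a bare "#include"). As a concrete example of what such a probe compiles, here is a standalone version of the C11-atomics check; the header names (stdint.h, stdatomic.h) are reconstructions, not part of the surviving text. Note that a configure probe only needs to compile and link; it is never executed, so the null pointer below is harmless in this context.

/* Standalone equivalent of the "C11 atomics" JE_COMPILABLE probe. */
#include <stdint.h>
#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#include <stdatomic.h>
#else
#error Atomics not available
#endif

int main(void) {
	uint64_t *p = (uint64_t *)0;
	uint64_t x = 1;
	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
	uint64_t r = atomic_fetch_add(a, x) + x;
	return r == 0;
}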
jemalloc-sys-0.3.2/jemalloc/COPYING

Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
Copyright (C) 2002-2018 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
Copyright (C) 2009-2018 Facebook, Inc.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice(s),
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice(s),
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------

jemalloc-sys-0.3.2/jemalloc/doc/html.xsl.in

jemalloc-sys-0.3.2/jemalloc/doc/jemalloc.xml.in

User Manual
jemalloc @jemalloc_version@
Jason Evans, Author

JEMALLOC(3)

NAME
jemalloc - general purpose memory allocation functions

LIBRARY
This manual describes jemalloc @jemalloc_version@.  More information can be
found at the jemalloc website.

SYNOPSIS
#include <jemalloc/jemalloc.h>

Standard API
void *malloc(size_t size);
void *calloc(size_t number, size_t size);
int posix_memalign(void **ptr, size_t alignment, size_t size);
void *aligned_alloc(size_t alignment, size_t size);
void *realloc(void *ptr, size_t size);
void free(void *ptr);

Non-standard API
void *mallocx(size_t size, int flags);
void *rallocx(void *ptr, size_t size, int flags);
size_t xallocx(void *ptr, size_t size, size_t extra, int flags);
size_t sallocx(void *ptr, int flags);
void dallocx(void *ptr, int flags);
void sdallocx(void *ptr, size_t size, int flags);
size_t nallocx(size_t size, int flags);
int mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen);
int mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp);
int mallctlbymib(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
void malloc_stats_print(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts);
size_t malloc_usable_size(const void *ptr);
void (*malloc_message)(void *cbopaque, const char *s);
const char *malloc_conf;

DESCRIPTION
Standard API
The malloc() function allocates size bytes of uninitialized memory.  The
allocated space is suitably aligned (after possible pointer coercion) for
storage of any type of object.
The calloc() function allocates space for number objects, each size bytes in length. The result is identical to calling malloc() with an argument of number * size, with the exception that the allocated memory is explicitly initialized to zero bytes. The posix_memalign() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment, and returns the allocation in the value pointed to by ptr. The requested alignment must be a power of 2 at least as large as sizeof(void *). The aligned_alloc() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment. The requested alignment must be a power of 2. Behavior is undefined if size is not an integral multiple of alignment. The realloc() function changes the size of the previously allocated memory referenced by ptr to size bytes. The contents of the memory are unchanged up to the lesser of the new and old sizes. If the new size is larger, the contents of the newly allocated portion of the memory are undefined. Upon success, the memory referenced by ptr is freed and a pointer to the newly allocated memory is returned. Note that realloc() may move the memory allocation, resulting in a different return value than ptr. If ptr is NULL, the realloc() function behaves identically to malloc() for the specified size. The free() function causes the allocated memory referenced by ptr to be made available for future allocations. If ptr is NULL, no action occurs. Non-standard API The mallocx(), rallocx(), xallocx(), sallocx(), dallocx(), sdallocx(), and nallocx() functions all have a flags argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (|) operations to specify one or more of the following: MALLOCX_LG_ALIGN(la) Align the memory allocation to start at an address that is a multiple of (1 << la). This macro does not validate that la is within the valid range. MALLOCX_ALIGN(a) Align the memory allocation to start at an address that is a multiple of a, where a is a power of two. This macro does not validate that a is a power of 2. MALLOCX_ZERO Initialize newly allocated memory to contain zero bytes. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes. If this macro is absent, newly allocated memory is uninitialized. MALLOCX_TCACHE(tc) Use the thread-specific cache (tcache) specified by the identifier tc, which must have been acquired via the tcache.create mallctl. This macro does not validate that tc specifies a valid identifier. MALLOCX_TCACHE_NONE Do not use a thread-specific cache (tcache). Unless MALLOCX_TCACHE(tc) or MALLOCX_TCACHE_NONE is specified, an automatically managed tcache will be used under many circumstances. This macro cannot be used in the same flags argument as MALLOCX_TCACHE(tc). MALLOCX_ARENA(a) Use the arena specified by the index a. This macro has no effect for regions that were allocated via an arena other than the one specified. This macro does not validate that a specifies an arena index in the valid range. The mallocx() function allocates at least size bytes of memory, and returns a pointer to the base address of the allocation. Behavior is undefined if size is 0. 
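A short sketch of the flags machinery described above; the functions and macros are those listed in the synopsis, and the particular combination shown is illustrative:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* At least 100 bytes, 64-byte aligned, zero-initialized. */
	void *p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	if (p == NULL)
		return 1;
	/* sallocx() reports the real (size-class-rounded) size. */
	printf("real size: %zu\n", sallocx(p, 0));
	dallocx(p, 0);
	return 0;
}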
The rallocx() function resizes the allocation at ptr to be at least size bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location. Behavior is undefined if size is 0. The xallocx() function resizes the allocation at ptr in place to be at least size bytes, and returns the real size of the allocation. If extra is non-zero, an attempt is made to resize the allocation to be at least (size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize. Behavior is undefined if size is 0, or if (size + extra > SIZE_T_MAX). The sallocx() function returns the real size of the allocation at ptr. The dallocx() function causes the memory referenced by ptr to be made available for future allocations. The sdallocx() function is an extension of dallocx() with a size parameter to allow the caller to pass in the allocation size as an optimization. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by nallocx() or sallocx(). The nallocx() function allocates no memory, but it performs the same size computation as the mallocx() function, and returns the real size of the allocation that would result from the equivalent mallocx() function call, or 0 if the inputs exceed the maximum supported size class and/or alignment. Behavior is undefined if size is 0. The mallctl() function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions. The period-separated name argument specifies a location in a tree-structured namespace; see the section for documentation on the tree contents. To read a value, pass a pointer via oldp to adequate space to contain the value, and a pointer to its length via oldlenp; otherwise pass NULL and NULL. Similarly, to write a value, pass a pointer to the value via newp, and its length via newlen; otherwise pass NULL and 0. The mallctlnametomib() function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a Management Information Base (MIB) that can be passed repeatedly to mallctlbymib(). Upon successful return from mallctlnametomib(), mibp contains an array of *miblenp integers, where *miblenp is the lesser of the number of components in name and the input value of *miblenp. Thus it is possible to pass a *miblenp that is smaller than the number of period-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB. For name components that are integers (e.g. the 2 in arenas.bin.2.size), the corresponding MIB component will always be that integer. Therefore, it is legitimate to construct code like the following: The malloc_stats_print() function writes summary statistics via the write_cb callback function pointer and cbopaque data passed to write_cb, or malloc_message() if write_cb is NULL. The statistics are presented in human-readable form unless J is specified as a character within the opts string, in which case the statistics are presented in JSON format. This function can be called repeatedly. General information that never changes during execution can be omitted by specifying g as a character within the opts string. 
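The mallctlnametomib() paragraph above introduces a code listing ("code like the following:") that did not survive extraction. A reconstruction of that idiom, translating a name to a MIB once and then substituting the integer component on each iteration:

unsigned nbins, i;
size_t mib[4];
size_t len, miblen;

len = sizeof(nbins);
mallctl("arenas.nbins", &nbins, &len, NULL, 0);

miblen = 4;
mallctlnametomib("arenas.bin.0.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
	size_t bin_size;

	mib[2] = i;	/* Overwrite the integer name component. */
	len = sizeof(bin_size);
	mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
	/* Do something with bin_size... */
}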
Note that malloc_message() uses the mallctl*() functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously. If is specified during configuration, m, d, and a can be specified to omit merged arena, destroyed merged arena, and per arena statistics, respectively; b and l can be specified to omit per size class statistics for bins and large objects, respectively; x can be specified to omit all mutex statistics. Unrecognized characters are silently ignored. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations. The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The malloc_usable_size() function is not a mechanism for in-place realloc(); rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported by malloc_usable_size() should not be depended on, since such behavior is entirely implementation-dependent. TUNING Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time. The string specified via , the string pointed to by the global variable malloc_conf, the name of the file referenced by the symbolic link named /etc/malloc.conf, and the value of the environment variable MALLOC_CONF, will be interpreted, in that order, from left to right as options. Note that malloc_conf may be read before main() is entered, so the declaration of malloc_conf should specify an initializer that contains the final value to be read by jemalloc. and malloc_conf are compile-time mechanisms, whereas /etc/malloc.conf and MALLOC_CONF can be safely set any time prior to program invocation. An options string is a comma-separated list of option:value pairs. There is one key corresponding to each opt.* mallctl (see the section for options documentation). For example, abort:true,narenas:1 sets the opt.abort and opt.narenas options. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values. IMPLEMENTATION NOTES Traditionally, allocators have used sbrk 2 to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory. If sbrk 2 is supported by the operating system, this allocator uses both mmap 2 and sbrk 2, in that order of preference; otherwise only mmap 2 is used. This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi-processor systems. This works well with regard to threading scalability, but incurs some costs. There is a small fixed per-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation. These overheads are not generally an issue, given the number of arenas normally used. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions. 
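Tying the TUNING section above to the arena discussion here, a sketch of the compile-time route via the malloc_conf symbol; the option names are documented under opt.* below, and the particular values are illustrative only:

#include <jemalloc/jemalloc.h>

/* Compile-time defaults, read once at allocator initialization.
 * Equivalent behavior is available at run time, e.g.:
 *   MALLOC_CONF="narenas:1,xmalloc:true" ./a.out
 */
const char *malloc_conf = "narenas:1,xmalloc:true";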
In addition to multiple arenas, this allocator supports thread-specific caching, in order to make it possible to completely avoid synchronization for most allocation requests. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache. Memory is conceptually broken into extents. Extents are always aligned to multiples of the page size. This alignment makes it possible to find metadata for user objects quickly. User objects are broken into two categories according to size: small and large. Contiguous small objects comprise a slab, which resides within a single extent, whereas large objects each have their own extents backing them. Small objects are managed in groups by slabs. Each slab maintains a bitmap to track which regions are in use. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least sizeof(double). All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes. Small size classes are smaller than four times the page size, and large size classes extend from four times the page size up to the largest size class that does not exceed PTRDIFF_MAX. Allocations are packed tightly together, which can be an issue for multi-threaded applications. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating. The realloc(), rallocx(), and xallocx() functions may resize allocations without moving them under limited circumstances. Unlike the *allocx() API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call realloc() to grow e.g. a 9-byte allocation to 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage trivially succeeds in place as long as the pre-size and post-size both round up to the same size class. No other API guarantees are made regarding in-place resizing, but the current implementation also tries to resize large allocations in place, as long as the pre-size and post-size are both large. For shrinkage to succeed, the extent allocator must support splitting (see arena.<i>.extent_hooks). Growth only succeeds if the trailing memory is currently available, and the extent allocator supports merging. Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the size classes in each category are as shown in . 
Size classes

Category   Spacing   Size
Small      lg        [8]
           16        [16, 32, 48, 64, 80, 96, 112, 128]
           32        [160, 192, 224, 256]
           64        [320, 384, 448, 512]
           128       [640, 768, 896, 1024]
           256       [1280, 1536, 1792, 2048]
           512       [2560, 3072, 3584, 4096]
           1 KiB     [5 KiB, 6 KiB, 7 KiB, 8 KiB]
           2 KiB     [10 KiB, 12 KiB, 14 KiB]
Large      2 KiB     [16 KiB]
           4 KiB     [20 KiB, 24 KiB, 28 KiB, 32 KiB]
           8 KiB     [40 KiB, 48 KiB, 56 KiB, 64 KiB]
           16 KiB    [80 KiB, 96 KiB, 112 KiB, 128 KiB]
           32 KiB    [160 KiB, 192 KiB, 224 KiB, 256 KiB]
           64 KiB    [320 KiB, 384 KiB, 448 KiB, 512 KiB]
           128 KiB   [640 KiB, 768 KiB, 896 KiB, 1 MiB]
           256 KiB   [1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]
           512 KiB   [2560 KiB, 3 MiB, 3584 KiB, 4 MiB]
           1 MiB     [5 MiB, 6 MiB, 7 MiB, 8 MiB]
           2 MiB     [10 MiB, 12 MiB, 14 MiB, 16 MiB]
           4 MiB     [20 MiB, 24 MiB, 28 MiB, 32 MiB]
           8 MiB     [40 MiB, 48 MiB, 56 MiB, 64 MiB]
           ...       ...
           512 PiB   [2560 PiB, 3 EiB, 3584 PiB, 4 EiB]
           1 EiB     [5 EiB, 6 EiB, 7 EiB]
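A small sketch that makes the table observable: nallocx() returns the size class a request would round up to, without allocating. The printed values depend on the page size and quantum; the table above assumes 4 KiB pages and a 16-byte quantum.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	size_t requests[] = {1, 9, 17, 100, 4096, 4097};
	size_t i;
	for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		/* nallocx() performs the mallocx() size computation only. */
		printf("%6zu -> %6zu\n", requests[i],
		    nallocx(requests[i], 0));
	}
	return 0;
}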
MALLCTL NAMESPACE The following names are defined in the namespace accessible via the mallctl*() functions. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r-, -w, or --, and required build configuration flags follow, if any. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection. In the case of stats.arenas.<i>.* and arena.<i>.{initialized,purge,decay,dss}, <i> equal to MALLCTL_ARENAS_ALL can be used to operate on all arenas or access the summation of statistics from all arenas; similarly <i> equal to MALLCTL_ARENAS_DESTROYED can be used to access the summation of statistics from all destroyed arenas. These constants can be utilized either via mallctlnametomib() followed by mallctlbymib(), or via code such as the following: Take special note of the epoch mallctl, which controls refreshing of cached dynamic statistics. version (const char *) r- Return the jemalloc version string. epoch (uint64_t) rw If a value is passed in, refresh the data from which the mallctl*() functions report values, and increment the epoch. Return the current epoch. This is useful for detecting whether another thread caused a refresh. background_thread (bool) rw Enable/disable internal background worker threads. When set to true, background threads are created on demand (the number of background threads will be no more than the number of CPUs or active arenas). Threads run periodically, and handle purging asynchronously. When switching off, background threads are terminated synchronously. Note that after fork2 function, the state in the child process will be disabled regardless the state in parent process. See stats.background_thread for related stats. opt.background_thread can be used to set the default option. This option is only available on selected pthread-based platforms. max_background_threads (size_t) rw Maximum number of background worker threads that will be created. This value is capped at opt.max_background_threads at startup. config.cache_oblivious (bool) r- was specified during build configuration. config.debug (bool) r- was specified during build configuration. config.fill (bool) r- was specified during build configuration. config.lazy_lock (bool) r- was specified during build configuration. config.malloc_conf (const char *) r- Embedded configure-time-specified run-time options string, empty unless was specified during build configuration. config.prof (bool) r- was specified during build configuration. config.prof_libgcc (bool) r- was not specified during build configuration. config.prof_libunwind (bool) r- was specified during build configuration. config.stats (bool) r- was specified during build configuration. config.utrace (bool) r- was specified during build configuration. config.xmalloc (bool) r- was specified during build configuration. opt.abort (bool) r- Abort-on-warning enabled/disabled. If true, most warnings are fatal. Note that runtime option warnings are not included (see opt.abort_conf for that). The process will call abort 3 in these cases. This option is disabled by default unless is specified during configuration, in which case it is enabled by default. opt.abort_conf (bool) r- Abort-on-invalid-configuration enabled/disabled. If true, invalid runtime options are fatal. The process will call abort 3 in these cases. This option is disabled by default unless is specified during configuration, in which case it is enabled by default. 
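Two idioms from the namespace introduction above, reconstructed because the original listing ("via code such as the following:") did not survive extraction: refreshing the epoch before reading cached statistics, and addressing all arenas by splicing MALLCTL_ARENAS_ALL into a name:

#include <stdint.h>
#include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

void refresh_and_decay(void) {
	/* Refresh the data the mallctl*() statistics report from. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Trigger decay-based purging on every arena. */
	mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
	    NULL, NULL, NULL, 0);
}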
opt.metadata_thp (const char *) r- Controls whether to allow jemalloc to use transparent huge page (THP) for internal metadata (see stats.metadata). always allows such usage. auto uses no THP initially, but may begin to do so when metadata usage reaches certain level. The default is disabled. opt.retain (bool) r- If true, retain unused virtual memory for later reuse rather than discarding it by calling munmap 2 or equivalent (see stats.retained for related details). This option is disabled by default unless discarding virtual memory is known to trigger platform-specific performance problems, e.g. for [64-bit] Linux, which has a quirk in its virtual memory allocation algorithm that causes semi-permanent VM map holes under normal jemalloc operation. Although munmap 2 causes issues on 32-bit Linux as well, retaining virtual memory for 32-bit Linux is disabled by default due to the practical possibility of address space exhaustion. opt.dss (const char *) r- dss (sbrk 2) allocation precedence as related to mmap 2 allocation. The following settings are supported if sbrk 2 is supported by the operating system: disabled, primary, and secondary; otherwise only disabled is supported. The default is secondary if sbrk 2 is supported by the operating system; disabled otherwise. opt.narenas (unsigned) r- Maximum number of arenas to use for automatic multiplexing of threads and arenas. The default is four times the number of CPUs, or one if there is a single CPU. opt.percpu_arena (const char *) r- Per CPU arena mode. Use the percpu setting to enable this feature, which uses number of CPUs to determine number of arenas, and bind threads to arenas dynamically based on the CPU the thread runs on currently. phycpu setting uses one arena per physical CPU, which means the two hyper threads on the same CPU share one arena. Note that no runtime checking regarding the availability of hyper threading is done at the moment. When set to disabled, narenas and thread to arena association will not be impacted by this option. The default is disabled. opt.background_thread (const bool) r- Internal background worker threads enabled/disabled. Because of potential circular dependencies, enabling background thread using this option may cause crash or deadlock during initialization. For a reliable way to use this feature, see background_thread for dynamic control options and details. This option is disabled by default. opt.max_background_threads (const size_t) r- Maximum number of background threads that will be created if background_thread is set. Defaults to number of cpus. opt.dirty_decay_ms (ssize_t) r- Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged (i.e. converted to muzzy via e.g. madvise(...MADV_FREE) if supported by the operating system, or converted to clean otherwise) and/or reused. Dirty pages are defined as previously having been potentially written to by the application, and therefore consuming physical memory, yet having no current use. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate. A decay time of 0 causes all unused dirty pages to be purged immediately upon creation. A decay time of -1 disables purging. The default decay time is 10 seconds. See arenas.dirty_decay_ms and arena.<i>.dirty_decay_ms for related dynamic control options. See opt.muzzy_decay_ms for a description of muzzy pages. 
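The decay knob above also has a writable counterpart in the arenas.* namespace; a sketch of adjusting the default for subsequently created arenas (the value 0, meaning immediate purging, is illustrative):

#include <jemalloc/jemalloc.h>

/* Purge unused dirty pages immediately in arenas created from now on. */
void purge_eagerly(void) {
	ssize_t decay_ms = 0;
	mallctl("arenas.dirty_decay_ms", NULL, NULL, &decay_ms,
	    sizeof(decay_ms));
}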
opt.muzzy_decay_ms (ssize_t) r- Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged (i.e. converted to clean) and/or reused. Muzzy pages are defined as previously having been unused dirty pages that were subsequently purged in a manner that left them subject to the reclamation whims of the operating system (e.g. madvise(...MADV_FREE)), and therefore in an indeterminate state. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate. A decay time of 0 causes all unused muzzy pages to be purged immediately upon creation. A decay time of -1 disables purging. The default decay time is 10 seconds. See arenas.muzzy_decay_ms and arena.<i>.muzzy_decay_ms for related dynamic control options. opt.lg_extent_max_active_fit (size_t) r- When reusing dirty extents, this determines the (log base 2 of the) maximum ratio between the size of the active extent selected (to split off from) and the size of the requested allocation. This prevents the splitting of large active extents for smaller allocations, which can reduce fragmentation over the long run (especially for non-active extents). Lower value may reduce fragmentation, at the cost of extra active extents. The default value is 6, which gives a maximum ratio of 64 (2^6). opt.stats_print (bool) r- Enable/disable statistics printing at exit. If enabled, the malloc_stats_print() function is called at program exit via an atexit 3 function. opt.stats_print_opts can be combined to specify output options. If is specified during configuration, this has the potential to cause deadlock for a multi-threaded process that exits while one or more threads are executing in the memory allocation functions. Furthermore, atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development. This option is disabled by default. opt.stats_print_opts (const char *) r- Options (the opts string) to pass to the malloc_stats_print() at exit (enabled through opt.stats_print). See available options in malloc_stats_print(). Has no effect unless opt.stats_print is enabled. The default is . opt.junk (const char *) r- [] Junk filling. If set to alloc, each byte of uninitialized allocated memory will be initialized to 0xa5. If set to free, all deallocated memory will be initialized to 0x5a. If set to true, both allocated and deallocated memory will be initialized, and if set to false, junk filling be disabled entirely. This is intended for debugging and will impact performance negatively. This option is false by default unless is specified during configuration, in which case it is true by default. opt.zero (bool) r- [] Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so realloc() and rallocx() calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default. opt.utrace (bool) r- [] Allocation tracing based on utrace 2 enabled/disabled. This option is disabled by default. 
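The opt.* nodes such as those above are read-only (r-); their effective values can still be inspected through mallctl(). A minimal sketch:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

void print_zero_option(void) {
	bool zero;
	size_t sz = sizeof(zero);
	if (mallctl("opt.zero", &zero, &sz, NULL, 0) == 0)
		printf("opt.zero: %s\n", zero ? "true" : "false");
}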
opt.xmalloc (bool) r- [] Abort-on-out-of-memory enabled/disabled. If enabled, rather than returning failure for any allocation function, display a diagnostic message on STDERR_FILENO and cause the program to drop core (using abort 3). If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code: This option is disabled by default. opt.tcache (bool) r- Thread-specific caching (tcache) enabled/disabled. When there are multiple threads, each thread uses a tcache for objects up to a certain size. Thread-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use. See the opt.lg_tcache_max option for related tuning information. This option is enabled by default. opt.lg_tcache_max (size_t) r- Maximum size class (log base 2) to cache in the thread-specific cache (tcache). At a minimum, all small size classes are cached, and at a maximum all large size classes are cached. The default maximum is 32 KiB (2^15). opt.thp (const char *) r- Transparent hugepage (THP) mode. Settings "always", "never" and "default" are available if THP is supported by the operating system. The "always" setting enables transparent hugepage for all user memory mappings with MADV_HUGEPAGE; "never" ensures no transparent hugepage with MADV_NOHUGEPAGE; the default setting "default" makes no changes. Note that: this option does not affect THP for jemalloc internal metadata (see opt.metadata_thp); in addition, for arenas with customized extent_hooks, this option is bypassed as it is implemented as part of the default extent hooks. opt.prof (bool) r- [] Memory profiling enabled/disabled. If enabled, profile memory allocation activity. See the opt.prof_active option for on-the-fly activation/deactivation. See the opt.lg_prof_sample option for probabilistic sampling control. See the opt.prof_accum option for control of cumulative sample reporting. See the opt.lg_prof_interval option for information on interval-triggered profile dumping, the opt.prof_gdump option for information on high-water-triggered profile dumping, and the opt.prof_final option for final profile dumping. Profile output is compatible with the jeprof command, which is based on the pprof that is developed as part of the gperftools package. See HEAP PROFILE FORMAT for heap profile format documentation. opt.prof_prefix (const char *) r- [] Filename prefix for profile dumps. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled). The default prefix is jeprof. opt.prof_active (bool) r- [] Profiling activated/deactivated. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the opt.prof option) but inactive, then toggle profiling at any time during program execution with the prof.active mallctl. This option is enabled by default. opt.prof_thread_active_init (bool) r- [] Initial setting for thread.prof.active in newly created threads. The initial setting for newly created threads can also be changed during execution via the prof.thread_active_init mallctl. This option is enabled by default. opt.lg_prof_sample (size_t) r- [] Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity. 
Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead. The default sample interval is 512 KiB (2^19 B). opt.prof_accum (bool) r- [] Reporting of cumulative object/byte counts in profile dumps enabled/disabled. If this option is enabled, every unique backtrace must be stored for the duration of execution. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest. This option is disabled by default. opt.lg_prof_interval (ssize_t) r- [] Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.i<iseq>.heap, where <prefix> is controlled by the opt.prof_prefix option. By default, interval-triggered profile dumping is disabled (encoded as -1). opt.prof_gdump (bool) r- [] Set the initial state of prof.gdump, which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum. This option is disabled by default. opt.prof_final (bool) r- [] Use an atexit 3 function to dump final memory usage to a file named according to the pattern <prefix>.<pid>.<seq>.f.heap, where <prefix> is controlled by the opt.prof_prefix option. Note that atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). This option is disabled by default. opt.prof_leak (bool) r- [] Leak reporting enabled/disabled. If enabled, use an atexit 3 function to report memory leaks detected by allocation sampling. See the opt.prof option for information on analyzing heap profile output. This option is disabled by default. thread.arena (unsigned) rw Get or set the arena associated with the calling thread. If the specified arena was not initialized beforehand (see the arena.i.initialized mallctl), it will be automatically initialized as a side effect of calling this interface. thread.allocated (uint64_t) r- [] Get the total number of bytes ever allocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases. thread.allocatedp (uint64_t *) r- [] Get a pointer to the the value that is returned by the thread.allocated mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls. thread.deallocated (uint64_t) r- [] Get the total number of bytes ever deallocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases. thread.deallocatedp (uint64_t *) r- [] Get a pointer to the the value that is returned by the thread.deallocated mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls. thread.tcache.enabled (bool) rw Enable/disable calling thread's tcache. The tcache is implicitly flushed as a side effect of becoming disabled (see thread.tcache.flush). thread.tcache.flush (void) -- Flush calling thread's thread-specific cache (tcache). This interface releases all cached objects and internal data structures associated with the calling thread's tcache. 
Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful. thread.prof.name (const char *) r- or -w [] Get/set the descriptive name associated with the calling thread in memory profile dumps. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution. The output string of this interface should be copied for non-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations. The name string must be nil-terminated and comprised only of characters in the sets recognized by isgraph 3 and isblank 3. thread.prof.active (bool) rw [] Control whether sampling is currently active for the calling thread. This is an activation mechanism in addition to prof.active; both must be active for the calling thread to sample. This flag is enabled by default. tcache.create (unsigned) r- Create an explicit thread-specific cache (tcache) and return an identifier that can be passed to the MALLOCX_TCACHE(tc) macro to explicitly use the specified cache rather than the automatically managed one that is used by default. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds. tcache.flush (unsigned) -w Flush the specified thread-specific cache (tcache). The same considerations apply to this interface as to thread.tcache.flush, except that the tcache will never be automatically discarded. tcache.destroy (unsigned) -w Flush the specified thread-specific cache (tcache) and make the identifier available for use during a future tcache creation. arena.<i>.initialized (bool) r- Get whether the specified arena's statistics are initialized (i.e. the arena was initialized prior to the current epoch). This interface can also be nominally used to query whether the merged statistics corresponding to MALLCTL_ARENAS_ALL are initialized (always true). arena.<i>.decay (void) -- Trigger decay-based purging of unused dirty/muzzy pages for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL. The proportion of unused dirty/muzzy pages to be purged depends on the current time; see opt.dirty_decay_ms and opt.muzy_decay_ms for details. arena.<i>.purge (void) -- Purge all unused dirty pages for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL. arena.<i>.reset (void) -- Discard all of the arena's extant allocations. This interface can only be used with arenas explicitly created via arenas.create. None of the arena's discarded/cached allocations may accessed afterward. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand. arena.<i>.destroy (void) -- Destroy the arena. Discard all of the arena's extant allocations using the same mechanism as for arena.<i>.reset (with all the same constraints and side effects), merge the arena stats into those accessible at arena index MALLCTL_ARENAS_DESTROYED, and then completely discard all metadata associated with the arena. 
arena.<i>.dss (const char *) rw
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL. See opt.dss for supported settings.

arena.<i>.dirty_decay_ms (ssize_t) rw
Current per-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused. Each time this interface is set, all currently unused dirty pages are considered to have fully decayed, which causes immediate purging of all unused dirty pages unless the decay time is set to -1 (i.e. purging disabled). See opt.dirty_decay_ms for additional information.

arena.<i>.muzzy_decay_ms (ssize_t) rw
Current per-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused. Each time this interface is set, all currently unused muzzy pages are considered to have fully decayed, which causes immediate purging of all unused muzzy pages unless the decay time is set to -1 (i.e. purging disabled). See opt.muzzy_decay_ms for additional information.

arena.<i>.retain_grow_limit (size_t) rw
Maximum size to grow the retained region (only relevant when opt.retain is enabled). This controls the maximum increment by which to expand virtual memory, or allocation through arena.<i>.extent_hooks. In particular, if customized extent hooks reserve physical memory (e.g. 1G huge pages), this is useful to control the allocation hook's input size. The default is no limit.
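Because the arena index is part of the mallctl name, per-arena settings such as arena.<i>.dirty_decay_ms are typically adjusted by formatting the index into the name string; a minimal sketch, assuming an unprefixed jemalloc build:

    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    /* Disable dirty-page purging for one arena by writing -1, as
     * described under arena.<i>.dirty_decay_ms above. */
    void disable_dirty_decay(unsigned arena_ind) {
        char name[64];
        ssize_t decay_ms = -1;
        snprintf(name, sizeof(name), "arena.%u.dirty_decay_ms", arena_ind);
        mallctl(name, NULL, NULL, &decay_ms, sizeof(decay_ms));
    }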
arena.<i>.extent_hooks (extent_hooks_t *) rw
Get or set the extent management hook functions for arena <i>. The functions must be capable of operating on all extant extents associated with arena <i>, usually by passing unknown extents to the replaced functions. In practice, it is feasible to control allocation for arenas explicitly created via arenas.create such that all extents originate from an application-supplied extent allocator (by specifying the custom extent hook functions during arena creation), but the automatically created arenas will have already created extents prior to the application having an opportunity to take over extent allocation.

The extent_hooks_t structure comprises function pointers which are described individually below. jemalloc uses these functions to manage extent lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation. However, there are performance and platform reasons to retain extents for later reuse. Cleanup attempts cascade from deallocation to decommit to forced purging to lazy purging, which gives the extent management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations. All operations except allocation can be universally opted out of by setting the hook pointers to NULL, or selectively opted out of by returning failure. Note that once the extent hook is set, the structure is accessed directly by the associated arenas, so it must remain valid for the entire lifetime of the arenas.

typedef void *(extent_alloc_t)(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind);

An extent allocation function conforms to the extent_alloc_t type and upon success returns a pointer to size bytes of mapped memory on behalf of arena arena_ind such that the extent's base address is a multiple of alignment, as well as setting *zero to indicate whether the extent is zeroed and *commit to indicate whether the extent is committed. Upon error the function returns NULL and leaves *zero and *commit unmodified. The size parameter is always a multiple of the page size. The alignment parameter is always a power of two at least as large as the page size. Zeroing is mandatory if *zero is true upon function entry. Committing is mandatory if *commit is true upon function entry. If new_addr is not NULL, the returned pointer must be new_addr on success or NULL on error. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. Note that replacing the default extent allocation function makes the arena's arena.<i>.dss setting irrelevant.

typedef bool (extent_dalloc_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind);

An extent deallocation function conforms to the extent_dalloc_t type and deallocates an extent at given addr and size with committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates opt-out from deallocation; the virtual memory mapping associated with the extent remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse.

typedef void (extent_destroy_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind);

An extent destruction function conforms to the extent_destroy_t type and unconditionally destroys an extent at given addr and size with committed/decommitted memory as indicated, on behalf of arena arena_ind. This function may be called to destroy retained extents during arena destruction (see arena.<i>.destroy).

typedef bool (extent_commit_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent commit function conforms to the extent_commit_t type and commits zeroed physical memory to back pages within an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. If the function returns true, this indicates insufficient physical memory to satisfy the request.

typedef bool (extent_decommit_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent decommit function conforms to the extent_decommit_t type and decommits any physical memory that is backing pages within an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success, in which case the pages will be committed via the extent commit function before being reused.
If the function returns true, this indicates opt-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse.

typedef bool (extent_purge_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent purge function conforms to the extent_purge_t type and discards physical pages within the virtual memory mapping associated with an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind. A lazy extent purge function (e.g. implemented via madvise(..., MADV_FREE)) can delay purging indefinitely and leave the pages within the purged virtual memory range in an indeterminate state, whereas a forced extent purge function immediately purges, and the pages within the virtual memory range will be zero-filled the next time they are accessed. If the function returns true, this indicates failure to purge.

typedef bool (extent_split_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind);

An extent split function conforms to the extent_split_t type and optionally splits an extent at given addr and size into two adjacent extents, the first of size_a bytes, and the second of size_b bytes, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the extent remains unsplit and therefore should continue to be operated on as a whole.

typedef bool (extent_merge_t)(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind);

An extent merge function conforms to the extent_merge_t type and optionally merges adjacent extents, at given addr_a and size_a with given addr_b and size_b into one contiguous extent, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the extents remain distinct mappings and therefore should continue to be operated on independently.

arenas.narenas (unsigned) r-
Current limit on number of arenas.

arenas.dirty_decay_ms (ssize_t) rw
Current default per-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused, used to initialize arena.<i>.dirty_decay_ms during arena creation. See opt.dirty_decay_ms for additional information.

arenas.muzzy_decay_ms (ssize_t) rw
Current default per-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused, used to initialize arena.<i>.muzzy_decay_ms during arena creation. See opt.muzzy_decay_ms for additional information.

arenas.quantum (size_t) r-
Quantum size.

arenas.page (size_t) r-
Page size.

arenas.tcache_max (size_t) r-
Maximum thread-cached size class.

arenas.nbins (unsigned) r-
Number of bin size classes.

arenas.nhbins (unsigned) r-
Total number of thread cache bin size classes.

arenas.bin.<i>.size (size_t) r-
Maximum size supported by size class.

arenas.bin.<i>.nregs (uint32_t) r-
Number of regions per slab.

arenas.bin.<i>.slab_size (size_t) r-
Number of bytes per slab.

arenas.nlextents (unsigned) r-
Total number of large size classes.

arenas.lextent.<i>.size (size_t) r-
Maximum size supported by this large size class.
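The per-size-class entries above are naturally read in a loop; precomputing a MIB with mallctlnametomib() and substituting the index avoids repeated name lookups. A minimal sketch, assuming an unprefixed jemalloc build:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    /* Enumerate every bin size class via a precomputed MIB. */
    void print_bin_sizes(void) {
        unsigned nbins;
        size_t sz = sizeof(nbins);
        if (mallctl("arenas.nbins", &nbins, &sz, NULL, 0) != 0)
            return;

        size_t mib[4];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0)
            return;

        for (unsigned i = 0; i < nbins; i++) {
            mib[2] = i; /* substitute the bin index into the MIB */
            size_t bin_size;
            sz = sizeof(bin_size);
            if (mallctlbymib(mib, miblen, &bin_size, &sz, NULL, 0) == 0)
                printf("bin %u: %zu bytes\n", i, bin_size);
        }
    }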
arenas.create (unsigned, extent_hooks_t *) rw
Explicitly create a new arena outside the range of automatically managed arenas, with optionally specified extent hooks, and return the new arena index.

arenas.lookup (unsigned, void*) rw
Index of the arena to which the specified allocation belongs.

prof.thread_active_init (bool) rw []
Control the initial setting for thread.prof.active in newly created threads. See the opt.prof_thread_active_init option for additional information.

prof.active (bool) rw []
Control whether sampling is currently active. See the opt.prof_active option for additional information, as well as the interrelated thread.prof.active mallctl.

prof.dump (const char *) -w []
Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern <prefix>.<pid>.<seq>.m<mseq>.heap, where <prefix> is controlled by the opt.prof_prefix option.

prof.gdump (bool) rw []
When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.u<useq>.heap, where <prefix> is controlled by the opt.prof_prefix option.

prof.reset (size_t) -w []
Reset all memory profile statistics, and optionally update the sample rate (see opt.lg_prof_sample and prof.lg_sample).

prof.lg_sample (size_t) r- []
Get the current sample rate (see opt.lg_prof_sample).

prof.interval (uint64_t) r- []
Average number of bytes allocated between interval-based profile dumps. See the opt.lg_prof_interval option for additional information.

stats.allocated (size_t) r- []
Total number of bytes allocated by the application.

stats.active (size_t) r- []
Total number of bytes in active pages allocated by the application. This is a multiple of the page size, and greater than or equal to stats.allocated. This does not include stats.arenas.<i>.pdirty, stats.arenas.<i>.pmuzzy, nor pages entirely devoted to allocator metadata.

stats.metadata (size_t) r- []
Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap-sensitive allocator metadata structures (see stats.arenas.<i>.base) and internal allocations (see stats.arenas.<i>.internal). Transparent huge page (enabled with opt.metadata_thp) usage is not considered.

stats.metadata_thp (size_t) r- []
Number of transparent huge pages (THP) used for metadata. See stats.metadata and opt.metadata_thp for details.

stats.resident (size_t) r- []
Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size, and is larger than stats.active.

stats.mapped (size_t) r- []
Total number of bytes in active extents mapped by the allocator. This is larger than stats.active. This does not include inactive extents, even those that contain unused dirty pages, which means that there is no strict ordering between this and stats.resident.

stats.retained (size_t) r- []
Total number of bytes in virtual memory mappings that were retained rather than being returned to the operating system via e.g. munmap(2) or similar. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see extent hooks for details). Retained memory is excluded from mapped memory statistics, e.g. stats.mapped.
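Since arenas.create accepts an extent_hooks_t pointer, the hook types described earlier can be wired in at arena creation time. The following is only a sketch under stated assumptions: the function names are hypothetical, error handling is elided, and logging via stderr is for illustration only, since a robust hook would avoid anything that might allocate:

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static extent_hooks_t *orig_hooks;   /* default hook table from arena 0 */
    static extent_hooks_t logging_hooks; /* our modified copy */

    static void *
    logging_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
        size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
        fprintf(stderr, "extent alloc: %zu bytes for arena %u\n", size,
            arena_ind);
        /* Delegate the real work to the default allocation hook. */
        return orig_hooks->alloc(orig_hooks, new_addr, size, alignment,
            zero, commit, arena_ind);
    }

    unsigned
    create_logging_arena(void) {
        size_t sz = sizeof(orig_hooks);
        mallctl("arena.0.extent_hooks", &orig_hooks, &sz, NULL, 0);
        logging_hooks = *orig_hooks; /* inherit the remaining hooks */
        logging_hooks.alloc = logging_alloc;

        extent_hooks_t *hooks = &logging_hooks;
        unsigned arena_ind;
        sz = sizeof(arena_ind);
        mallctl("arenas.create", &arena_ind, &sz, &hooks, sizeof(hooks));
        return arena_ind;
    }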
stats.background_thread.num_threads (size_t) r- []
Number of background threads running currently.

stats.background_thread.num_runs (uint64_t) r- []
Total number of runs from all background threads.

stats.background_thread.run_interval (uint64_t) r- []
Average run interval in nanoseconds of background threads.

stats.mutexes.ctl.{counter} (counter specific type) r- []
Statistics on ctl mutex (global scope; mallctl related). {counter} is one of the counters below:

num_ops (uint64_t): Total number of lock acquisition operations on this mutex.

num_spin_acq (uint64_t): Number of times the mutex was spin-acquired. When the mutex is currently locked and cannot be acquired immediately, a short period of spin-retry within jemalloc will be performed. Acquisition through spinning generally means the contention was lightweight and did not cause context switches.

num_wait (uint64_t): Number of times the mutex was wait-acquired, which means the mutex contention was not resolved by spin-retry, and a blocking operation was likely involved in order to acquire the mutex. This event generally implies higher cost / longer delay, and should be investigated if it happens often.

max_wait_time (uint64_t): Maximum length of time in nanoseconds spent on a single wait-acquired lock operation. Note that to avoid profiling overhead on the common path, this does not consider spin-acquired cases.

total_wait_time (uint64_t): Cumulative time in nanoseconds spent on wait-acquired lock operations. Similarly, spin-acquired cases are not considered.

max_num_thds (uint32_t): Maximum number of threads waiting on this mutex simultaneously. Similarly, spin-acquired cases are not considered.

num_owner_switch (uint64_t): Number of times the current mutex owner is different from the previous one. This event does not generally imply an issue; rather it is an indicator of how often the protected data are accessed by different threads.

stats.mutexes.background_thread.{counter} (counter specific type) r- []
Statistics on background_thread mutex (global scope; background_thread related). {counter} is one of the counters in mutex profiling counters.

stats.mutexes.prof.{counter} (counter specific type) r- []
Statistics on prof mutex (global scope; profiling related). {counter} is one of the counters in mutex profiling counters.

stats.mutexes.reset (void) -- []
Reset all mutex profile statistics, including global mutexes, arena mutexes and bin mutexes.

stats.arenas.<i>.dss (const char *) r-
dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. See opt.dss for details.

stats.arenas.<i>.dirty_decay_ms (ssize_t) r-
Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused. See opt.dirty_decay_ms for details.

stats.arenas.<i>.muzzy_decay_ms (ssize_t) r-
Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused. See opt.muzzy_decay_ms for details.

stats.arenas.<i>.nthreads (unsigned) r-
Number of threads currently assigned to arena.

stats.arenas.<i>.uptime (uint64_t) r-
Time elapsed (in nanoseconds) since the arena was created. If <i> equals 0 or MALLCTL_ARENAS_ALL, this is the uptime since malloc initialization.

stats.arenas.<i>.pactive (size_t) r-
Number of pages in active extents.
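The mutex profiling counters read like any other mallctl; for instance, this hedged C fragment, assuming an unprefixed, statistics-enabled jemalloc build, checks whether the global ctl mutex has seen heavyweight contention:

    #include <inttypes.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    void report_ctl_contention(void) {
        uint64_t num_wait;
        size_t sz = sizeof(num_wait);
        if (mallctl("stats.mutexes.ctl.num_wait", &num_wait, &sz,
            NULL, 0) == 0 && num_wait > 0) {
            printf("ctl mutex wait-acquired %" PRIu64 " times\n", num_wait);
        }
    }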
stats.arenas.<i>.pdirty (size_t) r-
Number of pages within unused extents that are potentially dirty, and for which madvise() or similar has not been called. See opt.dirty_decay_ms for a description of dirty pages.

stats.arenas.<i>.pmuzzy (size_t) r-
Number of pages within unused extents that are muzzy. See opt.muzzy_decay_ms for a description of muzzy pages.

stats.arenas.<i>.mapped (size_t) r- []
Number of mapped bytes.

stats.arenas.<i>.retained (size_t) r- []
Number of retained bytes. See stats.retained for details.

stats.arenas.<i>.base (size_t) r- []
Number of bytes dedicated to bootstrap-sensitive allocator metadata structures.

stats.arenas.<i>.internal (size_t) r- []
Number of bytes dedicated to internal allocations. Internal allocations differ from application-originated allocations in that they are for internal use, and that they are omitted from heap profiles.

stats.arenas.<i>.metadata_thp (size_t) r- []
Number of transparent huge pages (THP) used for metadata. See opt.metadata_thp for details.

stats.arenas.<i>.resident (size_t) r- []
Maximum number of bytes in physically resident data pages mapped by the arena, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size.

stats.arenas.<i>.dirty_npurge (uint64_t) r- []
Number of dirty page purge sweeps performed.

stats.arenas.<i>.dirty_nmadvise (uint64_t) r- []
Number of madvise() or similar calls made to purge dirty pages.

stats.arenas.<i>.dirty_purged (uint64_t) r- []
Number of dirty pages purged.

stats.arenas.<i>.muzzy_npurge (uint64_t) r- []
Number of muzzy page purge sweeps performed.

stats.arenas.<i>.muzzy_nmadvise (uint64_t) r- []
Number of madvise() or similar calls made to purge muzzy pages.

stats.arenas.<i>.muzzy_purged (uint64_t) r- []
Number of muzzy pages purged.

stats.arenas.<i>.small.allocated (size_t) r- []
Number of bytes currently allocated by small objects.

stats.arenas.<i>.small.nmalloc (uint64_t) r- []
Cumulative number of times a small allocation was requested from the arena's bins, whether to fill the relevant tcache if opt.tcache is enabled, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.small.ndalloc (uint64_t) r- []
Cumulative number of times a small allocation was returned to the arena's bins, whether to flush the relevant tcache if opt.tcache is enabled, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.small.nrequests (uint64_t) r- []
Cumulative number of allocation requests satisfied by all bin size classes.

stats.arenas.<i>.large.allocated (size_t) r- []
Number of bytes currently allocated by large objects.

stats.arenas.<i>.large.nmalloc (uint64_t) r- []
Cumulative number of times a large extent was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.large.ndalloc (uint64_t) r- []
Cumulative number of times a large extent was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.large.nrequests (uint64_t) r- []
Cumulative number of allocation requests satisfied by all large size classes.
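Note that stats.* values are snapshots: readers first advance the epoch mallctl (documented earlier in this manual) to force jemalloc to merge per-thread activity into the snapshot. A minimal sketch, assuming an unprefixed, statistics-enabled jemalloc build:

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    /* Refresh the stats snapshot, then read allocator-wide totals. */
    void print_global_stats(void) {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        size_t allocated, active, resident;
        sz = sizeof(size_t);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
            mallctl("stats.active", &active, &sz, NULL, 0) == 0 &&
            mallctl("stats.resident", &resident, &sz, NULL, 0) == 0) {
            printf("allocated/active/resident: %zu/%zu/%zu\n",
                allocated, active, resident);
        }
    }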
stats.arenas.<i>.bins.<j>.nmalloc (uint64_t) r- []
Cumulative number of times a bin region of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.bins.<j>.ndalloc (uint64_t) r- []
Cumulative number of times a bin region of the corresponding size class was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.bins.<j>.nrequests (uint64_t) r- []
Cumulative number of allocation requests satisfied by bin regions of the corresponding size class.

stats.arenas.<i>.bins.<j>.curregs (size_t) r- []
Current number of regions for this size class.

stats.arenas.<i>.bins.<j>.nfills (uint64_t) r-
Cumulative number of tcache fills.

stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r-
Cumulative number of tcache flushes.

stats.arenas.<i>.bins.<j>.nslabs (uint64_t) r- []
Cumulative number of slabs created.

stats.arenas.<i>.bins.<j>.nreslabs (uint64_t) r- []
Cumulative number of times the current slab from which to allocate changed.

stats.arenas.<i>.bins.<j>.curslabs (size_t) r- []
Current number of slabs.

stats.arenas.<i>.bins.<j>.mutex.{counter} (counter specific type) r- []
Statistics on arena.<i>.bins.<j> mutex (arena bin scope; bin operation related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.lextents.<j>.nmalloc (uint64_t) r- []
Cumulative number of times a large extent of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.lextents.<j>.ndalloc (uint64_t) r- []
Cumulative number of times a large extent of the corresponding size class was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.lextents.<j>.nrequests (uint64_t) r- []
Cumulative number of allocation requests satisfied by large extents of the corresponding size class.

stats.arenas.<i>.lextents.<j>.curlextents (size_t) r- []
Current number of large allocations for this size class.

stats.arenas.<i>.mutexes.large.{counter} (counter specific type) r- []
Statistics on arena.<i>.large mutex (arena scope; large allocation related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extent_avail.{counter} (counter specific type) r- []
Statistics on arena.<i>.extent_avail mutex (arena scope; extent avail related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_dirty.{counter} (counter specific type) r- []
Statistics on arena.<i>.extents_dirty mutex (arena scope; dirty extents related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_muzzy.{counter} (counter specific type) r- []
Statistics on arena.<i>.extents_muzzy mutex (arena scope; muzzy extents related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_retained.{counter} (counter specific type) r- []
Statistics on arena.<i>.extents_retained mutex (arena scope; retained extents related). {counter} is one of the counters in mutex profiling counters.
stats.arenas.<i>.mutexes.decay_dirty.{counter} (counter specific type) r- []
Statistics on arena.<i>.decay_dirty mutex (arena scope; decay for dirty pages related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.decay_muzzy.{counter} (counter specific type) r- []
Statistics on arena.<i>.decay_muzzy mutex (arena scope; decay for muzzy pages related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.base.{counter} (counter specific type) r- []
Statistics on arena.<i>.base mutex (arena scope; base allocator related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.tcache_list.{counter} (counter specific type) r- []
Statistics on arena.<i>.tcache_list mutex (arena scope; tcache to arena association related). This mutex is expected to be accessed less often. {counter} is one of the counters in mutex profiling counters.

HEAP PROFILE FORMAT

Although the heap profiling functionality was originally designed to be compatible with the pprof command that is developed as part of the gperftools package, the addition of per thread heap profiling functionality required a different heap profile format. The jeprof command is derived from pprof, with enhancements to support the heap profile format described here. In heap profiles, [...] indicates elision for the sake of compactness. A profile has the following layout, where each <description> token indicates the corresponding field:

<heap_profile_format_version>/<mean_sample_interval>
  <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
  [...]
  <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
  [...]
  <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
  [...]
@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
  <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
  <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
  <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
[...]
MAPPED_LIBRARIES:
<contents of /proc/<pid>/maps>

DEBUGGING MALLOC PROBLEMS

When debugging, it is a good idea to configure/build jemalloc with the --enable-debug and --enable-fill options, and recompile the program with suitable options and symbols for debugger support. When so configured, jemalloc incorporates a wide variety of run-time assertions that catch application errors such as double-free, write-after-free, etc.

Programs often accidentally depend on uninitialized memory actually being filled with zero bytes. Junk filling (see the opt.junk option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps. Conversely, zero filling (see the opt.zero option) eliminates the symptoms of such bugs. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs. This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive.

DIAGNOSTIC MESSAGES

If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor STDERR_FILENO. Errors will result in the process dumping core. If the opt.abort option is set, most warnings are treated as errors. The malloc_message variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the STDERR_FILENO file descriptor is not suitable for this. malloc_message() takes the cbopaque pointer argument that is NULL unless overridden by the arguments in a call to malloc_stats_print(), followed by a string pointer. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock. All messages are prefixed by <jemalloc>: .
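Because an overriding malloc_message() must not allocate, it should restrict itself to primitives such as write(2). A minimal sketch, assuming an unprefixed jemalloc build, with the log descriptor as a hypothetical stand-in:

    #include <string.h>
    #include <unistd.h>
    #include <jemalloc/jemalloc.h>

    static int log_fd = 2; /* hypothetical: replace with a pre-opened log fd */

    /* Emit diagnostics with write(2) only; allocating here could crash
     * or deadlock, as noted above. */
    static void
    log_malloc_message(void *cbopaque, const char *s) {
        (void)cbopaque;
        (void)!write(log_fd, s, strlen(s));
    }

    void install_message_hook(void) {
        malloc_message = log_malloc_message;
    }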
RETURN VALUES

Standard API

The malloc() and calloc() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set to ENOMEM.

The posix_memalign() function returns the value 0 if successful; otherwise it returns an error value. The posix_memalign() function will fail if:

EINVAL The alignment parameter is not a power of 2 at least as large as sizeof(void *).

ENOMEM Memory allocation error.

The aligned_alloc() function returns a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set. The aligned_alloc() function will fail if:

EINVAL The alignment parameter is not a power of 2.

ENOMEM Memory allocation error.

The realloc() function returns a pointer, possibly identical to ptr, to the allocated memory if successful; otherwise a NULL pointer is returned, and errno is set to ENOMEM if the error was the result of an allocation failure. The realloc() function always leaves the original buffer intact when an error occurs.

The free() function returns no value.

Non-standard API

The mallocx() and rallocx() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned to indicate insufficient contiguous memory was available to service the allocation request.

The xallocx() function returns the real size of the resulting resized allocation pointed to by ptr, which is a value less than size if the allocation could not be adequately grown in place.

The sallocx() function returns the real size of the allocation pointed to by ptr.

The nallocx() function returns the real size that would result from a successful equivalent mallocx() function call, or zero if insufficient memory is available to perform the size computation.

The mallctl(), mallctlnametomib(), and mallctlbymib() functions return 0 on success; otherwise they return an error value. The functions will fail if:

EINVAL newp is not NULL, and newlen is too large or too small. Alternatively, *oldlenp is too large or too small; in this case as much data as possible are read despite the error.

ENOENT name or mib specifies an unknown/invalid value.

EPERM Attempt to read or write void value, or attempt to write read-only value.

EAGAIN A memory allocation failure occurred.

EFAULT An interface with side effects failed in some way not directly related to mallctl*() read/write processing.

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr.

ENVIRONMENT

The following environment variable affects the execution of the allocation functions:

MALLOC_CONF
If the environment variable MALLOC_CONF is set, the characters it contains will be interpreted as options.

EXAMPLES

To dump core whenever a problem occurs:

ln -s 'abort:true' /etc/malloc.conf

To specify in the source that only one arena should be automatically created:

malloc_conf = "narenas:1";

SEE ALSO

madvise(2), mmap(2), sbrk(2), utrace(2), alloca(3), atexit(3), getpagesize(3)

STANDARDS

The malloc(), calloc(), realloc(), and free() functions conform to ISO/IEC 9899:1990 (ISO C90).

The posix_memalign() function conforms to IEEE Std 1003.1-2001 (POSIX.1).
jemalloc-sys-0.3.2/jemalloc/doc/manpages.xsl.in010064400007650000024000000003171340421340100176110ustar0000000000000000 jemalloc-sys-0.3.2/jemalloc/doc/stylesheet.xsl010064400007650000024000000006371340421340100176070ustar0000000000000000 ansi jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/arena_externs.h010064400007650000024000000107031340421341300241530ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H #define JEMALLOC_INTERNAL_ARENA_EXTERNS_H #include "jemalloc/internal/bin.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" extern ssize_t opt_dirty_decay_ms; extern ssize_t opt_muzzy_decay_ms; extern percpu_arena_mode_t opt_percpu_arena; extern const char *percpu_arena_mode_names[]; extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS]; extern malloc_mutex_t arenas_lock; void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, bin_stats_t *bstats, arena_stats_large_t *lstats); void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent); #ifdef JEMALLOC_JET size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr); #endif extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero); void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent); void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldsize); void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldsize); ssize_t arena_dirty_decay_ms_get(arena_t *arena); bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); ssize_t arena_muzzy_decay_ms_get(arena_t *arena); bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); void arena_reset(tsd_t *tsd, arena_t *arena); void arena_destroy(tsd_t *tsd, arena_t *arena); void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero); typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *); extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small; void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero); void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache); void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize); void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr); void arena_dalloc_small(tsdn_t *tsdn, void *ptr); bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache); dss_prec_t 
arena_dss_prec_get(arena_t *arena); bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); ssize_t arena_dirty_decay_ms_default_get(void); bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); ssize_t arena_muzzy_decay_ms_default_get(void); bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms); bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit); unsigned arena_nthreads_get(arena_t *arena, bool internal); void arena_nthreads_inc(arena_t *arena, bool internal); void arena_nthreads_dec(arena_t *arena, bool internal); size_t arena_extent_sn_next(arena_t *arena); arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); void arena_boot(void); void arena_prefork0(tsdn_t *tsdn, arena_t *arena); void arena_prefork1(tsdn_t *tsdn, arena_t *arena); void arena_prefork2(tsdn_t *tsdn, arena_t *arena); void arena_prefork3(tsdn_t *tsdn, arena_t *arena); void arena_prefork4(tsdn_t *tsdn, arena_t *arena); void arena_prefork5(tsdn_t *tsdn, arena_t *arena); void arena_prefork6(tsdn_t *tsdn, arena_t *arena); void arena_prefork7(tsdn_t *tsdn, arena_t *arena); void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); #endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/arena_inlines_a.h010064400007650000024000000027021340421340100244210ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H #define JEMALLOC_INTERNAL_ARENA_INLINES_A_H static inline unsigned arena_ind_get(const arena_t *arena) { return base_ind_get(arena->base); } static inline void arena_internal_add(arena_t *arena, size_t size) { atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED); } static inline void arena_internal_sub(arena_t *arena, size_t size) { atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED); } static inline size_t arena_internal_get(arena_t *arena) { return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); } static inline bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (likely(prof_interval == 0 || !prof_active_get_unlocked())) { return false; } return prof_accum_add(tsdn, &arena->prof_accum, accumbytes); } static inline void percpu_arena_update(tsd_t *tsd, unsigned cpu) { assert(have_percpu_arena); arena_t *oldarena = tsd_arena_get(tsd); assert(oldarena != NULL); unsigned oldind = arena_ind_get(oldarena); if (oldind != cpu) { unsigned newind = cpu; arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); assert(newarena != NULL); /* Set new arena/tcache associations. */ arena_migrate(tsd, oldind, newind); tcache_t *tcache = tcache_get(tsd); if (tcache != NULL) { tcache_arena_reassociate(tsd_tsdn(tsd), tcache, newarena); } } } #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/arena_inlines_b.h010064400007650000024000000225421340421341300244310ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H #define JEMALLOC_INTERNAL_ARENA_INLINES_B_H #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" JEMALLOC_ALWAYS_INLINE prof_tctx_t * arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); /* Static check. 
*/ if (alloc_ctx == NULL) { const extent_t *extent = iealloc(tsdn, ptr); if (unlikely(!extent_slab_get(extent))) { return large_prof_tctx_get(tsdn, extent); } } else { if (unlikely(!alloc_ctx->slab)) { return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr)); } } return (prof_tctx_t *)(uintptr_t)1U; } JEMALLOC_ALWAYS_INLINE void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize, alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); /* Static check. */ if (alloc_ctx == NULL) { extent_t *extent = iealloc(tsdn, ptr); if (unlikely(!extent_slab_get(extent))) { large_prof_tctx_set(tsdn, extent, tctx); } } else { if (unlikely(!alloc_ctx->slab)) { large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx); } } } static inline void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); extent_t *extent = iealloc(tsdn, ptr); assert(!extent_slab_get(extent)); large_prof_tctx_reset(tsdn, extent); } JEMALLOC_ALWAYS_INLINE void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { tsd_t *tsd; ticker_t *decay_ticker; if (unlikely(tsdn_null(tsdn))) { return; } tsd = tsdn_tsd(tsdn); decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena)); if (unlikely(decay_ticker == NULL)) { return; } if (unlikely(ticker_ticks(decay_ticker, nticks))) { arena_decay(tsdn, arena, false, false); } } JEMALLOC_ALWAYS_INLINE void arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx); malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx); arena_decay_ticks(tsdn, arena, 1); } JEMALLOC_ALWAYS_INLINE void * arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(size != 0); if (likely(tcache != NULL)) { if (likely(size <= SMALL_MAXCLASS)) { return tcache_alloc_small(tsdn_tsd(tsdn), arena, tcache, size, ind, zero, slow_path); } if (likely(size <= tcache_maxclass)) { return tcache_alloc_large(tsdn_tsd(tsdn), arena, tcache, size, ind, zero, slow_path); } /* (size > tcache_maxclass) case falls through. */ assert(size > tcache_maxclass); } return arena_malloc_hard(tsdn, arena, size, ind, zero); } JEMALLOC_ALWAYS_INLINE arena_t * arena_aalloc(tsdn_t *tsdn, const void *ptr) { return extent_arena_get(iealloc(tsdn, ptr)); } JEMALLOC_ALWAYS_INLINE size_t arena_salloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind != NSIZES); return sz_index2size(szind); } JEMALLOC_ALWAYS_INLINE size_t arena_vsalloc(tsdn_t *tsdn, const void *ptr) { /* * Return 0 if ptr is not within an extent managed by jemalloc. This * function has two extra costs relative to isalloc(): * - The rtree calls cannot claim to be dependent lookups, which induces * rtree lookup load dependencies. * - The lookup may fail, so there is an extra branch to check for * failure. */ rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent; szind_t szind; if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, false, &extent, &szind)) { return 0; } if (extent == NULL) { return 0; } assert(extent_state_get(extent) == extent_state_active); /* Only slab members should be looked up via interior pointers. 
*/ assert(extent_addr_get(extent) == ptr || extent_slab_get(extent)); assert(szind != NSIZES); return sz_index2size(szind); } static inline void arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { assert(ptr != NULL); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); szind_t szind; bool slab; rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); if (config_debug) { extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(szind < NSIZES); assert(slab == extent_slab_get(extent)); } if (likely(slab)) { /* Small allocation. */ arena_dalloc_small(tsdn, ptr); } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } JEMALLOC_ALWAYS_INLINE void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); if (unlikely(tcache == NULL)) { arena_dalloc_no_tcache(tsdn, ptr); return; } szind_t szind; bool slab; rtree_ctx_t *rtree_ctx; if (alloc_ctx != NULL) { szind = alloc_ctx->szind; slab = alloc_ctx->slab; assert(szind != NSIZES); } else { rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); } if (config_debug) { rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(szind < NSIZES); assert(slab == extent_slab_get(extent)); } if (likely(slab)) { /* Small allocation. */ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } else { if (szind < nhbins) { if (config_prof && unlikely(szind < NBINS)) { arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); } else { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } } static inline void arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { assert(ptr != NULL); assert(size <= LARGE_MAXCLASS); szind_t szind; bool slab; if (!config_prof || !opt_prof) { /* * There is no risk of being confused by a promoted sampled * object, so base szind and slab on the given size. */ szind = sz_size2index(size); slab = (szind < NBINS); } if ((config_prof && opt_prof) || config_debug) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); assert(szind == sz_size2index(size)); assert((config_prof && opt_prof) || slab == (szind < NBINS)); if (config_debug) { extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(slab == extent_slab_get(extent)); } } if (likely(slab)) { /* Small allocation. 
*/ arena_dalloc_small(tsdn, ptr); } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } JEMALLOC_ALWAYS_INLINE void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); assert(size <= LARGE_MAXCLASS); if (unlikely(tcache == NULL)) { arena_sdalloc_no_tcache(tsdn, ptr, size); return; } szind_t szind; bool slab; UNUSED alloc_ctx_t local_ctx; if (config_prof && opt_prof) { if (alloc_ctx == NULL) { /* Uncommon case and should be a static check. */ rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &local_ctx.szind, &local_ctx.slab); assert(local_ctx.szind == sz_size2index(size)); alloc_ctx = &local_ctx; } slab = alloc_ctx->slab; szind = alloc_ctx->szind; } else { /* * There is no risk of being confused by a promoted sampled * object, so base szind and slab on the given size. */ szind = sz_size2index(size); slab = (szind < NBINS); } if (config_debug) { rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(slab == extent_slab_get(extent)); } if (likely(slab)) { /* Small allocation. */ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } else { if (szind < nhbins) { if (config_prof && unlikely(szind < NBINS)) { arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); } else { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } } #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/arena_stats.h010064400007650000024000000152731340421341300236300ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H #define JEMALLOC_INTERNAL_ARENA_STATS_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/size_classes.h" /* * In those architectures that support 64-bit atomics, we use atomic updates for * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize * externally. */ #ifdef JEMALLOC_ATOMIC_U64 typedef atomic_u64_t arena_stats_u64_t; #else /* Must hold the arena stats mutex while reading atomically. */ typedef uint64_t arena_stats_u64_t; #endif typedef struct arena_stats_large_s arena_stats_large_t; struct arena_stats_large_s { /* * Total number of allocation/deallocation requests served directly by * the arena. */ arena_stats_u64_t nmalloc; arena_stats_u64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ arena_stats_u64_t nrequests; /* Partially derived. */ /* Current number of allocations of this size class. */ size_t curlextents; /* Derived. */ }; typedef struct arena_stats_decay_s arena_stats_decay_t; struct arena_stats_decay_s { /* Total number of purge sweeps. */ arena_stats_u64_t npurge; /* Total number of madvise calls made. */ arena_stats_u64_t nmadvise; /* Total number of pages purged. */ arena_stats_u64_t purged; }; /* * Arena stats. 
Note that fields marked "derived" are not directly maintained * within the arena code; rather their values are derived during stats merge * requests. */ typedef struct arena_stats_s arena_stats_t; struct arena_stats_s { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_t mtx; #endif /* Number of bytes currently mapped, excluding retained memory. */ atomic_zu_t mapped; /* Partially derived. */ /* * Number of unused virtual memory bytes currently retained. Retained * bytes are technically mapped (though always decommitted or purged), * but they are excluded from the mapped statistic (above). */ atomic_zu_t retained; /* Derived. */ arena_stats_decay_t decay_dirty; arena_stats_decay_t decay_muzzy; atomic_zu_t base; /* Derived. */ atomic_zu_t internal; atomic_zu_t resident; /* Derived. */ atomic_zu_t metadata_thp; atomic_zu_t allocated_large; /* Derived. */ arena_stats_u64_t nmalloc_large; /* Derived. */ arena_stats_u64_t ndalloc_large; /* Derived. */ arena_stats_u64_t nrequests_large; /* Derived. */ /* Number of bytes cached in tcache associated with this arena. */ atomic_zu_t tcache_bytes; /* Derived. */ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; /* One element for each large size class. */ arena_stats_large_t lstats[NSIZES - NBINS]; /* Arena uptime. */ nstime_t uptime; }; static inline bool arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) { if (config_debug) { for (size_t i = 0; i < sizeof(arena_stats_t); i++) { assert(((char *)arena_stats)[i] == 0); } } #ifndef JEMALLOC_ATOMIC_U64 if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { return true; } #endif /* Memory is zeroed, so there is no need to clear stats. */ return false; } static inline void arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_lock(tsdn, &arena_stats->mtx); #endif } static inline void arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_unlock(tsdn, &arena_stats->mtx); #endif } static inline uint64_t arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_u64_t *p) { #ifdef JEMALLOC_ATOMIC_U64 return atomic_load_u64(p, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); return *p; #endif } static inline void arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_u64_t *p, uint64_t x) { #ifdef JEMALLOC_ATOMIC_U64 atomic_fetch_add_u64(p, x, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); *p += x; #endif } UNUSED static inline void arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_u64_t *p, uint64_t x) { #ifdef JEMALLOC_ATOMIC_U64 UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); assert(r - x <= r); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); *p -= x; assert(*p + x >= *p); #endif } /* * Non-atomically sets *dst += src. *dst needs external synchronization. * This lets us avoid the cost of a fetch_add when it's unnecessary (note that * the types here are atomic). 
*/ static inline void arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) { #ifdef JEMALLOC_ATOMIC_U64 uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED); #else *dst += src; #endif } static inline size_t arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) { #ifdef JEMALLOC_ATOMIC_U64 return atomic_load_zu(p, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); return atomic_load_zu(p, ATOMIC_RELAXED); #endif } static inline void arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, size_t x) { #ifdef JEMALLOC_ATOMIC_U64 atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); atomic_store_zu(p, cur + x, ATOMIC_RELAXED); #endif } static inline void arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, size_t x) { #ifdef JEMALLOC_ATOMIC_U64 UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); assert(r - x <= r); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); atomic_store_zu(p, cur - x, ATOMIC_RELAXED); #endif } /* Like the _u64 variant, needs an externally synchronized *dst. */ static inline void arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); } static inline void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, szind_t szind, uint64_t nrequests) { arena_stats_lock(tsdn, arena_stats); arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind - NBINS].nrequests, nrequests); arena_stats_unlock(tsdn, arena_stats); } static inline void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { arena_stats_lock(tsdn, arena_stats); arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); arena_stats_unlock(tsdn, arena_stats); } #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/arena_structs_a.h010064400007650000024000000004451340421340100244710ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H #define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H #include "jemalloc/internal/bitmap.h" struct arena_slab_data_s { /* Per region allocated/deallocated bitmap. */ bitmap_t bitmap[BITMAP_GROUPS_MAX]; }; #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/arena_structs_b.h010064400007650000024000000150451340421341300244770ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H #define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H #include "jemalloc/internal/arena_stats.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bin.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/smoothstep.h" #include "jemalloc/internal/ticker.h" struct arena_decay_s { /* Synchronizes all non-atomic fields. */ malloc_mutex_t mtx; /* * True if a thread is currently purging the extents associated with * this decay structure. 
*/ bool purging; /* * Approximate time in milliseconds from the creation of a set of unused * dirty pages until an equivalent set of unused dirty pages is purged * and/or reused. */ atomic_zd_t time_ms; /* time / SMOOTHSTEP_NSTEPS. */ nstime_t interval; /* * Time at which the current decay interval logically started. We do * not actually advance to a new epoch until sometime after it starts * because of scheduling and computation delays, and it is even possible * to completely skip epochs. In all cases, during epoch advancement we * merge all relevant activity into the most recently recorded epoch. */ nstime_t epoch; /* Deadline randomness generator. */ uint64_t jitter_state; /* * Deadline for current epoch. This is the sum of interval and per * epoch jitter which is a uniform random variable in [0..interval). * Epochs always advance by precise multiples of interval, but we * randomize the deadline to reduce the likelihood of arenas purging in * lockstep. */ nstime_t deadline; /* * Number of unpurged pages at beginning of current epoch. During epoch * advancement we use the delta between arena->decay_*.nunpurged and * extents_npages_get(&arena->extents_*) to determine how many dirty * pages, if any, were generated. */ size_t nunpurged; /* * Trailing log of how many unused dirty pages were generated during * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last * element is the most recent epoch. Corresponding epoch times are * relative to epoch. */ size_t backlog[SMOOTHSTEP_NSTEPS]; /* * Pointer to associated stats. These stats are embedded directly in * the arena's stats due to how stats structures are shared between the * arena and ctl code. * * Synchronization: Same as associated arena's stats field. */ arena_stats_decay_t *stats; /* Peak number of pages in associated extents. Used for debug only. */ uint64_t ceil_npages; }; struct arena_s { /* * Number of threads currently assigned to this arena. Each thread has * two distinct assignments, one for application-serving allocation, and * the other for internal metadata allocation. Internal metadata must * not be allocated from arenas explicitly created via the arenas.create * mallctl, because the arena.<i>.reset mallctl indiscriminately * discards all allocations for the affected arena. * * 0: Application allocation. * 1: Internal metadata allocation. * * Synchronization: atomic. */ atomic_u_t nthreads[2]; /* * When percpu_arena is enabled, to amortize the cost of reading / * updating the current CPU id, track the most recent thread accessing * this arena, and only read CPU if there is a mismatch. */ tsdn_t *last_thd; /* Synchronization: internal. */ arena_stats_t stats; /* * Lists of tcaches and cache_bin_array_descriptors for extant threads * associated with this arena. Stats from these are merged * incrementally, and at exit if opt_stats_print is enabled. * * Synchronization: tcache_ql_mtx. */ ql_head(tcache_t) tcache_ql; ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; malloc_mutex_t tcache_ql_mtx; /* Synchronization: internal. */ prof_accum_t prof_accum; uint64_t prof_accumbytes; /* * PRNG state for cache index randomization of large allocation base * pointers. * * Synchronization: atomic. */ atomic_zu_t offset_state; /* * Extent serial number generator state. * * Synchronization: atomic. */ atomic_zu_t extent_sn_next; /* * Represents a dss_prec_t, but atomically. * * Synchronization: atomic. */ atomic_u_t dss_prec; /* * Number of pages in active extents. * * Synchronization: atomic. 
*/ atomic_zu_t nactive; /* * Extant large allocations. * * Synchronization: large_mtx. */ extent_list_t large; /* Synchronizes all large allocation/update/deallocation. */ malloc_mutex_t large_mtx; /* * Collections of extents that were previously allocated. These are * used when allocating extents, in an attempt to re-use address space. * * Synchronization: internal. */ extents_t extents_dirty; extents_t extents_muzzy; extents_t extents_retained; /* * Decay-based purging state, responsible for scheduling extent state * transitions. * * Synchronization: internal. */ arena_decay_t decay_dirty; /* dirty --> muzzy */ arena_decay_t decay_muzzy; /* muzzy --> retained */ /* * Next extent size class in a growing series to use when satisfying a * request via the extent hooks (only if opt_retain). This limits the * number of disjoint virtual memory ranges so that extent merging can * be effective even if multiple arenas' extent allocation requests are * highly interleaved. * * retain_grow_limit is the max allowed size ind to expand (unless the * required size is greater). Default is no limit, and controlled * through mallctl only. * * Synchronization: extent_grow_mtx */ pszind_t extent_grow_next; pszind_t retain_grow_limit; malloc_mutex_t extent_grow_mtx; /* * Available extent structures that were allocated via * base_alloc_extent(). * * Synchronization: extent_avail_mtx. */ extent_tree_t extent_avail; malloc_mutex_t extent_avail_mtx; /* * bins is used to store heaps of free regions. * * Synchronization: internal. */ bin_t bins[NBINS]; /* * Base allocator, from which arena metadata are allocated. * * Synchronization: internal. */ base_t *base; /* Used to determine uptime. Read-only after initialization. */ nstime_t create_time; }; /* Used in conjunction with tsd for fast arena-related context lookup. */ struct arena_tdata_s { ticker_t decay_ticker; }; /* Used to pass rtree lookup context down the path. */ struct alloc_ctx_s { szind_t szind; bool slab; }; #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/arena_types.h010064400007650000024000000027051340421341300236320ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H #define JEMALLOC_INTERNAL_ARENA_TYPES_H /* Maximum number of regions in one slab. */ #define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN) #define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS) /* Default decay times in milliseconds. */ #define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) #define MUZZY_DECAY_MS_DEFAULT ZD(10 * 1000) /* Number of event ticks between time checks. */ #define DECAY_NTICKS_PER_UPDATE 1000 typedef struct arena_slab_data_s arena_slab_data_t; typedef struct arena_decay_s arena_decay_t; typedef struct arena_s arena_t; typedef struct arena_tdata_s arena_tdata_t; typedef struct alloc_ctx_s alloc_ctx_t; typedef enum { percpu_arena_mode_names_base = 0, /* Used for options processing. */ /* * *_uninit are used only during bootstrapping, and must correspond * to initialized variant plus percpu_arena_mode_enabled_base. */ percpu_arena_uninit = 0, per_phycpu_arena_uninit = 1, /* All non-disabled modes must come after percpu_arena_disabled. */ percpu_arena_disabled = 2, percpu_arena_mode_names_limit = 3, /* Used for options processing. */ percpu_arena_mode_enabled_base = 3, percpu_arena = 3, per_phycpu_arena = 4 /* Hyper threads share arena. 
*/ } percpu_arena_mode_t; #define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) #define PERCPU_ARENA_DEFAULT percpu_arena_disabled #endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/assert.h010064400007650000024000000024621340421340100226160ustar0000000000000000#include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/util.h" /* * Define a custom assert() in order to reduce the chances of deadlock during * assertion failure. */ #ifndef assert #define assert(e) do { \ if (unlikely(config_debug && !(e))) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #endif #ifndef not_reached #define not_reached() do { \ if (config_debug) { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } \ unreachable(); \ } while (0) #endif #ifndef not_implemented #define not_implemented() do { \ if (config_debug) { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #ifndef assert_not_implemented #define assert_not_implemented(e) do { \ if (unlikely(config_debug && !(e))) { \ not_implemented(); \ } \ } while (0) #endif /* Use to assert a particular configuration, e.g., cassert(config_debug). */ #ifndef cassert #define cassert(c) do { \ if (unlikely(!(c))) { \ not_reached(); \ } \ } while (0) #endif jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/atomic.h010064400007650000024000000046421340421341300225760ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_H #define JEMALLOC_INTERNAL_ATOMIC_H #define ATOMIC_INLINE static inline #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) # include "jemalloc/internal/atomic_gcc_atomic.h" #elif defined(JEMALLOC_GCC_SYNC_ATOMICS) # include "jemalloc/internal/atomic_gcc_sync.h" #elif defined(_MSC_VER) # include "jemalloc/internal/atomic_msvc.h" #elif defined(JEMALLOC_C11_ATOMICS) # include "jemalloc/internal/atomic_c11.h" #else # error "Don't have atomics implemented on this platform." #endif /* * This header gives more or less a backport of C11 atomics. The user can write * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate * counterparts of the C11 atomic functions for type, as so: * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3); * and then write things like: * int *some_ptr; * atomic_pi_t atomic_ptr_to_int; * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED); * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL); * assert(some_ptr == prev_value); * and expect things to work in the obvious way. * * Also included (with naming differences to avoid conflicts with the standard * library): * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). */ /* * Pure convenience, so that we don't have to type "atomic_memory_order_" * quite so often. */ #define ATOMIC_RELAXED atomic_memory_order_relaxed #define ATOMIC_ACQUIRE atomic_memory_order_acquire #define ATOMIC_RELEASE atomic_memory_order_release #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
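/*
 * Illustrative sketch (not part of jemalloc; `example_u` and
 * `example_dec_saturating` are hypothetical names): read-modify-write
 * operations that the generated fetch-ops do not cover can be built from the
 * weak compare-exchange in a retry loop, e.g. a decrement that saturates at
 * zero and returns the previous value:
 */
JEMALLOC_GENERATE_INT_ATOMICS(unsigned, example_u, LG_SIZEOF_INT)

ATOMIC_INLINE unsigned
example_dec_saturating(atomic_example_u_t *a) {
	unsigned cur = atomic_load_example_u(a, ATOMIC_RELAXED);
	while (cur != 0 && !atomic_compare_exchange_weak_example_u(a, &cur,
	    cur - 1, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
		/* cur was refreshed by the failed compare-exchange; retry. */
	}
	return cur;
}

/*
 * Not all platforms have 64-bit atomics. If we do, this #define exposes that
 * fact.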
*/ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # define JEMALLOC_ATOMIC_U64 #endif JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) /* * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only * platform that actually needs to know the size, MSVC. */ JEMALLOC_GENERATE_ATOMICS(bool, b, 0) JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) #ifdef JEMALLOC_ATOMIC_U64 JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) #endif #undef ATOMIC_INLINE #endif /* JEMALLOC_INTERNAL_ATOMIC_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/atomic_c11.h010064400007650000024000000067321340421340100232410ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H #define JEMALLOC_INTERNAL_ATOMIC_C11_H #include <stdatomic.h> #define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__) #define atomic_memory_order_t memory_order #define atomic_memory_order_relaxed memory_order_relaxed #define atomic_memory_order_acquire memory_order_acquire #define atomic_memory_order_release memory_order_release #define atomic_memory_order_acq_rel memory_order_acq_rel #define atomic_memory_order_seq_cst memory_order_seq_cst #define atomic_fence atomic_thread_fence #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ /* unused */ lg_size) \ typedef _Atomic(type) atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ /* \ * A strict interpretation of the C standard prevents \ * atomic_load from taking a const argument, but it's \ * convenient for our purposes. This cast is a workaround. \ */ \ atomic_##short_type##_t* a_nonconst = \ (atomic_##short_type##_t*)a; \ return atomic_load_explicit(a_nonconst, mo); \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ atomic_store_explicit(a, val, mo); \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return atomic_exchange_explicit(a, val, mo); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return atomic_compare_exchange_weak_explicit(a, expected, \ desired, success_mo, failure_mo); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return atomic_compare_exchange_strong_explicit(a, expected, \ desired, success_mo, failure_mo); \ }
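/*
 * Illustrative aside (not part of jemalloc; `example_tick` and the helper
 * below are hypothetical): instantiating the macro above produces a typedef
 * plus load/store/exchange/compare-exchange wrappers, so e.g. a counter that
 * can be read and zeroed in one atomic step looks like:
 */
JEMALLOC_GENERATE_ATOMICS(unsigned, example_tick, /* unused */ 2)

ATOMIC_INLINE unsigned
example_tick_reset(atomic_example_tick_t *t) {
	/* Returns the old count and zeroes the counter atomically. */
	return atomic_exchange_example_tick(t, 0,
	    atomic_memory_order_acq_rel);
}

/*
 * Integral types have some special operations available that non-integral ones
 * lack.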
*/ #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ /* unused */ lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_add_explicit(a, val, mo); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_sub_explicit(a, val, mo); \ } \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_and_explicit(a, val, mo); \ } \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_or_explicit(a, val, mo); \ } \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_xor_explicit(a, val, mo); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h010064400007650000024000000100211340421340100247270ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H #define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H #include "jemalloc/internal/assert.h" #define ATOMIC_INIT(...) {__VA_ARGS__} typedef enum { atomic_memory_order_relaxed, atomic_memory_order_acquire, atomic_memory_order_release, atomic_memory_order_acq_rel, atomic_memory_order_seq_cst } atomic_memory_order_t; ATOMIC_INLINE int atomic_enum_to_builtin(atomic_memory_order_t mo) { switch (mo) { case atomic_memory_order_relaxed: return __ATOMIC_RELAXED; case atomic_memory_order_acquire: return __ATOMIC_ACQUIRE; case atomic_memory_order_release: return __ATOMIC_RELEASE; case atomic_memory_order_acq_rel: return __ATOMIC_ACQ_REL; case atomic_memory_order_seq_cst: return __ATOMIC_SEQ_CST; } /* Can't happen; the switch is exhaustive. 
*/ not_reached(); } ATOMIC_INLINE void atomic_fence(atomic_memory_order_t mo) { __atomic_thread_fence(atomic_enum_to_builtin(mo)); } #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ /* unused */ lg_size) \ typedef struct { \ type repr; \ } atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ type result; \ __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ return result; \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ type result; \ __atomic_exchange(&a->repr, &val, &result, \ atomic_enum_to_builtin(mo)); \ return result; \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return __atomic_compare_exchange(&a->repr, expected, &desired, \ true, atomic_enum_to_builtin(success_mo), \ atomic_enum_to_builtin(failure_mo)); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return __atomic_compare_exchange(&a->repr, expected, &desired, \ false, \ atomic_enum_to_builtin(success_mo), \ atomic_enum_to_builtin(failure_mo)); \ } #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ /* unused */ lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_add(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_sub(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_and(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_or(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_xor(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h010064400007650000024000000137511340421341300244470ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H #define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H #define ATOMIC_INIT(...) {__VA_ARGS__} typedef enum { atomic_memory_order_relaxed, atomic_memory_order_acquire, atomic_memory_order_release, atomic_memory_order_acq_rel, atomic_memory_order_seq_cst } atomic_memory_order_t; ATOMIC_INLINE void atomic_fence(atomic_memory_order_t mo) { /* Easy cases first: no barrier, and full barrier. 
*/ if (mo == atomic_memory_order_relaxed) { asm volatile("" ::: "memory"); return; } if (mo == atomic_memory_order_seq_cst) { asm volatile("" ::: "memory"); __sync_synchronize(); asm volatile("" ::: "memory"); return; } asm volatile("" ::: "memory"); # if defined(__i386__) || defined(__x86_64__) /* This is implicit on x86. */ # elif defined(__ppc__) asm volatile("lwsync"); # elif defined(__sparc__) && defined(__arch64__) if (mo == atomic_memory_order_acquire) { asm volatile("membar #LoadLoad | #LoadStore"); } else if (mo == atomic_memory_order_release) { asm volatile("membar #LoadStore | #StoreStore"); } else { asm volatile("membar #LoadLoad | #LoadStore | #StoreStore"); } # else __sync_synchronize(); # endif asm volatile("" ::: "memory"); } /* * A correct implementation of seq_cst loads and stores on weakly ordered * architectures could do either of the following: * 1. store() is weak-fence -> store -> strong fence, load() is load -> * strong-fence. * 2. store() is strong-fence -> store, load() is strong-fence -> load -> * weak-fence. * The tricky thing is, load() and store() above can be the load or store * portions of a gcc __sync builtin, so we have to follow GCC's lead, which * means going with strategy 2. * On strongly ordered architectures, the natural strategy is to stick a strong * fence after seq_cst stores, and have naked loads. So we want the strong * fences in different places on different architectures. * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to * accomplish this. */ ATOMIC_INLINE void atomic_pre_sc_load_fence() { # if defined(__i386__) || defined(__x86_64__) || \ (defined(__sparc__) && defined(__arch64__)) atomic_fence(atomic_memory_order_relaxed); # else atomic_fence(atomic_memory_order_seq_cst); # endif } ATOMIC_INLINE void atomic_post_sc_store_fence() { # if defined(__i386__) || defined(__x86_64__) || \ (defined(__sparc__) && defined(__arch64__)) atomic_fence(atomic_memory_order_seq_cst); # else atomic_fence(atomic_memory_order_relaxed); # endif } #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ /* unused */ lg_size) \ typedef struct { \ type volatile repr; \ } atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ if (mo == atomic_memory_order_seq_cst) { \ atomic_pre_sc_load_fence(); \ } \ type result = a->repr; \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_acquire); \ } \ return result; \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_release); \ } \ a->repr = val; \ if (mo == atomic_memory_order_seq_cst) { \ atomic_post_sc_store_fence(); \ } \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ /* \ * Because of FreeBSD, we care about gcc 4.2, which doesn't have\ * an atomic exchange builtin. We fake it with a CAS loop. 
\ */ \ while (true) { \ type old = a->repr; \ if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \ return old; \ } \ } \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ desired); \ if (prev == *expected) { \ return true; \ } else { \ *expected = prev; \ return false; \ } \ } \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ desired); \ if (prev == *expected) { \ return true; \ } else { \ *expected = prev; \ return false; \ } \ } #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ /* unused */ lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_add(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_sub(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_and(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_or(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_xor(&a->repr, val); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/atomic_msvc.h010064400007650000024000000126441340421340100236240ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H #define JEMALLOC_INTERNAL_ATOMIC_MSVC_H #define ATOMIC_INIT(...) {__VA_ARGS__} typedef enum { atomic_memory_order_relaxed, atomic_memory_order_acquire, atomic_memory_order_release, atomic_memory_order_acq_rel, atomic_memory_order_seq_cst } atomic_memory_order_t; typedef char atomic_repr_0_t; typedef short atomic_repr_1_t; typedef long atomic_repr_2_t; typedef __int64 atomic_repr_3_t; ATOMIC_INLINE void atomic_fence(atomic_memory_order_t mo) { _ReadWriteBarrier(); # if defined(_M_ARM) || defined(_M_ARM64) /* ARM needs a barrier for everything but relaxed. */ if (mo != atomic_memory_order_relaxed) { MemoryBarrier(); } # elif defined(_M_IX86) || defined (_M_X64) /* x86 needs a barrier only for seq_cst. */ if (mo == atomic_memory_order_seq_cst) { MemoryBarrier(); } # else # error "Don't know how to create atomics for this platform for MSVC." 
# endif _ReadWriteBarrier(); } #define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t #define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b) #define ATOMIC_RAW_CONCAT(a, b) a ## b #define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \ base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) #define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) #define ATOMIC_INTERLOCKED_SUFFIX_0 8 #define ATOMIC_INTERLOCKED_SUFFIX_1 16 #define ATOMIC_INTERLOCKED_SUFFIX_2 #define ATOMIC_INTERLOCKED_SUFFIX_3 64 #define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ typedef struct { \ ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ } atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_acquire); \ } \ return (type) ret; \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_release); \ } \ a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \ if (mo == atomic_memory_order_seq_cst) { \ atomic_fence(atomic_memory_order_seq_cst); \ } \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ ATOMIC_INTERLOCKED_REPR(lg_size) e = \ (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \ ATOMIC_INTERLOCKED_REPR(lg_size) d = \ (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ ATOMIC_INTERLOCKED_REPR(lg_size) old = \ ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \ lg_size)(&a->repr, d, e); \ if (old == e) { \ return true; \ } else { \ *expected = (type)old; \ return false; \ } \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ /* We implement the weak version with strong semantics. */ \ return atomic_compare_exchange_weak_##short_type(a, expected, \ desired, success_mo, failure_mo); \ } #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ /* \ * MSVC warns on negation of unsigned operands, but for us it \ * gives exactly the right semantics (MAX_TYPE + 1 - operand). 
\ */ \ __pragma(warning(push)) \ __pragma(warning(disable: 4146)) \ return atomic_fetch_add_##short_type(a, -val, mo); \ __pragma(warning(pop)) \ } \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/background_thread_externs.h010064400007650000024000000025301340421341300265320ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H extern bool opt_background_thread; extern size_t opt_max_background_threads; extern malloc_mutex_t background_thread_lock; extern atomic_b_t background_thread_enabled_state; extern size_t n_background_threads; extern size_t max_background_threads; extern background_thread_info_t *background_thread_info; extern bool can_enable_background_thread; bool background_thread_create(tsd_t *tsd, unsigned arena_ind); bool background_threads_enable(tsd_t *tsd); bool background_threads_disable(tsd_t *tsd); void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, size_t npages_new); void background_thread_prefork0(tsdn_t *tsdn); void background_thread_prefork1(tsdn_t *tsdn); void background_thread_postfork_parent(tsdn_t *tsdn); void background_thread_postfork_child(tsdn_t *tsdn); bool background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats); void background_thread_ctl_init(tsdn_t *tsdn); #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); #endif bool background_thread_boot0(void); bool background_thread_boot1(tsdn_t *tsdn); #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/background_thread_inlines.h010064400007650000024000000036521340421341300265110ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H JEMALLOC_ALWAYS_INLINE bool background_thread_enabled(void) { return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED); } JEMALLOC_ALWAYS_INLINE void background_thread_enabled_set(tsdn_t *tsdn, bool state) { malloc_mutex_assert_owner(tsdn, &background_thread_lock); atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED); } JEMALLOC_ALWAYS_INLINE background_thread_info_t * arena_background_thread_info_get(arena_t *arena) { unsigned arena_ind = arena_ind_get(arena); return &background_thread_info[arena_ind % ncpus]; } JEMALLOC_ALWAYS_INLINE uint64_t background_thread_wakeup_time_get(background_thread_info_t *info) { uint64_t next_wakeup = nstime_ns(&info->next_wakeup); assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); return next_wakeup; } 
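/*
 * Usage sketch (not part of jemalloc; the function name is hypothetical). The
 * acquire load in background_thread_wakeup_time_get() above pairs with the
 * release store in background_thread_wakeup_time_set() below, which keeps the
 * indefinite_sleep flag consistent with next_wakeup for lock-free readers:
 */
JEMALLOC_ALWAYS_INLINE bool
example_wakeup_scheduled(background_thread_info_t *info) {
	return background_thread_wakeup_time_get(info) !=
	    BACKGROUND_THREAD_INDEFINITE_SLEEP;
}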
JEMALLOC_ALWAYS_INLINE void background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, uint64_t wakeup_time) { malloc_mutex_assert_owner(tsdn, &info->mtx); atomic_store_b(&info->indefinite_sleep, wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); nstime_init(&info->next_wakeup, wakeup_time); } JEMALLOC_ALWAYS_INLINE bool background_thread_indefinite_sleep(background_thread_info_t *info) { return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); } JEMALLOC_ALWAYS_INLINE void arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, bool is_background_thread) { if (!background_thread_enabled() || is_background_thread) { return; } background_thread_info_t *info = arena_background_thread_info_get(arena); if (background_thread_indefinite_sleep(info)) { background_thread_interval_check(tsdn, arena, &arena->decay_dirty, 0); } } #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/background_thread_structs.h010064400007650000024000000033201340421340100265440ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H /* This file really combines "structs" and "types", but only transitionally. */ #if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK) # define JEMALLOC_PTHREAD_CREATE_WRAPPER #endif #define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX #define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT typedef enum { background_thread_stopped, background_thread_started, /* Thread waits on the global lock when paused (for arena_reset). */ background_thread_paused, } background_thread_state_t; struct background_thread_info_s { #ifdef JEMALLOC_BACKGROUND_THREAD /* Background thread is pthread specific. */ pthread_t thread; pthread_cond_t cond; #endif malloc_mutex_t mtx; background_thread_state_t state; /* When true, it means no wakeup scheduled. */ atomic_b_t indefinite_sleep; /* Next scheduled wakeup time (absolute time in ns). */ nstime_t next_wakeup; /* * Since the last background thread run, newly added number of pages * that need to be purged by the next wakeup. This is adjusted on * epoch advance, and is used to determine whether we should signal the * background thread to wake up earlier. */ size_t npages_to_purge_new; /* Stats: total number of runs since started. */ uint64_t tot_n_runs; /* Stats: total sleep time since started. 
*/ nstime_t tot_sleep_time; }; typedef struct background_thread_info_s background_thread_info_t; struct background_thread_stats_s { size_t num_threads; uint64_t num_runs; nstime_t run_interval; }; typedef struct background_thread_stats_s background_thread_stats_t; #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/base_externs.h010064400007650000024000000016621340421340100240000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H #define JEMALLOC_INTERNAL_BASE_EXTERNS_H extern metadata_thp_mode_t opt_metadata_thp; extern const char *metadata_thp_mode_names[]; base_t *b0get(void); base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); void base_delete(tsdn_t *tsdn, base_t *base); extent_hooks_t *base_extent_hooks_get(base_t *base); extent_hooks_t *base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks); void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base); void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, size_t *mapped, size_t *n_thp); void base_prefork(tsdn_t *tsdn, base_t *base); void base_postfork_parent(tsdn_t *tsdn, base_t *base); void base_postfork_child(tsdn_t *tsdn, base_t *base); bool base_boot(tsdn_t *tsdn); #endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/base_inlines.h010064400007650000024000000004701340421340100237450ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H #define JEMALLOC_INTERNAL_BASE_INLINES_H static inline unsigned base_ind_get(const base_t *base) { return base->ind; } static inline bool metadata_thp_enabled(void) { return (opt_metadata_thp != metadata_thp_disabled); } #endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/base_structs.h010064400007650000024000000030331340421341300240140ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H #define JEMALLOC_INTERNAL_BASE_STRUCTS_H #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/size_classes.h" /* Embedded at the beginning of every block of base-managed virtual memory. */ struct base_block_s { /* Total size of block's virtual memory mapping. */ size_t size; /* Next block in list of base's blocks. */ base_block_t *next; /* Tracks unused trailing space. */ extent_t extent; }; struct base_s { /* Associated arena's index within the arenas array. */ unsigned ind; /* * User-configurable extent hook functions. Points to an * extent_hooks_t. */ atomic_p_t extent_hooks; /* Protects base_alloc() and base_stats_get() operations. */ malloc_mutex_t mtx; /* Using THP when true (metadata_thp auto mode). */ bool auto_thp_switched; /* * Most recent size class in the series of increasingly large base * extents. Logarithmic spacing between subsequent allocations ensures * that the total number of distinct mappings remains small. */ pszind_t pind_last; /* Serial number generation state. */ size_t extent_sn_next; /* Chain of all blocks associated with base. */ base_block_t *blocks; /* Heap of extents that track unused trailing space within blocks. */ extent_heap_t avail[NSIZES]; /* Stats, only maintained if config_stats. */ size_t allocated; size_t resident; size_t mapped; /* Number of THP regions touched. 
*/ size_t n_thp; }; #endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/base_types.h010064400007650000024000000021211340421340100234430ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H #define JEMALLOC_INTERNAL_BASE_TYPES_H typedef struct base_block_s base_block_t; typedef struct base_s base_t; #define METADATA_THP_DEFAULT metadata_thp_disabled /* * In auto mode, arenas switch to huge pages for the base allocator on the * second base block. a0 switches to thp on the 5th block (after 20 megabytes * of metadata), since more metadata (e.g. rtree nodes) come from a0's base. */ #define BASE_AUTO_THP_THRESHOLD 2 #define BASE_AUTO_THP_THRESHOLD_A0 5 typedef enum { metadata_thp_disabled = 0, /* * Lazily enable hugepage for metadata. To avoid high RSS caused by THP * + low usage arena (i.e. THP becomes a significant percentage), the * "auto" option only starts using THP after a base allocator used up * the first THP region. Starting from the second hugepage (in a single * arena), "auto" behaves the same as "always", i.e. madvise hugepage * right away. */ metadata_thp_auto = 1, metadata_thp_always = 2, metadata_thp_mode_limit = 3 } metadata_thp_mode_t; #endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/bin.h010064400007650000024000000057701340421341300220750ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BIN_H #define JEMALLOC_INTERNAL_BIN_H #include "jemalloc/internal/extent_types.h" #include "jemalloc/internal/extent_structs.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/bin_stats.h" /* * A bin contains a set of extents that are currently being used for slab * allocations. */ /* * Read-only information associated with each element of arena_t's bins array * is stored separately, partly to reduce memory usage (only one copy, rather * than one per arena), but mainly to avoid false cacheline sharing. * * Each slab has the following layout: * * /--------------------\ * | region 0 | * |--------------------| * | region 1 | * |--------------------| * | ... | * | ... | * | ... | * |--------------------| * | region nregs-1 | * \--------------------/ */ typedef struct bin_info_s bin_info_t; struct bin_info_s { /* Size of regions in a slab for this bin's size class. */ size_t reg_size; /* Total size of a slab for this bin's size class. */ size_t slab_size; /* Total number of regions in a slab for this bin's size class. */ uint32_t nregs; /* * Metadata used to manipulate bitmaps for slabs associated with this * bin. */ bitmap_info_t bitmap_info; }; extern const bin_info_t bin_infos[NBINS]; typedef struct bin_s bin_t; struct bin_s { /* All operations on bin_t fields require lock ownership. */ malloc_mutex_t lock; /* * Current slab being used to service allocations of this bin's size * class. slabcur is independent of slabs_{nonfull,full}; whenever * slabcur is reassigned, the previous slab must be deallocated or * inserted into slabs_{nonfull,full}. */ extent_t *slabcur; /* * Heap of non-full slabs. This heap is used to assure that new * allocations come from the non-full slab that is oldest/lowest in * memory. */ extent_heap_t slabs_nonfull; /* List used to track full slabs. */ extent_list_t slabs_full; /* Bin statistics. */ bin_stats_t stats; }; /* Initializes a bin to empty. Returns true on error. */ bool bin_init(bin_t *bin); /* Forking. 
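*/
/*
 * Usage sketch (not part of jemalloc; the function name is hypothetical):
 * bin_infos[] is indexed by small size class, so e.g. the region capacity of
 * a fresh slab for a given class is:
 */
static inline uint32_t
example_slab_capacity(unsigned szind) {
	/* The caller must ensure szind < NBINS. */
	return bin_infos[szind].nregs;
}

/* Forking.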
*/ void bin_prefork(tsdn_t *tsdn, bin_t *bin); void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin); void bin_postfork_child(tsdn_t *tsdn, bin_t *bin); /* Stats. */ static inline void bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) { malloc_mutex_lock(tsdn, &bin->lock); malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock); dst_bin_stats->nmalloc += bin->stats.nmalloc; dst_bin_stats->ndalloc += bin->stats.ndalloc; dst_bin_stats->nrequests += bin->stats.nrequests; dst_bin_stats->curregs += bin->stats.curregs; dst_bin_stats->nfills += bin->stats.nfills; dst_bin_stats->nflushes += bin->stats.nflushes; dst_bin_stats->nslabs += bin->stats.nslabs; dst_bin_stats->reslabs += bin->stats.reslabs; dst_bin_stats->curslabs += bin->stats.curslabs; malloc_mutex_unlock(tsdn, &bin->lock); } #endif /* JEMALLOC_INTERNAL_BIN_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/bin_stats.h010064400007650000024000000024271340421340100233040ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BIN_STATS_H #define JEMALLOC_INTERNAL_BIN_STATS_H #include "jemalloc/internal/mutex_prof.h" typedef struct bin_stats_s bin_stats_t; struct bin_stats_s { /* * Total number of allocation/deallocation requests served directly by * the bin. Note that tcache may allocate an object, then recycle it * many times, resulting many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to the size of this * bin. This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* * Current number of regions of this size class, including regions * currently cached by tcache. */ size_t curregs; /* Number of tcache fills from this bin. */ uint64_t nfills; /* Number of tcache flushes to this bin. */ uint64_t nflushes; /* Total number of slabs created for this bin's size class. */ uint64_t nslabs; /* * Total number of slabs reused by extracting them from the slabs heap * for this bin's size class. */ uint64_t reslabs; /* Current number of slabs in this bin. */ size_t curslabs; mutex_prof_data_t mutex_data; }; #endif /* JEMALLOC_INTERNAL_BIN_STATS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/bit_util.h010064400007650000024000000062011340421341300231260ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H #define JEMALLOC_INTERNAL_BIT_UTIL_H #include "jemalloc/internal/assert.h" #define BIT_UTIL_INLINE static inline /* Sanity check. 
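*/
/*
 * Illustrative aside (not part of jemalloc; the helper is hypothetical). The
 * pow2_ceil_* helpers below rely on bit smearing: after x--, OR-ing x with
 * progressively larger shifts of itself fills in every bit below the highest
 * set bit, so x + 1 is the next power of two (e.g. 37 -> 36 -> 0x3f -> 64).
 * A related single-bit test in the same spirit:
 */
BIT_UTIL_INLINE bool
example_is_pow2(size_t x) {
	/* A power of two has exactly one bit set, so x - 1 clears it. */
	return x != 0 && (x & (x - 1)) == 0;
}

/* Sanity check.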
*/ #if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ || !defined(JEMALLOC_INTERNAL_FFS) # error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure #endif BIT_UTIL_INLINE unsigned ffs_llu(unsigned long long bitmap) { return JEMALLOC_INTERNAL_FFSLL(bitmap); } BIT_UTIL_INLINE unsigned ffs_lu(unsigned long bitmap) { return JEMALLOC_INTERNAL_FFSL(bitmap); } BIT_UTIL_INLINE unsigned ffs_u(unsigned bitmap) { return JEMALLOC_INTERNAL_FFS(bitmap); } BIT_UTIL_INLINE unsigned ffs_zu(size_t bitmap) { #if LG_SIZEOF_PTR == LG_SIZEOF_INT return ffs_u(bitmap); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG return ffs_lu(bitmap); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG return ffs_llu(bitmap); #else #error No implementation for size_t ffs() #endif } BIT_UTIL_INLINE unsigned ffs_u64(uint64_t bitmap) { #if LG_SIZEOF_LONG == 3 return ffs_lu(bitmap); #elif LG_SIZEOF_LONG_LONG == 3 return ffs_llu(bitmap); #else #error No implementation for 64-bit ffs() #endif } BIT_UTIL_INLINE unsigned ffs_u32(uint32_t bitmap) { #if LG_SIZEOF_INT == 2 return ffs_u(bitmap); #else #error No implementation for 32-bit ffs() #endif return ffs_u(bitmap); } BIT_UTIL_INLINE uint64_t pow2_ceil_u64(uint64_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x |= x >> 32; x++; return x; } BIT_UTIL_INLINE uint32_t pow2_ceil_u32(uint32_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x++; return x; } /* Compute the smallest power of 2 that is >= x. */ BIT_UTIL_INLINE size_t pow2_ceil_zu(size_t x) { #if (LG_SIZEOF_PTR == 3) return pow2_ceil_u64(x); #else return pow2_ceil_u32(x); #endif } #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) BIT_UTIL_INLINE unsigned lg_floor(size_t x) { size_t ret; assert(x != 0); asm ("bsr %1, %0" : "=r"(ret) // Outputs. : "r"(x) // Inputs. ); assert(ret < UINT_MAX); return (unsigned)ret; } #elif (defined(_MSC_VER)) BIT_UTIL_INLINE unsigned lg_floor(size_t x) { unsigned long ret; assert(x != 0); #if (LG_SIZEOF_PTR == 3) _BitScanReverse64(&ret, x); #elif (LG_SIZEOF_PTR == 2) _BitScanReverse(&ret, x); #else # error "Unsupported type size for lg_floor()" #endif assert(ret < UINT_MAX); return (unsigned)ret; } #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) BIT_UTIL_INLINE unsigned lg_floor(size_t x) { assert(x != 0); #if (LG_SIZEOF_PTR == LG_SIZEOF_INT) return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x); #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x); #else # error "Unsupported type size for lg_floor()" #endif } #else BIT_UTIL_INLINE unsigned lg_floor(size_t x) { assert(x != 0); x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); #if (LG_SIZEOF_PTR == 3) x |= (x >> 32); #endif if (x == SIZE_T_MAX) { return (8 << LG_SIZEOF_PTR) - 1; } x++; return ffs_zu(x) - 2; } #endif #undef BIT_UTIL_INLINE #endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/bitmap.h010064400007650000024000000257401340421341300226000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BITMAP_H #define JEMALLOC_INTERNAL_BITMAP_H #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/size_classes.h" typedef unsigned long bitmap_t; #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ #if LG_SLAB_MAXREGS > LG_CEIL_NSIZES /* Maximum bitmap bit count is determined by maximum regions per slab. 
*/ # define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS #else /* Maximum bitmap bit count is determined by number of extent size classes. */ # define LG_BITMAP_MAXBITS LG_CEIL_NSIZES #endif #define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) /* Number of bits per group. */ #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) #define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) /* * Do some analysis on how big the bitmap is before we use a tree. For a brute * force linear search, if we would have to call ffs_lu() more than 2^3 times, * use a tree instead. */ #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 # define BITMAP_USE_TREE #endif /* Number of groups required to store a given number of bits. */ #define BITMAP_BITS2GROUPS(nbits) \ (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) /* * Number of groups required at a particular level for a given number of bits. */ #define BITMAP_GROUPS_L0(nbits) \ BITMAP_BITS2GROUPS(nbits) #define BITMAP_GROUPS_L1(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) #define BITMAP_GROUPS_L2(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) #define BITMAP_GROUPS_L3(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ BITMAP_BITS2GROUPS((nbits))))) #define BITMAP_GROUPS_L4(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) /* * Assuming the number of levels, number of groups required for a given number * of bits. */ #define BITMAP_GROUPS_1_LEVEL(nbits) \ BITMAP_GROUPS_L0(nbits) #define BITMAP_GROUPS_2_LEVEL(nbits) \ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) #define BITMAP_GROUPS_3_LEVEL(nbits) \ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) #define BITMAP_GROUPS_4_LEVEL(nbits) \ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) #define BITMAP_GROUPS_5_LEVEL(nbits) \ (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) /* * Maximum number of groups required to support LG_BITMAP_MAXBITS. */ #ifdef BITMAP_USE_TREE #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) #else # error "Unsupported bitmap size" #endif /* * Maximum number of levels possible. This could be statically computed based * on LG_BITMAP_MAXBITS: * * #define BITMAP_MAX_LEVELS \ * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) * * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the * various cascading macros. 
The only additional cost this incurs is some * unused trailing entries in bitmap_info_t structures; the bitmaps themselves * are not impacted. */ #define BITMAP_MAX_LEVELS 5 #define BITMAP_INFO_INITIALIZER(nbits) { \ /* nbits. */ \ nbits, \ /* nlevels. */ \ (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ /* levels. */ \ { \ {0}, \ {BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ + BITMAP_GROUPS_L0(nbits)} \ } \ } #else /* BITMAP_USE_TREE */ #define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) #define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) #define BITMAP_INFO_INITIALIZER(nbits) { \ /* nbits. */ \ nbits, \ /* ngroups. */ \ BITMAP_BITS2GROUPS(nbits) \ } #endif /* BITMAP_USE_TREE */ typedef struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; } bitmap_level_t; typedef struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; #ifdef BITMAP_USE_TREE /* Number of levels necessary for nbits. */ unsigned nlevels; /* * Only the first (nlevels+1) elements are used, and levels are ordered * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; #else /* BITMAP_USE_TREE */ /* Number of groups necessary for nbits. */ size_t ngroups; #endif /* BITMAP_USE_TREE */ } bitmap_info_t; void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); size_t bitmap_size(const bitmap_info_t *binfo); static inline bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { #ifdef BITMAP_USE_TREE size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. */ return (rg == 0); #else size_t i; for (i = 0; i < binfo->ngroups; i++) { if (bitmap[i] != 0) { return false; } } return true; #endif } static inline bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); } static inline void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); assert(!bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); #ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. */ if (g == 0) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (g != 0) { break; } } } #endif } /* ffu: find first unset >= bit. 
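*/
/*
 * Usage sketch (not part of jemalloc; the function name is hypothetical).
 * Note the inverted storage: a logically unset (free) bit is physically 1,
 * which is what lets the ffs-based lookups here find free slots in a few
 * loads. Claiming a specific region looks like:
 */
static inline bool
example_claim_region(bitmap_t *bitmap, const bitmap_info_t *binfo,
    size_t region_ind) {
	if (bitmap_get(bitmap, binfo, region_ind)) {
		return false; /* Already allocated. */
	}
	bitmap_set(bitmap, binfo, region_ind);
	return true;
}

/* ffu: find first unset >= bit.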
*/ static inline size_t bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { assert(min_bit < binfo->nbits); #ifdef BITMAP_USE_TREE size_t bit = 0; for (unsigned level = binfo->nlevels; level--;) { size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + 1)); bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit >> lg_bits_per_group)]; unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit - bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); assert(group_nmask <= BITMAP_GROUP_NBITS); bitmap_t group_mask = ~((1LU << group_nmask) - 1); bitmap_t group_masked = group & group_mask; if (group_masked == 0LU) { if (group == 0LU) { return binfo->nbits; } /* * min_bit was preceded by one or more unset bits in * this group, but there are no other unset bits in this * group. Try again starting at the first bit of the * next sibling. This will recurse at most once per * non-root level. */ size_t sib_base = bit + (ZU(1) << lg_bits_per_group); assert(sib_base > min_bit); assert(sib_base > bit); if (sib_base >= binfo->nbits) { return binfo->nbits; } return bitmap_ffu(bitmap, binfo, sib_base); } bit += ((size_t)(ffs_lu(group_masked) - 1)) << (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); } assert(bit >= min_bit); assert(bit < binfo->nbits); return bit; #else size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) - 1); size_t bit; do { bit = ffs_lu(g); if (bit != 0) { return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); } i++; g = bitmap[i]; } while (i < binfo->ngroups); return binfo->nbits; #endif } /* sfu: set first unset. */ static inline size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; assert(!bitmap_full(bitmap, binfo)); #ifdef BITMAP_USE_TREE i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; bit = ffs_lu(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); } #else i = 0; g = bitmap[0]; while ((bit = ffs_lu(g)) == 0) { i++; g = bitmap[i]; } bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); #endif bitmap_set(bitmap, binfo, bit); return bit; } static inline void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; UNUSED bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(!bitmap_get(bitmap, binfo, bit)); #ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. */ if (propagate) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (!propagate) { break; } } } #endif /* BITMAP_USE_TREE */ } #endif /* JEMALLOC_INTERNAL_BITMAP_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/cache_bin.h010064400007650000024000000064651340421341300232220ustar0000000000000000#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H #define JEMALLOC_INTERNAL_CACHE_BIN_H #include "jemalloc/internal/ql.h" /* * The cache_bins are the mechanism that the tcache and the arena use to * communicate. 
The tcache fills from and flushes to the arena by passing a * cache_bin_t to fill/flush. When the arena needs to pull stats from the * tcaches associated with it, it does so by iterating over its * cache_bin_array_descriptor_t objects and reading out per-bin stats it * contains. This makes it so that the arena need not know about the existence * of the tcache at all. */ /* * The count of the number of cached allocations in a bin. We make this signed * so that negative numbers can encode "invalid" states (e.g. a low water mark * of -1 for a cache that has been depleted). */ typedef int32_t cache_bin_sz_t; typedef struct cache_bin_stats_s cache_bin_stats_t; struct cache_bin_stats_s { /* * Number of allocation requests that corresponded to the size of this * bin. */ uint64_t nrequests; }; /* * Read-only information associated with each element of tcache_t's tbins array * is stored separately, mainly to reduce memory usage. */ typedef struct cache_bin_info_s cache_bin_info_t; struct cache_bin_info_s { /* Upper limit on ncached. */ cache_bin_sz_t ncached_max; }; typedef struct cache_bin_s cache_bin_t; struct cache_bin_s { /* Min # cached since last GC. */ cache_bin_sz_t low_water; /* # of cached objects. */ cache_bin_sz_t ncached; /* * ncached and stats are both modified frequently. Let's keep them * close so that they have a higher chance of being on the same * cacheline, thus less write-backs. */ cache_bin_stats_t tstats; /* * Stack of available objects. * * To make use of adjacent cacheline prefetch, the items in the avail * stack goes to higher address for newer allocations. avail points * just above the available space, which means that * avail[-ncached, ... -1] are available items and the lowest item will * be allocated first. */ void **avail; }; typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t; struct cache_bin_array_descriptor_s { /* * The arena keeps a list of the cache bins associated with it, for * stats collection. */ ql_elm(cache_bin_array_descriptor_t) link; /* Pointers to the tcache bins. */ cache_bin_t *bins_small; cache_bin_t *bins_large; }; static inline void cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor, cache_bin_t *bins_small, cache_bin_t *bins_large) { ql_elm_new(descriptor, link); descriptor->bins_small = bins_small; descriptor->bins_large = bins_large; } JEMALLOC_ALWAYS_INLINE void * cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { void *ret; if (unlikely(bin->ncached == 0)) { bin->low_water = -1; *success = false; return NULL; } /* * success (instead of ret) should be checked upon the return of this * function. We avoid checking (ret == NULL) because there is never a * null stored on the avail stack (which is unknown to the compiler), * and eagerly checking ret would cause pipeline stall (waiting for the * cacheline). */ *success = true; ret = *(bin->avail - bin->ncached); bin->ncached--; if (unlikely(bin->ncached < bin->low_water)) { bin->low_water = bin->ncached; } return ret; } #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/ckh.h010064400007650000024000000063011340421340100220560ustar0000000000000000#ifndef JEMALLOC_INTERNAL_CKH_H #define JEMALLOC_INTERNAL_CKH_H #include "jemalloc/internal/tsd.h" /* Cuckoo hashing implementation. Skip to the end for the interface. 
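*/
/*
 * Usage sketch (not part of jemalloc; the function name is hypothetical) for
 * cache_bin_alloc_easy() from the preceding header: callers branch on
 * *success rather than on the returned pointer, since NULL is never stored
 * on the avail stack.
 */
static inline void *
example_tcache_alloc(cache_bin_t *bin) {
	bool success;
	void *ret = cache_bin_alloc_easy(bin, &success);
	if (!success) {
		/* Bin empty; a real tcache would refill from the arena. */
		return NULL;
	}
	return ret;
}

/* Cuckoo hashing implementation. Skip to the end for the interface.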
*/ /******************************************************************************/ /* INTERNAL DEFINITIONS -- IGNORE */ /******************************************************************************/ /* Maintain counters used to get an idea of performance. */ /* #define CKH_COUNT */ /* Print counter values in ckh_delete() (requires CKH_COUNT). */ /* #define CKH_VERBOSE */ /* * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. */ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) /* Typedefs to allow easy function pointer passing. */ typedef void ckh_hash_t (const void *, size_t[2]); typedef bool ckh_keycomp_t (const void *, const void *); /* Hash table cell. */ typedef struct { const void *key; const void *data; } ckhc_t; /* The hash table itself. */ typedef struct { #ifdef CKH_COUNT /* Counters used to get an idea of performance. */ uint64_t ngrows; uint64_t nshrinks; uint64_t nshrinkfails; uint64_t ninserts; uint64_t nrelocs; #endif /* Used for pseudo-random number generation. */ uint64_t prng_state; /* Total number of items. */ size_t count; /* * Minimum and current number of hash table buckets. There are * 2^LG_CKH_BUCKET_CELLS cells per bucket. */ unsigned lg_minbuckets; unsigned lg_curbuckets; /* Hash and comparison functions. */ ckh_hash_t *hash; ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. */ ckhc_t *tab; } ckh_t; /******************************************************************************/ /* BEGIN PUBLIC API */ /******************************************************************************/ /* Lifetime management. Minitems is the initial capacity. */ bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); void ckh_delete(tsd_t *tsd, ckh_t *ckh); /* Get the number of elements in the set. */ size_t ckh_count(ckh_t *ckh); /* * To iterate over the elements in the table, initialize *tabind to 0 and call * this function until it returns true. Each call that returns false will * update *key and *data to the next element in the table, assuming the pointers * are non-NULL. */ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); /* * Basic hash table operations -- insert, removal, lookup. For ckh_remove and * ckh_search, key or data can be NULL. The hash-table only stores pointers to * the key and value, and doesn't do any lifetime management. */ bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data); bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); /* Some useful hash and comparison functions for strings and pointers. */ void ckh_string_hash(const void *key, size_t r_hash[2]); bool ckh_string_keycomp(const void *k1, const void *k2); void ckh_pointer_hash(const void *key, size_t r_hash[2]); bool ckh_pointer_keycomp(const void *k1, const void *k2); #endif /* JEMALLOC_INTERNAL_CKH_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/ctl.h010064400007650000024000000070351340421341300221030ustar0000000000000000#ifndef JEMALLOC_INTERNAL_CTL_H #define JEMALLOC_INTERNAL_CTL_H #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" /* Maximum ctl tree depth. 
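*/
/*
 * Usage sketch (not part of jemalloc; the function name is hypothetical) for
 * the ckh interface declared above: iteration starts from tabind = 0 and
 * continues until ckh_iter() returns true; every false return yields one
 * key/data pair, so this count matches ckh_count().
 */
static inline size_t
example_ckh_count_pairs(ckh_t *ckh) {
	size_t tabind = 0, n = 0;
	void *key, *data;
	while (!ckh_iter(ckh, &tabind, &key, &data)) {
		n++;
	}
	return n;
}

/* Maximum ctl tree depth.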
*/ #define CTL_MAX_DEPTH 7 typedef struct ctl_node_s { bool named; } ctl_node_t; typedef struct ctl_named_node_s { ctl_node_t node; const char *name; /* If (nchildren == 0), this is a terminal node. */ size_t nchildren; const ctl_node_t *children; int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, size_t); } ctl_named_node_t; typedef struct ctl_indexed_node_s { struct ctl_node_s node; const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, size_t); } ctl_indexed_node_t; typedef struct ctl_arena_stats_s { arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ size_t allocated_small; uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; bin_stats_t bstats[NBINS]; arena_stats_large_t lstats[NSIZES - NBINS]; } ctl_arena_stats_t; typedef struct ctl_stats_s { size_t allocated; size_t active; size_t metadata; size_t metadata_thp; size_t resident; size_t mapped; size_t retained; background_thread_stats_t background_thread; mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes]; } ctl_stats_t; typedef struct ctl_arena_s ctl_arena_t; struct ctl_arena_s { unsigned arena_ind; bool initialized; ql_elm(ctl_arena_t) destroyed_link; /* Basic stats, supported even if !config_stats. */ unsigned nthreads; const char *dss; ssize_t dirty_decay_ms; ssize_t muzzy_decay_ms; size_t pactive; size_t pdirty; size_t pmuzzy; /* NULL if !config_stats. */ ctl_arena_stats_t *astats; }; typedef struct ctl_arenas_s { uint64_t epoch; unsigned narenas; ql_head(ctl_arena_t) destroyed; /* * Element 0 corresponds to merged stats for extant arenas (accessed via * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas. */ ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; } ctl_arenas_t; int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); void ctl_prefork(tsdn_t *tsdn); void ctl_postfork_parent(tsdn_t *tsdn); void ctl_postfork_child(tsdn_t *tsdn); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ ": Failure in xmallctl(\"%s\", ...)\n", \ name); \ abort(); \ } \ } while (0) #define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf(": Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ abort(); \ } \ } while (0) #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ ": Failure in xmallctlbymib()\n"); \ abort(); \ } \ } while (0) #endif /* JEMALLOC_INTERNAL_CTL_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/div.h010064400007650000024000000022241340421340100220730ustar0000000000000000#ifndef JEMALLOC_INTERNAL_DIV_H #define JEMALLOC_INTERNAL_DIV_H #include "jemalloc/internal/assert.h" /* * This module does the division that computes the index of a region in a slab, * given its offset relative to the base. * That is, given a divisor d, an n = i * d (all integers), we'll return i. * We do some pre-computation to do this more quickly than a CPU division * instruction. 
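 * Conceptually, magic is chosen to be roughly 2^32 / d, so that
 * (n * magic) >> 32 recovers i exactly for the in-range multiples of d;
 * e.g. with d = 8 and n = 40, (40 * (2^32 / 8)) >> 32 == 5.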
* We bound n < 2^32, and don't support dividing by one. */ typedef struct div_info_s div_info_t; struct div_info_s { uint32_t magic; #ifdef JEMALLOC_DEBUG size_t d; #endif }; void div_init(div_info_t *div_info, size_t divisor); static inline size_t div_compute(div_info_t *div_info, size_t n) { assert(n <= (uint32_t)-1); /* * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine, * the compilers I tried were all smart enough to turn this into the * appropriate "get the high 32 bits of the result of a multiply" (e.g. * mul; mov edx eax; on x86, umull on arm, etc.). */ size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32; #ifdef JEMALLOC_DEBUG assert(i * div_info->d == n); #endif return i; } #endif /* JEMALLOC_INTERNAL_DIV_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/emitter.h010064400007650000024000000264211340421341300227720ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EMITTER_H #define JEMALLOC_INTERNAL_EMITTER_H #include "jemalloc/internal/ql.h" typedef enum emitter_output_e emitter_output_t; enum emitter_output_e { emitter_output_json, emitter_output_table }; typedef enum emitter_justify_e emitter_justify_t; enum emitter_justify_e { emitter_justify_left, emitter_justify_right, /* Not for users; just to pass to internal functions. */ emitter_justify_none }; typedef enum emitter_type_e emitter_type_t; enum emitter_type_e { emitter_type_bool, emitter_type_int, emitter_type_unsigned, emitter_type_uint32, emitter_type_uint64, emitter_type_size, emitter_type_ssize, emitter_type_string, /* * A title is a column title in a table; it's just a string, but it's * not quoted. */ emitter_type_title, }; typedef struct emitter_col_s emitter_col_t; struct emitter_col_s { /* Filled in by the user. */ emitter_justify_t justify; int width; emitter_type_t type; union { bool bool_val; int int_val; unsigned unsigned_val; uint32_t uint32_val; uint64_t uint64_val; size_t size_val; ssize_t ssize_val; const char *str_val; }; /* Filled in by initialization. */ ql_elm(emitter_col_t) link; }; typedef struct emitter_row_s emitter_row_t; struct emitter_row_s { ql_head(emitter_col_t) cols; }; static inline void emitter_row_init(emitter_row_t *row) { ql_new(&row->cols); } static inline void emitter_col_init(emitter_col_t *col, emitter_row_t *row) { ql_elm_new(col, link); ql_tail_insert(&row->cols, col, link); } typedef struct emitter_s emitter_t; struct emitter_s { emitter_output_t output; /* The output information. */ void (*write_cb)(void *, const char *); void *cbopaque; int nesting_depth; /* True if we've already emitted a value at the given depth. */ bool item_at_depth; }; static inline void emitter_init(emitter_t *emitter, emitter_output_t emitter_output, void (*write_cb)(void *, const char *), void *cbopaque) { emitter->output = emitter_output; emitter->write_cb = write_cb; emitter->cbopaque = cbopaque; emitter->item_at_depth = false; emitter->nesting_depth = 0; } /* Internal convenience function. Write to the emitter the given string. */ JEMALLOC_FORMAT_PRINTF(2, 3) static inline void emitter_printf(emitter_t *emitter, const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); va_end(ap); } /* Write to the emitter the given string, but only in table mode. */ JEMALLOC_FORMAT_PRINTF(2, 3) static inline void emitter_table_printf(emitter_t *emitter, const char *format, ...) 
{ if (emitter->output == emitter_output_table) { va_list ap; va_start(ap, format); malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); va_end(ap); } } static inline void emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, emitter_justify_t justify, int width) { size_t written; if (justify == emitter_justify_none) { written = malloc_snprintf(out_fmt, out_size, "%%%s", fmt_specifier); } else if (justify == emitter_justify_left) { written = malloc_snprintf(out_fmt, out_size, "%%-%d%s", width, fmt_specifier); } else { written = malloc_snprintf(out_fmt, out_size, "%%%d%s", width, fmt_specifier); } /* Only happens in case of bad format string, which *we* choose. */ assert(written < out_size); } /* * Internal. Emit the given value type in the relevant encoding (so that the * bool true gets mapped to json "true", but the string "true" gets mapped to * json "\"true\"", for instance. * * Width is ignored if justify is emitter_justify_none. */ static inline void emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, emitter_type_t value_type, const void *value) { size_t str_written; #define BUF_SIZE 256 #define FMT_SIZE 10 /* * We dynamically generate a format string to emit, to let us use the * snprintf machinery. This is kinda hacky, but gets the job done * quickly without having to think about the various snprintf edge * cases. */ char fmt[FMT_SIZE]; char buf[BUF_SIZE]; #define EMIT_SIMPLE(type, format) \ emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width); \ emitter_printf(emitter, fmt, *(const type *)value); \ switch (value_type) { case emitter_type_bool: emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); emitter_printf(emitter, fmt, *(const bool *)value ? "true" : "false"); break; case emitter_type_int: EMIT_SIMPLE(int, "d") break; case emitter_type_unsigned: EMIT_SIMPLE(unsigned, "u") break; case emitter_type_ssize: EMIT_SIMPLE(ssize_t, "zd") break; case emitter_type_size: EMIT_SIMPLE(size_t, "zu") break; case emitter_type_string: str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", *(const char *const *)value); /* * We control the strings we output; we shouldn't get anything * anywhere near the fmt size. */ assert(str_written < BUF_SIZE); emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); emitter_printf(emitter, fmt, buf); break; case emitter_type_uint32: EMIT_SIMPLE(uint32_t, FMTu32) break; case emitter_type_uint64: EMIT_SIMPLE(uint64_t, FMTu64) break; case emitter_type_title: EMIT_SIMPLE(char *const, "s"); break; default: unreachable(); } #undef BUF_SIZE #undef FMT_SIZE } /* Internal functions. In json mode, tracks nesting state. */ static inline void emitter_nest_inc(emitter_t *emitter) { emitter->nesting_depth++; emitter->item_at_depth = false; } static inline void emitter_nest_dec(emitter_t *emitter) { emitter->nesting_depth--; emitter->item_at_depth = true; } static inline void emitter_indent(emitter_t *emitter) { int amount = emitter->nesting_depth; const char *indent_str; if (emitter->output == emitter_output_json) { indent_str = "\t"; } else { amount *= 2; indent_str = " "; } for (int i = 0; i < amount; i++) { emitter_printf(emitter, "%s", indent_str); } } static inline void emitter_json_key_prefix(emitter_t *emitter) { emitter_printf(emitter, "%s\n", emitter->item_at_depth ? 
"," : ""); emitter_indent(emitter); } static inline void emitter_begin(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth == 0); emitter_printf(emitter, "{"); emitter_nest_inc(emitter); } else { // tabular init emitter_printf(emitter, "%s", ""); } } static inline void emitter_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth == 1); emitter_nest_dec(emitter); emitter_printf(emitter, "\n}\n"); } } /* * Note emits a different kv pair as well, but only in table mode. Omits the * note if table_note_key is NULL. */ static inline void emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key, emitter_type_t value_type, const void *value, const char *table_note_key, emitter_type_t table_note_value_type, const void *table_note_value) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth > 0); emitter_json_key_prefix(emitter); emitter_printf(emitter, "\"%s\": ", json_key); emitter_print_value(emitter, emitter_justify_none, -1, value_type, value); } else { emitter_indent(emitter); emitter_printf(emitter, "%s: ", table_key); emitter_print_value(emitter, emitter_justify_none, -1, value_type, value); if (table_note_key != NULL) { emitter_printf(emitter, " (%s: ", table_note_key); emitter_print_value(emitter, emitter_justify_none, -1, table_note_value_type, table_note_value); emitter_printf(emitter, ")"); } emitter_printf(emitter, "\n"); } emitter->item_at_depth = true; } static inline void emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key, emitter_type_t value_type, const void *value) { emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL, emitter_type_bool, NULL); } static inline void emitter_json_kv(emitter_t *emitter, const char *json_key, emitter_type_t value_type, const void *value) { if (emitter->output == emitter_output_json) { emitter_kv(emitter, json_key, NULL, value_type, value); } } static inline void emitter_table_kv(emitter_t *emitter, const char *table_key, emitter_type_t value_type, const void *value) { if (emitter->output == emitter_output_table) { emitter_kv(emitter, NULL, table_key, value_type, value); } } static inline void emitter_dict_begin(emitter_t *emitter, const char *json_key, const char *table_header) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "\"%s\": {", json_key); emitter_nest_inc(emitter); } else { emitter_indent(emitter); emitter_printf(emitter, "%s\n", table_header); emitter_nest_inc(emitter); } } static inline void emitter_dict_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth > 0); emitter_nest_dec(emitter); emitter_printf(emitter, "\n"); emitter_indent(emitter); emitter_printf(emitter, "}"); } else { emitter_nest_dec(emitter); } } static inline void emitter_json_dict_begin(emitter_t *emitter, const char *json_key) { if (emitter->output == emitter_output_json) { emitter_dict_begin(emitter, json_key, NULL); } } static inline void emitter_json_dict_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { emitter_dict_end(emitter); } } static inline void emitter_table_dict_begin(emitter_t *emitter, const char *table_key) { if (emitter->output == emitter_output_table) { emitter_dict_begin(emitter, NULL, table_key); } } static inline void emitter_table_dict_end(emitter_t *emitter) { if (emitter->output == emitter_output_table) { emitter_dict_end(emitter); } } 
static inline void emitter_json_arr_begin(emitter_t *emitter, const char *json_key) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "\"%s\": [", json_key); emitter_nest_inc(emitter); } } static inline void emitter_json_arr_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth > 0); emitter_nest_dec(emitter); emitter_printf(emitter, "\n"); emitter_indent(emitter); emitter_printf(emitter, "]"); } } static inline void emitter_json_arr_obj_begin(emitter_t *emitter) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "{"); emitter_nest_inc(emitter); } } static inline void emitter_json_arr_obj_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth > 0); emitter_nest_dec(emitter); emitter_printf(emitter, "\n"); emitter_indent(emitter); emitter_printf(emitter, "}"); } } static inline void emitter_json_arr_value(emitter_t *emitter, emitter_type_t value_type, const void *value) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_print_value(emitter, emitter_justify_none, -1, value_type, value); } } static inline void emitter_table_row(emitter_t *emitter, emitter_row_t *row) { if (emitter->output != emitter_output_table) { return; } emitter_col_t *col; ql_foreach(col, &row->cols, link) { emitter_print_value(emitter, col->justify, col->width, col->type, (const void *)&col->bool_val); } emitter_table_printf(emitter, "\n"); } #endif /* JEMALLOC_INTERNAL_EMITTER_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/extent_dss.h010064400007650000024000000013301340421340100234660ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H #define JEMALLOC_INTERNAL_EXTENT_DSS_H typedef enum { dss_prec_disabled = 0, dss_prec_primary = 1, dss_prec_secondary = 2, dss_prec_limit = 3 } dss_prec_t; #define DSS_PREC_DEFAULT dss_prec_secondary #define DSS_DEFAULT "secondary" extern const char *dss_prec_names[]; extern const char *opt_dss; dss_prec_t extent_dss_prec_get(void); bool extent_dss_prec_set(dss_prec_t dss_prec); void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool extent_in_dss(void *addr); bool extent_dss_mergeable(void *addr_a, void *addr_b); void extent_dss_boot(void); #endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/extent_externs.h010064400007650000024000000063361340421341300244030ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H #define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" #include "jemalloc/internal/ph.h" #include "jemalloc/internal/rtree.h" extern size_t opt_lg_extent_max_active_fit; extern rtree_t extents_rtree; extern const extent_hooks_t extent_hooks_default; extern mutex_pool_t extent_mutex_pool; extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena); void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent); extent_hooks_t *extent_hooks_get(arena_t *arena); extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks); #ifdef JEMALLOC_JET size_t extent_size_quantize_floor(size_t size); size_t extent_size_quantize_ceil(size_t size); #endif rb_proto(, extent_avail_, extent_tree_t, extent_t) ph_proto(, extent_heap_, extent_heap_t, extent_t) bool extents_init(tsdn_t *tsdn, extents_t *extents, 
extent_state_t state, bool delay_coalesce); extent_state_t extents_state_get(const extents_t *extents); size_t extents_npages_get(extents_t *extents); extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); void extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent); extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min); void extents_prefork(tsdn_t *tsdn, extents_t *extents); void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents); void extents_postfork_child(tsdn_t *tsdn, extents_t *extents); extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent); void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent); void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent); bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b); bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b); bool extent_boot(void); #endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/extent_inlines.h010064400007650000024000000267221340421341300243550ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H #define JEMALLOC_INTERNAL_EXTENT_INLINES_H #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/sz.h" static inline void extent_lock(tsdn_t *tsdn, extent_t *extent) { assert(extent != NULL); mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent); } static inline void extent_unlock(tsdn_t *tsdn, extent_t *extent) { assert(extent != NULL); mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent); } static inline void extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { assert(extent1 != NULL && extent2 != NULL); mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, (uintptr_t)extent2); } static inline void extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { assert(extent1 != NULL && extent2 != NULL); mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, (uintptr_t)extent2); } static inline arena_t * extent_arena_get(const extent_t *extent) { unsigned arena_ind = 
(unsigned)((extent->e_bits & EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT); /* * The following check is omitted because we should never actually read * a NULL arena pointer. */ if (false && arena_ind >= MALLOCX_ARENA_LIMIT) { return NULL; } assert(arena_ind < MALLOCX_ARENA_LIMIT); return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); } static inline szind_t extent_szind_get_maybe_invalid(const extent_t *extent) { szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >> EXTENT_BITS_SZIND_SHIFT); assert(szind <= NSIZES); return szind; } static inline szind_t extent_szind_get(const extent_t *extent) { szind_t szind = extent_szind_get_maybe_invalid(extent); assert(szind < NSIZES); /* Never call when "invalid". */ return szind; } static inline size_t extent_usize_get(const extent_t *extent) { return sz_index2size(extent_szind_get(extent)); } static inline size_t extent_sn_get(const extent_t *extent) { return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >> EXTENT_BITS_SN_SHIFT); } static inline extent_state_t extent_state_get(const extent_t *extent) { return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >> EXTENT_BITS_STATE_SHIFT); } static inline bool extent_zeroed_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >> EXTENT_BITS_ZEROED_SHIFT); } static inline bool extent_committed_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >> EXTENT_BITS_COMMITTED_SHIFT); } static inline bool extent_dumpable_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >> EXTENT_BITS_DUMPABLE_SHIFT); } static inline bool extent_slab_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >> EXTENT_BITS_SLAB_SHIFT); } static inline unsigned extent_nfree_get(const extent_t *extent) { assert(extent_slab_get(extent)); return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >> EXTENT_BITS_NFREE_SHIFT); } static inline void * extent_base_get(const extent_t *extent) { assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || !extent_slab_get(extent)); return PAGE_ADDR2BASE(extent->e_addr); } static inline void * extent_addr_get(const extent_t *extent) { assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || !extent_slab_get(extent)); return extent->e_addr; } static inline size_t extent_size_get(const extent_t *extent) { return (extent->e_size_esn & EXTENT_SIZE_MASK); } static inline size_t extent_esn_get(const extent_t *extent) { return (extent->e_size_esn & EXTENT_ESN_MASK); } static inline size_t extent_bsize_get(const extent_t *extent) { return extent->e_bsize; } static inline void * extent_before_get(const extent_t *extent) { return (void *)((uintptr_t)extent_base_get(extent) - PAGE); } static inline void * extent_last_get(const extent_t *extent) { return (void *)((uintptr_t)extent_base_get(extent) + extent_size_get(extent) - PAGE); } static inline void * extent_past_get(const extent_t *extent) { return (void *)((uintptr_t)extent_base_get(extent) + extent_size_get(extent)); } static inline arena_slab_data_t * extent_slab_data_get(extent_t *extent) { assert(extent_slab_get(extent)); return &extent->e_slab_data; } static inline const arena_slab_data_t * extent_slab_data_get_const(const extent_t *extent) { assert(extent_slab_get(extent)); return &extent->e_slab_data; } static inline prof_tctx_t * extent_prof_tctx_get(const extent_t *extent) { return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx, ATOMIC_ACQUIRE); } 
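/*
 * Illustrative sketch (disabled; not part of jemalloc): composing the
 * read-side accessors above, which all decode the packed e_bits word.
 */
#if 0
static bool
example_extent_is_active_slab(const extent_t *extent) {
	return extent_slab_get(extent) &&
	    extent_state_get(extent) == extent_state_active;
}
#endif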
static inline void extent_arena_set(extent_t *extent, arena_t *arena) { unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U << MALLOCX_ARENA_BITS) - 1); extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) | ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT); } static inline void extent_addr_set(extent_t *extent, void *addr) { extent->e_addr = addr; } static inline void extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) { assert(extent_base_get(extent) == extent_addr_get(extent)); if (alignment < PAGE) { unsigned lg_range = LG_PAGE - lg_floor(CACHELINE_CEILING(alignment)); size_t r; if (!tsdn_null(tsdn)) { tsd_t *tsd = tsdn_tsd(tsdn); r = (size_t)prng_lg_range_u64( tsd_offset_statep_get(tsd), lg_range); } else { r = prng_lg_range_zu( &extent_arena_get(extent)->offset_state, lg_range, true); } uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - lg_range); extent->e_addr = (void *)((uintptr_t)extent->e_addr + random_offset); assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) == extent->e_addr); } } static inline void extent_size_set(extent_t *extent, size_t size) { assert((size & ~EXTENT_SIZE_MASK) == 0); extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK); } static inline void extent_esn_set(extent_t *extent, size_t esn) { extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn & EXTENT_ESN_MASK); } static inline void extent_bsize_set(extent_t *extent, size_t bsize) { extent->e_bsize = bsize; } static inline void extent_szind_set(extent_t *extent, szind_t szind) { assert(szind <= NSIZES); /* NSIZES means "invalid". */ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) | ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT); } static inline void extent_nfree_set(extent_t *extent, unsigned nfree) { assert(extent_slab_get(extent)); extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) | ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_nfree_inc(extent_t *extent) { assert(extent_slab_get(extent)); extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_nfree_dec(extent_t *extent) { assert(extent_slab_get(extent)); extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_sn_set(extent_t *extent, size_t sn) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) | ((uint64_t)sn << EXTENT_BITS_SN_SHIFT); } static inline void extent_state_set(extent_t *extent, extent_state_t state) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) | ((uint64_t)state << EXTENT_BITS_STATE_SHIFT); } static inline void extent_zeroed_set(extent_t *extent, bool zeroed) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) | ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT); } static inline void extent_committed_set(extent_t *extent, bool committed) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) | ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT); } static inline void extent_dumpable_set(extent_t *extent, bool dumpable) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) | ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT); } static inline void extent_slab_set(extent_t *extent, bool slab) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) | ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT); } static inline void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) { atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE); } static inline void extent_init(extent_t *extent, arena_t 
*arena, void *addr, size_t size, bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, bool committed, bool dumpable) { assert(addr == PAGE_ADDR2BASE(addr) || !slab); extent_arena_set(extent, arena); extent_addr_set(extent, addr); extent_size_set(extent, size); extent_slab_set(extent, slab); extent_szind_set(extent, szind); extent_sn_set(extent, sn); extent_state_set(extent, state); extent_zeroed_set(extent, zeroed); extent_committed_set(extent, committed); extent_dumpable_set(extent, dumpable); ql_elm_new(extent, ql_link); if (config_prof) { extent_prof_tctx_set(extent, NULL); } } static inline void extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) { extent_arena_set(extent, NULL); extent_addr_set(extent, addr); extent_bsize_set(extent, bsize); extent_slab_set(extent, false); extent_szind_set(extent, NSIZES); extent_sn_set(extent, sn); extent_state_set(extent, extent_state_active); extent_zeroed_set(extent, true); extent_committed_set(extent, true); extent_dumpable_set(extent, true); } static inline void extent_list_init(extent_list_t *list) { ql_new(list); } static inline extent_t * extent_list_first(const extent_list_t *list) { return ql_first(list); } static inline extent_t * extent_list_last(const extent_list_t *list) { return ql_last(list, ql_link); } static inline void extent_list_append(extent_list_t *list, extent_t *extent) { ql_tail_insert(list, extent, ql_link); } static inline void extent_list_prepend(extent_list_t *list, extent_t *extent) { ql_head_insert(list, extent, ql_link); } static inline void extent_list_replace(extent_list_t *list, extent_t *to_remove, extent_t *to_insert) { ql_after_insert(to_remove, to_insert, ql_link); ql_remove(list, to_remove, ql_link); } static inline void extent_list_remove(extent_list_t *list, extent_t *extent) { ql_remove(list, extent, ql_link); } static inline int extent_sn_comp(const extent_t *a, const extent_t *b) { size_t a_sn = extent_sn_get(a); size_t b_sn = extent_sn_get(b); return (a_sn > b_sn) - (a_sn < b_sn); } static inline int extent_esn_comp(const extent_t *a, const extent_t *b) { size_t a_esn = extent_esn_get(a); size_t b_esn = extent_esn_get(b); return (a_esn > b_esn) - (a_esn < b_esn); } static inline int extent_ad_comp(const extent_t *a, const extent_t *b) { uintptr_t a_addr = (uintptr_t)extent_addr_get(a); uintptr_t b_addr = (uintptr_t)extent_addr_get(b); return (a_addr > b_addr) - (a_addr < b_addr); } static inline int extent_ead_comp(const extent_t *a, const extent_t *b) { uintptr_t a_eaddr = (uintptr_t)a; uintptr_t b_eaddr = (uintptr_t)b; return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); } static inline int extent_snad_comp(const extent_t *a, const extent_t *b) { int ret; ret = extent_sn_comp(a, b); if (ret != 0) { return ret; } ret = extent_ad_comp(a, b); return ret; } static inline int extent_esnead_comp(const extent_t *a, const extent_t *b) { int ret; ret = extent_esn_comp(a, b); if (ret != 0) { return ret; } ret = extent_ead_comp(a, b); return ret; } #endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/extent_mmap.h010064400007650000024000000005101340421340100236260ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H #define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H extern bool opt_retain; void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool extent_dalloc_mmap(void *addr, size_t size); #endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */ 
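/*
 * Illustrative usage (hypothetical caller; size/alignment are placeholders).
 * zero and commit are in-out parameters, and, per jemalloc convention, a true
 * return from extent_dalloc_mmap means the mapping was retained rather than
 * unmapped (e.g. when opt_retain is enabled):
 *
 *   bool zero = false, commit = true;
 *   void *addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
 *   ...
 *   if (addr != NULL && extent_dalloc_mmap(addr, size)) {
 *       ... still mapped; the caller may reuse the virtual memory ...
 *   }
 */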
jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/extent_structs.h010064400007650000024000000165501340421341300244210ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H #define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/ph.h" #include "jemalloc/internal/size_classes.h" typedef enum { extent_state_active = 0, extent_state_dirty = 1, extent_state_muzzy = 2, extent_state_retained = 3 } extent_state_t; /* Extent (span of pages). Use accessor functions for e_* fields. */ struct extent_s { /* * Bitfield containing several fields: * * a: arena_ind * b: slab * c: committed * d: dumpable * z: zeroed * t: state * i: szind * f: nfree * n: sn * * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa * * arena_ind: Arena from which this extent came, or all 1 bits if * unassociated. * * slab: The slab flag indicates whether the extent is used for a slab * of small regions. This helps differentiate small size classes, * and it indicates whether interior pointers can be looked up via * iealloc(). * * committed: The committed flag indicates whether physical memory is * committed to the extent, whether explicitly or implicitly * as on a system that overcommits and satisfies physical * memory needs on demand via soft page faults. * * dumpable: The dumpable flag indicates whether or not we've set the * memory in question to be dumpable. Note that this * interacts somewhat subtly with user-specified extent hooks, * since we don't know if *they* are fiddling with * dumpability (in which case, we don't want to undo whatever * they're doing). To deal with this scenario, we: * - Make dumpable false only for memory allocated with the * default hooks. * - Only allow memory to go from non-dumpable to dumpable, * and only once. * - Never make the OS call to allow dumping when the * dumpable bit is already set. * These three constraints mean that we will never * accidentally dump user memory that the user meant to set * nondumpable with their extent hooks. * * * zeroed: The zeroed flag is used by extent recycling code to track * whether memory is zero-filled. * * state: The state flag is an extent_state_t. * * szind: The szind flag indicates usable size class index for * allocations residing in this extent, regardless of whether the * extent is a slab. Extent size and usable size often differ * even for non-slabs, either due to sz_large_pad or promotion of * sampled small regions. * * nfree: Number of free regions in slab. * * sn: Serial number (potentially non-unique). * * Serial numbers may wrap around if !opt_retain, but as long as * comparison functions fall back on address comparison for equal * serial numbers, stable (if imperfect) ordering is maintained. * * Serial numbers may not be unique even in the absence of * wrap-around, e.g. when splitting an extent and assigning the same * serial number to both resulting adjacent extents. 
*/ uint64_t e_bits; #define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) #define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS #define EXTENT_BITS_ARENA_SHIFT 0 #define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT) #define EXTENT_BITS_SLAB_WIDTH 1 #define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT) #define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT) #define EXTENT_BITS_COMMITTED_WIDTH 1 #define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT) #define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT) #define EXTENT_BITS_DUMPABLE_WIDTH 1 #define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT) #define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT) #define EXTENT_BITS_ZEROED_WIDTH 1 #define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT) #define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT) #define EXTENT_BITS_STATE_WIDTH 2 #define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT) #define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT) #define EXTENT_BITS_SZIND_WIDTH LG_CEIL_NSIZES #define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT) #define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT) #define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1) #define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT) #define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT) #define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT) #define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT) /* Pointer to the extent that this structure is responsible for. */ void *e_addr; union { /* * Extent size and serial number associated with the extent * structure (different than the serial number for the extent at * e_addr). * * ssssssss [...] ssssssss ssssnnnn nnnnnnnn */ size_t e_size_esn; #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1)) #define EXTENT_ESN_MASK ((size_t)PAGE-1) /* Base extent size, which may not be a multiple of PAGE. */ size_t e_bsize; }; /* * List linkage, used by a variety of lists: * - bin_t's slabs_full * - extents_t's LRU * - stashed dirty extents * - arena's large allocations */ ql_elm(extent_t) ql_link; /* * Linkage for per size class sn/address-ordered heaps, and * for extent_avail */ phn(extent_t) ph_link; union { /* Small region slab metadata. */ arena_slab_data_t e_slab_data; /* * Profile counters, used for large objects. Points to a * prof_tctx_t. */ atomic_p_t e_prof_tctx; }; }; typedef ql_head(extent_t) extent_list_t; typedef ph(extent_t) extent_tree_t; typedef ph(extent_t) extent_heap_t; /* Quantized collection of extents, with built-in LRU queue. */ struct extents_s { malloc_mutex_t mtx; /* * Quantized per size class heaps of extents. * * Synchronization: mtx. */ extent_heap_t heaps[NPSIZES+1]; /* * Bitmap for which set bits correspond to non-empty heaps. * * Synchronization: mtx. */ bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)]; /* * LRU of all extents in heaps. * * Synchronization: mtx. */ extent_list_t lru; /* * Page sum for all extents in heaps. * * The synchronization here is a little tricky. 
Modifications to npages * must hold mtx, but reads need not (though, a reader who sees npages * without holding the mutex can't assume anything about the rest of the * state of the extents_t). */ atomic_zu_t npages; /* All stored extents must be in the same state. */ extent_state_t state; /* * If true, delay coalescing until eviction; otherwise coalesce during * deallocation. */ bool delay_coalesce; }; #endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/extent_types.h010064400007650000024000000007551340421341300240560ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H #define JEMALLOC_INTERNAL_EXTENT_TYPES_H typedef struct extent_s extent_t; typedef struct extents_s extents_t; #define EXTENT_HOOKS_INITIALIZER NULL #define EXTENT_GROW_MAX_PIND (NPSIZES - 1) /* * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit) * is the max ratio between the size of the active extent and the new extent. */ #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 #endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/hash.h010064400007650000024000000170261340421341300222450ustar0000000000000000#ifndef JEMALLOC_INTERNAL_HASH_H #define JEMALLOC_INTERNAL_HASH_H #include "jemalloc/internal/assert.h" /* * The following hash function is based on MurmurHash3, placed into the public * domain by Austin Appleby. See https://github.com/aappleby/smhasher for * details. */ /******************************************************************************/ /* Internal implementation. */ static inline uint32_t hash_rotl_32(uint32_t x, int8_t r) { return ((x << r) | (x >> (32 - r))); } static inline uint64_t hash_rotl_64(uint64_t x, int8_t r) { return ((x << r) | (x >> (64 - r))); } static inline uint32_t hash_get_block_32(const uint32_t *p, int i) { /* Handle unaligned read. */ if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { uint32_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); return ret; } return p[i]; } static inline uint64_t hash_get_block_64(const uint64_t *p, int i) { /* Handle unaligned read. 
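 * Some targets fault on misaligned loads; memcpy sidesteps that, and
 * compilers lower it to a plain load where alignment is not an issue.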
*/ if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { uint64_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); return ret; } return p[i]; } static inline uint32_t hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } static inline uint64_t hash_fmix_64(uint64_t k) { k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; return k; } static inline uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; uint32_t h1 = seed; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 13); h1 = h1*5 + 0xe6546b64; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*4); uint32_t k1 = 0; switch (len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h1 = hash_fmix_32(h1); return h1; } UNUSED static inline void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 = 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 19); h1 += h2; h1 = h1*5 + 0x561ccd1b; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; h2 = hash_rotl_32(h2, 17); h2 += h3; h2 = h2*5 + 0x0bcaa747; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; h3 = hash_rotl_32(h3, 15); h3 += h4; h3 = h3*5 + 0x96cd1c35; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; h4 = hash_rotl_32(h4, 13); h4 += h1; h4 = h4*5 + 0x32ac3b17; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*16); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch (len & 15) { case 15: k4 ^= tail[14] << 16; case 14: k4 ^= tail[13] << 8; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; case 12: k3 ^= tail[11] << 24; case 11: k3 ^= tail[10] << 16; case 10: k3 ^= tail[ 9] << 8; case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; case 8: k2 ^= tail[ 7] << 24; case 7: k2 ^= tail[ 6] << 16; case 6: k2 ^= tail[ 5] << 8; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; case 4: k1 ^= tail[ 3] << 24; case 3: k1 ^= tail[ 2] << 16; case 2: k1 ^= tail[ 1] << 8; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = hash_fmix_32(h1); h2 = hash_fmix_32(h2); h3 = hash_fmix_32(h3); h4 = hash_fmix_32(h4); h1 += h2; h1 += h3; h1 += h4; h2 += 
h1; h3 += h1; h4 += h1; r_out[0] = (((uint64_t) h2) << 32) | h1; r_out[1] = (((uint64_t) h4) << 32) | h3; } UNUSED static inline void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; const uint64_t c1 = KQU(0x87c37b91114253d5); const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { const uint64_t *blocks = (const uint64_t *) (data); int i; for (i = 0; i < nblocks; i++) { uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; h1 = hash_rotl_64(h1, 27); h1 += h2; h1 = h1*5 + 0x52dce729; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; h2 = hash_rotl_64(h2, 31); h2 += h1; h2 = h2*5 + 0x38495ab5; } } /* tail */ { const uint8_t *tail = (const uint8_t*)(data + nblocks*16); uint64_t k1 = 0; uint64_t k2 = 0; switch (len & 15) { case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */ case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */ case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */ case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */ case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */ case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */ case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; /* falls through */ case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */ case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */ case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */ case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */ case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */ case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */ case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */ case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = hash_fmix_64(h1); h2 = hash_fmix_64(h2); h1 += h2; h2 += h1; r_out[0] = h1; r_out[1] = h2; } /******************************************************************************/ /* API. */ static inline void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { assert(len <= INT_MAX); /* Unfortunate implementation limitation. 
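 * The length parameters above are declared int (matching MurmurHash3's
 * reference implementation), so lengths beyond INT_MAX are unsupported.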
*/ #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else { uint64_t hashes[2]; hash_x86_128(key, (int)len, seed, hashes); r_hash[0] = (size_t)hashes[0]; r_hash[1] = (size_t)hashes[1]; } #endif } #endif /* JEMALLOC_INTERNAL_HASH_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/hooks.h010064400007650000024000000014531340421341300224420ustar0000000000000000#ifndef JEMALLOC_INTERNAL_HOOKS_H #define JEMALLOC_INTERNAL_HOOKS_H extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)(); extern JEMALLOC_EXPORT void (*hooks_libc_hook)(); #define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) #define open JEMALLOC_HOOK(open, hooks_libc_hook) #define read JEMALLOC_HOOK(read, hooks_libc_hook) #define write JEMALLOC_HOOK(write, hooks_libc_hook) #define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook) #define close JEMALLOC_HOOK(close, hooks_libc_hook) #define creat JEMALLOC_HOOK(creat, hooks_libc_hook) #define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook) /* Note that this is undef'd and re-define'd in src/prof.c. */ #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook) #endif /* JEMALLOC_INTERNAL_HOOKS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h010064400007650000024000000041241340421341300261510ustar0000000000000000#ifndef JEMALLOC_INTERNAL_DECLS_H #define JEMALLOC_INTERNAL_DECLS_H #include #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" # ifdef _WIN64 # if LG_VADDR <= 32 # error Generate the headers using x64 vcargs # endif # else # if LG_VADDR > 32 # undef LG_VADDR # define LG_VADDR 32 # endif # endif #else # include # include # if !defined(__pnacl__) && !defined(__native_client__) # include # if !defined(SYS_write) && defined(__NR_write) # define SYS_write __NR_write # endif # if defined(SYS_open) && defined(__aarch64__) /* Android headers may define SYS_open to __NR_open even though * __NR_open may not exist on AArch64 (superseded by __NR_openat). */ # undef SYS_open # endif # include # endif # include # include # ifdef JEMALLOC_OS_UNFAIR_LOCK # include # endif # ifdef JEMALLOC_GLIBC_MALLOC_HOOK # include # endif # include # include # include # ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME # include # endif #endif #include #include #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #ifndef SSIZE_MAX # define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) #endif #include #include #include #include #include #include #ifndef offsetof # define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) #endif #include #include #include #ifdef _MSC_VER # include typedef intptr_t ssize_t; # define PATH_MAX 1024 # define STDERR_FILENO 2 # define __func__ __FUNCTION__ # ifdef JEMALLOC_HAS_RESTRICT # define restrict __restrict # endif /* Disable warnings about deprecated system functions. */ # pragma warning(disable: 4996) #if _MSC_VER < 1800 static int isblank(int c) { return (c == '\t' || c == ' '); } #endif #else # include #endif #include #endif /* JEMALLOC_INTERNAL_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in010064400007650000024000000247211340421341300264120ustar0000000000000000#ifndef JEMALLOC_INTERNAL_DEFS_H_ #define JEMALLOC_INTERNAL_DEFS_H_ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use * multiple allocators simultaneously. 
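 * For example, configuring with --with-jemalloc-prefix=je_ exposes
 * je_malloc(), je_free(), etc. in place of the unprefixed names.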
*/ #undef JEMALLOC_PREFIX #undef JEMALLOC_CPREFIX /* * Define overrides for non-standard allocator-related functions if they are * present on the system. */ #undef JEMALLOC_OVERRIDE___LIBC_CALLOC #undef JEMALLOC_OVERRIDE___LIBC_FREE #undef JEMALLOC_OVERRIDE___LIBC_MALLOC #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN #undef JEMALLOC_OVERRIDE___LIBC_REALLOC #undef JEMALLOC_OVERRIDE___LIBC_VALLOC #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. * For shared libraries, symbol visibility mechanisms prevent these symbols * from being exported, but for static libraries, naming collisions are a real * possibility. */ #undef JEMALLOC_PRIVATE_NAMESPACE /* * Hyper-threaded CPUs may need a special instruction inside spin loops in * order to yield to another virtual CPU. */ #undef CPU_SPINWAIT /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ #undef HAVE_CPU_SPINWAIT /* * Number of significant bits in virtual addresses. This may be less than the * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 * bits are the same as bit 47. */ #undef LG_VADDR /* Defined if C11 atomics are available. */ #undef JEMALLOC_C11_ATOMICS /* Defined if GCC __atomic atomics are available. */ #undef JEMALLOC_GCC_ATOMIC_ATOMICS /* Defined if GCC __sync atomics are available. */ #undef JEMALLOC_GCC_SYNC_ATOMICS /* * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the * functions are defined in libgcc instead of being inlines). */ #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 /* * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the * functions are defined in libgcc instead of being inlines). */ #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 /* * Defined if __builtin_clz() and __builtin_clzl() are available. */ #undef JEMALLOC_HAVE_BUILTIN_CLZ /* * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. */ #undef JEMALLOC_OS_UNFAIR_LOCK /* * Defined if OSSpin*() functions are available, as provided by Darwin, and * documented in the spinlock(3) manual page. */ #undef JEMALLOC_OSSPIN /* Defined if syscall(2) is usable. */ #undef JEMALLOC_USE_SYSCALL /* * Defined if secure_getenv(3) is available. */ #undef JEMALLOC_HAVE_SECURE_GETENV /* * Defined if issetugid(2) is available. */ #undef JEMALLOC_HAVE_ISSETUGID /* Defined if pthread_atfork(3) is available. */ #undef JEMALLOC_HAVE_PTHREAD_ATFORK /* Defined if pthread_setname_np(3) is available. */ #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. */ #undef JEMALLOC_HAVE_CLOCK_MONOTONIC /* * Defined if mach_absolute_time() is available. */ #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc * bootstrapping will cause recursion into the pthreads library. Therefore, if * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in * malloc_tsd. */ #undef JEMALLOC_MALLOC_THREAD_CLEANUP /* * Defined if threaded initialization is known to be safe on this platform. 
* Among other things, it must be possible to initialize a mutex without * triggering allocation in order for threaded allocation to be safe. */ #undef JEMALLOC_THREADED_INIT /* * Defined if the pthreads implementation defines * _pthread_mutex_init_calloc_cb(), in which case the function is used in order * to avoid recursive allocation during mutex initialization. */ #undef JEMALLOC_MUTEX_INIT_CB /* Non-empty if the tls_model attribute is supported. */ #undef JEMALLOC_TLS_MODEL /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. */ #undef JEMALLOC_DEBUG /* JEMALLOC_STATS enables statistics calculation. */ #undef JEMALLOC_STATS /* JEMALLOC_PROF enables allocation profiling. */ #undef JEMALLOC_PROF /* Use libunwind for profile backtracing if defined. */ #undef JEMALLOC_PROF_LIBUNWIND /* Use libgcc for profile backtracing if defined. */ #undef JEMALLOC_PROF_LIBGCC /* Use gcc intrinsics for profile backtracing if defined. */ #undef JEMALLOC_PROF_GCC /* * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage * segment (DSS). */ #undef JEMALLOC_DSS /* Support memory filling (junk/zero). */ #undef JEMALLOC_FILL /* Support utrace(2)-based tracing. */ #undef JEMALLOC_UTRACE /* Support optional abort() on OOM. */ #undef JEMALLOC_XMALLOC /* Support lazy locking (avoid locking unless a second thread is launched). */ #undef JEMALLOC_LAZY_LOCK /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). */ #undef LG_QUANTUM /* One page is 2^LG_PAGE bytes. */ #undef LG_PAGE /* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require * explicit huge page support are separately configured. */ #undef LG_HUGEPAGE /* * If defined, adjacent virtual memory mappings with identical attributes * automatically coalesce, and they fragment when changes are made to subranges. * This is the normal order of things for mmap()/munmap(), but on Windows * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. * mappings do *not* coalesce/fragment. */ #undef JEMALLOC_MAPS_COALESCE /* * If defined, retain memory for later reuse by default rather than using e.g. * munmap() to unmap freed extents. This is enabled on 64-bit Linux because * common sequences of mmap()/munmap() calls will cause virtual memory map * holes. */ #undef JEMALLOC_RETAIN /* TLS is used to map arenas and magazine caches to threads. */ #undef JEMALLOC_TLS /* * Used to mark unreachable code to quiet "end of non-void" compiler warnings. * Don't use this directly; instead use unreachable() from util.h */ #undef JEMALLOC_INTERNAL_UNREACHABLE /* * ffs*() functions to use for bitmapping. Don't use these directly; instead, * use ffs_*() from util.h. */ #undef JEMALLOC_INTERNAL_FFSLL #undef JEMALLOC_INTERNAL_FFSL #undef JEMALLOC_INTERNAL_FFS /* * If defined, explicitly attempt to more uniformly distribute large allocation * pointer alignments across all cache indices. */ #undef JEMALLOC_CACHE_OBLIVIOUS /* * If defined, enable logging facilities. We make this a configure option to * avoid taking extra branches everywhere. */ #undef JEMALLOC_LOG /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ #undef JEMALLOC_ZONE /* * Methods for determining whether the OS overcommits. * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's * /proc/sys/vm.overcommit_memory file. * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. 
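 * (On Linux, overcommit_memory is 0 for heuristic overcommit, 1 for
 * always-overcommit, and 2 for strict accounting, i.e. no overcommit.)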
*/ #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY /* Defined if madvise(2) is available. */ #undef JEMALLOC_HAVE_MADVISE /* * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE * arguments to madvise(2). */ #undef JEMALLOC_HAVE_MADVISE_HUGE /* * Methods for purging unused pages differ between operating systems. * * madvise(..., MADV_FREE) : This marks pages as being unused, such that they * will be discarded rather than swapped out. * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is * defined, this immediately discards pages, * such that new pages will be demand-zeroed if * the address region is later touched; * otherwise this behaves similarly to * MADV_FREE, though typically with higher * system overhead. */ #undef JEMALLOC_PURGE_MADVISE_FREE #undef JEMALLOC_PURGE_MADVISE_DONTNEED #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ #undef JEMALLOC_DEFINE_MADVISE_FREE /* * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. */ #undef JEMALLOC_MADVISE_DONTDUMP /* * Defined if transparent huge pages (THPs) are supported via the * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. */ #undef JEMALLOC_THP /* Define if operating system has alloca.h header. */ #undef JEMALLOC_HAS_ALLOCA_H /* C99 restrict keyword supported. */ #undef JEMALLOC_HAS_RESTRICT /* For use by hash code. */ #undef JEMALLOC_BIG_ENDIAN /* sizeof(int) == 2^LG_SIZEOF_INT. */ #undef LG_SIZEOF_INT /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #undef LG_SIZEOF_LONG /* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ #undef LG_SIZEOF_LONG_LONG /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ #undef LG_SIZEOF_INTMAX_T /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ #undef JEMALLOC_GLIBC_MALLOC_HOOK /* glibc memalign hook. */ #undef JEMALLOC_GLIBC_MEMALIGN_HOOK /* pthread support */ #undef JEMALLOC_HAVE_PTHREAD /* dlsym() support */ #undef JEMALLOC_HAVE_DLSYM /* Adaptive mutex support in pthreads. */ #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP /* GNU specific sched_getcpu support */ #undef JEMALLOC_HAVE_SCHED_GETCPU /* GNU specific sched_setaffinity support */ #undef JEMALLOC_HAVE_SCHED_SETAFFINITY /* * If defined, all the features necessary for background threads are present. */ #undef JEMALLOC_BACKGROUND_THREAD /* * If defined, jemalloc symbols are not exported (doesn't work when * JEMALLOC_PREFIX is not defined). */ #undef JEMALLOC_EXPORT /* config.malloc_conf options string. */ #undef JEMALLOC_CONFIG_MALLOC_CONF /* If defined, jemalloc takes the malloc/free/etc. symbol names. */ #undef JEMALLOC_IS_MALLOC /* * Defined if strerror_r returns char * if _GNU_SOURCE is defined. */ #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE #endif /* JEMALLOC_INTERNAL_DEFS_H_ */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h010064400007650000024000000032311340421341300265450ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTERNS_H #define JEMALLOC_INTERNAL_EXTERNS_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/tsd_types.h" /* TSD checks this to set thread local slow state accordingly. */ extern bool malloc_slow; /* Run-time options. 
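 * These are typically set via the MALLOC_CONF environment variable (and
 * related option strings) as comma-separated name:value pairs, e.g.
 * "abort:true,junk:true".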
*/ extern bool opt_abort; extern bool opt_abort_conf; extern const char *opt_junk; extern bool opt_junk_alloc; extern bool opt_junk_free; extern bool opt_utrace; extern bool opt_xmalloc; extern bool opt_zero; extern unsigned opt_narenas; /* Number of CPUs. */ extern unsigned ncpus; /* Number of arenas used for automatic multiplexing of threads and arenas. */ extern unsigned narenas_auto; /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. */ extern atomic_p_t arenas[]; void *a0malloc(size_t size); void a0dalloc(void *ptr); void *bootstrap_malloc(size_t size); void *bootstrap_calloc(size_t num, size_t size); void bootstrap_free(void *ptr); void arena_set(unsigned ind, arena_t *arena); unsigned narenas_total_get(void); arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); arena_t *arena_choose_hard(tsd_t *tsd, bool internal); void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); void iarena_cleanup(tsd_t *tsd); void arena_cleanup(tsd_t *tsd); void arenas_tdata_cleanup(tsd_t *tsd); void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); bool malloc_initialized(void); #endif /* JEMALLOC_INTERNAL_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h010064400007650000024000000103031340421340100266600ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INCLUDES_H #define JEMALLOC_INTERNAL_INCLUDES_H /* * jemalloc can conceptually be broken into components (arena, tcache, etc.), * but there are circular dependencies that cannot be broken without * substantial performance degradation. * * Historically, we dealt with this by splitting each header into four sections * (types, structs, externs, and inlines), and including each header file multiple * times in this file, picking out the portion we want on each pass using the * following #defines: * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data * types. * JEMALLOC_H_STRUCTS : Data structures. * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. * JEMALLOC_H_INLINES : Inline functions. * * We're moving toward a world in which the dependencies are explicit; each file * will #include the headers it depends on (rather than relying on them being * implicitly available via this file including every header file in the * project). * * We're now in an intermediate state: we've broken up the header files to avoid * having to include each one multiple times, but have not yet moved the * dependency information into the header files (i.e. we still rely on the * ordering in this file to ensure all of a header's dependencies are available in * its translation unit). Each component is now broken up into multiple header * files, corresponding to the sections above (e.g. instead of "foo.h", we now * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h"). * * Those files which have been converted to explicitly include their * inter-component dependencies are now in the initial HERMETIC HEADERS * section. All headers may still rely on jemalloc_preamble.h (which, by fiat, * must be included first in every translation unit) for system headers and * global jemalloc definitions, however.
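 *
 * For illustration only, the historical multi-pass scheme looked roughly like
 * this (a sketch; foo.h is a placeholder, not a file in the tree):
 *
 *   #define JEMALLOC_H_TYPES
 *   #include "jemalloc/internal/foo.h"  // picks up only the types section
 *   #undef JEMALLOC_H_TYPES
 *   #define JEMALLOC_H_STRUCTS
 *   #include "jemalloc/internal/foo.h"  // now only the structs section
 *   #undef JEMALLOC_H_STRUCTS
 *   // ...and likewise for JEMALLOC_H_EXTERNS and JEMALLOC_H_INLINES.
 *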
*/ /******************************************************************************/ /* TYPES */ /******************************************************************************/ #include "jemalloc/internal/extent_types.h" #include "jemalloc/internal/base_types.h" #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/tcache_types.h" #include "jemalloc/internal/prof_types.h" /******************************************************************************/ /* STRUCTS */ /******************************************************************************/ #include "jemalloc/internal/arena_structs_a.h" #include "jemalloc/internal/extent_structs.h" #include "jemalloc/internal/base_structs.h" #include "jemalloc/internal/prof_structs.h" #include "jemalloc/internal/arena_structs_b.h" #include "jemalloc/internal/tcache_structs.h" #include "jemalloc/internal/background_thread_structs.h" /******************************************************************************/ /* EXTERNS */ /******************************************************************************/ #include "jemalloc/internal/jemalloc_internal_externs.h" #include "jemalloc/internal/extent_externs.h" #include "jemalloc/internal/base_externs.h" #include "jemalloc/internal/arena_externs.h" #include "jemalloc/internal/large_externs.h" #include "jemalloc/internal/tcache_externs.h" #include "jemalloc/internal/prof_externs.h" #include "jemalloc/internal/background_thread_externs.h" /******************************************************************************/ /* INLINES */ /******************************************************************************/ #include "jemalloc/internal/jemalloc_internal_inlines_a.h" #include "jemalloc/internal/base_inlines.h" /* * Include portions of arena code interleaved with tcache code in order to * resolve circular dependencies. */ #include "jemalloc/internal/prof_inlines_a.h" #include "jemalloc/internal/arena_inlines_a.h" #include "jemalloc/internal/extent_inlines.h" #include "jemalloc/internal/jemalloc_internal_inlines_b.h" #include "jemalloc/internal/tcache_inlines.h" #include "jemalloc/internal/arena_inlines_b.h" #include "jemalloc/internal/jemalloc_internal_inlines_c.h" #include "jemalloc/internal/prof_inlines_b.h" #include "jemalloc/internal/background_thread_inlines.h" #endif /* JEMALLOC_INTERNAL_INCLUDES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h010064400007650000024000000105211340421341300270160ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INLINES_A_H #define JEMALLOC_INTERNAL_INLINES_A_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/ticker.h" JEMALLOC_ALWAYS_INLINE malloc_cpuid_t malloc_getcpu(void) { assert(have_percpu_arena); #if defined(JEMALLOC_HAVE_SCHED_GETCPU) return (malloc_cpuid_t)sched_getcpu(); #else not_reached(); return -1; #endif } /* Return the chosen arena index based on current cpu. */ JEMALLOC_ALWAYS_INLINE unsigned percpu_arena_choose(void) { assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); malloc_cpuid_t cpuid = malloc_getcpu(); assert(cpuid >= 0); unsigned arena_ind; if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / 2)) { arena_ind = cpuid; } else { assert(opt_percpu_arena == per_phycpu_arena); /* Hyper threads on the same physical CPU share arena. 
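 * For example, with ncpus == 8 (4 physical cores, 2 hyperthreads each, and
 * the common Linux enumeration in which sibling hyperthreads get ids i and
 * i + ncpus / 2), cpuids 0-3 map to arenas 0-3 directly, while cpuids 4-7
 * fold back onto arenas 0-3 via the subtraction below.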
*/ arena_ind = cpuid - ncpus / 2; } return arena_ind; } /* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */ JEMALLOC_ALWAYS_INLINE unsigned percpu_arena_ind_limit(percpu_arena_mode_t mode) { assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode)); if (mode == per_phycpu_arena && ncpus > 1) { if (ncpus % 2) { /* This likely means a misconfig. */ return ncpus / 2 + 1; } return ncpus / 2; } else { return ncpus; } } static inline arena_tdata_t * arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) { arena_tdata_t *tdata; arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); if (unlikely(arenas_tdata == NULL)) { /* arenas_tdata hasn't been initialized yet. */ return arena_tdata_get_hard(tsd, ind); } if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { /* * ind is invalid, cache is old (too small), or tdata to be * initialized. */ return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) : NULL); } tdata = &arenas_tdata[ind]; if (likely(tdata != NULL) || !refresh_if_missing) { return tdata; } return arena_tdata_get_hard(tsd, ind); } static inline arena_t * arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { arena_t *ret; assert(ind < MALLOCX_ARENA_LIMIT); ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); if (unlikely(ret == NULL)) { if (init_if_missing) { ret = arena_init(tsdn, ind, (extent_hooks_t *)&extent_hooks_default); } } return ret; } static inline ticker_t * decay_ticker_get(tsd_t *tsd, unsigned ind) { arena_tdata_t *tdata; tdata = arena_tdata_get(tsd, ind, true); if (unlikely(tdata == NULL)) { return NULL; } return &tdata->decay_ticker; } JEMALLOC_ALWAYS_INLINE cache_bin_t * tcache_small_bin_get(tcache_t *tcache, szind_t binind) { assert(binind < NBINS); return &tcache->bins_small[binind]; } JEMALLOC_ALWAYS_INLINE cache_bin_t * tcache_large_bin_get(tcache_t *tcache, szind_t binind) { assert(binind >= NBINS &&binind < nhbins); return &tcache->bins_large[binind - NBINS]; } JEMALLOC_ALWAYS_INLINE bool tcache_available(tsd_t *tsd) { /* * Thread specific auto tcache might be unavailable if: 1) during tcache * initialization, or 2) disabled through thread.tcache.enabled mallctl * or config options. This check covers all cases. */ if (likely(tsd_tcache_enabled_get(tsd))) { /* Associated arena == NULL implies tcache init in progress. */ assert(tsd_tcachep_get(tsd)->arena == NULL || tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != NULL); return true; } return false; } JEMALLOC_ALWAYS_INLINE tcache_t * tcache_get(tsd_t *tsd) { if (!tcache_available(tsd)) { return NULL; } return tsd_tcachep_get(tsd); } static inline void pre_reentrancy(tsd_t *tsd, arena_t *arena) { /* arena is the current context. Reentry from a0 is not allowed. */ assert(arena != arena_get(tsd_tsdn(tsd), 0, false)); bool fast = tsd_fast(tsd); assert(tsd_reentrancy_level_get(tsd) < INT8_MAX); ++*tsd_reentrancy_levelp_get(tsd); if (fast) { /* Prepare slow path for reentrancy. 
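 * Reentrant allocation (e.g. from within an extent hook) must not use the
 * tcache fast path; switching the thread to the slow state here ensures
 * that subsequent allocations check the reentrancy level.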
*/ tsd_slow_update(tsd); assert(tsd->state == tsd_state_nominal_slow); } } static inline void post_reentrancy(tsd_t *tsd) { int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd); assert(*reentrancy_level > 0); if (--*reentrancy_level == 0) { tsd_slow_update(tsd); } } #endif /* JEMALLOC_INTERNAL_INLINES_A_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h010064400007650000024000000043611340421341300270240ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INLINES_B_H #define JEMALLOC_INTERNAL_INLINES_B_H #include "jemalloc/internal/rtree.h" /* Choose an arena based on a per-thread value. */ static inline arena_t * arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { arena_t *ret; if (arena != NULL) { return arena; } /* During reentrancy, arena 0 is the safest bet. */ if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) { return arena_get(tsd_tsdn(tsd), 0, true); } ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); if (unlikely(ret == NULL)) { ret = arena_choose_hard(tsd, internal); assert(ret); if (tcache_available(tsd)) { tcache_t *tcache = tcache_get(tsd); if (tcache->arena != NULL) { /* See comments in tcache_data_init().*/ assert(tcache->arena == arena_get(tsd_tsdn(tsd), 0, false)); if (tcache->arena != ret) { tcache_arena_reassociate(tsd_tsdn(tsd), tcache, ret); } } else { tcache_arena_associate(tsd_tsdn(tsd), tcache, ret); } } } /* * Note that for percpu arena, if the current arena is outside of the * auto percpu arena range, (i.e. thread is assigned to a manually * managed arena), then percpu arena is skipped. */ if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) && !internal && (arena_ind_get(ret) < percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd != tsd_tsdn(tsd))) { unsigned ind = percpu_arena_choose(); if (arena_ind_get(ret) != ind) { percpu_arena_update(tsd, ind); ret = tsd_arena_get(tsd); } ret->last_thd = tsd_tsdn(tsd); } return ret; } static inline arena_t * arena_choose(tsd_t *tsd, arena_t *arena) { return arena_choose_impl(tsd, arena, false); } static inline arena_t * arena_ichoose(tsd_t *tsd, arena_t *arena) { return arena_choose_impl(tsd, arena, true); } static inline bool arena_is_auto(arena_t *arena) { assert(narenas_auto > 0); return (arena_ind_get(arena) < narenas_auto); } JEMALLOC_ALWAYS_INLINE extent_t * iealloc(tsdn_t *tsdn, const void *ptr) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); } #endif /* JEMALLOC_INTERNAL_INLINES_B_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h010064400007650000024000000152321340421341300270240ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INLINES_C_H #define JEMALLOC_INTERNAL_INLINES_C_H #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/witness.h" /* * Translating the names of the 'i' functions: * Abbreviations used in the first part of the function name (before * alloc/dalloc) describe what that function accomplishes: * a: arena (query) * s: size (query, or sized deallocation) * e: extent (query) * p: aligned (allocates) * vs: size (query, without knowing that the pointer is into the heap) * r: rallocx implementation * x: xallocx implementation * Abbreviations used in the second part of the function name (after * alloc/dalloc) describe the arguments it takes * z: whether to return zeroed memory * t: accepts a tcache_t * 
parameter * m: accepts an arena_t * parameter */ JEMALLOC_ALWAYS_INLINE arena_t * iaalloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); return arena_aalloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE size_t isalloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); return arena_salloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE void * iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool is_internal, arena_t *arena, bool slow_path) { void *ret; assert(size != 0); assert(!is_internal || tcache == NULL); assert(!is_internal || arena == NULL || arena_is_auto(arena)); if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); } ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); if (config_stats && is_internal && likely(ret != NULL)) { arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); } return ret; } JEMALLOC_ALWAYS_INLINE void * ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) { return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false, NULL, slow_path); } JEMALLOC_ALWAYS_INLINE void * ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_internal, arena_t *arena) { void *ret; assert(usize != 0); assert(usize == sz_sa2u(usize, alignment)); assert(!is_internal || tcache == NULL); assert(!is_internal || arena == NULL || arena_is_auto(arena)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); if (config_stats && is_internal && likely(ret != NULL)) { arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); } return ret; } JEMALLOC_ALWAYS_INLINE void * ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena); } JEMALLOC_ALWAYS_INLINE void * ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, tcache_get(tsd), false, NULL); } JEMALLOC_ALWAYS_INLINE size_t ivsalloc(tsdn_t *tsdn, const void *ptr) { return arena_vsalloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) { assert(ptr != NULL); assert(!is_internal || tcache == NULL); assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (config_stats && is_internal) { arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr)); } if (!is_internal && !tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { assert(tcache == NULL); } arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path); } JEMALLOC_ALWAYS_INLINE void idalloc(tsd_t *tsd, void *ptr) { idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true); } JEMALLOC_ALWAYS_INLINE void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); } JEMALLOC_ALWAYS_INLINE void * iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { 
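	/*
	 * Strategy: allocate a new region at the requested alignment (first
	 * including the extra bytes, then without them if that fails), copy
	 * min(size, oldsize) bytes, and free the old region.
	 */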
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); void *p; size_t usize, copysize; usize = sz_sa2u(size + extra, alignment); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return NULL; } p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); if (p == NULL) { if (extra == 0) { return NULL; } /* Try again, without extra this time. */ usize = sz_sa2u(size, alignment); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return NULL; } p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); if (p == NULL) { return NULL; } } /* * Copy at most size bytes (not size+extra), since the caller has no * expectation that the extra bytes will be reliably preserved. */ copysize = (size < oldsize) ? size : oldsize; memcpy(p, ptr, copysize); isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); return p; } JEMALLOC_ALWAYS_INLINE void * iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { assert(ptr != NULL); assert(size != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* * Existing object alignment is inadequate; allocate new space * and copy. */ return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment, zero, tcache, arena); } return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero, tcache); } JEMALLOC_ALWAYS_INLINE void * iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero) { return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero, tcache_get(tsd), NULL); } JEMALLOC_ALWAYS_INLINE bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero) { assert(ptr != NULL); assert(size != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* Existing object alignment is inadequate. */ return true; } return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero); } #endif /* JEMALLOC_INTERNAL_INLINES_C_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h010064400007650000024000000017701340421341300263470ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MACROS_H #define JEMALLOC_INTERNAL_MACROS_H #ifdef JEMALLOC_DEBUG # define JEMALLOC_ALWAYS_INLINE static inline #else # define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline #endif #ifdef _MSC_VER # define inline _inline #endif #define UNUSED JEMALLOC_ATTR(unused) #define ZU(z) ((size_t)z) #define ZD(z) ((ssize_t)z) #define QU(q) ((uint64_t)q) #define QD(q) ((int64_t)q) #define KZU(z) ZU(z##ULL) #define KZD(z) ZD(z##LL) #define KQU(q) QU(q##ULL) #define KQD(q) QD(q##LL) #ifndef __DECONST # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif #if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus) # define restrict #endif /* Various function pointers are static and immutable except during testing. */ #ifdef JEMALLOC_JET # define JET_MUTABLE #else # define JET_MUTABLE const #endif #define JEMALLOC_VA_ARGS_HEAD(head, ...) head #define JEMALLOC_VA_ARGS_TAIL(head, ...)
__VA_ARGS__ #endif /* JEMALLOC_INTERNAL_MACROS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h010064400007650000024000000120241340421341300262210ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TYPES_H #define JEMALLOC_INTERNAL_TYPES_H /* Page size index type. */ typedef unsigned pszind_t; /* Size class index type. */ typedef unsigned szind_t; /* Processor / core id type. */ typedef int malloc_cpuid_t; /* * Flags bits: * * a: arena * t: tcache * 0: unused * z: zero * n: alignment * * aaaaaaaa aaaatttt tttttttt 0znnnnnn */ #define MALLOCX_ARENA_BITS 12 #define MALLOCX_TCACHE_BITS 12 #define MALLOCX_LG_ALIGN_BITS 6 #define MALLOCX_ARENA_SHIFT 20 #define MALLOCX_TCACHE_SHIFT 8 #define MALLOCX_ARENA_MASK \ (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT) /* NB: Arena index bias decreases the maximum number of arenas by 1. */ #define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1) #define MALLOCX_TCACHE_MASK \ (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT) #define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3) #define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ #define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) #define MALLOCX_ALIGN_GET(flags) \ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) #define MALLOCX_ZERO_GET(flags) \ ((bool)(flags & MALLOCX_ZERO)) #define MALLOCX_TCACHE_GET(flags) \ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2) #define MALLOCX_ARENA_GET(flags) \ (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) /* Smallest size class to support. */ #define TINY_MIN (1U << LG_TINY_MIN) /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). */ #ifndef LG_QUANTUM # if (defined(__i386__) || defined(_M_IX86)) # define LG_QUANTUM 4 # endif # ifdef __ia64__ # define LG_QUANTUM 4 # endif # ifdef __alpha__ # define LG_QUANTUM 4 # endif # if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) # define LG_QUANTUM 4 # endif # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) # define LG_QUANTUM 4 # endif # ifdef __arm__ # define LG_QUANTUM 3 # endif # ifdef __aarch64__ # define LG_QUANTUM 4 # endif # ifdef __hppa__ # define LG_QUANTUM 4 # endif # ifdef __m68k__ # define LG_QUANTUM 3 # endif # ifdef __mips__ # define LG_QUANTUM 3 # endif # ifdef __nios2__ # define LG_QUANTUM 3 # endif # ifdef __or1k__ # define LG_QUANTUM 3 # endif # ifdef __powerpc__ # define LG_QUANTUM 4 # endif # if defined(__riscv) || defined(__riscv__) # define LG_QUANTUM 4 # endif # ifdef __s390__ # define LG_QUANTUM 4 # endif # if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \ defined(__SH4_SINGLE_ONLY__)) # define LG_QUANTUM 4 # endif # ifdef __tile__ # define LG_QUANTUM 4 # endif # ifdef __le32__ # define LG_QUANTUM 4 # endif # ifndef LG_QUANTUM # error "Unknown minimum alignment for architecture; specify via " "--with-lg-quantum" # endif #endif #define QUANTUM ((size_t)(1U << LG_QUANTUM)) #define QUANTUM_MASK (QUANTUM - 1) /* Return the smallest quantum multiple that is >= a. */ #define QUANTUM_CEILING(a) \ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) #define LONG ((size_t)(1U << LG_SIZEOF_LONG)) #define LONG_MASK (LONG - 1) /* Return the smallest long multiple that is >= a. 
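 * For example, with LG_SIZEOF_LONG == 3 (8-byte longs), LONG_MASK == 7 and
 * LONG_CEILING(13) == (13 + 7) & ~7 == 16.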
*/ #define LONG_CEILING(a) \ (((a) + LONG_MASK) & ~LONG_MASK) #define SIZEOF_PTR (1U << LG_SIZEOF_PTR) #define PTR_MASK (SIZEOF_PTR - 1) /* Return the smallest (void *) multiple that is >= a. */ #define PTR_CEILING(a) \ (((a) + PTR_MASK) & ~PTR_MASK) /* * Maximum size of L1 cache line. This is used to avoid cache line aliasing. * In addition, this controls the spacing of cacheline-spaced size classes. * * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can * only handle raw constants. */ #define LG_CACHELINE 6 #define CACHELINE 64 #define CACHELINE_MASK (CACHELINE - 1) /* Return the smallest cacheline multiple that is >= s. */ #define CACHELINE_CEILING(s) \ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) /* Return the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2BASE(a, alignment) \ ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) /* Return the offset between a and the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2OFFSET(a, alignment) \ ((size_t)((uintptr_t)(a) & (alignment - 1))) /* Return the smallest alignment multiple that is >= s. */ #define ALIGNMENT_CEILING(s, alignment) \ (((s) + (alignment - 1)) & ((~(alignment)) + 1)) /* Declare a variable-length array. */ #if __STDC_VERSION__ < 199901L # ifdef _MSC_VER # include <malloc.h> # define alloca _alloca # else # ifdef JEMALLOC_HAS_ALLOCA_H # include <alloca.h> # else # include <stdlib.h> # endif # endif # define VARIABLE_ARRAY(type, name, count) \ type *name = alloca(sizeof(type) * (count)) #else # define VARIABLE_ARRAY(type, name, count) type name[(count)] #endif #endif /* JEMALLOC_INTERNAL_TYPES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in010064400007650000024000000067731340421341300253710ustar0000000000000000#ifndef JEMALLOC_PREAMBLE_H #define JEMALLOC_PREAMBLE_H #include "jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" #ifdef JEMALLOC_UTRACE #include <sys/ktrace.h> #endif #define JEMALLOC_NO_DEMANGLE #ifdef JEMALLOC_JET # undef JEMALLOC_IS_MALLOC # define JEMALLOC_N(n) jet_##n # include "jemalloc/internal/public_namespace.h" # define JEMALLOC_NO_RENAME # include "../jemalloc@install_suffix@.h" # undef JEMALLOC_NO_RENAME #else # define JEMALLOC_N(n) @private_namespace@##n # include "../jemalloc@install_suffix@.h" #endif #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) #include <libkern/OSAtomic.h> #endif #ifdef JEMALLOC_ZONE #include <mach/mach_error.h> #include <mach/mach_init.h> #include <mach/vm_map.h> #endif #include "jemalloc/internal/jemalloc_internal_macros.h" /* * Note that the ordering matters here; the hook itself is name-mangled. We * want the inclusion of hooks to happen early, so that we hook as much as * possible.
*/ #ifndef JEMALLOC_NO_PRIVATE_NAMESPACE # ifndef JEMALLOC_JET # include "jemalloc/internal/private_namespace.h" # else # include "jemalloc/internal/private_namespace_jet.h" # endif #endif #include "jemalloc/internal/hooks.h" #ifdef JEMALLOC_DEFINE_MADVISE_FREE # define JEMALLOC_MADV_FREE 8 #endif static const bool config_debug = #ifdef JEMALLOC_DEBUG true #else false #endif ; static const bool have_dss = #ifdef JEMALLOC_DSS true #else false #endif ; static const bool have_madvise_huge = #ifdef JEMALLOC_HAVE_MADVISE_HUGE true #else false #endif ; static const bool config_fill = #ifdef JEMALLOC_FILL true #else false #endif ; static const bool config_lazy_lock = #ifdef JEMALLOC_LAZY_LOCK true #else false #endif ; static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; static const bool config_prof = #ifdef JEMALLOC_PROF true #else false #endif ; static const bool config_prof_libgcc = #ifdef JEMALLOC_PROF_LIBGCC true #else false #endif ; static const bool config_prof_libunwind = #ifdef JEMALLOC_PROF_LIBUNWIND true #else false #endif ; static const bool maps_coalesce = #ifdef JEMALLOC_MAPS_COALESCE true #else false #endif ; static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; static const bool config_tls = #ifdef JEMALLOC_TLS true #else false #endif ; static const bool config_utrace = #ifdef JEMALLOC_UTRACE true #else false #endif ; static const bool config_xmalloc = #ifdef JEMALLOC_XMALLOC true #else false #endif ; static const bool config_cache_oblivious = #ifdef JEMALLOC_CACHE_OBLIVIOUS true #else false #endif ; /* * Undocumented, for jemalloc development use only at the moment. See the note * in jemalloc/internal/log.h. */ static const bool config_log = #ifdef JEMALLOC_LOG true #else false #endif ; #ifdef JEMALLOC_HAVE_SCHED_GETCPU /* Currently percpu_arena depends on sched_getcpu. */ #define JEMALLOC_PERCPU_ARENA #endif static const bool have_percpu_arena = #ifdef JEMALLOC_PERCPU_ARENA true #else false #endif ; /* * Undocumented, and not recommended; the application should take full * responsibility for tracking provenance. 
*/ static const bool force_ivsalloc = #ifdef JEMALLOC_FORCE_IVSALLOC true #else false #endif ; static const bool have_background_thread = #ifdef JEMALLOC_BACKGROUND_THREAD true #else false #endif ; #endif /* JEMALLOC_PREAMBLE_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/large_externs.h010064400007650000024000000023261340421341300241610ustar0000000000000000#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H #define JEMALLOC_INTERNAL_LARGE_EXTERNS_H void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero); bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, size_t usize_max, bool zero); void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, size_t alignment, bool zero, tcache_t *tcache); typedef void (large_dalloc_junk_t)(void *, size_t); extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk; typedef void (large_dalloc_maybe_junk_t)(void *, size_t); extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk; void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent); void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent); void large_dalloc(tsdn_t *tsdn, extent_t *extent); size_t large_salloc(tsdn_t *tsdn, const extent_t *extent); prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/log.h010064400007650000024000000072521340421340100221000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_LOG_H #define JEMALLOC_INTERNAL_LOG_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" #ifdef JEMALLOC_LOG # define JEMALLOC_LOG_VAR_BUFSIZE 1000 #else # define JEMALLOC_LOG_VAR_BUFSIZE 1 #endif #define JEMALLOC_LOG_BUFSIZE 4096 /* * The log malloc_conf option is a '|'-delimited list of log_var name segments * which should be logged. The names are themselves hierarchical, with '.' as * the delimiter (a "segment" is just a prefix in the log namespace). So, if * you have: * * log("arena", "log msg for arena"); // 1 * log("arena.a", "log msg for arena.a"); // 2 * log("arena.b", "log msg for arena.b"); // 3 * log("arena.a.a", "log msg for arena.a.a"); // 4 * log("extent.a", "log msg for extent.a"); // 5 * log("extent.b", "log msg for extent.b"); // 6 * * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and * 6 will print at runtime. You can enable logging from all log vars by * writing "log=.". * * None of this should be regarded as a stable API for right now. It's intended * as a debugging interface, to let us keep around some of our printf-debugging * statements. */ extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; extern atomic_b_t log_init_done; typedef struct log_var_s log_var_t; struct log_var_s { /* * Lowest bit is "inited", second lowest is "enabled". Putting them in * a single word lets us avoid any fences on weak architectures. 
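 * A single relaxed load of state then observes both facts at once, so the
 * fast path needs no ordering between two separate flags.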
*/ atomic_u_t state; const char *name; }; #define LOG_NOT_INITIALIZED 0U #define LOG_INITIALIZED_NOT_ENABLED 1U #define LOG_ENABLED 2U #define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str} /* * Returns the value we should assume for state (which is not necessarily * accurate; if logging is done before logging has finished initializing, then * we default to doing the safe thing by logging everything). */ unsigned log_var_update_state(log_var_t *log_var); /* We factor out the metadata management to allow us to test more easily. */ #define log_do_begin(log_var) \ if (config_log) { \ unsigned log_state = atomic_load_u(&(log_var).state, \ ATOMIC_RELAXED); \ if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \ log_state = log_var_update_state(&(log_var)); \ assert(log_state != LOG_NOT_INITIALIZED); \ } \ if (log_state == LOG_ENABLED) { \ { /* User code executes here. */ #define log_do_end(log_var) \ } \ } \ } /* * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during * preprocessing. To work around this, we take all potential extra arguments in * a var-args function. Since a varargs macro needs at least one argument in * the "...", we accept the format string there, and require that the first * argument in this "..." is a const char *. */ static inline void log_impl_varargs(const char *name, ...) { char buf[JEMALLOC_LOG_BUFSIZE]; va_list ap; va_start(ap, name); const char *format = va_arg(ap, const char *); size_t dst_offset = 0; dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name); dst_offset += malloc_vsnprintf(buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap); dst_offset += malloc_snprintf(buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, "\n"); va_end(ap); malloc_write(buf); } /* Call as log("log.var.str", "format_string %d", arg_for_format_string); */ #define LOG(log_var_str, ...) \ do { \ static log_var_t log_var = LOG_VAR_INIT(log_var_str); \ log_do_begin(log_var) \ log_impl_varargs((log_var).name, __VA_ARGS__); \ log_do_end(log_var) \ } while (0) #endif /* JEMALLOC_INTERNAL_LOG_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/malloc_io.h010064400007650000024000000056511340421340100232560ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H #define JEMALLOC_INTERNAL_MALLOC_IO_H #ifdef _WIN32 # ifdef _WIN64 # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "ll" # else # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "" # endif # define FMTd32 "d" # define FMTu32 "u" # define FMTx32 "x" # define FMTd64 FMT64_PREFIX "d" # define FMTu64 FMT64_PREFIX "u" # define FMTx64 FMT64_PREFIX "x" # define FMTdPTR FMTPTR_PREFIX "d" # define FMTuPTR FMTPTR_PREFIX "u" # define FMTxPTR FMTPTR_PREFIX "x" #else # include <inttypes.h> # define FMTd32 PRId32 # define FMTu32 PRIu32 # define FMTx32 PRIx32 # define FMTd64 PRId64 # define FMTu64 PRIu64 # define FMTx64 PRIx64 # define FMTdPTR PRIdPTR # define FMTuPTR PRIuPTR # define FMTxPTR PRIxPTR #endif /* Size of stack-allocated buffer passed to buferror(). */ #define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ #define MALLOC_PRINTF_BUFSIZE 4096 int buferror(int err, char *buf, size_t buflen); uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base); void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math.
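 * (This also keeps formatting self-contained, since libc's printf family may
 * itself allocate on some platforms.) Illustrative use, with err and where
 * standing in for caller-provided values:
 *
 *   char buf[MALLOC_PRINTF_BUFSIZE];
 *   malloc_snprintf(buf, sizeof(buf), "error %d in %s", err, where);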
*/ size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); size_t malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); /* * The caller can set write_cb and cbopaque to null to choose to print with the * je_malloc_message hook. */ void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); static inline ssize_t malloc_write_fd(int fd, const void *buf, size_t count) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary * on FreeBSD; most operating systems do not have this problem though. * * syscall() returns long or int, depending on platform, so capture the * result in the widest plausible type to avoid compiler warnings. */ long result = syscall(SYS_write, fd, buf, count); #else ssize_t result = (ssize_t)write(fd, buf, #ifdef _WIN32 (unsigned int) #endif count); #endif return (ssize_t)result; } static inline ssize_t malloc_read_fd(int fd, void *buf, size_t count) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) long result = syscall(SYS_read, fd, buf, count); #else ssize_t result = read(fd, buf, #ifdef _WIN32 (unsigned int) #endif count); #endif return (ssize_t)result; } #endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/mutex.h010064400007650000024000000176311340421341300224660ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MUTEX_H #define JEMALLOC_INTERNAL_MUTEX_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/witness.h" typedef enum { /* Can only acquire one mutex of a given witness rank at a time. */ malloc_mutex_rank_exclusive, /* * Can acquire multiple mutexes of the same witness rank, but in * address-ascending order only. */ malloc_mutex_address_ordered } malloc_mutex_lock_order_t; typedef struct malloc_mutex_s malloc_mutex_t; struct malloc_mutex_s { union { struct { /* * prof_data is defined first to reduce cacheline * bouncing: the data is not touched by the mutex holder * during unlocking, while it might be modified by * contenders. Having it before the mutex itself could * avoid prefetching a modified cacheline (for the * unlocking thread). */ mutex_prof_data_t prof_data; #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 SRWLOCK lock; # else CRITICAL_SECTION lock; # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; #else pthread_mutex_t lock; #endif }; /* * We only touch witness when configured w/ debug. However we * keep the field in a union when !debug so that we don't have * to pollute the code base with #ifdefs, while avoiding the * memory cost. */ #if !defined(JEMALLOC_DEBUG) witness_t witness; malloc_mutex_lock_order_t lock_order; #endif }; #if defined(JEMALLOC_DEBUG) witness_t witness; malloc_mutex_lock_order_t lock_order; #endif }; /* * Based on benchmark results, a fixed spin with this amount of retries works * well for our critical sections.
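 * (Roughly: the slow lock path retries the trylock up to this many times
 * before falling back to a blocking acquire.)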
*/ #define MALLOC_MUTEX_MAX_SPIN 250 #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 # define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) # else # define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) # define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) #elif (defined(JEMALLOC_OSSPIN)) # define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock)) #else # define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) #endif #define LOCK_PROF_DATA_INITIALIZER \ {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ ATOMIC_INIT(0), 0, NULL, 0} #ifdef _WIN32 # define MALLOC_MUTEX_INITIALIZER #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_OSSPIN)) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #endif #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #else # undef isthreaded /* Undo private_namespace.h definition. */ # define isthreaded true #endif bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, malloc_mutex_lock_order_t lock_order); void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); bool malloc_mutex_boot(void); void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_lock_slow(malloc_mutex_t *mutex); static inline void malloc_mutex_lock_final(malloc_mutex_t *mutex) { MALLOC_MUTEX_LOCK(mutex); } static inline bool malloc_mutex_trylock_final(malloc_mutex_t *mutex) { return MALLOC_MUTEX_TRYLOCK(mutex); } static inline void mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (config_stats) { mutex_prof_data_t *data = &mutex->prof_data; data->n_lock_ops++; if (data->prev_owner != tsdn) { data->prev_owner = tsdn; data->n_owner_switches++; } } } /* Trylock: return false if the lock is successfully acquired. 
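 * Note the inverted sense relative to pthread_mutex_trylock(): true means
 * the lock was NOT acquired, matching the MALLOC_MUTEX_TRYLOCK() wrappers
 * above.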
*/ static inline bool malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { if (malloc_mutex_trylock_final(mutex)) { return true; } mutex_owner_stats_update(tsdn, mutex); } witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); return false; } /* Aggregate lock prof data. */ static inline void malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { nstime_add(&sum->tot_wait_time, &data->tot_wait_time); if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { nstime_copy(&sum->max_wait_time, &data->max_wait_time); } sum->n_wait_times += data->n_wait_times; sum->n_spin_acquired += data->n_spin_acquired; if (sum->max_n_thds < data->max_n_thds) { sum->max_n_thds = data->max_n_thds; } uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, ATOMIC_RELAXED); uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( &data->n_waiting_thds, ATOMIC_RELAXED); atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, ATOMIC_RELAXED); sum->n_owner_switches += data->n_owner_switches; sum->n_lock_ops += data->n_lock_ops; } static inline void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { if (malloc_mutex_trylock_final(mutex)) { malloc_mutex_lock_slow(mutex); } mutex_owner_stats_update(tsdn, mutex); } witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); } static inline void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { MALLOC_MUTEX_UNLOCK(mutex); } } static inline void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); } static inline void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); } /* Copy the prof data from mutex for processing. */ static inline void malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) { mutex_prof_data_t *source = &mutex->prof_data; /* Can only read holding the mutex. */ malloc_mutex_assert_owner(tsdn, mutex); /* * Not *really* allowed (we shouldn't be doing non-atomic loads of * atomic data), but the mutex protection makes this safe, and writing * a member-for-member copy is tedious for this situation. */ *data = *source; /* n_wait_thds is not reported (modified w/o locking). */ atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); } #endif /* JEMALLOC_INTERNAL_MUTEX_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/mutex_pool.h010064400007650000024000000054731340421340100235150ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H #define JEMALLOC_INTERNAL_MUTEX_POOL_H #include "jemalloc/internal/hash.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/witness.h" /* We do mod reductions by this value, so it should be kept a power of 2. */ #define MUTEX_POOL_SIZE 256 typedef struct mutex_pool_s mutex_pool_t; struct mutex_pool_s { malloc_mutex_t mutexes[MUTEX_POOL_SIZE]; }; bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank); /* Internal helper - not meant to be called outside this module. 
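 * It hashes the key down to one of the MUTEX_POOL_SIZE mutexes, so distinct
 * keys may share a mutex; mutex_pool_lock2() below handles that case, and
 * orders distinct locks by address to avoid deadlock.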
*/ static inline malloc_mutex_t * mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) { size_t hash_result[2]; hash(&key, sizeof(key), 0xd50dcc1b, hash_result); return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE]; } static inline void mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) { for (int i = 0; i < MUTEX_POOL_SIZE; i++) { malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]); } } /* * Note that a mutex pool doesn't work exactly the way an embedded mutex would. * You're not allowed to acquire mutexes in the pool one at a time. You have to * acquire all the mutexes you'll need in a single function call, and then * release them all in a single function call. */ static inline void mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { mutex_pool_assert_not_held(tsdn, pool); malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); malloc_mutex_lock(tsdn, mutex); } static inline void mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); malloc_mutex_unlock(tsdn, mutex); mutex_pool_assert_not_held(tsdn, pool); } static inline void mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, uintptr_t key2) { mutex_pool_assert_not_held(tsdn, pool); malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); if ((uintptr_t)mutex1 < (uintptr_t)mutex2) { malloc_mutex_lock(tsdn, mutex1); malloc_mutex_lock(tsdn, mutex2); } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) { malloc_mutex_lock(tsdn, mutex1); } else { malloc_mutex_lock(tsdn, mutex2); malloc_mutex_lock(tsdn, mutex1); } } static inline void mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, uintptr_t key2) { malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); if (mutex1 == mutex2) { malloc_mutex_unlock(tsdn, mutex1); } else { malloc_mutex_unlock(tsdn, mutex1); malloc_mutex_unlock(tsdn, mutex2); } mutex_pool_assert_not_held(tsdn, pool); } static inline void mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key)); } #endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/mutex_prof.h010064400007650000024000000057101340421340100235040ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H #define JEMALLOC_INTERNAL_MUTEX_PROF_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/tsd_types.h" #define MUTEX_PROF_GLOBAL_MUTEXES \ OP(background_thread) \ OP(ctl) \ OP(prof) typedef enum { #define OP(mtx) global_prof_mutex_##mtx, MUTEX_PROF_GLOBAL_MUTEXES #undef OP mutex_prof_num_global_mutexes } mutex_prof_global_ind_t; #define MUTEX_PROF_ARENA_MUTEXES \ OP(large) \ OP(extent_avail) \ OP(extents_dirty) \ OP(extents_muzzy) \ OP(extents_retained) \ OP(decay_dirty) \ OP(decay_muzzy) \ OP(base) \ OP(tcache_list) typedef enum { #define OP(mtx) arena_prof_mutex_##mtx, MUTEX_PROF_ARENA_MUTEXES #undef OP mutex_prof_num_arena_mutexes } mutex_prof_arena_ind_t; #define MUTEX_PROF_UINT64_COUNTERS \ OP(num_ops, uint64_t, "n_lock_ops") \ OP(num_wait, uint64_t, "n_waiting") \ OP(num_spin_acq, uint64_t, "n_spin_acq") \ OP(num_owner_switch, uint64_t, "n_owner_switch") \ OP(total_wait_time, uint64_t, "total_wait_ns") \ OP(max_wait_time, uint64_t, "max_wait_ns") #define MUTEX_PROF_UINT32_COUNTERS \ OP(max_num_thds, uint32_t, "max_n_thds") #define MUTEX_PROF_COUNTERS
\ MUTEX_PROF_UINT64_COUNTERS \ MUTEX_PROF_UINT32_COUNTERS #define OP(counter, type, human) mutex_counter_##counter, #define COUNTER_ENUM(counter_list, t) \ typedef enum { \ counter_list \ mutex_prof_num_##t##_counters \ } mutex_prof_##t##_counter_ind_t; COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t) COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t) #undef COUNTER_ENUM #undef OP typedef struct { /* * Counters touched on the slow path, i.e. when there is lock * contention. We update them once we have the lock. */ /* Total time (in nano seconds) spent waiting on this mutex. */ nstime_t tot_wait_time; /* Max time (in nano seconds) spent on a single lock operation. */ nstime_t max_wait_time; /* # of times have to wait for this mutex (after spinning). */ uint64_t n_wait_times; /* # of times acquired the mutex through local spinning. */ uint64_t n_spin_acquired; /* Max # of threads waiting for the mutex at the same time. */ uint32_t max_n_thds; /* Current # of threads waiting on the lock. Atomic synced. */ atomic_u32_t n_waiting_thds; /* * Data touched on the fast path. These are modified right after we * grab the lock, so it's placed closest to the end (i.e. right before * the lock) so that we have a higher chance of them being on the same * cacheline. */ /* # of times the mutex holder is different than the previous one. */ uint64_t n_owner_switches; /* Previous mutex holder, to facilitate n_owner_switches. */ tsdn_t *prev_owner; /* # of lock() operations in total. */ uint64_t n_lock_ops; } mutex_prof_data_t; #endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/nstime.h010064400007650000024000000024301340421340100226070ustar0000000000000000#ifndef JEMALLOC_INTERNAL_NSTIME_H #define JEMALLOC_INTERNAL_NSTIME_H /* Maximum supported number of seconds (~584 years). */ #define NSTIME_SEC_MAX KQU(18446744072) #define NSTIME_ZERO_INITIALIZER {0} typedef struct { uint64_t ns; } nstime_t; void nstime_init(nstime_t *time, uint64_t ns); void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); uint64_t nstime_ns(const nstime_t *time); uint64_t nstime_sec(const nstime_t *time); uint64_t nstime_msec(const nstime_t *time); uint64_t nstime_nsec(const nstime_t *time); void nstime_copy(nstime_t *time, const nstime_t *source); int nstime_compare(const nstime_t *a, const nstime_t *b); void nstime_add(nstime_t *time, const nstime_t *addend); void nstime_iadd(nstime_t *time, uint64_t addend); void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); void nstime_isubtract(nstime_t *time, uint64_t subtrahend); void nstime_imultiply(nstime_t *time, uint64_t multiplier); void nstime_idivide(nstime_t *time, uint64_t divisor); uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); typedef bool (nstime_monotonic_t)(void); extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; typedef bool (nstime_update_t)(nstime_t *); extern nstime_update_t *JET_MUTABLE nstime_update; #endif /* JEMALLOC_INTERNAL_NSTIME_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/pages.h010064400007650000024000000060671340421340100224210ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H #define JEMALLOC_INTERNAL_PAGES_EXTERNS_H /* Page size. LG_PAGE is determined by the configure script. */ #ifdef PAGE_MASK # undef PAGE_MASK #endif #define PAGE ((size_t)(1U << LG_PAGE)) #define PAGE_MASK ((size_t)(PAGE - 1)) /* Return the page base address for the page containing address a. 
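 * For example, with LG_PAGE == 12 (4 KiB pages),
 * PAGE_ADDR2BASE((void *)0x12345) == (void *)0x12000.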
*/ #define PAGE_ADDR2BASE(a) \ ((void *)((uintptr_t)(a) & ~PAGE_MASK)) /* Return the smallest pagesize multiple that is >= s. */ #define PAGE_CEILING(s) \ (((s) + PAGE_MASK) & ~PAGE_MASK) /* Huge page size. LG_HUGEPAGE is determined by the configure script. */ #define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) #define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) /* Return the huge page base address for the huge page containing address a. */ #define HUGEPAGE_ADDR2BASE(a) \ ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK)) /* Return the smallest pagesize multiple that is >= s. */ #define HUGEPAGE_CEILING(s) \ (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) /* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */ #if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE) # define PAGES_CAN_PURGE_LAZY #endif /* * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported. * * The only supported way to hard-purge on Windows is to decommit and then * re-commit, but doing so is racy, and if re-commit fails it's a pain to * propagate the "poisoned" memory state. Since we typically decommit as the * next step after purging on Windows anyway, there's no point in adding such * complexity. */ #if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \ defined(JEMALLOC_MAPS_COALESCE)) # define PAGES_CAN_PURGE_FORCED #endif static const bool pages_can_purge_lazy = #ifdef PAGES_CAN_PURGE_LAZY true #else false #endif ; static const bool pages_can_purge_forced = #ifdef PAGES_CAN_PURGE_FORCED true #else false #endif ; typedef enum { thp_mode_default = 0, /* Do not change hugepage settings. */ thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */ thp_mode_names_limit = 3, /* Used for option processing. */ thp_mode_not_supported = 3 /* No THP support detected. */ } thp_mode_t; #define THP_MODE_DEFAULT thp_mode_default extern thp_mode_t opt_thp; extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */ extern const char *thp_mode_names[]; void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); void pages_unmap(void *addr, size_t size); bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge_lazy(void *addr, size_t size); bool pages_purge_forced(void *addr, size_t size); bool pages_huge(void *addr, size_t size); bool pages_nohuge(void *addr, size_t size); bool pages_dontdump(void *addr, size_t size); bool pages_dodump(void *addr, size_t size); bool pages_boot(void); void pages_set_thp_state (void *ptr, size_t size); #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/ph.h010064400007650000024000000304151340421340100217230ustar0000000000000000/* * A Pairing Heap implementation. * * "The Pairing Heap: A New Form of Self-Adjusting Heap" * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf * * With auxiliary twopass list, described in a follow on paper. * * "Pairing Heaps: Experiments and Analysis" * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf * ******************************************************************************* */ #ifndef PH_H_ #define PH_H_ /* Node structure. */ #define phn(a_type) \ struct { \ a_type *phn_prev; \ a_type *phn_next; \ a_type *phn_lchild; \ } /* Root structure. */ #define ph(a_type) \ struct { \ a_type *ph_root; \ } /* Internal utility macros. 
*/ #define phn_lchild_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_lchild) #define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ a_phn->a_field.phn_lchild = a_lchild; \ } while (0) #define phn_next_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_next) #define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ a_phn->a_field.phn_prev = a_prev; \ } while (0) #define phn_prev_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_prev) #define phn_next_set(a_type, a_field, a_phn, a_next) do { \ a_phn->a_field.phn_next = a_next; \ } while (0) #define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ a_type *phn0child; \ \ assert(a_phn0 != NULL); \ assert(a_phn1 != NULL); \ assert(a_cmp(a_phn0, a_phn1) <= 0); \ \ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ phn_next_set(a_type, a_field, a_phn1, phn0child); \ if (phn0child != NULL) { \ phn_prev_set(a_type, a_field, phn0child, a_phn1); \ } \ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ } while (0) #define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ if (a_phn0 == NULL) { \ r_phn = a_phn1; \ } else if (a_phn1 == NULL) { \ r_phn = a_phn0; \ } else if (a_cmp(a_phn0, a_phn1) < 0) { \ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ a_cmp); \ r_phn = a_phn0; \ } else { \ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ a_cmp); \ r_phn = a_phn1; \ } \ } while (0) #define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *head = NULL; \ a_type *tail = NULL; \ a_type *phn0 = a_phn; \ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ \ /* \ * Multipass merge, wherein the first two elements of a FIFO \ * are repeatedly merged, and each result is appended to the \ * singly linked FIFO, until the FIFO contains only a single \ * element. We start with a sibling list but no reference to \ * its tail, so we do a single pass over the sibling list to \ * populate the FIFO. 
\ */ \ if (phn1 != NULL) { \ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ if (phnrest != NULL) { \ phn_prev_set(a_type, a_field, phnrest, NULL); \ } \ phn_prev_set(a_type, a_field, phn0, NULL); \ phn_next_set(a_type, a_field, phn0, NULL); \ phn_prev_set(a_type, a_field, phn1, NULL); \ phn_next_set(a_type, a_field, phn1, NULL); \ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ head = tail = phn0; \ phn0 = phnrest; \ while (phn0 != NULL) { \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ phnrest = phn_next_get(a_type, a_field, \ phn1); \ if (phnrest != NULL) { \ phn_prev_set(a_type, a_field, \ phnrest, NULL); \ } \ phn_prev_set(a_type, a_field, phn0, \ NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ phn_prev_set(a_type, a_field, phn1, \ NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = phnrest; \ } else { \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = NULL; \ } \ } \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ while (true) { \ head = phn_next_get(a_type, a_field, \ phn1); \ assert(phn_prev_get(a_type, a_field, \ phn0) == NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ assert(phn_prev_get(a_type, a_field, \ phn1) == NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ if (head == NULL) { \ break; \ } \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, \ phn0); \ } \ } \ } \ r_phn = phn0; \ } while (0) #define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ if (phn != NULL) { \ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_prev_set(a_type, a_field, phn, NULL); \ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ assert(phn_next_get(a_type, a_field, phn) == NULL); \ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ a_ph->ph_root); \ } \ } while (0) #define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ if (lchild == NULL) { \ r_phn = NULL; \ } else { \ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ r_phn); \ } \ } while (0) /* * The ph_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to ph_gen(). */ #define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ a_attr void a_prefix##new(a_ph_type *ph); \ a_attr bool a_prefix##empty(a_ph_type *ph); \ a_attr a_type *a_prefix##first(a_ph_type *ph); \ a_attr a_type *a_prefix##any(a_ph_type *ph); \ a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); /* * The ph_gen() macro generates a type-specific pairing heap implementation, * based on the above cpp macros. 
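 *
 * As an illustrative sketch (modeled on the usage docs in rb.h; the names
 * here are hypothetical, not from the jemalloc sources), a min-heap keyed
 * on an int could be instantiated as:
 *
 *   typedef struct node_s node_t;
 *   struct node_s {
 *       int key;
 *       phn(node_t) ph_link;
 *   };
 *   typedef ph(node_t) heap_t;
 *
 *   static int
 *   node_cmp(node_t *a, node_t *b) {
 *       return (a->key > b->key) - (a->key < b->key);
 *   }
 *
 *   ph_proto(static, heap_, heap_t, node_t)
 *   ph_gen(static, heap_, heap_t, node_t, ph_link, node_cmp)
 *
 * after which heap_new(), heap_insert(), heap_first(), heap_remove_first(),
 * etc. operate on heap_t objects.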
*/ #define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_ph_type *ph) { \ memset(ph, 0, sizeof(ph(a_type))); \ } \ a_attr bool \ a_prefix##empty(a_ph_type *ph) { \ return (ph->ph_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_ph_type *ph) { \ if (ph->ph_root == NULL) { \ return NULL; \ } \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ return ph->ph_root; \ } \ a_attr a_type * \ a_prefix##any(a_ph_type *ph) { \ if (ph->ph_root == NULL) { \ return NULL; \ } \ a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \ if (aux != NULL) { \ return aux; \ } \ return ph->ph_root; \ } \ a_attr void \ a_prefix##insert(a_ph_type *ph, a_type *phn) { \ memset(&phn->a_field, 0, sizeof(phn(a_type))); \ \ /* \ * Treat the root as an aux list during insertion, and lazily \ * merge during a_prefix##remove_first(). For elements that \ * are inserted, then removed via a_prefix##remove() before the \ * aux list is ever processed, this makes insert/remove \ * constant-time, whereas eager merging would make insert \ * O(log n). \ */ \ if (ph->ph_root == NULL) { \ ph->ph_root = phn; \ } else { \ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ a_field, ph->ph_root)); \ if (phn_next_get(a_type, a_field, ph->ph_root) != \ NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, ph->ph_root), \ phn); \ } \ phn_prev_set(a_type, a_field, phn, ph->ph_root); \ phn_next_set(a_type, a_field, ph->ph_root, phn); \ } \ } \ a_attr a_type * \ a_prefix##remove_first(a_ph_type *ph) { \ a_type *ret; \ \ if (ph->ph_root == NULL) { \ return NULL; \ } \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ \ ret = ph->ph_root; \ \ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ ph->ph_root); \ \ return ret; \ } \ a_attr a_type * \ a_prefix##remove_any(a_ph_type *ph) { \ /* \ * Remove the most recently inserted aux list element, or the \ * root if the aux list is empty. This has the effect of \ * behaving as a LIFO (and insertion/removal is therefore \ * constant-time) if a_prefix##[remove_]first() are never \ * called. \ */ \ if (ph->ph_root == NULL) { \ return NULL; \ } \ a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \ if (ret != NULL) { \ a_type *aux = phn_next_get(a_type, a_field, ret); \ phn_next_set(a_type, a_field, ph->ph_root, aux); \ if (aux != NULL) { \ phn_prev_set(a_type, a_field, aux, \ ph->ph_root); \ } \ return ret; \ } \ ret = ph->ph_root; \ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ ph->ph_root); \ return ret; \ } \ a_attr void \ a_prefix##remove(a_ph_type *ph, a_type *phn) { \ a_type *replace, *parent; \ \ if (ph->ph_root == phn) { \ /* \ * We can delete from aux list without merging it, but \ * we need to merge if we are dealing with the root \ * node and it has children. \ */ \ if (phn_lchild_get(a_type, a_field, phn) == NULL) { \ ph->ph_root = phn_next_get(a_type, a_field, \ phn); \ if (ph->ph_root != NULL) { \ phn_prev_set(a_type, a_field, \ ph->ph_root, NULL); \ } \ return; \ } \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ if (ph->ph_root == phn) { \ ph_merge_children(a_type, a_field, ph->ph_root, \ a_cmp, ph->ph_root); \ return; \ } \ } \ \ /* Get parent (if phn is leftmost child) before mutating. */ \ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ if (phn_lchild_get(a_type, a_field, parent) != phn) { \ parent = NULL; \ } \ } \ /* Find a possible replacement node, and link to parent. */ \ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ /* Set next/prev for sibling linked list. 
*/ \ if (replace != NULL) { \ if (parent != NULL) { \ phn_prev_set(a_type, a_field, replace, parent); \ phn_lchild_set(a_type, a_field, parent, \ replace); \ } else { \ phn_prev_set(a_type, a_field, replace, \ phn_prev_get(a_type, a_field, phn)); \ if (phn_prev_get(a_type, a_field, phn) != \ NULL) { \ phn_next_set(a_type, a_field, \ phn_prev_get(a_type, a_field, phn), \ replace); \ } \ } \ phn_next_set(a_type, a_field, replace, \ phn_next_get(a_type, a_field, phn)); \ if (phn_next_get(a_type, a_field, phn) != NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, phn), \ replace); \ } \ } else { \ if (parent != NULL) { \ a_type *next = phn_next_get(a_type, a_field, \ phn); \ phn_lchild_set(a_type, a_field, parent, next); \ if (next != NULL) { \ phn_prev_set(a_type, a_field, next, \ parent); \ } \ } else { \ assert(phn_prev_get(a_type, a_field, phn) != \ NULL); \ phn_next_set(a_type, a_field, \ phn_prev_get(a_type, a_field, phn), \ phn_next_get(a_type, a_field, phn)); \ } \ if (phn_next_get(a_type, a_field, phn) != NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, phn), \ phn_prev_get(a_type, a_field, phn)); \ } \ } \ } #endif /* PH_H_ */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/private_namespace.sh010075500007650000024000000001371340421340100251660ustar0000000000000000#!/bin/sh for symbol in `cat "$@"` ; do echo "#define ${symbol} JEMALLOC_N(${symbol})" done jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/private_symbols.sh010075500007650000024000000021741340421340100247250ustar0000000000000000#!/bin/sh # # Generate private_symbols[_jet].awk. # # Usage: private_symbols.sh * # # is typically "" or "_". sym_prefix=$1 shift cat <' output. # # Handle lines like: # 0000000000000008 D opt_junk # 0000000000007574 T malloc_initialized (NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) { print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix)) } # Process 'dumpbin /SYMBOLS ' output. # # Handle lines like: # 353 00008098 SECT4 notype External | opt_junk # 3F1 00000000 SECT7 notype () External | malloc_initialized ($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) { print $NF } EOF jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/prng.h010064400007650000024000000113041340421340100222560ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PRNG_H #define JEMALLOC_INTERNAL_PRNG_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bit_util.h" /* * Simple linear congruential pseudo-random number generator: * * prng(y) = (a*x + c) % m * * where the following constants ensure maximal period: * * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. * c == Odd number (relatively prime to 2^n). * m == 2^32 * * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * * This choice of m has the disadvantage that the quality of the bits is * proportional to bit position. For example, the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. 
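 *
 * As a worked illustration (added here, not part of the implementation):
 * one step of the 32-bit generator below is
 * state1 = state0 * PRNG_A_32 + PRNG_C_32 (mod 2^32), and a caller wanting
 * lg_range bits of output takes state1 >> (32 - lg_range) rather than
 * state1 & ((1U << lg_range) - 1), since the high bits have the longest
 * cycles.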
*/

/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#define PRNG_A_32	UINT32_C(1103515241)
#define PRNG_C_32	UINT32_C(12347)

#define PRNG_A_64	UINT64_C(6364136223846793005)
#define PRNG_C_64	UINT64_C(1442695040888963407)

JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
	return (state * PRNG_A_32) + PRNG_C_32;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
	return (state * PRNG_A_64) + PRNG_C_64;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
	return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
	return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}

/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/

/*
 * The prng_lg_range functions give a uniform int in the half-open range [0,
 * 2**lg_range). If atomic is true, they do so safely from multiple threads.
 * Multithreaded 64-bit prngs aren't supported.
 */

JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
	uint32_t ret, state0, state1;

	assert(lg_range > 0);
	assert(lg_range <= 32);

	state0 = atomic_load_u32(state, ATOMIC_RELAXED);

	if (atomic) {
		do {
			state1 = prng_state_next_u32(state0);
		} while (!atomic_compare_exchange_weak_u32(state, &state0,
		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
	} else {
		state1 = prng_state_next_u32(state0);
		atomic_store_u32(state, state1, ATOMIC_RELAXED);
	}
	ret = state1 >> (32 - lg_range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
	uint64_t ret, state1;

	assert(lg_range > 0);
	assert(lg_range <= 64);

	state1 = prng_state_next_u64(*state);
	*state = state1;
	ret = state1 >> (64 - lg_range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
	size_t ret, state0, state1;

	assert(lg_range > 0);
	assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

	state0 = atomic_load_zu(state, ATOMIC_RELAXED);

	if (atomic) {
		do {
			state1 = prng_state_next_zu(state0);
		} while (!atomic_compare_exchange_weak_zu(state, &state0,
		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
	} else {
		state1 = prng_state_next_zu(state0);
		atomic_store_zu(state, state1, ATOMIC_RELAXED);
	}
	ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

	return ret;
}

/*
 * The prng_range functions behave like the prng_lg_range, but return a result
 * in [0, range) instead of [0, 2**lg_range).
 */

JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
	uint32_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_u32(state, lg_range, atomic);
	} while (ret >= range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
	uint64_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

	/* Generate a result in [0..range) via repeated trial.
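	 * (Added reasoning: lg_range is the ceiling of lg(range), so
	 * 2**lg_range < 2*range; each trial therefore succeeds with
	 * probability greater than 1/2, and the expected number of
	 * iterations is less than 2.)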
*/ do { ret = prng_lg_range_u64(state, lg_range); } while (ret >= range); return ret; } JEMALLOC_ALWAYS_INLINE size_t prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) { size_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_zu(state, lg_range, atomic); } while (ret >= range); return ret; } #endif /* JEMALLOC_INTERNAL_PRNG_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/prof_externs.h010064400007650000024000000066231340421341300240410ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H #define JEMALLOC_INTERNAL_PROF_EXTERNS_H #include "jemalloc/internal/mutex.h" extern malloc_mutex_t bt2gctx_mtx; extern bool opt_prof; extern bool opt_prof_active; extern bool opt_prof_thread_active_init; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* Accessed via prof_active_[gs]et{_unlocked,}(). */ extern bool prof_active; /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ extern bool prof_gdump_val; /* * Profile dump interval, measured in bytes allocated. Each arena triggers a * profile dump when it reaches this threshold. The effect is that the * interval between profile dumps averages prof_interval, though the actual * interval between dumps will tend to be sporadic, and the interval will be a * maximum of approximately (prof_interval * narenas). */ extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. 
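 * (Illustrative arithmetic, not in the original: with the default
 * opt_lg_prof_sample of 19 -- see LG_PROF_SAMPLE_DEFAULT in prof_types.h --
 * an allocation sample is taken on average once per 2^19 = 524288 bytes
 * (512 KiB) allocated.)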
*/ extern size_t lg_prof_sample; void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); #ifdef JEMALLOC_JET size_t prof_tdata_count(void); size_t prof_bt_count(void); #endif typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *JET_MUTABLE prof_dump_open; typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); extern prof_dump_header_t *JET_MUTABLE prof_dump_header; #ifdef JEMALLOC_JET void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, uint64_t *accumbytes); #endif bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); void prof_tdata_cleanup(tsd_t *tsd); bool prof_active_get(tsdn_t *tsdn); bool prof_active_set(tsdn_t *tsdn, bool active); const char *prof_thread_name_get(tsd_t *tsd); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); bool prof_thread_active_get(tsd_t *tsd); bool prof_thread_active_set(tsd_t *tsd, bool active); bool prof_thread_active_init_get(tsdn_t *tsdn); bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); bool prof_gdump_get(tsdn_t *tsdn); bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(tsd_t *tsd); void prof_prefork0(tsdn_t *tsdn); void prof_prefork1(tsdn_t *tsdn); void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); #endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/prof_inlines_a.h010064400007650000024000000046571340421341300243170ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H #define JEMALLOC_INTERNAL_PROF_INLINES_A_H #include "jemalloc/internal/mutex.h" static inline bool prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) { cassert(config_prof); bool overflow; uint64_t a0, a1; /* * If the application allocates fast enough (and/or if idump is slow * enough), extreme overflow here (a1 >= prof_interval * 2) can cause * idump trigger coalescing. This is an intentional mechanism that * avoids rate-limiting allocation. */ #ifdef JEMALLOC_ATOMIC_U64 a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); do { a1 = a0 + accumbytes; assert(a1 >= a0); overflow = (a1 >= prof_interval); if (overflow) { a1 %= prof_interval; } } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); #else malloc_mutex_lock(tsdn, &prof_accum->mtx); a0 = prof_accum->accumbytes; a1 = a0 + accumbytes; overflow = (a1 >= prof_interval); if (overflow) { a1 %= prof_interval; } prof_accum->accumbytes = a1; malloc_mutex_unlock(tsdn, &prof_accum->mtx); #endif return overflow; } static inline void prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) { cassert(config_prof); /* * Cancel out as much of the excessive prof_accumbytes increase as * possible without underflowing. 
Interval-triggered dumps occur * slightly more often than intended as a result of incomplete * canceling. */ uint64_t a0, a1; #ifdef JEMALLOC_ATOMIC_U64 a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); do { a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) : 0; } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); #else malloc_mutex_lock(tsdn, &prof_accum->mtx); a0 = prof_accum->accumbytes; a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) : 0; prof_accum->accumbytes = a1; malloc_mutex_unlock(tsdn, &prof_accum->mtx); #endif } JEMALLOC_ALWAYS_INLINE bool prof_active_get_unlocked(void) { /* * Even if opt_prof is true, sampling can be temporarily disabled by * setting prof_active to false. No locking is used when reading * prof_active in the fast path, so there are no guarantees regarding * how long it will take for all threads to notice state changes. */ return prof_active; } #endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/prof_inlines_b.h010064400007650000024000000125231340421341300243070ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H #define JEMALLOC_INTERNAL_PROF_INLINES_B_H #include "jemalloc/internal/sz.h" JEMALLOC_ALWAYS_INLINE bool prof_gdump_get_unlocked(void) { /* * No locking is used when reading prof_gdump_val in the fast path, so * there are no guarantees regarding how long it will take for all * threads to notice state changes. */ return prof_gdump_val; } JEMALLOC_ALWAYS_INLINE prof_tdata_t * prof_tdata_get(tsd_t *tsd, bool create) { prof_tdata_t *tdata; cassert(config_prof); tdata = tsd_prof_tdata_get(tsd); if (create) { if (unlikely(tdata == NULL)) { if (tsd_nominal(tsd)) { tdata = prof_tdata_init(tsd); tsd_prof_tdata_set(tsd, tdata); } } else if (unlikely(tdata->expired)) { tdata = prof_tdata_reinit(tsd, tdata); tsd_prof_tdata_set(tsd, tdata); } assert(tdata == NULL || tdata->attached); } return tdata; } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); return arena_prof_tctx_get(tsdn, ptr, alloc_ctx); } JEMALLOC_ALWAYS_INLINE void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx); } JEMALLOC_ALWAYS_INLINE void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_reset(tsdn, ptr, tctx); } JEMALLOC_ALWAYS_INLINE bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, prof_tdata_t **tdata_out) { prof_tdata_t *tdata; cassert(config_prof); tdata = prof_tdata_get(tsd, true); if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) { tdata = NULL; } if (tdata_out != NULL) { *tdata_out = tdata; } if (unlikely(tdata == NULL)) { return true; } if (likely(tdata->bytes_until_sample >= usize)) { if (update) { tdata->bytes_until_sample -= usize; } return true; } else { if (tsd_reentrancy_level_get(tsd) > 0) { return true; } /* Compute new sample threshold. 
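		 * (Descriptive note: prof_sample_threshold_update() draws a
		 * fresh tdata->bytes_until_sample using tdata->prng_state, so
		 * that samples stay an average of 2^lg_prof_sample bytes
		 * apart; see the sampling-state fields in prof_structs.h.)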
*/ if (update) { prof_sample_threshold_update(tdata); } return !tdata->active; } } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { prof_tctx_t *ret; prof_tdata_t *tdata; prof_bt_t bt; assert(usize == sz_s2u(usize)); if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, &tdata))) { ret = (prof_tctx_t *)(uintptr_t)1U; } else { bt_init(&bt, tdata->vec); prof_backtrace(&bt); ret = prof_lookup(tsd, &bt); } return ret; } JEMALLOC_ALWAYS_INLINE void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); assert(usize == isalloc(tsdn, ptr)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { prof_malloc_sample_object(tsdn, ptr, usize, tctx); } else { prof_tctx_set(tsdn, ptr, usize, alloc_ctx, (prof_tctx_t *)(uintptr_t)1U); } } JEMALLOC_ALWAYS_INLINE void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) { bool sampled, old_sampled, moved; cassert(config_prof); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); if (prof_active && !updated && ptr != NULL) { assert(usize == isalloc(tsd_tsdn(tsd), ptr)); if (prof_sample_accum_update(tsd, usize, true, NULL)) { /* * Don't sample. The usize passed to prof_alloc_prep() * was larger than what actually got allocated, so a * backtrace was captured for this allocation, even * though its actual usize was insufficient to cross the * sample threshold. */ prof_alloc_rollback(tsd, tctx, true); tctx = (prof_tctx_t *)(uintptr_t)1U; } } sampled = ((uintptr_t)tctx > (uintptr_t)1U); old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); moved = (ptr != old_ptr); if (unlikely(sampled)) { prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); } else if (moved) { prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL, (prof_tctx_t *)(uintptr_t)1U); } else if (unlikely(old_sampled)) { /* * prof_tctx_set() would work for the !moved case as well, but * prof_tctx_reset() is slightly cheaper, and the proper thing * to do here in the presence of explicit knowledge re: moved * state. */ prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx); } else { assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) == (uintptr_t)1U); } /* * The prof_free_sampled_object() call must come after the * prof_malloc_sample_object() call, because tctx and old_tctx may be * the same, in which case reversing the call order could cause the tctx * to be prematurely destroyed as a side effect of momentarily zeroed * counters. */ if (unlikely(old_sampled)) { prof_free_sampled_object(tsd, old_usize, old_tctx); } } JEMALLOC_ALWAYS_INLINE void prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) { prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); cassert(config_prof); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { prof_free_sampled_object(tsd, usize, tctx); } } #endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/prof_structs.h010064400007650000024000000120621340421341300240520ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H #define JEMALLOC_INTERNAL_PROF_STRUCTS_H #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/rb.h" struct prof_bt_s { /* Backtrace, stored as len program counters. 
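	 * (Descriptive note: vec is caller-owned storage; bt_init() points
	 * it at an external array such as prof_tdata_t's vec of PROF_BT_MAX
	 * entries, as done in prof_alloc_prep().)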
*/ void **vec; unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { prof_bt_t *bt; unsigned max; } prof_unwind_data_t; #endif struct prof_accum_s { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_t mtx; uint64_t accumbytes; #else atomic_u64_t accumbytes; #endif }; struct prof_cnt_s { /* Profiling counters. */ uint64_t curobjs; uint64_t curbytes; uint64_t accumobjs; uint64_t accumbytes; }; typedef enum { prof_tctx_state_initializing, prof_tctx_state_nominal, prof_tctx_state_dumping, prof_tctx_state_purgatory /* Dumper must finish destroying. */ } prof_tctx_state_t; struct prof_tctx_s { /* Thread data for thread that performed the allocation. */ prof_tdata_t *tdata; /* * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be * defunct during teardown. */ uint64_t thr_uid; uint64_t thr_discrim; /* Profiling counters, protected by tdata->lock. */ prof_cnt_t cnts; /* Associated global context. */ prof_gctx_t *gctx; /* * UID that distinguishes multiple tctx's created by the same thread, * but coexisting in gctx->tctxs. There are two ways that such * coexistence can occur: * - A dumper thread can cause a tctx to be retained in the purgatory * state. * - Although a single "producer" thread must create all tctx's which * share the same thr_uid, multiple "consumers" can each concurrently * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only * gets called once each time cnts.cur{objs,bytes} drop to 0, but this * threshold can be hit again before the first consumer finishes * executing prof_tctx_destroy(). */ uint64_t tctx_uid; /* Linkage into gctx's tctxs. */ rb_node(prof_tctx_t) tctx_link; /* * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents * sample vs destroy race. */ bool prepared; /* Current dump-related state, protected by gctx->lock. */ prof_tctx_state_t state; /* * Copy of cnts snapshotted during early dump phase, protected by * dump_mtx. */ prof_cnt_t dump_cnts; }; typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; struct prof_gctx_s { /* Protects nlimbo, cnt_summed, and tctxs. */ malloc_mutex_t *lock; /* * Number of threads that currently cause this gctx to be in a state of * limbo due to one of: * - Initializing this gctx. * - Initializing per thread counters associated with this gctx. * - Preparing to destroy this gctx. * - Dumping a heap profile that includes this gctx. * nlimbo must be 1 (single destroyer) in order to safely destroy the * gctx. */ unsigned nlimbo; /* * Tree of profile counters, one for each thread that has allocated in * this context. */ prof_tctx_tree_t tctxs; /* Linkage for tree of contexts to be dumped. */ rb_node(prof_gctx_t) dump_link; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Associated backtrace. */ prof_bt_t bt; /* Backtrace vector, variable size, referred to by bt. */ void *vec[1]; }; typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; struct prof_tdata_s { malloc_mutex_t *lock; /* Monotonically increasing unique thread identifier. */ uint64_t thr_uid; /* * Monotonically increasing discriminator among tdata structures * associated with the same thr_uid. */ uint64_t thr_discrim; /* Included in heap profile dumps if non-NULL. */ char *thread_name; bool attached; bool expired; rb_node(prof_tdata_t) tdata_link; /* * Counter used to initialize prof_tctx_t's tctx_uid. No locking is * necessary when incrementing this field, because only one thread ever * does so. 
*/ uint64_t tctx_uid_next; /* * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks * backtraces for which it has non-zero allocation/deallocation counters * associated with thread-specific prof_tctx_t objects. Other threads * may write to prof_tctx_t contents when freeing associated objects. */ ckh_t bt2tctx; /* Sampling state. */ uint64_t prng_state; uint64_t bytes_until_sample; /* State used to avoid dumping while operating on prof internals. */ bool enq; bool enq_idump; bool enq_gdump; /* * Set to true during an early dump phase for tdata's which are * currently being dumped. New threads' tdata's have this initialized * to false so that they aren't accidentally included in later dump * phases. */ bool dumping; /* * True if profiling is active for this tdata's thread * (thread.prof.active mallctl). */ bool active; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Backtrace vector, used for calls to prof_backtrace(). */ void *vec[PROF_BT_MAX]; }; typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; #endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/prof_types.h010064400007650000024000000033621340421340100235070ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H #define JEMALLOC_INTERNAL_PROF_TYPES_H typedef struct prof_bt_s prof_bt_t; typedef struct prof_accum_s prof_accum_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_tctx_s prof_tctx_t; typedef struct prof_gctx_s prof_gctx_t; typedef struct prof_tdata_s prof_tdata_t; /* Option defaults. */ #ifdef JEMALLOC_PROF # define PROF_PREFIX_DEFAULT "jeprof" #else # define PROF_PREFIX_DEFAULT "" #endif #define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that * is based on __builtin_return_address() necessarily has a hard-coded number * of backtrace frame handlers, and should be kept in sync with this setting. */ #define PROF_BT_MAX 128 /* Initial hash table size. */ #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #define PROF_DUMP_BUFSIZE 65536 /* Size of stack-allocated buffer used by prof_printf(). */ #define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all gctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NCTX_LOCKS 1024 /* * Number of mutexes shared among all tdata's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NTDATA_LOCKS 256 /* * prof_tdata pointers close to NULL are used to encode state information that * is used for cleaning up during thread shutdown. 
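 * (Descriptive note: real prof_tdata_t pointers are always well above these
 * values, so callers distinguish the sentinels from live pointers with an
 * unsigned compare, e.g.
 * (uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX
 * in prof_sample_accum_update().)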
*/ #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) #define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/public_namespace.sh010075500007650000024000000002011340421340100247620ustar0000000000000000#!/bin/sh for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` echo "#define je_${n} JEMALLOC_N(${n})" done jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/public_unnamespace.sh010075500007650000024000000001571340421340100253370ustar0000000000000000#!/bin/sh for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` echo "#undef je_${n}" done jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/ql.h010064400007650000024000000047101340421340100217270ustar0000000000000000#ifndef JEMALLOC_INTERNAL_QL_H #define JEMALLOC_INTERNAL_QL_H #include "jemalloc/internal/qr.h" /* List definitions. */ #define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } #define ql_head_initializer(a_head) {NULL} #define ql_elm(a_type) qr(a_type) /* List functions. */ #define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) #define ql_first(a_head) ((a_head)->qlh_first) #define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) #define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) #define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) #define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) #define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) #define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) #define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ if (ql_first(a_head) != (a_elm)) { \ qr_remove((a_elm), a_field); \ } else { \ ql_first(a_head) = NULL; \ } \ } while (0) #define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) #define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) #endif /* JEMALLOC_INTERNAL_QL_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/qr.h010064400007650000024000000044561340421340100217440ustar0000000000000000#ifndef JEMALLOC_INTERNAL_QR_H #define JEMALLOC_INTERNAL_QR_H /* Ring definitions. */ #define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. 
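 *
 * An illustrative sketch (hypothetical names, not from the jemalloc
 * sources): a type joins a ring by embedding qr() and initializing each
 * element with qr_new():
 *
 *   typedef struct widget_s widget_t;
 *   struct widget_s {
 *       int id;
 *       qr(widget_t) link;
 *   };
 *
 *   widget_t a, b, *w;
 *   qr_new(&a, link);
 *   qr_new(&b, link);
 *   qr_after_insert(&a, &b, link);  // ring is now a-b
 *   qr_foreach(w, &a, link) {
 *       // visits a, then b
 *   }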
*/
#define qr_new(a_qr, a_field) do { \
	(a_qr)->a_field.qre_next = (a_qr); \
	(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
	(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
	(a_qr)->a_field.qre_next = (a_qrelm); \
	(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
	(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
	(a_qr)->a_field.qre_prev = (a_qrelm); \
	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
	(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)

#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
	a_type *t; \
	(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
	(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
	t = (a_qr_a)->a_field.qre_prev; \
	(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
	(a_qr_b)->a_field.qre_prev = t; \
} while (0)

/*
 * qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code.
 */
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
	qr_meld((a_qr_a), (a_qr_b), a_type, a_field)

#define qr_remove(a_qr, a_field) do { \
	(a_qr)->a_field.qre_prev->a_field.qre_next \
	    = (a_qr)->a_field.qre_next; \
	(a_qr)->a_field.qre_next->a_field.qre_prev \
	    = (a_qr)->a_field.qre_prev; \
	(a_qr)->a_field.qre_next = (a_qr); \
	(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_foreach(var, a_qr, a_field) \
	for ((var) = (a_qr); \
	    (var) != NULL; \
	    (var) = (((var)->a_field.qre_next != (a_qr)) \
	    ? (var)->a_field.qre_next : NULL))

#define qr_reverse_foreach(var, a_qr, a_field) \
	for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
	    (var) != NULL; \
	    (var) = (((var) != (a_qr)) \
	    ? (var)->a_field.qre_prev : NULL))

#endif /* JEMALLOC_INTERNAL_QR_H */
jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/rb.h010064400007650000024000001126411340421340100217210ustar0000000000000000/*-
 *******************************************************************************
 *
 * cpp macro implementation of left-leaning 2-3 red-black trees. Parent
 * pointers are not used, and color bits are stored in the least significant
 * bit of right-child pointers (if RB_COMPACT is defined), thus making node
 * linkage as compact as is possible for red-black trees.
 *
 * Usage:
 *
 *   #include <stdint.h>
 *   #include <stdbool.h>
 *   #define NDEBUG // (Optional, see assert(3).)
 *   #include <assert.h>
 *   #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
 *   #include <rb.h>
 *   ...
 *
 *******************************************************************************
 */

#ifndef RB_H_
#define RB_H_

#ifndef __PGI
#define RB_COMPACT
#endif

#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
struct { \
    a_type *rbn_left; \
    a_type *rbn_right_red; \
}
#else
#define rb_node(a_type) \
struct { \
    a_type *rbn_left; \
    a_type *rbn_right; \
    bool rbn_red; \
}
#endif

/* Root structure. */
#define rb_tree(a_type) \
struct { \
    a_type *rbt_root; \
}

/* Left accessors. */
#define rbtn_left_get(a_type, a_field, a_node) \
    ((a_node)->a_field.rbn_left)
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
    (a_node)->a_field.rbn_left = a_left; \
} while (0)

#ifdef RB_COMPACT
/* Right accessors.
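 * (Descriptive note: under RB_COMPACT the node color lives in bit 0 of
 * rbn_right_red, a bit left unused by node alignment -- rbt_node_new()
 * asserts ((uintptr_t)(a_node) & 0x1) == 0 -- so the right-child pointer is
 * recovered by masking with (ssize_t)-2, i.e. ~1, and the color by masking
 * with 1.)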
*/ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ /* Bookkeeping bit cannot be used by node pointer. */ \ assert(((uintptr_t)(a_node) & 0x1) == 0); \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) #else /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) #endif /* Tree initializer. */ #define rb_new(a_type, a_field, a_rbt) do { \ (a_rbt)->rbt_root = NULL; \ } while (0) /* Internal utility macros. 
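 * (Descriptive note: rbtn_rotate_left()/rbtn_rotate_right() below are the
 * standard single rotations; a left rotation of node x promotes x's right
 * child r, moving r's former left subtree into x's right slot and making x
 * r's left child, exactly as the two pointer swaps read.)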
*/ #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; \ rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ } while (0) /* * The rb_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to rb_gen(). */ #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ a_attr void \ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, * based on the above cpp macros. * * Arguments: * * a_attr : Function attribute for generated functions (ex: static). * a_prefix : Prefix for generated functions (ex: ex_). * a_rb_type : Type for red-black tree data structure (ex: ex_t). * a_type : Type for red-black tree node data structure (ex: ex_node_t). * a_field : Name of red-black tree node linkage (ex: ex_link). * a_cmp : Node comparison function name, with the following prototype: * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key * Interpretation of comparison function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other * In all cases, the a_node or a_key macro argument is the first * argument to the comparison function, which makes it possible * to write comparison functions that treat the first argument * specially. 
* * Assuming the following setup: * * typedef struct ex_node_s ex_node_t; * struct ex_node_s { * rb_node(ex_node_t) ex_link; * }; * typedef rb_tree(ex_node_t) ex_t; * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) * * The following API is generated: * * static void * ex_new(ex_t *tree); * Description: Initialize a red-black tree structure. * Args: * tree: Pointer to an uninitialized red-black tree object. * * static bool * ex_empty(ex_t *tree); * Description: Determine whether tree is empty. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: True if tree is empty, false otherwise. * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * * ex_last(ex_t *tree); * Description: Get the first/last node in tree. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: First/last node in tree, or NULL if tree is empty. * * static ex_node_t * * ex_next(ex_t *tree, ex_node_t *node); * static ex_node_t * * ex_prev(ex_t *tree, ex_node_t *node); * Description: Get node's successor/predecessor. * Args: * tree: Pointer to an initialized red-black tree object. * node: A node in tree. * Ret: node's successor/predecessor in tree, or NULL if node is * last/first. * * static ex_node_t * * ex_search(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or NULL if no match. * * static ex_node_t * * ex_nsearch(ex_t *tree, const ex_node_t *key); * static ex_node_t * * ex_psearch(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or if no match, hypothetical node's * successor/predecessor (NULL if no successor/predecessor). * * static void * ex_insert(ex_t *tree, ex_node_t *node); * Description: Insert node into tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node to be inserted into tree. * * static void * ex_remove(ex_t *tree, ex_node_t *node); * Description: Remove node from tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node in tree to be removed. * * static ex_node_t * * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * static ex_node_t * * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * Description: Iterate forward/backward over tree, starting at node. If * tree is modified, iteration must be immediately * terminated by the callback function that causes the * modification. * Args: * tree : Pointer to an initialized red-black tree object. * start: Node at which to start iteration, or NULL to start at * first/last node. * cb : Callback function, which is called for each node during * iteration. Under normal circumstances the callback function * should return NULL, which causes iteration to continue. If a * callback function returns non-NULL, iteration is immediately * terminated and the non-NULL return value is returned by the * iterator. This is useful for re-starting iteration after * modifying tree. * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. 
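 *
 *   (Illustrative only, not part of the generated API: a callback that
 *   stops iteration at the first node satisfying some predicate could be
 *   written as
 *
 *       static ex_node_t *
 *       find_cb(ex_t *tree, ex_node_t *node, void *arg) {
 *           return want(node, arg) ? node : NULL;
 *       }
 *
 *   where want() is a hypothetical predicate; ex_iter(tree, NULL, find_cb,
 *   arg) then returns the matching node, or NULL if none matched.)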
* * static void * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); * Description: Iterate over the tree with post-order traversal, remove * each node, and run the callback if non-null. This is * used for destroying a tree without paying the cost to * rebalance it. The tree must not be otherwise altered * during traversal. * Args: * tree: Pointer to an initialized red-black tree object. * cb : Callback function, which, if non-null, is called for each node * during iteration. There is no way to stop iteration once it * has begun. * arg : Opaque pointer passed to cb(). */ #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree) { \ return (rbtree->rbt_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return ret; \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return ret; \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_right_get(a_type, a_field, node) != NULL) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_left_get(a_type, a_field, node) != NULL) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ while (ret != NULL \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ } else { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ return ret; \ } \ a_attr void \ a_prefix##insert(a_rbt_type 
*rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } path[sizeof(void *) << 4], *pathp; \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ } \ } \ pathp->node = node; \ /* Unwind. */ \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ a_type *cnode = pathp->node; \ if (pathp->cmp < 0) { \ a_type *left = pathp[1].node; \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, cnode, tnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } else { \ a_type *right = pathp[1].node; \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ if (left != NULL && rbtn_red_get(a_type, a_field, \ left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ rbtn_red_set(a_type, a_field, cnode); \ } else { \ /* Lean left. */ \ a_type *tnode; \ bool tred = rbtn_red_get(a_type, a_field, cnode); \ rbtn_rotate_left(a_type, a_field, cnode, tnode); \ rbtn_color_set(a_type, a_field, tnode, tred); \ rbtn_red_set(a_type, a_field, cnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } \ pathp->node = cnode; \ } \ /* Set root, and make it black. */ \ rbtree->rbt_root = path->node; \ rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ } \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } *pathp, *nodep, path[sizeof(void *) << 4]; \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ if (cmp == 0) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ for (pathp++; pathp->node != NULL; pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } \ break; \ } \ } \ } \ assert(nodep->node == node); \ pathp--; \ if (pathp->node != node) { \ /* Swap node with its successor. */ \ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ rbtn_color_set(a_type, a_field, pathp->node, \ rbtn_red_get(a_type, a_field, node)); \ rbtn_left_set(a_type, a_field, pathp->node, \ rbtn_left_get(a_type, a_field, node)); \ /* If node's successor is its right child, the following code */\ /* will do the wrong thing for the right child pointer. */\ /* However, it doesn't matter, because the pointer will be */\ /* properly set when the successor is pruned. */\ rbtn_right_set(a_type, a_field, pathp->node, \ rbtn_right_get(a_type, a_field, node)); \ rbtn_color_set(a_type, a_field, node, tred); \ /* The pruned leaf node's child pointers are never accessed */\ /* again, so don't bother setting them to nil. 
*/\ nodep->node = pathp->node; \ pathp->node = node; \ if (nodep == path) { \ rbtree->rbt_root = nodep->node; \ } else { \ if (nodep[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } else { \ rbtn_right_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ if (left != NULL) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. */\ assert(!rbtn_red_get(a_type, a_field, node)); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ rbtree->rbt_root = left; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ left); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ left); \ } \ } \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ rbtree->rbt_root = NULL; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ pathp->node = NULL; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. */\ /* */\ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ /* */\ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } else { \ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ /* */\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, rightleft); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ a_type *tnode; \ rbtn_red_set(a_type, a_field, pathp->node); \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ pathp->node = tnode; \ } \ } \ } else { \ a_type *left; \ rbtn_right_set(a_type, a_field, pathp->node, \ pathp[1].node); \ left = rbtn_left_get(a_type, a_field, pathp->node); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *tnode; \ a_type *leftright = rbtn_right_get(a_type, a_field, \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ if (leftrightleft != NULL && rbtn_red_get(a_type, \ a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (r) */\ a_type *unode; \ rbtn_black_set(a_type, a_field, leftrightleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ unode); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_right_set(a_type, a_field, unode, tnode); \ rbtn_rotate_left(a_type, a_field, unode, tnode); \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (b) */\ assert(leftright != NULL); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_black_set(a_type, a_field, tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root, which may actually be the tree root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ } \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, pathp->node); \ /* Balance restored. */ \ return; \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ } \ } \ } \ } \ /* Set root. */ \ rbtree->rbt_root = path->node; \ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ arg)) != NULL) { \ return ret; \ } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ } else if (cmp > 0) { \ return a_prefix##iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ cb, arg); \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ return ret; \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } else if (cmp < 0) { \ return a_prefix##reverse_iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if 
(start != NULL) { \
	ret = a_prefix##reverse_iter_start(rbtree, start, \
	    rbtree->rbt_root, cb, arg); \
    } else { \
	ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
	    cb, arg); \
    } \
    return ret; \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
    a_type *, void *), void *arg) { \
    if (node == NULL) { \
	return; \
    } \
    a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
	node), cb, arg); \
    rbtn_left_set(a_type, a_field, (node), NULL); \
    a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
	node), cb, arg); \
    rbtn_right_set(a_type, a_field, (node), NULL); \
    if (cb) { \
	cb(node, arg); \
    } \
} \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
    void *arg) { \
    a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
    rbtree->rbt_root = NULL; \
}
#endif /* RB_H_ */
jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/rtree.h010064400007650000024000000377001340421341300224440ustar0000000000000000#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd.h"

/*
 * This radix tree implementation is tailored to the singular purpose of
 * associating metadata with extents that are currently owned by jemalloc.
 *
 *******************************************************************************
 */

/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insignificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
#  define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
#  define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
#  define RTREE_HEIGHT 3
#else
#  error Unsupported number of significant virtual address bits
#endif

/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL_NSIZES
#  define RTREE_LEAF_COMPACT
#endif

/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)

typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
	atomic_p_t	child; /* (rtree_{node,leaf}_elm_t *) */
};

struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
	/*
	 * Single pointer-width field containing all three leaf element fields.
	 * For example, on a 64-bit x64 system with 48 significant virtual
	 * memory address bits, the index, extent, and slab fields are packed as
	 * such:
	 *
	 * x: index
	 * e: extent
	 * b: slab
	 *
	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
	 */
	atomic_p_t	le_bits;
#else
	atomic_p_t	le_extent; /* (extent_t *) */
	atomic_u_t	le_szind; /* (szind_t) */
	atomic_b_t	le_slab; /* (bool) */
#endif
};

typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
	/* Number of key bits distinguished by this level. */
	unsigned bits;
	/*
	 * Cumulative number of key bits distinguished by traversing to
	 * corresponding tree level.
	 */
	unsigned cumbits;
};

typedef struct rtree_s rtree_t;
struct rtree_s {
	malloc_mutex_t		init_lock;
	/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
	rtree_node_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#else
	rtree_leaf_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#endif
};

/*
 * Split the bits into one to three partitions depending on number of
 * significant bits.
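 *
 * A worked example (illustrative, not a statement about any particular
 * build): on a 64-bit system with LG_VADDR == 48 and 4 KiB pages
 * (LG_PAGE == 12), RTREE_NSB == 36, so RTREE_HEIGHT == 2 and each level
 * distinguishes 18 key bits.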
 * If the number of bits does not divide evenly into the number of levels,
 * place one remainder bit per level starting at the leaf level.
 */
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
	{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
	{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
	{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
	{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
	{RTREE_NSB/3 + RTREE_NSB%3/2,
	    RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
	{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
#else
#  error Unsupported rtree height
#endif
};

bool rtree_new(rtree_t *rtree, bool zeroed);

typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;

typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;

typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;

typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
	    rtree_levels[RTREE_HEIGHT-1].bits);
	unsigned maskbits = ptrbits - cumbits;
	uintptr_t mask = ~((ZU(1) << maskbits) - 1);
	return (key & mask);
}

JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
	    rtree_levels[RTREE_HEIGHT-1].bits);
	unsigned maskbits = ptrbits - cumbits;
	return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = rtree_levels[level].cumbits;
	unsigned shiftbits = ptrbits - cumbits;
	unsigned maskbits = rtree_levels[level].bits;
	uintptr_t mask = (ZU(1) << maskbits) - 1;
	return ((key >> shiftbits) & mask);
}

/*
 * Atomic getters.
 *
 * dependent: Reading a value on behalf of a pointer to a valid allocation
 *            is guaranteed to be a clean read even without synchronization,
 *            because the rtree update became visible in memory before the
 *            pointer came into existence.
 * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
 *             dependent on a previous rtree write, which means a stale read
 *             could result if synchronization were omitted here.
 */
#  ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
	return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
#    ifdef __aarch64__
	/*
	 * aarch64 doesn't sign extend the highest virtual address bit to set
	 * the higher ones.  Instead, the high bits get zeroed.
	 */
	uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
	/* Mask off the slab bit.
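	 * (The slab flag occupies bit 0 of the packed word: the trailing "b"
	 * in the le_bits layout diagram above.)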
*/ uintptr_t low_bit_mask = ~(uintptr_t)1; uintptr_t mask = high_bit_mask & low_bit_mask; return (extent_t *)(bits & mask); # else /* Restore sign-extended high bits, mask slab bit. */ return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >> RTREE_NHIB) & ~((uintptr_t)0x1)); # endif } JEMALLOC_ALWAYS_INLINE szind_t rtree_leaf_elm_bits_szind_get(uintptr_t bits) { return (szind_t)(bits >> LG_VADDR); } JEMALLOC_ALWAYS_INLINE bool rtree_leaf_elm_bits_slab_get(uintptr_t bits) { return (bool)(bits & (uintptr_t)0x1); } # endif JEMALLOC_ALWAYS_INLINE extent_t * rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); return rtree_leaf_elm_bits_extent_get(bits); #else extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); return extent; #endif } JEMALLOC_ALWAYS_INLINE szind_t rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); return rtree_leaf_elm_bits_szind_get(bits); #else return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); #endif } JEMALLOC_ALWAYS_INLINE bool rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); return rtree_leaf_elm_bits_slab_get(bits); #else return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); #endif } static inline void rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_t *elm, extent_t *extent) { #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); #endif } static inline void rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_t *elm, szind_t szind) { assert(szind <= NSIZES); #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); #endif } static inline void rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_t *elm, bool slab) { #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); #endif } static inline void rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) { 
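	/*
	 * Illustrative packing example (a sketch, assuming RTREE_LEAF_COMPACT
	 * and LG_VADDR == 48): szind 5, a page-aligned extent at
	 * 0x7f0000001000, and slab == true pack to
	 *
	 *   ((uintptr_t)5 << 48) | (uintptr_t)0x7f0000001000 | 1
	 *       == 0x00057f0000001001
	 */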
#ifdef RTREE_LEAF_COMPACT uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); /* * Write extent last, since the element is atomically considered valid * as soon as the extent field is non-NULL. */ rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent); #endif } static inline void rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, szind_t szind, bool slab) { assert(!slab || szind < NBINS); /* * The caller implicitly assures that it is the only writer to the szind * and slab fields, and that the extent field cannot currently change. */ rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); } JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing) { assert(key != 0); assert(!dependent || !init_missing); size_t slot = rtree_cache_direct_map(key); uintptr_t leafkey = rtree_leafkey(key); assert(leafkey != RTREE_LEAFKEY_INVALID); /* Fast path: L1 direct mapped cache. */ if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; assert(leaf != NULL); uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); return &leaf[subkey]; } /* * Search the L2 LRU cache. On hit, swap the matching element into the * slot in L1 cache, and move the position in L2 up by 1. */ #define RTREE_CACHE_CHECK_L2(i) do { \ if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ assert(leaf != NULL); \ if (i > 0) { \ /* Bubble up by one. */ \ rtree_ctx->l2_cache[i].leafkey = \ rtree_ctx->l2_cache[i - 1].leafkey; \ rtree_ctx->l2_cache[i].leaf = \ rtree_ctx->l2_cache[i - 1].leaf; \ rtree_ctx->l2_cache[i - 1].leafkey = \ rtree_ctx->cache[slot].leafkey; \ rtree_ctx->l2_cache[i - 1].leaf = \ rtree_ctx->cache[slot].leaf; \ } else { \ rtree_ctx->l2_cache[0].leafkey = \ rtree_ctx->cache[slot].leafkey; \ rtree_ctx->l2_cache[0].leaf = \ rtree_ctx->cache[slot].leaf; \ } \ rtree_ctx->cache[slot].leafkey = leafkey; \ rtree_ctx->cache[slot].leaf = leaf; \ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ return &leaf[subkey]; \ } \ } while (0) /* Check the first cache entry. */ RTREE_CACHE_CHECK_L2(0); /* Search the remaining cache elements. */ for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) { RTREE_CACHE_CHECK_L2(i); } #undef RTREE_CACHE_CHECK_L2 return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, dependent, init_missing); } static inline bool rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, extent_t *extent, szind_t szind, bool slab) { /* Use rtree_clear() to set the extent to NULL. 
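	 *
	 * Illustrative caller sketch (not jemalloc code; addr and rtree_ctx
	 * are hypothetical names assumed to be in scope):
	 *
	 *   if (rtree_write(tsdn, rtree, rtree_ctx, (uintptr_t)addr,
	 *       extent, szind, slab)) {
	 *           ... failed to allocate a missing interior node ...
	 *   }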
*/ assert(extent != NULL); rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true); if (elm == NULL) { return true; } assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); return false; } JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent) { rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent, false); if (!dependent && elm == NULL) { return NULL; } assert(elm != NULL); return elm; } JEMALLOC_ALWAYS_INLINE extent_t * rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return NULL; } return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); } JEMALLOC_ALWAYS_INLINE szind_t rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return NSIZES; } return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); } /* * rtree_slab_read() is intentionally omitted because slab is always read in * conjunction with szind, which makes rtree_szind_slab_read() a better choice. */ JEMALLOC_ALWAYS_INLINE bool rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return true; } *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); return false; } JEMALLOC_ALWAYS_INLINE bool rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return true; } #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); *r_szind = rtree_leaf_elm_bits_szind_get(bits); *r_slab = rtree_leaf_elm_bits_slab_get(bits); #else *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent); #endif return false; } static inline void rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, szind_t szind, bool slab) { assert(!slab || szind < NBINS); rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab); } static inline void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != NULL); rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false); } #endif /* JEMALLOC_INTERNAL_RTREE_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/rtree_tsd.h010064400007650000024000000034771340421341300233220ustar0000000000000000#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H #define JEMALLOC_INTERNAL_RTREE_CTX_H /* * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. 
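 * With the defaults below, that is 16 direct-mapped L1 entries and 8 LRU L2
 * entries.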
 * Each entry supports an entire leaf, so the cache hit rate is typically high
 * even with a small number of entries.  In rare cases extent activity will
 * straddle the boundary between two leaf nodes.  Furthermore, an arena may use
 * a combination of dss and mmap.  Note that as memory usage grows past the
 * amount that this cache can directly cover, the cache will become less
 * effective if locality of reference is low, but the consequence is merely
 * cache misses while traversing the tree nodes.
 *
 * The L1 direct-mapped cache offers consistent and low cost on cache hits.
 * However, collisions can reduce the hit rate.  This is resolved by combining
 * it with an L2 LRU cache, which requires linear search and re-ordering on
 * access but suffers no collisions.  Note that the cache will itself suffer
 * cache misses if made overly large, plus there is the cost of linear search
 * in the LRU cache.
 */
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8

/*
 * Zero initializer required for tsd initialization only.  Proper
 * initialization is done via rtree_ctx_data_init().
 */
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}}

typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;

typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
	uintptr_t		leafkey;
	rtree_leaf_elm_t	*leaf;
};

typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
	/* Direct mapped cache. */
	rtree_ctx_cache_elm_t	cache[RTREE_CTX_NCACHE];
	/* L2 LRU cache. */
	rtree_ctx_cache_elm_t	l2_cache[RTREE_CTX_NCACHE_L2];
};

void rtree_ctx_data_init(rtree_ctx_t *ctx);

#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/size_classes.sh010075500007650000024000000231061340421341300241730ustar0000000000000000#!/bin/sh
#
# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>

# The following limits are chosen such that they cover all supported platforms.

# Pointer sizes.
lg_zarr="2 3"

# Quanta.
lg_qarr=$1

# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
lg_tmin=$2

# Maximum lookup size.
lg_kmax=12

# Page sizes.
lg_parr=`echo $3 | tr ',' ' '`

# Size class group size (number of size classes for each size doubling).
lg_g=$4

pow2() {
  e=$1
  pow2_result=1
  while [ ${e} -gt 0 ] ; do
    pow2_result=$((${pow2_result} + ${pow2_result}))
    e=$((${e} - 1))
  done
}

lg() {
  x=$1
  lg_result=0
  while [ ${x} -gt 1 ] ; do
    lg_result=$((${lg_result} + 1))
    x=$((${x} / 2))
  done
}

lg_ceil() {
  y=$1
  lg ${y}; lg_floor=${lg_result}
  pow2 ${lg_floor}; pow2_floor=${pow2_result}
  if [ ${pow2_floor} -lt ${y} ] ; then
    lg_ceil_result=$((${lg_floor} + 1))
  else
    lg_ceil_result=${lg_floor}
  fi
}

reg_size_compute() {
  lg_grp=$1
  lg_delta=$2
  ndelta=$3

  pow2 ${lg_grp}; grp=${pow2_result}
  pow2 ${lg_delta}; delta=${pow2_result}
  reg_size=$((${grp} + ${delta}*${ndelta}))
}

slab_size() {
  lg_p=$1
  lg_grp=$2
  lg_delta=$3
  ndelta=$4

  pow2 ${lg_p}; p=${pow2_result}
  reg_size_compute ${lg_grp} ${lg_delta} ${ndelta}

  # Compute smallest slab size that is an integer multiple of reg_size.
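  # A worked example (illustrative): with 4 KiB pages (p = 4096) and
  # reg_size = 96, the loop below tries 4096 (42 regs, 64 bytes wasted) and
  # 8192 (85 regs, 32 bytes wasted) before settling on 12288 == 128 * 96,
  # i.e. slab_size_pgs = 3.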
try_slab_size=${p} try_nregs=$((${try_slab_size} / ${reg_size})) perfect=0 while [ ${perfect} -eq 0 ] ; do perfect_slab_size=${try_slab_size} perfect_nregs=${try_nregs} try_slab_size=$((${try_slab_size} + ${p})) try_nregs=$((${try_slab_size} / ${reg_size})) if [ ${perfect_slab_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then perfect=1 fi done slab_size_pgs=$((${perfect_slab_size} / ${p})) } size_class() { index=$1 lg_grp=$2 lg_delta=$3 ndelta=$4 lg_p=$5 lg_kmax=$6 if [ ${lg_delta} -ge ${lg_p} ] ; then psz="yes" else pow2 ${lg_p}; p=${pow2_result} pow2 ${lg_grp}; grp=${pow2_result} pow2 ${lg_delta}; delta=${pow2_result} sz=$((${grp} + ${delta} * ${ndelta})) npgs=$((${sz} / ${p})) if [ ${sz} -eq $((${npgs} * ${p})) ] ; then psz="yes" else psz="no" fi fi lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} if [ ${pow2_result} -lt ${ndelta} ] ; then rem="yes" else rem="no" fi lg_size=${lg_grp} if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then lg_size=$((${lg_grp} + 1)) else lg_size=${lg_grp} rem="yes" fi if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then bin="yes" slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs} else bin="no" pgs=0 fi if [ ${lg_size} -lt ${lg_kmax} \ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then lg_delta_lookup=${lg_delta} else lg_delta_lookup="no" fi printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${pgs} ${lg_delta_lookup} # Defined upon return: # - psz ("yes" or "no") # - bin ("yes" or "no") # - pgs # - lg_delta_lookup (${lg_delta} or "no") } sep_line() { echo " \\" } size_classes() { lg_z=$1 lg_q=$2 lg_t=$3 lg_p=$4 lg_g=$5 pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} pow2 ${lg_g}; g=${pow2_result} echo "#define SIZE_CLASSES \\" echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \\" ntbins=0 nlbins=0 lg_tiny_maxclass='"NA"' nbins=0 npsizes=0 # Tiny size classes. ndelta=0 index=0 lg_grp=${lg_t} lg_delta=${lg_grp} while [ ${lg_grp} -lt ${lg_q} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) fi if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) fi ntbins=$((${ntbins} + 1)) lg_tiny_maxclass=${lg_grp} # Final written value is correct. index=$((${index} + 1)) lg_delta=${lg_grp} lg_grp=$((${lg_grp} + 1)) done # First non-tiny group. if [ ${ntbins} -gt 0 ] ; then sep_line # The first size class has an unusual encoding, because the size has to be # split between grp and delta*ndelta. lg_grp=$((${lg_grp} - 1)) ndelta=1 size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi fi while [ ${ndelta} -lt ${g} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi done # All remaining groups. 
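  # A worked example (illustrative): with lg_g = 2 there are 4 size classes
  # per doubling, so the group following 4096 (lg_grp = 12, lg_delta = 10)
  # is 5120, 6144, 7168, 8192 (ndelta = 1..4).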
lg_grp=$((${lg_grp} + ${lg_g})) while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do sep_line ndelta=1 if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then ndelta_limit=$((${g} - 1)) else ndelta_limit=${g} fi while [ ${ndelta} -le ${ndelta_limit} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) # Final written value is correct: lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" fi if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) # Final written value is correct: small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" if [ ${lg_g} -gt 0 ] ; then lg_large_minclass=$((${lg_grp} + 1)) else lg_large_minclass=$((${lg_grp} + 2)) fi fi # Final written value is correct: large_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) done echo nsizes=${index} lg_ceil ${nsizes}; lg_ceil_nsizes=${lg_ceil_result} # Defined upon completion: # - ntbins # - nlbins # - nbins # - nsizes # - lg_ceil_nsizes # - npsizes # - lg_tiny_maxclass # - lookup_maxclass # - small_maxclass # - lg_large_minclass # - large_maxclass } cat < 256) # error "Too many small size classes" #endif #endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */ EOF jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/smoothstep.h010064400007650000024000000364121340421340100235240ustar0000000000000000#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H #define JEMALLOC_INTERNAL_SMOOTHSTEP_H /* * This file was generated by the following command: * sh smoothstep.sh smoother 200 24 3 15 */ /******************************************************************************/ /* * This header defines a precomputed table based on the smoothstep family of * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so * that floating point math can be avoided. 
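 *
 * For example, each h value below is the fixed point encoding
 * floor(y * 2^SMOOTHSTEP_BFP); with SMOOTHSTEP_BFP == 24, step 100
 * (y = 0.5) encodes as 0x800000 and step 200 (y = 1.0) as 0x1000000.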
* * 3 2 * smoothstep(x) = -2x + 3x * * 5 4 3 * smootherstep(x) = 6x - 15x + 10x * * 7 6 5 4 * smootheststep(x) = -20x + 70x - 84x + 35x */ #define SMOOTHSTEP_VARIANT "smoother" #define SMOOTHSTEP_NSTEPS 200 #define SMOOTHSTEP_BFP 24 #define SMOOTHSTEP \ /* STEP(step, h, x, y) */ \ STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ STEP( 49, 
UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ STEP( 101, 
UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ STEP( 152, UINT64_C(0x0000000000e820b0), 
0.760, 0.906748825600000) \ STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ #endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/smoothstep.sh010075500007650000024000000056321340421340100237120ustar0000000000000000#!/bin/sh # # Generate a 
discrete lookup table for a sigmoid function in the smoothstep # family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table # entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode # the entries using a binary fixed point representation. # # Usage: smoothstep.sh # # is in {smooth, smoother, smoothest}. # must be greater than zero. # must be in [0..62]; reasonable values are roughly [10..30]. # is x decimal precision. # is y decimal precision. #set -x cmd="sh smoothstep.sh $*" variant=$1 nsteps=$2 bfp=$3 xprec=$4 yprec=$5 case "${variant}" in smooth) ;; smoother) ;; smoothest) ;; *) echo "Unsupported variant" exit 1 ;; esac smooth() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } smoother() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } smoothest() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } cat <iteration < 5) { for (i = 0; i < (1U << spin->iteration); i++) { spin_cpu_spinwait(); } spin->iteration++; } else { #ifdef _WIN32 SwitchToThread(); #else sched_yield(); #endif } } #undef SPIN_INLINE #endif /* JEMALLOC_INTERNAL_SPIN_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/stats.h010064400007650000024000000016731340421341300224610ustar0000000000000000#ifndef JEMALLOC_INTERNAL_STATS_H #define JEMALLOC_INTERNAL_STATS_H /* OPTION(opt, var_name, default, set_value_to) */ #define STATS_PRINT_OPTIONS \ OPTION('J', json, false, true) \ OPTION('g', general, true, false) \ OPTION('m', merged, config_stats, false) \ OPTION('d', destroyed, config_stats, false) \ OPTION('a', unmerged, config_stats, false) \ OPTION('b', bins, true, false) \ OPTION('l', large, true, false) \ OPTION('x', mutex, true, false) enum { #define OPTION(o, v, d, s) stats_print_option_num_##v, STATS_PRINT_OPTIONS #undef OPTION stats_print_tot_num_options }; /* Options for stats_print. */ extern bool opt_stats_print; extern char opt_stats_print_opts[stats_print_tot_num_options+1]; /* Implements je_malloc_stats_print. */ void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts); #endif /* JEMALLOC_INTERNAL_STATS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/sz.h010064400007650000024000000177501340421341300217620ustar0000000000000000#ifndef JEMALLOC_INTERNAL_SIZE_H #define JEMALLOC_INTERNAL_SIZE_H #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/util.h" /* * sz module: Size computations. * * Some abbreviations used here: * p: Page * ind: Index * s, sz: Size * u: Usable size * a: Aligned * * These are not always used completely consistently, but should be enough to * interpret function names. E.g. sz_psz2ind converts page size to page size * index; sz_sa2u converts a (size, alignment) allocation request to the usable * size that would result from such an allocation. 
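 *
 * A worked example (illustrative; assumes the common 16 byte quantum,
 * LG_QUANTUM == 4): sz_s2u(100) rounds up to the 112 byte size class, and
 * the lookups round-trip: sz_index2size(sz_size2index(112)) == 112.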
*/ /* * sz_pind2sz_tab encodes the same information as could be computed by * sz_pind2sz_compute(). */ extern size_t const sz_pind2sz_tab[NPSIZES+1]; /* * sz_index2size_tab encodes the same information as could be computed (at * unacceptable cost in some code paths) by sz_index2size_compute(). */ extern size_t const sz_index2size_tab[NSIZES]; /* * sz_size2index_tab is a compact lookup table that rounds request sizes up to * size classes. In order to reduce cache footprint, the table is compressed, * and all accesses are via sz_size2index(). */ extern uint8_t const sz_size2index_tab[]; static const size_t sz_large_pad = #ifdef JEMALLOC_CACHE_OBLIVIOUS PAGE #else 0 #endif ; JEMALLOC_ALWAYS_INLINE pszind_t sz_psz2ind(size_t psz) { if (unlikely(psz > LARGE_MAXCLASS)) { return NPSIZES; } { pszind_t x = lg_floor((psz<<1)-1); pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - (LG_SIZE_CLASS_GROUP + LG_PAGE); pszind_t grp = shift << LG_SIZE_CLASS_GROUP; pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; size_t delta_inverse_mask = ZU(-1) << lg_delta; pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); pszind_t ind = grp + mod; return ind; } } static inline size_t sz_pind2sz_compute(pszind_t pind) { if (unlikely(pind == NPSIZES)) { return LARGE_MAXCLASS + PAGE; } { size_t grp = pind >> LG_SIZE_CLASS_GROUP; size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); size_t grp_size_mask = ~((!!grp)-1); size_t grp_size = ((ZU(1) << (LG_PAGE + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; size_t lg_delta = shift + (LG_PAGE-1); size_t mod_size = (mod+1) << lg_delta; size_t sz = grp_size + mod_size; return sz; } } static inline size_t sz_pind2sz_lookup(pszind_t pind) { size_t ret = (size_t)sz_pind2sz_tab[pind]; assert(ret == sz_pind2sz_compute(pind)); return ret; } static inline size_t sz_pind2sz(pszind_t pind) { assert(pind < NPSIZES+1); return sz_pind2sz_lookup(pind); } static inline size_t sz_psz2u(size_t psz) { if (unlikely(psz > LARGE_MAXCLASS)) { return LARGE_MAXCLASS + PAGE; } { size_t x = lg_floor((psz<<1)-1); size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (psz + delta_mask) & ~delta_mask; return usize; } } static inline szind_t sz_size2index_compute(size_t size) { if (unlikely(size > LARGE_MAXCLASS)) { return NSIZES; } #if (NTBINS != 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); } #endif { szind_t x = lg_floor((size<<1)-1); szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); szind_t grp = shift << LG_SIZE_CLASS_GROUP; szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t delta_inverse_mask = ZU(-1) << lg_delta; szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); szind_t index = NTBINS + grp + mod; return index; } } JEMALLOC_ALWAYS_INLINE szind_t sz_size2index_lookup(size_t size) { assert(size <= LOOKUP_MAXCLASS); { szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]); assert(ret == sz_size2index_compute(size)); return ret; } } JEMALLOC_ALWAYS_INLINE szind_t sz_size2index(size_t size) { assert(size > 0); if (likely(size <= LOOKUP_MAXCLASS)) { return sz_size2index_lookup(size); } return sz_size2index_compute(size); } static inline size_t sz_index2size_compute(szind_t index) { #if (NTBINS > 0) if (index < NTBINS) { return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); } #endif { size_t reduced_index = index - NTBINS; size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); size_t grp_size_mask = ~((!!grp)-1); size_t grp_size = ((ZU(1) << (LG_QUANTUM + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; size_t lg_delta = shift + (LG_QUANTUM-1); size_t mod_size = (mod+1) << lg_delta; size_t usize = grp_size + mod_size; return usize; } } JEMALLOC_ALWAYS_INLINE size_t sz_index2size_lookup(szind_t index) { size_t ret = (size_t)sz_index2size_tab[index]; assert(ret == sz_index2size_compute(index)); return ret; } JEMALLOC_ALWAYS_INLINE size_t sz_index2size(szind_t index) { assert(index < NSIZES); return sz_index2size_lookup(index); } JEMALLOC_ALWAYS_INLINE size_t sz_s2u_compute(size_t size) { if (unlikely(size > LARGE_MAXCLASS)) { return 0; } #if (NTBINS > 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : (ZU(1) << lg_ceil)); } #endif { size_t x = lg_floor((size<<1)-1); size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (size + delta_mask) & ~delta_mask; return usize; } } JEMALLOC_ALWAYS_INLINE size_t sz_s2u_lookup(size_t size) { size_t ret = sz_index2size_lookup(sz_size2index_lookup(size)); assert(ret == sz_s2u_compute(size)); return ret; } /* * Compute usable size that would result from allocating an object with the * specified size. */ JEMALLOC_ALWAYS_INLINE size_t sz_s2u(size_t size) { assert(size > 0); if (likely(size <= LOOKUP_MAXCLASS)) { return sz_s2u_lookup(size); } return sz_s2u_compute(size); } /* * Compute usable size that would result from allocating an object with the * specified size and alignment. */ JEMALLOC_ALWAYS_INLINE size_t sz_sa2u(size_t size, size_t alignment) { size_t usize; assert(alignment != 0 && ((alignment - 1) & alignment) == 0); /* Try for a small size class. */ if (size <= SMALL_MAXCLASS && alignment < PAGE) { /* * Round size up to the nearest multiple of alignment. * * This done, we can take advantage of the fact that for each * small size class, every object is aligned at the smallest * power of two that is non-zero in the base two representation * of the size. For example: * * Size | Base 2 | Minimum alignment * -----+----------+------------------ * 96 | 1100000 | 32 * 144 | 10100000 | 32 * 192 | 11000000 | 64 */ usize = sz_s2u(ALIGNMENT_CEILING(size, alignment)); if (usize < LARGE_MINCLASS) { return usize; } } /* Large size class. Beware of overflow. 
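 * An illustrative instance of the hazard: on a 64-bit build,
 * sz_sa2u(SIZE_MAX - PAGE, PAGE) must return 0 (failure) rather than a
 * small wrapped-around usize; the checks below guarantee that.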
*/ if (unlikely(alignment > LARGE_MAXCLASS)) { return 0; } /* Make sure result is a large size class. */ if (size <= LARGE_MINCLASS) { usize = LARGE_MINCLASS; } else { usize = sz_s2u(size); if (usize < size) { /* size_t overflow. */ return 0; } } /* * Calculate the multi-page mapping that large_palloc() would need in * order to guarantee the alignment. */ if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) { /* size_t overflow. */ return 0; } return usize; } #endif /* JEMALLOC_INTERNAL_SIZE_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tcache_externs.h010064400007650000024000000042071340421341300243160ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H #define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H #include "jemalloc/internal/size_classes.h" extern bool opt_tcache; extern ssize_t opt_lg_tcache_max; extern cache_bin_info_t *tcache_bin_info; /* * Number of tcache bins. There are NBINS small-object bins, plus 0 or more * large-object bins. */ extern unsigned nhbins; /* Maximum cached size class. */ extern size_t tcache_maxclass; /* * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are * completely disjoint from this data structure. tcaches starts off as a sparse * array, so it has no physical memory footprint until individual pages are * touched. This allows the entire array to be allocated the first time an * explicit tcache is created without a disproportionate impact on memory usage. */ extern tcaches_t *tcaches; size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, bool *tcache_success); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, unsigned rem); void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); tcache_t *tcache_create_explicit(tsd_t *tsd); void tcache_cleanup(tsd_t *tsd); void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); bool tcaches_create(tsd_t *tsd, unsigned *r_ind); void tcaches_flush(tsd_t *tsd, unsigned ind); void tcaches_destroy(tsd_t *tsd, unsigned ind); bool tcache_boot(tsdn_t *tsdn); void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); void tcache_prefork(tsdn_t *tsdn); void tcache_postfork_parent(tsdn_t *tsdn); void tcache_postfork_child(tsdn_t *tsdn); void tcache_flush(tsd_t *tsd); bool tsd_tcache_data_init(tsd_t *tsd); bool tsd_tcache_enabled_data_init(tsd_t *tsd); #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tcache_inlines.h010064400007650000024000000131621340421341300242670ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H #define JEMALLOC_INTERNAL_TCACHE_INLINES_H #include "jemalloc/internal/bin.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/util.h" static inline bool tcache_enabled_get(tsd_t *tsd) { return tsd_tcache_enabled_get(tsd); } static inline void tcache_enabled_set(tsd_t *tsd, bool enabled) { bool was_enabled = tsd_tcache_enabled_get(tsd); if (!was_enabled && enabled) { tsd_tcache_data_init(tsd); } else if 
(was_enabled && !enabled) { tcache_cleanup(tsd); } /* Commit the state last. Above calls check current state. */ tsd_tcache_enabled_set(tsd, enabled); tsd_slow_update(tsd); } JEMALLOC_ALWAYS_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache) { if (TCACHE_GC_INCR == 0) { return; } if (unlikely(ticker_tick(&tcache->gc_ticker))) { tcache_event_hard(tsd, tcache); } } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, UNUSED size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; cache_bin_t *bin; bool tcache_success; size_t usize JEMALLOC_CC_SILENCE_INIT(0); assert(binind < NBINS); bin = tcache_small_bin_get(tcache, binind); ret = cache_bin_alloc_easy(bin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { bool tcache_hard_success; arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) { return NULL; } ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, bin, binind, &tcache_hard_success); if (tcache_hard_success == false) { return NULL; } } assert(ret); /* * Only compute usize if required. The checks in the following if * statement are all static. */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = sz_index2size(binind); assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], false); } else if (unlikely(opt_zero)) { memset(ret, 0, usize); } } } else { if (slow_path && config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], true); } memset(ret, 0, usize); } if (config_stats) { bin->tstats.nrequests++; } if (config_prof) { tcache->prof_accumbytes += usize; } tcache_event(tsd, tcache); return ret; } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; cache_bin_t *bin; bool tcache_success; assert(binind >= NBINS &&binind < nhbins); bin = tcache_large_bin_get(tcache, binind); ret = cache_bin_alloc_easy(bin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. 
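 *
 * (Unlike the small-object path above, a miss here does not refill the
 * cache bin: the object comes straight from large_malloc() and is
 * handed to the caller without being cached.)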
*/ arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) { return NULL; } ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero); if (ret == NULL) { return NULL; } } else { size_t usize JEMALLOC_CC_SILENCE_INIT(0); /* Only compute usize on demand */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = sz_index2size(binind); assert(usize <= tcache_maxclass); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { memset(ret, JEMALLOC_ALLOC_JUNK, usize); } else if (unlikely(opt_zero)) { memset(ret, 0, usize); } } } else { memset(ret, 0, usize); } if (config_stats) { bin->tstats.nrequests++; } if (config_prof) { tcache->prof_accumbytes += usize; } } tcache_event(tsd, tcache); return ret; } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { cache_bin_t *bin; cache_bin_info_t *bin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); if (slow_path && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, &bin_infos[binind]); } bin = tcache_small_bin_get(tcache, binind); bin_info = &tcache_bin_info[binind]; if (unlikely(bin->ncached == bin_info->ncached_max)) { tcache_bin_flush_small(tsd, tcache, bin, binind, (bin_info->ncached_max >> 1)); } assert(bin->ncached < bin_info->ncached_max); bin->ncached++; *(bin->avail - bin->ncached) = ptr; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { cache_bin_t *bin; cache_bin_info_t *bin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); if (slow_path && config_fill && unlikely(opt_junk_free)) { large_dalloc_junk(ptr, sz_index2size(binind)); } bin = tcache_large_bin_get(tcache, binind); bin_info = &tcache_bin_info[binind]; if (unlikely(bin->ncached == bin_info->ncached_max)) { tcache_bin_flush_large(tsd, bin, binind, (bin_info->ncached_max >> 1), tcache); } assert(bin->ncached < bin_info->ncached_max); bin->ncached++; *(bin->avail - bin->ncached) = ptr; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE tcache_t * tcaches_get(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; if (unlikely(elm->tcache == NULL)) { elm->tcache = tcache_create_explicit(tsd); } return elm->tcache; } #endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tcache_structs.h010064400007650000024000000034731340421341300243410ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H #define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H #include "jemalloc/internal/ql.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/cache_bin.h" #include "jemalloc/internal/ticker.h" struct tcache_s { /* * To minimize our cache-footprint, we put the frequently accessed data * together at the start of this struct. */ /* Cleared after arena_prof_accum(). */ uint64_t prof_accumbytes; /* Drives incremental GC. */ ticker_t gc_ticker; /* * The pointer stacks associated with bins follow as a contiguous array. * During tcache initialization, the avail pointer in each element of * tbins is initialized to point to the proper offset within this array. */ cache_bin_t bins_small[NBINS]; /* * This data is less hot; we can be a little less careful with our * footprint here. */ /* Lets us track all the tcaches in an arena. 
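 *
 * (The arena uses this linkage, together with the cache-bin array
 * descriptor below, to find all tcaches associated with it, e.g. when
 * aggregating per-bin statistics.)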
*/ ql_elm(tcache_t) link; /* * The descriptor lets the arena find our cache bins without seeing the * tcache definition. This enables arenas to aggregate stats across * tcaches without having a tcache dependency. */ cache_bin_array_descriptor_t cache_bin_array_descriptor; /* The arena this tcache is associated with. */ arena_t *arena; /* Next bin to GC. */ szind_t next_gc_bin; /* For small bins, fill (ncached_max >> lg_fill_div). */ uint8_t lg_fill_div[NBINS]; /* * We put the cache bins for large size classes at the end of the * struct, since some of them might not get used. This might end up * letting us avoid touching an extra page if we don't have to. */ cache_bin_t bins_large[NSIZES-NBINS]; }; /* Linkage for list of available (previously used) explicit tcache IDs. */ struct tcaches_s { union { tcache_t *tcache; tcaches_t *next; }; }; #endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tcache_types.h010064400007650000024000000036131340421341300237720ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H #define JEMALLOC_INTERNAL_TCACHE_TYPES_H #include "jemalloc/internal/size_classes.h" typedef struct tcache_s tcache_t; typedef struct tcaches_s tcaches_t; /* * tcache pointers close to NULL are used to encode state information that is * used for two purposes: preventing thread caching on a per thread basis and * cleaning up during thread shutdown. */ #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY /* * Absolute minimum number of cache slots for each small bin. */ #define TCACHE_NSLOTS_SMALL_MIN 20 /* * Absolute maximum number of cache slots for each small bin in the thread * cache. This is an additional constraint beyond that imposed as: twice the * number of regions per slab for this size class. * * This constant must be an even number. */ #define TCACHE_NSLOTS_SMALL_MAX 200 /* Number of cache slots for large size classes. */ #define TCACHE_NSLOTS_LARGE 20 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ #define LG_TCACHE_MAXCLASS_DEFAULT 15 /* * TCACHE_GC_SWEEP is the approximate number of allocation events between * full GC sweeps. Integer rounding may cause the actual number to be * slightly higher, since GC is performed incrementally. */ #define TCACHE_GC_SWEEP 8192 /* Number of tcache allocation/deallocation events between incremental GCs. */ #define TCACHE_GC_INCR \ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) /* Used in TSD static initializer only. Real init in tcache_data_init(). */ #define TCACHE_ZERO_INITIALIZER {0} /* Used in TSD static initializer only. Will be initialized to opt_tcache. */ #define TCACHE_ENABLED_ZERO_INITIALIZER false #endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/ticker.h010064400007650000024000000037371340421341300226070ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TICKER_H #define JEMALLOC_INTERNAL_TICKER_H #include "jemalloc/internal/util.h" /** * A ticker makes it easy to count-down events until some limit. You * ticker_init the ticker to trigger every nticks events. You then notify it * that an event has occurred with calls to ticker_tick (or that nticks events * have occurred with a call to ticker_ticks), which will return true (and reset * the counter) if the countdown hit zero. 
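 *
 * A minimal usage sketch (handle_event() and do_periodic_work() are
 * hypothetical placeholders); the countdown resets automatically each
 * time ticker_tick() returns true:
 *
 *   ticker_t t;
 *   ticker_init(&t, 100);
 *   while (handle_event()) {
 *       if (ticker_tick(&t)) {
 *           do_periodic_work();
 *       }
 *   }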
*/ typedef struct { int32_t tick; int32_t nticks; } ticker_t; static inline void ticker_init(ticker_t *ticker, int32_t nticks) { ticker->tick = nticks; ticker->nticks = nticks; } static inline void ticker_copy(ticker_t *ticker, const ticker_t *other) { *ticker = *other; } static inline int32_t ticker_read(const ticker_t *ticker) { return ticker->tick; } /* * Not intended to be a public API. Unfortunately, on x86, neither gcc nor * clang seems smart enough to turn * ticker->tick -= nticks; * if (unlikely(ticker->tick < 0)) { * fixup ticker * return true; * } * return false; * into * subq %nticks_reg, (%ticker_reg) * js fixup ticker * * unless we force "fixup ticker" out of line. In that case, gcc gets it right, * but clang now does worse than before. So, on x86 with gcc, we force it out * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be * worth the hassle, but this is on the fast path of both malloc and free (via * tcache_event). */ #if defined(__GNUC__) && !defined(__clang__) \ && (defined(__x86_64__) || defined(__i386__)) JEMALLOC_NOINLINE #endif static bool ticker_fixup(ticker_t *ticker) { ticker->tick = ticker->nticks; return true; } static inline bool ticker_ticks(ticker_t *ticker, int32_t nticks) { ticker->tick -= nticks; if (unlikely(ticker->tick < 0)) { return ticker_fixup(ticker); } return false; } static inline bool ticker_tick(ticker_t *ticker) { return ticker_ticks(ticker, 1); } #endif /* JEMALLOC_INTERNAL_TICKER_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tsd.h010064400007650000024000000226131340421341300221120ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TSD_H #define JEMALLOC_INTERNAL_TSD_H #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/jemalloc_internal_externs.h" #include "jemalloc/internal/prof_types.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/rtree_tsd.h" #include "jemalloc/internal/tcache_types.h" #include "jemalloc/internal/tcache_structs.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/witness.h" /* * Thread-Specific-Data layout * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof --- * s: state * e: tcache_enabled * m: thread_allocated (config_stats) * f: thread_deallocated (config_stats) * p: prof_tdata (config_prof) * c: rtree_ctx (rtree cache accessed on deallocation) * t: tcache * --- data not accessed on tcache fast path: arena-related fields --- * d: arenas_tdata_bypass * r: reentrancy_level * x: narenas_tdata * i: iarena * a: arena * o: arenas_tdata * Loading TSD data is on the critical path of basically all malloc operations. * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective. * Use a compact layout to reduce cache footprint. * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+ * |---------------------------- 1st cacheline ----------------------------| * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] | * |---------------------------- 2nd cacheline ----------------------------| * | [c * 64 ........ ........ ........ ........ ........ ........ .......] | * |---------------------------- 3nd cacheline ----------------------------| * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... | * +-------------------------------------------------------------------------+ * Note: the entire tcache is embedded into TSD and spans multiple cachelines. 
* * The last 3 members (i, a and o) before tcache isn't really needed on tcache * fast path. However we have a number of unused tcache bins and witnesses * (never touched unless config_debug) at the end of tcache, so we place them * there to avoid breaking the cachelines and possibly paging in an extra page. */ #ifdef JEMALLOC_JET typedef void (*test_callback_t)(int *); # define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 # define MALLOC_TEST_TSD \ O(test_data, int, int) \ O(test_callback, test_callback_t, int) # define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL #else # define MALLOC_TEST_TSD # define MALLOC_TEST_TSD_INITIALIZER #endif /* O(name, type, nullable type */ #define MALLOC_TSD \ O(tcache_enabled, bool, bool) \ O(arenas_tdata_bypass, bool, bool) \ O(reentrancy_level, int8_t, int8_t) \ O(narenas_tdata, uint32_t, uint32_t) \ O(offset_state, uint64_t, uint64_t) \ O(thread_allocated, uint64_t, uint64_t) \ O(thread_deallocated, uint64_t, uint64_t) \ O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \ O(iarena, arena_t *, arena_t *) \ O(arena, arena_t *, arena_t *) \ O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\ O(tcache, tcache_t, tcache_t) \ O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ MALLOC_TEST_TSD #define TSD_INITIALIZER { \ tsd_state_uninitialized, \ TCACHE_ENABLED_ZERO_INITIALIZER, \ false, \ 0, \ 0, \ 0, \ 0, \ 0, \ NULL, \ RTREE_CTX_ZERO_INITIALIZER, \ NULL, \ NULL, \ NULL, \ TCACHE_ZERO_INITIALIZER, \ WITNESS_TSD_INITIALIZER \ MALLOC_TEST_TSD_INITIALIZER \ } enum { tsd_state_nominal = 0, /* Common case --> jnz. */ tsd_state_nominal_slow = 1, /* Initialized but on slow path. */ /* the above 2 nominal states should be lower values. */ tsd_state_nominal_max = 1, /* used for comparison only. */ tsd_state_minimal_initialized = 2, tsd_state_purgatory = 3, tsd_state_reincarnated = 4, tsd_state_uninitialized = 5 }; /* Manually limit tsd_state_t to a single byte. */ typedef uint8_t tsd_state_t; /* The actual tsd. */ struct tsd_s { /* * The contents should be treated as totally opaque outside the tsd * module. Access any thread-local state through the getters and * setters below. */ tsd_state_t state; #define O(n, t, nt) \ t use_a_getter_or_setter_instead_##n; MALLOC_TSD #undef O }; /* * Wrapper around tsd_t that makes it possible to avoid implicit conversion * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be * explicitly converted to tsd_t, which is non-nullable. */ struct tsdn_s { tsd_t tsd; }; #define TSDN_NULL ((tsdn_t *)0) JEMALLOC_ALWAYS_INLINE tsdn_t * tsd_tsdn(tsd_t *tsd) { return (tsdn_t *)tsd; } JEMALLOC_ALWAYS_INLINE bool tsdn_null(const tsdn_t *tsdn) { return tsdn == NULL; } JEMALLOC_ALWAYS_INLINE tsd_t * tsdn_tsd(tsdn_t *tsdn) { assert(!tsdn_null(tsdn)); return &tsdn->tsd; } void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_cleanup_register(bool (*f)(void)); tsd_t *malloc_tsd_boot0(void); void malloc_tsd_boot1(void); void tsd_cleanup(void *arg); tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal); void tsd_slow_update(tsd_t *tsd); /* * We put the platform-specific data declarations and inlines into their own * header files to avoid cluttering this file. They define tsd_boot0, * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. 
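 *
 * (The #if chain below selects exactly one backend, in this order:
 * malloc_thread_cleanup, native TLS, Windows TLS slots, and finally the
 * generic pthread_key_create()-based fallback.)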
*/ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #include "jemalloc/internal/tsd_malloc_thread_cleanup.h" #elif (defined(JEMALLOC_TLS)) #include "jemalloc/internal/tsd_tls.h" #elif (defined(_WIN32)) #include "jemalloc/internal/tsd_win.h" #else #include "jemalloc/internal/tsd_generic.h" #endif /* * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of * foo. This omits some safety checks, and so can be used during tsd * initialization and cleanup. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get_unsafe(tsd_t *tsd) { \ return &tsd->use_a_getter_or_setter_instead_##n; \ } MALLOC_TSD #undef O /* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get(tsd_t *tsd) { \ assert(tsd->state == tsd_state_nominal || \ tsd->state == tsd_state_nominal_slow || \ tsd->state == tsd_state_reincarnated || \ tsd->state == tsd_state_minimal_initialized); \ return tsd_##n##p_get_unsafe(tsd); \ } MALLOC_TSD #undef O /* * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE nt * \ tsdn_##n##p_get(tsdn_t *tsdn) { \ if (tsdn_null(tsdn)) { \ return NULL; \ } \ tsd_t *tsd = tsdn_tsd(tsdn); \ return (nt *)tsd_##n##p_get(tsd); \ } MALLOC_TSD #undef O /* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE t \ tsd_##n##_get(tsd_t *tsd) { \ return *tsd_##n##p_get(tsd); \ } MALLOC_TSD #undef O /* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE void \ tsd_##n##_set(tsd_t *tsd, t val) { \ assert(tsd->state != tsd_state_reincarnated && \ tsd->state != tsd_state_minimal_initialized); \ *tsd_##n##p_get(tsd) = val; \ } MALLOC_TSD #undef O JEMALLOC_ALWAYS_INLINE void tsd_assert_fast(tsd_t *tsd) { assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && tsd_reentrancy_level_get(tsd) == 0); } JEMALLOC_ALWAYS_INLINE bool tsd_fast(tsd_t *tsd) { bool fast = (tsd->state == tsd_state_nominal); if (fast) { tsd_assert_fast(tsd); } return fast; } JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch_impl(bool init, bool minimal) { tsd_t *tsd = tsd_get(init); if (!init && tsd_get_allocates() && tsd == NULL) { return NULL; } assert(tsd != NULL); if (unlikely(tsd->state != tsd_state_nominal)) { return tsd_fetch_slow(tsd, minimal); } assert(tsd_fast(tsd)); tsd_assert_fast(tsd); return tsd; } /* Get a minimal TSD that requires no cleanup. See comments in free(). */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch_min(void) { return tsd_fetch_impl(true, true); } /* For internal background threads use only. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_internal_fetch(void) { tsd_t *tsd = tsd_fetch_min(); /* Use reincarnated state to prevent full initialization. 
*/ tsd->state = tsd_state_reincarnated; return tsd; } JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch(void) { return tsd_fetch_impl(true, false); } static inline bool tsd_nominal(tsd_t *tsd) { return (tsd->state <= tsd_state_nominal_max); } JEMALLOC_ALWAYS_INLINE tsdn_t * tsdn_fetch(void) { if (!tsd_booted_get()) { return NULL; } return tsd_tsdn(tsd_fetch_impl(false, false)); } JEMALLOC_ALWAYS_INLINE rtree_ctx_t * tsd_rtree_ctx(tsd_t *tsd) { return tsd_rtree_ctxp_get(tsd); } JEMALLOC_ALWAYS_INLINE rtree_ctx_t * tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { /* * If tsd cannot be accessed, initialize the fallback rtree_ctx and * return a pointer to it. */ if (unlikely(tsdn_null(tsdn))) { rtree_ctx_data_init(fallback); return fallback; } return tsd_rtree_ctx(tsdn_tsd(tsdn)); } #endif /* JEMALLOC_INTERNAL_TSD_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tsd_generic.h010064400007650000024000000070241340421341300236050ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H #error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_GENERIC_H typedef struct tsd_init_block_s tsd_init_block_t; struct tsd_init_block_s { ql_elm(tsd_init_block_t) link; pthread_t thread; void *data; }; /* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */ typedef struct tsd_init_head_s tsd_init_head_t; typedef struct { bool initialized; tsd_t val; } tsd_wrapper_t; void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); extern pthread_key_t tsd_tsd; extern tsd_init_head_t tsd_init_head; extern tsd_wrapper_t tsd_boot_wrapper; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE void tsd_cleanup_wrapper(void *arg) { tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg; if (wrapper->initialized) { wrapper->initialized = false; tsd_cleanup(&wrapper->val); if (wrapper->initialized) { /* Trigger another cleanup round. 
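 * (tsd_cleanup() may set wrapper->initialized back to true if a
 * destructor touches TSD again; re-registering the value with
 * pthread_setspecific() below makes pthreads invoke this destructor
 * once more on the next iteration.)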
*/ if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { malloc_write(": Error setting TSD\n"); if (opt_abort) { abort(); } } return; } } malloc_tsd_dalloc(wrapper); } JEMALLOC_ALWAYS_INLINE void tsd_wrapper_set(tsd_wrapper_t *wrapper) { if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { malloc_write(": Error setting TSD\n"); abort(); } } JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * tsd_wrapper_get(bool init) { tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); if (init && unlikely(wrapper == NULL)) { tsd_init_block_t block; wrapper = (tsd_wrapper_t *) tsd_init_check_recursion(&tsd_init_head, &block); if (wrapper) { return wrapper; } wrapper = (tsd_wrapper_t *) malloc_tsd_malloc(sizeof(tsd_wrapper_t)); block.data = (void *)wrapper; if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } else { wrapper->initialized = false; tsd_t initializer = TSD_INITIALIZER; wrapper->val = initializer; } tsd_wrapper_set(wrapper); tsd_init_finish(&tsd_init_head, &block); } return wrapper; } JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) { return true; } tsd_wrapper_set(&tsd_boot_wrapper); tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { tsd_wrapper_t *wrapper; wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } tsd_boot_wrapper.initialized = false; tsd_cleanup(&tsd_boot_wrapper.val); wrapper->initialized = false; tsd_t initializer = TSD_INITIALIZER; wrapper->val = initializer; tsd_wrapper_set(wrapper); } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { if (tsd_boot0()) { return true; } tsd_boot1(); return false; } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return true; } /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(bool init) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(init); if (tsd_get_allocates() && !init && wrapper == NULL) { return NULL; } return &wrapper->val; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(true); if (likely(&wrapper->val != val)) { wrapper->val = *(val); } wrapper->initialized = true; } jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h010064400007650000024000000021631340421341300264750ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H #error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H extern __thread tsd_t tsd_tls; extern __thread bool tsd_initialized; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool tsd_cleanup_wrapper(void) { if (tsd_initialized) { tsd_initialized = false; tsd_cleanup(&tsd_tls); } return tsd_initialized; } JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { /* Do nothing. */ } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { return tsd_boot0(); } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return false; } /* Get/set. 
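 * (With this backend, tsd_get()/tsd_set() operate directly on the
 * __thread variable tsd_tls; nothing is allocated on access, which is
 * why tsd_get_allocates() returns false above.)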
*/ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(bool init) { assert(tsd_booted); return &tsd_tls; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { assert(tsd_booted); if (likely(&tsd_tls != val)) { tsd_tls = (*val); } tsd_initialized = true; } jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tsd_tls.h010064400007650000024000000021011340421341300227620ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_TLS_H #error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_TLS_H extern __thread tsd_t tsd_tls; extern pthread_key_t tsd_tsd; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) { return true; } tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { /* Do nothing. */ } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { return tsd_boot0(); } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return false; } /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(UNUSED bool init) { assert(tsd_booted); return &tsd_tls; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { assert(tsd_booted); if (likely(&tsd_tls != val)) { tsd_tls = (*val); } if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) { malloc_write(": Error setting tsd.\n"); if (opt_abort) { abort(); } } } jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tsd_types.h010064400007650000024000000004021340421340100233230ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H #define JEMALLOC_INTERNAL_TSD_TYPES_H #define MALLOC_TSD_CLEANUPS_MAX 2 typedef struct tsd_s tsd_t; typedef struct tsdn_s tsdn_t; typedef bool (*malloc_tsd_cleanup_t)(void); #endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/tsd_win.h010064400007650000024000000057121340421340100227650ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_WIN_H #error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_WIN_H typedef struct { bool initialized; tsd_t val; } tsd_wrapper_t; extern DWORD tsd_tsd; extern tsd_wrapper_t tsd_boot_wrapper; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool tsd_cleanup_wrapper(void) { DWORD error = GetLastError(); tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); SetLastError(error); if (wrapper == NULL) { return false; } if (wrapper->initialized) { wrapper->initialized = false; tsd_cleanup(&wrapper->val); if (wrapper->initialized) { /* Trigger another cleanup round. */ return true; } } malloc_tsd_dalloc(wrapper); return false; } JEMALLOC_ALWAYS_INLINE void tsd_wrapper_set(tsd_wrapper_t *wrapper) { if (!TlsSetValue(tsd_tsd, (void *)wrapper)) { malloc_write(": Error setting TSD\n"); abort(); } } JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * tsd_wrapper_get(bool init) { DWORD error = GetLastError(); tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd); SetLastError(error); if (init && unlikely(wrapper == NULL)) { wrapper = (tsd_wrapper_t *) malloc_tsd_malloc(sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } else { wrapper->initialized = false; /* MSVC is finicky about aggregate initialization. 
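 * Assigning from a named local that was initialized with
 * TSD_INITIALIZER sidesteps that, which is why the two-step dance below
 * is used instead of brace-initializing wrapper->val directly.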
*/ tsd_t tsd_initializer = TSD_INITIALIZER; wrapper->val = tsd_initializer; } tsd_wrapper_set(wrapper); } return wrapper; } JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { tsd_tsd = TlsAlloc(); if (tsd_tsd == TLS_OUT_OF_INDEXES) { return true; } malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); tsd_wrapper_set(&tsd_boot_wrapper); tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { tsd_wrapper_t *wrapper; wrapper = (tsd_wrapper_t *) malloc_tsd_malloc(sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } tsd_boot_wrapper.initialized = false; tsd_cleanup(&tsd_boot_wrapper.val); wrapper->initialized = false; tsd_t initializer = TSD_INITIALIZER; wrapper->val = initializer; tsd_wrapper_set(wrapper); } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { if (tsd_boot0()) { return true; } tsd_boot1(); return false; } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return true; } /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(bool init) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(init); if (tsd_get_allocates() && !init && wrapper == NULL) { return NULL; } return &wrapper->val; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(true); if (likely(&wrapper->val != val)) { wrapper->val = *(val); } wrapper->initialized = true; } jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/util.h010064400007650000024000000026671340421340100223010ustar0000000000000000#ifndef JEMALLOC_INTERNAL_UTIL_H #define JEMALLOC_INTERNAL_UTIL_H #define UTIL_INLINE static inline /* Junk fill patterns. */ #ifndef JEMALLOC_ALLOC_JUNK # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) #endif #ifndef JEMALLOC_FREE_JUNK # define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) #endif /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* cpp macro definition stringification. */ #define STRINGIFY_HELPER(x) #x #define STRINGIFY(x) STRINGIFY_HELPER(x) /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ #define JEMALLOC_CC_SILENCE_INIT(v) = v #ifdef __GNUC__ # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) #else # define likely(x) !!(x) # define unlikely(x) !!(x) #endif #if !defined(JEMALLOC_INTERNAL_UNREACHABLE) # error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure #endif #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() /* Set error code. */ UTIL_INLINE void set_errno(int errnum) { #ifdef _WIN32 SetLastError(errnum); #else errno = errnum; #endif } /* Get last error code. 
*/ UTIL_INLINE int get_errno(void) { #ifdef _WIN32 return GetLastError(); #else return errno; #endif } #undef UTIL_INLINE #endif /* JEMALLOC_INTERNAL_UTIL_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/internal/witness.h010064400007650000024000000224101340421341300230100ustar0000000000000000#ifndef JEMALLOC_INTERNAL_WITNESS_H #define JEMALLOC_INTERNAL_WITNESS_H #include "jemalloc/internal/ql.h" /******************************************************************************/ /* LOCK RANKS */ /******************************************************************************/ /* * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness * machinery. */ #define WITNESS_RANK_OMIT 0U #define WITNESS_RANK_MIN 1U #define WITNESS_RANK_INIT 1U #define WITNESS_RANK_CTL 1U #define WITNESS_RANK_TCACHES 2U #define WITNESS_RANK_ARENAS 3U #define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U #define WITNESS_RANK_PROF_DUMP 5U #define WITNESS_RANK_PROF_BT2GCTX 6U #define WITNESS_RANK_PROF_TDATAS 7U #define WITNESS_RANK_PROF_TDATA 8U #define WITNESS_RANK_PROF_GCTX 9U #define WITNESS_RANK_BACKGROUND_THREAD 10U /* * Used as an argument to witness_assert_depth_to_rank() in order to validate * depth excluding non-core locks with lower ranks. Since the rank argument to * witness_assert_depth_to_rank() is inclusive rather than exclusive, this * definition can have the same value as the minimally ranked core lock. */ #define WITNESS_RANK_CORE 11U #define WITNESS_RANK_DECAY 11U #define WITNESS_RANK_TCACHE_QL 12U #define WITNESS_RANK_EXTENT_GROW 13U #define WITNESS_RANK_EXTENTS 14U #define WITNESS_RANK_EXTENT_AVAIL 15U #define WITNESS_RANK_EXTENT_POOL 16U #define WITNESS_RANK_RTREE 17U #define WITNESS_RANK_BASE 18U #define WITNESS_RANK_ARENA_LARGE 19U #define WITNESS_RANK_LEAF 0xffffffffU #define WITNESS_RANK_BIN WITNESS_RANK_LEAF #define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF #define WITNESS_RANK_DSS WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF /******************************************************************************/ /* PER-WITNESS DATA */ /******************************************************************************/ #if defined(JEMALLOC_DEBUG) # define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}} #else # define WITNESS_INITIALIZER(name, rank) #endif typedef struct witness_s witness_t; typedef unsigned witness_rank_t; typedef ql_head(witness_t) witness_list_t; typedef int witness_comp_t (const witness_t *, void *, const witness_t *, void *); struct witness_s { /* Name, used for printing lock order reversal messages. */ const char *name; /* * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses * must be acquired in order of increasing rank. */ witness_rank_t rank; /* * If two witnesses are of equal rank and they have the same comp * function pointer, it is called as a last attempt to differentiate * between witnesses of equal rank. */ witness_comp_t *comp; /* Opaque data, passed to comp(). */ void *opaque; /* Linkage for thread's currently owned locks.
*/ ql_elm(witness_t) link; }; /******************************************************************************/ /* PER-THREAD DATA */ /******************************************************************************/ typedef struct witness_tsd_s witness_tsd_t; struct witness_tsd_s { witness_list_t witnesses; bool forking; }; #define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false } #define WITNESS_TSDN_NULL ((witness_tsdn_t *)0) /******************************************************************************/ /* (PER-THREAD) NULLABILITY HELPERS */ /******************************************************************************/ typedef struct witness_tsdn_s witness_tsdn_t; struct witness_tsdn_s { witness_tsd_t witness_tsd; }; JEMALLOC_ALWAYS_INLINE witness_tsdn_t * witness_tsd_tsdn(witness_tsd_t *witness_tsd) { return (witness_tsdn_t *)witness_tsd; } JEMALLOC_ALWAYS_INLINE bool witness_tsdn_null(witness_tsdn_t *witness_tsdn) { return witness_tsdn == NULL; } JEMALLOC_ALWAYS_INLINE witness_tsd_t * witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) { assert(!witness_tsdn_null(witness_tsdn)); return &witness_tsdn->witness_tsd; } /******************************************************************************/ /* API */ /******************************************************************************/ void witness_init(witness_t *witness, const char *name, witness_rank_t rank, witness_comp_t *comp, void *opaque); typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); extern witness_lock_error_t *JET_MUTABLE witness_lock_error; typedef void (witness_owner_error_t)(const witness_t *); extern witness_owner_error_t *JET_MUTABLE witness_owner_error; typedef void (witness_not_owner_error_t)(const witness_t *); extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error; typedef void (witness_depth_error_t)(const witness_list_t *, witness_rank_t rank_inclusive, unsigned depth); extern witness_depth_error_t *JET_MUTABLE witness_depth_error; void witnesses_cleanup(witness_tsd_t *witness_tsd); void witness_prefork(witness_tsd_t *witness_tsd); void witness_postfork_parent(witness_tsd_t *witness_tsd); void witness_postfork_child(witness_tsd_t *witness_tsd); /* Helper, not intended for direct use. 
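 * witness_owner() performs a linear scan of the calling thread's
 * owned-witness list; the public witness_assert_*() functions wrap it
 * with the config_debug and NULL-tsdn guards.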
*/ static inline bool witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) { witness_list_t *witnesses; witness_t *w; cassert(config_debug); witnesses = &witness_tsd->witnesses; ql_foreach(w, witnesses, link) { if (w == witness) { return true; } } return false; } static inline void witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { witness_tsd_t *witness_tsd; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } if (witness_owner(witness_tsd, witness)) { return; } witness_owner_error(witness); } static inline void witness_assert_not_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { witness_tsd_t *witness_tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } witnesses = &witness_tsd->witnesses; ql_foreach(w, witnesses, link) { if (w == witness) { witness_not_owner_error(witness); } } } static inline void witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, witness_rank_t rank_inclusive, unsigned depth) { witness_tsd_t *witness_tsd; unsigned d; witness_list_t *witnesses; witness_t *w; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); d = 0; witnesses = &witness_tsd->witnesses; w = ql_last(witnesses, link); if (w != NULL) { ql_reverse_foreach(w, witnesses, link) { if (w->rank < rank_inclusive) { break; } d++; } } if (d != depth) { witness_depth_error(witnesses, rank_inclusive, depth); } } static inline void witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) { witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth); } static inline void witness_assert_lockless(witness_tsdn_t *witness_tsdn) { witness_assert_depth(witness_tsdn, 0); } static inline void witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { witness_tsd_t *witness_tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } witness_assert_not_owner(witness_tsdn, witness); witnesses = &witness_tsd->witnesses; w = ql_last(witnesses, link); if (w == NULL) { /* No other locks; do nothing. */ } else if (witness_tsd->forking && w->rank <= witness->rank) { /* Forking, and relaxed ranking satisfied. */ } else if (w->rank > witness->rank) { /* Not forking, rank order reversal. */ witness_lock_error(witnesses, witness); } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != witness->comp || w->comp(w, w->opaque, witness, witness->opaque) > 0)) { /* * Missing/incompatible comparison function, or comparison * function indicates rank order reversal. 
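 *
 * (Hypothetical example: with WITNESS_RANK_ARENAS == 3U and
 * WITNESS_RANK_EXTENTS == 14U, locking the extents witness and then the
 * arenas witness trips the w->rank > witness->rank branch above, while
 * locking two rank-14 witnesses that lack a shared comp function lands
 * here.)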
*/ witness_lock_error(witnesses, witness); } ql_elm_new(witness, link); ql_tail_insert(witnesses, witness, link); } static inline void witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) { witness_tsd_t *witness_tsd; witness_list_t *witnesses; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } /* * Check whether owner before removal, rather than relying on * witness_assert_owner() to abort, so that unit tests can test this * function's failure mode without causing undefined behavior. */ if (witness_owner(witness_tsd, witness)) { witnesses = &witness_tsd->witnesses; ql_remove(witnesses, witness, link); } else { witness_assert_owner(witness_tsdn, witness); } } #endif /* JEMALLOC_INTERNAL_WITNESS_H */ jemalloc-sys-0.3.2/jemalloc/include/jemalloc/jemalloc.sh010075500007650000024000000007111340421340100214500ustar0000000000000000#!/bin/sh objroot=$1 cat < #include #include #include #include #define JEMALLOC_VERSION "@jemalloc_version@" #define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ #define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ #define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" #define MALLOCX_LG_ALIGN(la) ((int)(la)) #if LG_SIZEOF_PTR == 2 # define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) #else # define MALLOCX_ALIGN(a) \ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ ffs((int)(((size_t)(a))>>32))+31)) #endif #define MALLOCX_ZERO ((int)0x40) /* * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 * encodes MALLOCX_TCACHE_NONE. */ #define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) #define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) /* * Bias arena index bits so that 0 encodes "use an automatically chosen arena". */ #define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) /* * Use as arena index in "arena..{purge,decay,dss}" and * "stats.arenas..*" mallctl interfaces to select all arenas. This * definition is intentionally specified in raw decimal format to support * cpp-based string concatenation, e.g. * * #define STRINGIFY_HELPER(x) #x * #define STRINGIFY(x) STRINGIFY_HELPER(x) * * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, * 0); */ #define MALLCTL_ARENAS_ALL 4096 /* * Use as arena index in "stats.arenas..*" mallctl interfaces to select * destroyed arenas. 
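 *
 * (Used analogously to MALLCTL_ARENAS_ALL above, e.g. reading
 * "stats.arenas." STRINGIFY(MALLCTL_ARENAS_DESTROYED) ".*" statistics
 * that were merged from destroyed arenas.)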
*/ #define MALLCTL_ARENAS_DESTROYED 4097 #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) # define JEMALLOC_CXX_THROW throw() #else # define JEMALLOC_CXX_THROW #endif #if defined(_MSC_VER) # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # ifndef JEMALLOC_EXPORT # ifdef DLLEXPORT # define JEMALLOC_EXPORT __declspec(dllexport) # else # define JEMALLOC_EXPORT __declspec(dllimport) # endif # endif # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE __declspec(noinline) # ifdef __cplusplus # define JEMALLOC_NOTHROW __declspec(nothrow) # else # define JEMALLOC_NOTHROW # endif # define JEMALLOC_SECTION(s) __declspec(allocate(s)) # define JEMALLOC_RESTRICT_RETURN __declspec(restrict) # if _MSC_VER >= 1900 && !defined(__EDG__) # define JEMALLOC_ALLOCATOR __declspec(allocator) # else # define JEMALLOC_ALLOCATOR # endif #elif defined(JEMALLOC_HAVE_ATTR) # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) # ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE # define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) # define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) # else # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # endif # ifndef JEMALLOC_EXPORT # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) # else # define JEMALLOC_FORMAT_PRINTF(s, i) # endif # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) # define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #else # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # define JEMALLOC_EXPORT # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE # define JEMALLOC_NOTHROW # define JEMALLOC_SECTION(s) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #endif jemalloc-sys-0.3.2/jemalloc/include/jemalloc/jemalloc_mangle.sh010075500007650000024000000023561340421340100230020ustar0000000000000000#!/bin/sh -eu public_symbols_txt=$1 symbol_prefix=$2 cat < /* MSVC doesn't define _Bool or bool in C, but does have BOOL */ /* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ /* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as * a built-in type. */ #ifndef __clang__ typedef BOOL _Bool; #endif #define bool _Bool #define true 1 #define false 0 #define __bool_true_false_are_defined 1 #endif /* stdbool_h */ jemalloc-sys-0.3.2/jemalloc/include/msvc_compat/C99/stdint.h010064400007650000024000000170601340421340100220770ustar0000000000000000// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. 
Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. 
#if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define 
PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ] jemalloc-sys-0.3.2/jemalloc/include/msvc_compat/strings.h010064400007650000024000000020311340421340100217070ustar0000000000000000#ifndef strings_h #define strings_h /* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided * for both */ #ifdef _MSC_VER # include # pragma intrinsic(_BitScanForward) static __forceinline int ffsl(long x) { unsigned long i; if (_BitScanForward(&i, x)) { return i + 1; } return 0; } static __forceinline int ffs(int x) { return ffsl(x); } # ifdef _M_X64 # pragma intrinsic(_BitScanForward64) # endif static __forceinline int ffsll(unsigned __int64 x) { unsigned long i; #ifdef _M_X64 if (_BitScanForward64(&i, x)) { return i + 1; } return 0; #else // Fallback for 32-bit build where 64-bit version not available // assuming little endian union { unsigned __int64 ll; unsigned long l[2]; } s; s.ll = x; if (_BitScanForward(&i, s.l[0])) { return i + 1; } else if(_BitScanForward(&i, s.l[1])) { return i + 33; } return 0; #endif } #else # define ffsll(x) __builtin_ffsll(x) # define ffsl(x) __builtin_ffsl(x) # define ffs(x) __builtin_ffs(x) #endif #endif /* strings_h */ jemalloc-sys-0.3.2/jemalloc/include/msvc_compat/windows_extra.h010064400007650000024000000002061340421340100231150ustar0000000000000000#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H #define MSVC_COMPAT_WINDOWS_EXTRA_H #include #endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ jemalloc-sys-0.3.2/jemalloc/INSTALL.md010064400007650000024000000351431340421341300155540ustar0000000000000000Building and installing a packaged release of jemalloc can be as simple as typing the following while in the root directory of the source tree: ./configure make make install If building from unpackaged developer sources, the simplest command sequence that might work is: ./autogen.sh make dist make make install Note that documentation is not built by the default target because doing so would create a dependency on xsltproc in packaged releases, hence the requirement to either run 'make dist' or avoid installing docs via the various install_* targets documented below. ## Advanced configuration The 'configure' script supports numerous options that allow control of which functionality is enabled, where jemalloc is installed, etc. Optionally, pass any of the following arguments (not a definitive list) to 'configure': * `--help` Print a definitive list of options. * `--prefix=` Set the base directory in which to install. 
For example: ./configure --prefix=/usr/local will cause files to be installed into /usr/local/include, /usr/local/lib, and /usr/local/man. * `--with-version=(..--g|VERSION)` The VERSION file is mandatory for successful configuration, and the following steps are taken to assure its presence: 1) If --with-version=..--g is specified, generate VERSION using the specified value. 2) If --with-version is not specified in either form and the source directory is inside a git repository, try to generate VERSION via 'git describe' invocations that pattern-match release tags. 3) If VERSION is missing, generate it with a bogus version: 0.0.0-0-g0000000000000000000000000000000000000000 Note that --with-version=VERSION bypasses (1) and (2), which simplifies VERSION configuration when embedding a jemalloc release into another project's git repository. * `--with-rpath=` Embed one or more library paths, so that libjemalloc can find the libraries it is linked to. This works only on ELF-based systems. * `--with-mangling=` Mangle public symbols specified in which is a comma-separated list of name:mangled pairs. For example, to use ld's --wrap option as an alternative method for overriding libc's malloc implementation, specify something like: --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...] Note that mangling happens prior to application of the prefix specified by --with-jemalloc-prefix, and mangled symbols are then ignored when applying the prefix. * `--with-jemalloc-prefix=` Prefix all public APIs with . For example, if is "prefix_", API changes like the following occur: malloc() --> prefix_malloc() malloc_conf --> prefix_malloc_conf /etc/malloc.conf --> /etc/prefix_malloc.conf MALLOC_CONF --> PREFIX_MALLOC_CONF This makes it possible to use jemalloc at the same time as the system allocator, or even to use multiple copies of jemalloc simultaneously. By default, the prefix is "", except on OS X, where it is "je_". On OS X, jemalloc overlays the default malloc zone, but makes no attempt to actually replace the "malloc", "calloc", etc. symbols. * `--without-export` Don't export public APIs. This can be useful when building jemalloc as a static library, or to avoid exporting public APIs when using the zone allocator on OSX. * `--with-private-namespace=` Prefix all library-private APIs with je_. For shared libraries, symbol visibility mechanisms prevent these symbols from being exported, but for static libraries, naming collisions are a real possibility. By default, is empty, which results in a symbol prefix of je_ . * `--with-install-suffix=` Append to the base name of all installed files, such that multiple versions of jemalloc can coexist in the same installation directory. For example, libjemalloc.so.0 becomes libjemalloc.so.0. * `--with-malloc-conf=` Embed `` as a run-time options string that is processed prior to the malloc_conf global variable, the /etc/malloc.conf symlink, and the MALLOC_CONF environment variable. For example, to change the default decay time to 30 seconds: --with-malloc-conf=decay_ms:30000 * `--enable-debug` Enable assertions and validation code. This incurs a substantial performance hit, but is very useful during application development. * `--disable-stats` Disable statistics gathering functionality. See the "opt.stats_print" option documentation for usage details. * `--enable-prof` Enable heap profiling and leak detection functionality. See the "opt.prof" option documentation for usage details. 
* `--enable-prof-libunwind`

    Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
    backtracing.

* `--disable-prof-libgcc`

    Disable the use of libgcc's backtracing functionality.

* `--disable-prof-gcc`

    Disable the use of gcc intrinsics for backtracing.

* `--with-static-libunwind=<libunwind.a>`

    Statically link against the specified libunwind.a rather than dynamically
    linking with -lunwind.

* `--disable-fill`

    Disable support for junk/zero filling of memory.  See the "opt.junk" and
    "opt.zero" option documentation for usage details.

* `--disable-zone-allocator`

    Disable zone allocator for Darwin.  This means jemalloc won't be hooked as
    the default allocator on OSX/iOS.

* `--enable-utrace`

    Enable utrace(2)-based allocation tracing.  This feature is not broadly
    portable (FreeBSD has it, but Linux and OS X do not).

* `--enable-xmalloc`

    Enable support for optional immediate termination due to out-of-memory
    errors, as is commonly implemented by "xmalloc" wrapper function for
    malloc.  See the "opt.xmalloc" option documentation for usage details.

* `--enable-lazy-lock`

    Enable code that wraps pthread_create() to detect when an application
    switches from single-threaded to multi-threaded mode, so that it can avoid
    mutex locking/unlocking operations while in single-threaded mode.  In
    practice, this feature usually has little impact on performance unless
    thread-specific caching is disabled.

* `--disable-cache-oblivious`

    Disable cache-oblivious large allocation alignment for large allocation
    requests with no alignment constraints.  If this feature is disabled, all
    large allocations are page-aligned as an implementation artifact, which
    can severely harm CPU cache utilization.  However, the cache-oblivious
    layout comes at the cost of one extra page per large allocation, which in
    the most extreme case increases physical memory usage for the 16 KiB size
    class to 20 KiB.

* `--disable-syscall`

    Disable use of syscall(2) rather than {open,read,write,close}(2).  This is
    intended as a workaround for systems that place security limitations on
    syscall(2).

* `--disable-cxx`

    Disable C++ integration.  This will cause new and delete operator
    implementations to be omitted.

* `--with-xslroot=<path>`

    Specify where to find DocBook XSL stylesheets when building the
    documentation.

* `--with-lg-page=<lg-page>`

    Specify the base 2 log of the allocator page size, which must in turn be
    at least as large as the system page size.  By default the configure
    script determines the host's page size and sets the allocator page size
    equal to the system page size, so this option need not be specified unless
    the system page size may change between configuration and execution, e.g.
    when cross compiling.

* `--with-lg-page-sizes=<lg-page-sizes>`

    Specify the comma-separated base 2 logs of the page sizes to support.
    This option may be useful when cross compiling in combination with
    `--with-lg-page`, but its primary use case is for integration with
    FreeBSD's libc, wherein jemalloc is embedded.

* `--with-lg-hugepage=<lg-hugepage>`

    Specify the base 2 log of the system huge page size.  This option is
    useful when cross compiling, or when overriding the default for systems
    that do not explicitly support huge pages.
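    As a hypothetical cross-compilation example of the page-size options: for
    a target whose kernel uses 64 KiB pages, one might pass

        ./configure --with-lg-page=16

    since 2^16 bytes = 64 KiB; the allocator page size must be at least as
    large as the system page size of any machine the resulting binary runs on.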
* `--with-lg-quantum=<lg-quantum>`

    Specify the base 2 log of the minimum allocation alignment.  jemalloc
    needs to know the minimum alignment that meets the following C standard
    requirement (quoted from the April 12, 2011 draft of the C11 standard):

    > The pointer returned if the allocation succeeds is suitably aligned so
    that it may be assigned to a pointer to any type of object with a
    fundamental alignment requirement and then used to access such an object
    or an array of such objects in the space allocated [...]

    This setting is architecture-specific, and although jemalloc includes
    known safe values for the most commonly used modern architectures, there
    is a wrinkle related to GNU libc (glibc) that may impact your choice of
    <lg-quantum>.  On most modern architectures, this mandates 16-byte
    alignment (<lg-quantum>=4), but the glibc developers chose not to meet
    this requirement for performance reasons.  An old discussion can be found
    at <https://sourceware.org/bugzilla/show_bug.cgi?id=206>.  Unlike glibc,
    jemalloc does follow the C standard by default (caveat: jemalloc
    technically cheats for size classes smaller than the quantum), but the
    fact that Linux systems already work around this allocator noncompliance
    means that it is generally safe in practice to let jemalloc's minimum
    alignment follow glibc's lead.

    If you specify `--with-lg-quantum=3` during configuration, jemalloc will
    provide additional size classes that are not 16-byte-aligned (24, 40, and
    56).

* `--with-lg-vaddr=<lg-vaddr>`

    Specify the number of significant virtual address bits.  By default, the
    configure script attempts to detect virtual address size on those
    platforms where it knows how, and picks a default otherwise.  This option
    may be useful when cross-compiling.

* `--disable-initial-exec-tls`

    Disable the initial-exec TLS model for jemalloc's internal thread-local
    storage (on those platforms that support explicit settings).  This can
    allow jemalloc to be dynamically loaded after program startup (e.g. using
    dlopen).  Note that in this case, there will be two malloc implementations
    operating in the same process, which will almost certainly result in
    confusing runtime crashes if pointers leak from one implementation to the
    other.

The following environment variables (not a definitive list) impact configure's
behavior:

* `CFLAGS="?"`
* `CXXFLAGS="?"`

    Pass these flags to the C/C++ compiler.  Any flags set by the configure
    script are prepended, which means explicitly set flags generally take
    precedence.  Take care when specifying flags such as -Werror, because
    configure tests may be affected in undesirable ways.

* `EXTRA_CFLAGS="?"`
* `EXTRA_CXXFLAGS="?"`

    Append these flags to CFLAGS/CXXFLAGS, without passing them to the
    compiler(s) during configuration.  This makes it possible to add flags
    such as -Werror, while allowing the configure script to determine what
    other flags are appropriate for the specified configuration.

* `CPPFLAGS="?"`

    Pass these flags to the C preprocessor.  Note that CFLAGS is not passed to
    'cpp' when 'configure' is looking for include files, so you must use
    CPPFLAGS instead if you need to help 'configure' find header files.

* `LD_LIBRARY_PATH="?"`

    'ld' uses this colon-separated list to find libraries.

* `LDFLAGS="?"`

    Pass these flags when linking.

* `PATH="?"`

    'configure' uses this to find programs.
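As an illustrative combined invocation (the /opt/local paths are placeholders),
the variables above compose as follows:

    EXTRA_CFLAGS="-Werror" CPPFLAGS="-I/opt/local/include" \
        LDFLAGS="-L/opt/local/lib" ./configure

Passing -Werror via EXTRA_CFLAGS rather than CFLAGS keeps the configure probes
themselves from being affected by it, per the descriptions above.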
In some cases it may be necessary to work around configuration results that do
not match reality.  For example, Linux 4.5 added support for the MADV_FREE
flag to madvise(2), which can cause problems if building on a host with
MADV_FREE support and deploying to a target without.  To work around this, use
a cache file to override the relevant configuration variable defined in
configure.ac, e.g.:

    echo "je_cv_madv_free=no" > config.cache && ./configure -C

## Advanced compilation

To build only parts of jemalloc, use the following targets:

    build_lib_shared
    build_lib_static
    build_lib
    build_doc_html
    build_doc_man
    build_doc

To install only parts of jemalloc, use the following targets:

    install_bin
    install_include
    install_lib_shared
    install_lib_static
    install_lib_pc
    install_lib
    install_doc_html
    install_doc_man
    install_doc

To clean up build results to varying degrees, use the following make targets:

    clean
    distclean
    relclean

## Advanced installation

Optionally, define make variables when invoking make, including (not
exclusively):

* `INCLUDEDIR="?"`

    Use this as the installation prefix for header files.

* `LIBDIR="?"`

    Use this as the installation prefix for libraries.

* `MANDIR="?"`

    Use this as the installation prefix for man pages.

* `DESTDIR="?"`

    Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR.  This is
    useful when installing to a different path than was specified via
    --prefix.

* `CC="?"`

    Use this to invoke the C compiler.

* `CFLAGS="?"`

    Pass these flags to the compiler.

* `CPPFLAGS="?"`

    Pass these flags to the C preprocessor.

* `LDFLAGS="?"`

    Pass these flags when linking.

* `PATH="?"`

    Use this to search for programs used during configuration and building.

## Development

If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
script rather than 'configure'.  This re-generates 'configure', enables
configuration dependency rules, and enables re-generation of automatically
generated source files.

The build system supports using an object directory separate from the source
tree.  For example, you can create an 'obj' directory, and from within that
directory, issue configuration and build commands:

    autoconf
    mkdir obj
    cd obj
    ../configure --enable-autogen
    make

## Documentation

The manual page is generated in both html and roff formats.  Any web browser
can be used to view the html manual.  The roff manual page can be formatted
prior to installation via the following command:

    nroff -man -t doc/jemalloc.3
jemalloc-sys-0.3.2/jemalloc/jemalloc.pc.in010064400007650000024000000007211340421340100166320ustar0000000000000000prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
install_suffix=@install_suffix@

Name: jemalloc
Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
URL: http://jemalloc.net/
Version: @jemalloc_version_major@.@jemalloc_version_minor@.@jemalloc_version_bugfix@_@jemalloc_version_nrev@
Cflags: -I${includedir}
Libs: -L${libdir} -ljemalloc${install_suffix}
jemalloc-sys-0.3.2/jemalloc/m4/ax_cxx_compile_stdcxx.m4010064400007650000024000000330001340421340100212700ustar0000000000000000# ===========================================================================
#  http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
# ===========================================================================
#
# SYNOPSIS
#
#   AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
#
# DESCRIPTION
#
#   Check for baseline language coverage in the compiler for the specified
#   version of the C++ standard.  If necessary, add switches to CXX and
#   CXXCPP to enable support.  VERSION may be '11' (for the C++11 standard)
#   or '14' (for the C++14 standard).
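#
#   As an illustrative usage sketch (not part of the upstream macro text), a
#   configure.ac that requires C++11 in strict-conformance mode but merely
#   probes for C++14 could contain:
#
#     AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory])
#     AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
#
#   On success the macro appends the selected -std switch to CXX (and
#   CXXCPP), and defines HAVE_CXX11/HAVE_CXX14 accordingly.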
# # The second argument, if specified, indicates whether you insist on an # extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. # -std=c++11). If neither is specified, you get whatever works, with # preference for an extended mode. # # The third argument, if specified 'mandatory' or if left unspecified, # indicates that baseline support for the specified C++ standard is # required and that the macro should error out if no mode with that # support is found. If specified 'optional', then configuration proceeds # regardless, after defining HAVE_CXX${VERSION} if and only if a # supporting mode is found. # # LICENSE # # Copyright (c) 2008 Benjamin Kosnik # Copyright (c) 2012 Zack Weinberg # Copyright (c) 2013 Roy Stogner # Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov # Copyright (c) 2015 Paul Norman # Copyright (c) 2015 Moritz Klammler # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 4 dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro dnl (serial version number 13). AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl m4_if([$1], [11], [], [$1], [14], [], [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])], [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl m4_if([$2], [], [], [$2], [ext], [], [$2], [noext], [], [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true], [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true], [$3], [optional], [ax_cxx_compile_cxx$1_required=false], [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])]) AC_LANG_PUSH([C++])dnl ac_success=no AC_CACHE_CHECK(whether $CXX supports C++$1 features by default, ax_cv_cxx_compile_cxx$1, [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], [ax_cv_cxx_compile_cxx$1=yes], [ax_cv_cxx_compile_cxx$1=no])]) if test x$ax_cv_cxx_compile_cxx$1 = xyes; then ac_success=yes fi m4_if([$2], [noext], [], [dnl if test x$ac_success = xno; then for switch in -std=gnu++$1 -std=gnu++0x; do cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, $cachevar, [ac_save_CXX="$CXX" CXX="$CXX $switch" AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], [eval $cachevar=yes], [eval $cachevar=no]) CXX="$ac_save_CXX"]) if eval test x\$$cachevar = xyes; then CXX="$CXX $switch" if test -n "$CXXCPP" ; then CXXCPP="$CXXCPP $switch" fi ac_success=yes break fi done fi]) m4_if([$2], [ext], [], [dnl if test x$ac_success = xno; then dnl HP's aCC needs +std=c++11 according to: dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf dnl Cray's crayCC needs "-h std=c++11" for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, $cachevar, [ac_save_CXX="$CXX" CXX="$CXX $switch" AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], [eval $cachevar=yes], [eval $cachevar=no]) CXX="$ac_save_CXX"]) if eval test x\$$cachevar = xyes; then CXX="$CXX $switch" if test -n "$CXXCPP" ; then CXXCPP="$CXXCPP $switch" fi ac_success=yes break fi done fi]) AC_LANG_POP([C++]) if test x$ax_cxx_compile_cxx$1_required = 
xtrue; then if test x$ac_success = xno; then AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.]) fi fi if test x$ac_success = xno; then HAVE_CXX$1=0 AC_MSG_NOTICE([No compiler with C++$1 support was found]) else HAVE_CXX$1=1 AC_DEFINE(HAVE_CXX$1,1, [define if the compiler supports basic C++$1 syntax]) fi AC_SUBST(HAVE_CXX$1) ]) dnl Test body for checking C++11 support m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11], _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 ) dnl Test body for checking C++14 support m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14], _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 ) dnl Tests for new features in C++11 m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[ // If the compiler admits that it is not ready for C++11, why torture it? // Hopefully, this will speed up the test. #ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201103L #error "This is not a C++11 compiler" #else namespace cxx11 { namespace test_static_assert { template struct check { static_assert(sizeof(int) <= sizeof(T), "not big enough"); }; } namespace test_final_override { struct Base { virtual void f() {} }; struct Derived : public Base { virtual void f() override {} }; } namespace test_double_right_angle_brackets { template < typename T > struct check {}; typedef check single_type; typedef check> double_type; typedef check>> triple_type; typedef check>>> quadruple_type; } namespace test_decltype { int f() { int a = 1; decltype(a) b = 2; return a + b; } } namespace test_type_deduction { template < typename T1, typename T2 > struct is_same { static const bool value = false; }; template < typename T > struct is_same { static const bool value = true; }; template < typename T1, typename T2 > auto add(T1 a1, T2 a2) -> decltype(a1 + a2) { return a1 + a2; } int test(const int c, volatile int v) { static_assert(is_same::value == true, ""); static_assert(is_same::value == false, ""); static_assert(is_same::value == false, ""); auto ac = c; auto av = v; auto sumi = ac + av + 'x'; auto sumf = ac + av + 1.0; static_assert(is_same::value == true, ""); static_assert(is_same::value == true, ""); static_assert(is_same::value == true, ""); static_assert(is_same::value == false, ""); static_assert(is_same::value == true, ""); return (sumf > 0.0) ? sumi : add(c, v); } } namespace test_noexcept { int f() { return 0; } int g() noexcept { return 0; } static_assert(noexcept(f()) == false, ""); static_assert(noexcept(g()) == true, ""); } namespace test_constexpr { template < typename CharT > unsigned long constexpr strlen_c_r(const CharT *const s, const unsigned long acc) noexcept { return *s ? 
strlen_c_r(s + 1, acc + 1) : acc; } template < typename CharT > unsigned long constexpr strlen_c(const CharT *const s) noexcept { return strlen_c_r(s, 0UL); } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("1") == 1UL, ""); static_assert(strlen_c("example") == 7UL, ""); static_assert(strlen_c("another\0example") == 7UL, ""); } namespace test_rvalue_references { template < int N > struct answer { static constexpr int value = N; }; answer<1> f(int&) { return answer<1>(); } answer<2> f(const int&) { return answer<2>(); } answer<3> f(int&&) { return answer<3>(); } void test() { int i = 0; const int c = 0; static_assert(decltype(f(i))::value == 1, ""); static_assert(decltype(f(c))::value == 2, ""); static_assert(decltype(f(0))::value == 3, ""); } } namespace test_uniform_initialization { struct test { static const int zero {}; static const int one {1}; }; static_assert(test::zero == 0, ""); static_assert(test::one == 1, ""); } namespace test_lambdas { void test1() { auto lambda1 = [](){}; auto lambda2 = lambda1; lambda1(); lambda2(); } int test2() { auto a = [](int i, int j){ return i + j; }(1, 2); auto b = []() -> int { return '0'; }(); auto c = [=](){ return a + b; }(); auto d = [&](){ return c; }(); auto e = [a, &b](int x) mutable { const auto identity = [](int y){ return y; }; for (auto i = 0; i < a; ++i) a += b--; return x + identity(a + b); }(0); return a + b + c + d + e; } int test3() { const auto nullary = [](){ return 0; }; const auto unary = [](int x){ return x; }; using nullary_t = decltype(nullary); using unary_t = decltype(unary); const auto higher1st = [](nullary_t f){ return f(); }; const auto higher2nd = [unary](nullary_t f1){ return [unary, f1](unary_t f2){ return f2(unary(f1())); }; }; return higher1st(nullary) + higher2nd(nullary)(unary); } } namespace test_variadic_templates { template struct sum; template struct sum { static constexpr auto value = N0 + sum::value; }; template <> struct sum<> { static constexpr auto value = 0; }; static_assert(sum<>::value == 0, ""); static_assert(sum<1>::value == 1, ""); static_assert(sum<23>::value == 23, ""); static_assert(sum<1, 2>::value == 3, ""); static_assert(sum<5, 5, 11>::value == 21, ""); static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); } // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function // because of this. namespace test_template_alias_sfinae { struct foo {}; template using member = typename T::member_type; template void func(...) {} template void func(member*) {} void test(); void test() { func(0); } } } // namespace cxx11 #endif // __cplusplus >= 201103L ]]) dnl Tests for new features in C++14 m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[ // If the compiler admits that it is not ready for C++14, why torture it? // Hopefully, this will speed up the test. #ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201402L #error "This is not a C++14 compiler" #else namespace cxx14 { namespace test_polymorphic_lambdas { int test() { const auto lambda = [](auto&&... args){ const auto istiny = [](auto x){ return (sizeof(x) == 1UL) ? 1 : 0; }; const int aretiny[] = { istiny(args)... 
}; return aretiny[0]; }; return lambda(1, 1L, 1.0f, '1'); } } namespace test_binary_literals { constexpr auto ivii = 0b0000000000101010; static_assert(ivii == 42, "wrong value"); } namespace test_generalized_constexpr { template < typename CharT > constexpr unsigned long strlen_c(const CharT *const s) noexcept { auto length = 0UL; for (auto p = s; *p; ++p) ++length; return length; } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("x") == 1UL, ""); static_assert(strlen_c("test") == 4UL, ""); static_assert(strlen_c("another\0test") == 7UL, ""); } namespace test_lambda_init_capture { int test() { auto x = 0; const auto lambda1 = [a = x](int b){ return a + b; }; const auto lambda2 = [a = lambda1(x)](){ return a; }; return lambda2(); } } namespace test_digit_seperators { constexpr auto ten_million = 100'000'000; static_assert(ten_million == 100000000, ""); } namespace test_return_type_deduction { auto f(int& x) { return x; } decltype(auto) g(int& x) { return x; } template < typename T1, typename T2 > struct is_same { static constexpr auto value = false; }; template < typename T > struct is_same { static constexpr auto value = true; }; int test() { auto x = 0; static_assert(is_same::value, ""); static_assert(is_same::value, ""); return x; } } } // namespace cxx14 #endif // __cplusplus >= 201402L ]]) jemalloc-sys-0.3.2/jemalloc/Makefile.in010064400007650000024000000515041340421341300161700ustar0000000000000000# Clear out all vpaths, then set just one (default vpath) for the main build # directory. vpath vpath % . # Clear the default suffixes, so that built-in rules are not used. .SUFFIXES : SHELL := /bin/sh CC := @CC@ CXX := @CXX@ # Configuration parameters. DESTDIR = BINDIR := $(DESTDIR)@BINDIR@ INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@ LIBDIR := $(DESTDIR)@LIBDIR@ DATADIR := $(DESTDIR)@DATADIR@ MANDIR := $(DESTDIR)@MANDIR@ srcroot := @srcroot@ objroot := @objroot@ abs_srcroot := @abs_srcroot@ abs_objroot := @abs_objroot@ # Build parameters. 
CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@ SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@ EXTRA_CFLAGS := @EXTRA_CFLAGS@ CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS)) CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@ SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@ EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@ CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS)) LDFLAGS := @LDFLAGS@ EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ LIBS := @LIBS@ RPATH_EXTRA := @RPATH_EXTRA@ SO := @so@ IMPORTLIB := @importlib@ O := @o@ A := @a@ EXE := @exe@ LIBPREFIX := @libprefix@ REV := @rev@ install_suffix := @install_suffix@ ABI := @abi@ XSLTPROC := @XSLTPROC@ AUTOCONF := @AUTOCONF@ _RPATH = @RPATH@ RPATH = $(if $(1),$(call _RPATH,$(1))) cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@) cfghdrs_out := @cfghdrs_out@ cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@) cfgoutputs_out := @cfgoutputs_out@ enable_autogen := @enable_autogen@ enable_prof := @enable_prof@ enable_zone_allocator := @enable_zone_allocator@ MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF link_whole_archive := @link_whole_archive@ DSO_LDFLAGS = @DSO_LDFLAGS@ SOREV = @SOREV@ PIC_CFLAGS = @PIC_CFLAGS@ CTARGET = @CTARGET@ LDTARGET = @LDTARGET@ TEST_LD_MODE = @TEST_LD_MODE@ MKLIB = @MKLIB@ AR = @AR@ ARFLAGS = @ARFLAGS@ DUMP_SYMS = @DUMP_SYMS@ AWK := @AWK@ CC_MM = @CC_MM@ LM := @LM@ INSTALL = @INSTALL@ ifeq (macho, $(ABI)) TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" else ifeq (pecoff, $(ABI)) TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" else TEST_LIBRARY_PATH := endif endif LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) # Lists of files. BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/arena.c \ $(srcroot)src/background_thread.c \ $(srcroot)src/base.c \ $(srcroot)src/bin.c \ $(srcroot)src/bitmap.c \ $(srcroot)src/ckh.c \ $(srcroot)src/ctl.c \ $(srcroot)src/div.c \ $(srcroot)src/extent.c \ $(srcroot)src/extent_dss.c \ $(srcroot)src/extent_mmap.c \ $(srcroot)src/hash.c \ $(srcroot)src/hooks.c \ $(srcroot)src/large.c \ $(srcroot)src/log.c \ $(srcroot)src/malloc_io.c \ $(srcroot)src/mutex.c \ $(srcroot)src/mutex_pool.c \ $(srcroot)src/nstime.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ $(srcroot)src/rtree.c \ $(srcroot)src/stats.c \ $(srcroot)src/sz.c \ $(srcroot)src/tcache.c \ $(srcroot)src/ticker.c \ $(srcroot)src/tsd.c \ $(srcroot)src/witness.c ifeq ($(enable_zone_allocator), 1) C_SRCS += $(srcroot)src/zone.c endif ifeq ($(IMPORTLIB),$(SO)) STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) endif ifdef PIC_CFLAGS STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) else STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) endif DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) ifneq ($(SOREV),$(SO)) DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) endif ifeq (1, $(link_whole_archive)) LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive else LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) endif PC := $(objroot)jemalloc.pc MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html) DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3) DOCS := $(DOCS_HTML) $(DOCS_MAN3) C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c 
$(srcroot)test/src/btalloc_0.c \ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c ifeq (1, $(link_whole_archive)) C_UTIL_INTEGRATION_SRCS := C_UTIL_CPP_SRCS := else C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c endif TESTS_UNIT := \ $(srcroot)test/unit/a0.c \ $(srcroot)test/unit/arena_reset.c \ $(srcroot)test/unit/atomic.c \ $(srcroot)test/unit/background_thread.c \ $(srcroot)test/unit/background_thread_enable.c \ $(srcroot)test/unit/base.c \ $(srcroot)test/unit/bitmap.c \ $(srcroot)test/unit/ckh.c \ $(srcroot)test/unit/decay.c \ $(srcroot)test/unit/div.c \ $(srcroot)test/unit/emitter.c \ $(srcroot)test/unit/extent_quantize.c \ $(srcroot)test/unit/fork.c \ $(srcroot)test/unit/hash.c \ $(srcroot)test/unit/hooks.c \ $(srcroot)test/unit/junk.c \ $(srcroot)test/unit/junk_alloc.c \ $(srcroot)test/unit/junk_free.c \ $(srcroot)test/unit/log.c \ $(srcroot)test/unit/mallctl.c \ $(srcroot)test/unit/malloc_io.c \ $(srcroot)test/unit/math.c \ $(srcroot)test/unit/mq.c \ $(srcroot)test/unit/mtx.c \ $(srcroot)test/unit/pack.c \ $(srcroot)test/unit/pages.c \ $(srcroot)test/unit/ph.c \ $(srcroot)test/unit/prng.c \ $(srcroot)test/unit/prof_accum.c \ $(srcroot)test/unit/prof_active.c \ $(srcroot)test/unit/prof_gdump.c \ $(srcroot)test/unit/prof_idump.c \ $(srcroot)test/unit/prof_reset.c \ $(srcroot)test/unit/prof_tctx.c \ $(srcroot)test/unit/prof_thread_name.c \ $(srcroot)test/unit/ql.c \ $(srcroot)test/unit/qr.c \ $(srcroot)test/unit/rb.c \ $(srcroot)test/unit/retained.c \ $(srcroot)test/unit/rtree.c \ $(srcroot)test/unit/SFMT.c \ $(srcroot)test/unit/size_classes.c \ $(srcroot)test/unit/slab.c \ $(srcroot)test/unit/smoothstep.c \ $(srcroot)test/unit/spin.c \ $(srcroot)test/unit/stats.c \ $(srcroot)test/unit/stats_print.c \ $(srcroot)test/unit/ticker.c \ $(srcroot)test/unit/nstime.c \ $(srcroot)test/unit/tsd.c \ $(srcroot)test/unit/witness.c \ $(srcroot)test/unit/zero.c ifeq (@enable_prof@, 1) TESTS_UNIT += \ $(srcroot)test/unit/arena_reset_prof.c endif TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ $(srcroot)test/integration/allocated.c \ $(srcroot)test/integration/extent.c \ $(srcroot)test/integration/mallocx.c \ $(srcroot)test/integration/MALLOCX_ARENA.c \ $(srcroot)test/integration/overflow.c \ $(srcroot)test/integration/posix_memalign.c \ $(srcroot)test/integration/rallocx.c \ $(srcroot)test/integration/sdallocx.c \ $(srcroot)test/integration/thread_arena.c \ $(srcroot)test/integration/thread_tcache_enabled.c \ $(srcroot)test/integration/xallocx.c ifeq (@enable_cxx@, 1) CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp else CPP_SRCS := TESTS_INTEGRATION_CPP := endif TESTS_STRESS := $(srcroot)test/stress/microbench.c TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS) PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h) C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O)) C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym) C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O)) C_PIC_OBJS := 
$(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O)) C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O)) C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym) C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O)) C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS) TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O)) TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS) .PHONY: all dist build_doc_html build_doc_man build_doc .PHONY: install_bin install_include install_lib .PHONY: install_doc_html install_doc_man install_doc install .PHONY: tests check clean distclean relclean .SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS) # Default target. all: build_lib dist: build_doc $(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< $(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< build_doc_html: $(DOCS_HTML) build_doc_man: $(DOCS_MAN3) build_doc: $(DOCS) # # Include generated dependency files. 
# ifdef CC_MM -include $(C_SYM_OBJS:%.$(O)=%.d) -include $(C_OBJS:%.$(O)=%.d) -include $(CPP_OBJS:%.$(O)=%.d) -include $(C_PIC_OBJS:%.$(O)=%.d) -include $(CPP_PIC_OBJS:%.$(O)=%.d) -include $(C_JET_SYM_OBJS:%.$(O)=%.d) -include $(C_JET_OBJS:%.$(O)=%.d) -include $(C_TESTLIB_OBJS:%.$(O)=%.d) -include $(TESTS_OBJS:%.$(O)=%.d) -include $(TESTS_CPP_OBJS:%.$(O)=%.d) endif $(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c $(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE $(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O) $(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c $(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp $(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c $(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) $(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp $(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS) $(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c $(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE $(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O) $(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c $(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET $(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c $(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB $(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST $(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST $(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c $(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp $(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include ifneq ($(IMPORTLIB),$(SO)) $(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT endif # Dependencies. 
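# When CC_MM is empty, the conservative fallback below makes every object
# depend on every header; when the compiler supports -MM, the per-object .d
# fragments included above supply exact dependencies instead.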
ifndef CC_MM HEADER_DIRS = $(srcroot)include/jemalloc/internal \ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))) $(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h endif $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h $(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): @mkdir -p $(@D) $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif $(C_SYMS): %.sym: @mkdir -p $(@D) $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@ $(C_JET_SYMS): %.sym: @mkdir -p $(@D) $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@ $(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS) $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ $(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS) $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ %.h: %.gen.h @if ! `cmp -s $< $@` ; then echo "cp $< $<"; cp $< $@ ; fi $(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O): @mkdir -p $(@D) $(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif ifneq ($(SOREV),$(SO)) %.$(SO) : %.$(SOREV) @mkdir -p $(@D) ln -sf $( $(srcroot)config.stamp.in $(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure ./$(objroot)config.status @touch $@ # There must be some action in order for make to re-read Makefile when it is # out of date. 
$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp @true endif jemalloc-sys-0.3.2/jemalloc/msvc/jemalloc_vc2015.sln010064400007650000024000000074501340421340100203750ustar0000000000000000 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 14 VisualStudioVersion = 14.0.24720.0 MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" ProjectSection(SolutionItems) = preProject ReadMe.txt = ReadMe.txt EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Debug-static|x64 = Debug-static|x64 Debug-static|x86 = Debug-static|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 Release-static|x64 = Release-static|x64 Release-static|x86 = Release-static|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal jemalloc-sys-0.3.2/jemalloc/msvc/jemalloc_vc2017.sln010064400007650000024000000074501340421340100203770ustar0000000000000000 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 14 VisualStudioVersion = 14.0.24720.0 MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" ProjectSection(SolutionItems) = preProject ReadMe.txt = ReadMe.txt EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2017\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2017\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Debug-static|x64 = Debug-static|x64 Debug-static|x86 = Debug-static|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 Release-static|x64 = Release-static|x64 Release-static|x86 = Release-static|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj010064400007650000024000000453661340421341300246460ustar0000000000000000 Debug-static Win32 Debug-static x64 Debug Win32 Release-static Win32 Release-static x64 Release Win32 Debug x64 Release x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A} Win32Proj jemalloc 8.1 DynamicLibrary true v140 MultiByte StaticLibrary true v140 MultiByte DynamicLibrary false v140 true MultiByte StaticLibrary false v140 true MultiByte DynamicLibrary true v140 MultiByte StaticLibrary true v140 MultiByte DynamicLibrary false v140 true MultiByte StaticLibrary false v140 true MultiByte $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)d $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-$(PlatformToolset)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-$(PlatformToolset)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)d $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug 4090;4146;4267;4334 OldStyle false Windows true Level3 MaxSpeed true true 
JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded 4090;4146;4267;4334 OldStyle Windows true true true jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters010064400007650000024000000066751340421341300263150ustar0000000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj010064400007650000024000000455261340421340100264470ustar0000000000000000 Debug-static Win32 Debug-static x64 Debug Win32 Release-static Win32 Release-static x64 Release Win32 Debug x64 Release x64 {09028CFD-4EB7-491D-869C-0708DB97ED44} Win32Proj test_threads 8.1 Application true v140 MultiByte Application true v140 MultiByte Application false v140 true MultiByte Application false v140 true MultiByte Application true v140 MultiByte Application true v140 MultiByte Application false v140 true MultiByte Application false v140 true MultiByte $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ true $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ true true $(SolutionDir)$(Platform)\$(Configuration)\ true $(SolutionDir)$(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false Level3 Disabled WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true $(SolutionDir)$(Platform)\$(Configuration) jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 Disabled JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug 
Console true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 Disabled _DEBUG;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) $(SolutionDir)$(Platform)\$(Configuration) Level3 Disabled JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug Console true jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) $(SolutionDir)$(Platform)\$(Configuration) Level3 MaxSpeed true true WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) {8d6bb292-9e1c-413d-9f98-4864bdc1514a} jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters010064400007650000024000000017251340421340100301070ustar0000000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx {93995380-89BD-4b04-88EB-625FBE52EBFB} h;hh;hpp;hxx;hm;inl;inc;xsd Source Files Source Files Header Files 
jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj

[Visual Studio project file. The XML markup did not survive extraction; only element
text remains. Recoverable details: project GUID {8D6BB292-9E1C-413D-9F98-4864BDC1514A},
toolset v141, Debug/Release and Debug-static/Release-static configurations for Win32
and x64, built as DynamicLibrary (_REENTRANT;_WINDLL;DLLEXPORT) or StaticLibrary
(_REENTRANT;JEMALLOC_EXPORT=;_LIB), with JEMALLOC_DEBUG;_DEBUG vs NDEBUG per
configuration and JEMALLOC_NO_PRIVATE_NAMESPACE variants, include paths
..\..\..\..\include and ..\..\..\..\include\msvc_compat, and warnings
4090;4146;4267;4334 disabled.]

jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters

[Project filters file; XML markup lost. It assigns the jemalloc sources to a
"Source Files" filter (GUID {4FC737F1-C7A5-4376-A066-2A32D752A2FF}, extensions
cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx).]

jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj

[Visual Studio project file for the test_threads Application; XML markup lost.
Recoverable details: project GUID {09028CFD-4EB7-491D-869C-0708DB97ED44}, toolset
v141, the same eight configurations as jemalloc.vcxproj, test include paths
..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat, and
linkage against the matching jemalloc library (jemallocd.lib, jemalloc.lib,
jemalloc-$(PlatformToolset)-$(Configuration).lib, or
jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib) plus the standard Windows
import libraries (kernel32.lib, user32.lib, gdi32.lib, ...). It references the
jemalloc project {8d6bb292-9e1c-413d-9f98-4864bdc1514a}.]

jemalloc-sys-0.3.2/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters

[Project filters file; XML markup lost. It assigns test_threads.cpp and
test_threads_main.cpp to "Source Files" and test_threads.h to "Header Files"
(GUID {93995380-89BD-4b04-88EB-625FBE52EBFB}, extensions h;hh;hpp;hxx;hm;inl;inc;xsd).]

jemalloc-sys-0.3.2/jemalloc/msvc/ReadMe.txt

How to build jemalloc for Windows
=================================

1. Install Cygwin with at least the following packages:
   * autoconf
   * autogen
   * gawk
   * grep
   * sed

2. Install Visual Studio 2015 or 2017 with Visual C++

3. Add Cygwin\bin to the PATH environment variable

4. Open "x64 Native Tools Command Prompt for VS 2017"
   (note: x86/x64 doesn't matter at this point)

5. Generate header files:
   sh -c "CC=cl ./autogen.sh"
6. Now the project can be opened and built in Visual Studio:
   msvc\jemalloc_vc2017.sln

jemalloc-sys-0.3.2/jemalloc/msvc/test_threads/test_threads.cpp

// jemalloc C++ threaded test
// Author: Rustam Abdullaev
// Public Domain

// NOTE: the original header names and template arguments were stripped during
// extraction; the eight includes below are reconstructed from usage.
#include <jemalloc/jemalloc.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <random>
#include <thread>
#include <vector>

using std::vector;
using std::thread;
using std::uniform_int_distribution;
using std::minstd_rand;

int test_threads() {
  je_malloc_conf = "narenas:3";
  int narenas = 0;
  size_t sz = sizeof(narenas);
  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
  if (narenas != 3) {
    printf("Error: unexpected number of arenas: %d\n", narenas);
    return 1;
  }
  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199,
    255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123,
    255265, 2333111 };
  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
  vector<thread> workers;
  static const int numThreads = narenas + 1, numAllocsMax = 25,
    numIter1 = 50, numIter2 = 50;
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated1;
  size_t sz1 = sizeof(allocated1);
  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
  printf("\nPress Enter to start threads...\n");
  getchar();
  printf("Starting %d threads x %d x %d iterations...\n", numThreads,
    numIter1, numIter2);
  for (int i = 0; i < numThreads; i++) {
    workers.emplace_back([tid=i]() {
      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
      minstd_rand rnd(tid * 17);
      uint8_t* ptrs[numAllocsMax];
      int ptrsz[numAllocsMax];
      for (int i = 0; i < numIter1; ++i) {
        thread t([&]() {
          for (int i = 0; i < numIter2; ++i) {
            const int numAllocs = numAllocsMax - sizeDist(rnd);
            for (int j = 0; j < numAllocs; j += 64) {
              const int x = sizeDist(rnd);
              const int sz = sizes[x];
              ptrsz[j] = sz;
              ptrs[j] = (uint8_t*)je_malloc(sz);
              if (!ptrs[j]) {
                printf("Unable to allocate %d bytes in thread %d, iter %d,"
                  " alloc %d. %d\n", sz, tid, i, j, x);
                exit(1);
              }
              for (int k = 0; k < sz; k++)
                ptrs[j][k] = tid + k;
            }
            for (int j = 0; j < numAllocs; j += 64) {
              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
                if (ptrs[j][k] != (uint8_t)(tid + k)) {
                  printf("Memory error in thread %d, iter %d, alloc %d @ %d :"
                    " %02X!=%02X\n", tid, i, j, k, ptrs[j][k],
                    (uint8_t)(tid + k));
                  exit(1);
                }
              je_free(ptrs[j]);
            }
          }
        });
        t.join();
      }
    });
  }
  for (thread& t : workers) {
    t.join();
  }
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated2;
  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
  size_t leaked = allocated2 - allocated1;
  printf("\nDone. Leaked: %zd bytes\n", leaked);
  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
  printf("\nPress Enter to continue...\n");
  getchar();
  return failed ? 1 : 0;
}

jemalloc-sys-0.3.2/jemalloc/msvc/test_threads/test_threads.h

#pragma once

int test_threads();

jemalloc-sys-0.3.2/jemalloc/msvc/test_threads/test_threads_main.cpp

#include "test_threads.h"
// NOTE: three standard-header names were stripped in extraction; <chrono> is
// required for chrono_literals, the other two could not be recovered.
#include <chrono>

using namespace std::chrono_literals;

int main(int argc, char** argv) {
  int rc = test_threads();
  return rc;
}
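The test above drives jemalloc exclusively through its je_-prefixed API
(je_malloc_conf, je_mallctl, je_malloc_stats_print). A minimal standalone
sketch of the same mallctl read pattern follows; it is not part of the crate,
and assumes the prefixed API used by the test and a stats-enabled build:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* Stats are cached per epoch; bump the epoch so the read is fresh. */
    uint64_t epoch = 1;
    size_t esz = sizeof(epoch);
    je_mallctl("epoch", &epoch, &esz, &epoch, esz);

    /* Read a size_t statistic by name, exactly as test_threads() does. */
    size_t active = 0;
    size_t sz = sizeof(active);
    if (je_mallctl("stats.active", (void *)&active, &sz, NULL, 0) == 0) {
        printf("stats.active: %zu bytes\n", active);
    }
    return 0;
}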
jemalloc-sys-0.3.2/jemalloc/README

jemalloc is a general purpose malloc(3) implementation that emphasizes
fragmentation avoidance and scalable concurrency support.  jemalloc first came
into use as the FreeBSD libc allocator in 2005, and since then it has found its
way into numerous applications that rely on its predictable behavior.  In 2010
jemalloc development efforts broadened to include developer support features
such as heap profiling and extensive monitoring/tuning hooks.  Modern jemalloc
releases continue to be integrated back into FreeBSD, and therefore versatility
remains critical.  Ongoing development efforts trend toward making jemalloc
among the best allocators for a broad range of demanding applications, and
eliminating/mitigating weaknesses that have practical repercussions for real
world applications.

The COPYING file contains copyright and licensing information.

The INSTALL file contains information on how to configure, build, and install
jemalloc.

The ChangeLog file contains a brief summary of changes for each release.

URL: http://jemalloc.net/

jemalloc-sys-0.3.2/jemalloc/run_tests.sh

$(dirname "$0")/scripts/gen_run_tests.py | bash

jemalloc-sys-0.3.2/jemalloc/scripts/gen_run_tests.py

#!/usr/bin/env python

import sys
from itertools import combinations
from os import uname
from multiprocessing import cpu_count

# Later, we want to test extended vaddr support. Apparently, the "real" way of
# checking this is flaky on OS X.
bits_64 = sys.maxsize > 2**32

nparallel = cpu_count() * 2

uname = uname()[0]

def powerset(items):
    result = []
    for i in xrange(len(items) + 1):
        result += combinations(items, i)
    return result

possible_compilers = [('gcc', 'g++'), ('clang', 'clang++')]
possible_compiler_opts = [
    '-m32',
]
possible_config_opts = [
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
]
if bits_64:
    possible_config_opts.append('--with-lg-vaddr=56')

possible_malloc_conf_opts = [
    'tcache:false',
    'dss:primary',
    'percpu_arena:percpu',
    'background_thread:true',
]

print 'set -e'
print 'if [ -f Makefile ] ; then make relclean ; fi'
print 'autoconf'
print 'rm -rf run_tests.out'
print 'mkdir run_tests.out'
print 'cd run_tests.out'

ind = 0
for cc, cxx in possible_compilers:
    for compiler_opts in powerset(possible_compiler_opts):
        for config_opts in powerset(possible_config_opts):
            for malloc_conf_opts in powerset(possible_malloc_conf_opts):
                if cc == 'clang' \
                  and '-m32' in compiler_opts \
                  and '--enable-prof' in config_opts:
                    continue
                config_line = (
                    'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror '
                    + 'CC="{} {}" '.format(cc, " ".join(compiler_opts))
                    + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts))
                    + '../../configure '
                    + " ".join(config_opts)
                    + (' --with-malloc-conf=' + ",".join(malloc_conf_opts)
                       if len(malloc_conf_opts) > 0 else '')
                )

                # We don't want to test large vaddr spaces in 32-bit mode.
                if ('-m32' in compiler_opts
                        and '--with-lg-vaddr=56' in config_opts):
                    continue

                # Per CPU arenas are only supported on Linux.
                linux_supported = ('percpu_arena:percpu' in malloc_conf_opts
                    or 'background_thread:true' in malloc_conf_opts)
                # Heap profiling and dss are not supported on OS X.
                darwin_unsupported = ('--enable-prof' in config_opts
                    or 'dss:primary' in malloc_conf_opts)
                if (uname == 'Linux' and linux_supported) \
                  or (not linux_supported and (uname != 'Darwin'
                  or not darwin_unsupported)):
                    print """cat <<EOF > run_test_%(ind)d.sh
#!/bin/sh

set -e

abort() {
    echo "==> Error" >> run_test.log
    echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log"
    exit 255 # Special exit code tells xargs to terminate.
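    # (The driver loop emitted at the end of this generator runs these
    # scripts via xargs -P; a 255 status makes xargs stop launching new jobs.)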
}

# Environment variables are not supported.
run_cmd() {
    echo "==> \$@" >> run_test.log
    \$@ >> run_test.log 2>&1 || abort
}

echo "=> run_test_%(ind)d: %(config_line)s"
mkdir run_test_%(ind)d.out
cd run_test_%(ind)d.out

echo "==> %(config_line)s" >> run_test.log
%(config_line)s >> run_test.log 2>&1 || abort

run_cmd make all tests
run_cmd make check
run_cmd make distclean
EOF
chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line}
                    ind += 1

print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel}

jemalloc-sys-0.3.2/jemalloc/scripts/gen_travis.py

#!/usr/bin/env python

from itertools import combinations

travis_template = """\
language: generic

matrix:
  include:
%s

before_script:
  - autoconf
  - ./configure ${COMPILER_FLAGS:+ \
      CC="$CC $COMPILER_FLAGS" \
      CXX="$CXX $COMPILER_FLAGS" } \
      $CONFIGURE_FLAGS
  - make -j3
  - make -j3 tests

script:
  - make check
"""

# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with osx, clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and the four --with-malloc-conf settings below. To avoid
# abusing travis though, we don't test all 2**10 = 1024 possible combinations
# of these; instead, we only test combinations of up to 2 'unusual' settings,
# under the hope that bugs involving interactions of such settings are rare.
# With 10 unusual settings this yields C(10, 0) + C(10, 1) + C(10, 2) = 56
# matrix rows, before the OS X filters below remove unsupported ones.
MAX_UNUSUAL_OPTIONS = 2

os_default = 'linux'
os_unusual = 'osx'

compilers_default = 'CC=gcc CXX=g++'
compilers_unusual = 'CC=clang CXX=clang++'

compiler_flag_unusuals = ['-m32']

configure_flag_unusuals = [
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
]

malloc_conf_unusuals = [
    'tcache:false',
    'dss:primary',
    'percpu_arena:percpu',
    'background_thread:true',
]

all_unusuals = (
    [os_unusual] + [compilers_unusual] + compiler_flag_unusuals
    + configure_flag_unusuals + malloc_conf_unusuals
)

unusual_combinations_to_test = []
for i in xrange(MAX_UNUSUAL_OPTIONS + 1):
    unusual_combinations_to_test += combinations(all_unusuals, i)

include_rows = ""
for unusual_combination in unusual_combinations_to_test:
    os = os_default
    if os_unusual in unusual_combination:
        os = os_unusual

    compilers = compilers_default
    if compilers_unusual in unusual_combination:
        compilers = compilers_unusual

    compiler_flags = [
        x for x in unusual_combination if x in compiler_flag_unusuals]
    configure_flags = [
        x for x in unusual_combination if x in configure_flag_unusuals]
    malloc_conf = [
        x for x in unusual_combination if x in malloc_conf_unusuals]

    # Filter out unsupported configurations on OS X.
    if os == 'osx' and ('dss:primary' in malloc_conf
            or 'percpu_arena:percpu' in malloc_conf
            or 'background_thread:true' in malloc_conf):
        continue
    if len(malloc_conf) > 0:
        configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf))

    # Filter out an unsupported configuration - heap profiling on OS X.
    if os == 'osx' and '--enable-prof' in configure_flags:
        continue

    # We get some spurious errors when -Warray-bounds is enabled.
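    # Each row below becomes one Travis matrix entry; travis_template's
    # before_script expands COMPILER_FLAGS into the CC/CXX arguments and
    # passes CONFIGURE_FLAGS to ./configure, so only env varies per row.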
    env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" '
        'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format(
        compilers, " ".join(compiler_flags), " ".join(configure_flags))

    include_rows += '    - os: %s\n' % os
    include_rows += '      env: %s\n' % env_string
    if '-m32' in unusual_combination and os == 'linux':
        include_rows += '      addons:\n'
        include_rows += '        apt:\n'
        include_rows += '          packages:\n'
        include_rows += '            - gcc-multilib\n'

print travis_template % include_rows

jemalloc-sys-0.3.2/jemalloc/src/arena.c

#define JEMALLOC_ARENA_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/div.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ /* * Define names for both uninitialized and initialized phases, so that * options and mallctl processing are straightforward. */ const char *percpu_arena_mode_names[] = { "percpu", "phycpu", "disabled", "percpu", "phycpu" }; percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; static atomic_zd_t dirty_decay_ms_default; static atomic_zd_t muzzy_decay_ms_default; const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { #define STEP(step, h, x, y) \ h, SMOOTHSTEP #undef STEP }; static div_info_t arena_binind_div_info[NBINS]; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition.
*/ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, bool is_background_thread); static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin); static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin); /******************************************************************************/ void arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy) { *nthreads += arena_nthreads_get(arena, false); *dss = dss_prec_names[arena_dss_prec_get(arena)]; *dirty_decay_ms = arena_dirty_decay_ms_get(arena); *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); *ndirty += extents_npages_get(&arena->extents_dirty); *nmuzzy += extents_npages_get(&arena->extents_muzzy); } void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, bin_stats_t *bstats, arena_stats_large_t *lstats) { cassert(config_stats); arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, muzzy_decay_ms, nactive, ndirty, nmuzzy); size_t base_allocated, base_resident, base_mapped, metadata_thp; base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, &base_mapped, &metadata_thp); arena_stats_lock(tsdn, &arena->stats); arena_stats_accum_zu(&astats->mapped, base_mapped + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); arena_stats_accum_zu(&astats->retained, extents_npages_get(&arena->extents_retained) << LG_PAGE); arena_stats_accum_u64(&astats->decay_dirty.npurge, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.npurge)); arena_stats_accum_u64(&astats->decay_dirty.nmadvise, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.nmadvise)); arena_stats_accum_u64(&astats->decay_dirty.purged, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.purged)); arena_stats_accum_u64(&astats->decay_muzzy.npurge, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.npurge)); arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.nmadvise)); arena_stats_accum_u64(&astats->decay_muzzy.purged, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.purged)); arena_stats_accum_zu(&astats->base, base_allocated); arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); arena_stats_accum_zu(&astats->metadata_thp, metadata_thp); arena_stats_accum_zu(&astats->resident, base_resident + (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + extents_npages_get(&arena->extents_dirty) + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); for (szind_t i = 0; i < NSIZES - NBINS; i++) { uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].nmalloc); arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].ndalloc); arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); 
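/* Each per-class total also rolls up into the arena-wide large counters; nrequests below adds nmalloc because large requests served without a tcache refill are recorded only as nmalloc. */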
arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].nrequests); arena_stats_accum_u64(&lstats[i].nrequests, nmalloc + nrequests); arena_stats_accum_u64(&astats->nrequests_large, nmalloc + nrequests); assert(nmalloc >= ndalloc); assert(nmalloc - ndalloc <= SIZE_T_MAX); size_t curlextents = (size_t)(nmalloc - ndalloc); lstats[i].curlextents += curlextents; arena_stats_accum_zu(&astats->allocated_large, curlextents * sz_index2size(NBINS + i)); } arena_stats_unlock(tsdn, &arena->stats); /* tcache_bytes counts currently cached bytes. */ atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); cache_bin_array_descriptor_t *descriptor; ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { szind_t i = 0; for (; i < NBINS; i++) { cache_bin_t *tbin = &descriptor->bins_small[i]; arena_stats_accum_zu(&astats->tcache_bytes, tbin->ncached * sz_index2size(i)); } for (; i < nhbins; i++) { cache_bin_t *tbin = &descriptor->bins_large[i]; arena_stats_accum_zu(&astats->tcache_bytes, tbin->ncached * sz_index2size(i)); } } malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[arena_prof_mutex_tcache_list], &arena->tcache_ql_mtx); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ malloc_mutex_lock(tsdn, &arena->mtx); \ malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ &arena->mtx); \ malloc_mutex_unlock(tsdn, &arena->mtx); /* Gather per arena mutex profiling data. */ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, arena_prof_mutex_extent_avail) READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, arena_prof_mutex_extents_dirty) READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, arena_prof_mutex_extents_muzzy) READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, arena_prof_mutex_extents_retained) READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, arena_prof_mutex_decay_dirty) READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, arena_prof_mutex_decay_muzzy) READ_ARENA_MUTEX_PROF_DATA(base->mtx, arena_prof_mutex_base) #undef READ_ARENA_MUTEX_PROF_DATA nstime_copy(&astats->uptime, &arena->create_time); nstime_update(&astats->uptime); nstime_subtract(&astats->uptime, &arena->create_time); for (szind_t i = 0; i < NBINS; i++) { bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]); } } void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, extent); if (arena_dirty_decay_ms_get(arena) == 0) { arena_decay_dirty(tsdn, arena, false, true); } else { arena_background_thread_inactivity_check(tsdn, arena, false); } } static void * arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { void *ret; arena_slab_data_t *slab_data = extent_slab_data_get(slab); size_t regind; assert(extent_nfree_get(slab) > 0); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); ret = (void *)((uintptr_t)extent_addr_get(slab) + (uintptr_t)(bin_info->reg_size * regind)); extent_nfree_dec(slab); return ret; } #ifndef JEMALLOC_JET static #endif size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { size_t diff, regind; /* Freeing a pointer outside the slab can cause assertion failure. 
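* For example, with reg_size == 32, freeing slab_base + 100 trips the interior-pointer assert further down, since 100 % 32 != 0; the two bounds asserts catch pointers outside [slab_base, slab_end).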
*/ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); /* Freeing an interior pointer can cause assertion failure. */ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % (uintptr_t)bin_infos[binind].reg_size == 0); diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); /* Avoid doing division with a variable divisor. */ regind = div_compute(&arena_binind_div_info[binind], diff); assert(regind < bin_infos[binind].nregs); return regind; } static void arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) { szind_t binind = extent_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; size_t regind = arena_slab_regind(slab, binind, ptr); assert(extent_nfree_get(slab) < bin_info->nregs); /* Freeing an unallocated pointer can cause assertion failure. */ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); extent_nfree_inc(slab); } static void arena_nactive_add(arena_t *arena, size_t add_pages) { atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); } static void arena_nactive_sub(arena_t *arena, size_t sub_pages) { assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); } static void arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t index, hindex; cassert(config_stats); if (usize < LARGE_MINCLASS) { usize = LARGE_MINCLASS; } index = sz_size2index(usize); hindex = (index >= NBINS) ? index - NBINS : 0; arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.lstats[hindex].nmalloc, 1); } static void arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t index, hindex; cassert(config_stats); if (usize < LARGE_MINCLASS) { usize = LARGE_MINCLASS; } index = sz_size2index(usize); hindex = (index >= NBINS) ? index - NBINS : 0; arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.lstats[hindex].ndalloc, 1); } static void arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, size_t usize) { arena_large_dalloc_stats_update(tsdn, arena, oldusize); arena_large_malloc_stats_update(tsdn, arena, usize); } extent_t * arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); szind_t szind = sz_size2index(usize); size_t mapped_add; bool commit = true; extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); if (extent == NULL) { extent = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); } size_t size = usize + sz_large_pad; if (extent == NULL) { extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); if (config_stats) { /* * extent may be NULL on OOM, but in that case * mapped_add isn't used below, so there's no need to * conditionally set it to 0 here.
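* (mapped_add is only read inside the extent != NULL branch further down.)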
*/ mapped_add = size; } } else if (config_stats) { mapped_add = 0; } if (extent != NULL) { if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_malloc_stats_update(tsdn, arena, usize); if (mapped_add != 0) { arena_stats_add_zu(tsdn, &arena->stats, &arena->stats.mapped, mapped_add); } arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_add(arena, size >> LG_PAGE); } return extent; } void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_dalloc_stats_update(tsdn, arena, extent_usize_get(extent)); arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); } void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = oldusize - usize; if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_sub(arena, udiff >> LG_PAGE); } void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = usize - oldusize; if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_add(arena, udiff >> LG_PAGE); } static ssize_t arena_decay_ms_read(arena_decay_t *decay) { return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); } static void arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); } static void arena_decay_deadline_init(arena_decay_t *decay) { /* * Generate a new deadline that is uniformly random within the next * epoch after the current one. */ nstime_copy(&decay->deadline, &decay->epoch); nstime_add(&decay->deadline, &decay->interval); if (arena_decay_ms_read(decay) > 0) { nstime_t jitter; nstime_init(&jitter, prng_range_u64(&decay->jitter_state, nstime_ns(&decay->interval))); nstime_add(&decay->deadline, &jitter); } } static bool arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { return (nstime_compare(&decay->deadline, time) <= 0); } static size_t arena_decay_backlog_npages_limit(const arena_decay_t *decay) { uint64_t sum; size_t npages_limit_backlog; unsigned i; /* * For each element of decay_backlog, multiply by the corresponding * fixed-point smoothstep decay factor. Sum the products, then divide * to round down to the nearest whole number of pages. */ sum = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { sum += decay->backlog[i] * h_steps[i]; } npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); return npages_limit_backlog; } static void arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { size_t npages_delta = (current_npages > decay->nunpurged) ? 
current_npages - decay->nunpurged : 0; decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; if (config_debug) { if (current_npages > decay->ceil_npages) { decay->ceil_npages = current_npages; } size_t npages_limit = arena_decay_backlog_npages_limit(decay); assert(decay->ceil_npages >= npages_limit); if (decay->ceil_npages > npages_limit) { decay->ceil_npages = npages_limit; } } } static void arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, size_t current_npages) { if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * sizeof(size_t)); } else { size_t nadvance_z = (size_t)nadvance_u64; assert((uint64_t)nadvance_z == nadvance_u64); memmove(decay->backlog, &decay->backlog[nadvance_z], (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); if (nadvance_z > 1) { memset(&decay->backlog[SMOOTHSTEP_NSTEPS - nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); } } arena_decay_backlog_update_last(decay, current_npages); } static void arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, size_t current_npages, size_t npages_limit, bool is_background_thread) { if (current_npages > npages_limit) { arena_decay_to_limit(tsdn, arena, decay, extents, false, npages_limit, current_npages - npages_limit, is_background_thread); } } static void arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, size_t current_npages) { assert(arena_decay_deadline_reached(decay, time)); nstime_t delta; nstime_copy(&delta, time); nstime_subtract(&delta, &decay->epoch); uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); assert(nadvance_u64 > 0); /* Add nadvance_u64 decay intervals to epoch. */ nstime_copy(&delta, &decay->interval); nstime_imultiply(&delta, nadvance_u64); nstime_add(&decay->epoch, &delta); /* Set a new deadline. */ arena_decay_deadline_init(decay); /* Update the backlog. */ arena_decay_backlog_update(decay, nadvance_u64, current_npages); } static void arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, const nstime_t *time, bool is_background_thread) { size_t current_npages = extents_npages_get(extents); arena_decay_epoch_advance_helper(decay, time, current_npages); size_t npages_limit = arena_decay_backlog_npages_limit(decay); /* We may unlock decay->mtx when try_purge(). Finish logging first. */ decay->nunpurged = (npages_limit > current_npages) ? 
npages_limit : current_npages; if (!background_thread_enabled() || is_background_thread) { arena_decay_try_purge(tsdn, arena, decay, extents, current_npages, npages_limit, is_background_thread); } } static void arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { arena_decay_ms_write(decay, decay_ms); if (decay_ms > 0) { nstime_init(&decay->interval, (uint64_t)decay_ms * KQU(1000000)); nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); } nstime_init(&decay->epoch, 0); nstime_update(&decay->epoch); decay->jitter_state = (uint64_t)(uintptr_t)decay; arena_decay_deadline_init(decay); decay->nunpurged = 0; memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); } static bool arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, arena_stats_decay_t *stats) { if (config_debug) { for (size_t i = 0; i < sizeof(arena_decay_t); i++) { assert(((char *)decay)[i] == 0); } decay->ceil_npages = 0; } if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, malloc_mutex_rank_exclusive)) { return true; } decay->purging = false; arena_decay_reinit(decay, decay_ms); /* Memory is zeroed, so there is no need to clear stats. */ if (config_stats) { decay->stats = stats; } return false; } static bool arena_decay_ms_valid(ssize_t decay_ms) { if (decay_ms < -1) { return false; } if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * KQU(1000)) { return true; } return false; } static bool arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool is_background_thread) { malloc_mutex_assert_owner(tsdn, &decay->mtx); /* Purge all or nothing if the option is disabled. */ ssize_t decay_ms = arena_decay_ms_read(decay); if (decay_ms <= 0) { if (decay_ms == 0) { arena_decay_to_limit(tsdn, arena, decay, extents, false, 0, extents_npages_get(extents), is_background_thread); } return false; } nstime_t time; nstime_init(&time, 0); nstime_update(&time); if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) > 0)) { /* * Time went backwards. Move the epoch back in time and * generate a new deadline, with the expectation that time * typically flows forward for long enough periods of time that * epochs complete. Unfortunately, this strategy is susceptible * to clock jitter triggering premature epoch advances, but * clock jitter estimation and compensation isn't feasible here * because calls into this code are event-driven. */ nstime_copy(&decay->epoch, &time); arena_decay_deadline_init(decay); } else { /* Verify that time does not go backwards. */ assert(nstime_compare(&decay->epoch, &time) <= 0); } /* * If the deadline has been reached, advance to the current epoch and * purge to the new limit if necessary. Note that dirty pages created * during the current epoch are not subject to purge until a future * epoch, so as a result purging only happens during epoch advances, or * being triggered by background threads (scheduled event). 
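* For example, with dirty_decay_ms = 10000 and SMOOTHSTEP_NSTEPS = 200, each epoch lasts 50ms, and a newly dirtied page is exposed to purging only gradually as its backlog slot ages.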
*/ bool advance_epoch = arena_decay_deadline_reached(decay, &time); if (advance_epoch) { arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, is_background_thread); } else if (is_background_thread) { arena_decay_try_purge(tsdn, arena, decay, extents, extents_npages_get(extents), arena_decay_backlog_npages_limit(decay), is_background_thread); } return advance_epoch; } static ssize_t arena_decay_ms_get(arena_decay_t *decay) { return arena_decay_ms_read(decay); } ssize_t arena_dirty_decay_ms_get(arena_t *arena) { return arena_decay_ms_get(&arena->decay_dirty); } ssize_t arena_muzzy_decay_ms_get(arena_t *arena) { return arena_decay_ms_get(&arena->decay_muzzy); } static bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; } malloc_mutex_lock(tsdn, &decay->mtx); /* * Restart decay backlog from scratch, which may cause many dirty pages * to be immediately purged. It would conceptually be possible to map * the old backlog onto the new backlog, but there is no justification * for such complexity since decay_ms changes are intended to be * infrequent, either between the {-1, 0, >0} states, or a one-time * arbitrary change during initial arena configuration. */ arena_decay_reinit(decay, decay_ms); arena_maybe_decay(tsdn, arena, decay, extents, false); malloc_mutex_unlock(tsdn, &decay->mtx); return false; } bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) { return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, &arena->extents_dirty, decay_ms); } bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) { return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, &arena->extents_muzzy, decay_ms); } static size_t arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, size_t npages_decay_max, extent_list_t *decay_extents) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Stash extents according to npages_limit. */ size_t nstashed = 0; extent_t *extent; while (nstashed < npages_decay_max && (extent = extents_evict(tsdn, arena, r_extent_hooks, extents, npages_limit)) != NULL) { extent_list_append(decay_extents, extent); nstashed += extent_size_get(extent) >> LG_PAGE; } return nstashed; } static size_t arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, bool all, extent_list_t *decay_extents, bool is_background_thread) { UNUSED size_t nmadvise, nunmapped; size_t npurged; if (config_stats) { nmadvise = 0; nunmapped = 0; } npurged = 0; ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); for (extent_t *extent = extent_list_first(decay_extents); extent != NULL; extent = extent_list_first(decay_extents)) { if (config_stats) { nmadvise++; } size_t npages = extent_size_get(extent) >> LG_PAGE; npurged += npages; extent_list_remove(decay_extents, extent); switch (extents_state_get(extents)) { case extent_state_active: not_reached(); case extent_state_dirty: if (!all && muzzy_decay_ms != 0 && !extent_purge_lazy_wrapper(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent))) { extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_muzzy, extent); arena_background_thread_inactivity_check(tsdn, arena, is_background_thread); break; } /* Fall through. 
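* (Reached when this is a full decay (all), muzzy decay is set to 0, or the lazy purge failed: the dirty extent is then unmapped just like a muzzy one.)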
*/ case extent_state_muzzy: extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent); if (config_stats) { nunmapped += npages; } break; case extent_state_retained: default: not_reached(); } } if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, 1); arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->nmadvise, nmadvise); arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, npurged); arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, nunmapped << LG_PAGE); arena_stats_unlock(tsdn, &arena->stats); } return npurged; } /* * npages_limit: Decay at most npages_decay_max pages without violating the * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper * bound on number of pages in order to prevent unbounded growth (namely in * stashed), otherwise unbounded new pages could be added to extents during the * current decay run, so that the purging thread never finishes. */ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, bool is_background_thread) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 1); malloc_mutex_assert_owner(tsdn, &decay->mtx); if (decay->purging) { return; } decay->purging = true; malloc_mutex_unlock(tsdn, &decay->mtx); extent_hooks_t *extent_hooks = extent_hooks_get(arena); extent_list_t decay_extents; extent_list_init(&decay_extents); size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, npages_limit, npages_decay_max, &decay_extents); if (npurge != 0) { UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, &extent_hooks, decay, extents, all, &decay_extents, is_background_thread); assert(npurged == npurge); } malloc_mutex_lock(tsdn, &decay->mtx); decay->purging = false; } static bool arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool is_background_thread, bool all) { if (all) { malloc_mutex_lock(tsdn, &decay->mtx); arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, extents_npages_get(extents), is_background_thread); malloc_mutex_unlock(tsdn, &decay->mtx); return false; } if (malloc_mutex_trylock(tsdn, &decay->mtx)) { /* No need to wait if another thread is in progress. */ return true; } bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, is_background_thread); UNUSED size_t npages_new; if (epoch_advanced) { /* Backlog is updated on epoch advance. 
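* Its newest slot is the number of dirty pages created during the epoch that just ended; it is forwarded to the background thread below so the thread can re-evaluate its sleep interval.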
*/ npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; } malloc_mutex_unlock(tsdn, &decay->mtx); if (have_background_thread && background_thread_enabled() && epoch_advanced && !is_background_thread) { background_thread_interval_check(tsdn, arena, decay, npages_new); } return false; } static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { return arena_decay_impl(tsdn, arena, &arena->decay_dirty, &arena->extents_dirty, is_background_thread, all); } static bool arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, &arena->extents_muzzy, is_background_thread, all); } void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { return; } arena_decay_muzzy(tsdn, arena, is_background_thread, all); } static void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); } static void arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { assert(extent_nfree_get(slab) > 0); extent_heap_insert(&bin->slabs_nonfull, slab); } static void arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { extent_heap_remove(&bin->slabs_nonfull, slab); } static extent_t * arena_bin_slabs_nonfull_tryget(bin_t *bin) { extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); if (slab == NULL) { return NULL; } if (config_stats) { bin->stats.reslabs++; } return slab; } static void arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { assert(extent_nfree_get(slab) == 0); /* * Tracking extents is required by arena_reset, which is not allowed * for auto arenas. Bypass this step to avoid touching the extent * linkage (often results in cache misses) for auto arenas. */ if (arena_is_auto(arena)) { return; } extent_list_append(&bin->slabs_full, slab); } static void arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { if (arena_is_auto(arena)) { return; } extent_list_remove(&bin->slabs_full, slab); } void arena_reset(tsd_t *tsd, arena_t *arena) { /* * Locking in this function is unintuitive. The caller guarantees that * no concurrent operations are happening in this arena, but there are * still reasons that some locking is necessary: * * - Some of the functions in the transitive closure of calls assume * appropriate locks are held, and in some cases these locks are * temporarily dropped to avoid lock order reversal or deadlock due to * reentry. * - mallctl("epoch", ...) may concurrently refresh stats. While * strictly speaking this is a "concurrent operation", disallowing * stats refreshes would impose an inconvenient burden. */ /* Large allocations. 
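* large_mtx is dropped around each large_dalloc() call so the deallocation path can take other locks; the list head is re-read after the mutex is reacquired.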
*/ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); for (extent_t *extent = extent_list_first(&arena->large); extent != NULL; extent = extent_list_first(&arena->large)) { void *ptr = extent_base_get(extent); size_t usize; malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != NSIZES); if (config_stats || (config_prof && opt_prof)) { usize = sz_index2size(alloc_ctx.szind); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); } /* Remove large allocation from prof sample set. */ if (config_prof && opt_prof) { prof_free(tsd, ptr, usize, &alloc_ctx); } large_dalloc(tsd_tsdn(tsd), extent); malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); /* Bins. */ for (unsigned i = 0; i < NBINS; i++) { extent_t *slab; bin_t *bin = &arena->bins[i]; malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (bin->slabcur != NULL) { slab = bin->slabcur; bin->slabcur = NULL; malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } for (slab = extent_list_first(&bin->slabs_full); slab != NULL; slab = extent_list_first(&bin->slabs_full)) { arena_bin_slabs_full_remove(arena, bin, slab); malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } if (config_stats) { bin->stats.curregs = 0; bin->stats.curslabs = 0; } malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); } static void arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { /* * Iterate over the retained extents and destroy them. This gives the * extent allocator underlying the extent hooks an opportunity to unmap * all retained memory without having to keep its own metadata * structures. In practice, virtual memory for dss-allocated extents is * leaked here, so best practice is to avoid dss for arenas to be * destroyed, or provide custom extent hooks that track retained * dss-based extents for later reuse. */ extent_hooks_t *extent_hooks = extent_hooks_get(arena); extent_t *extent; while ((extent = extents_evict(tsdn, arena, &extent_hooks, &arena->extents_retained, 0)) != NULL) { extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); } } void arena_destroy(tsd_t *tsd, arena_t *arena) { assert(base_ind_get(arena->base) >= narenas_auto); assert(arena_nthreads_get(arena, false) == 0); assert(arena_nthreads_get(arena, true) == 0); /* * No allocations have occurred since arena_reset() was called. * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached * extents, so only retained extents may remain. */ assert(extents_npages_get(&arena->extents_dirty) == 0); assert(extents_npages_get(&arena->extents_muzzy) == 0); /* Deallocate retained memory. */ arena_destroy_retained(tsd_tsdn(tsd), arena); /* * Remove the arena pointer from the arenas array. We rely on the fact * that there is no way for the application to get a dirty read from the * arenas array unless there is an inherent race in the application * involving access of an arena being concurrently destroyed. 
The * application must synchronize knowledge of the arena's validity, so as * long as we use an atomic write to update the arenas array, the * application will get a clean read any time after it synchronizes * knowledge that the arena is no longer valid. */ arena_set(base_ind_get(arena->base), NULL); /* * Destroy the base allocator, which manages all metadata ever mapped by * this arena. */ base_delete(tsd_tsdn(tsd), arena->base); } static extent_t * arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, szind_t szind) { extent_t *slab; bool zero, commit; witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); zero = false; commit = true; slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); if (config_stats && slab != NULL) { arena_stats_mapped_add(tsdn, &arena->stats, bin_info->slab_size); } return slab; } static extent_t * arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, const bin_info_t *bin_info) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; szind_t szind = sz_size2index(bin_info->reg_size); bool zero = false; bool commit = true; extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit); if (slab == NULL) { slab = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit); } if (slab == NULL) { slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, bin_info, szind); if (slab == NULL) { return NULL; } } assert(extent_slab_get(slab)); /* Initialize slab internals. */ arena_slab_data_t *slab_data = extent_slab_data_get(slab); extent_nfree_set(slab, bin_info->nregs); bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); return slab; } static extent_t * arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind) { extent_t *slab; const bin_info_t *bin_info; /* Look for a usable slab. */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { return slab; } /* No existing slabs have any space available. */ bin_info = &bin_infos[binind]; /* Allocate a new slab. */ malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ slab = arena_slab_alloc(tsdn, arena, binind, bin_info); /********************************/ malloc_mutex_lock(tsdn, &bin->lock); if (slab != NULL) { if (config_stats) { bin->stats.nslabs++; bin->stats.curslabs++; } return slab; } /* * arena_slab_alloc() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { return slab; } return NULL; } /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ static void * arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind) { const bin_info_t *bin_info; extent_t *slab; bin_info = &bin_infos[binind]; if (!arena_is_auto(arena) && bin->slabcur != NULL) { arena_bin_slabs_full_insert(arena, bin, bin->slabcur); bin->slabcur = NULL; } slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind); if (bin->slabcur != NULL) { /* * Another thread updated slabcur while this one ran without the * bin lock in arena_bin_nonfull_slab_get(). 
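* If the refreshed slabcur still has free regions it is preferred, and the slab obtained above is freed or re-filed instead, as described below.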
*/ if (extent_nfree_get(bin->slabcur) > 0) { void *ret = arena_slab_reg_alloc(bin->slabcur, bin_info); if (slab != NULL) { /* * arena_slab_alloc() may have allocated slab, * or it may have been pulled from * slabs_nonfull. Therefore it is unsafe to * make any assumptions about how slab has * previously been used, and * arena_bin_lower_slab() must be called, as if * a region were just deallocated from the slab. */ if (extent_nfree_get(slab) == bin_info->nregs) { arena_dalloc_bin_slab(tsdn, arena, slab, bin); } else { arena_bin_lower_slab(tsdn, arena, slab, bin); } } return ret; } arena_bin_slabs_full_insert(arena, bin, bin->slabcur); bin->slabcur = NULL; } if (slab == NULL) { return NULL; } bin->slabcur = slab; assert(extent_nfree_get(bin->slabcur) > 0); return arena_slab_reg_alloc(slab, bin_info); } void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; bin_t *bin; assert(tbin->ncached == 0); if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { prof_idump(tsdn); } bin = &arena->bins[binind]; malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tcache->lg_fill_div[binind]); i < nfill; i++) { extent_t *slab; void *ptr; if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]); } else { ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind); } if (ptr == NULL) { /* * OOM. tbin->avail isn't yet filled down to its first * element, so the successful allocations (if any) must * be moved just before tbin->avail before bailing out. */ if (i > 0) { memmove(tbin->avail - i, tbin->avail - nfill, i * sizeof(void *)); } break; } if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ptr, &bin_infos[binind], true); } /* Insert such that low regions get used first. 
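* The cache pops *(avail - ncached) first, which after this fill is the slot written at i == 0, i.e. the lowest regions the slab handed out.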
*/ *(tbin->avail - nfill + i) = ptr; } if (config_stats) { bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.curregs += i; bin->stats.nfills++; tbin->tstats.nrequests = 0; } malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; arena_decay_tick(tsdn, arena); } void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { if (!zero) { memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); } } static void arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); } arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = arena_dalloc_junk_small_impl; static void * arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; bin_t *bin; size_t usize; extent_t *slab; assert(binind < NBINS); bin = &arena->bins[binind]; usize = sz_index2size(binind); malloc_mutex_lock(tsdn, &bin->lock); if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); } else { ret = arena_bin_malloc_hard(tsdn, arena, bin, binind); } if (ret == NULL) { malloc_mutex_unlock(tsdn, &bin->lock); return NULL; } if (config_stats) { bin->stats.nmalloc++; bin->stats.nrequests++; bin->stats.curregs++; } malloc_mutex_unlock(tsdn, &bin->lock); if (config_prof && arena_prof_accum(tsdn, arena, usize)) { prof_idump(tsdn); } if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], false); } else if (unlikely(opt_zero)) { memset(ret, 0, usize); } } } else { if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], true); } memset(ret, 0, usize); } arena_decay_tick(tsdn, arena); return ret; } void * arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero) { assert(!tsdn_null(tsdn) || arena != NULL); if (likely(!tsdn_null(tsdn))) { arena = arena_choose(tsdn_tsd(tsdn), arena); } if (unlikely(arena == NULL)) { return NULL; } if (likely(size <= SMALL_MAXCLASS)) { return arena_malloc_small(tsdn, arena, ind, zero); } return large_malloc(tsdn, arena, sz_index2size(ind), zero); } void * arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { /* Small; alignment doesn't require special slab placement. 
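* (Callers compute usize via sz_sa2u(), so a naturally placed region of this size class already satisfies the requested sub-page alignment.)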
*/ ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), zero, tcache, true); } else { if (likely(alignment <= CACHELINE)) { ret = large_malloc(tsdn, arena, usize, zero); } else { ret = large_palloc(tsdn, arena, usize, alignment, zero); } } return ret; } void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { cassert(config_prof); assert(ptr != NULL); assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); assert(usize <= SMALL_MAXCLASS); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); arena_t *arena = extent_arena_get(extent); szind_t szind = sz_size2index(usize); extent_szind_set(extent, szind); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, szind, false); prof_accum_cancel(tsdn, &arena->prof_accum, usize); assert(isalloc(tsdn, ptr) == usize); } static size_t arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { cassert(config_prof); assert(ptr != NULL); extent_szind_set(extent, NBINS); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, NBINS, false); assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); return LARGE_MINCLASS; } void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) { cassert(config_prof); assert(opt_prof); extent_t *extent = iealloc(tsdn, ptr); size_t usize = arena_prof_demote(tsdn, extent, ptr); if (usize <= tcache_maxclass) { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, sz_size2index(usize), slow_path); } else { large_dalloc(tsdn, extent); } } static void arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { /* Dissociate slab from bin. */ if (slab == bin->slabcur) { bin->slabcur = NULL; } else { szind_t binind = extent_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; /* * The following block's conditional is necessary because if the * slab only contains one region, then it never gets inserted * into the non-full slabs heap. */ if (bin_info->nregs == 1) { arena_bin_slabs_full_remove(arena, bin, slab); } else { arena_bin_slabs_nonfull_remove(bin, slab); } } } static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) { assert(slab != bin->slabcur); malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ arena_slab_dalloc(tsdn, arena, slab); /****************************/ malloc_mutex_lock(tsdn, &bin->lock); if (config_stats) { bin->stats.curslabs--; } } static void arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) { assert(extent_nfree_get(slab) > 0); /* * Make sure that if bin->slabcur is non-NULL, it refers to the * oldest/lowest non-full slab. It is okay to NULL slabcur out rather * than proactively keeping it pointing at the oldest/lowest non-full * slab. */ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { /* Switch slabcur. 
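* The displaced slabcur is filed into the nonfull heap or the full list first, so every slab remains tracked in exactly one place.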
*/ if (extent_nfree_get(bin->slabcur) > 0) { arena_bin_slabs_nonfull_insert(bin, bin->slabcur); } else { arena_bin_slabs_full_insert(arena, bin, bin->slabcur); } bin->slabcur = slab; if (config_stats) { bin->stats.reslabs++; } } else { arena_bin_slabs_nonfull_insert(bin, slab); } } static void arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, void *ptr, bool junked) { arena_slab_data_t *slab_data = extent_slab_data_get(slab); szind_t binind = extent_szind_get(slab); bin_t *bin = &arena->bins[binind]; const bin_info_t *bin_info = &bin_infos[binind]; if (!junked && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, bin_info); } arena_slab_reg_dalloc(slab, slab_data, ptr); unsigned nfree = extent_nfree_get(slab); if (nfree == bin_info->nregs) { arena_dissociate_bin_slab(arena, slab, bin); arena_dalloc_bin_slab(tsdn, arena, slab, bin); } else if (nfree == 1 && slab != bin->slabcur) { arena_bin_slabs_full_remove(arena, bin, slab); arena_bin_lower_slab(tsdn, arena, slab, bin); } if (config_stats) { bin->stats.ndalloc++; bin->stats.curregs--; } } void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true); } static void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { szind_t binind = extent_szind_get(extent); bin_t *bin = &arena->bins[binind]; malloc_mutex_lock(tsdn, &bin->lock); arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false); malloc_mutex_unlock(tsdn, &bin->lock); } void arena_dalloc_small(tsdn_t *tsdn, void *ptr) { extent_t *extent = iealloc(tsdn, ptr); arena_t *arena = extent_arena_get(extent); arena_dalloc_bin(tsdn, arena, extent, ptr); arena_decay_tick(tsdn, arena); } bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { /* Calls with non-zero extra had to clamp extra. */ assert(extra == 0 || size + extra <= LARGE_MAXCLASS); if (unlikely(size > LARGE_MAXCLASS)) { return true; } extent_t *extent = iealloc(tsdn, ptr); size_t usize_min = sz_s2u(size); size_t usize_max = sz_s2u(size + extra); if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) { /* * Avoid moving the allocation if the size class can be left the * same. */ assert(bin_infos[sz_size2index(oldsize)].reg_size == oldsize); if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) != sz_size2index(oldsize)) && (size > oldsize || usize_max < oldsize)) { return true; } arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { return large_ralloc_no_move(tsdn, extent, usize_min, usize_max, zero); } return true; } static void * arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment == 0) { return arena_malloc(tsdn, arena, usize, sz_size2index(usize), zero, tcache, true); } usize = sz_sa2u(usize, alignment); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return NULL; } return ipalloct(tsdn, usize, alignment, zero, tcache, arena); } void * arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache) { size_t usize = sz_s2u(size); if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { return NULL; } if (likely(usize <= SMALL_MAXCLASS)) { /* Try to avoid moving the allocation. 
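*
* Editorial sketch, not part of the jemalloc sources: for the common small
* case, "no move" reduces to the old and new requests rounding to the same
* size class (the code above additionally tolerates shrinking within the
* class). sz_s2u() and sz_size2index() are the real helpers; the function
* name is hypothetical.
*
*     static bool
*     example_same_small_class(size_t oldsize, size_t newsize) {
*         size_t usize = sz_s2u(newsize);
*         return usize <= SMALL_MAXCLASS &&
*             sz_size2index(usize) == sz_size2index(oldsize);
*     }
*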
*/ if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) { return ptr; } } if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize, alignment, zero, tcache); } /* * size and oldsize are different enough that we need to move the * object. In that case, fall back to allocating new space and copying. */ void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero, tcache); if (ret == NULL) { return NULL; } /* * Junk/zero-filling were already done by * ipalloc()/arena_malloc(). */ size_t copysize = (usize < oldsize) ? usize : oldsize; memcpy(ret, ptr, copysize); isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); return ret; } dss_prec_t arena_dss_prec_get(arena_t *arena) { return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); } bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { if (!have_dss) { return (dss_prec != dss_prec_disabled); } atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); return false; } ssize_t arena_dirty_decay_ms_default_get(void) { return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); } bool arena_dirty_decay_ms_default_set(ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; } atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); return false; } ssize_t arena_muzzy_decay_ms_default_get(void) { return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); } bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; } atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); return false; } bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit) { assert(opt_retain); pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); if (new_limit != NULL) { size_t limit = *new_limit; /* Grow no more than the new limit. 
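*
* Editorial note, not part of the jemalloc sources: sz_psz2ind() rounds a
* size up to a page size class, so the sz_psz2ind(limit + 1) - 1 expression
* below is the usual floor trick: it yields the index of the largest page
* size class that does not exceed limit. As a hypothetical helper:
*
*     static pszind_t
*     example_psz_floor_ind(size_t limit) {
*         return sz_psz2ind(limit + 1) - 1;
*     }
*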
*/ if ((new_ind = sz_psz2ind(limit + 1) - 1) > EXTENT_GROW_MAX_PIND) { return true; } } malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); if (old_limit != NULL) { *old_limit = sz_pind2sz(arena->retain_grow_limit); } if (new_limit != NULL) { arena->retain_grow_limit = new_ind; } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); return false; } unsigned arena_nthreads_get(arena_t *arena, bool internal) { return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); } void arena_nthreads_inc(arena_t *arena, bool internal) { atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); } void arena_nthreads_dec(arena_t *arena, bool internal) { atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); } size_t arena_extent_sn_next(arena_t *arena) { return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); } arena_t * arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; base_t *base; unsigned i; if (ind == 0) { base = b0get(); } else { base = base_new(tsdn, ind, extent_hooks); if (base == NULL) { return NULL; } } arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); if (arena == NULL) { goto label_error; } atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); arena->last_thd = NULL; if (config_stats) { if (arena_stats_init(tsdn, &arena->stats)) { goto label_error; } ql_new(&arena->tcache_ql); ql_new(&arena->cache_bin_array_descriptor_ql); if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { goto label_error; } } if (config_prof) { if (prof_accum_init(tsdn, &arena->prof_accum)) { goto label_error; } } if (config_cache_oblivious) { /* * A nondeterministic seed based on the address of arena reduces * the likelihood of lockstep non-uniform cache index * utilization among identical concurrent processes, but at the * cost of test repeatability. For debug builds, instead use a * deterministic seed. */ atomic_store_zu(&arena->offset_state, config_debug ? ind : (size_t)(uintptr_t)arena, ATOMIC_RELAXED); } atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), ATOMIC_RELAXED); atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); extent_list_init(&arena->large); if (malloc_mutex_init(&arena->large_mtx, "arena_large", WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { goto label_error; } /* * Delay coalescing for dirty extents despite the disruptive effect on * memory layout for best-fit extent allocation, since cached extents * are likely to be reused soon after deallocation, and the cost of * merging/splitting extents is non-trivial. */ if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, true)) { goto label_error; } /* * Coalesce muzzy extents immediately, because operations on them are in * the critical path much less often than for dirty extents. */ if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, false)) { goto label_error; } /* * Coalesce retained extents immediately, in part because they will * never be evicted (and therefore there's no opportunity for delayed * coalescing), but also because operations on retained extents are not * in the critical path. 
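*
* Editorial summary, not from the jemalloc sources, of the three extent
* containers initialized here and their coalescing policies:
*
*     container          state                   delay_coalesce  rationale
*     extents_dirty      extent_state_dirty      true            reuse soon
*     extents_muzzy      extent_state_muzzy      false           cold path
*     extents_retained   extent_state_retained   false           never evicted
*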
*/ if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, false)) { goto label_error; } if (arena_decay_init(&arena->decay_dirty, arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { goto label_error; } if (arena_decay_init(&arena->decay_muzzy, arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { goto label_error; } arena->extent_grow_next = sz_psz2ind(HUGEPAGE); arena->retain_grow_limit = EXTENT_GROW_MAX_PIND; if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { goto label_error; } extent_avail_new(&arena->extent_avail); if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { goto label_error; } /* Initialize bins. */ for (i = 0; i < NBINS; i++) { bool err = bin_init(&arena->bins[i]); if (err) { goto label_error; } } arena->base = base; /* Set arena before creating background threads. */ arena_set(ind, arena); nstime_init(&arena->create_time, 0); nstime_update(&arena->create_time); /* We don't support reentrancy for arena 0 bootstrapping. */ if (ind != 0) { /* * If we're here, then arena 0 already exists, so bootstrapping * is done enough that we should have tsd. */ assert(!tsdn_null(tsdn)); pre_reentrancy(tsdn_tsd(tsdn), arena); if (hooks_arena_new_hook) { hooks_arena_new_hook(); } post_reentrancy(tsdn_tsd(tsdn)); } return arena; label_error: if (ind != 0) { base_delete(tsdn, base); } return NULL; } void arena_boot(void) { arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); #define REGIND_bin_yes(index, reg_size) \ div_init(&arena_binind_div_info[(index)], (reg_size)); #define REGIND_bin_no(index, reg_size) #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ lg_delta_lookup) \ REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta)) SIZE_CLASSES #undef REGIND_bin_yes #undef REGIND_bin_no #undef SC } void arena_prefork0(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); } void arena_prefork1(tsdn_t *tsdn, arena_t *arena) { if (config_stats) { malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); } } void arena_prefork2(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); } void arena_prefork3(tsdn_t *tsdn, arena_t *arena) { extents_prefork(tsdn, &arena->extents_dirty); extents_prefork(tsdn, &arena->extents_muzzy); extents_prefork(tsdn, &arena->extents_retained); } void arena_prefork4(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); } void arena_prefork5(tsdn_t *tsdn, arena_t *arena) { base_prefork(tsdn, arena->base); } void arena_prefork6(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->large_mtx); } void arena_prefork7(tsdn_t *tsdn, arena_t *arena) { for (unsigned i = 0; i < NBINS; i++) { bin_prefork(tsdn, &arena->bins[i]); } } void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; for (i = 0; i < NBINS; i++) { bin_postfork_parent(tsdn, &arena->bins[i]); } malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); base_postfork_parent(tsdn, arena->base); malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); extents_postfork_parent(tsdn, &arena->extents_dirty); extents_postfork_parent(tsdn, &arena->extents_muzzy); extents_postfork_parent(tsdn, &arena->extents_retained); malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); if (config_stats) { malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); } } void 
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { arena_nthreads_inc(arena, false); } if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { arena_nthreads_inc(arena, true); } if (config_stats) { ql_new(&arena->tcache_ql); ql_new(&arena->cache_bin_array_descriptor_ql); tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); if (tcache != NULL && tcache->arena == arena) { ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); cache_bin_array_descriptor_init( &tcache->cache_bin_array_descriptor, tcache->bins_small, tcache->bins_large); ql_tail_insert(&arena->cache_bin_array_descriptor_ql, &tcache->cache_bin_array_descriptor, link); } } for (i = 0; i < NBINS; i++) { bin_postfork_child(tsdn, &arena->bins[i]); } malloc_mutex_postfork_child(tsdn, &arena->large_mtx); base_postfork_child(tsdn, arena->base); malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); extents_postfork_child(tsdn, &arena->extents_dirty); extents_postfork_child(tsdn, &arena->extents_muzzy); extents_postfork_child(tsdn, &arena->extents_retained); malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); if (config_stats) { malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); } } jemalloc-sys-0.3.2/jemalloc/src/background_thread.c010064400007650000024000000646001340421341300205250ustar0000000000000000#define JEMALLOC_BACKGROUND_THREAD_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" /******************************************************************************/ /* Data. */ /* This option should be opt-in only. */ #define BACKGROUND_THREAD_DEFAULT false /* Read-only after initialization. */ bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT; /* Used for thread creation, termination and stats. */ malloc_mutex_t background_thread_lock; /* Indicates global state. Atomic because decay reads this w/o locking. */ atomic_b_t background_thread_enabled_state; size_t n_background_threads; size_t max_background_threads; /* Thread info per-index. */ background_thread_info_t *background_thread_info; /* False if no necessary runtime support. 
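*
* Editorial sketch, not part of the jemalloc sources: the enabled state
* declared above is an atomic_b_t precisely so that decay code can poll it
* without taking background_thread_lock; writers still serialize on the
* lock. A hypothetical lock-free reader:
*
*     static bool
*     example_enabled_peek(void) {
*         return atomic_load_b(&background_thread_enabled_state,
*             ATOMIC_RELAXED);
*     }
*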
*/ bool can_enable_background_thread; /******************************************************************************/ #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER #include <dlfcn.h> static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); static void pthread_create_wrapper_init(void) { #ifdef JEMALLOC_LAZY_LOCK if (!isthreaded) { isthreaded = true; } #endif } int pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *__restrict arg) { pthread_create_wrapper_init(); return pthread_create_fptr(thread, attr, start_routine, arg); } #endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */ #ifndef JEMALLOC_BACKGROUND_THREAD #define NOT_REACHED { not_reached(); } bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED bool background_threads_enable(tsd_t *tsd) NOT_REACHED bool background_threads_disable(tsd_t *tsd) NOT_REACHED void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, size_t npages_new) NOT_REACHED void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED bool background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) NOT_REACHED void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED #undef NOT_REACHED #else static bool background_thread_enabled_at_fork; static void background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { background_thread_wakeup_time_set(tsdn, info, 0); info->npages_to_purge_new = 0; if (config_stats) { info->tot_n_runs = 0; nstime_init(&info->tot_sleep_time, 0); } } static inline bool set_current_thread_affinity(UNUSED int cpu) { #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); return (ret != 0); #else return false; #endif } /* Threshold for determining when to wake up the background thread. */ #define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024) #define BILLION UINT64_C(1000000000) /* Minimal sleep interval 100 ms. */ #define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) static inline size_t decay_npurge_after_interval(arena_decay_t *decay, size_t interval) { size_t i; uint64_t sum = 0; for (i = 0; i < interval; i++) { sum += decay->backlog[i] * h_steps[i]; } for (; i < SMOOTHSTEP_NSTEPS; i++) { sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); } return (size_t)(sum >> SMOOTHSTEP_BFP); } static uint64_t arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay, extents_t *extents) { if (malloc_mutex_trylock(tsdn, &decay->mtx)) { /* Use minimal interval if decay is contended. */ return BACKGROUND_THREAD_MIN_INTERVAL_NS; } uint64_t interval; ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); if (decay_time <= 0) { /* Purging is eagerly done or disabled currently. */ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; goto label_done; } uint64_t decay_interval_ns = nstime_ns(&decay->interval); assert(decay_interval_ns > 0); size_t npages = extents_npages_get(extents); if (npages == 0) { unsigned i; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { if (decay->backlog[i] > 0) { break; } } if (i == SMOOTHSTEP_NSTEPS) { /* No dirty pages recorded. Sleep indefinitely. 
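*
* Editorial note, not part of the jemalloc sources, on
* decay_npurge_after_interval() above: in fixed point it evaluates roughly
*
*     npurge(interval) = ( sum over i < interval of
*                              backlog[i] * h_steps[i]
*                        + sum over i >= interval of
*                              backlog[i] * (h_steps[i] - h_steps[i - interval])
*                        ) >> SMOOTHSTEP_BFP
*
* that is, an estimate of how many tracked dirty pages the smoothstep decay
* curve will have released once interval more epochs elapse; the shift
* drops the fractional bits of the h_steps[] table.
*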
*/ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; goto label_done; } } if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) { /* Use max interval. */ interval = decay_interval_ns * SMOOTHSTEP_NSTEPS; goto label_done; } size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns; size_t ub = SMOOTHSTEP_NSTEPS; /* Minimal 2 intervals to ensure reaching next epoch deadline. */ lb = (lb < 2) ? 2 : lb; if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) || (lb + 2 > ub)) { interval = BACKGROUND_THREAD_MIN_INTERVAL_NS; goto label_done; } assert(lb + 2 <= ub); size_t npurge_lb, npurge_ub; npurge_lb = decay_npurge_after_interval(decay, lb); if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) { interval = decay_interval_ns * lb; goto label_done; } npurge_ub = decay_npurge_after_interval(decay, ub); if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) { interval = decay_interval_ns * ub; goto label_done; } unsigned n_search = 0; size_t target, npurge; while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) && (lb + 2 < ub)) { target = (lb + ub) / 2; npurge = decay_npurge_after_interval(decay, target); if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) { ub = target; npurge_ub = npurge; } else { lb = target; npurge_lb = npurge; } assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1); } interval = decay_interval_ns * (ub + lb) / 2; label_done: interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? BACKGROUND_THREAD_MIN_INTERVAL_NS : interval; malloc_mutex_unlock(tsdn, &decay->mtx); return interval; } /* Compute purge interval for background threads. */ static uint64_t arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) { uint64_t i1, i2; i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty, &arena->extents_dirty); if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) { return i1; } i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy, &arena->extents_muzzy); return i1 < i2 ? i1 : i2; } static void background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, uint64_t interval) { if (config_stats) { info->tot_n_runs++; } info->npages_to_purge_new = 0; struct timeval tv; /* Specific clock required by timedwait. */ gettimeofday(&tv, NULL); nstime_t before_sleep; nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000); int ret; if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { assert(background_thread_indefinite_sleep(info)); ret = pthread_cond_wait(&info->cond, &info->mtx.lock); assert(ret == 0); } else { assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS && interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); /* We need malloc clock (can be different from tv). 
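*
* Editorial sketch, not part of the jemalloc sources: the n_search loop
* above is plain bisection, valid because the purge estimate is
* non-decreasing in the interval. The generic shape, with a hypothetical
* monotone predicate:
*
*     static size_t
*     example_bisect_first_true(size_t lo, size_t hi,
*         bool (*pred)(size_t)) {
*         // Precondition: !pred(lo) && pred(hi).
*         while (lo + 1 < hi) {
*             size_t mid = lo + (hi - lo) / 2;
*             if (pred(mid)) {
*                 hi = mid;
*             } else {
*                 lo = mid;
*             }
*         }
*         return hi;
*     }
*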
*/ nstime_t next_wakeup; nstime_init(&next_wakeup, 0); nstime_update(&next_wakeup); nstime_iadd(&next_wakeup, interval); assert(nstime_ns(&next_wakeup) < BACKGROUND_THREAD_INDEFINITE_SLEEP); background_thread_wakeup_time_set(tsdn, info, nstime_ns(&next_wakeup)); nstime_t ts_wakeup; nstime_copy(&ts_wakeup, &before_sleep); nstime_iadd(&ts_wakeup, interval); struct timespec ts; ts.tv_sec = (size_t)nstime_sec(&ts_wakeup); ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup); assert(!background_thread_indefinite_sleep(info)); ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts); assert(ret == ETIMEDOUT || ret == 0); background_thread_wakeup_time_set(tsdn, info, BACKGROUND_THREAD_INDEFINITE_SLEEP); } if (config_stats) { gettimeofday(&tv, NULL); nstime_t after_sleep; nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000); if (nstime_compare(&after_sleep, &before_sleep) > 0) { nstime_subtract(&after_sleep, &before_sleep); nstime_add(&info->tot_sleep_time, &after_sleep); } } } static bool background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { if (unlikely(info->state == background_thread_paused)) { malloc_mutex_unlock(tsdn, &info->mtx); /* Wait on global lock to update status. */ malloc_mutex_lock(tsdn, &background_thread_lock); malloc_mutex_unlock(tsdn, &background_thread_lock); malloc_mutex_lock(tsdn, &info->mtx); return true; } return false; } static inline void background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; unsigned narenas = narenas_total_get(); for (unsigned i = ind; i < narenas; i += max_background_threads) { arena_t *arena = arena_get(tsdn, i, false); if (!arena) { continue; } arena_decay(tsdn, arena, true, false); if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) { /* Min interval will be used. */ continue; } uint64_t interval = arena_decay_compute_purge_interval(tsdn, arena); assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS); if (min_interval > interval) { min_interval = interval; } } background_thread_sleep(tsdn, info, min_interval); } static bool background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { if (info == &background_thread_info[0]) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); } else { malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &background_thread_lock); } pre_reentrancy(tsd, NULL); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); bool has_thread; assert(info->state != background_thread_paused); if (info->state == background_thread_started) { has_thread = true; info->state = background_thread_stopped; pthread_cond_signal(&info->cond); } else { has_thread = false; } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); if (!has_thread) { post_reentrancy(tsd); return false; } void *ret; if (pthread_join(info->thread, &ret)) { post_reentrancy(tsd); return true; } assert(ret == NULL); n_background_threads--; post_reentrancy(tsd); return false; } static void *background_thread_entry(void *ind_arg); static int background_thread_create_signals_masked(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) { /* * Mask signals during thread creation so that the thread inherits * an empty signal set. */ sigset_t set; sigfillset(&set); sigset_t oldset; int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); if (mask_err != 0) { return mask_err; } int create_err = pthread_create_wrapper(thread, attr, start_routine, arg); /* * Restore the signal mask. 
Failure to restore the signal mask here * changes program behavior. */ int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL); if (restore_err != 0) { malloc_printf("<jemalloc>: background thread creation " "failed (%d), and signal mask restoration failed " "(%d)\n", create_err, restore_err); if (opt_abort) { abort(); } } return create_err; } static bool check_background_thread_creation(tsd_t *tsd, unsigned *n_created, bool *created_threads) { bool ret = false; if (likely(*n_created == n_background_threads)) { return ret; } tsdn_t *tsdn = tsd_tsdn(tsd); malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx); for (unsigned i = 1; i < max_background_threads; i++) { if (created_threads[i]) { continue; } background_thread_info_t *info = &background_thread_info[i]; malloc_mutex_lock(tsdn, &info->mtx); /* * In case of the background_thread_paused state because of * arena reset, delay the creation. */ bool create = (info->state == background_thread_started); malloc_mutex_unlock(tsdn, &info->mtx); if (!create) { continue; } pre_reentrancy(tsd, NULL); int err = background_thread_create_signals_masked(&info->thread, NULL, background_thread_entry, (void *)(uintptr_t)i); post_reentrancy(tsd); if (err == 0) { (*n_created)++; created_threads[i] = true; } else { malloc_printf("<jemalloc>: background thread " "creation failed (%d)\n", err); if (opt_abort) { abort(); } } /* Return to restart the loop since we unlocked. */ ret = true; break; } malloc_mutex_lock(tsdn, &background_thread_info[0].mtx); return ret; } static void background_thread0_work(tsd_t *tsd) { /* Thread0 is also responsible for launching / terminating threads. */ VARIABLE_ARRAY(bool, created_threads, max_background_threads); unsigned i; for (i = 1; i < max_background_threads; i++) { created_threads[i] = false; } /* Start working, and create more threads when asked. */ unsigned n_created = 1; while (background_thread_info[0].state != background_thread_stopped) { if (background_thread_pause_check(tsd_tsdn(tsd), &background_thread_info[0])) { continue; } if (check_background_thread_creation(tsd, &n_created, (bool *)&created_threads)) { continue; } background_work_sleep_once(tsd_tsdn(tsd), &background_thread_info[0], 0); } /* * Shut down other threads at exit. Note that the ctl thread is holding * the global background_thread mutex and is waiting for us. */ assert(!background_thread_enabled()); for (i = 1; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; assert(info->state != background_thread_paused); if (created_threads[i]) { background_threads_disable_single(tsd, info); } else { malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); if (info->state != background_thread_stopped) { /* The thread was not created. 
*/ assert(info->state == background_thread_started); n_background_threads--; info->state = background_thread_stopped; } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } } background_thread_info[0].state = background_thread_stopped; assert(n_background_threads == 1); } static void background_work(tsd_t *tsd, unsigned ind) { background_thread_info_t *info = &background_thread_info[ind]; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); background_thread_wakeup_time_set(tsd_tsdn(tsd), info, BACKGROUND_THREAD_INDEFINITE_SLEEP); if (ind == 0) { background_thread0_work(tsd); } else { while (info->state != background_thread_stopped) { if (background_thread_pause_check(tsd_tsdn(tsd), info)) { continue; } background_work_sleep_once(tsd_tsdn(tsd), info, ind); } } assert(info->state == background_thread_stopped); background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0); malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } static void * background_thread_entry(void *ind_arg) { unsigned thread_ind = (unsigned)(uintptr_t)ind_arg; assert(thread_ind < max_background_threads); #ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); #endif if (opt_percpu_arena != percpu_arena_disabled) { set_current_thread_affinity((int)thread_ind); } /* * Start periodic background work. We use internal tsd which avoids * side effects, for example triggering new arena creation (which in * turn triggers another background thread creation). */ background_work(tsd_internal_fetch(), thread_ind); assert(pthread_equal(pthread_self(), background_thread_info[thread_ind].thread)); return NULL; } static void background_thread_init(tsd_t *tsd, background_thread_info_t *info) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); info->state = background_thread_started; background_thread_info_init(tsd_tsdn(tsd), info); n_background_threads++; } /* Create a new background thread if needed. */ bool background_thread_create(tsd_t *tsd, unsigned arena_ind) { assert(have_background_thread); malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); /* We create at most NCPUs threads. */ size_t thread_ind = arena_ind % max_background_threads; background_thread_info_t *info = &background_thread_info[thread_ind]; bool need_new_thread; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); need_new_thread = background_thread_enabled() && (info->state == background_thread_stopped); if (need_new_thread) { background_thread_init(tsd, info); } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); if (!need_new_thread) { return false; } if (arena_ind != 0) { /* Threads are created asynchronously by Thread 0. */ background_thread_info_t *t0 = &background_thread_info[0]; malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx); assert(t0->state == background_thread_started); pthread_cond_signal(&t0->cond); malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx); return false; } pre_reentrancy(tsd, NULL); /* * To avoid complications (besides reentrancy), create internal * background threads with the underlying pthread_create. 
*/ int err = background_thread_create_signals_masked(&info->thread, NULL, background_thread_entry, (void *)thread_ind); post_reentrancy(tsd); if (err != 0) { malloc_printf("<jemalloc>: arena 0 background thread creation " "failed (%d)\n", err); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); info->state = background_thread_stopped; n_background_threads--; malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); return true; } return false; } bool background_threads_enable(tsd_t *tsd) { assert(n_background_threads == 0); assert(background_thread_enabled()); malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); VARIABLE_ARRAY(bool, marked, max_background_threads); unsigned i, nmarked; for (i = 0; i < max_background_threads; i++) { marked[i] = false; } nmarked = 0; /* Thread 0 is required and created at the end. */ marked[0] = true; /* Mark the threads we need to create for thread 0. */ unsigned n = narenas_total_get(); for (i = 1; i < n; i++) { if (marked[i % max_background_threads] || arena_get(tsd_tsdn(tsd), i, false) == NULL) { continue; } background_thread_info_t *info = &background_thread_info[ i % max_background_threads]; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); assert(info->state == background_thread_stopped); background_thread_init(tsd, info); malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); marked[i % max_background_threads] = true; if (++nmarked == max_background_threads) { break; } } return background_thread_create(tsd, 0); } bool background_threads_disable(tsd_t *tsd) { assert(!background_thread_enabled()); malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); /* Thread 0 will be responsible for terminating other threads. */ if (background_threads_disable_single(tsd, &background_thread_info[0])) { return true; } assert(n_background_threads == 0); return false; } /* Check if we need to signal the background thread early. */ void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, size_t npages_new) { background_thread_info_t *info = arena_background_thread_info_get( arena); if (malloc_mutex_trylock(tsdn, &info->mtx)) { /* * Background thread may hold the mutex for a long period of * time. We'd like to avoid the variance on application * threads. So keep this non-blocking, and leave the work to a * future epoch. */ return; } if (info->state != background_thread_started) { goto label_done; } if (malloc_mutex_trylock(tsdn, &decay->mtx)) { goto label_done; } ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); if (decay_time <= 0) { /* Purging is eagerly done or disabled currently. */ goto label_done_unlock2; } uint64_t decay_interval_ns = nstime_ns(&decay->interval); assert(decay_interval_ns > 0); nstime_t diff; nstime_init(&diff, background_thread_wakeup_time_get(info)); if (nstime_compare(&diff, &decay->epoch) <= 0) { goto label_done_unlock2; } nstime_subtract(&diff, &decay->epoch); if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { goto label_done_unlock2; } if (npages_new > 0) { size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns); /* * Compute how many new pages we would need to purge by the next * wakeup, which is used to determine if we should signal the * background thread. 
*/ uint64_t npurge_new; if (n_epoch >= SMOOTHSTEP_NSTEPS) { npurge_new = npages_new; } else { uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; assert(h_steps_max >= h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); npurge_new = npages_new * (h_steps_max - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); npurge_new >>= SMOOTHSTEP_BFP; } info->npages_to_purge_new += npurge_new; } bool should_signal; if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) { should_signal = true; } else if (unlikely(background_thread_indefinite_sleep(info)) && (extents_npages_get(&arena->extents_dirty) > 0 || extents_npages_get(&arena->extents_muzzy) > 0 || info->npages_to_purge_new > 0)) { should_signal = true; } else { should_signal = false; } if (should_signal) { info->npages_to_purge_new = 0; pthread_cond_signal(&info->cond); } label_done_unlock2: malloc_mutex_unlock(tsdn, &decay->mtx); label_done: malloc_mutex_unlock(tsdn, &info->mtx); } void background_thread_prefork0(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &background_thread_lock); background_thread_enabled_at_fork = background_thread_enabled(); } void background_thread_prefork1(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx); } } void background_thread_postfork_parent(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { malloc_mutex_postfork_parent(tsdn, &background_thread_info[i].mtx); } malloc_mutex_postfork_parent(tsdn, &background_thread_lock); } void background_thread_postfork_child(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { malloc_mutex_postfork_child(tsdn, &background_thread_info[i].mtx); } malloc_mutex_postfork_child(tsdn, &background_thread_lock); if (!background_thread_enabled_at_fork) { return; } /* Clear background_thread state (reset to disabled for child). 
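*
* Editorial sketch, not part of the jemalloc sources: the prefork /
* postfork_parent / postfork_child trio in this file follows the standard
* pthread_atfork() discipline: take every lock before fork(), release them
* in the parent, and reinitialize rather than unlock in the child, as this
* function does. With hypothetical void wrappers around the tsdn-taking
* functions, registration would look like:
*
*     static void example_prefork(void);          // lock in rank order
*     static void example_postfork_parent(void);  // unlock in reverse
*     static void example_postfork_child(void);   // reinit locks and state
*
*     static void
*     example_register_fork_handlers(void) {
*         pthread_atfork(example_prefork, example_postfork_parent,
*             example_postfork_child);
*     }
*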
*/ malloc_mutex_lock(tsdn, &background_thread_lock); n_background_threads = 0; background_thread_enabled_set(tsdn, false); for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; malloc_mutex_lock(tsdn, &info->mtx); info->state = background_thread_stopped; int ret = pthread_cond_init(&info->cond, NULL); assert(ret == 0); background_thread_info_init(tsdn, info); malloc_mutex_unlock(tsdn, &info->mtx); } malloc_mutex_unlock(tsdn, &background_thread_lock); } bool background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { assert(config_stats); malloc_mutex_lock(tsdn, &background_thread_lock); if (!background_thread_enabled()) { malloc_mutex_unlock(tsdn, &background_thread_lock); return true; } stats->num_threads = n_background_threads; uint64_t num_runs = 0; nstime_init(&stats->run_interval, 0); for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; malloc_mutex_lock(tsdn, &info->mtx); if (info->state != background_thread_stopped) { num_runs += info->tot_n_runs; nstime_add(&stats->run_interval, &info->tot_sleep_time); } malloc_mutex_unlock(tsdn, &info->mtx); } stats->num_runs = num_runs; if (num_runs > 0) { nstime_idivide(&stats->run_interval, num_runs); } malloc_mutex_unlock(tsdn, &background_thread_lock); return false; } #undef BACKGROUND_THREAD_NPAGES_THRESHOLD #undef BILLION #undef BACKGROUND_THREAD_MIN_INTERVAL_NS static bool pthread_create_fptr_init(void) { if (pthread_create_fptr != NULL) { return false; } pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); if (pthread_create_fptr == NULL) { can_enable_background_thread = false; if (config_lazy_lock || opt_background_thread) { malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " "\"pthread_create\")\n"); abort(); } } else { can_enable_background_thread = true; } return false; } /* * When lazy lock is enabled, we need to make sure isthreaded is set before * taking any background_thread locks. This is called early in ctl (instead of * waiting for the pthread_create calls to trigger it) because the mutex is * required before creating background threads. 
*/ void background_thread_ctl_init(tsdn_t *tsdn) { malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER pthread_create_fptr_init(); pthread_create_wrapper_init(); #endif } #endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ bool background_thread_boot0(void) { if (!have_background_thread && opt_background_thread) { malloc_printf("<jemalloc>: option background_thread currently " "supports pthread only\n"); return true; } #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER if ((config_lazy_lock || opt_background_thread) && pthread_create_fptr_init()) { return true; } #endif return false; } bool background_thread_boot1(tsdn_t *tsdn) { #ifdef JEMALLOC_BACKGROUND_THREAD assert(have_background_thread); assert(narenas_total_get() > 0); if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT && ncpus < MAX_BACKGROUND_THREAD_LIMIT) { opt_max_background_threads = ncpus; } max_background_threads = opt_max_background_threads; background_thread_enabled_set(tsdn, opt_background_thread); if (malloc_mutex_init(&background_thread_lock, "background_thread_global", WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, malloc_mutex_rank_exclusive)) { return true; } background_thread_info = (background_thread_info_t *)base_alloc(tsdn, b0get(), opt_max_background_threads * sizeof(background_thread_info_t), CACHELINE); if (background_thread_info == NULL) { return true; } for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; /* Thread mutex is rank_inclusive because of thread0. */ if (malloc_mutex_init(&info->mtx, "background_thread", WITNESS_RANK_BACKGROUND_THREAD, malloc_mutex_address_ordered)) { return true; } if (pthread_cond_init(&info->cond, NULL)) { return true; } malloc_mutex_lock(tsdn, &info->mtx); info->state = background_thread_stopped; background_thread_info_init(tsdn, info); malloc_mutex_unlock(tsdn, &info->mtx); } #endif return false; } jemalloc-sys-0.3.2/jemalloc/src/base.c010064400007650000024000000362151340421341300157720ustar0000000000000000#define JEMALLOC_BASE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sz.h" /******************************************************************************/ /* Data. */ static base_t *b0; metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT; const char *metadata_thp_mode_names[] = { "disabled", "auto", "always" }; /******************************************************************************/ static inline bool metadata_thp_madvise(void) { return (metadata_thp_enabled() && (init_system_thp_mode == thp_mode_default)); } static void * base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { void *addr; bool zero = true; bool commit = true; /* Use huge page sizes and alignment regardless of opt_metadata_thp. */ assert(size == HUGEPAGE_CEILING(size)); size_t alignment = HUGEPAGE; if (extent_hooks == &extent_hooks_default) { addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); } else { /* No arena context as we are creating new arenas. */ tsd_t *tsd = tsdn_null(tsdn) ? 
tsd_fetch() : tsdn_tsd(tsdn); pre_reentrancy(tsd, NULL); addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment, &zero, &commit, ind); post_reentrancy(tsd); } return addr; } static void base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size) { /* * Cascade through dalloc, decommit, purge_forced, and purge_lazy, * stopping at first success. This cascade is performed for consistency * with the cascade in extent_dalloc_wrapper() because an application's * custom hooks may not support e.g. dalloc. This function is only ever * called as a side effect of arena destruction, so although it might * seem pointless to do anything besides dalloc here, the application * may in fact want the end state of all associated virtual memory to be * in some consistent-but-allocated state. */ if (extent_hooks == &extent_hooks_default) { if (!extent_dalloc_mmap(addr, size)) { goto label_done; } if (!pages_decommit(addr, size)) { goto label_done; } if (!pages_purge_forced(addr, size)) { goto label_done; } if (!pages_purge_lazy(addr, size)) { goto label_done; } /* Nothing worked. This should never happen. */ not_reached(); } else { tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); pre_reentrancy(tsd, NULL); if (extent_hooks->dalloc != NULL && !extent_hooks->dalloc(extent_hooks, addr, size, true, ind)) { goto label_post_reentrancy; } if (extent_hooks->decommit != NULL && !extent_hooks->decommit(extent_hooks, addr, size, 0, size, ind)) { goto label_post_reentrancy; } if (extent_hooks->purge_forced != NULL && !extent_hooks->purge_forced(extent_hooks, addr, size, 0, size, ind)) { goto label_post_reentrancy; } if (extent_hooks->purge_lazy != NULL && !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, ind)) { goto label_post_reentrancy; } /* Nothing worked. That's the application's problem. */ label_post_reentrancy: post_reentrancy(tsd); } label_done: if (metadata_thp_madvise()) { /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */ assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && (size & HUGEPAGE_MASK) == 0); pages_nohuge(addr, size); } } static void base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, size_t size) { size_t sn; sn = *extent_sn_next; (*extent_sn_next)++; extent_binit(extent, addr, size, sn); } static size_t base_get_num_blocks(base_t *base, bool with_new_block) { base_block_t *b = base->blocks; assert(b != NULL); size_t n_blocks = with_new_block ? 2 : 1; while (b->next != NULL) { n_blocks++; b = b->next; } return n_blocks; } static void base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { assert(opt_metadata_thp == metadata_thp_auto); malloc_mutex_assert_owner(tsdn, &base->mtx); if (base->auto_thp_switched) { return; } /* Called when adding a new block. */ bool should_switch; if (base_ind_get(base) != 0) { should_switch = (base_get_num_blocks(base, true) == BASE_AUTO_THP_THRESHOLD); } else { should_switch = (base_get_num_blocks(base, true) == BASE_AUTO_THP_THRESHOLD_A0); } if (!should_switch) { return; } base->auto_thp_switched = true; assert(!config_stats || base->n_thp == 0); /* Make the initial blocks THP lazily. 
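*
* Editorial sketch, not part of the jemalloc sources: in auto mode a base
* flips to transparent huge pages only once it is about to grow past a
* block-count threshold, with arena 0 using its own threshold since it
* exists in every process. The decision above, restated as a hypothetical
* predicate:
*
*     static bool
*     example_should_switch_to_thp(base_t *base) {
*         size_t threshold = (base_ind_get(base) == 0) ?
*             BASE_AUTO_THP_THRESHOLD_A0 : BASE_AUTO_THP_THRESHOLD;
*         return base_get_num_blocks(base, true) == threshold;
*     }
*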
*/ base_block_t *block = base->blocks; while (block != NULL) { assert((block->size & HUGEPAGE_MASK) == 0); pages_huge(block, block->size); if (config_stats) { base->n_thp += HUGEPAGE_CEILING(block->size - extent_bsize_get(&block->extent)) >> LG_HUGEPAGE; } block = block->next; assert(block == NULL || (base_ind_get(base) == 0)); } } static void * base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, size_t alignment) { void *ret; assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); assert(size == ALIGNMENT_CEILING(size, alignment)); *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), alignment) - (uintptr_t)extent_addr_get(extent); ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); assert(extent_bsize_get(extent) >= *gap_size + size); extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + *gap_size + size), extent_bsize_get(extent) - *gap_size - size, extent_sn_get(extent)); return ret; } static void base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, void *addr, size_t size) { if (extent_bsize_get(extent) > 0) { /* * Compute the index for the largest size class that does not * exceed extent's size. */ szind_t index_floor = sz_size2index(extent_bsize_get(extent) + 1) - 1; extent_heap_insert(&base->avail[index_floor], extent); } if (config_stats) { base->allocated += size; /* * Add one PAGE to base_resident for every page boundary that is * crossed by the new allocation. Adjust n_thp similarly when * metadata_thp is enabled. */ base->resident += PAGE_CEILING((uintptr_t)addr + size) - PAGE_CEILING((uintptr_t)addr - gap_size); assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); if (metadata_thp_madvise() && (opt_metadata_thp == metadata_thp_always || base->auto_thp_switched)) { base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size) - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> LG_HUGEPAGE; assert(base->mapped >= base->n_thp << LG_HUGEPAGE); } } } static void * base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, size_t alignment) { void *ret; size_t gap_size; ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); base_extent_bump_alloc_post(base, extent, gap_size, ret, size); return ret; } /* * Allocate a block of virtual memory that is large enough to start with a * base_block_t header, followed by an object of specified size and alignment. * On success a pointer to the initialized base_block_t header is returned. */ static base_block_t * base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, size_t alignment) { alignment = ALIGNMENT_CEILING(alignment, QUANTUM); size_t usize = ALIGNMENT_CEILING(size, alignment); size_t header_size = sizeof(base_block_t); size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size; /* * Create increasingly larger blocks in order to limit the total number * of disjoint virtual memory ranges. Choose the next size in the page * size class series (skipping size classes that are not a multiple of * HUGEPAGE), or a size large enough to satisfy the requested size and * alignment, whichever is larger. */ size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size + usize)); pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 : *pind_last; size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); size_t block_size = (min_block_size > next_block_size) ? 
min_block_size : next_block_size; base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, block_size); if (block == NULL) { return NULL; } if (metadata_thp_madvise()) { void *addr = (void *)block; assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && (block_size & HUGEPAGE_MASK) == 0); if (opt_metadata_thp == metadata_thp_always) { pages_huge(addr, block_size); } else if (opt_metadata_thp == metadata_thp_auto && base != NULL) { /* base != NULL indicates this is not a new base. */ malloc_mutex_lock(tsdn, &base->mtx); base_auto_thp_switch(tsdn, base); if (base->auto_thp_switched) { pages_huge(addr, block_size); } malloc_mutex_unlock(tsdn, &base->mtx); } } *pind_last = sz_psz2ind(block_size); block->size = block_size; block->next = NULL; assert(block_size >= header_size); base_extent_init(extent_sn_next, &block->extent, (void *)((uintptr_t)block + header_size), block_size - header_size); return block; } /* * Allocate an extent that is at least as large as specified size, with * specified alignment. */ static extent_t * base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { malloc_mutex_assert_owner(tsdn, &base->mtx); extent_hooks_t *extent_hooks = base_extent_hooks_get(base); /* * Drop mutex during base_block_alloc(), because an extent hook will be * called. */ malloc_mutex_unlock(tsdn, &base->mtx); base_block_t *block = base_block_alloc(tsdn, base, extent_hooks, base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, alignment); malloc_mutex_lock(tsdn, &base->mtx); if (block == NULL) { return NULL; } block->next = base->blocks; base->blocks = block; if (config_stats) { base->allocated += sizeof(base_block_t); base->resident += PAGE_CEILING(sizeof(base_block_t)); base->mapped += block->size; if (metadata_thp_madvise() && !(opt_metadata_thp == metadata_thp_auto && !base->auto_thp_switched)) { assert(base->n_thp > 0); base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >> LG_HUGEPAGE; } assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); assert(base->n_thp << LG_HUGEPAGE <= base->mapped); } return &block->extent; } base_t * b0get(void) { return b0; } base_t * base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { pszind_t pind_last = 0; size_t extent_sn_next = 0; base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind, &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); if (block == NULL) { return NULL; } size_t gap_size; size_t base_alignment = CACHELINE; size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, &gap_size, base_size, base_alignment); base->ind = ind; atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, malloc_mutex_rank_exclusive)) { base_unmap(tsdn, extent_hooks, ind, block, block->size); return NULL; } base->pind_last = pind_last; base->extent_sn_next = extent_sn_next; base->blocks = block; base->auto_thp_switched = false; for (szind_t i = 0; i < NSIZES; i++) { extent_heap_new(&base->avail[i]); } if (config_stats) { base->allocated = sizeof(base_block_t); base->resident = PAGE_CEILING(sizeof(base_block_t)); base->mapped = block->size; base->n_thp = (opt_metadata_thp == metadata_thp_always) && metadata_thp_madvise() ? 
HUGEPAGE_CEILING(sizeof(base_block_t)) >> LG_HUGEPAGE : 0; assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); assert(base->n_thp << LG_HUGEPAGE <= base->mapped); } base_extent_bump_alloc_post(base, &block->extent, gap_size, base, base_size); return base; } void base_delete(tsdn_t *tsdn, base_t *base) { extent_hooks_t *extent_hooks = base_extent_hooks_get(base); base_block_t *next = base->blocks; do { base_block_t *block = next; next = block->next; base_unmap(tsdn, extent_hooks, base_ind_get(base), block, block->size); } while (next != NULL); } extent_hooks_t * base_extent_hooks_get(base_t *base) { return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, ATOMIC_ACQUIRE); } extent_hooks_t * base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); return old_extent_hooks; } static void * base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, size_t *esn) { alignment = QUANTUM_CEILING(alignment); size_t usize = ALIGNMENT_CEILING(size, alignment); size_t asize = usize + alignment - QUANTUM; extent_t *extent = NULL; malloc_mutex_lock(tsdn, &base->mtx); for (szind_t i = sz_size2index(asize); i < NSIZES; i++) { extent = extent_heap_remove_first(&base->avail[i]); if (extent != NULL) { /* Use existing space. */ break; } } if (extent == NULL) { /* Try to allocate more space. */ extent = base_extent_alloc(tsdn, base, usize, alignment); } void *ret; if (extent == NULL) { ret = NULL; goto label_return; } ret = base_extent_bump_alloc(base, extent, usize, alignment); if (esn != NULL) { *esn = extent_sn_get(extent); } label_return: malloc_mutex_unlock(tsdn, &base->mtx); return ret; } /* * base_alloc() returns zeroed memory, which is always demand-zeroed for the * auto arenas, in order to make multi-page sparse data structures such as radix * tree nodes efficient with respect to physical memory usage. Upon success a * pointer to at least size bytes with specified alignment is returned. Note * that size is rounded up to the nearest multiple of alignment to avoid false * sharing. 
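*
* Editorial usage sketch, not part of the jemalloc sources (my_node_t and
* the surrounding error convention are hypothetical): metadata callers
* typically grab zeroed, cacheline-aligned memory from the global base and
* treat NULL as metadata OOM.
*
*     my_node_t *node = (my_node_t *)base_alloc(tsdn, b0get(),
*         sizeof(my_node_t), CACHELINE);
*     if (node == NULL) {
*         return true;  // propagate metadata OOM
*     }
*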
*/ void * base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { return base_alloc_impl(tsdn, base, size, alignment, NULL); } extent_t * base_alloc_extent(tsdn_t *tsdn, base_t *base) { size_t esn; extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), CACHELINE, &esn); if (extent == NULL) { return NULL; } extent_esn_set(extent, esn); return extent; } void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, size_t *mapped, size_t *n_thp) { cassert(config_stats); malloc_mutex_lock(tsdn, &base->mtx); assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); *allocated = base->allocated; *resident = base->resident; *mapped = base->mapped; *n_thp = base->n_thp; malloc_mutex_unlock(tsdn, &base->mtx); } void base_prefork(tsdn_t *tsdn, base_t *base) { malloc_mutex_prefork(tsdn, &base->mtx); } void base_postfork_parent(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_parent(tsdn, &base->mtx); } void base_postfork_child(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_child(tsdn, &base->mtx); } bool base_boot(tsdn_t *tsdn) { b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); return (b0 == NULL); } jemalloc-sys-0.3.2/jemalloc/src/bin.c010064400007650000024000000024611340421341300156240ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/bin.h" #include "jemalloc/internal/witness.h" const bin_info_t bin_infos[NBINS] = { #define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \ {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}, #define BIN_INFO_bin_no(reg_size, slab_size, nregs) #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ lg_delta_lookup) \ BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \ (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \ (ndelta<<lg_delta))) SIZE_CLASSES #undef BIN_INFO_bin_yes #undef BIN_INFO_bin_no #undef SC }; bool bin_init(bin_t *bin) { if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, malloc_mutex_rank_exclusive)) { return true; } bin->slabcur = NULL; extent_heap_new(&bin->slabs_nonfull); extent_list_init(&bin->slabs_full); if (config_stats) { memset(&bin->stats, 0, sizeof(bin_stats_t)); } return false; } void bin_prefork(tsdn_t *tsdn, bin_t *bin) { malloc_mutex_prefork(tsdn, &bin->lock); } void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) { malloc_mutex_postfork_parent(tsdn, &bin->lock); } void bin_postfork_child(tsdn_t *tsdn, bin_t *bin) { malloc_mutex_postfork_child(tsdn, &bin->lock); } jemalloc-sys-0.3.2/jemalloc/src/bitmap.c010064400007650000024000000061741340421340100163320ustar0000000000000000#define JEMALLOC_BITMAP_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" /******************************************************************************/ #ifdef BITMAP_USE_TREE void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; size_t group_count; assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); /* * Compute the number of groups necessary to store nbits bits, and * progressively work upward through the levels until reaching a level * that requires only one group. 
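*
* Editorial worked example, not from the jemalloc sources: with 64-bit
* groups and nbits = 2048, level 0 needs 2048 / 64 = 32 groups and level 1
* needs ceil(32 / 64) = 1 group, so the loop stops there: nlevels = 2 and
* the bitmap occupies 32 + 1 = 33 groups in total.
*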
*/ binfo->levels[0].group_offset = 0; group_count = BITMAP_BITS2GROUPS(nbits); for (i = 1; group_count > 1; i++) { assert(i < BITMAP_MAX_LEVELS); binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; group_count = BITMAP_BITS2GROUPS(group_count); } binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); binfo->nlevels = i; binfo->nbits = nbits; } static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { return binfo->levels[binfo->nlevels].group_offset; } void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { size_t extra; unsigned i; /* * Bits are actually inverted with regard to the external bitmap * interface. */ if (fill) { /* The "filled" bitmap starts out with all 0 bits. */ memset(bitmap, 0, bitmap_size(binfo)); return; } /* * The "empty" bitmap starts out with all 1 bits, except for trailing * unused bits (if any). Note that each group uses bit 0 to correspond * to the first logical bit in the group, so extra bits are the most * significant bits of the last group. */ memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { bitmap[binfo->levels[1].group_offset - 1] >>= extra; } for (i = 1; i < binfo->nlevels; i++) { size_t group_count = binfo->levels[i].group_offset - binfo->levels[i-1].group_offset; extra = (BITMAP_GROUP_NBITS - (group_count & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; } } } #else /* BITMAP_USE_TREE */ void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); binfo->ngroups = BITMAP_BITS2GROUPS(nbits); binfo->nbits = nbits; } static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { return binfo->ngroups; } void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { size_t extra; if (fill) { memset(bitmap, 0, bitmap_size(binfo)); return; } memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { bitmap[binfo->ngroups - 1] >>= extra; } } #endif /* BITMAP_USE_TREE */ size_t bitmap_size(const bitmap_info_t *binfo) { return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); } jemalloc-sys-0.3.2/jemalloc/src/ckh.c010064400007650000024000000345601340421341300156260ustar0000000000000000/* ******************************************************************************* * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash * functions are employed. The original cuckoo hashing algorithm was described * in: * * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms * 51(2):122-144. * * Generalization of cuckoo hashing was discussed in: * * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical * alternative to traditional hash tables. In Proceedings of the 7th * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, * January 2006. * * This implementation uses precisely two hash functions because that is the * fewest that can work, and supporting multiple hashes is an implementation * burden. Here is a reproduction of Figure 1 from Erlingsson et al. 
(2006) * that shows approximate expected maximum load factors for various * configurations: * * | #cells/bucket | * #hashes | 1 | 2 | 4 | 8 | * --------+-------+-------+-------+-------+ * 1 | 0.006 | 0.006 | 0.03 | 0.12 | * 2 | 0.49 | 0.86 |>0.93< |>0.96< | * 3 | 0.91 | 0.97 | 0.98 | 0.999 | * 4 | 0.97 | 0.99 | 0.999 | | * * The number of cells per bucket is chosen such that a bucket fits in one cache * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, * respectively. * ******************************************************************************/ #define JEMALLOC_CKH_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/util.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); /******************************************************************************/ /* * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. */ static size_t ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; if (cell->key != NULL && ckh->keycomp(key, cell->key)) { return (bucket << LG_CKH_BUCKET_CELLS) + i; } } return SIZE_T_MAX; } /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ static size_t ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); ckh->hash(key, hashes); /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); if (cell != SIZE_T_MAX) { return cell; } /* Search secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); return cell; } static bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, const void *data) { ckhc_t *cell; unsigned offset, i; /* * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. */ offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; if (cell->key == NULL) { cell->key = key; cell->data = data; ckh->count++; return false; } } return true; } /* * No space is available in bucket. Randomly evict an item, then try to find an * alternate location for that item. Iteratively repeat this * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ static bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; unsigned i; bucket = argbucket; key = *argkey; data = *argdata; while (true) { /* * Choose a random item within the bucket to evict. 
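 * (The choice uses prng_lg_range_u64() to pick one of the bucket's
 * 2^LG_CKH_BUCKET_CELLS cells pseudo-uniformly.)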
This is * critical to correct function, because without (eventually) * evicting all items within a bucket during iteration, it * would be possible to get stuck in an infinite loop if there * were an item for which both hashes indicated the same * bucket. */ i = (unsigned)prng_lg_range_u64(&ckh->prng_state, LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); /* Swap cell->{key,data} and {key,data} (evict). */ tkey = cell->key; tdata = cell->data; cell->key = key; cell->data = data; key = tkey; data = tdata; #ifdef CKH_COUNT ckh->nrelocs++; #endif /* Find the alternate bucket for the evicted item. */ ckh->hash(key, hashes); tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (tbucket == bucket) { tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); /* * It may be that (tbucket == bucket) still, if the * item's hashes both indicate this bucket. However, * we are guaranteed to eventually escape this bucket * during iteration, assuming pseudo-random item * selection (true randomness would make infinite * looping a remote possibility). The reason we can * never get trapped forever is that there are two * cases: * * 1) This bucket == argbucket, so we will quickly * detect an eviction cycle and terminate. * 2) An item was evicted to this bucket from another, * which means that at least one item in this bucket * has hashes that indicate distinct buckets. */ } /* Check for a cycle. */ if (tbucket == argbucket) { *argkey = key; *argdata = data; return true; } bucket = tbucket; if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } } } static bool ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; ckh->hash(key, hashes); /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } /* * Try to find a place for this item via iterative eviction/relocation. */ return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); } /* * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. */ static bool ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; count = ckh->count; ckh->count = 0; for (i = nins = 0; nins < count; i++) { if (aTab[i].key != NULL) { key = aTab[i].key; data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; return true; } nins++; } } return false; } static bool ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT ckh->ngrows++; #endif /* * It is possible (though unlikely, given well behaved hashes) that the * table will have to be doubled more than once in order to create a * usable table. */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; while (true) { size_t usize; lg_curcells++; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; } /* Swap in new table. 
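 * The old table is kept in tab so that ckh_rebuild() can reinsert its
 * items: on success the old table is freed; on failure the new table is
 * freed, the old one is restored, and the loop retries with twice as
 * many cells.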
*/ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: return ret; } static void ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; size_t usize; unsigned lg_prevbuckets, lg_curcells; /* * It is possible (though unlikely, given well behaved hashes) that the * table rebuild will fail. */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return; } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't * prevent this or future operations from proceeding. */ return; } /* Swap in new table. */ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif return; } /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT ckh->nshrinkfails++; #endif } bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; assert(minitems > 0); assert(hash != NULL); assert(keycomp != NULL); #ifdef CKH_COUNT ckh->ngrows = 0; ckh->nshrinks = 0; ckh->nshrinkfails = 0; ckh->ninserts = 0; ckh->nrelocs = 0; #endif ckh->prng_state = 42; /* Value doesn't really matter. */ ckh->count = 0; /* * Find the minimum power of 2 that is large enough to fit minitems * entries. We are using (2+,2) cuckoo hashing, which has an expected * maximum load factor of at least ~0.86, so 0.75 is a conservative load * factor that will typically allow mincells items to fit without ever * growing the table. */ assert(LG_CKH_BUCKET_CELLS > 0); mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; lg_mincells++) { /* Do nothing. 
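 * (This loop merely finds the smallest lg_mincells such that
 * (1 << lg_mincells) >= mincells. For example, minitems == 10 gives
 * mincells == ((10 + 2) / 3) << 2 == 16, hence lg_mincells == 4
 * whenever LG_CKH_BUCKET_CELLS <= 4.)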
*/ } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; ckh->keycomp = keycomp; usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; } ret = false; label_return: return ret; } void ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE malloc_printf( "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," " nrelocs: %"FMTu64"\n", __func__, ckh, (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks, (unsigned long long)ckh->nshrinkfails, (unsigned long long)ckh->ninserts, (unsigned long long)ckh->nrelocs); #endif idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); if (config_debug) { memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); } } size_t ckh_count(ckh_t *ckh) { assert(ckh != NULL); return ckh->count; } bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { if (key != NULL) { *key = (void *)ckh->tab[i].key; } if (data != NULL) { *data = (void *)ckh->tab[i].data; } *tabind = i + 1; return false; } } return true; } bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); assert(ckh_search(ckh, key, NULL, NULL)); #ifdef CKH_COUNT ckh->ninserts++; #endif while (ckh_try_insert(ckh, &key, &data)) { if (ckh_grow(tsd, ckh)) { ret = true; goto label_return; } } ret = false; label_return: return ret; } bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { if (key != NULL) { *key = (void *)ckh->tab[cell].key; } if (data != NULL) { *data = (void *)ckh->tab[cell].data; } ckh->tab[cell].key = NULL; ckh->tab[cell].data = NULL; /* Not necessary. */ ckh->count--; /* Try to halve the table if it is less than 1/4 full. */ if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets > ckh->lg_minbuckets) { /* Ignore error due to OOM. 
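 * For example, with 1024 cells (lg_curbuckets + LG_CKH_BUCKET_CELLS ==
 * 10), the shrink is attempted once count drops below 256, provided
 * lg_curbuckets is still above lg_minbuckets.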
*/ ckh_shrink(tsd, ckh); } return false; } return true; } bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { if (key != NULL) { *key = (void *)ckh->tab[cell].key; } if (data != NULL) { *data = (void *)ckh->tab[cell].data; } return false; } return true; } void ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool ckh_string_keycomp(const void *k1, const void *k2) { assert(k1 != NULL); assert(k2 != NULL); return !strcmp((char *)k1, (char *)k2); } void ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; } u; assert(sizeof(u.v) == sizeof(u.i)); u.v = key; hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); } bool ckh_pointer_keycomp(const void *k1, const void *k2) { return (k1 == k2); } jemalloc-sys-0.3.2/jemalloc/src/ctl.c010064400007650000024000002367471340421341300156560ustar0000000000000000#define JEMALLOC_CTL_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ /* * ctl_mtx protects the following: * - ctl_stats->* */ static malloc_mutex_t ctl_mtx; static bool ctl_initialized; static ctl_stats_t *ctl_stats; static ctl_arenas_t *ctl_arenas; /******************************************************************************/ /* Helpers for named and indexed nodes. */ static const ctl_named_node_t * ctl_named_node(const ctl_node_t *node) { return ((node->named) ? (const ctl_named_node_t *)node : NULL); } static const ctl_named_node_t * ctl_named_children(const ctl_named_node_t *node, size_t index) { const ctl_named_node_t *children = ctl_named_node(node->children); return (children ? &children[index] : NULL); } static const ctl_indexed_node_t * ctl_indexed_node(const ctl_node_t *node) { return (!node->named ? (const ctl_indexed_node_t *)node : NULL); } /******************************************************************************/ /* Function prototypes for non-inline static functions. 
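 *
 * Each CTL_PROTO(n) below expands to a declaration of the form
 *
 *   static int n_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *       void *oldp, size_t *oldlenp, void *newp, size_t newlen);
 *
 * e.g. CTL_PROTO(version) declares version_ctl().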
*/ #define CTL_PROTO(n) \ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen); #define INDEX_PROTO(n) \ static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ const size_t *mib, size_t miblen, size_t i); CTL_PROTO(version) CTL_PROTO(epoch) CTL_PROTO(background_thread) CTL_PROTO(max_background_threads) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_prof_name) CTL_PROTO(thread_prof_active) CTL_PROTO(thread_arena) CTL_PROTO(thread_allocated) CTL_PROTO(thread_allocatedp) CTL_PROTO(thread_deallocated) CTL_PROTO(thread_deallocatedp) CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) CTL_PROTO(config_malloc_conf) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_stats) CTL_PROTO(config_utrace) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) CTL_PROTO(opt_abort_conf) CTL_PROTO(opt_metadata_thp) CTL_PROTO(opt_retain) CTL_PROTO(opt_dss) CTL_PROTO(opt_narenas) CTL_PROTO(opt_percpu_arena) CTL_PROTO(opt_background_thread) CTL_PROTO(opt_max_background_threads) CTL_PROTO(opt_dirty_decay_ms) CTL_PROTO(opt_muzzy_decay_ms) CTL_PROTO(opt_stats_print) CTL_PROTO(opt_stats_print_opts) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) CTL_PROTO(opt_utrace) CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_tcache) CTL_PROTO(opt_thp) CTL_PROTO(opt_lg_extent_max_active_fit) CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_active) CTL_PROTO(opt_prof_thread_active_init) CTL_PROTO(opt_lg_prof_sample) CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_accum) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) CTL_PROTO(arena_i_initialized) CTL_PROTO(arena_i_decay) CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_reset) CTL_PROTO(arena_i_destroy) CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_dirty_decay_ms) CTL_PROTO(arena_i_muzzy_decay_ms) CTL_PROTO(arena_i_extent_hooks) CTL_PROTO(arena_i_retain_grow_limit) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_slab_size) INDEX_PROTO(arenas_bin_i) CTL_PROTO(arenas_lextent_i_size) INDEX_PROTO(arenas_lextent_i) CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_dirty_decay_ms) CTL_PROTO(arenas_muzzy_decay_ms) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nlextents) CTL_PROTO(arenas_create) CTL_PROTO(arenas_lookup) CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) CTL_PROTO(prof_gdump) CTL_PROTO(prof_reset) CTL_PROTO(prof_interval) CTL_PROTO(lg_prof_sample) CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) CTL_PROTO(stats_arenas_i_small_nrequests) CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_curregs) CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nslabs) CTL_PROTO(stats_arenas_i_bins_j_nreslabs) CTL_PROTO(stats_arenas_i_bins_j_curslabs) INDEX_PROTO(stats_arenas_i_bins_j) 
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) CTL_PROTO(stats_arenas_i_lextents_j_nrequests) CTL_PROTO(stats_arenas_i_lextents_j_curlextents) INDEX_PROTO(stats_arenas_i_lextents_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_uptime) CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dirty_decay_ms) CTL_PROTO(stats_arenas_i_muzzy_decay_ms) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_pmuzzy) CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_retained) CTL_PROTO(stats_arenas_i_dirty_npurge) CTL_PROTO(stats_arenas_i_dirty_nmadvise) CTL_PROTO(stats_arenas_i_dirty_purged) CTL_PROTO(stats_arenas_i_muzzy_npurge) CTL_PROTO(stats_arenas_i_muzzy_nmadvise) CTL_PROTO(stats_arenas_i_muzzy_purged) CTL_PROTO(stats_arenas_i_base) CTL_PROTO(stats_arenas_i_internal) CTL_PROTO(stats_arenas_i_metadata_thp) CTL_PROTO(stats_arenas_i_tcache_bytes) CTL_PROTO(stats_arenas_i_resident) INDEX_PROTO(stats_arenas_i) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) CTL_PROTO(stats_background_thread_num_threads) CTL_PROTO(stats_background_thread_num_runs) CTL_PROTO(stats_background_thread_run_interval) CTL_PROTO(stats_metadata) CTL_PROTO(stats_metadata_thp) CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) CTL_PROTO(stats_retained) #define MUTEX_STATS_CTL_PROTO_GEN(n) \ CTL_PROTO(stats_##n##_num_ops) \ CTL_PROTO(stats_##n##_num_wait) \ CTL_PROTO(stats_##n##_num_spin_acq) \ CTL_PROTO(stats_##n##_num_owner_switch) \ CTL_PROTO(stats_##n##_total_wait_time) \ CTL_PROTO(stats_##n##_max_wait_time) \ CTL_PROTO(stats_##n##_max_num_thds) /* Global mutexes. */ #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) MUTEX_PROF_GLOBAL_MUTEXES #undef OP /* Per arena mutexes. */ #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx) MUTEX_PROF_ARENA_MUTEXES #undef OP /* Arena bin mutexes. */ MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) #undef MUTEX_STATS_CTL_PROTO_GEN CTL_PROTO(stats_mutexes_reset) /******************************************************************************/ /* mallctl tree. */ #define NAME(n) {true}, n #define CHILD(t, c) \ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ (ctl_node_t *)c##_node, \ NULL #define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. 
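 *
 * To illustrate the traversal of the tree defined below: looking up
 * "stats.arenas.0.nthreads" descends from super_root_node through the
 * named children "stats" and "arenas"; the children of "arenas" are
 * indexed, so "0" is parsed as an index and resolved via
 * stats_arenas_i_index(), and the walk terminates at the node wired to
 * stats_arenas_i_nthreads_ctl().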
*/ #define INDEX(i) {false}, i##_index static const ctl_named_node_t thread_tcache_node[] = { {NAME("enabled"), CTL(thread_tcache_enabled)}, {NAME("flush"), CTL(thread_tcache_flush)} }; static const ctl_named_node_t thread_prof_node[] = { {NAME("name"), CTL(thread_prof_name)}, {NAME("active"), CTL(thread_prof_active)} }; static const ctl_named_node_t thread_node[] = { {NAME("arena"), CTL(thread_arena)}, {NAME("allocated"), CTL(thread_allocated)}, {NAME("allocatedp"), CTL(thread_allocatedp)}, {NAME("deallocated"), CTL(thread_deallocated)}, {NAME("deallocatedp"), CTL(thread_deallocatedp)}, {NAME("tcache"), CHILD(named, thread_tcache)}, {NAME("prof"), CHILD(named, thread_prof)} }; static const ctl_named_node_t config_node[] = { {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, {NAME("debug"), CTL(config_debug)}, {NAME("fill"), CTL(config_fill)}, {NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("malloc_conf"), CTL(config_malloc_conf)}, {NAME("prof"), CTL(config_prof)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, {NAME("stats"), CTL(config_stats)}, {NAME("utrace"), CTL(config_utrace)}, {NAME("xmalloc"), CTL(config_xmalloc)} }; static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, {NAME("abort_conf"), CTL(opt_abort_conf)}, {NAME("metadata_thp"), CTL(opt_metadata_thp)}, {NAME("retain"), CTL(opt_retain)}, {NAME("dss"), CTL(opt_dss)}, {NAME("narenas"), CTL(opt_narenas)}, {NAME("percpu_arena"), CTL(opt_percpu_arena)}, {NAME("background_thread"), CTL(opt_background_thread)}, {NAME("max_background_threads"), CTL(opt_max_background_threads)}, {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, {NAME("stats_print"), CTL(opt_stats_print)}, {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, {NAME("utrace"), CTL(opt_utrace)}, {NAME("xmalloc"), CTL(opt_xmalloc)}, {NAME("tcache"), CTL(opt_tcache)}, {NAME("thp"), CTL(opt_thp)}, {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, {NAME("prof"), CTL(opt_prof)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_active"), CTL(opt_prof_active)}, {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, {NAME("prof_gdump"), CTL(opt_prof_gdump)}, {NAME("prof_final"), CTL(opt_prof_final)}, {NAME("prof_leak"), CTL(opt_prof_leak)}, {NAME("prof_accum"), CTL(opt_prof_accum)} }; static const ctl_named_node_t tcache_node[] = { {NAME("create"), CTL(tcache_create)}, {NAME("flush"), CTL(tcache_flush)}, {NAME("destroy"), CTL(tcache_destroy)} }; static const ctl_named_node_t arena_i_node[] = { {NAME("initialized"), CTL(arena_i_initialized)}, {NAME("decay"), CTL(arena_i_decay)}, {NAME("purge"), CTL(arena_i_purge)}, {NAME("reset"), CTL(arena_i_reset)}, {NAME("destroy"), CTL(arena_i_destroy)}, {NAME("dss"), CTL(arena_i_dss)}, {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)} }; static const ctl_named_node_t super_arena_i_node[] = { {NAME(""), CHILD(named, arena_i)} }; static const ctl_indexed_node_t arena_node[] = { {INDEX(arena_i)} }; static const ctl_named_node_t arenas_bin_i_node[] = { {NAME("size"), 
CTL(arenas_bin_i_size)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("slab_size"), CTL(arenas_bin_i_slab_size)} }; static const ctl_named_node_t super_arenas_bin_i_node[] = { {NAME(""), CHILD(named, arenas_bin_i)} }; static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; static const ctl_named_node_t arenas_lextent_i_node[] = { {NAME("size"), CTL(arenas_lextent_i_size)} }; static const ctl_named_node_t super_arenas_lextent_i_node[] = { {NAME(""), CHILD(named, arenas_lextent_i)} }; static const ctl_indexed_node_t arenas_lextent_node[] = { {INDEX(arenas_lextent_i)} }; static const ctl_named_node_t arenas_node[] = { {NAME("narenas"), CTL(arenas_narenas)}, {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, {NAME("quantum"), CTL(arenas_quantum)}, {NAME("page"), CTL(arenas_page)}, {NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("nbins"), CTL(arenas_nbins)}, {NAME("nhbins"), CTL(arenas_nhbins)}, {NAME("bin"), CHILD(indexed, arenas_bin)}, {NAME("nlextents"), CTL(arenas_nlextents)}, {NAME("lextent"), CHILD(indexed, arenas_lextent)}, {NAME("create"), CTL(arenas_create)}, {NAME("lookup"), CTL(arenas_lookup)} }; static const ctl_named_node_t prof_node[] = { {NAME("thread_active_init"), CTL(prof_thread_active_init)}, {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, {NAME("gdump"), CTL(prof_gdump)}, {NAME("reset"), CTL(prof_reset)}, {NAME("interval"), CTL(prof_interval)}, {NAME("lg_sample"), CTL(lg_prof_sample)} }; static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} }; static const ctl_named_node_t stats_arenas_i_large_node[] = { {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; #define MUTEX_PROF_DATA_NODE(prefix) \ static const ctl_named_node_t stats_##prefix##_node[] = { \ {NAME("num_ops"), \ CTL(stats_##prefix##_num_ops)}, \ {NAME("num_wait"), \ CTL(stats_##prefix##_num_wait)}, \ {NAME("num_spin_acq"), \ CTL(stats_##prefix##_num_spin_acq)}, \ {NAME("num_owner_switch"), \ CTL(stats_##prefix##_num_owner_switch)}, \ {NAME("total_wait_time"), \ CTL(stats_##prefix##_total_wait_time)}, \ {NAME("max_wait_time"), \ CTL(stats_##prefix##_max_wait_time)}, \ {NAME("max_num_thds"), \ CTL(stats_##prefix##_max_num_thds)} \ /* Note that # of current waiting thread not provided. 
*/ \ }; MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} }; static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_bins_j)} }; static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} }; static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} }; static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { {INDEX(stats_arenas_i_lextents_j)} }; #define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) MUTEX_PROF_ARENA_MUTEXES #undef OP static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { #define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, MUTEX_PROF_ARENA_MUTEXES #undef OP }; static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("uptime"), CTL(stats_arenas_i_uptime)}, {NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("retained"), CTL(stats_arenas_i_retained)}, {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, {NAME("base"), CTL(stats_arenas_i_base)}, {NAME("internal"), CTL(stats_arenas_i_internal)}, {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, {NAME("resident"), CTL(stats_arenas_i_resident)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { {NAME(""), CHILD(named, stats_arenas_i)} }; static const ctl_indexed_node_t stats_arenas_node[] = { {INDEX(stats_arenas_i)} }; static const ctl_named_node_t stats_background_thread_node[] = { {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, 
{NAME("run_interval"), CTL(stats_background_thread_run_interval)} }; #define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) MUTEX_PROF_GLOBAL_MUTEXES #undef OP static const ctl_named_node_t stats_mutexes_node[] = { #define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, MUTEX_PROF_GLOBAL_MUTEXES #undef OP {NAME("reset"), CTL(stats_mutexes_reset)} }; #undef MUTEX_PROF_DATA_NODE static const ctl_named_node_t stats_node[] = { {NAME("allocated"), CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("metadata"), CTL(stats_metadata)}, {NAME("metadata_thp"), CTL(stats_metadata_thp)}, {NAME("resident"), CTL(stats_resident)}, {NAME("mapped"), CTL(stats_mapped)}, {NAME("retained"), CTL(stats_retained)}, {NAME("background_thread"), CHILD(named, stats_background_thread)}, {NAME("mutexes"), CHILD(named, stats_mutexes)}, {NAME("arenas"), CHILD(indexed, stats_arenas)} }; static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, {NAME("background_thread"), CTL(background_thread)}, {NAME("max_background_threads"), CTL(max_background_threads)}, {NAME("thread"), CHILD(named, thread)}, {NAME("config"), CHILD(named, config)}, {NAME("opt"), CHILD(named, opt)}, {NAME("tcache"), CHILD(named, tcache)}, {NAME("arena"), CHILD(indexed, arena)}, {NAME("arenas"), CHILD(named, arenas)}, {NAME("prof"), CHILD(named, prof)}, {NAME("stats"), CHILD(named, stats)} }; static const ctl_named_node_t super_root_node[] = { {NAME(""), CHILD(named, root)} }; #undef NAME #undef CHILD #undef CTL #undef INDEX /******************************************************************************/ /* * Sets *dst + *src non-atomically. This is safe, since everything is * synchronized by the ctl mutex. */ static void ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { #ifdef JEMALLOC_ATOMIC_U64 uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); #else *dst += *src; #endif } /* Likewise: with ctl mutex synchronization, reading is simple. */ static uint64_t ctl_arena_stats_read_u64(arena_stats_u64_t *p) { #ifdef JEMALLOC_ATOMIC_U64 return atomic_load_u64(p, ATOMIC_RELAXED); #else return *p; #endif } static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); } /******************************************************************************/ static unsigned arenas_i2a_impl(size_t i, bool compat, bool validate) { unsigned a; switch (i) { case MALLCTL_ARENAS_ALL: a = 0; break; case MALLCTL_ARENAS_DESTROYED: a = 1; break; default: if (compat && i == ctl_arenas->narenas) { /* * Provide deprecated backward compatibility for * accessing the merged stats at index narenas rather * than via MALLCTL_ARENAS_ALL. This is scheduled for * removal in 6.0.0. */ a = 0; } else if (validate && i >= ctl_arenas->narenas) { a = UINT_MAX; } else { /* * This function should never be called for an index * more than one past the range of indices that have * initialized ctl data. 
*/ assert(i < ctl_arenas->narenas || (!validate && i == ctl_arenas->narenas)); a = (unsigned)i + 2; } break; } return a; } static unsigned arenas_i2a(size_t i) { return arenas_i2a_impl(i, true, false); } static ctl_arena_t * arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { ctl_arena_t *ret; assert(!compat || !init); ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)]; if (init && ret == NULL) { if (config_stats) { struct container_s { ctl_arena_t ctl_arena; ctl_arena_stats_t astats; }; struct container_s *cont = (struct container_s *)base_alloc(tsd_tsdn(tsd), b0get(), sizeof(struct container_s), QUANTUM); if (cont == NULL) { return NULL; } ret = &cont->ctl_arena; ret->astats = &cont->astats; } else { ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(), sizeof(ctl_arena_t), QUANTUM); if (ret == NULL) { return NULL; } } ret->arena_ind = (unsigned)i; ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret; } assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); return ret; } static ctl_arena_t * arenas_i(size_t i) { ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false); assert(ret != NULL); return ret; } static void ctl_arena_clear(ctl_arena_t *ctl_arena) { ctl_arena->nthreads = 0; ctl_arena->dss = dss_prec_names[dss_prec_limit]; ctl_arena->dirty_decay_ms = -1; ctl_arena->muzzy_decay_ms = -1; ctl_arena->pactive = 0; ctl_arena->pdirty = 0; ctl_arena->pmuzzy = 0; if (config_stats) { memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); ctl_arena->astats->allocated_small = 0; ctl_arena->astats->nmalloc_small = 0; ctl_arena->astats->ndalloc_small = 0; ctl_arena->astats->nrequests_small = 0; memset(ctl_arena->astats->bstats, 0, NBINS * sizeof(bin_stats_t)); memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) * sizeof(arena_stats_large_t)); } } static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { unsigned i; if (config_stats) { arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, &ctl_arena->dss, &ctl_arena->dirty_decay_ms, &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->pdirty, &ctl_arena->pmuzzy, &ctl_arena->astats->astats, ctl_arena->astats->bstats, ctl_arena->astats->lstats); for (i = 0; i < NBINS; i++) { ctl_arena->astats->allocated_small += ctl_arena->astats->bstats[i].curregs * sz_index2size(i); ctl_arena->astats->nmalloc_small += ctl_arena->astats->bstats[i].nmalloc; ctl_arena->astats->ndalloc_small += ctl_arena->astats->bstats[i].ndalloc; ctl_arena->astats->nrequests_small += ctl_arena->astats->bstats[i].nrequests; } } else { arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, &ctl_arena->dss, &ctl_arena->dirty_decay_ms, &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->pdirty, &ctl_arena->pmuzzy); } } static void ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, bool destroyed) { unsigned i; if (!destroyed) { ctl_sdarena->nthreads += ctl_arena->nthreads; ctl_sdarena->pactive += ctl_arena->pactive; ctl_sdarena->pdirty += ctl_arena->pdirty; ctl_sdarena->pmuzzy += ctl_arena->pmuzzy; } else { assert(ctl_arena->nthreads == 0); assert(ctl_arena->pactive == 0); assert(ctl_arena->pdirty == 0); assert(ctl_arena->pmuzzy == 0); } if (config_stats) { ctl_arena_stats_t *sdstats = ctl_sdarena->astats; ctl_arena_stats_t *astats = ctl_arena->astats; if (!destroyed) { accum_atomic_zu(&sdstats->astats.mapped, &astats->astats.mapped); accum_atomic_zu(&sdstats->astats.retained, &astats->astats.retained); } 
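/*
 * The decay/purge event counters below are merged unconditionally; the
 * size counters above (mapped, retained) are only merged for live
 * arenas.
 */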
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, &astats->astats.decay_dirty.npurge); ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, &astats->astats.decay_dirty.nmadvise); ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, &astats->astats.decay_dirty.purged); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, &astats->astats.decay_muzzy.npurge); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, &astats->astats.decay_muzzy.nmadvise); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, &astats->astats.decay_muzzy.purged); #define OP(mtx) malloc_mutex_prof_merge( \ &(sdstats->astats.mutex_prof_data[ \ arena_prof_mutex_##mtx]), \ &(astats->astats.mutex_prof_data[ \ arena_prof_mutex_##mtx])); MUTEX_PROF_ARENA_MUTEXES #undef OP if (!destroyed) { accum_atomic_zu(&sdstats->astats.base, &astats->astats.base); accum_atomic_zu(&sdstats->astats.internal, &astats->astats.internal); accum_atomic_zu(&sdstats->astats.resident, &astats->astats.resident); accum_atomic_zu(&sdstats->astats.metadata_thp, &astats->astats.metadata_thp); } else { assert(atomic_load_zu( &astats->astats.internal, ATOMIC_RELAXED) == 0); } if (!destroyed) { sdstats->allocated_small += astats->allocated_small; } else { assert(astats->allocated_small == 0); } sdstats->nmalloc_small += astats->nmalloc_small; sdstats->ndalloc_small += astats->ndalloc_small; sdstats->nrequests_small += astats->nrequests_small; if (!destroyed) { accum_atomic_zu(&sdstats->astats.allocated_large, &astats->astats.allocated_large); } else { assert(atomic_load_zu(&astats->astats.allocated_large, ATOMIC_RELAXED) == 0); } ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large, &astats->astats.nmalloc_large); ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large, &astats->astats.ndalloc_large); ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, &astats->astats.nrequests_large); accum_atomic_zu(&sdstats->astats.tcache_bytes, &astats->astats.tcache_bytes); if (ctl_arena->arena_ind == 0) { sdstats->astats.uptime = astats->astats.uptime; } for (i = 0; i < NBINS; i++) { sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; sdstats->bstats[i].nrequests += astats->bstats[i].nrequests; if (!destroyed) { sdstats->bstats[i].curregs += astats->bstats[i].curregs; } else { assert(astats->bstats[i].curregs == 0); } sdstats->bstats[i].nfills += astats->bstats[i].nfills; sdstats->bstats[i].nflushes += astats->bstats[i].nflushes; sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; if (!destroyed) { sdstats->bstats[i].curslabs += astats->bstats[i].curslabs; } else { assert(astats->bstats[i].curslabs == 0); } malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, &astats->bstats[i].mutex_data); } for (i = 0; i < NSIZES - NBINS; i++) { ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, &astats->lstats[i].nmalloc); ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, &astats->lstats[i].ndalloc); ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests, &astats->lstats[i].nrequests); if (!destroyed) { sdstats->lstats[i].curlextents += astats->lstats[i].curlextents; } else { assert(astats->lstats[i].curlextents == 0); } } } } static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, unsigned i, bool destroyed) { ctl_arena_t *ctl_arena = arenas_i(i); ctl_arena_clear(ctl_arena); ctl_arena_stats_amerge(tsdn, ctl_arena, arena); /* Merge into sum 
stats as well. */ ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); } static unsigned ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { unsigned arena_ind; ctl_arena_t *ctl_arena; if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != NULL) { ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); arena_ind = ctl_arena->arena_ind; } else { arena_ind = ctl_arenas->narenas; } /* Trigger stats allocation. */ if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { return UINT_MAX; } /* Initialize new arena. */ if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { return UINT_MAX; } if (arena_ind == ctl_arenas->narenas) { ctl_arenas->narenas++; } return arena_ind; } static void ctl_background_thread_stats_read(tsdn_t *tsdn) { background_thread_stats_t *stats = &ctl_stats->background_thread; if (!have_background_thread || background_thread_stats_read(tsdn, stats)) { memset(stats, 0, sizeof(background_thread_stats_t)); nstime_init(&stats->run_interval, 0); } } static void ctl_refresh(tsdn_t *tsdn) { unsigned i; ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). */ ctl_arena_clear(ctl_sarena); for (i = 0; i < ctl_arenas->narenas; i++) { tarenas[i] = arena_get(tsdn, i, false); } for (i = 0; i < ctl_arenas->narenas; i++) { ctl_arena_t *ctl_arena = arenas_i(i); bool initialized = (tarenas[i] != NULL); ctl_arena->initialized = initialized; if (initialized) { ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, false); } } if (config_stats) { ctl_stats->allocated = ctl_sarena->astats->allocated_small + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, ATOMIC_RELAXED); ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); ctl_stats->metadata = atomic_load_zu( &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + atomic_load_zu(&ctl_sarena->astats->astats.internal, ATOMIC_RELAXED); ctl_stats->metadata_thp = atomic_load_zu( &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED); ctl_stats->resident = atomic_load_zu( &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); ctl_stats->mapped = atomic_load_zu( &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); ctl_stats->retained = atomic_load_zu( &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); ctl_background_thread_stats_read(tsdn); #define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ malloc_mutex_lock(tsdn, &mtx); \ malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ malloc_mutex_unlock(tsdn, &mtx); if (config_prof && opt_prof) { READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, bt2gctx_mtx); } if (have_background_thread) { READ_GLOBAL_MUTEX_PROF_DATA( global_prof_mutex_background_thread, background_thread_lock); } else { memset(&ctl_stats->mutex_prof_data[ global_prof_mutex_background_thread], 0, sizeof(mutex_prof_data_t)); } /* We own ctl mutex already. */ malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], &ctl_mtx); #undef READ_GLOBAL_MUTEX_PROF_DATA } ctl_arenas->epoch++; } static bool ctl_init(tsd_t *tsd) { bool ret; tsdn_t *tsdn = tsd_tsdn(tsd); malloc_mutex_lock(tsdn, &ctl_mtx); if (!ctl_initialized) { ctl_arena_t *ctl_sarena, *ctl_darena; unsigned i; /* * Allocate demand-zeroed space for pointers to the full * range of supported arena indices. 
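 * Per arenas_i2a_impl(), that range covers the two pseudo-arenas
 * (MALLCTL_ARENAS_ALL and MALLCTL_ARENAS_DESTROYED, in slots 0 and 1)
 * plus slot i + 2 for each real arena index i.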
*/ if (ctl_arenas == NULL) { ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, b0get(), sizeof(ctl_arenas_t), QUANTUM); if (ctl_arenas == NULL) { ret = true; goto label_return; } } if (config_stats && ctl_stats == NULL) { ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), sizeof(ctl_stats_t), QUANTUM); if (ctl_stats == NULL) { ret = true; goto label_return; } } /* * Allocate space for the current full range of arenas * here rather than doing it lazily elsewhere, in order * to limit when OOM-caused errors can occur. */ if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, true)) == NULL) { ret = true; goto label_return; } ctl_sarena->initialized = true; if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, false, true)) == NULL) { ret = true; goto label_return; } ctl_arena_clear(ctl_darena); /* * Don't toggle ctl_darena to initialized until an arena is * actually destroyed, so that arena..initialized can be used * to query whether the stats are relevant. */ ctl_arenas->narenas = narenas_total_get(); for (i = 0; i < ctl_arenas->narenas; i++) { if (arenas_i_impl(tsd, i, false, true) == NULL) { ret = true; goto label_return; } } ql_new(&ctl_arenas->destroyed); ctl_refresh(tsdn); ctl_initialized = true; } ret = false; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; } static int ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; const ctl_named_node_t *node; elm = name; /* Equivalent to strchrnul(). */ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); if (elen == 0) { ret = ENOENT; goto label_return; } node = super_root_node; for (i = 0; i < *depthp; i++) { assert(node); assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { const ctl_named_node_t *pnode = node; /* Children are named. */ for (j = 0; j < node->nchildren; j++) { const ctl_named_node_t *child = ctl_named_children(node, j); if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; } mibp[i] = j; break; } } if (node == pnode) { ret = ENOENT; goto label_return; } } else { uintmax_t index; const ctl_indexed_node_t *inode; /* Children are indexed. */ index = malloc_strtoumax(elm, NULL, 10); if (index == UINTMAX_MAX || index > SIZE_T_MAX) { ret = ENOENT; goto label_return; } inode = ctl_indexed_node(node->children); node = inode->index(tsdn, mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; } mibp[i] = (size_t)index; } if (node->ctl != NULL) { /* Terminal node. */ if (*dot != '\0') { /* * The name contains more elements than are * in this path through the tree. */ ret = ENOENT; goto label_return; } /* Complete lookup successful. */ *depthp = i + 1; break; } /* Update elm. */ if (*dot == '\0') { /* No more elements. */ ret = ENOENT; goto label_return; } elm = &dot[1]; dot = ((tdot = strchr(elm, '.')) != NULL) ? 
tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); } ret = 0; label_return: return ret; } int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); if (ret != 0) { goto label_return; } node = ctl_named_node(nodes[depth-1]); if (node != NULL && node->ctl) { ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); } else { /* The name refers to a partial path through the ctl tree. */ ret = ENOENT; } label_return: return(ret); } int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { int ret; if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); label_return: return(ret); } int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } /* Iterate down the tree. */ node = super_root_node; for (i = 0; i < miblen; i++) { assert(node); assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { /* Children are named. */ if (node->nchildren <= mib[i]) { ret = ENOENT; goto label_return; } node = ctl_named_children(node, mib[i]); } else { const ctl_indexed_node_t *inode; /* Indexed element. */ inode = ctl_indexed_node(node->children); node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; } } } /* Call the ctl function. */ if (node && node->ctl) { ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); } else { /* Partial MIB. */ ret = ENOENT; } label_return: return(ret); } bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, malloc_mutex_rank_exclusive)) { return true; } ctl_initialized = false; return false; } void ctl_prefork(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &ctl_mtx); } void ctl_postfork_parent(tsdn_t *tsdn) { malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void ctl_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &ctl_mtx); } /******************************************************************************/ /* *_ctl() functions. */ #define READONLY() do { \ if (newp != NULL || newlen != 0) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) #define WRITEONLY() do { \ if (oldp != NULL || oldlenp != NULL) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) #define READ_XOR_WRITE() do { \ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ newlen != 0)) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) #define READ(v, t) do { \ if (oldp != NULL && oldlenp != NULL) { \ if (*oldlenp != sizeof(t)) { \ size_t copylen = (sizeof(t) <= *oldlenp) \ ? 
sizeof(t) : *oldlenp; \ memcpy(oldp, (void *)&(v), copylen); \ ret = EINVAL; \ goto label_return; \ } \ *(t *)oldp = (v); \ } \ } while (0) #define WRITE(v, t) do { \ if (newp != NULL) { \ if (newlen != sizeof(t)) { \ ret = EINVAL; \ goto label_return; \ } \ (v) = *(t *)newp; \ } \ } while (0) #define MIB_UNSIGNED(v, i) do { \ if (mib[i] > UINT_MAX) { \ ret = EFAULT; \ goto label_return; \ } \ v = (unsigned)mib[i]; \ } while (0) /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. */ #define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ if (l) { \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ if (l) { \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ } \ return ret; \ } #define CTL_RO_CGEN(c, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return ret; \ } #define CTL_RO_GEN(n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return ret; \ } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate during the call. 
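 *
 * For example, CTL_RO_NL_GEN(opt_abort, opt_abort, bool) defines
 * opt_abort_ctl(), which rejects writes via READONLY() and copies
 * opt_abort out through READ() as a bool, all without taking ctl_mtx.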
*/ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } #define CTL_RO_NL_GEN(n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ READONLY(); \ oldval = (m(tsd)); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } #define CTL_RO_CONFIG_GEN(n, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = n; \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } /******************************************************************************/ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); if (newp != NULL) { ctl_refresh(tsd_tsdn(tsd)); } READ(ctl_arenas->epoch, uint64_t); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!have_background_thread) { return ENOENT; } background_thread_ctl_init(tsd_tsdn(tsd)); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); if (newp == NULL) { oldval = background_thread_enabled(); READ(oldval, bool); } else { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = background_thread_enabled(); READ(oldval, bool); bool newval = *(bool *)newp; if (newval == oldval) { ret = 0; goto label_return; } background_thread_enabled_set(tsd_tsdn(tsd), newval); if (newval) { if (!can_enable_background_thread) { malloc_printf(": Error in dlsym(" "RTLD_NEXT, \"pthread_create\"). 
Cannot " "enable background_thread\n"); ret = EFAULT; goto label_return; } if (background_threads_enable(tsd)) { ret = EFAULT; goto label_return; } } else { if (background_threads_disable(tsd)) { ret = EFAULT; goto label_return; } } } ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t oldval; if (!have_background_thread) { return ENOENT; } background_thread_ctl_init(tsd_tsdn(tsd)); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); if (newp == NULL) { oldval = max_background_threads; READ(oldval, size_t); } else { if (newlen != sizeof(size_t)) { ret = EINVAL; goto label_return; } oldval = max_background_threads; READ(oldval, size_t); size_t newval = *(size_t *)newp; if (newval == oldval) { ret = 0; goto label_return; } if (newval > opt_max_background_threads) { ret = EINVAL; goto label_return; } if (background_thread_enabled()) { if (!can_enable_background_thread) { malloc_printf(": Error in dlsym(" "RTLD_NEXT, \"pthread_create\"). Cannot " "enable background_thread\n"); ret = EFAULT; goto label_return; } background_thread_enabled_set(tsd_tsdn(tsd), false); if (background_threads_disable(tsd)) { ret = EFAULT; goto label_return; } max_background_threads = newval; background_thread_enabled_set(tsd_tsdn(tsd), true); if (background_threads_enable(tsd)) { ret = EFAULT; goto label_return; } } else { max_background_threads = newval; } } ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } /******************************************************************************/ CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) CTL_RO_CONFIG_GEN(config_debug, bool) CTL_RO_CONFIG_GEN(config_fill, bool) CTL_RO_CONFIG_GEN(config_lazy_lock, bool) CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) CTL_RO_CONFIG_GEN(config_prof, bool) CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) CTL_RO_CONFIG_GEN(config_stats, bool) CTL_RO_CONFIG_GEN(config_utrace, bool) CTL_RO_CONFIG_GEN(config_xmalloc, bool) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], const char *) CTL_RO_NL_GEN(opt_retain, opt_retain, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], const char *) CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) CTL_RO_NL_GEN(opt_thp, 
thp_mode_names[opt_thp], const char *) CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, size_t) CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, opt_prof_thread_active_init, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; arena_t *oldarena; unsigned newind, oldind; oldarena = arena_choose(tsd, NULL); if (oldarena == NULL) { return EAGAIN; } newind = oldind = arena_ind_get(oldarena); WRITE(newind, unsigned); READ(oldind, unsigned); if (newind != oldind) { arena_t *newarena; if (newind >= narenas_total_get()) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { /* * If perCPU arena is enabled, thread_arena * control is not allowed for the auto arena * range. */ ret = EPERM; goto label_return; } } /* Initialize arena if necessary. */ newarena = arena_get(tsd_tsdn(tsd), newind, true); if (newarena == NULL) { ret = EAGAIN; goto label_return; } /* Set new arena/tcache associations. 
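* (Aside, not in upstream jemalloc: this handler is normally reached
* through the public mallctl API. A minimal sketch, with a hypothetical
* target index:
*
*   unsigned ind = 3;
*   mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));
*
* after which the calling thread allocates from arena 3, subject to the
* percpu-arena restriction checked above.)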
*/ arena_migrate(tsd, oldind, newind); if (tcache_available(tsd)) { tcache_arena_reassociate(tsd_tsdn(tsd), tsd_tcachep_get(tsd), newarena); } } ret = 0; label_return: return ret; } CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, uint64_t *) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, tsd_thread_deallocatedp_get, uint64_t *) static int thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; oldval = tcache_enabled_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } tcache_enabled_set(tsd, *(bool *)newp); } READ(oldval, bool); ret = 0; label_return: return ret; } static int thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!tcache_available(tsd)) { ret = EFAULT; goto label_return; } READONLY(); WRITEONLY(); tcache_flush(tsd); ret = 0; label_return: return ret; } static int thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_prof) { return ENOENT; } READ_XOR_WRITE(); if (newp != NULL) { if (newlen != sizeof(const char *)) { ret = EINVAL; goto label_return; } if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != 0) { goto label_return; } } else { const char *oldname = prof_thread_name_get(tsd); READ(oldname, const char *); } ret = 0; label_return: return ret; } static int thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) { return ENOENT; } oldval = prof_thread_active_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } if (prof_thread_active_set(tsd, *(bool *)newp)) { ret = EAGAIN; goto label_return; } } READ(oldval, bool); ret = 0; label_return: return ret; } /******************************************************************************/ static int tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; READONLY(); if (tcaches_create(tsd, &tcache_ind)) { ret = EFAULT; goto label_return; } READ(tcache_ind, unsigned); ret = 0; label_return: return ret; } static int tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_flush(tsd, tcache_ind); ret = 0; label_return: return ret; } static int tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_destroy(tsd, tcache_ind); ret = 0; label_return: return ret; } /******************************************************************************/ static int arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsdn_t 
*tsdn = tsd_tsdn(tsd); unsigned arena_ind; bool initialized; READONLY(); MIB_UNSIGNED(arena_ind, 1); malloc_mutex_lock(tsdn, &ctl_mtx); initialized = arenas_i(arena_ind)->initialized; malloc_mutex_unlock(tsdn, &ctl_mtx); READ(initialized, bool); ret = 0; label_return: return ret; } static void arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { malloc_mutex_lock(tsdn, &ctl_mtx); { unsigned narenas = ctl_arenas->narenas; /* * Access via index narenas is deprecated, and scheduled for * removal in 6.0.0. */ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { unsigned i; VARIABLE_ARRAY(arena_t *, tarenas, narenas); for (i = 0; i < narenas; i++) { tarenas[i] = arena_get(tsdn, i, false); } /* * No further need to hold ctl_mtx, since narenas and * tarenas contain everything needed below. */ malloc_mutex_unlock(tsdn, &ctl_mtx); for (i = 0; i < narenas; i++) { if (tarenas[i] != NULL) { arena_decay(tsdn, tarenas[i], false, all); } } } else { arena_t *tarena; assert(arena_ind < narenas); tarena = arena_get(tsdn, arena_ind, false); /* No further need to hold ctl_mtx. */ malloc_mutex_unlock(tsdn, &ctl_mtx); if (tarena != NULL) { arena_decay(tsdn, tarena, false, all); } } } } static int arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; READONLY(); WRITEONLY(); MIB_UNSIGNED(arena_ind, 1); arena_i_decay(tsd_tsdn(tsd), arena_ind, false); ret = 0; label_return: return ret; } static int arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; READONLY(); WRITEONLY(); MIB_UNSIGNED(arena_ind, 1); arena_i_decay(tsd_tsdn(tsd), arena_ind, true); ret = 0; label_return: return ret; } static int arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, arena_t **arena) { int ret; READONLY(); WRITEONLY(); MIB_UNSIGNED(*arena_ind, 1); *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); if (*arena == NULL || arena_is_auto(*arena)) { ret = EFAULT; goto label_return; } ret = 0; label_return: return ret; } static void arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { /* Temporarily disable the background thread during arena reset. 
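* (Aside, an inference from the pause/resume protocol below: the shard's
* background thread may be decay-purging this arena concurrently, so its
* state is flipped to paused under info->mtx here, and restored in
* arena_reset_finish_background_thread() once arena_reset() completes.)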
*/ if (have_background_thread) { malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); if (background_thread_enabled()) { unsigned ind = arena_ind % ncpus; background_thread_info_t *info = &background_thread_info[ind]; assert(info->state == background_thread_started); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); info->state = background_thread_paused; malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } } } static void arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { if (have_background_thread) { if (background_thread_enabled()) { unsigned ind = arena_ind % ncpus; background_thread_info_t *info = &background_thread_info[ind]; assert(info->state == background_thread_paused); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); info->state = background_thread_started; malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); } } static int arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { return ret; } arena_reset_prepare_background_thread(tsd, arena_ind); arena_reset(tsd, arena); arena_reset_finish_background_thread(tsd, arena_ind); return ret; } static int arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; ctl_arena_t *ctl_darena, *ctl_arena; ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { goto label_return; } if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, true) != 0) { ret = EFAULT; goto label_return; } arena_reset_prepare_background_thread(tsd, arena_ind); /* Merge stats after resetting and purging arena. */ arena_reset(tsd, arena); arena_decay(tsd_tsdn(tsd), arena, false, true); ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED); ctl_darena->initialized = true; ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true); /* Destroy arena. */ arena_destroy(tsd, arena); ctl_arena = arenas_i(arena_ind); ctl_arena->initialized = false; /* Record arena index for later recycling via arenas.create. */ ql_elm_new(ctl_arena, destroyed_link); ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link); arena_reset_finish_background_thread(tsd, arena_ind); assert(ret == 0); label_return: return ret; } static int arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *dss = NULL; unsigned arena_ind; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); MIB_UNSIGNED(arena_ind, 1); if (dss != NULL) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strcmp(dss_prec_names[i], dss) == 0) { dss_prec = i; match = true; break; } } if (!match) { ret = EINVAL; goto label_return; } } /* * Access via index narenas is deprecated, and scheduled for removal in * 6.0.0. 
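* (Illustrative sketch, not from this file: callers address every arena
* at once via the MALLCTL_ARENAS_ALL pseudo-index from the public header,
* e.g.
*
*   char cmd[64];
*   snprintf(cmd, sizeof(cmd), "arena.%u.dss", MALLCTL_ARENAS_ALL);
*   const char *dss = "secondary";
*   mallctl(cmd, NULL, NULL, &dss, sizeof(dss));
*
* rather than relying on the deprecated index-equals-narenas form.)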
*/ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == ctl_arenas->narenas) { if (dss_prec != dss_prec_limit && extent_dss_prec_set(dss_prec)) { ret = EFAULT; goto label_return; } dss_prec_old = extent_dss_prec_get(); } else { arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL || (dss_prec != dss_prec_limit && arena_dss_prec_set(arena, dss_prec))) { ret = EFAULT; goto label_return; } dss_prec_old = arena_dss_prec_get(arena); } dss = dss_prec_names[dss_prec_old]; READ(dss, const char *); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; unsigned arena_ind; arena_t *arena; MIB_UNSIGNED(arena_ind, 1); arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { ret = EFAULT; goto label_return; } if (oldp != NULL && oldlenp != NULL) { size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : arena_muzzy_decay_ms_get(arena); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), arena, *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return ret; } static int arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, true); } static int arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, false); } static int arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); MIB_UNSIGNED(arena_ind, 1); if (arena_ind < narenas_total_get()) { extent_hooks_t *old_extent_hooks; arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { if (arena_ind >= narenas_auto) { ret = EFAULT; goto label_return; } old_extent_hooks = (extent_hooks_t *)&extent_hooks_default; READ(old_extent_hooks, extent_hooks_t *); if (newp != NULL) { /* Initialize a new arena as a side effect. */ extent_hooks_t *new_extent_hooks JEMALLOC_CC_SILENCE_INIT(NULL); WRITE(new_extent_hooks, extent_hooks_t *); arena = arena_init(tsd_tsdn(tsd), arena_ind, new_extent_hooks); if (arena == NULL) { ret = EFAULT; goto label_return; } } } else { if (newp != NULL) { extent_hooks_t *new_extent_hooks JEMALLOC_CC_SILENCE_INIT(NULL); WRITE(new_extent_hooks, extent_hooks_t *); old_extent_hooks = extent_hooks_set(tsd, arena, new_extent_hooks); READ(old_extent_hooks, extent_hooks_t *); } else { old_extent_hooks = extent_hooks_get(arena); READ(old_extent_hooks, extent_hooks_t *); } } } else { ret = EFAULT; goto label_return; } ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; if (!opt_retain) { /* Only relevant when retain is enabled. 
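* (Aside: with opt_retain, unused extents stay mapped for reuse and the
* retained allocation size grows geometrically; this ctl caps that growth
* step. A sketch of lowering the cap, assuming the documented ctl name:
*
*   size_t limit = (size_t)1 << 30;
*   mallctl("arena.0.retain_grow_limit", NULL, NULL, &limit,
*       sizeof(limit));
* )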
*/ return ENOENT; } malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); MIB_UNSIGNED(arena_ind, 1); if (arena_ind < narenas_total_get() && (arena = arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { size_t old_limit, new_limit; if (newp != NULL) { WRITE(new_limit, size_t); } bool err = arena_retain_grow_limit_get_set(tsd, arena, &old_limit, newp != NULL ? &new_limit : NULL); if (!err) { READ(old_limit, size_t); ret = 0; } else { ret = EFAULT; } } else { ret = EFAULT; } label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static const ctl_named_node_t * arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; malloc_mutex_lock(tsdn, &ctl_mtx); switch (i) { case MALLCTL_ARENAS_ALL: case MALLCTL_ARENAS_DESTROYED: break; default: if (i > ctl_arenas->narenas) { ret = NULL; goto label_return; } break; } ret = super_arena_i_node; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; } /******************************************************************************/ static int arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; goto label_return; } narenas = ctl_arenas->narenas; READ(narenas, unsigned); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; if (oldp != NULL && oldlenp != NULL) { size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : arena_muzzy_decay_ms_default_get()); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } if (dirty ? 
arena_dirty_decay_ms_default_set(*(ssize_t *)newp) : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return ret; } static int arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, true); } static int arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, false); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) static const ctl_named_node_t * arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > NBINS) { return NULL; } return super_arenas_bin_i_node; } CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), size_t) static const ctl_named_node_t * arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > NSIZES - NBINS) { return NULL; } return super_arenas_lextent_i_node; } static int arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; extent_hooks_t *extent_hooks; unsigned arena_ind; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); extent_hooks = (extent_hooks_t *)&extent_hooks_default; WRITE(extent_hooks, extent_hooks_t *); if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { ret = EAGAIN; goto label_return; } READ(arena_ind, unsigned); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; void *ptr; extent_t *extent; arena_t *arena; ptr = NULL; ret = EINVAL; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(ptr, void *); extent = iealloc(tsd_tsdn(tsd), ptr); if (extent == NULL) goto label_return; arena = extent_arena_get(extent); if (arena == NULL) goto label_return; arena_ind = arena_ind_get(arena); READ(arena_ind, unsigned); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } /******************************************************************************/ static int prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) { return ENOENT; } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_thread_active_init_set(tsd_tsdn(tsd), *(bool *)newp); } else { oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: return ret; } static int prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) { return ENOENT; } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto 
label_return; } oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); } else { oldval = prof_active_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: return ret; } static int prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; if (!config_prof) { return ENOENT; } WRITEONLY(); WRITE(filename, const char *); if (prof_mdump(tsd, filename)) { ret = EFAULT; goto label_return; } ret = 0; label_return: return ret; } static int prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) { return ENOENT; } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); } else { oldval = prof_gdump_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: return ret; } static int prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; if (!config_prof) { return ENOENT; } WRITEONLY(); WRITE(lg_sample, size_t); if (lg_sample >= (sizeof(uint64_t) << 3)) { lg_sample = (sizeof(uint64_t) << 3) - 1; } prof_reset(tsd, lg_sample); ret = 0; label_return: return ret; } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) /******************************************************************************/ CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, ctl_stats->background_thread.num_threads, size_t) CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, ctl_stats->background_thread.num_runs, uint64_t) CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, ssize_t) CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, ssize_t) CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_uptime, nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t) CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_retained, atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, ctl_arena_stats_read_u64( 
&arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_base, atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_internal, atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp, atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_resident, atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, arenas_i(mib[2])->astats->allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, arenas_i(mib[2])->astats->nmalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, arenas_i(mib[2])->astats->ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, arenas_i(mib[2])->astats->nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) /* * Intentional: the "nrequests" ctl below reads the "nmalloc" counter. */ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Lock profiling related APIs below. */ #define RO_MUTEX_CTL_GEN(n, l) \ CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ l.n_lock_ops, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ l.n_wait_times, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ l.n_spin_acquired, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ l.n_owner_switches, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ nstime_ns(&l.tot_wait_time), uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ nstime_ns(&l.max_wait_time), uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ l.max_n_thds, uint32_t) /* Global mutexes. 
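* (Aside: MUTEX_PROF_GLOBAL_MUTEXES is an X-macro list; defining OP(mtx)
* and expanding the list below stamps out one RO_MUTEX_CTL_GEN block -- a
* full set of read-only stats ctls -- per named global mutex, and the
* per-arena and bin variants that follow reuse the same pattern.)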
*/ #define OP(mtx) \ RO_MUTEX_CTL_GEN(mutexes_##mtx, \ ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) MUTEX_PROF_GLOBAL_MUTEXES #undef OP /* Per arena mutexes */ #define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) MUTEX_PROF_ARENA_MUTEXES #undef OP /* tcache bin mutex */ RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) #undef RO_MUTEX_CTL_GEN /* Resets all mutex stats, including global, arena and bin mutexes. */ static int stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { if (!config_stats) { return ENOENT; } tsdn_t *tsdn = tsd_tsdn(tsd); #define MUTEX_PROF_RESET(mtx) \ malloc_mutex_lock(tsdn, &mtx); \ malloc_mutex_prof_data_reset(tsdn, &mtx); \ malloc_mutex_unlock(tsdn, &mtx); /* Global mutexes: ctl and prof. */ MUTEX_PROF_RESET(ctl_mtx); if (have_background_thread) { MUTEX_PROF_RESET(background_thread_lock); } if (config_prof && opt_prof) { MUTEX_PROF_RESET(bt2gctx_mtx); } /* Per arena mutexes. */ unsigned n = narenas_total_get(); for (unsigned i = 0; i < n; i++) { arena_t *arena = arena_get(tsdn, i, false); if (!arena) { continue; } MUTEX_PROF_RESET(arena->large_mtx); MUTEX_PROF_RESET(arena->extent_avail_mtx); MUTEX_PROF_RESET(arena->extents_dirty.mtx); MUTEX_PROF_RESET(arena->extents_muzzy.mtx); MUTEX_PROF_RESET(arena->extents_retained.mtx); MUTEX_PROF_RESET(arena->decay_dirty.mtx); MUTEX_PROF_RESET(arena->decay_muzzy.mtx); MUTEX_PROF_RESET(arena->tcache_ql_mtx); MUTEX_PROF_RESET(arena->base->mtx); for (szind_t i = 0; i < NBINS; i++) { bin_t *bin = &arena->bins[i]; MUTEX_PROF_RESET(bin->lock); } } #undef MUTEX_PROF_RESET return 0; } CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills, arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes, arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs, arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) static const ctl_named_node_t * stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > NBINS) { return NULL; } return super_stats_arenas_i_bins_j_node; } CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, 
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) static const ctl_named_node_t * stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > NSIZES - NBINS) { return NULL; } return super_stats_arenas_i_lextents_j_node; } static const ctl_named_node_t * stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; size_t a; malloc_mutex_lock(tsdn, &ctl_mtx); a = arenas_i2a_impl(i, true, true); if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) { ret = NULL; goto label_return; } ret = super_stats_arenas_i_node; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; } jemalloc-sys-0.3.2/jemalloc/src/div.c010064400007650000024000000030361340421340100156320ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/div.h" #include "jemalloc/internal/assert.h" /* * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d. * * For any k, we have (here, all division is exact; not C-style rounding): * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where * r = (-2^k) mod d. * * Expanding this out: * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k) * = floor(n / d + (r / d) * (n / 2^k)). * * The fractional part of n / d is 0 (because of the assumption that d divides n * exactly), so we have: * ... = n / d + floor((r / d) * (n / 2^k)) * * So that our initial expression is equal to the quantity we seek, so long as * (r / d) * (n / 2^k) < 1. * * r is a remainder mod d, so r < d and r / d < 1 always. We can make * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works. */ void div_init(div_info_t *div_info, size_t d) { /* Nonsensical. */ assert(d != 0); /* * This would make the value of magic too high to fit into a uint32_t * (we would want magic = 2^32 exactly). This would mess with code gen * on 32-bit machines. */ assert(d != 1); uint64_t two_to_k = ((uint64_t)1 << 32); uint32_t magic = (uint32_t)(two_to_k / d); /* * We want magic = ceil(2^k / d), but C gives us floor. We have to * increment it unless the result was exact (i.e. unless d is a power of * two). */ if (two_to_k % d != 0) { magic++; } div_info->magic = magic; #ifdef JEMALLOC_DEBUG div_info->d = d; #endif } jemalloc-sys-0.3.2/jemalloc/src/extent.c010064400007650000024000002020351340421341300163620ustar0000000000000000#define JEMALLOC_EXTENT_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/ph.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" /******************************************************************************/ /* Data. */ rtree_t extents_rtree; /* Keyed by the address of the extent_t being protected. 
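* (Aside: a mutex pool maps an extent's address onto one slot of a fixed
* mutex array, conceptually mutexes[hash(addr) % nmutexes], so per-extent
* locking needs only bounded storage at the price of occasional false
* contention between extents that hash to the same slot.)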
*/ mutex_pool_t extent_mutex_pool; size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; static const bitmap_info_t extents_bitmap_info = BITMAP_INFO_INITIALIZER(NPSIZES+1); static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained); static bool extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); #ifdef PAGES_CAN_PURGE_LAZY static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); #endif static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained); #ifdef PAGES_CAN_PURGE_FORCED static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); #endif static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained); #ifdef JEMALLOC_MAPS_COALESCE static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind); #endif static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, bool growing_retained); #ifdef JEMALLOC_MAPS_COALESCE static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind); #endif static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, bool growing_retained); const extent_hooks_t extent_hooks_default = { extent_alloc_default, extent_dalloc_default, extent_destroy_default, extent_commit_default, extent_decommit_default #ifdef PAGES_CAN_PURGE_LAZY , extent_purge_lazy_default #else , NULL #endif #ifdef PAGES_CAN_PURGE_FORCED , extent_purge_forced_default #else , NULL #endif #ifdef JEMALLOC_MAPS_COALESCE , extent_split_default, extent_merge_default #endif }; /* Used exclusively for gdump triggering. */ static atomic_zu_t curpages; static atomic_zu_t highpages; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
*/ static void extent_deregister(tsdn_t *tsdn, extent_t *extent); static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained); static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent, bool *coalesced, bool growing_retained); static void extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, bool growing_retained); /******************************************************************************/ ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link, extent_esnead_comp) typedef enum { lock_result_success, lock_result_failure, lock_result_no_extent } lock_result_t; static lock_result_t extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, extent_t **result) { extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, elm, true); if (extent1 == NULL) { return lock_result_no_extent; } /* * It's possible that the extent changed out from under us, and with it * the leaf->extent mapping. We have to recheck while holding the lock. */ extent_lock(tsdn, extent1); extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, elm, true); if (extent1 == extent2) { *result = extent1; return lock_result_success; } else { extent_unlock(tsdn, extent1); return lock_result_failure; } } /* * Returns a pool-locked extent_t * if there's one associated with the given * address, and NULL otherwise. */ static extent_t * extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { extent_t *ret = NULL; rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)addr, false, false); if (elm == NULL) { return NULL; } lock_result_t lock_result; do { lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret); } while (lock_result == lock_result_failure); return ret; } extent_t * extent_alloc(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); extent_t *extent = extent_avail_first(&arena->extent_avail); if (extent == NULL) { malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); return base_alloc_extent(tsdn, arena->base); } extent_avail_remove(&arena->extent_avail, extent); malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); return extent; } void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); extent_avail_insert(&arena->extent_avail, extent); malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); } extent_hooks_t * extent_hooks_get(arena_t *arena) { return base_extent_hooks_get(arena->base); } extent_hooks_t * extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { background_thread_info_t *info; if (have_background_thread) { info = arena_background_thread_info_get(arena); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); } extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); if (have_background_thread) { malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } return ret; } static void extent_hooks_assure_initialized(arena_t *arena, extent_hooks_t **r_extent_hooks) { if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { *r_extent_hooks = extent_hooks_get(arena); } } #ifndef JEMALLOC_JET static #endif size_t extent_size_quantize_floor(size_t size) { size_t ret; 
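/*
 * (Worked example, not from upstream and assuming 4 KiB pages with the
 * usual psz ladder 4K, 8K, ..., 28K, 32K, 40K, 48K, ...: quantize_floor
 * rounds 36K down to 32K, while extent_size_quantize_ceil() below rounds
 * it up to 40K.)
 */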
pszind_t pind; assert(size > 0); assert((size & PAGE_MASK) == 0); pind = sz_psz2ind(size - sz_large_pad + 1); if (pind == 0) { /* * Avoid underflow. This short-circuit would also do the right * thing for all sizes in the range for which there are * PAGE-spaced size classes, but it's simplest to just handle * the one case that would cause erroneous results. */ return size; } ret = sz_pind2sz(pind - 1) + sz_large_pad; assert(ret <= size); return ret; } #ifndef JEMALLOC_JET static #endif size_t extent_size_quantize_ceil(size_t size) { size_t ret; assert(size > 0); assert(size - sz_large_pad <= LARGE_MAXCLASS); assert((size & PAGE_MASK) == 0); ret = extent_size_quantize_floor(size); if (ret < size) { /* * Skip a quantization that may have an adequately large extent, * because under-sized extents may be mixed in. This only * happens when an unusual size is requested, i.e. for aligned * allocation, and is just one of several places where linear * search would potentially find sufficiently aligned available * memory somewhere lower. */ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + sz_large_pad; } return ret; } /* Generate pairing heap functions. */ ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, bool delay_coalesce) { if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, malloc_mutex_rank_exclusive)) { return true; } for (unsigned i = 0; i < NPSIZES+1; i++) { extent_heap_new(&extents->heaps[i]); } bitmap_init(extents->bitmap, &extents_bitmap_info, true); extent_list_init(&extents->lru); atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); extents->state = state; extents->delay_coalesce = delay_coalesce; return false; } extent_state_t extents_state_get(const extents_t *extents) { return extents->state; } size_t extents_npages_get(extents_t *extents) { return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); } static void extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { malloc_mutex_assert_owner(tsdn, &extents->mtx); assert(extent_state_get(extent) == extents->state); size_t size = extent_size_get(extent); size_t psz = extent_size_quantize_floor(size); pszind_t pind = sz_psz2ind(psz); if (extent_heap_empty(&extents->heaps[pind])) { bitmap_unset(extents->bitmap, &extents_bitmap_info, (size_t)pind); } extent_heap_insert(&extents->heaps[pind], extent); extent_list_append(&extents->lru, extent); size_t npages = size >> LG_PAGE; /* * All modifications to npages hold the mutex (as asserted above), so we * don't need an atomic fetch-add; we can get by with a load followed by * a store. */ size_t cur_extents_npages = atomic_load_zu(&extents->npages, ATOMIC_RELAXED); atomic_store_zu(&extents->npages, cur_extents_npages + npages, ATOMIC_RELAXED); } static void extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { malloc_mutex_assert_owner(tsdn, &extents->mtx); assert(extent_state_get(extent) == extents->state); size_t size = extent_size_get(extent); size_t psz = extent_size_quantize_floor(size); pszind_t pind = sz_psz2ind(psz); extent_heap_remove(&extents->heaps[pind], extent); if (extent_heap_empty(&extents->heaps[pind])) { bitmap_set(extents->bitmap, &extents_bitmap_info, (size_t)pind); } extent_list_remove(&extents->lru, extent); size_t npages = size >> LG_PAGE; /* * As in extents_insert_locked, we hold extents->mtx and so don't need * atomic operations for updating extents->npages. 
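* (Aside: the store below must still be atomic because
* extents_npages_get() reads npages without taking extents->mtx; the
* mutex serializes writers only, which is why a plain load/store pair can
* stand in for an atomic fetch-sub.)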
*/ size_t cur_extents_npages = atomic_load_zu(&extents->npages, ATOMIC_RELAXED); assert(cur_extents_npages >= npages); atomic_store_zu(&extents->npages, cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); } /* * Find an extent with size [min_size, max_size) to satisfy the alignment * requirement. For each size, try only the first extent in the heap. */ static extent_t * extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, size_t alignment) { pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size)); pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size)); for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)pind); i < pind_max; i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)i+1)) { assert(i < NPSIZES); assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); uintptr_t base = (uintptr_t)extent_base_get(extent); size_t candidate_size = extent_size_get(extent); assert(candidate_size >= min_size); uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, PAGE_CEILING(alignment)); if (base > next_align || base + candidate_size <= next_align) { /* Overflow or not crossing the next alignment. */ continue; } size_t leadsize = next_align - base; if (candidate_size - leadsize >= min_size) { return extent; } } return NULL; } /* Do any-best-fit extent selection, i.e. select any extent that best fits. */ static extent_t * extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, size_t size) { pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)pind); if (i < NPSIZES+1) { /* * In order to reduce fragmentation, avoid reusing and splitting * large extents for much smaller sizes. */ if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { return NULL; } assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); assert(extent_size_get(extent) >= size); return extent; } return NULL; } /* * Do first-fit extent selection, i.e. select the oldest/lowest extent that is * large enough. */ static extent_t * extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, size_t size) { extent_t *ret = NULL; pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)i+1)) { assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); assert(extent_size_get(extent) >= size); if (ret == NULL || extent_snad_comp(extent, ret) < 0) { ret = extent; } if (i == NPSIZES) { break; } assert(i < NPSIZES); } return ret; } /* * Do {best,first}-fit extent selection, where the selection policy choice is * based on extents->delay_coalesce. Best-fit selection requires less * searching, but its layout policy is less stable and may cause higher virtual * memory fragmentation as a side effect. */ static extent_t * extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, size_t esize, size_t alignment) { malloc_mutex_assert_owner(tsdn, &extents->mtx); size_t max_size = esize + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. */ if (max_size < esize) { return NULL; } extent_t *extent = extents->delay_coalesce ? 
extents_best_fit_locked(tsdn, arena, extents, max_size) : extents_first_fit_locked(tsdn, arena, extents, max_size); if (alignment > PAGE && extent == NULL) { /* * max_size guarantees the alignment requirement but is rather * pessimistic. Next we try to satisfy the aligned allocation * with sizes in [esize, max_size). */ extent = extents_fit_alignment(extents, esize, max_size, alignment); } return extent; } static bool extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent) { extent_state_set(extent, extent_state_active); bool coalesced; extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, &coalesced, false); extent_state_set(extent, extents_state_get(extents)); if (!coalesced) { return true; } extents_insert_locked(tsdn, extents, extent); return false; } extent_t * extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { assert(size + pad != 0); assert(alignment != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr, size, pad, alignment, slab, szind, zero, commit, false); assert(extent == NULL || extent_dumpable_get(extent)); return extent; } void extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent) { assert(extent_base_get(extent) != NULL); assert(extent_size_get(extent) != 0); assert(extent_dumpable_get(extent)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_addr_set(extent, extent_base_get(extent)); extent_zeroed_set(extent, false); extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); } extent_t * extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); malloc_mutex_lock(tsdn, &extents->mtx); /* * Get the LRU coalesced extent, if any. If coalescing was delayed, * the loop will iterate until the LRU extent is fully coalesced. */ extent_t *extent; while (true) { /* Get the LRU extent, if any. */ extent = extent_list_first(&extents->lru); if (extent == NULL) { goto label_return; } /* Check the eviction limit. */ size_t extents_npages = atomic_load_zu(&extents->npages, ATOMIC_RELAXED); if (extents_npages <= npages_min) { extent = NULL; goto label_return; } extents_remove_locked(tsdn, extents, extent); if (!extents->delay_coalesce) { break; } /* Try to coalesce. */ if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent)) { break; } /* * The LRU extent was just coalesced and the result placed in * the LRU at its neighbor's position. Start over. */ } /* * Either mark the extent active or deregister it to protect against * concurrent operations. 
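* (Aside: dirty and muzzy extents keep their rtree mappings, so marking
* them active is enough to fend off concurrent coalescing; retained
* extents are instead deregistered, removing them from the rtree
* altogether before they are handed back to the caller.)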
*/ switch (extents_state_get(extents)) { case extent_state_active: not_reached(); case extent_state_dirty: case extent_state_muzzy: extent_state_set(extent, extent_state_active); break; case extent_state_retained: extent_deregister(tsdn, extent); break; default: not_reached(); } label_return: malloc_mutex_unlock(tsdn, &extents->mtx); return extent; } static void extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, bool growing_retained) { /* * Leak extent after making sure its pages have already been purged, so * that this is only a virtual memory leak. */ if (extents_state_get(extents) == extent_state_dirty) { if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), growing_retained)) { extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), growing_retained); } } extent_dalloc(tsdn, arena, extent); } void extents_prefork(tsdn_t *tsdn, extents_t *extents) { malloc_mutex_prefork(tsdn, &extents->mtx); } void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { malloc_mutex_postfork_parent(tsdn, &extents->mtx); } void extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { malloc_mutex_postfork_child(tsdn, &extents->mtx); } static void extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_t *extent) { assert(extent_arena_get(extent) == arena); assert(extent_state_get(extent) == extent_state_active); extent_state_set(extent, extents_state_get(extents)); extents_insert_locked(tsdn, extents, extent); } static void extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_t *extent) { malloc_mutex_lock(tsdn, &extents->mtx); extent_deactivate_locked(tsdn, arena, extents, extent); malloc_mutex_unlock(tsdn, &extents->mtx); } static void extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_t *extent) { assert(extent_arena_get(extent) == arena); assert(extent_state_get(extent) == extents_state_get(extents)); extents_remove_locked(tsdn, extents, extent); extent_state_set(extent, extent_state_active); } static bool extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, const extent_t *extent, bool dependent, bool init_missing, rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent), dependent, init_missing); if (!dependent && *r_elm_a == NULL) { return true; } assert(*r_elm_a != NULL); *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_last_get(extent), dependent, init_missing); if (!dependent && *r_elm_b == NULL) { return true; } assert(*r_elm_b != NULL); return false; } static void extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); if (elm_b != NULL) { rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, slab); } } static void extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, szind_t szind) { assert(extent_slab_get(extent)); /* Register interior. 
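* (Aside: the loop below writes only pages 1 .. npages-2; the first and
* last pages were already mapped to this extent by
* extent_rtree_write_acquired(), so after this every interior address of
* the slab resolves to the extent.)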
*/ for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { rtree_write(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << LG_PAGE), extent, szind, true); } } static void extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { cassert(config_prof); /* prof_gdump() requirement. */ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (opt_prof && extent_state_get(extent) == extent_state_active) { size_t nadd = extent_size_get(extent) >> LG_PAGE; size_t cur = atomic_fetch_add_zu(&curpages, nadd, ATOMIC_RELAXED) + nadd; size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); while (cur > high && !atomic_compare_exchange_weak_zu( &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { /* * Don't refresh cur, because it may have decreased * since this thread lost the highpages update race. * Note that high is updated in case of CAS failure. */ } if (cur > high && prof_gdump_get_unlocked()) { prof_gdump(tsdn); } } } static void extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { cassert(config_prof); if (opt_prof && extent_state_get(extent) == extent_state_active) { size_t nsub = extent_size_get(extent) >> LG_PAGE; assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); } } static bool extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *elm_a, *elm_b; /* * We need to hold the lock to protect against a concurrent coalesce * operation that sees us in a partial state. */ extent_lock(tsdn, extent); if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, &elm_a, &elm_b)) { return true; } szind_t szind = extent_szind_get_maybe_invalid(extent); bool slab = extent_slab_get(extent); extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); if (slab) { extent_interior_register(tsdn, rtree_ctx, extent, szind); } extent_unlock(tsdn, extent); if (config_prof && gdump_add) { extent_gdump_add(tsdn, extent); } return false; } static bool extent_register(tsdn_t *tsdn, extent_t *extent) { return extent_register_impl(tsdn, extent, true); } static bool extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { return extent_register_impl(tsdn, extent, false); } static void extent_reregister(tsdn_t *tsdn, extent_t *extent) { bool err = extent_register(tsdn, extent); assert(!err); } /* * Removes all pointers to the given extent from the global rtree indices for * its interior. This is relevant for slab extents, for which we need to do * metadata lookups at places other than the head of the extent. We deregister * on the interior, then, when an extent moves from being an active slab to an * inactive state. */ static void extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent) { size_t i; assert(extent_slab_get(extent)); for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { rtree_clear(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << LG_PAGE)); } } /* * Removes all pointers to the given extent from the global rtree. 
*/ static void extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *elm_a, *elm_b; extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, &elm_a, &elm_b); extent_lock(tsdn, extent); extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false); if (extent_slab_get(extent)) { extent_interior_deregister(tsdn, rtree_ctx, extent); extent_slab_set(extent, false); } extent_unlock(tsdn, extent); if (config_prof && gdump) { extent_gdump_sub(tsdn, extent); } } static void extent_deregister(tsdn_t *tsdn, extent_t *extent) { extent_deregister_impl(tsdn, extent, true); } static void extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { extent_deregister_impl(tsdn, extent, false); } /* * Tries to find and remove an extent from extents that can be used for the * given allocation request. */ static extent_t * extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); assert(alignment > 0); if (config_debug && new_addr != NULL) { /* * Non-NULL new_addr has two use cases: * * 1) Recycle a known-extant extent, e.g. during purging. * 2) Perform in-place expanding reallocation. * * Regardless of use case, new_addr must either refer to a * non-existing extent, or to the base of an extant extent, * since only active slabs support interior lookups (which of * course cannot be recycled). */ assert(PAGE_ADDR2BASE(new_addr) == new_addr); assert(pad == 0); assert(alignment <= PAGE); } size_t esize = size + pad; malloc_mutex_lock(tsdn, &extents->mtx); extent_hooks_assure_initialized(arena, r_extent_hooks); extent_t *extent; if (new_addr != NULL) { extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr); if (extent != NULL) { /* * We might null-out extent to report an error, but we * still need to unlock the associated mutex after. */ extent_t *unlock_extent = extent; assert(extent_base_get(extent) == new_addr); if (extent_arena_get(extent) != arena || extent_size_get(extent) < esize || extent_state_get(extent) != extents_state_get(extents)) { extent = NULL; } extent_unlock(tsdn, unlock_extent); } } else { extent = extents_fit_locked(tsdn, arena, extents, esize, alignment); } if (extent == NULL) { malloc_mutex_unlock(tsdn, &extents->mtx); return NULL; } extent_activate_locked(tsdn, arena, extents, extent); malloc_mutex_unlock(tsdn, &extents->mtx); return extent; } /* * Given an allocation request and an extent guaranteed to be able to satisfy * it, this splits off lead and trail extents, leaving extent pointing to an * extent satisfying the allocation. * This function doesn't put lead or trail into any extents_t; it's the caller's * job to ensure that they can be reused. */ typedef enum { /* * Split successfully. lead, extent, and trail, are modified to extents * describing the ranges before, in, and after the given allocation. */ extent_split_interior_ok, /* * The extent can't satisfy the given allocation request. None of the * input extent_t *s are touched. */ extent_split_interior_cant_alloc, /* * In a potentially invalid state. Must leak (if *to_leak is non-NULL), * and salvage what's still salvageable (if *to_salvage is non-NULL). * None of lead, extent, or trail are valid. 
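* (For reference, the success-case layout -- an illustration, not code:
*
*   |<-- lead -->|<-- extent: size + pad -->|<-- trail -->|
*   original base  first suitably aligned address
*
* where lead and trail may be empty and are split off as separate
* extents.)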
*/ extent_split_interior_error } extent_split_interior_result_t; static extent_split_interior_result_t extent_split_interior(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, /* The result of splitting, in case of success. */ extent_t **extent, extent_t **lead, extent_t **trail, /* The mess to clean up, in case of error. */ extent_t **to_leak, extent_t **to_salvage, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool growing_retained) { size_t esize = size + pad; size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); assert(new_addr == NULL || leadsize == 0); if (extent_size_get(*extent) < leadsize + esize) { return extent_split_interior_cant_alloc; } size_t trailsize = extent_size_get(*extent) - leadsize - esize; *lead = NULL; *trail = NULL; *to_leak = NULL; *to_salvage = NULL; /* Split the lead. */ if (leadsize != 0) { *lead = *extent; *extent = extent_split_impl(tsdn, arena, r_extent_hooks, *lead, leadsize, NSIZES, false, esize + trailsize, szind, slab, growing_retained); if (*extent == NULL) { *to_leak = *lead; *lead = NULL; return extent_split_interior_error; } } /* Split the trail. */ if (trailsize != 0) { *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, esize, szind, slab, trailsize, NSIZES, false, growing_retained); if (*trail == NULL) { *to_leak = *extent; *to_salvage = *lead; *lead = NULL; *extent = NULL; return extent_split_interior_error; } } if (leadsize == 0 && trailsize == 0) { /* * Splitting causes szind to be set as a side effect, but no * splitting occurred. */ extent_szind_set(*extent, szind); if (szind != NSIZES) { rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_addr_get(*extent), szind, slab); if (slab && extent_size_get(*extent) > PAGE) { rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_past_get(*extent) - (uintptr_t)PAGE, szind, slab); } } } return extent_split_interior_ok; } /* * This fulfills the indicated allocation request out of the given extent (which * the caller should have ensured was big enough). If there's any unused space * before or after the resulting allocation, that space is given its own extent * and put back into extents. */ static extent_t * extent_recycle_split(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, extent_t *extent, bool growing_retained) { extent_t *lead; extent_t *trail; extent_t *to_leak; extent_t *to_salvage; extent_split_interior_result_t result = extent_split_interior( tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, growing_retained); if (result == extent_split_interior_ok) { if (lead != NULL) { extent_deactivate(tsdn, arena, extents, lead); } if (trail != NULL) { extent_deactivate(tsdn, arena, extents, trail); } return extent; } else { /* * We should have picked an extent that was large enough to * fulfill our allocation request. 
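 *
 * Editorial worked example of the split arithmetic above (made-up
 * numbers, 4 KiB pages): for an extent based at 0x7f0000003000 of size
 * 0x10000, with size = 0x4000, pad = 0, and alignment = 0x8000:
 *
 *     leadsize  = ALIGNMENT_CEILING(0x7f0000003000, 0x8000)
 *                   - 0x7f0000003000        = 0x5000
 *     trailsize = 0x10000 - 0x5000 - 0x4000 = 0x7000
 *
 * so extent_split_interior() returns ok after splitting off a
 * 0x5000-byte lead and a 0x7000-byte trail, both of which
 * extent_recycle_split() deactivates back into the extents_t.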
*/ assert(result == extent_split_interior_error); if (to_salvage != NULL) { extent_deregister(tsdn, to_salvage); } if (to_leak != NULL) { void *leak = extent_base_get(to_leak); extent_deregister_no_gdump_sub(tsdn, to_leak); extents_leak(tsdn, arena, r_extent_hooks, extents, to_leak, growing_retained); assert(extent_lock_from_addr(tsdn, rtree_ctx, leak) == NULL); } return NULL; } unreachable(); } /* * Tries to satisfy the given allocation request by reusing one of the extents * in the given extents_t. */ static extent_t * extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); assert(new_addr == NULL || !slab); assert(pad == 0 || !slab); assert(!*zero || !slab); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, rtree_ctx, extents, new_addr, size, pad, alignment, slab, growing_retained); if (extent == NULL) { return NULL; } extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, extents, new_addr, size, pad, alignment, slab, szind, extent, growing_retained); if (extent == NULL) { return NULL; } if (*commit && !extent_committed_get(extent)) { if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), growing_retained)) { extent_record(tsdn, arena, r_extent_hooks, extents, extent, growing_retained); return NULL; } extent_zeroed_set(extent, true); } if (extent_committed_get(extent)) { *commit = true; } if (extent_zeroed_get(extent)) { *zero = true; } if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); } assert(extent_state_get(extent) == extent_state_active); if (slab) { extent_slab_set(extent, slab); extent_interior_register(tsdn, rtree_ctx, extent, szind); } if (*zero) { void *addr = extent_base_get(extent); size_t size = extent_size_get(extent); if (!extent_zeroed_get(extent)) { if (pages_purge_forced(addr, size)) { memset(addr, 0, size); } } else if (config_debug) { size_t *p = (size_t *)(uintptr_t)addr; for (size_t i = 0; i < size / sizeof(size_t); i++) { assert(p[i] == 0); } } } return extent; } /* * If the caller specifies (!*zero), it is still possible to receive zeroed * memory, in which case *zero is toggled to true. arena_extent_alloc() takes * advantage of this to avoid demanding zeroed extents, but taking advantage of * them if they are returned. */ static void * extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { void *ret; assert(size != 0); assert(alignment != 0); /* "primary" dss. */ if (have_dss && dss_prec == dss_prec_primary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, commit)) != NULL) { return ret; } /* mmap. */ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) != NULL) { return ret; } /* "secondary" dss. */ if (have_dss && dss_prec == dss_prec_secondary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, commit)) != NULL) { return ret; } /* All strategies for allocation failed. 
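 *
 * Editorial recap of the precedence encoded above:
 *
 *     dss_prec_primary:    dss, then mmap
 *     dss_prec_secondary:  mmap, then dss
 *     otherwise:           mmap only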
*/ return NULL; } static void * extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED)); if (have_madvise_huge && ret) { pages_set_thp_state(ret, size); } return ret; } static void * extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { tsdn_t *tsdn; arena_t *arena; tsdn = tsdn_fetch(); arena = arena_get(tsdn, arena_ind, false); /* * The arena we're allocating on behalf of must have been initialized * already. */ assert(arena != NULL); return extent_alloc_default_impl(tsdn, arena, new_addr, size, alignment, zero, commit); } static void extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); if (arena == arena_get(tsd_tsdn(tsd), 0, false)) { /* * The only legitimate case of customized extent hooks for a0 is * hooks with no allocation activities. One such example is to * place metadata on pre-allocated resources such as huge pages. * In that case, rely on reentrancy_level checks to catch * infinite recursions. */ pre_reentrancy(tsd, NULL); } else { pre_reentrancy(tsd, arena); } } static void extent_hook_post_reentrancy(tsdn_t *tsdn) { tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); post_reentrancy(tsd); } /* * If virtual memory is retained, create increasingly larger extents from which * to split requested extents in order to limit the total number of disjoint * virtual memory ranges retained by each arena. */ static extent_t * extent_grow_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); assert(pad == 0 || !slab); assert(!*zero || !slab); size_t esize = size + pad; size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. */ if (alloc_size_min < esize) { goto label_err; } /* * Find the next extent size in the series that would be large enough to * satisfy this request. */ pszind_t egn_skip = 0; size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); while (alloc_size < alloc_size_min) { egn_skip++; if (arena->extent_grow_next + egn_skip == NPSIZES) { /* Outside legal range. 
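 *
 * Editorial example of the search (concrete classes vary by build): if
 * extent_grow_next currently maps to a 2 MiB class and alloc_size_min
 * is 5 MiB, egn_skip advances until sz_pind2sz() first yields a class
 * of at least 5 MiB; running into NPSIZES first means no legal class is
 * large enough, hence the error path here.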
*/ goto label_err; } assert(arena->extent_grow_next + egn_skip < NPSIZES); alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); } extent_t *extent = extent_alloc(tsdn, arena); if (extent == NULL) { goto label_err; } bool zeroed = false; bool committed = false; void *ptr; if (*r_extent_hooks == &extent_hooks_default) { ptr = extent_alloc_default_impl(tsdn, arena, NULL, alloc_size, PAGE, &zeroed, &committed); } else { extent_hook_pre_reentrancy(tsdn, arena); ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, alloc_size, PAGE, &zeroed, &committed, arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } extent_init(extent, arena, ptr, alloc_size, false, NSIZES, arena_extent_sn_next(arena), extent_state_active, zeroed, committed, true); if (ptr == NULL) { extent_dalloc(tsdn, arena, extent); goto label_err; } if (extent_register_no_gdump_add(tsdn, extent)) { extents_leak(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, true); goto label_err; } if (extent_zeroed_get(extent) && extent_committed_get(extent)) { *zero = true; } if (extent_committed_get(extent)) { *commit = true; } rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *lead; extent_t *trail; extent_t *to_leak; extent_t *to_salvage; extent_split_interior_result_t result = extent_split_interior( tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind, true); if (result == extent_split_interior_ok) { if (lead != NULL) { extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, lead, true); } if (trail != NULL) { extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, trail, true); } } else { /* * We should have allocated a sufficiently large extent; the * cant_alloc case should not occur. */ assert(result == extent_split_interior_error); if (to_salvage != NULL) { if (config_prof) { extent_gdump_add(tsdn, to_salvage); } extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, to_salvage, true); } if (to_leak != NULL) { extent_deregister_no_gdump_sub(tsdn, to_leak); extents_leak(tsdn, arena, r_extent_hooks, &arena->extents_retained, to_leak, true); } goto label_err; } if (*commit && !extent_committed_get(extent)) { if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), true)) { extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, true); goto label_err; } extent_zeroed_set(extent, true); } /* * Increment extent_grow_next if doing so wouldn't exceed the allowed * range. */ if (arena->extent_grow_next + egn_skip + 1 <= arena->retain_grow_limit) { arena->extent_grow_next += egn_skip + 1; } else { arena->extent_grow_next = arena->retain_grow_limit; } /* All opportunities for failure are past. */ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); if (config_prof) { /* Adjust gdump stats now that extent is final size. 
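 *
 * Editorial note: extent_gdump_add() maintains the highpages watermark
 * with the usual lock-free maximum idiom, a weak-CAS loop of the shape
 *
 *     while (cur > high && !atomic_compare_exchange_weak_zu(&highpages,
 *         &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
 *     }
 *
 * A failed CAS refreshes high, so each retry either publishes cur or
 * observes that another thread already published a larger maximum.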
*/ extent_gdump_add(tsdn, extent); } if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); } if (slab) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_slab_set(extent, true); extent_interior_register(tsdn, rtree_ctx, extent, szind); } if (*zero && !extent_zeroed_get(extent)) { void *addr = extent_base_get(extent); size_t size = extent_size_get(extent); if (pages_purge_forced(addr, size)) { memset(addr, 0, size); } } return extent; label_err: malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); return NULL; } static extent_t * extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { assert(size != 0); assert(alignment != 0); malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, &arena->extents_retained, new_addr, size, pad, alignment, slab, szind, zero, commit, true); if (extent != NULL) { malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); if (config_prof) { extent_gdump_add(tsdn, extent); } } else if (opt_retain && new_addr == NULL) { extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, pad, alignment, slab, szind, zero, commit); /* extent_grow_retained() always releases extent_grow_mtx. */ } else { malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); } malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); return extent; } static extent_t * extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { size_t esize = size + pad; extent_t *extent = extent_alloc(tsdn, arena); if (extent == NULL) { return NULL; } void *addr; if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, alignment, zero, commit); } else { extent_hook_pre_reentrancy(tsdn, arena); addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, esize, alignment, zero, commit, arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } if (addr == NULL) { extent_dalloc(tsdn, arena, extent); return NULL; } extent_init(extent, arena, addr, esize, slab, szind, arena_extent_sn_next(arena), extent_state_active, *zero, *commit, true); if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); } if (extent_register(tsdn, extent)) { extents_leak(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, false); return NULL; } return extent; } extent_t * extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_hooks_assure_initialized(arena, r_extent_hooks); extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, new_addr, size, pad, alignment, slab, szind, zero, commit); if (extent == NULL) { if (opt_retain && new_addr != NULL) { /* * When retain is enabled and new_addr is set, we do not * attempt extent_alloc_wrapper_hard which does mmap * that is very unlikely to succeed (unless it happens * to be at the end). 
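 *
 * Editorial reading (an assumption, not stated upstream): under
 * opt_retain the arena does not unmap address space, so the range at a
 * fixed new_addr (e.g. for in-place expansion) is typically still
 * mapped, merely retained, and a new OS mapping there would collide;
 * only the retained extents already searched can satisfy the request.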
*/ return NULL; } extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, new_addr, size, pad, alignment, slab, szind, zero, commit); } assert(extent == NULL || extent_dumpable_get(extent)); return extent; } static bool extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, const extent_t *outer) { assert(extent_arena_get(inner) == arena); if (extent_arena_get(outer) != arena) { return false; } assert(extent_state_get(inner) == extent_state_active); if (extent_state_get(outer) != extents->state) { return false; } if (extent_committed_get(inner) != extent_committed_get(outer)) { return false; } return true; } static bool extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *inner, extent_t *outer, bool forward, bool growing_retained) { assert(extent_can_coalesce(arena, extents, inner, outer)); extent_activate_locked(tsdn, arena, extents, outer); malloc_mutex_unlock(tsdn, &extents->mtx); bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, forward ? inner : outer, forward ? outer : inner, growing_retained); malloc_mutex_lock(tsdn, &extents->mtx); if (err) { extent_deactivate_locked(tsdn, arena, extents, outer); } return err; } static extent_t * extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent, bool *coalesced, bool growing_retained) { /* * Continue attempting to coalesce until failure, to protect against * races with other threads that are thwarted by this one. */ bool again; do { again = false; /* Try to coalesce forward. */ extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, extent_past_get(extent)); if (next != NULL) { /* * extents->mtx only protects against races for * like-state extents, so call extent_can_coalesce() * before releasing next's pool lock. */ bool can_coalesce = extent_can_coalesce(arena, extents, extent, next); extent_unlock(tsdn, next); if (can_coalesce && !extent_coalesce(tsdn, arena, r_extent_hooks, extents, extent, next, true, growing_retained)) { if (extents->delay_coalesce) { /* Do minimal coalescing. */ *coalesced = true; return extent; } again = true; } } /* Try to coalesce backward. */ extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, extent_before_get(extent)); if (prev != NULL) { bool can_coalesce = extent_can_coalesce(arena, extents, extent, prev); extent_unlock(tsdn, prev); if (can_coalesce && !extent_coalesce(tsdn, arena, r_extent_hooks, extents, extent, prev, false, growing_retained)) { extent = prev; if (extents->delay_coalesce) { /* Do minimal coalescing. */ *coalesced = true; return extent; } again = true; } } } while (again); if (extents->delay_coalesce) { *coalesced = false; } return extent; } /* * Does the metadata management portions of putting an unused extent into the * given extents_t (coalesces, deregisters slab interiors, the heap operations). 
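 *
 * Editorial note on the policy below: with delay_coalesce unset, the
 * extent is coalesced eagerly in both directions; with it set, only
 * extents of at least LARGE_MINCLASS are coalesced here, repeating
 * extent_try_coalesce() while each pass grows the extent by at least
 * LARGE_MINCLASS.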
*/ static void extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, bool growing_retained) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); assert((extents_state_get(extents) != extent_state_dirty && extents_state_get(extents) != extent_state_muzzy) || !extent_zeroed_get(extent)); malloc_mutex_lock(tsdn, &extents->mtx); extent_hooks_assure_initialized(arena, r_extent_hooks); extent_szind_set(extent, NSIZES); if (extent_slab_get(extent)) { extent_interior_deregister(tsdn, rtree_ctx, extent); extent_slab_set(extent, false); } assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent), true) == extent); if (!extents->delay_coalesce) { extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, NULL, growing_retained); } else if (extent_size_get(extent) >= LARGE_MINCLASS) { /* Always coalesce large extents eagerly. */ bool coalesced; size_t prev_size; do { prev_size = extent_size_get(extent); assert(extent_state_get(extent) == extent_state_active); extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, &coalesced, growing_retained); } while (coalesced && extent_size_get(extent) >= prev_size + LARGE_MINCLASS); } extent_deactivate_locked(tsdn, arena, extents, extent); malloc_mutex_unlock(tsdn, &extents->mtx); } void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (extent_register(tsdn, extent)) { extents_leak(tsdn, arena, &extent_hooks, &arena->extents_retained, extent, false); return; } extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); } static bool extent_dalloc_default_impl(void *addr, size_t size) { if (!have_dss || !extent_in_dss(addr)) { return extent_dalloc_mmap(addr, size); } return true; } static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { return extent_dalloc_default_impl(addr, size); } static bool extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { bool err; assert(extent_base_get(extent) != NULL); assert(extent_size_get(extent) != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_addr_set(extent, extent_base_get(extent)); extent_hooks_assure_initialized(arena, r_extent_hooks); /* Try to deallocate. */ if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ err = extent_dalloc_default_impl(extent_base_get(extent), extent_size_get(extent)); } else { extent_hook_pre_reentrancy(tsdn, arena); err = ((*r_extent_hooks)->dalloc == NULL || (*r_extent_hooks)->dalloc(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena))); extent_hook_post_reentrancy(tsdn); } if (!err) { extent_dalloc(tsdn, arena, extent); } return err; } void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { assert(extent_dumpable_get(extent)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* * Deregister first to avoid a race with other allocating threads, and * reregister if deallocation fails. 
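 *
 * Editorial sketch of the race being avoided: if the extent stayed in
 * the rtree while the dalloc hook unmapped it, another thread could map
 * the same address range, look an address up, and find the stale
 * extent_t. Deregistering first means the rtree never maps an address
 * the OS may be about to reuse; reregistering restores the mapping only
 * once it is known the range was not released.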
*/ extent_deregister(tsdn, extent); if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { return; } extent_reregister(tsdn, extent); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } /* Try to decommit; purge if that fails. */ bool zeroed; if (!extent_committed_get(extent)) { zeroed = true; } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent))) { zeroed = true; } else if ((*r_extent_hooks)->purge_forced != NULL && !(*r_extent_hooks)->purge_forced(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), 0, extent_size_get(extent), arena_ind_get(arena))) { zeroed = true; } else if (extent_state_get(extent) == extent_state_muzzy || ((*r_extent_hooks)->purge_lazy != NULL && !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), 0, extent_size_get(extent), arena_ind_get(arena)))) { zeroed = false; } else { zeroed = false; } if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } extent_zeroed_set(extent, zeroed); if (config_prof) { extent_gdump_sub(tsdn, extent); } extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, false); } static void extent_destroy_default_impl(void *addr, size_t size) { if (!have_dss || !extent_in_dss(addr)) { pages_unmap(addr, size); } } static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { extent_destroy_default_impl(addr, size); } void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { assert(extent_base_get(extent) != NULL); assert(extent_size_get(extent) != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Deregister first to avoid a race with other allocating threads. */ extent_deregister(tsdn, extent); extent_addr_set(extent, extent_base_get(extent)); extent_hooks_assure_initialized(arena, r_extent_hooks); /* Try to destroy; silently fail otherwise. */ if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ extent_destroy_default_impl(extent_base_get(extent), extent_size_get(extent)); } else if ((*r_extent_hooks)->destroy != NULL) { extent_hook_pre_reentrancy(tsdn, arena); (*r_extent_hooks)->destroy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } extent_dalloc(tsdn, arena, extent); } static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), length); } static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = ((*r_extent_hooks)->commit == NULL || (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } extent_committed_set(extent, extent_committed_get(extent) || !err); return err; } bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, length, false); } static bool extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), length); } bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = ((*r_extent_hooks)->decommit == NULL || (*r_extent_hooks)->decommit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } extent_committed_set(extent, extent_committed_get(extent) && err); return err; } #ifdef PAGES_CAN_PURGE_LAZY static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { assert(addr != NULL); assert((offset & PAGE_MASK) == 0); assert(length != 0); assert((length & PAGE_MASK) == 0); return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), length); } #endif static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->purge_lazy == NULL) { return true; } if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } return err; } bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, offset, length, false); } #ifdef PAGES_CAN_PURGE_FORCED static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { assert(addr != NULL); assert((offset & PAGE_MASK) == 0); assert(length != 0); assert((length & PAGE_MASK) == 0); return pages_purge_forced((void *)((uintptr_t)addr + (uintptr_t)offset), length); } #endif static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->purge_forced == NULL) { return true; } if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } return err; } bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, offset, length, false); } #ifdef JEMALLOC_MAPS_COALESCE static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { return !maps_coalesce; } #endif /* * Accepts the extent to split, and the characteristics of each side of the * split. The 'a' parameters go with the 'lead' of the resulting pair of * extents (the lower addressed portion of the split), and the 'b' parameters go * with the trail (the higher addressed portion). This makes 'extent' the lead, * and returns the trail (except in case of error). */ static extent_t * extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, bool growing_retained) { assert(extent_size_get(extent) == size_a + size_b); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->split == NULL) { return NULL; } extent_t *trail = extent_alloc(tsdn, arena); if (trail == NULL) { goto label_error_a; } extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), extent_state_get(extent), extent_zeroed_get(extent), extent_committed_get(extent), extent_dumpable_get(extent)); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; { extent_t lead; extent_init(&lead, arena, extent_addr_get(extent), size_a, slab_a, szind_a, extent_sn_get(extent), extent_state_get(extent), extent_zeroed_get(extent), extent_committed_get(extent), extent_dumpable_get(extent)); extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, true, &lead_elm_a, &lead_elm_b); } rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, &trail_elm_a, &trail_elm_b); if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL || trail_elm_b == NULL) { goto label_error_b; } extent_lock2(tsdn, extent, trail); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), size_a + size_b, size_a, size_b, extent_committed_get(extent), arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } if (err) { goto label_error_c; } extent_size_set(extent, size_a); extent_szind_set(extent, szind_a); extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, szind_a, slab_a); extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, szind_b, slab_b); extent_unlock2(tsdn, extent, trail); return trail; label_error_c: extent_unlock2(tsdn, extent, trail); label_error_b: extent_dalloc(tsdn, arena, trail); label_error_a: return NULL; } extent_t * extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, szind_a, slab_a, size_b, szind_b, slab_b, false); } static bool extent_merge_default_impl(void *addr_a, void *addr_b) { if (!maps_coalesce) { return true; } if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { return true; } return false; } #ifdef JEMALLOC_MAPS_COALESCE static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { return extent_merge_default_impl(addr_a, addr_b); } #endif static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->merge == NULL) { return true; } bool err; if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. 
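 *
 * Editorial note: every wrapper in this file special-cases the default
 * hooks like this. Calling the _impl function directly forwards the
 * caller's tsdn, whereas going through the public hook signature would
 * force a tsdn_fetch() inside the hook, which only receives arena_ind.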
*/ err = extent_merge_default_impl(extent_base_get(a), extent_base_get(b)); } else { extent_hook_pre_reentrancy(tsdn, arena); err = (*r_extent_hooks)->merge(*r_extent_hooks, extent_base_get(a), extent_size_get(a), extent_base_get(b), extent_size_get(b), extent_committed_get(a), arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } if (err) { return true; } /* * The rtree writes must happen while all the relevant elements are * owned, so the following code uses decomposed helper functions rather * than extent_{,de}register() to do things in the right order. */ rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, &a_elm_b); extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, &b_elm_b); extent_lock2(tsdn, a, b); if (a_elm_b != NULL) { rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, NSIZES, false); } if (b_elm_b != NULL) { rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, NSIZES, false); } else { b_elm_b = b_elm_a; } extent_size_set(a, extent_size_get(a) + extent_size_get(b)); extent_szind_set(a, NSIZES); extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? extent_sn_get(a) : extent_sn_get(b)); extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); extent_unlock2(tsdn, a, b); extent_dalloc(tsdn, extent_arena_get(b), b); return false; } bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); } bool extent_boot(void) { if (rtree_new(&extents_rtree, true)) { return true; } if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", WITNESS_RANK_EXTENT_POOL)) { return true; } if (have_dss) { extent_dss_boot(); } return false; } jemalloc-sys-0.3.2/jemalloc/src/extent_dss.c010064400007650000024000000153361340421341300172410ustar0000000000000000#define JEMALLOC_EXTENT_DSS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/spin.h" /******************************************************************************/ /* Data. */ const char *opt_dss = DSS_DEFAULT; const char *dss_prec_names[] = { "disabled", "primary", "secondary", "N/A" }; /* * Current dss precedence default, used when creating new arenas. NB: This is * stored as unsigned rather than dss_prec_t because in principle there's no * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use * atomic operations to synchronize the setting. */ static atomic_u_t dss_prec_default = ATOMIC_INIT( (unsigned)DSS_PREC_DEFAULT); /* Base address of the DSS. */ static void *dss_base; /* Atomic boolean indicating whether a thread is currently extending DSS. */ static atomic_b_t dss_extending; /* Atomic boolean indicating whether the DSS is exhausted. */ static atomic_b_t dss_exhausted; /* Atomic current upper limit on DSS addresses. 
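 *
 * Editorial picture of the data above: the DSS (sbrk heap) is the range
 * [dss_base, dss_max). dss_extending serializes growers, dss_exhausted
 * latches permanent sbrk failure, and dss_max only moves forward:
 *
 *     dss_base                        dss_max
 *        |---- allocated via sbrk ----->|--- unclaimed break space ---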
*/ static atomic_p_t dss_max; /******************************************************************************/ static void * extent_dss_sbrk(intptr_t increment) { #ifdef JEMALLOC_DSS return sbrk(increment); #else not_implemented(); return NULL; #endif } dss_prec_t extent_dss_prec_get(void) { dss_prec_t ret; if (!have_dss) { return dss_prec_disabled; } ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE); return ret; } bool extent_dss_prec_set(dss_prec_t dss_prec) { if (!have_dss) { return (dss_prec != dss_prec_disabled); } atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE); return false; } static void extent_dss_extending_start(void) { spin_t spinner = SPIN_INITIALIZER; while (true) { bool expected = false; if (atomic_compare_exchange_weak_b(&dss_extending, &expected, true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { break; } spin_adaptive(&spinner); } } static void extent_dss_extending_finish(void) { assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED)); atomic_store_b(&dss_extending, false, ATOMIC_RELEASE); } static void * extent_dss_max_update(void *new_addr) { /* * Get the current end of the DSS as max_cur and assure that dss_max is * up to date. */ void *max_cur = extent_dss_sbrk(0); if (max_cur == (void *)-1) { return NULL; } atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE); /* Fixed new_addr can only be supported if it is at the edge of DSS. */ if (new_addr != NULL && max_cur != new_addr) { return NULL; } return max_cur; } void * extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { extent_t *gap; cassert(have_dss); assert(size > 0); assert(alignment > 0); /* * sbrk() uses a signed increment argument, so take care not to * interpret a large allocation request as a negative increment. */ if ((intptr_t)size < 0) { return NULL; } gap = extent_alloc(tsdn, arena); if (gap == NULL) { return NULL; } extent_dss_extending_start(); if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) { /* * The loop is necessary to recover from races with other * threads that are using the DSS for something other than * malloc. */ while (true) { void *max_cur = extent_dss_max_update(new_addr); if (max_cur == NULL) { goto label_oom; } /* * Compute how much page-aligned gap space (if any) is * necessary to satisfy alignment. This space can be * recycled for later use. */ void *gap_addr_page = (void *)(PAGE_CEILING( (uintptr_t)max_cur)); void *ret = (void *)ALIGNMENT_CEILING( (uintptr_t)gap_addr_page, alignment); size_t gap_size_page = (uintptr_t)ret - (uintptr_t)gap_addr_page; if (gap_size_page != 0) { extent_init(gap, arena, gap_addr_page, gap_size_page, false, NSIZES, arena_extent_sn_next(arena), extent_state_active, false, true, true); } /* * Compute the address just past the end of the desired * allocation space. */ void *dss_next = (void *)((uintptr_t)ret + size); if ((uintptr_t)ret < (uintptr_t)max_cur || (uintptr_t)dss_next < (uintptr_t)max_cur) { goto label_oom; /* Wrap-around. */ } /* Compute the increment, including subpage bytes. */ void *gap_addr_subpage = max_cur; size_t gap_size_subpage = (uintptr_t)ret - (uintptr_t)gap_addr_subpage; intptr_t incr = gap_size_subpage + size; assert((uintptr_t)max_cur + incr == (uintptr_t)ret + size); /* Try to allocate. */ void *dss_prev = extent_dss_sbrk(incr); if (dss_prev == max_cur) { /* Success. 
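 *
 * Editorial worked example of the gap arithmetic above (made-up
 * numbers, 4 KiB pages): if sbrk(0) returned max_cur = 0x804a3c0 and
 * alignment = 0x10000, then
 *
 *     gap_addr_page    = PAGE_CEILING(0x804a3c0)               = 0x804b000
 *     ret              = ALIGNMENT_CEILING(0x804b000, 0x10000) = 0x8050000
 *     gap_size_page    = 0x8050000 - 0x804b000                 = 0x5000
 *     gap_size_subpage = 0x8050000 - 0x804a3c0                 = 0x5c40
 *     incr             = 0x5c40 + size
 *
 * so the sbrk increment covers both the sub-page slack and the gap
 * pages, and the gap pages are recycled via extent_dalloc_gap() below.
 * Editorial caution, flagged but left as-is: the extent_init() call in
 * the purge branch below passes (ret, size, size, false, NSIZES, ...),
 * which, compared against the (addr, size, slab, szind, sn, ...) shape
 * of the gap initialization above, looks shifted by one argument; only
 * the address and size matter to extent_purge_forced_wrapper(), so the
 * purge still covers the right range.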
*/ atomic_store_p(&dss_max, dss_next, ATOMIC_RELEASE); extent_dss_extending_finish(); if (gap_size_page != 0) { extent_dalloc_gap(tsdn, arena, gap); } else { extent_dalloc(tsdn, arena, gap); } if (!*commit) { *commit = pages_decommit(ret, size); } if (*zero && *commit) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; extent_t extent; extent_init(&extent, arena, ret, size, size, false, NSIZES, extent_state_active, false, true, true); if (extent_purge_forced_wrapper(tsdn, arena, &extent_hooks, &extent, 0, size)) { memset(ret, 0, size); } } return ret; } /* * Failure, whether due to OOM or a race with a raw * sbrk() call from outside the allocator. */ if (dss_prev == (void *)-1) { /* OOM. */ atomic_store_b(&dss_exhausted, true, ATOMIC_RELEASE); goto label_oom; } } } label_oom: extent_dss_extending_finish(); extent_dalloc(tsdn, arena, gap); return NULL; } static bool extent_in_dss_helper(void *addr, void *max) { return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < (uintptr_t)max); } bool extent_in_dss(void *addr) { cassert(have_dss); return extent_in_dss_helper(addr, atomic_load_p(&dss_max, ATOMIC_ACQUIRE)); } bool extent_dss_mergeable(void *addr_a, void *addr_b) { void *max; cassert(have_dss); if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < (uintptr_t)dss_base) { return true; } max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE); return (extent_in_dss_helper(addr_a, max) == extent_in_dss_helper(addr_b, max)); } void extent_dss_boot(void) { cassert(have_dss); dss_base = extent_dss_sbrk(0); atomic_store_b(&dss_extending, false, ATOMIC_RELAXED); atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED); atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED); } /******************************************************************************/ jemalloc-sys-0.3.2/jemalloc/src/extent_mmap.c010064400007650000024000000016111340421340100173660ustar0000000000000000#define JEMALLOC_EXTENT_MMAP_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_mmap.h" /******************************************************************************/ /* Data. */ bool opt_retain = #ifdef JEMALLOC_RETAIN true #else false #endif ; /******************************************************************************/ void * extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, PAGE), commit); if (ret == NULL) { return NULL; } assert(ret != NULL); if (*commit) { *zero = true; } return ret; } bool extent_dalloc_mmap(void *addr, size_t size) { if (!opt_retain) { pages_unmap(addr, size); } return opt_retain; } jemalloc-sys-0.3.2/jemalloc/src/hash.c010064400007650000024000000002041340421340100157650ustar0000000000000000#define JEMALLOC_HASH_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" jemalloc-sys-0.3.2/jemalloc/src/hooks.c010064400007650000024000000006151340421341300161760ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" /* * The hooks are a little bit screwy -- they're not genuinely exported in the * sense that we want them available to end-users, but we do want them visible * from outside the generated library, so that we can use them in test code. 
*/ JEMALLOC_EXPORT void (*hooks_arena_new_hook)() = NULL; JEMALLOC_EXPORT void (*hooks_libc_hook)() = NULL; jemalloc-sys-0.3.2/jemalloc/src/jemalloc.c010064400007650000024000002514021340421341300166430ustar0000000000000000#define JEMALLOC_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/log.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/spin.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ /* Runtime configuration options. */ const char *je_malloc_conf #ifndef _WIN32 JEMALLOC_ATTR(weak) #endif ; bool opt_abort = #ifdef JEMALLOC_DEBUG true #else false #endif ; bool opt_abort_conf = #ifdef JEMALLOC_DEBUG true #else false #endif ; const char *opt_junk = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) "true" #else "false" #endif ; bool opt_junk_alloc = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; bool opt_junk_free = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; bool opt_utrace = false; bool opt_xmalloc = false; bool opt_zero = false; unsigned opt_narenas = 0; unsigned ncpus; /* Protects arenas initialization. */ malloc_mutex_t arenas_lock; /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. * * arenas[0..narenas_auto) are used for automatic multiplexing of threads and * arenas. arenas[narenas_auto..narenas_total) are only used if the application * takes some action to create them and allocate from them. * * Points to an arena_t. */ JEMALLOC_ALIGNED(CACHELINE) atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; static atomic_u_t narenas_total; /* Use narenas_total_*(). */ static arena_t *a0; /* arenas[0]; read-only after initialization. */ unsigned narenas_auto; /* Read-only after initialization. */ typedef enum { malloc_init_uninitialized = 3, malloc_init_a0_initialized = 2, malloc_init_recursible = 1, malloc_init_initialized = 0 /* Common case --> jnz. */ } malloc_init_t; static malloc_init_t malloc_init_state = malloc_init_uninitialized; /* False should be the common case. Set to true to trigger initialization. */ bool malloc_slow = true; /* When malloc_slow is true, set the corresponding bits for sanity check. */ enum { flag_opt_junk_alloc = (1U), flag_opt_junk_free = (1U << 1), flag_opt_zero = (1U << 2), flag_opt_utrace = (1U << 3), flag_opt_xmalloc = (1U << 4) }; static uint8_t malloc_slow_flags; #ifdef JEMALLOC_THREADED_INIT /* Used to let the initializing thread recursively allocate. */ # define NO_INITIALIZER ((unsigned long)0) # define INITIALIZER pthread_self() # define IS_INITIALIZER (malloc_initializer == pthread_self()) static pthread_t malloc_initializer = NO_INITIALIZER; #else # define NO_INITIALIZER false # define INITIALIZER true # define IS_INITIALIZER malloc_initializer static bool malloc_initializer = NO_INITIALIZER; #endif /* Used to avoid initialization races. 
*/ #ifdef _WIN32 #if _WIN32_WINNT >= 0x0600 static malloc_mutex_t init_lock = SRWLOCK_INIT; #else static malloc_mutex_t init_lock; static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI _init_init_lock(void) { /* * If another constructor in the same binary is using mallctl to e.g. * set up extent hooks, it may end up running before this one, and * malloc_init_hard will crash trying to lock the uninitialized lock. So * we force an initialization of the lock in malloc_init_hard as well. * We don't try to care about atomicity of the accesses to the * init_lock_initialized boolean, since it really only matters early in * the process creation, before any separate thread normally starts * doing anything. */ if (!init_lock_initialized) { malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, malloc_mutex_rank_exclusive); } init_lock_initialized = true; } #ifdef _MSC_VER # pragma section(".CRT$XCU", read) JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) static const void (WINAPI *init_init_lock)(void) = _init_init_lock; #endif #endif #else static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; #endif typedef struct { void *p; /* Input pointer (as in realloc(p, s)). */ size_t s; /* Request size. */ void *r; /* Result pointer. */ } malloc_utrace_t; #ifdef JEMALLOC_UTRACE # define UTRACE(a, b, c) do { \ if (unlikely(opt_utrace)) { \ int utrace_serrno = errno; \ malloc_utrace_t ut; \ ut.p = (a); \ ut.s = (b); \ ut.r = (c); \ utrace(&ut, sizeof(ut)); \ errno = utrace_serrno; \ } \ } while (0) #else # define UTRACE(a, b, c) #endif /* Whether any invalid config options were encountered. */ static bool had_conf_error = false; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ static bool malloc_init_hard_a0(void); static bool malloc_init_hard(void); /******************************************************************************/ /* * Begin miscellaneous support functions. */ bool malloc_initialized(void) { return (malloc_init_state == malloc_init_initialized); } JEMALLOC_ALWAYS_INLINE bool malloc_init_a0(void) { if (unlikely(malloc_init_state == malloc_init_uninitialized)) { return malloc_init_hard_a0(); } return false; } JEMALLOC_ALWAYS_INLINE bool malloc_init(void) { if (unlikely(!malloc_initialized()) && malloc_init_hard()) { return true; } return false; } /* * The a0*() functions are used instead of i{d,}alloc() in situations that * cannot tolerate TLS variable access. */ static void * a0ialloc(size_t size, bool zero, bool is_internal) { if (unlikely(malloc_init_a0())) { return NULL; } return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, is_internal, arena_get(TSDN_NULL, 0, true), true); } static void a0idalloc(void *ptr, bool is_internal) { idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); } void * a0malloc(size_t size) { return a0ialloc(size, false, true); } void a0dalloc(void *ptr) { a0idalloc(ptr, true); } /* * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive * situations that cannot tolerate TLS variable access (TLS allocation and very * early internal data structure initialization). 
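 *
 * Editorial sketch: these wrappers bottom out in a0ialloc(), which
 * passes TSDN_NULL and pins arena 0, so no thread-specific data is
 * touched. bootstrap_calloc(num, size), for example, behaves like a
 * zeroing allocation of num * size bytes served from arena 0 (the
 * product is assumed here not to overflow).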
*/ void * bootstrap_malloc(size_t size) { if (unlikely(size == 0)) { size = 1; } return a0ialloc(size, false, false); } void * bootstrap_calloc(size_t num, size_t size) { size_t num_size; num_size = num * size; if (unlikely(num_size == 0)) { assert(num == 0 || size == 0); num_size = 1; } return a0ialloc(num_size, true, false); } void bootstrap_free(void *ptr) { if (unlikely(ptr == NULL)) { return; } a0idalloc(ptr, false); } void arena_set(unsigned ind, arena_t *arena) { atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); } static void narenas_total_set(unsigned narenas) { atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); } static void narenas_total_inc(void) { atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); } unsigned narenas_total_get(void) { return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); } /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; assert(ind <= narenas_total_get()); if (ind >= MALLOCX_ARENA_LIMIT) { return NULL; } if (ind == narenas_total_get()) { narenas_total_inc(); } /* * Another thread may have already initialized arenas[ind] if it's an * auto arena. */ arena = arena_get(tsdn, ind, false); if (arena != NULL) { assert(ind < narenas_auto); return arena; } /* Actually initialize the arena. */ arena = arena_new(tsdn, ind, extent_hooks); return arena; } static void arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { if (ind == 0) { return; } if (have_background_thread) { bool err; malloc_mutex_lock(tsdn, &background_thread_lock); err = background_thread_create(tsdn_tsd(tsdn), ind); malloc_mutex_unlock(tsdn, &background_thread_lock); if (err) { malloc_printf(": error in background thread " "creation for arena %u. Abort.\n", ind); abort(); } } } arena_t * arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; malloc_mutex_lock(tsdn, &arenas_lock); arena = arena_init_locked(tsdn, ind, extent_hooks); malloc_mutex_unlock(tsdn, &arenas_lock); arena_new_create_background_thread(tsdn, ind); return arena; } static void arena_bind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_inc(arena, internal); if (internal) { tsd_iarena_set(tsd, arena); } else { tsd_arena_set(tsd, arena); } } void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; oldarena = arena_get(tsd_tsdn(tsd), oldind, false); newarena = arena_get(tsd_tsdn(tsd), newind, false); arena_nthreads_dec(oldarena, false); arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); } static void arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_dec(arena, internal); if (internal) { tsd_iarena_set(tsd, NULL); } else { tsd_arena_set(tsd, NULL); } } arena_tdata_t * arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { arena_tdata_t *tdata, *arenas_tdata_old; arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); unsigned narenas_tdata_old, i; unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); unsigned narenas_actual = narenas_total_get(); /* * Dissociate old tdata array (and set up for deallocation upon return) * if it's too small. 
*/ if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { arenas_tdata_old = arenas_tdata; narenas_tdata_old = narenas_tdata; arenas_tdata = NULL; narenas_tdata = 0; tsd_arenas_tdata_set(tsd, arenas_tdata); tsd_narenas_tdata_set(tsd, narenas_tdata); } else { arenas_tdata_old = NULL; narenas_tdata_old = 0; } /* Allocate tdata array if it's missing. */ if (arenas_tdata == NULL) { bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { *arenas_tdata_bypassp = true; arenas_tdata = (arena_tdata_t *)a0malloc( sizeof(arena_tdata_t) * narenas_tdata); *arenas_tdata_bypassp = false; } if (arenas_tdata == NULL) { tdata = NULL; goto label_return; } assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); tsd_arenas_tdata_set(tsd, arenas_tdata); tsd_narenas_tdata_set(tsd, narenas_tdata); } /* * Copy to tdata array. It's possible that the actual number of arenas * has increased since narenas_total_get() was called above, but that * causes no correctness issues unless two threads concurrently execute * the arenas.create mallctl, which we trust mallctl synchronization to * prevent. */ /* Copy/initialize tickers. */ for (i = 0; i < narenas_actual; i++) { if (i < narenas_tdata_old) { ticker_copy(&arenas_tdata[i].decay_ticker, &arenas_tdata_old[i].decay_ticker); } else { ticker_init(&arenas_tdata[i].decay_ticker, DECAY_NTICKS_PER_UPDATE); } } if (narenas_tdata > narenas_actual) { memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) * (narenas_tdata - narenas_actual)); } /* Read the refreshed tdata array. */ tdata = &arenas_tdata[ind]; label_return: if (arenas_tdata_old != NULL) { a0dalloc(arenas_tdata_old); } return tdata; } /* Slow path, called only by arena_choose(). */ arena_t * arena_choose_hard(tsd_t *tsd, bool internal) { arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { unsigned choose = percpu_arena_choose(); ret = arena_get(tsd_tsdn(tsd), choose, true); assert(ret != NULL); arena_bind(tsd, arena_ind_get(ret), false); arena_bind(tsd, arena_ind_get(ret), true); return ret; } if (narenas_auto > 1) { unsigned i, j, choose[2], first_null; bool is_new_arena[2]; /* * Determine binding for both non-internal and internal * allocation. * * choose[0]: For application allocation. * choose[1]: For internal metadata allocation. */ for (j = 0; j < 2; j++) { choose[j] = 0; is_new_arena[j] = false; } first_null = narenas_auto; malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ for (j = 0; j < 2; j++) { if (arena_nthreads_get(arena_get( tsd_tsdn(tsd), i, false), !!j) < arena_nthreads_get(arena_get( tsd_tsdn(tsd), choose[j], false), !!j)) { choose[j] = i; } } } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized * arena, in case all extant arenas are in use. * * NB: It is possible for there to be * discontinuities in terms of initialized * versus uninitialized arenas, due to the * "thread.arena" mallctl. */ first_null = i; } } for (j = 0; j < 2; j++) { if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), choose[j], false), !!j) == 0 || first_null == narenas_auto) { /* * Use an unloaded arena, or the least loaded * arena if all arenas are already initialized. 
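 *
 * Editorial note: choose[j] was computed above as the initialized auto
 * arena with the fewest threads of kind j (0 = application, 1 =
 * internal); this branch reuses it either because it is entirely idle
 * or because no uninitialized slot remains (first_null == narenas_auto).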
*/ if (!!j == internal) { ret = arena_get(tsd_tsdn(tsd), choose[j], false); } } else { arena_t *arena; /* Initialize a new arena. */ choose[j] = first_null; arena = arena_init_locked(tsd_tsdn(tsd), choose[j], (extent_hooks_t *)&extent_hooks_default); if (arena == NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); return NULL; } is_new_arena[j] = true; if (!!j == internal) { ret = arena; } } arena_bind(tsd, choose[j], !!j); } malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); for (j = 0; j < 2; j++) { if (is_new_arena[j]) { assert(choose[j] > 0); arena_new_create_background_thread( tsd_tsdn(tsd), choose[j]); } } } else { ret = arena_get(tsd_tsdn(tsd), 0, false); arena_bind(tsd, 0, false); arena_bind(tsd, 0, true); } return ret; } void iarena_cleanup(tsd_t *tsd) { arena_t *iarena; iarena = tsd_iarena_get(tsd); if (iarena != NULL) { arena_unbind(tsd, arena_ind_get(iarena), true); } } void arena_cleanup(tsd_t *tsd) { arena_t *arena; arena = tsd_arena_get(tsd); if (arena != NULL) { arena_unbind(tsd, arena_ind_get(arena), false); } } void arenas_tdata_cleanup(tsd_t *tsd) { arena_tdata_t *arenas_tdata; /* Prevent tsd->arenas_tdata from being (re)created. */ *tsd_arenas_tdata_bypassp_get(tsd) = true; arenas_tdata = tsd_arenas_tdata_get(tsd); if (arenas_tdata != NULL) { tsd_arenas_tdata_set(tsd, NULL); a0dalloc(arenas_tdata); } } static void stats_print_atexit(void) { if (config_stats) { tsdn_t *tsdn; unsigned narenas, i; tsdn = tsdn_fetch(); /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats * events. As a consequence, the final stats may be slightly * out of date by the time they are reported, if other threads * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_foreach(tcache, &arena->tcache_ql, link) { tcache_stats_merge(tsdn, tcache, arena); } malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } } } je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); } /* * Ensure that we don't hold any locks upon entry to or exit from allocator * code (in a "broad" sense that doesn't count a reentrant allocation as an * entrance or exit). */ JEMALLOC_ALWAYS_INLINE void check_entry_exit_locking(tsdn_t *tsdn) { if (!config_debug) { return; } if (tsdn_null(tsdn)) { return; } tsd_t *tsd = tsdn_tsd(tsdn); /* * It's possible we hold locks at entry/exit if we're in a nested * allocation. */ int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); if (reentrancy_level != 0) { return; } witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); } /* * End miscellaneous support functions. */ /******************************************************************************/ /* * Begin initialization functions. */ static char * jemalloc_secure_getenv(const char *name) { #ifdef JEMALLOC_HAVE_SECURE_GETENV return secure_getenv(name); #else # ifdef JEMALLOC_HAVE_ISSETUGID if (issetugid() != 0) { return NULL; } # endif return getenv(name); #endif } static unsigned malloc_ncpus(void) { long result; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) /* * glibc >= 2.6 has the CPU_COUNT macro. * * glibc's sysconf() uses isspace(). glibc allocates for the first time * *before* setting up the isspace tables. Therefore we need a * different method to get the number of CPUs. 
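 *
 * Querying the affinity mask via pthread_getaffinity_np() below avoids
 * calling into sysconf() entirely; note that CPU_COUNT() counts the
 * CPUs this thread may run on (e.g. as restricted by taskset/numactl),
 * which can be fewer than the CPUs that are online.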
*/ { cpu_set_t set; pthread_getaffinity_np(pthread_self(), sizeof(set), &set); result = CPU_COUNT(&set); } #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif return ((result == -1) ? 1 : (unsigned)result); } static void init_opt_stats_print_opts(const char *v, size_t vlen) { size_t opts_len = strlen(opt_stats_print_opts); assert(opts_len <= stats_print_tot_num_options); for (size_t i = 0; i < vlen; i++) { switch (v[i]) { #define OPTION(o, v, d, s) case o: break; STATS_PRINT_OPTIONS #undef OPTION default: continue; } if (strchr(opt_stats_print_opts, v[i]) != NULL) { /* Ignore repeated. */ continue; } opt_stats_print_opts[opts_len++] = v[i]; opt_stats_print_opts[opts_len] = '\0'; assert(opts_len <= stats_print_tot_num_options); } assert(opts_len == strlen(opt_stats_print_opts)); } static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, char const **v_p, size_t *vlen_p) { bool accept; const char *opts = *opts_p; *k_p = opts; for (accept = false; !accept;) { switch (*opts) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '_': opts++; break; case ':': opts++; *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; *v_p = opts; accept = true; break; case '\0': if (opts != *opts_p) { malloc_write("<jemalloc>: Conf string ends " "with key\n"); } return true; default: malloc_write("<jemalloc>: Malformed conf string\n"); return true; } } for (accept = false; !accept;) { switch (*opts) { case ',': opts++; /* * Look ahead one character here, because the next time * this function is called, it will assume that end of * input has been cleanly reached if no input remains, * but we have optimistically already consumed the * comma if one exists. */ if (*opts == '\0') { malloc_write("<jemalloc>: Conf string ends " "with comma\n"); } *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; accept = true; break; case '\0': *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; accept = true; break; default: opts++; break; } } *opts_p = opts; return false; } static void malloc_abort_invalid_conf(void) { assert(opt_abort_conf); malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " "value (see above).\n"); abort(); } static void malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, size_t vlen) { malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); /* If abort_conf is set, error out after processing all options. */ had_conf_error = true; } static void malloc_slow_flag_init(void) { /* * Combine the runtime options into malloc_slow for fast path. Called * after processing all the options. */ malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) | (opt_junk_free ? flag_opt_junk_free : 0) | (opt_zero ? flag_opt_zero : 0) | (opt_utrace ? flag_opt_utrace : 0) | (opt_xmalloc ? flag_opt_xmalloc : 0); malloc_slow = (malloc_slow_flags != 0); } static void malloc_conf_init(void) { unsigned i; char buf[PATH_MAX + 1]; const char *opts, *k, *v; size_t klen, vlen; for (i = 0; i < 4; i++) { /* Get runtime configuration.
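 *
 * The four passes of this loop correspond to the four configuration
 * sources, in increasing priority (later sources can override earlier
 * ones): the compile-time string (config_malloc_conf), the
 * application-provided je_malloc_conf symbol, the name of the
 * /etc/malloc.conf symbolic link, and the MALLOC_CONF environment
 * variable.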
*/ switch (i) { case 0: opts = config_malloc_conf; break; case 1: if (je_malloc_conf != NULL) { /* * Use options that were compiled into the * program. */ opts = je_malloc_conf; } else { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; case 2: { ssize_t linklen = 0; #ifndef _WIN32 int saved_errno = errno; const char *linkname = # ifdef JEMALLOC_PREFIX "/etc/"JEMALLOC_PREFIX"malloc.conf" # else "/etc/malloc.conf" # endif ; /* * Try to use the contents of the "/etc/malloc.conf" * symbolic link's name. */ linklen = readlink(linkname, buf, sizeof(buf) - 1); if (linklen == -1) { /* No configuration specified. */ linklen = 0; /* Restore errno. */ set_errno(saved_errno); } #endif buf[linklen] = '\0'; opts = buf; break; } case 3: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" #else "MALLOC_CONF" #endif ; if ((opts = jemalloc_secure_getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment * variable. */ } else { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; } default: not_reached(); buf[0] = '\0'; opts = buf; } while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) { #define CONF_MATCH(n) \ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) #define CONF_MATCH_VALUE(n) \ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) #define CONF_HANDLE_BOOL(o, n) \ if (CONF_MATCH(n)) { \ if (CONF_MATCH_VALUE("true")) { \ o = true; \ } else if (CONF_MATCH_VALUE("false")) { \ o = false; \ } else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ continue; \ } #define CONF_MIN_no(um, min) false #define CONF_MIN_yes(um, min) ((um) < (min)) #define CONF_MAX_no(um, max) false #define CONF_MAX_yes(um, max) ((um) > (max)) #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ \ set_errno(0); \ um = malloc_strtoumax(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ if (CONF_MIN_##check_min(um, \ (t)(min))) { \ o = (t)(min); \ } else if ( \ CONF_MAX_##check_max(um, \ (t)(max))) { \ o = (t)(max); \ } else { \ o = (t)um; \ } \ } else { \ if (CONF_MIN_##check_min(um, \ (t)(min)) || \ CONF_MAX_##check_max(um, \ (t)(max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ } else { \ o = (t)um; \ } \ } \ continue; \ } #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ clip) \ CONF_HANDLE_T_U(unsigned, o, n, min, max, \ check_min, check_max, clip) #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ CONF_HANDLE_T_U(size_t, o, n, min, max, \ check_min, check_max, clip) #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (CONF_MATCH(n)) { \ long l; \ char *end; \ \ set_errno(0); \ l = strtol(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (l < (ssize_t)(min) || l > \ (ssize_t)(max)) { \ malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ } else { \ o = l; \ } \ continue; \ } #define CONF_HANDLE_CHAR_P(o, n, d) \ if (CONF_MATCH(n)) { \ size_t cpylen = (vlen <= \ sizeof(o)-1) ? 
vlen : \ sizeof(o)-1; \ strncpy(o, v, cpylen); \ o[cpylen] = '\0'; \ continue; \ } CONF_HANDLE_BOOL(opt_abort, "abort") CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") if (strncmp("metadata_thp", k, klen) == 0) { int i; bool match = false; for (i = 0; i < metadata_thp_mode_limit; i++) { if (strncmp(metadata_thp_mode_names[i], v, vlen) == 0) { opt_metadata_thp = i; match = true; break; } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strncmp(dss_prec_names[i], v, vlen) == 0) { if (extent_dss_prec_set(i)) { malloc_conf_error( "Error setting dss", k, klen, v, vlen); } else { opt_dss = dss_prec_names[i]; match = true; break; } } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, UINT_MAX, yes, no, false) CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : SSIZE_MAX); CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : SSIZE_MAX); CONF_HANDLE_BOOL(opt_stats_print, "stats_print") if (CONF_MATCH("stats_print_opts")) { init_opt_stats_print_opts(v, vlen); continue; } if (config_fill) { if (CONF_MATCH("junk")) { if (CONF_MATCH_VALUE("true")) { opt_junk = "true"; opt_junk_alloc = opt_junk_free = true; } else if (CONF_MATCH_VALUE("false")) { opt_junk = "false"; opt_junk_alloc = opt_junk_free = false; } else if (CONF_MATCH_VALUE("alloc")) { opt_junk = "alloc"; opt_junk_alloc = true; opt_junk_free = false; } else if (CONF_MATCH_VALUE("free")) { opt_junk = "free"; opt_junk_alloc = false; opt_junk_free = true; } else { malloc_conf_error( "Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_BOOL(opt_zero, "zero") } if (config_utrace) { CONF_HANDLE_BOOL(opt_utrace, "utrace") } if (config_xmalloc) { CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") } CONF_HANDLE_BOOL(opt_tcache, "tcache") CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, "lg_extent_max_active_fit", 0, (sizeof(size_t) << 3), yes, yes, false) CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", -1, (sizeof(size_t) << 3) - 1) if (strncmp("percpu_arena", k, klen) == 0) { bool match = false; for (int i = percpu_arena_mode_names_base; i < percpu_arena_mode_names_limit; i++) { if (strncmp(percpu_arena_mode_names[i], v, vlen) == 0) { if (!have_percpu_arena) { malloc_conf_error( "No getcpu support", k, klen, v, vlen); } opt_percpu_arena = i; match = true; break; } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_BOOL(opt_background_thread, "background_thread"); CONF_HANDLE_SIZE_T(opt_max_background_threads, "max_background_threads", 1, opt_max_background_threads, yes, yes, true); if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof") CONF_HANDLE_CHAR_P(opt_prof_prefix, "prof_prefix", "jeprof") CONF_HANDLE_BOOL(opt_prof_active, "prof_active") CONF_HANDLE_BOOL(opt_prof_thread_active_init, "prof_thread_active_init") CONF_HANDLE_SIZE_T(opt_lg_prof_sample, "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - 1, no, yes, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, (sizeof(uint64_t) << 3) - 1) CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") CONF_HANDLE_BOOL(opt_prof_final, "prof_final") 
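			/*
			 * Illustrative example of how these handlers compose:
			 * a setting such as
			 *   MALLOC_CONF="prof:true,lg_prof_sample:19,prof_final:true"
			 * is split by malloc_conf_next() into three key:value
			 * pairs, each consumed by one of the CONF_HANDLE_*
			 * expansions in this block.
			 */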
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") } if (config_log) { if (CONF_MATCH("log")) { /* Clamp so the NUL store below stays in bounds. */ size_t cpylen = ( vlen < sizeof(log_var_names) ? vlen : sizeof(log_var_names) - 1); strncpy(log_var_names, v, cpylen); log_var_names[cpylen] = '\0'; continue; } } if (CONF_MATCH("thp")) { bool match = false; for (int i = 0; i < thp_mode_names_limit; i++) { if (strncmp(thp_mode_names[i], v, vlen) == 0) { if (!have_madvise_huge) { malloc_conf_error( "No THP support", k, klen, v, vlen); } opt_thp = i; match = true; break; } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } malloc_conf_error("Invalid conf pair", k, klen, v, vlen); #undef CONF_MATCH #undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL #undef CONF_MIN_no #undef CONF_MIN_yes #undef CONF_MAX_no #undef CONF_MAX_yes #undef CONF_HANDLE_T_U #undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P } if (opt_abort_conf && had_conf_error) { malloc_abort_invalid_conf(); } } atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); } static bool malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == malloc_init_recursible)) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. */ return false; } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { /* Busy-wait until the initializing thread completes. */ spin_t spinner = SPIN_INITIALIZER; do { malloc_mutex_unlock(TSDN_NULL, &init_lock); spin_adaptive(&spinner); malloc_mutex_lock(TSDN_NULL, &init_lock); } while (!malloc_initialized()); return false; } #endif return true; } static bool malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; if (config_prof) { prof_boot0(); } malloc_conf_init(); if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) { abort(); } } } if (pages_boot()) { return true; } if (base_boot(TSDN_NULL)) { return true; } if (extent_boot()) { return true; } if (ctl_boot()) { return true; } if (config_prof) { prof_boot1(); } arena_boot(); if (tcache_boot(TSDN_NULL)) { return true; } if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, malloc_mutex_rank_exclusive)) { return true; } /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ narenas_auto = 1; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); /* * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). */ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) == NULL) { return true; } a0 = arena_get(TSDN_NULL, 0, false); malloc_init_state = malloc_init_a0_initialized; return false; } static bool malloc_init_hard_a0(void) { bool ret; malloc_mutex_lock(TSDN_NULL, &init_lock); ret = malloc_init_hard_a0_locked(); malloc_mutex_unlock(TSDN_NULL, &init_lock); return ret; } /* Initialize data structures which may trigger recursive allocation. */ static bool malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; ncpus = malloc_ncpus(); #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ !defined(__native_client__)) /* LinuxThreads' pthread_atfork() allocates.
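 * That nested allocation is only safe because malloc_init_state was
 * set to malloc_init_recursible above: a recursive malloc_init() then
 * passes malloc_init_hard_needed() and is served from the
 * already-bootstrapped a0 arena instead of re-entering full
 * initialization.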
*/ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write("<jemalloc>: Error in pthread_atfork()\n"); if (opt_abort) { abort(); } return true; } #endif if (background_thread_boot0()) { return true; } return false; } static unsigned malloc_narenas_default(void) { assert(ncpus > 0); /* * For SMP systems, create more than one arena per CPU by * default. */ if (ncpus > 1) { return ncpus << 2; } else { return 1; } } static percpu_arena_mode_t percpu_arena_as_initialized(percpu_arena_mode_t mode) { assert(!malloc_initialized()); assert(mode <= percpu_arena_disabled); if (mode != percpu_arena_disabled) { mode += percpu_arena_mode_enabled_base; } return mode; } static bool malloc_init_narenas(void) { assert(ncpus > 0); if (opt_percpu_arena != percpu_arena_disabled) { if (!have_percpu_arena || malloc_getcpu() < 0) { opt_percpu_arena = percpu_arena_disabled; malloc_printf("<jemalloc>: perCPU arena getcpu() not " "available. Setting narenas to %u.\n", opt_narenas ? opt_narenas : malloc_narenas_default()); if (opt_abort) { abort(); } } else { if (ncpus >= MALLOCX_ARENA_LIMIT) { malloc_printf("<jemalloc>: narenas w/ percpu " "arena beyond limit (%d)\n", ncpus); if (opt_abort) { abort(); } return true; } /* NB: opt_percpu_arena isn't fully initialized yet. */ if (percpu_arena_as_initialized(opt_percpu_arena) == per_phycpu_arena && ncpus % 2 != 0) { malloc_printf("<jemalloc>: invalid " "configuration -- per physical CPU arena " "with odd number (%u) of CPUs (no hyper " "threading?).\n", ncpus); if (opt_abort) { abort(); } } unsigned n = percpu_arena_ind_limit( percpu_arena_as_initialized(opt_percpu_arena)); if (opt_narenas < n) { /* * If narenas is specified with percpu_arena * enabled, actual narenas is set as the greater * of the two. percpu_arena_choose will be free * to use any of the arenas based on CPU * id. This is conservative (at a small cost) * but ensures correctness. * * If for some reason the ncpus determined at * boot is not the actual number (e.g. because * of affinity setting from numactl), reserving * narenas this way provides a workaround for * percpu_arena. */ opt_narenas = n; } } } if (opt_narenas == 0) { opt_narenas = malloc_narenas_default(); } assert(opt_narenas > 0); narenas_auto = opt_narenas; /* * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
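 *
 * For scale (illustrative): with percpu_arena disabled, an 8-CPU
 * machine gets opt_narenas = 8 << 2 = 32 from malloc_narenas_default(),
 * far below MALLOCX_ARENA_LIMIT, so the clamp below only matters for
 * extreme explicit narenas settings.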
*/ if (narenas_auto >= MALLOCX_ARENA_LIMIT) { narenas_auto = MALLOCX_ARENA_LIMIT - 1; malloc_printf(": Reducing narenas to limit (%d)\n", narenas_auto); } narenas_total_set(narenas_auto); return false; } static void malloc_init_percpu(void) { opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); } static bool malloc_init_hard_finish(void) { if (malloc_mutex_boot()) { return true; } malloc_init_state = malloc_init_initialized; malloc_slow_flag_init(); return false; } static void malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { malloc_mutex_assert_owner(tsdn, &init_lock); malloc_mutex_unlock(tsdn, &init_lock); if (reentrancy_set) { assert(!tsdn_null(tsdn)); tsd_t *tsd = tsdn_tsd(tsdn); assert(tsd_reentrancy_level_get(tsd) > 0); post_reentrancy(tsd); } } static bool malloc_init_hard(void) { tsd_t *tsd; #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif malloc_mutex_lock(TSDN_NULL, &init_lock); #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ malloc_init_hard_cleanup(tsdn, reentrancy); \ return ret; if (!malloc_init_hard_needed()) { UNLOCK_RETURN(TSDN_NULL, false, false) } if (malloc_init_state != malloc_init_a0_initialized && malloc_init_hard_a0_locked()) { UNLOCK_RETURN(TSDN_NULL, true, false) } malloc_mutex_unlock(TSDN_NULL, &init_lock); /* Recursive allocation relies on functional tsd. */ tsd = malloc_tsd_boot0(); if (tsd == NULL) { return true; } if (malloc_init_hard_recursible()) { return true; } malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); /* Set reentrancy level to 1 during init. */ pre_reentrancy(tsd, NULL); /* Initialize narenas before prof_boot2 (for allocation). */ if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } if (config_prof && prof_boot2(tsd)) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } malloc_init_percpu(); if (malloc_init_hard_finish()) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } post_reentrancy(tsd); malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); witness_assert_lockless(witness_tsd_tsdn( tsd_witness_tsdp_get_unsafe(tsd))); malloc_tsd_boot1(); /* Update TSD after tsd_boot1. */ tsd = tsd_fetch(); if (opt_background_thread) { assert(have_background_thread); /* * Need to finish init & unlock first before creating background * threads (pthread_create depends on malloc). ctl_init (which * sets isthreaded) needs to be called without holding any lock. */ background_thread_ctl_init(tsd_tsdn(tsd)); malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); bool err = background_thread_create(tsd, 0); malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); if (err) { return true; } } #undef UNLOCK_RETURN return false; } /* * End initialization functions. */ /******************************************************************************/ /* * Begin allocation-path internal functions and data structures. */ /* * Settings determined by the documented behavior of the allocation functions. */ typedef struct static_opts_s static_opts_t; struct static_opts_s { /* Whether or not allocation size may overflow. */ bool may_overflow; /* Whether or not allocations of size 0 should be treated as size 1. */ bool bump_empty_alloc; /* * Whether to assert that allocations are not of size 0 (after any * bumping). */ bool assert_nonempty_alloc; /* * Whether or not to modify the 'result' argument to malloc in case of * error. */ bool null_out_result_on_error; /* Whether to set errno when we encounter an error condition. 
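 * For instance, je_malloc() and je_calloc() enable this (ENOMEM /
 * EINVAL), while je_mallocx() leaves it unset and reports failure
 * through its return value alone.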
*/ bool set_errno_on_error; /* * The minimum valid alignment for functions requesting aligned storage. */ size_t min_alignment; /* The error string to use if we oom. */ const char *oom_string; /* The error string to use if the passed-in alignment is invalid. */ const char *invalid_alignment_string; /* * False if we're configured to skip some time-consuming operations. * * This isn't really a malloc "behavior", but it acts as a useful * summary of several other static (or at least, static after program * initialization) options. */ bool slow; }; JEMALLOC_ALWAYS_INLINE void static_opts_init(static_opts_t *static_opts) { static_opts->may_overflow = false; static_opts->bump_empty_alloc = false; static_opts->assert_nonempty_alloc = false; static_opts->null_out_result_on_error = false; static_opts->set_errno_on_error = false; static_opts->min_alignment = 0; static_opts->oom_string = ""; static_opts->invalid_alignment_string = ""; static_opts->slow = false; } /* * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we * should have one constant here per magic value there. Note however that the * representations need not be related. */ #define TCACHE_IND_NONE ((unsigned)-1) #define TCACHE_IND_AUTOMATIC ((unsigned)-2) #define ARENA_IND_AUTOMATIC ((unsigned)-1) typedef struct dynamic_opts_s dynamic_opts_t; struct dynamic_opts_s { void **result; size_t num_items; size_t item_size; size_t alignment; bool zero; unsigned tcache_ind; unsigned arena_ind; }; JEMALLOC_ALWAYS_INLINE void dynamic_opts_init(dynamic_opts_t *dynamic_opts) { dynamic_opts->result = NULL; dynamic_opts->num_items = 0; dynamic_opts->item_size = 0; dynamic_opts->alignment = 0; dynamic_opts->zero = false; dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; } /* ind is ignored if dopts->alignment > 0. */ JEMALLOC_ALWAYS_INLINE void * imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, size_t size, size_t usize, szind_t ind) { tcache_t *tcache; arena_t *arena; /* Fill in the tcache. */ if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { if (likely(!sopts->slow)) { /* Getting tcache ptr unconditionally. */ tcache = tsd_tcachep_get(tsd); assert(tcache == tcache_get(tsd)); } else { tcache = tcache_get(tsd); } } else if (dopts->tcache_ind == TCACHE_IND_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, dopts->tcache_ind); } /* Fill in the arena. */ if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { /* * In case of automatic arena management, we defer arena * computation until as late as we can, hoping to fill the * allocation out of the tcache. */ arena = NULL; } else { arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); } if (unlikely(dopts->alignment != 0)) { return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, dopts->zero, tcache, arena); } return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, arena, sopts->slow); } JEMALLOC_ALWAYS_INLINE void * imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, size_t usize, szind_t ind) { void *ret; /* * For small allocations, sampling bumps the usize. If so, we allocate * from the ind_large bucket. */ szind_t ind_large; size_t bumped_usize = usize; if (usize <= SMALL_MAXCLASS) { assert(((dopts->alignment == 0) ? 
sz_s2u(LARGE_MINCLASS) : sz_sa2u(LARGE_MINCLASS, dopts->alignment)) == LARGE_MINCLASS); ind_large = sz_size2index(LARGE_MINCLASS); bumped_usize = sz_s2u(LARGE_MINCLASS); ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, bumped_usize, ind_large); if (unlikely(ret == NULL)) { return NULL; } arena_prof_promote(tsd_tsdn(tsd), ret, usize); } else { ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); } return ret; } /* * Returns true if the allocation will overflow, and false otherwise. Sets * *size to the product either way. */ JEMALLOC_ALWAYS_INLINE bool compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, size_t *size) { /* * This function is just num_items * item_size, except that we may have * to check for overflow. */ if (!may_overflow) { assert(dopts->num_items == 1); *size = dopts->item_size; return false; } /* A size_t with its high-half bits all set to 1. */ static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); *size = dopts->item_size * dopts->num_items; if (unlikely(*size == 0)) { return (dopts->num_items != 0 && dopts->item_size != 0); } /* * We got a non-zero size, but we don't know if we overflowed to get * there. To avoid having to do a divide, we'll be clever and note that * if both A and B can be represented in N/2 bits, then their product * can be represented in N bits (without the possibility of overflow). */ if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { return false; } if (likely(*size / dopts->item_size == dopts->num_items)) { return false; } return true; } JEMALLOC_ALWAYS_INLINE int imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { /* Where the actual allocated memory will live. */ void *allocation = NULL; /* Filled in by compute_size_with_overflow below. */ size_t size = 0; /* * For unaligned allocations, we need only ind. For aligned * allocations, or in case of stats or profiling we need usize. * * These are actually dead stores, in that their values are reset before * any branch on their value is taken. Sometimes though, it's * convenient to pass them as arguments before this point. To avoid * undefined behavior then, we initialize them with dummy stores. */ szind_t ind = 0; size_t usize = 0; /* Reentrancy is only checked on slow path. */ int8_t reentrancy_level; /* Compute the amount of memory the user wants. */ if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, &size))) { goto label_oom; } /* Validate the user input. */ if (sopts->bump_empty_alloc) { if (unlikely(size == 0)) { size = 1; } } if (sopts->assert_nonempty_alloc) { assert (size != 0); } if (unlikely(dopts->alignment < sopts->min_alignment || (dopts->alignment & (dopts->alignment - 1)) != 0)) { goto label_invalid_alignment; } /* This is the beginning of the "core" algorithm. */ if (dopts->alignment == 0) { ind = sz_size2index(size); if (unlikely(ind >= NSIZES)) { goto label_oom; } if (config_stats || (config_prof && opt_prof)) { usize = sz_index2size(ind); assert(usize > 0 && usize <= LARGE_MAXCLASS); } } else { usize = sz_sa2u(size, dopts->alignment); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { goto label_oom; } } check_entry_exit_locking(tsd_tsdn(tsd)); /* * If we need to handle reentrancy, we can do it out of a * known-initialized arena (i.e. arena 0). */ reentrancy_level = tsd_reentrancy_level_get(tsd); if (sopts->slow && unlikely(reentrancy_level > 0)) { /* * We should never specify particular arenas or tcaches from * within our internal allocations. 
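 * Redirecting such reentrant requests to arena 0 with no tcache
 * (below) is safe because arena 0 is created in
 * malloc_init_hard_a0_locked() before any other allocator code can
 * run.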
*/ assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || dopts->tcache_ind == TCACHE_IND_NONE); assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); dopts->tcache_ind = TCACHE_IND_NONE; /* We know that arena 0 has already been initialized. */ dopts->arena_ind = 0; } /* If profiling is on, get our profiling context. */ if (config_prof && opt_prof) { /* * Note that if we're going down this path, usize must have been * initialized in the previous if statement. */ prof_tctx_t *tctx = prof_alloc_prep( tsd, usize, prof_active_get_unlocked(), true); alloc_ctx_t alloc_ctx; if (likely((uintptr_t)tctx == (uintptr_t)1U)) { alloc_ctx.slab = (usize <= SMALL_MAXCLASS); allocation = imalloc_no_sample( sopts, dopts, tsd, usize, usize, ind); } else if ((uintptr_t)tctx > (uintptr_t)1U) { /* * Note that ind might still be 0 here. This is fine; * imalloc_sample ignores ind if dopts->alignment > 0. */ allocation = imalloc_sample( sopts, dopts, tsd, usize, ind); alloc_ctx.slab = false; } else { allocation = NULL; } if (unlikely(allocation == NULL)) { prof_alloc_rollback(tsd, tctx, true); goto label_oom; } prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); } else { /* * If dopts->alignment > 0, then ind is still 0, but usize was * computed in the previous if statement. Down the positive * alignment path, imalloc_no_sample ignores ind and size * (relying only on usize). */ allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, ind); if (unlikely(allocation == NULL)) { goto label_oom; } } /* * Allocation has been done at this point. We still have some * post-allocation work to do though. */ assert(dopts->alignment == 0 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); if (config_stats) { assert(usize == isalloc(tsd_tsdn(tsd), allocation)); *tsd_thread_allocatedp_get(tsd) += usize; } if (sopts->slow) { UTRACE(0, size, allocation); } /* Success! */ check_entry_exit_locking(tsd_tsdn(tsd)); *dopts->result = allocation; return 0; label_oom: if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(sopts->oom_string); abort(); } if (sopts->slow) { UTRACE(NULL, size, NULL); } check_entry_exit_locking(tsd_tsdn(tsd)); if (sopts->set_errno_on_error) { set_errno(ENOMEM); } if (sopts->null_out_result_on_error) { *dopts->result = NULL; } return ENOMEM; /* * This label is only jumped to by one goto; we move it out of line * anyways to avoid obscuring the non-error paths, and for symmetry with * the oom case. */ label_invalid_alignment: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(sopts->invalid_alignment_string); abort(); } if (sopts->set_errno_on_error) { set_errno(EINVAL); } if (sopts->slow) { UTRACE(NULL, size, NULL); } check_entry_exit_locking(tsd_tsdn(tsd)); if (sopts->null_out_result_on_error) { *dopts->result = NULL; } return EINVAL; } /* Returns the errno-style error code of the allocation. */ JEMALLOC_ALWAYS_INLINE int imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(sopts->oom_string); abort(); } UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); set_errno(ENOMEM); *dopts->result = NULL; return ENOMEM; } /* We always need the tsd. Let's grab it right away. */ tsd_t *tsd = tsd_fetch(); assert(tsd); if (likely(tsd_fast(tsd))) { /* Fast and common path. 
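 * (tsd_fast() fails, and the sopts->slow handling kicks in, for the
 * uncommon states: e.g. reentrancy or thread teardown, plus the
 * bookkeeping-heavy option combinations folded into malloc_slow.)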
*/ tsd_assert_fast(tsd); sopts->slow = false; return imalloc_body(sopts, dopts, tsd); } else { sopts->slow = true; return imalloc_body(sopts, dopts, tsd); } } /******************************************************************************/ /* * Begin malloc(3)-compatible functions. */ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.malloc.entry", "size: %zu", size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.bump_empty_alloc = true; sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.oom_string = ": Error in malloc(): out of memory\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; imalloc(&sopts, &dopts); LOG("core.malloc.exit", "result: %p", ret); return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) je_posix_memalign(void **memptr, size_t alignment, size_t size) { int ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " "size: %zu", memptr, alignment, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.bump_empty_alloc = true; sopts.min_alignment = sizeof(void *); sopts.oom_string = ": Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = ": Error allocating aligned memory: invalid alignment\n"; dopts.result = memptr; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = alignment; ret = imalloc(&sopts, &dopts); LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, *memptr); return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) je_aligned_alloc(size_t alignment, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", alignment, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.bump_empty_alloc = true; sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.min_alignment = 1; sopts.oom_string = ": Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = ": Error allocating aligned memory: invalid alignment\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = alignment; imalloc(&sopts, &dopts); LOG("core.aligned_alloc.exit", "result: %p", ret); return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) je_calloc(size_t num, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.may_overflow = true; sopts.bump_empty_alloc = true; sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.oom_string = ": Error in calloc(): out of memory\n"; dopts.result = &ret; dopts.num_items = num; dopts.item_size = size; dopts.zero = true; imalloc(&sopts, &dopts); LOG("core.calloc.exit", "result: %p", ret); return ret; } static void * irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, prof_tctx_t *tctx) { void *p; if (tctx == NULL) { return NULL; } if (usize <= SMALL_MAXCLASS) { p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); if (p == NULL) { return NULL; } arena_prof_promote(tsd_tsdn(tsd), 
p, usize); } else { p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); } return p; } JEMALLOC_ALWAYS_INLINE void * irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, alloc_ctx_t *alloc_ctx) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); tctx = prof_alloc_prep(tsd, usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); } else { p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return NULL; } prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); return p; } JEMALLOC_ALWAYS_INLINE void ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { if (!slow_path) { tsd_assert_fast(tsd); } check_entry_exit_locking(tsd_tsdn(tsd)); if (tsd_reentrancy_level_get(tsd) != 0) { assert(slow_path); } assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != NSIZES); size_t usize; if (config_prof && opt_prof) { usize = sz_index2size(alloc_ctx.szind); prof_free(tsd, ptr, usize, &alloc_ctx); } else if (config_stats) { usize = sz_index2size(alloc_ctx.szind); } if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; } if (likely(!slow_path)) { idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, false); } else { idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, true); } } JEMALLOC_ALWAYS_INLINE void isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { if (!slow_path) { tsd_assert_fast(tsd); } check_entry_exit_locking(tsd_tsdn(tsd)); if (tsd_reentrancy_level_get(tsd) != 0) { assert(slow_path); } assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); alloc_ctx_t alloc_ctx, *ctx; if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { /* * When cache_oblivious is disabled and ptr is not page aligned, * the allocation was not sampled -- usize can be used to * determine szind directly. 
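 * (With cache_oblivious enabled, large allocations carry a random
 * page-internal offset, so the page-alignment test could not
 * distinguish small pointers from sampled large ones, and the rtree
 * lookup below would be required regardless.)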
*/ alloc_ctx.szind = sz_size2index(usize); alloc_ctx.slab = true; ctx = &alloc_ctx; if (config_debug) { alloc_ctx_t dbg_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, &dbg_ctx.slab); assert(dbg_ctx.szind == alloc_ctx.szind); assert(dbg_ctx.slab == alloc_ctx.slab); } } else if (config_prof && opt_prof) { rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind == sz_size2index(usize)); ctx = &alloc_ctx; } else { ctx = NULL; } if (config_prof && opt_prof) { prof_free(tsd, ptr, usize, ctx); } if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; } if (likely(!slow_path)) { isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); } else { isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); } } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t size) { void *ret; tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (unlikely(size == 0)) { if (ptr != NULL) { /* realloc(ptr, 0) is equivalent to free(ptr). */ UTRACE(ptr, 0, 0); tcache_t *tcache; tsd_t *tsd = tsd_fetch(); if (tsd_reentrancy_level_get(tsd) == 0) { tcache = tcache_get(tsd); } else { tcache = NULL; } ifree(tsd, ptr, tcache, true); LOG("core.realloc.exit", "result: %p", NULL); return NULL; } size = 1; } if (likely(ptr != NULL)) { assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { usize = sz_s2u(size); ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? NULL : irealloc_prof(tsd, ptr, old_usize, usize, &alloc_ctx); } else { if (config_stats) { usize = sz_s2u(size); } ret = iralloc(tsd, ptr, old_usize, size, 0, false); } tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). */ void *ret = je_malloc(size); LOG("core.realloc.exit", "result: %p", ret); return ret; } if (unlikely(ret == NULL)) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error in realloc(): " "out of memory\n"); abort(); } set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { tsd_t *tsd; assert(usize == isalloc(tsdn, ret)); tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); check_entry_exit_locking(tsdn); LOG("core.realloc.exit", "result: %p", ret); return ret; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) { LOG("core.free.entry", "ptr: %p", ptr); UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { /* * We avoid setting up tsd fully (e.g. tcache, arena binding) * based on only free() calls -- other activities trigger the * minimal to full transition. This is because free() may * happen during thread shutdown after tls deallocation: if a * thread never had any malloc activities until then, a * fully-setup tsd won't be destructed properly. 
*/ tsd_t *tsd = tsd_fetch_min(); check_entry_exit_locking(tsd_tsdn(tsd)); tcache_t *tcache; if (likely(tsd_fast(tsd))) { tsd_assert_fast(tsd); /* Unconditionally get tcache ptr on fast path. */ tcache = tsd_tcachep_get(tsd); ifree(tsd, ptr, tcache, false); } else { if (likely(tsd_reentrancy_level_get(tsd) == 0)) { tcache = tcache_get(tsd); } else { tcache = NULL; } ifree(tsd, ptr, tcache, true); } check_entry_exit_locking(tsd_tsdn(tsd)); } LOG("core.free.exit", ""); } /* * End malloc(3)-compatible functions. */ /******************************************************************************/ /* * Begin non-standard override functions. */ #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_memalign(size_t alignment, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.bump_empty_alloc = true; sopts.min_alignment = 1; sopts.oom_string = ": Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = ": Error allocating aligned memory: invalid alignment\n"; sopts.null_out_result_on_error = true; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = alignment; imalloc(&sopts, &dopts); LOG("core.memalign.exit", "result: %p", ret); return ret; } #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_valloc(size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.valloc.entry", "size: %zu\n", size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.bump_empty_alloc = true; sopts.null_out_result_on_error = true; sopts.min_alignment = PAGE; sopts.oom_string = ": Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = ": Error allocating aligned memory: invalid alignment\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = PAGE; imalloc(&sopts, &dopts); LOG("core.valloc.exit", "result: %p\n", ret); return ret; } #endif #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) /* * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible * to inconsistently reference libc's malloc(3)-compatible functions * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). * * These definitions interpose hooks in glibc. The functions are actually * passed an extra argument for the caller return address, which will be * ignored. */ JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; # endif # ifdef CPU_COUNT /* * To enable static linking with glibc, the libc specific malloc interface must * be implemented also, so none of glibc's malloc.o functions are added to the * link. */ # define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) /* To force macro expansion of je_ prefix before stringification. 
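 * The # operator suppresses macro expansion of its operand, so using
 * ALIAS(je_malloc) directly would stringify the unexpanded token.
 * Routing through one extra macro layer expands the argument first;
 * e.g. with a hypothetical "#define je_malloc prefixed_malloc" mangle
 * in effect, PREALIAS(je_malloc) yields alias("prefixed_malloc")
 * rather than alias("je_malloc").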
*/ # define PREALIAS(je_fn) ALIAS(je_fn) # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_FREE void __libc_free(void* ptr) PREALIAS(je_free); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC void *__libc_malloc(size_t size) PREALIAS(je_malloc); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC void *__libc_valloc(size_t size) PREALIAS(je_valloc); # endif # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); # endif # undef PREALIAS # undef ALIAS # endif #endif /* * End non-standard override functions. */ /******************************************************************************/ /* * Begin non-standard functions. */ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_mallocx(size_t size, int flags) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.assert_nonempty_alloc = true; sopts.null_out_result_on_error = true; sopts.oom_string = ": Error in mallocx(): out of memory\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; if (unlikely(flags != 0)) { if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); } dopts.zero = MALLOCX_ZERO_GET(flags); if ((flags & MALLOCX_TCACHE_MASK) != 0) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { dopts.tcache_ind = TCACHE_IND_NONE; } else { dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); } } else { dopts.tcache_ind = TCACHE_IND_AUTOMATIC; } if ((flags & MALLOCX_ARENA_MASK) != 0) dopts.arena_ind = MALLOCX_ARENA_GET(flags); } imalloc(&sopts, &dopts); LOG("core.mallocx.exit", "result: %p", ret); return ret; } static void * irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx) { void *p; if (tctx == NULL) { return NULL; } if (usize <= SMALL_MAXCLASS) { p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, alignment, zero, tcache, arena); if (p == NULL) { return NULL; } arena_prof_promote(tsdn, p, usize); } else { p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, tcache, arena); } return p; } JEMALLOC_ALWAYS_INLINE void * irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache, arena_t *arena, alloc_ctx_t *alloc_ctx) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); tctx = prof_alloc_prep(tsd, *usize, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, *usize, alignment, zero, tcache, arena, tctx); } else { p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, zero, tcache, arena); } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, false); return NULL; } if (p == old_ptr && alignment != 0) { /* * The allocation did not move, so it is possible that the size * class is smaller than would 
guarantee the requested * alignment, and that the alignment constraint was * serendipitously satisfied. Additionally, old_usize may not * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. */ *usize = isalloc(tsd_tsdn(tsd), p); } prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, old_usize, old_tctx); return p; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_rallocx(void *ptr, size_t size, int flags) { void *p; tsd_t *tsd; size_t usize; size_t old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; arena_t *arena; tcache_t *tcache; LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); assert(ptr != NULL); assert(size != 0); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(arena == NULL)) { goto label_oom; } } else { arena = NULL; } if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } } else { tcache = tcache_get(tsd); } alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { usize = (alignment == 0) ? sz_s2u(size) : sz_sa2u(size, alignment); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { goto label_oom; } p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, zero, tcache, arena, &alloc_ctx); if (unlikely(p == NULL)) { goto label_oom; } } else { p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, zero, tcache, arena); if (unlikely(p == NULL)) { goto label_oom; } if (config_stats) { usize = isalloc(tsd_tsdn(tsd), p); } } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.rallocx.exit", "result: %p", p); return p; label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error in rallocx(): out of memory\n"); abort(); } UTRACE(ptr, size, 0); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.rallocx.exit", "result: %p", NULL); return NULL; } JEMALLOC_ALWAYS_INLINE size_t ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t usize; if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { return old_usize; } usize = isalloc(tsdn, ptr); return usize; } static size_t ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; if (tctx == NULL) { return old_usize; } usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, zero); return usize; } JEMALLOC_ALWAYS_INLINE size_t ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { size_t usize_max, usize; 
bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. * Therefore, compute its maximum possible value and use that in * prof_alloc_prep() to decide whether to capture a backtrace. * prof_realloc() will use the actual usize to decide whether to sample. */ if (alignment == 0) { usize_max = sz_s2u(size+extra); assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); } else { usize_max = sz_sa2u(size+extra, alignment); if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { /* * usize_max is out of range, and chances are that * allocation will fail, but use the maximum possible * value and carry on with prof_alloc_prep(), just in * case allocation succeeds. */ usize_max = LARGE_MAXCLASS; } } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero, tctx); } else { usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero); } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); return usize; } prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, old_tctx); return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; size_t usize, old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " "flags: %d", ptr, size, extra, flags); assert(ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); /* * The API explicitly absolves itself of protecting against (size + * extra) numerical overflow, but we may need to clamp extra to avoid * exceeding LARGE_MAXCLASS. * * Ordinarily, size limit checking is handled deeper down, but here we * have to check as part of (size + extra) clamping, since we need the * clamped value in the above helper functions. 
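 *
 * Illustrative numbers: with size == LARGE_MAXCLASS - 1 and
 * extra == 100, LARGE_MAXCLASS - size == 1 < extra, so extra is
 * clamped to 1 and the request proceeds with size + extra ==
 * LARGE_MAXCLASS; a size already above LARGE_MAXCLASS bails out
 * without resizing.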
*/ if (unlikely(size > LARGE_MAXCLASS)) { usize = old_usize; goto label_not_resized; } if (unlikely(LARGE_MAXCLASS - size < extra)) { extra = LARGE_MAXCLASS - size; } if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, alignment, zero, &alloc_ctx); } else { usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero); } if (unlikely(usize == old_usize)) { goto label_not_resized; } if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } label_not_resized: UTRACE(ptr, size, ptr); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.xallocx.exit", "result: %zu", usize); return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, UNUSED int flags) { size_t usize; tsdn_t *tsdn; LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); assert(malloc_initialized() || IS_INITIALIZER); assert(ptr != NULL); tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); if (config_debug || force_ivsalloc) { usize = ivsalloc(tsdn, ptr); assert(force_ivsalloc || usize != 0); } else { usize = isalloc(tsdn, ptr); } check_entry_exit_locking(tsdn); LOG("core.sallocx.exit", "result: %zu", usize); return usize; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags) { LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch(); bool fast = tsd_fast(tsd); check_entry_exit_locking(tsd_tsdn(tsd)); tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { /* Not allowed to be reentrant and specify a custom tcache. */ assert(tsd_reentrancy_level_get(tsd) == 0); if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } } else { if (likely(fast)) { tcache = tsd_tcachep_get(tsd); assert(tcache == tcache_get(tsd)); } else { if (likely(tsd_reentrancy_level_get(tsd) == 0)) { tcache = tcache_get(tsd); } else { tcache = NULL; } } } UTRACE(ptr, 0, 0); if (likely(fast)) { tsd_assert_fast(tsd); ifree(tsd, ptr, tcache, false); } else { ifree(tsd, ptr, tcache, true); } check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.dallocx.exit", ""); } JEMALLOC_ALWAYS_INLINE size_t inallocx(tsdn_t *tsdn, size_t size, int flags) { check_entry_exit_locking(tsdn); size_t usize; if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { usize = sz_s2u(size); } else { usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); } check_entry_exit_locking(tsdn); return usize; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); tsd_t *tsd = tsd_fetch(); bool fast = tsd_fast(tsd); size_t usize = inallocx(tsd_tsdn(tsd), size, flags); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); check_entry_exit_locking(tsd_tsdn(tsd)); tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { /* Not allowed to be reentrant and specify a custom tcache. 
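 * (Reentrant paths always deallocate with tcache == NULL, as in
 * ifree()/isfree() above, so an explicit MALLOCX_TCACHE flag here
 * implies the call did not originate from inside the allocator.)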
*/ assert(tsd_reentrancy_level_get(tsd) == 0); if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } } else { if (likely(fast)) { tcache = tsd_tcachep_get(tsd); assert(tcache == tcache_get(tsd)); } else { if (likely(tsd_reentrancy_level_get(tsd) == 0)) { tcache = tcache_get(tsd); } else { tcache = NULL; } } } UTRACE(ptr, 0, 0); if (likely(fast)) { tsd_assert_fast(tsd); isfree(tsd, ptr, usize, tcache, false); } else { isfree(tsd, ptr, usize, tcache, true); } check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.sdallocx.exit", ""); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { size_t usize; tsdn_t *tsdn; assert(size != 0); if (unlikely(malloc_init())) { LOG("core.nallocx.exit", "result: %zu", ZU(0)); return 0; } tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); usize = inallocx(tsdn, size, flags); if (unlikely(usize > LARGE_MAXCLASS)) { LOG("core.nallocx.exit", "result: %zu", ZU(0)); return 0; } check_entry_exit_locking(tsdn); LOG("core.nallocx.exit", "result: %zu", usize); return usize; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; LOG("core.mallctl.entry", "name: %s", name); if (unlikely(malloc_init())) { LOG("core.mallctl.exit", "result: %d", EAGAIN); return EAGAIN; } tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.mallctl.exit", "result: %d", ret); return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { int ret; LOG("core.mallctlnametomib.entry", "name: %s", name); if (unlikely(malloc_init())) { LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); return EAGAIN; } tsd_t *tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_nametomib(tsd, name, mibp, miblenp); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.mallctlnametomib.exit", "result: %d", ret); return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; LOG("core.mallctlbymib.entry", ""); if (unlikely(malloc_init())) { LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); return EAGAIN; } tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.mallctlbymib.exit", "result: %d", ret); return ret; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { tsdn_t *tsdn; LOG("core.malloc_stats_print.entry", ""); tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); stats_print(write_cb, cbopaque, opts); check_entry_exit_locking(tsdn); LOG("core.malloc_stats_print.exit", ""); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; tsdn_t *tsdn; LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); assert(malloc_initialized() || IS_INITIALIZER); tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); if (unlikely(ptr == NULL)) { ret = 0; } else { if (config_debug || force_ivsalloc) { ret = ivsalloc(tsdn, ptr); assert(force_ivsalloc || ret != 0); } else { ret = isalloc(tsdn, ptr); } } check_entry_exit_locking(tsdn); 
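	/*
	 * Usage sketch (illustrative, not part of the library): the reported
	 * size is that of ptr's size class and may exceed the requested size:
	 *
	 *   void *p = je_malloc(100);
	 *   size_t u = je_malloc_usable_size(p); // u == 112 with the default
	 *                                        // small size classes
	 *
	 * so a caller may legitimately use all u bytes.
	 */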
LOG("core.malloc_usable_size.exit", "result: %zu", ret); return ret; } /* * End non-standard functions. */ /******************************************************************************/ /* * The following functions are used by threading libraries for protection of * malloc during fork(). */ /* * If an application creates a thread before doing any allocation in the main * thread, then calls fork(2) in the main thread followed by memory allocation * in the child process, a race can occur that results in deadlock within the * child: the main thread may have forked while the created thread had * partially initialized the allocator. Ordinarily jemalloc prevents * fork/malloc races via the following functions it registers during * initialization using pthread_atfork(), but of course that does no good if * the allocator isn't fully initialized at fork time. The following library * constructor is a partial solution to this problem. It may still be possible * to trigger the deadlock described above, but doing so would involve forking * via a library constructor that runs before jemalloc's runs. */ #ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void jemalloc_constructor(void) { malloc_init(); } #endif #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_prefork(void) #else JEMALLOC_EXPORT void _malloc_prefork(void) #endif { tsd_t *tsd; unsigned i, j, narenas; arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) { return; } #endif assert(malloc_initialized()); tsd = tsd_fetch(); narenas = narenas_total_get(); witness_prefork(tsd_witness_tsdp_get(tsd)); /* Acquire all mutexes in a safe order. */ ctl_prefork(tsd_tsdn(tsd)); tcache_prefork(tsd_tsdn(tsd)); malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); if (have_background_thread) { background_thread_prefork0(tsd_tsdn(tsd)); } prof_prefork0(tsd_tsdn(tsd)); if (have_background_thread) { background_thread_prefork1(tsd_tsdn(tsd)); } /* Break arena prefork into stages to preserve lock order. */ for (i = 0; i < 8; i++) { for (j = 0; j < narenas; j++) { if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != NULL) { switch (i) { case 0: arena_prefork0(tsd_tsdn(tsd), arena); break; case 1: arena_prefork1(tsd_tsdn(tsd), arena); break; case 2: arena_prefork2(tsd_tsdn(tsd), arena); break; case 3: arena_prefork3(tsd_tsdn(tsd), arena); break; case 4: arena_prefork4(tsd_tsdn(tsd), arena); break; case 5: arena_prefork5(tsd_tsdn(tsd), arena); break; case 6: arena_prefork6(tsd_tsdn(tsd), arena); break; case 7: arena_prefork7(tsd_tsdn(tsd), arena); break; default: not_reached(); } } } } prof_prefork1(tsd_tsdn(tsd)); } #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_postfork_parent(void) #else JEMALLOC_EXPORT void _malloc_postfork(void) #endif { tsd_t *tsd; unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) { return; } #endif assert(malloc_initialized()); tsd = tsd_fetch(); witness_postfork_parent(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. 
*/
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_parent(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_parent(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_parent(tsd_tsdn(tsd));
	ctl_postfork_parent(tsd_tsdn(tsd));
}

void
jemalloc_postfork_child(void) {
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed. */
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_child(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_child(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_child(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_child(tsd_tsdn(tsd));
	ctl_postfork_child(tsd_tsdn(tsd));
}

/******************************************************************************/
jemalloc-sys-0.3.2/jemalloc/src/jemalloc_cpp.cpp010064400007650000024000000057231340421340100200450ustar0000000000000000
#include <mutex>
#include <new>

#define JEMALLOC_CPP_CPP_
#ifdef __cplusplus
extern "C" {
#endif

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#ifdef __cplusplus
}
#endif

// All operators in this file are exported.

// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
// thunk?
//
// extern __typeof (sdallocx) sdallocx_int
//     __attribute ((alias ("sdallocx"),
//     visibility ("hidden")));
//
// ... but it needs to work with jemalloc namespaces.

void *operator new(std::size_t size);
void *operator new[](std::size_t size);
void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
void operator delete(void *ptr) noexcept;
void operator delete[](void *ptr) noexcept;
void operator delete(void *ptr, const std::nothrow_t &) noexcept;
void operator delete[](void *ptr, const std::nothrow_t &) noexcept;

#if __cpp_sized_deallocation >= 201309
/* C++14's sized-delete operators. */
void operator delete(void *ptr, std::size_t size) noexcept;
void operator delete[](void *ptr, std::size_t size) noexcept;
#endif

JEMALLOC_NOINLINE
static void *
handleOOM(std::size_t size, bool nothrow) {
	void *ptr = nullptr;

	while (ptr == nullptr) {
		std::new_handler handler;
		// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
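		// std::set_new_handler() returns the handler that was
		// previously installed, so installing nullptr and immediately
		// restoring the returned value emulates std::get_new_handler();
		// the static mutex below makes this two-step read appear
		// atomic to concurrent OOM paths.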
		{
			static std::mutex mtx;
			std::lock_guard<std::mutex> lock(mtx);

			handler = std::set_new_handler(nullptr);
			std::set_new_handler(handler);
		}
		if (handler == nullptr)
			break;

		try {
			handler();
		} catch (const std::bad_alloc &) {
			break;
		}

		ptr = je_malloc(size);
	}

	if (ptr == nullptr && !nothrow)
		std::__throw_bad_alloc();
	return ptr;
}

template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
	void *ptr = je_malloc(size);
	if (likely(ptr != nullptr))
		return ptr;

	return handleOOM(size, IsNoExcept);
}

void *
operator new(std::size_t size) {
	return newImpl<false>(size);
}

void *
operator new[](std::size_t size) {
	return newImpl<false>(size);
}

void *
operator new(std::size_t size, const std::nothrow_t &) noexcept {
	return newImpl<true>(size);
}

void *
operator new[](std::size_t size, const std::nothrow_t &) noexcept {
	return newImpl<true>(size);
}

void
operator delete(void *ptr) noexcept {
	je_free(ptr);
}

void
operator delete[](void *ptr) noexcept {
	je_free(ptr);
}

void
operator delete(void *ptr, const std::nothrow_t &) noexcept {
	je_free(ptr);
}

void
operator delete[](void *ptr, const std::nothrow_t &) noexcept {
	je_free(ptr);
}

#if __cpp_sized_deallocation >= 201309

void
operator delete(void *ptr, std::size_t size) noexcept {
	if (unlikely(ptr == nullptr)) {
		return;
	}
	je_sdallocx(ptr, size, /*flags=*/0);
}

void
operator delete[](void *ptr, std::size_t size) noexcept {
	if (unlikely(ptr == nullptr)) {
		return;
	}
	je_sdallocx(ptr, size, /*flags=*/0);
}

#endif // __cpp_sized_deallocation
jemalloc-sys-0.3.2/jemalloc/src/large.c010064400007650000024000000254171340421341300161540ustar0000000000000000
#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == sz_s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}

void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sz_sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* See comments in arena_bin_slabs_full_insert(). */
	if (!arena_is_auto(arena)) {
		/* Insert extent into large.
*/ malloc_mutex_lock(tsdn, &arena->large_mtx); extent_list_append(&arena->large, extent); malloc_mutex_unlock(tsdn, &arena->large_mtx); } if (config_prof && arena_prof_accum(tsdn, arena, usize)) { prof_idump(tsdn); } if (zero) { assert(is_zeroed); } else if (config_fill && unlikely(opt_junk_alloc)) { memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, extent_usize_get(extent)); } arena_decay_tick(tsdn, arena); return extent_addr_get(extent); } static void large_dalloc_junk_impl(void *ptr, size_t size) { memset(ptr, JEMALLOC_FREE_JUNK, size); } large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl; static void large_dalloc_maybe_junk_impl(void *ptr, size_t size) { if (config_fill && have_dss && unlikely(opt_junk_free)) { /* * Only bother junk filling if the extent isn't about to be * unmapped. */ if (opt_retain || (have_dss && extent_in_dss(ptr))) { large_dalloc_junk(ptr, size); } } } large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk = large_dalloc_maybe_junk_impl; static bool large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { arena_t *arena = extent_arena_get(extent); size_t oldusize = extent_usize_get(extent); extent_hooks_t *extent_hooks = extent_hooks_get(arena); size_t diff = extent_size_get(extent) - (usize + sz_large_pad); assert(oldusize > usize); if (extent_hooks->split == NULL) { return true; } /* Split excess pages. */ if (diff != 0) { extent_t *trail = extent_split_wrapper(tsdn, arena, &extent_hooks, extent, usize + sz_large_pad, sz_size2index(usize), false, diff, NSIZES, false); if (trail == NULL) { return true; } if (config_fill && unlikely(opt_junk_free)) { large_dalloc_maybe_junk(extent_addr_get(trail), extent_size_get(trail)); } arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail); } arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); return false; } static bool large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, bool zero) { arena_t *arena = extent_arena_get(extent); size_t oldusize = extent_usize_get(extent); extent_hooks_t *extent_hooks = extent_hooks_get(arena); size_t trailsize = usize - oldusize; if (extent_hooks->merge == NULL) { return true; } if (config_fill && unlikely(opt_zero)) { zero = true; } /* * Copy zero into is_zeroed_trail and pass the copy when allocating the * extent, so that it is possible to make correct junk/zero fill * decisions below, even if is_zeroed_trail ends up true when zero is * false. 
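	 * Concretely: a recycled trail may come back already zeroed, making
	 * is_zeroed_trail true even though the caller passed zero == false;
	 * the junk/zero decisions below therefore key off the caller's zero,
	 * while is_zeroed_trail only backs the assertion on the zeroed path.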
*/ bool is_zeroed_trail = zero; bool commit = true; extent_t *trail; bool new_mapping; if ((trail = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_dirty, extent_past_get(extent), trailsize, 0, CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL || (trail = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, extent_past_get(extent), trailsize, 0, CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) { if (config_stats) { new_mapping = false; } } else { if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, extent_past_get(extent), trailsize, 0, CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) == NULL) { return true; } if (config_stats) { new_mapping = true; } } if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); return true; } rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); szind_t szind = sz_size2index(usize); extent_szind_set(extent, szind); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_addr_get(extent), szind, false); if (config_stats && new_mapping) { arena_stats_mapped_add(tsdn, &arena->stats, trailsize); } if (zero) { if (config_cache_oblivious) { /* * Zero the trailing bytes of the original allocation's * last page, since they are in an indeterminate state. * There will always be trailing bytes, because ptr's * offset from the beginning of the extent is a multiple * of CACHELINE in [0 .. PAGE). */ void *zbase = (void *) ((uintptr_t)extent_addr_get(extent) + oldusize); void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + PAGE)); size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; assert(nzero > 0); memset(zbase, 0, nzero); } assert(is_zeroed_trail); } else if (config_fill && unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize), JEMALLOC_ALLOC_JUNK, usize - oldusize); } arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); return false; } bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, size_t usize_max, bool zero) { size_t oldusize = extent_usize_get(extent); /* The following should have been caught by callers. */ assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS); if (usize_max > oldusize) { /* Attempt to expand the allocation in-place. */ if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } /* Try again, this time with usize_min. */ if (usize_min < usize_max && usize_min > oldusize && large_ralloc_no_move_expand(tsdn, extent, usize_min, zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } } /* * Avoid moving the allocation if the existing extent size accommodates * the new size. */ if (oldusize >= usize_min && oldusize <= usize_max) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } /* Attempt to shrink the allocation in-place. 
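	 * Reaching this point with oldusize > usize_max means the caller asked
	 * for a strictly smaller allocation; any other combination falls
	 * through to the final return and reports failure so that the caller
	 * relocates.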
*/ if (oldusize > usize_max) { if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } } return true; } static void * large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { if (alignment <= CACHELINE) { return large_malloc(tsdn, arena, usize, zero); } return large_palloc(tsdn, arena, usize, alignment, zero); } void * large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { size_t oldusize = extent_usize_get(extent); /* The following should have been caught by callers. */ assert(usize > 0 && usize <= LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS); /* Try to avoid moving the allocation. */ if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { return extent_addr_get(extent); } /* * usize and old size are different enough that we need to use a * different size class. In that case, fall back to allocating new * space and copying. */ void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero); if (ret == NULL) { return NULL; } size_t copysize = (usize < oldusize) ? usize : oldusize; memcpy(ret, extent_addr_get(extent), copysize); isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); return ret; } /* * junked_locked indicates whether the extent's data have been junk-filled, and * whether the arena's large_mtx is currently held. */ static void large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, bool junked_locked) { if (!junked_locked) { /* See comments in arena_bin_slabs_full_insert(). */ if (!arena_is_auto(arena)) { malloc_mutex_lock(tsdn, &arena->large_mtx); extent_list_remove(&arena->large, extent); malloc_mutex_unlock(tsdn, &arena->large_mtx); } large_dalloc_maybe_junk(extent_addr_get(extent), extent_usize_get(extent)); } else { malloc_mutex_assert_owner(tsdn, &arena->large_mtx); if (!arena_is_auto(arena)) { extent_list_remove(&arena->large, extent); } } arena_extent_dalloc_large_prep(tsdn, arena, extent); } static void large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent); } void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) { large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true); } void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) { large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent); } void large_dalloc(tsdn_t *tsdn, extent_t *extent) { arena_t *arena = extent_arena_get(extent); large_dalloc_prep_impl(tsdn, arena, extent, false); large_dalloc_finish_impl(tsdn, arena, extent); arena_decay_tick(tsdn, arena); } size_t large_salloc(tsdn_t *tsdn, const extent_t *extent) { return extent_usize_get(extent); } prof_tctx_t * large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { return extent_prof_tctx_get(extent); } void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { extent_prof_tctx_set(extent, tctx); } void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); } jemalloc-sys-0.3.2/jemalloc/src/log.c010064400007650000024000000046751340421340100156430ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include 
"jemalloc/internal/log.h" char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; atomic_b_t log_init_done = ATOMIC_INIT(false); /* * Returns true if we were able to pick out a segment. Fills in r_segment_end * with a pointer to the first character after the end of the string. */ static const char * log_var_extract_segment(const char* segment_begin) { const char *end; for (end = segment_begin; *end != '\0' && *end != '|'; end++) { } return end; } static bool log_var_matches_segment(const char *segment_begin, const char *segment_end, const char *log_var_begin, const char *log_var_end) { assert(segment_begin <= segment_end); assert(log_var_begin < log_var_end); ptrdiff_t segment_len = segment_end - segment_begin; ptrdiff_t log_var_len = log_var_end - log_var_begin; /* The special '.' segment matches everything. */ if (segment_len == 1 && *segment_begin == '.') { return true; } if (segment_len == log_var_len) { return strncmp(segment_begin, log_var_begin, segment_len) == 0; } else if (segment_len < log_var_len) { return strncmp(segment_begin, log_var_begin, segment_len) == 0 && log_var_begin[segment_len] == '.'; } else { return false; } } unsigned log_var_update_state(log_var_t *log_var) { const char *log_var_begin = log_var->name; const char *log_var_end = log_var->name + strlen(log_var->name); /* Pointer to one before the beginning of the current segment. */ const char *segment_begin = log_var_names; /* * If log_init done is false, we haven't parsed the malloc conf yet. To * avoid log-spew, we default to not displaying anything. */ if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) { return LOG_INITIALIZED_NOT_ENABLED; } while (true) { const char *segment_end = log_var_extract_segment( segment_begin); assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE); if (log_var_matches_segment(segment_begin, segment_end, log_var_begin, log_var_end)) { atomic_store_u(&log_var->state, LOG_ENABLED, ATOMIC_RELAXED); return LOG_ENABLED; } if (*segment_end == '\0') { /* Hit the end of the segment string with no match. */ atomic_store_u(&log_var->state, LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED); return LOG_INITIALIZED_NOT_ENABLED; } /* Otherwise, skip the delimiter and continue. */ segment_begin = segment_end + 1; } } jemalloc-sys-0.3.2/jemalloc/src/malloc_io.c010064400007650000024000000345141340421340100170130ustar0000000000000000#define JEMALLOC_MALLOC_IO_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/util.h" #ifdef assert # undef assert #endif #ifdef not_reached # undef not_reached #endif #ifdef not_implemented # undef not_implemented #endif #ifdef assert_not_implemented # undef assert_not_implemented #endif /* * Define simple versions of assertion macros that won't recurse in case * of assertion failures in malloc_*printf(). */ #define assert(e) do { \ if (config_debug && !(e)) { \ malloc_write(": Failed assertion\n"); \ abort(); \ } \ } while (0) #define not_reached() do { \ if (config_debug) { \ malloc_write(": Unreachable code reached\n"); \ abort(); \ } \ unreachable(); \ } while (0) #define not_implemented() do { \ if (config_debug) { \ malloc_write(": Not implemented\n"); \ abort(); \ } \ } while (0) #define assert_not_implemented(e) do { \ if (unlikely(config_debug && !(e))) { \ not_implemented(); \ } \ } while (0) /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ static void wrtmessage(void *cbopaque, const char *s); #define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); #define D2S_BUFSIZE (1 + U2S_BUFSIZE) static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); #define O2S_BUFSIZE (1 + U2S_BUFSIZE) static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); #define X2S_BUFSIZE (2 + U2S_BUFSIZE) static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p); /******************************************************************************/ /* malloc_message() setup. */ static void wrtmessage(void *cbopaque, const char *s) { malloc_write_fd(STDERR_FILENO, s, strlen(s)); } JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); /* * Wrapper around malloc_message() that avoids the need for * je_malloc_message(...) throughout the code. */ void malloc_write(const char *s) { if (je_malloc_message != NULL) { je_malloc_message(NULL, s); } else { wrtmessage(NULL, s); } } /* * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so * provide a wrapper. */ int buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPSTR)buf, (DWORD)buflen, NULL); return 0; #elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } return 0; #else return strerror_r(err, buf, buflen); #endif } uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { uintmax_t ret, digit; unsigned b; bool neg; const char *p, *ns; p = nptr; if (base < 0 || base == 1 || base > 36) { ns = p; set_errno(EINVAL); ret = UINTMAX_MAX; goto label_return; } b = base; /* Swallow leading whitespace and get sign, if any. */ neg = false; while (true) { switch (*p) { case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': p++; break; case '-': neg = true; /* Fall through. */ case '+': p++; /* Fall through. */ default: goto label_prefix; } } /* Get prefix, if any. */ label_prefix: /* * Note where the first non-whitespace/sign character is so that it is * possible to tell whether any digits are consumed (e.g., " 0" vs. * " -x"). */ ns = p; if (*p == '0') { switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': if (b == 0) { b = 8; } if (b == 8) { p++; } break; case 'X': case 'x': switch (p[2]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': if (b == 0) { b = 16; } if (b == 16) { p += 2; } break; default: break; } break; default: p++; ret = 0; goto label_return; } } if (b == 0) { b = 10; } /* Convert. */ ret = 0; while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { uintmax_t pret = ret; ret *= b; ret += digit; if (ret < pret) { /* Overflow. */ set_errno(ERANGE); ret = UINTMAX_MAX; goto label_return; } p++; } if (neg) { ret = (uintmax_t)(-((intmax_t)ret)); } if (p == ns) { /* No conversion performed. */ set_errno(EINVAL); ret = UINTMAX_MAX; goto label_return; } label_return: if (endptr != NULL) { if (p == ns) { /* No characters were converted. 
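		 * This mirrors strtoumax(3): for an input such as " -x" no
		 * digits are consumed, so *endptr is reset to the original
		 * nptr rather than left pointing just past the sign.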
*/ *endptr = (char *)nptr; } else { *endptr = (char *)p; } } return ret; } static char * u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { unsigned i; i = U2S_BUFSIZE - 1; s[i] = '\0'; switch (base) { case 10: do { i--; s[i] = "0123456789"[x % (uint64_t)10]; x /= (uint64_t)10; } while (x > 0); break; case 16: { const char *digits = (uppercase) ? "0123456789ABCDEF" : "0123456789abcdef"; do { i--; s[i] = digits[x & 0xf]; x >>= 4; } while (x > 0); break; } default: { const char *digits = (uppercase) ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" : "0123456789abcdefghijklmnopqrstuvwxyz"; assert(base >= 2 && base <= 36); do { i--; s[i] = digits[x % (uint64_t)base]; x /= (uint64_t)base; } while (x > 0); }} *slen_p = U2S_BUFSIZE - 1 - i; return &s[i]; } static char * d2s(intmax_t x, char sign, char *s, size_t *slen_p) { bool neg; if ((neg = (x < 0))) { x = -x; } s = u2s(x, 10, false, s, slen_p); if (neg) { sign = '-'; } switch (sign) { case '-': if (!neg) { break; } /* Fall through. */ case ' ': case '+': s--; (*slen_p)++; *s = sign; break; default: not_reached(); } return s; } static char * o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { s = u2s(x, 8, false, s, slen_p); if (alt_form && *s != '0') { s--; (*slen_p)++; *s = '0'; } return s; } static char * x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { s = u2s(x, 16, uppercase, s, slen_p); if (alt_form) { s -= 2; (*slen_p) += 2; memcpy(s, uppercase ? "0X" : "0x", 2); } return s; } size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { size_t i; const char *f; #define APPEND_C(c) do { \ if (i < size) { \ str[i] = (c); \ } \ i++; \ } while (0) #define APPEND_S(s, slen) do { \ if (i < size) { \ size_t cpylen = (slen <= size - i) ? slen : size - i; \ memcpy(&str[i], s, cpylen); \ } \ i += slen; \ } while (0) #define APPEND_PADDED_S(s, slen, width, left_justify) do { \ /* Left padding. */ \ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ (size_t)width - slen : 0); \ if (!left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ } \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ } \ } \ } while (0) #define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ break; \ case '?' | 0x80: \ val = va_arg(ap, unsigned int); \ break; \ case 'l': \ val = va_arg(ap, long); \ break; \ case 'l' | 0x80: \ val = va_arg(ap, unsigned long); \ break; \ case 'q': \ val = va_arg(ap, long long); \ break; \ case 'q' | 0x80: \ val = va_arg(ap, unsigned long long); \ break; \ case 'j': \ val = va_arg(ap, intmax_t); \ break; \ case 'j' | 0x80: \ val = va_arg(ap, uintmax_t); \ break; \ case 't': \ val = va_arg(ap, ptrdiff_t); \ break; \ case 'z': \ val = va_arg(ap, ssize_t); \ break; \ case 'z' | 0x80: \ val = va_arg(ap, size_t); \ break; \ case 'p': /* Synthetic; used for %p. */ \ val = va_arg(ap, uintptr_t); \ break; \ default: \ not_reached(); \ val = 0; \ } \ } while (0) i = 0; f = format; while (true) { switch (*f) { case '\0': goto label_out; case '%': { bool alt_form = false; bool left_justify = false; bool plus_space = false; bool plus_plus = false; int prec = -1; int width = -1; unsigned char len = '?'; char *s; size_t slen; f++; /* Flags. 
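			 * The flag characters '#', '-', ' ' and '+' may each
			 * appear at most once (asserted below), with the same
			 * alternate-form, left-justify, and sign semantics as
			 * printf(3).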
*/ while (true) { switch (*f) { case '#': assert(!alt_form); alt_form = true; break; case '-': assert(!left_justify); left_justify = true; break; case ' ': assert(!plus_space); plus_space = true; break; case '+': assert(!plus_plus); plus_plus = true; break; default: goto label_width; } f++; } /* Width. */ label_width: switch (*f) { case '*': width = va_arg(ap, int); f++; if (width < 0) { left_justify = true; width = -width; } break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uwidth; set_errno(0); uwidth = malloc_strtoumax(f, (char **)&f, 10); assert(uwidth != UINTMAX_MAX || get_errno() != ERANGE); width = (int)uwidth; break; } default: break; } /* Width/precision separator. */ if (*f == '.') { f++; } else { goto label_length; } /* Precision. */ switch (*f) { case '*': prec = va_arg(ap, int); f++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uprec; set_errno(0); uprec = malloc_strtoumax(f, (char **)&f, 10); assert(uprec != UINTMAX_MAX || get_errno() != ERANGE); prec = (int)uprec; break; } default: break; } /* Length. */ label_length: switch (*f) { case 'l': f++; if (*f == 'l') { len = 'q'; f++; } else { len = 'l'; } break; case 'q': case 'j': case 't': case 'z': len = *f; f++; break; default: break; } /* Conversion specifier. */ switch (*f) { case '%': /* %% */ APPEND_C(*f); f++; break; case 'd': case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; GET_ARG_NUMERIC(val, len); s = d2s(val, (plus_plus ? '+' : (plus_space ? ' ' : '-')), buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'o': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[O2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = o2s(val, alt_form, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'u': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[U2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = u2s(val, 10, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'x': case 'X': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = x2s(val, alt_form, *f == 'X', buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'c': { unsigned char val; char buf[2]; assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); val = va_arg(ap, int); buf[0] = val; buf[1] = '\0'; APPEND_PADDED_S(buf, 1, width, left_justify); f++; break; } case 's': assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); slen = (prec < 0) ? strlen(s) : (size_t)prec; APPEND_PADDED_S(s, slen, width, left_justify); f++; break; case 'p': { uintmax_t val; char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, 'p'); s = x2s(val, true, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } default: not_reached(); } break; } default: { APPEND_C(*f); f++; break; }} } label_out: if (i < size) { str[i] = '\0'; } else { str[size - 1] = '\0'; } #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC return i; } JEMALLOC_FORMAT_PRINTF(3, 4) size_t malloc_snprintf(char *str, size_t size, const char *format, ...) 
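/*
 * Usage sketch (illustrative, not part of the library): as with C99
 * snprintf(), the return value counts the characters that would have been
 * written given unlimited space, so truncation is detected by comparing it
 * against the buffer size:
 *
 *   char buf[8];
 *   size_t n = malloc_snprintf(buf, sizeof(buf), "%zu", (size_t)123456789);
 *   // n == 9 although buf now holds the truncated "1234567".
 */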
{ size_t ret; va_list ap; va_start(ap, format); ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); return ret; } void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { /* * The caller did not provide an alternate write_cb callback * function, so use the default one. malloc_write() is an * inline function, so use malloc_message() directly here. */ write_cb = (je_malloc_message != NULL) ? je_malloc_message : wrtmessage; cbopaque = NULL; } malloc_vsnprintf(buf, sizeof(buf), format, ap); write_cb(cbopaque, buf); } /* * Print to a callback function in such a way as to (hopefully) avoid memory * allocation. */ JEMALLOC_FORMAT_PRINTF(3, 4) void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(write_cb, cbopaque, format, ap); va_end(ap); } /* Print to stderr in such a way as to avoid memory allocation. */ JEMALLOC_FORMAT_PRINTF(1, 2) void malloc_printf(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); } /* * Restore normal assertion macros, in order to make it possible to compile all * C files as a single concatenation. */ #undef assert #undef not_reached #undef not_implemented #undef assert_not_implemented #include "jemalloc/internal/assert.h" jemalloc-sys-0.3.2/jemalloc/src/mutex.c010064400007650000024000000131331340421341300162140ustar0000000000000000#define JEMALLOC_MUTEX_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/spin.h" #ifndef _CRT_SPINCOUNT #define _CRT_SPINCOUNT 4000 #endif /******************************************************************************/ /* Data. */ #ifdef JEMALLOC_LAZY_LOCK bool isthreaded = false; #endif #ifdef JEMALLOC_MUTEX_INIT_CB static bool postpone_init = true; static malloc_mutex_t *postponed_mutexes = NULL; #endif /******************************************************************************/ /* * We intercept pthread_create() calls in order to toggle isthreaded if the * process goes multi-threaded. */ #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg) { return pthread_create_wrapper(thread, attr, start_routine, arg); } #endif /******************************************************************************/ #ifdef JEMALLOC_MUTEX_INIT_CB JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif void malloc_mutex_lock_slow(malloc_mutex_t *mutex) { mutex_prof_data_t *data = &mutex->prof_data; UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER; if (ncpus == 1) { goto label_spin_done; } int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; do { spin_cpu_spinwait(); if (!malloc_mutex_trylock_final(mutex)) { data->n_spin_acquired++; return; } } while (cnt++ < max_cnt); if (!config_stats) { /* Only spin is useful when stats is off. */ malloc_mutex_lock_final(mutex); return; } label_spin_done: nstime_update(&before); /* Copy before to after to avoid clock skews. 
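	 * nstime_update() refuses to move a timestamp backwards, so seeding
	 * `after` with `before` guarantees that the wait delta computed below
	 * is non-negative even on a non-monotonic clock.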
*/ nstime_t after; nstime_copy(&after, &before); uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED) + 1; /* One last try as above two calls may take quite some cycles. */ if (!malloc_mutex_trylock_final(mutex)) { atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); data->n_spin_acquired++; return; } /* True slow path. */ malloc_mutex_lock_final(mutex); /* Update more slow-path only counters. */ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); nstime_update(&after); nstime_t delta; nstime_copy(&delta, &after); nstime_subtract(&delta, &before); data->n_wait_times++; nstime_add(&data->tot_wait_time, &delta); if (nstime_compare(&data->max_wait_time, &delta) < 0) { nstime_copy(&data->max_wait_time, &delta); } if (n_thds > data->max_n_thds) { data->max_n_thds = n_thds; } } static void mutex_prof_data_init(mutex_prof_data_t *data) { memset(data, 0, sizeof(mutex_prof_data_t)); nstime_init(&data->max_wait_time, 0); nstime_init(&data->tot_wait_time, 0); data->prev_owner = NULL; } void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_assert_owner(tsdn, mutex); mutex_prof_data_init(&mutex->prof_data); } static int mutex_addr_comp(const witness_t *witness1, void *mutex1, const witness_t *witness2, void *mutex2) { assert(mutex1 != NULL); assert(mutex2 != NULL); uintptr_t mu1int = (uintptr_t)mutex1; uintptr_t mu2int = (uintptr_t)mutex2; if (mu1int < mu2int) { return -1; } else if (mu1int == mu2int) { return 0; } else { return 1; } } bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { mutex_prof_data_init(&mutex->prof_data); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, _CRT_SPINCOUNT)) { return true; } # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) if (postpone_init) { mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, bootstrap_calloc) != 0) { return true; } } #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) { return true; } pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); return true; } pthread_mutexattr_destroy(&attr); #endif if (config_debug) { mutex->lock_order = lock_order; if (lock_order == malloc_mutex_address_ordered) { witness_init(&mutex->witness, name, rank, mutex_addr_comp, mutex); } else { witness_init(&mutex->witness, name, rank, NULL, NULL); } } return false; } void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_lock(tsdn, mutex); } void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_unlock(tsdn, mutex); } void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB malloc_mutex_unlock(tsdn, mutex); #else if (malloc_mutex_init(mutex, mutex->witness.name, mutex->witness.rank, mutex->lock_order)) { malloc_printf(": Error re-initializing mutex in " "child\n"); if (opt_abort) { abort(); } } #endif } bool malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, bootstrap_calloc) != 0) { return true; } postponed_mutexes 
= postponed_mutexes->postponed_next; } #endif return false; } jemalloc-sys-0.3.2/jemalloc/src/mutex_pool.c010064400007650000024000000007411340421340100172430ustar0000000000000000#define JEMALLOC_MUTEX_POOL_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { for (int i = 0; i < MUTEX_POOL_SIZE; ++i) { if (malloc_mutex_init(&pool->mutexes[i], name, rank, malloc_mutex_address_ordered)) { return true; } } return false; } jemalloc-sys-0.3.2/jemalloc/src/nstime.c010064400007650000024000000066531340421340100163570ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/assert.h" #define BILLION UINT64_C(1000000000) #define MILLION UINT64_C(1000000) void nstime_init(nstime_t *time, uint64_t ns) { time->ns = ns; } void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { time->ns = sec * BILLION + nsec; } uint64_t nstime_ns(const nstime_t *time) { return time->ns; } uint64_t nstime_msec(const nstime_t *time) { return time->ns / MILLION; } uint64_t nstime_sec(const nstime_t *time) { return time->ns / BILLION; } uint64_t nstime_nsec(const nstime_t *time) { return time->ns % BILLION; } void nstime_copy(nstime_t *time, const nstime_t *source) { *time = *source; } int nstime_compare(const nstime_t *a, const nstime_t *b) { return (a->ns > b->ns) - (a->ns < b->ns); } void nstime_add(nstime_t *time, const nstime_t *addend) { assert(UINT64_MAX - time->ns >= addend->ns); time->ns += addend->ns; } void nstime_iadd(nstime_t *time, uint64_t addend) { assert(UINT64_MAX - time->ns >= addend); time->ns += addend; } void nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { assert(nstime_compare(time, subtrahend) >= 0); time->ns -= subtrahend->ns; } void nstime_isubtract(nstime_t *time, uint64_t subtrahend) { assert(time->ns >= subtrahend); time->ns -= subtrahend; } void nstime_imultiply(nstime_t *time, uint64_t multiplier) { assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); time->ns *= multiplier; } void nstime_idivide(nstime_t *time, uint64_t divisor) { assert(divisor != 0); time->ns /= divisor; } uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor) { assert(divisor->ns != 0); return time->ns / divisor->ns; } #ifdef _WIN32 # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { FILETIME ft; uint64_t ticks_100ns; GetSystemTimeAsFileTime(&ft); ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; nstime_init(time, ticks_100ns * 100); } #elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE) # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); nstime_init2(time, ts.tv_sec, ts.tv_nsec); } #elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC) # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); nstime_init2(time, ts.tv_sec, ts.tv_nsec); } #elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME) # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { nstime_init(time, mach_absolute_time()); } #else # define NSTIME_MONOTONIC false static void nstime_get(nstime_t *time) 
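/* Final fallback when no monotonic time source above is available;
 * gettimeofday() offers only microsecond resolution and may jump backwards,
 * hence NSTIME_MONOTONIC is false here. */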
{ struct timeval tv; gettimeofday(&tv, NULL); nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); } #endif static bool nstime_monotonic_impl(void) { return NSTIME_MONOTONIC; #undef NSTIME_MONOTONIC } nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; static bool nstime_update_impl(nstime_t *time) { nstime_t old_time; nstime_copy(&old_time, time); nstime_get(time); /* Handle non-monotonic clocks. */ if (unlikely(nstime_compare(&old_time, time) > 0)) { nstime_copy(time, &old_time); return true; } return false; } nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl; jemalloc-sys-0.3.2/jemalloc/src/pages.c010064400007650000024000000342231340421341300161540ustar0000000000000000#define JEMALLOC_PAGES_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/malloc_io.h" #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT #include #ifdef __FreeBSD__ #include #endif #endif /******************************************************************************/ /* Data. */ /* Actual operating system page size, detected during bootstrap, <= PAGE. */ static size_t os_page; #ifndef _WIN32 # define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) # define PAGES_PROT_DECOMMIT (PROT_NONE) static int mmap_flags; #endif static bool os_overcommits; const char *thp_mode_names[] = { "default", "always", "never", "not supported" }; thp_mode_t opt_thp = THP_MODE_DEFAULT; thp_mode_t init_system_thp_mode; /* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */ static bool pages_can_purge_lazy_runtime = true; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ static void os_pages_unmap(void *addr, size_t size); /******************************************************************************/ static void * os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); assert(ALIGNMENT_CEILING(size, os_page) == size); assert(size != 0); if (os_overcommits) { *commit = true; } void *ret; #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is * given, it fails and returns NULL. */ ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), PAGE_READWRITE); #else /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ { int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; ret = mmap(addr, size, prot, mmap_flags, -1, 0); } assert(ret != NULL); if (ret == MAP_FAILED) { ret = NULL; } else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. 
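		 * Without MAP_FIXED the kernel treats addr purely as a hint,
		 * so a perfectly good mapping may be placed elsewhere; it is
		 * unmapped here and the caller falls back to the
		 * over-allocate-and-trim slow path.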
*/ os_pages_unmap(ret, size); ret = NULL; } #endif assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && ret == addr)); return ret; } static void * os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, bool *commit) { void *ret = (void *)((uintptr_t)addr + leadsize); assert(alloc_size >= leadsize + size); #ifdef _WIN32 os_pages_unmap(addr, alloc_size); void *new_addr = os_pages_map(ret, size, PAGE, commit); if (new_addr == ret) { return ret; } if (new_addr != NULL) { os_pages_unmap(new_addr, size); } return NULL; #else size_t trailsize = alloc_size - leadsize - size; if (leadsize != 0) { os_pages_unmap(addr, leadsize); } if (trailsize != 0) { os_pages_unmap((void *)((uintptr_t)ret + size), trailsize); } return ret; #endif } static void os_pages_unmap(void *addr, size_t size) { assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); assert(ALIGNMENT_CEILING(size, os_page) == size); #ifdef _WIN32 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) #else if (munmap(addr, size) == -1) #endif { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); malloc_printf(": Error in " #ifdef _WIN32 "VirtualFree" #else "munmap" #endif "(): %s\n", buf); if (opt_abort) { abort(); } } } static void * pages_map_slow(size_t size, size_t alignment, bool *commit) { size_t alloc_size = size + alignment - os_page; /* Beware size_t wrap-around. */ if (alloc_size < size) { return NULL; } void *ret; do { void *pages = os_pages_map(NULL, alloc_size, alignment, commit); if (pages == NULL) { return NULL; } size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages; ret = os_pages_trim(pages, alloc_size, leadsize, size, commit); } while (ret == NULL); assert(ret != NULL); assert(PAGE_ADDR2BASE(ret) == ret); return ret; } void * pages_map(void *addr, size_t size, size_t alignment, bool *commit) { assert(alignment >= PAGE); assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr); /* * Ideally, there would be a way to specify alignment to mmap() (like * NetBSD has), but in the absence of such a feature, we have to work * hard to efficiently create aligned mappings. The reliable, but * slow method is to create a mapping that is over-sized, then trim the * excess. However, that always results in one or two calls to * os_pages_unmap(), and it can leave holes in the process's virtual * memory map if memory grows downward. * * Optimistically try mapping precisely the right amount before falling * back to the slow method, with the expectation that the optimistic * approach works most of the time. */ void *ret = os_pages_map(addr, size, os_page, commit); if (ret == NULL || ret == addr) { return ret; } assert(addr == NULL); if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) { os_pages_unmap(ret, size); return pages_map_slow(size, alignment, commit); } assert(PAGE_ADDR2BASE(ret) == ret); return ret; } void pages_unmap(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); os_pages_unmap(addr, size); } static bool pages_commit_impl(void *addr, size_t size, bool commit) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); if (os_overcommits) { return true; } #ifdef _WIN32 return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); #else { int prot = commit ? 
PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, -1, 0); if (result == MAP_FAILED) { return true; } if (result != addr) { /* * We succeeded in mapping memory, but not in the right * place. */ os_pages_unmap(result, size); return true; } return false; } #endif } bool pages_commit(void *addr, size_t size) { return pages_commit_impl(addr, size, true); } bool pages_decommit(void *addr, size_t size) { return pages_commit_impl(addr, size, false); } bool pages_purge_lazy(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); if (!pages_can_purge_lazy) { return true; } if (!pages_can_purge_lazy_runtime) { /* * Built with lazy purge enabled, but detected it was not * supported on the current system. */ return true; } #ifdef _WIN32 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); return false; #elif defined(JEMALLOC_PURGE_MADVISE_FREE) return (madvise(addr, size, # ifdef MADV_FREE MADV_FREE # else JEMALLOC_MADV_FREE # endif ) != 0); #elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) return (madvise(addr, size, MADV_DONTNEED) != 0); #else not_reached(); #endif } bool pages_purge_forced(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); if (!pages_can_purge_forced) { return true; } #if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) return (madvise(addr, size, MADV_DONTNEED) != 0); #elif defined(JEMALLOC_MAPS_COALESCE) /* Try to overlay a new demand-zeroed mapping. */ return pages_commit(addr, size); #else not_reached(); #endif } static bool pages_huge_impl(void *addr, size_t size, bool aligned) { if (aligned) { assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size); } #ifdef JEMALLOC_HAVE_MADVISE_HUGE return (madvise(addr, size, MADV_HUGEPAGE) != 0); #else return true; #endif } bool pages_huge(void *addr, size_t size) { return pages_huge_impl(addr, size, true); } static bool pages_huge_unaligned(void *addr, size_t size) { return pages_huge_impl(addr, size, false); } static bool pages_nohuge_impl(void *addr, size_t size, bool aligned) { if (aligned) { assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size); } #ifdef JEMALLOC_HAVE_MADVISE_HUGE return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); #else return false; #endif } bool pages_nohuge(void *addr, size_t size) { return pages_nohuge_impl(addr, size, true); } static bool pages_nohuge_unaligned(void *addr, size_t size) { return pages_nohuge_impl(addr, size, false); } bool pages_dontdump(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); #ifdef JEMALLOC_MADVISE_DONTDUMP return madvise(addr, size, MADV_DONTDUMP) != 0; #else return false; #endif } bool pages_dodump(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); #ifdef JEMALLOC_MADVISE_DONTDUMP return madvise(addr, size, MADV_DODUMP) != 0; #else return false; #endif } static size_t os_page_detect(void) { #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); return si.dwPageSize; #elif defined(__FreeBSD__) return getpagesize(); #else long result = sysconf(_SC_PAGESIZE); if (result == -1) { return LG_PAGE; } return (size_t)result; #endif } #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT static bool os_overcommits_sysctl(void) { int vm_overcommit; size_t sz; sz = sizeof(vm_overcommit); #if defined(__FreeBSD__) && 
defined(VM_OVERCOMMIT) int mib[2]; mib[0] = CTL_VM; mib[1] = VM_OVERCOMMIT; if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) { return false; /* Error. */ } #else if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { return false; /* Error. */ } #endif return ((vm_overcommit & 0x3) == 0); } #endif #ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY /* * Use syscall(2) rather than {open,read,close}(2) when possible to avoid * reentry during bootstrapping if another library has interposed system call * wrappers. */ static bool os_overcommits_proc(void) { int fd; char buf[1]; #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) #if defined(O_CLOEXEC) fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); #else fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); if (fd != -1) { fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); } #endif #elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat) #if defined(O_CLOEXEC) fd = (int)syscall(SYS_openat, AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); #else fd = (int)syscall(SYS_openat, AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY); if (fd != -1) { fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); } #endif #else #if defined(O_CLOEXEC) fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); #else fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); if (fd != -1) { fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); } #endif #endif if (fd == -1) { return false; /* Error. */ } ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) syscall(SYS_close, fd); #else close(fd); #endif if (nread < 1) { return false; /* Error. */ } /* * /proc/sys/vm/overcommit_memory meanings: * 0: Heuristic overcommit. * 1: Always overcommit. * 2: Never overcommit. 
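	 * Only mode 2 disables overcommit, so the check below reports
	 * overcommit for '0' and '1', and strict accounting otherwise.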
	 */
	return (buf[0] == '0' || buf[0] == '1');
}
#endif

void
pages_set_thp_state(void *ptr, size_t size) {
	if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
		return;
	}
	assert(opt_thp != thp_mode_not_supported &&
	    init_system_thp_mode != thp_mode_not_supported);

	if (opt_thp == thp_mode_always &&
	    init_system_thp_mode != thp_mode_never) {
		assert(init_system_thp_mode == thp_mode_default);
		pages_huge_unaligned(ptr, size);
	} else if (opt_thp == thp_mode_never) {
		assert(init_system_thp_mode == thp_mode_default ||
		    init_system_thp_mode == thp_mode_always);
		pages_nohuge_unaligned(ptr, size);
	}
}

static void
init_thp_state(void) {
	if (!have_madvise_huge) {
		if (metadata_thp_enabled() && opt_abort) {
			malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
			abort();
		}
		goto label_error;
	}

	static const char sys_state_madvise[] = "always [madvise] never\n";
	static const char sys_state_always[] = "[always] madvise never\n";
	static const char sys_state_never[] = "always madvise [never]\n";
	char buf[sizeof(sys_state_madvise)];

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
	int fd = (int)syscall(SYS_open,
	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
	int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
	if (fd == -1) {
		goto label_error;
	}

	ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
	syscall(SYS_close, fd);
#else
	close(fd);
#endif

	if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_default;
	} else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_always;
	} else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_never;
	} else {
		goto label_error;
	}
	return;
label_error:
	opt_thp = init_system_thp_mode = thp_mode_not_supported;
}

bool
pages_boot(void) {
	os_page = os_page_detect();
	if (os_page > PAGE) {
		malloc_write("<jemalloc>: Unsupported system page size\n");
		if (opt_abort) {
			abort();
		}
		return true;
	}

#ifndef _WIN32
	mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
	os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
	os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
	if (os_overcommits) {
		mmap_flags |= MAP_NORESERVE;
	}
# endif
#else
	os_overcommits = false;
#endif

	init_thp_state();

	/* Detect lazy purge runtime support. */
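	/*
	 * The probe below maps one scratch page and attempts a single lazy
	 * purge on it. On Linux, for example, this exercises
	 * madvise(MADV_FREE), which kernels older than 4.5 reject; in that
	 * case the runtime flag is cleared and pages_purge_lazy() reports
	 * failure to its callers instead of issuing the syscall.
	 */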
	if (pages_can_purge_lazy) {
		bool committed = false;
		void *madv_free_page = os_pages_map(NULL, PAGE, PAGE,
		    &committed);
		if (madv_free_page == NULL) {
			return true;
		}
		assert(pages_can_purge_lazy_runtime);
		if (pages_purge_lazy(madv_free_page, PAGE)) {
			pages_can_purge_lazy_runtime = false;
		}
		os_pages_unmap(madv_free_page, PAGE);
	}

	return false;
}
jemalloc-sys-0.3.2/jemalloc/src/prng.c010064400007650000024000000002041340421340100160110ustar0000000000000000
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
jemalloc-sys-0.3.2/jemalloc/src/prof.c010064400007650000024000001653551340421341300160360ustar0000000000000000
#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"

/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
/*
 * We have a circular dependency -- jemalloc_internal.h tells us if we should
 * use libgcc's unwinding functionality, but after we've included that, we've
 * already hooked _Unwind_Backtrace.  We'll temporarily disable hooking.
 */
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for
 * more than one tdata at the same time, even though a gctx lock may be
 * acquired while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
/* Non static to enable profiling.
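 * (The symbol is referenced by name from the ctl/stats mutex-profiling
 * code as the global "prof" mutex, so it cannot be file-static.)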
*/ malloc_mutex_t bt2gctx_mtx; /* * Tree of all extant prof_tdata_t structures, regardless of state, * {attached,detached,expired}. */ static prof_tdata_tree_t tdatas; static malloc_mutex_t tdatas_mtx; static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; static malloc_mutex_t prof_dump_seq_mtx; static uint64_t prof_dump_seq; static uint64_t prof_dump_iseq; static uint64_t prof_dump_mseq; static uint64_t prof_dump_useq; /* * This buffer is rather large for stack allocation, so use a single buffer for * all profile dumps. */ static malloc_mutex_t prof_dump_mtx; static char prof_dump_buf[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PROF_DUMP_BUFSIZE #else 1 #endif ]; static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ static bool prof_booted = false; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /******************************************************************************/ /* Red-black trees. */ static int prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; uint64_t b_thr_uid = b->thr_uid; int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); if (ret == 0) { uint64_t a_thr_discrim = a->thr_discrim; uint64_t b_thr_discrim = b->thr_discrim; ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < b_thr_discrim); if (ret == 0) { uint64_t a_tctx_uid = a->tctx_uid; uint64_t b_tctx_uid = b->tctx_uid; ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid); } } return ret; } rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link, prof_tctx_comp) static int prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { unsigned a_len = a->bt.len; unsigned b_len = b->bt.len; unsigned comp_len = (a_len < b_len) ? a_len : b_len; int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); if (ret == 0) { ret = (a_len > b_len) - (a_len < b_len); } return ret; } rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, prof_gctx_comp) static int prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { int ret; uint64_t a_uid = a->thr_uid; uint64_t b_uid = b->thr_uid; ret = ((a_uid > b_uid) - (a_uid < b_uid)); if (ret == 0) { uint64_t a_discrim = a->thr_discrim; uint64_t b_discrim = b->thr_discrim; ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); } return ret; } rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, prof_tdata_comp) /******************************************************************************/ void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; cassert(config_prof); if (updated) { /* * Compute a new sample threshold. This isn't very important in * practice, because this function is rarely executed, so the * potential for sample bias is minimal except in contrived * programs. 
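		 * (Rolling back simply re-draws the geometric sampling
		 * interval via prof_sample_threshold_update(); see that
		 * function for the distribution involved.)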
*/ tdata = prof_tdata_get(tsd, true); if (tdata != NULL) { prof_sample_threshold_update(tdata); } } if ((uintptr_t)tctx > (uintptr_t)1U) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } } void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { prof_tctx_set(tsdn, ptr, usize, NULL, tctx); malloc_mutex_lock(tsdn, tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { tctx->cnts.accumobjs++; tctx->cnts.accumbytes += usize; } tctx->prepared = false; malloc_mutex_unlock(tsdn, tctx->tdata->lock); } void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } void bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); bt->vec = vec; bt->len = 0; } static void prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); if (tdata != NULL) { assert(!tdata->enq); tdata->enq = true; } malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); } static void prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; assert(tdata->enq); tdata->enq = false; idump = tdata->enq_idump; tdata->enq_idump = false; gdump = tdata->enq_gdump; tdata->enq_gdump = false; if (idump) { prof_idump(tsd_tsdn(tsd)); } if (gdump) { prof_gdump(tsd_tsdn(tsd)); } } } #ifdef JEMALLOC_PROF_LIBUNWIND void prof_backtrace(prof_bt_t *bt) { int nframes; cassert(config_prof); assert(bt->len == 0); assert(bt->vec != NULL); nframes = unw_backtrace(bt->vec, PROF_BT_MAX); if (nframes <= 0) { return; } bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); return _URC_NO_REASON; } static _Unwind_Reason_Code prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; void *ip; cassert(config_prof); ip = (void *)_Unwind_GetIP(context); if (ip == NULL) { return _URC_END_OF_STACK; } data->bt->vec[data->bt->len] = ip; data->bt->len++; if (data->bt->len == data->max) { return _URC_END_OF_STACK; } return _URC_NO_REASON; } void prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); _Unwind_Backtrace(prof_unwind_callback, &data); } #elif (defined(JEMALLOC_PROF_GCC)) void prof_backtrace(prof_bt_t *bt) { #define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ void *p; \ if (__builtin_frame_address(i) == 0) { \ return; \ } \ p = __builtin_return_address(i); \ if (p == NULL) { \ return; \ } \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ } else { \ return; \ } cassert(config_prof); BT_FRAME(0) BT_FRAME(1) BT_FRAME(2) BT_FRAME(3) BT_FRAME(4) BT_FRAME(5) BT_FRAME(6) BT_FRAME(7) BT_FRAME(8) BT_FRAME(9) BT_FRAME(10) BT_FRAME(11) BT_FRAME(12) BT_FRAME(13) BT_FRAME(14) BT_FRAME(15) BT_FRAME(16) BT_FRAME(17) BT_FRAME(18) BT_FRAME(19) BT_FRAME(20) BT_FRAME(21) 
BT_FRAME(22) BT_FRAME(23) BT_FRAME(24) BT_FRAME(25) BT_FRAME(26) BT_FRAME(27) BT_FRAME(28) BT_FRAME(29) BT_FRAME(30) BT_FRAME(31) BT_FRAME(32) BT_FRAME(33) BT_FRAME(34) BT_FRAME(35) BT_FRAME(36) BT_FRAME(37) BT_FRAME(38) BT_FRAME(39) BT_FRAME(40) BT_FRAME(41) BT_FRAME(42) BT_FRAME(43) BT_FRAME(44) BT_FRAME(45) BT_FRAME(46) BT_FRAME(47) BT_FRAME(48) BT_FRAME(49) BT_FRAME(50) BT_FRAME(51) BT_FRAME(52) BT_FRAME(53) BT_FRAME(54) BT_FRAME(55) BT_FRAME(56) BT_FRAME(57) BT_FRAME(58) BT_FRAME(59) BT_FRAME(60) BT_FRAME(61) BT_FRAME(62) BT_FRAME(63) BT_FRAME(64) BT_FRAME(65) BT_FRAME(66) BT_FRAME(67) BT_FRAME(68) BT_FRAME(69) BT_FRAME(70) BT_FRAME(71) BT_FRAME(72) BT_FRAME(73) BT_FRAME(74) BT_FRAME(75) BT_FRAME(76) BT_FRAME(77) BT_FRAME(78) BT_FRAME(79) BT_FRAME(80) BT_FRAME(81) BT_FRAME(82) BT_FRAME(83) BT_FRAME(84) BT_FRAME(85) BT_FRAME(86) BT_FRAME(87) BT_FRAME(88) BT_FRAME(89) BT_FRAME(90) BT_FRAME(91) BT_FRAME(92) BT_FRAME(93) BT_FRAME(94) BT_FRAME(95) BT_FRAME(96) BT_FRAME(97) BT_FRAME(98) BT_FRAME(99) BT_FRAME(100) BT_FRAME(101) BT_FRAME(102) BT_FRAME(103) BT_FRAME(104) BT_FRAME(105) BT_FRAME(106) BT_FRAME(107) BT_FRAME(108) BT_FRAME(109) BT_FRAME(110) BT_FRAME(111) BT_FRAME(112) BT_FRAME(113) BT_FRAME(114) BT_FRAME(115) BT_FRAME(116) BT_FRAME(117) BT_FRAME(118) BT_FRAME(119) BT_FRAME(120) BT_FRAME(121) BT_FRAME(122) BT_FRAME(123) BT_FRAME(124) BT_FRAME(125) BT_FRAME(126) BT_FRAME(127) #undef BT_FRAME } #else void prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } static prof_gctx_t * prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (gctx == NULL) { return NULL; } gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ gctx->nlimbo = 1; tctx_tree_new(&gctx->tctxs); /* Duplicate bt. */ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; return gctx; } static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, prof_tdata_t *tdata) { cassert(config_prof); /* * Check that gctx is still unused by any thread cache before destroying * it. prof_lookup() increments gctx->nlimbo in order to avoid a race * condition with this function, as does prof_tctx_destroy() in order to * avoid a race between the main body of prof_tctx_destroy() and entry * into this function. */ prof_enter(tsd, tdata_self); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { not_reached(); } prof_leave(tsd, tdata_self); /* Destroy gctx. */ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); } else { /* * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). 
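		 * Put differently, every prospective creator or destroyer of
		 * this gctx holds a transient +1 "limbo" reference on it;
		 * dropping ours here may still leave the gctx alive for a
		 * thread that raced us.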
*/ gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); } } static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); if (opt_prof_accum) { return false; } if (tctx->cnts.curobjs != 0) { return false; } if (tctx->prepared) { return false; } return true; } static bool prof_gctx_should_destroy(prof_gctx_t *gctx) { if (opt_prof_accum) { return false; } if (!tctx_tree_empty(&gctx->tctxs)) { return false; } if (gctx->nlimbo != 0) { return false; } return true; } static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); assert(tctx->cnts.accumobjs == 0); assert(tctx->cnts.accumbytes == 0); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: tctx_tree_remove(&gctx->tctxs, tctx); destroy_tctx = true; if (prof_gctx_should_destroy(gctx)) { /* * Increment gctx->nlimbo in order to keep another * thread from winning the race to destroy gctx while * this one has gctx->lock dropped. Without this, it * would be possible for another thread to: * * 1) Sample an allocation associated with gctx. * 2) Deallocate the sampled object. * 3) Successfully prof_gctx_try_destroy(gctx). * * The result would be that gctx no longer exists by the * time this thread accesses it in * prof_gctx_try_destroy(). */ gctx->nlimbo++; destroy_gctx = true; } else { destroy_gctx = false; } break; case prof_tctx_state_dumping: /* * A dumping thread needs tctx to remain valid until dumping * has finished. Change state such that the dumping thread will * complete destruction during a late dump iteration phase. */ tctx->state = prof_tctx_state_purgatory; destroy_tctx = false; destroy_gctx = false; break; default: not_reached(); destroy_tctx = false; destroy_gctx = false; } malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); if (destroy_gctx) { prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, tdata); } malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, false); } if (destroy_tctx) { idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); } } static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { prof_gctx_t *p; void *v; } gctx, tgctx; union { prof_bt_t *p; void *v; } btkey; bool new_gctx; prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ prof_leave(tsd, tdata); tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); if (tgctx.v == NULL) { return true; } prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { gctx.p = tgctx.p; btkey.p = &gctx.p->bt; if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { /* OOM. 
*/ prof_leave(tsd, tdata); idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, true, true); return true; } new_gctx = true; } else { new_gctx = false; } } else { tgctx.v = NULL; new_gctx = false; } if (!new_gctx) { /* * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); new_gctx = false; if (tgctx.v != NULL) { /* Lost race to insert. */ idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, true); } } prof_leave(tsd, tdata); *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; return false; } prof_tctx_t * prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { prof_tctx_t *p; void *v; } ret; prof_tdata_t *tdata; bool not_found; cassert(config_prof); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return NULL; } malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); if (!not_found) { /* Note double negative! */ ret.p->prepared = true; } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { void *btkey; prof_gctx_t *gctx; bool new_gctx, error; /* * This thread's cache lacks bt. Look for it in the global * cache. */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, &new_gctx)) { return NULL; } /* Link a prof_tctx_t into gctx for this thread. */ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, arena_ichoose(tsd, NULL), true); if (ret.p == NULL) { if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } return NULL; } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; ret.p->thr_discrim = tdata->thr_discrim; memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); ret.p->gctx = gctx; ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; ret.p->state = prof_tctx_state_initializing; malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (error) { if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); return NULL; } malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } return ret.p; } /* * The bodies of this function and prof_leakcheck() are compiled out unless heap * profiling is enabled, so that it is possible to compile jemalloc with * floating point support completely disabled. Avoiding floating point code is * important on memory-constrained systems, but it also enables a workaround for * versions of glibc that don't properly save/restore floating point registers * during dynamic lazy symbol loading (which internally calls into whatever * malloc implementation happens to be integrated into the application). Note * that some compilers (e.g. gcc 4.8) may use floating point registers for fast * memory moves, so jemalloc must be compiled with such optimizations disabled * (e.g. * -mno-sse) in order for the workaround to be complete. */ void prof_sample_threshold_update(prof_tdata_t *tdata) { #ifdef JEMALLOC_PROF uint64_t r; double u; if (!config_prof) { return; } if (lg_prof_sample == 0) { tdata->bytes_until_sample = 0; return; } /* * Compute sample interval as a geometrically distributed random * variable with mean (2^lg_prof_sample). 
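	 *
	 * A concrete instance of the formula below, assuming the default
	 * lg_prof_sample of 19 (one sample per ~512 KiB allocated on
	 * average): a uniform draw of u = 0.5 yields
	 *
	 *   floor(log(0.5) / log(1 - 2^-19)) + 1 ~= 363409
	 *
	 * bytes until the next sample.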
	 *
	 *                              __        __
	 *                             |  log(u)  |                     1
	 * tdata->bytes_until_sample = | -------- |, where p = ----------------
	 *                             | log(1-p) |             lg_prof_sample
	 *                                                     2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
	r = prng_lg_range_u64(&tdata->prng_state, 53);
	u = (double)r * (1.0/9007199254740992.0L);
	tdata->bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return NULL;
}

size_t
prof_tdata_count(void) {
	size_t tdata_count = 0;
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	malloc_mutex_lock(tsdn, &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);

	return tdata_count;
}

size_t
prof_bt_count(void) {
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return 0;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	return bt_count;
}
#endif

static int
prof_dump_open_impl(bool propagate_err, const char *filename) {
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && !propagate_err) {
		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
		    filename);
		if (opt_abort) {
			abort();
		}
	}

	return fd;
}
prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;

static bool
prof_dump_flush(bool propagate_err) {
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (!propagate_err) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort) {
				abort();
			}
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return ret;
}

static bool
prof_dump_close(bool propagate_err) {
	bool ret;

	assert(prof_dump_fd != -1);
	ret = prof_dump_flush(propagate_err);
	close(prof_dump_fd);
	prof_dump_fd = -1;

	return ret;
}

static bool
prof_dump_write(bool propagate_err, const char *s) {
	size_t i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
			if (prof_dump_flush(propagate_err) && propagate_err) {
				return true;
			}
		}

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return false;
}

JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{ bool ret; va_list ap; char buf[PROF_PRINTF_BUFSIZE]; va_start(ap, format); malloc_vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); ret = prof_dump_write(propagate_err, buf); return ret; } static void prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: malloc_mutex_unlock(tsdn, tctx->gctx->lock); return; case prof_tctx_state_nominal: tctx->state = prof_tctx_state_dumping; malloc_mutex_unlock(tsdn, tctx->gctx->lock); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { tdata->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; tdata->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; } break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: not_reached(); } } static void prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { malloc_mutex_assert_owner(tsdn, gctx->lock); gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; } } static prof_tctx_t * prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); break; default: not_reached(); } return NULL; } struct prof_tctx_dump_iter_arg_s { tsdn_t *tsdn; bool propagate_err; }; static prof_tctx_t * prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { struct prof_tctx_dump_iter_arg_s *arg = (struct prof_tctx_dump_iter_arg_s *)opaque; malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: case prof_tctx_state_nominal: /* Not captured by this dump. */ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: if (prof_dump_printf(arg->propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes)) { return tctx; } break; default: not_reached(); } return NULL; } static prof_tctx_t * prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ break; case prof_tctx_state_dumping: tctx->state = prof_tctx_state_nominal; break; case prof_tctx_state_purgatory: ret = tctx; goto label_return; default: not_reached(); } ret = NULL; label_return: return ret; } static void prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); malloc_mutex_lock(tsdn, gctx->lock); /* * Increment nlimbo so that gctx won't go away before dump. * Additionally, link gctx into the dump list so that it is included in * prof_dump()'s second pass. 
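	 * (Pass one, prof_dump_prep(), sums the per-thread tctx counters
	 * into each gctx; pass two, prof_dump_file(), walks this list to
	 * emit one "@ <addr> ..." backtrace record per gctx.)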
*/ gctx->nlimbo++; gctx_tree_insert(gctxs, gctx); memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); malloc_mutex_unlock(tsdn, gctx->lock); } struct prof_gctx_merge_iter_arg_s { tsdn_t *tsdn; size_t leak_ngctx; }; static prof_gctx_t * prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { struct prof_gctx_merge_iter_arg_s *arg = (struct prof_gctx_merge_iter_arg_s *)opaque; malloc_mutex_lock(arg->tsdn, gctx->lock); tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, (void *)arg->tsdn); if (gctx->cnt_summed.curobjs != 0) { arg->leak_ngctx++; } malloc_mutex_unlock(arg->tsdn, gctx->lock); return NULL; } static void prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); prof_gctx_t *gctx; /* * Standard tree iteration won't work here, because as soon as we * decrement gctx->nlimbo and unlock gctx, another thread can * concurrently destroy it, which will corrupt the tree. Therefore, * tear down the tree one node at a time during iteration. */ while ((gctx = gctx_tree_first(gctxs)) != NULL) { gctx_tree_remove(gctxs, gctx); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); { prof_tctx_t *next; next = NULL; do { prof_tctx_t *to_destroy = tctx_tree_iter(&gctx->tctxs, next, prof_tctx_finish_iter, (void *)tsd_tsdn(tsd)); if (to_destroy != NULL) { next = tctx_tree_next(&gctx->tctxs, to_destroy); tctx_tree_remove(&gctx->tctxs, to_destroy); idalloctm(tsd_tsdn(tsd), to_destroy, NULL, NULL, true, true); } else { next = NULL; } } while (next != NULL); } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } else { malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } } } struct prof_tdata_merge_iter_arg_s { tsdn_t *tsdn; prof_cnt_t cnt_all; }; static prof_tdata_t * prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *opaque) { struct prof_tdata_merge_iter_arg_s *arg = (struct prof_tdata_merge_iter_arg_s *)opaque; malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; union { prof_tctx_t *p; void *v; } tctx; tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) { prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); } arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } } else { tdata->dumping = false; } malloc_mutex_unlock(arg->tsdn, tdata->lock); return NULL; } static prof_tdata_t * prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { bool propagate_err = *(bool *)arg; if (!tdata->dumping) { return NULL; } if (prof_dump_printf(propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", tdata->thr_uid, tdata->cnt_summed.curobjs, tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, tdata->cnt_summed.accumbytes, (tdata->thread_name != NULL) ? " " : "", (tdata->thread_name != NULL) ? 
tdata->thread_name : "")) { return tdata; } return NULL; } static bool prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { return true; } malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); malloc_mutex_unlock(tsdn, &tdatas_mtx); return ret; } prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; static bool prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); malloc_mutex_assert_owner(tsdn, gctx->lock); /* Avoid dumping such gctx's that have no useful data. */ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { assert(gctx->cnt_summed.curobjs == 0); assert(gctx->cnt_summed.curbytes == 0); assert(gctx->cnt_summed.accumobjs == 0); assert(gctx->cnt_summed.accumbytes == 0); ret = false; goto label_return; } if (prof_dump_printf(propagate_err, "@")) { ret = true; goto label_return; } for (i = 0; i < bt->len; i++) { if (prof_dump_printf(propagate_err, " %#"FMTxPTR, (uintptr_t)bt->vec[i])) { ret = true; goto label_return; } } if (prof_dump_printf(propagate_err, "\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { ret = true; goto label_return; } prof_tctx_dump_iter_arg.tsdn = tsdn; prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } ret = false; label_return: return ret; } #ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int prof_open_maps(const char *format, ...) { int mfd; va_list ap; char filename[PATH_MAX + 1]; va_start(ap, format); malloc_vsnprintf(filename, sizeof(filename), format, ap); va_end(ap); #if defined(O_CLOEXEC) mfd = open(filename, O_RDONLY | O_CLOEXEC); #else mfd = open(filename, O_RDONLY); if (mfd != -1) { fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); } #endif return mfd; } #endif static int prof_getpid(void) { #ifdef _WIN32 return GetCurrentProcessId(); #else return getpid(); #endif } static bool prof_dump_maps(bool propagate_err) { bool ret; int mfd; cassert(config_prof); #ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); #elif defined(_WIN32) mfd = -1; // Not implemented #else { int pid = prof_getpid(); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); if (mfd == -1) { mfd = prof_open_maps("/proc/%d/maps", pid); } } #endif if (mfd != -1) { ssize_t nread; if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && propagate_err) { ret = true; goto label_return; } nread = 0; do { prof_dump_buf_end += nread; if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { /* Make space in prof_dump_buf before read(). 
				 */
				if (prof_dump_flush(propagate_err) &&
				    propagate_err) {
					ret = true;
					goto label_return;
				}
			}
			nread = malloc_read_fd(mfd,
			    &prof_dump_buf[prof_dump_buf_end],
			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
		} while (nread > 0);
	} else {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	if (mfd != -1) {
		close(mfd);
	}
	return ret;
}

/*
 * See prof_sample_threshold_update() comment for why the body of this function
 * is conditionally compiled.
 */
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
    const char *filename) {
#ifdef JEMALLOC_PROF
	/*
	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result
	 * may differ slightly from what jeprof reports, because here we scale
	 * the summary values, whereas jeprof scales each context individually
	 * and reports the sums of the scaled values.
	 */
	if (cnt_all->curbytes != 0) {
		double sample_period = (double)((uint64_t)1 << lg_prof_sample);
		double ratio = (((double)cnt_all->curbytes) /
		    (double)cnt_all->curobjs) / sample_period;
		double scale_factor = 1.0 / (1.0 - exp(-ratio));
		uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
		    * scale_factor);
		uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
		    scale_factor);

		malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
		    " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
		    curbytes, (curbytes != 1) ? "s" : "", curobjs,
		    (curobjs != 1) ? "s" : "", leak_ngctx,
		    (leak_ngctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
		    filename);
	}
#endif
}

struct prof_gctx_dump_iter_arg_s {
	tsdn_t	*tsdn;
	bool	propagate_err;
};

static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	prof_gctx_t *ret;
	struct prof_gctx_dump_iter_arg_s *arg =
	    (struct prof_gctx_dump_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);

	if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
	    gctxs)) {
		ret = gctx;
		goto label_return;
	}

	ret = NULL;
label_return:
	malloc_mutex_unlock(arg->tsdn, gctx->lock);
	return ret;
}

static void
prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    prof_gctx_tree_t *gctxs) {
	size_t tabind;
	union {
		prof_gctx_t	*p;
		void		*v;
	} gctx;

	prof_enter(tsd, tdata);

	/*
	 * Put gctx's in limbo and clear their counters in preparation for
	 * summing.
	 */
	gctx_tree_new(gctxs);
	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
	}

	/*
	 * Iterate over tdatas, and for the non-expired ones snapshot their
	 * tctx stats and merge them into the associated gctx's.
	 */
	prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
	    (void *)prof_tdata_merge_iter_arg);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	/* Merge tctx stats into gctx's. */
	prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_merge_iter_arg->leak_ngctx = 0;
	gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
	    (void *)prof_gctx_merge_iter_arg);

	prof_leave(tsd, tdata);
}

static bool
prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
    prof_gctx_tree_t *gctxs) {
	/* Create dump file. */
	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
		return true;
	}

	/* Dump profile header. */
	if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
	    &prof_tdata_merge_iter_arg->cnt_all)) {
		goto label_write_error;
	}

	/* Dump per gctx profile stats. */
	prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_dump_iter_arg->propagate_err = propagate_err;
	if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
	    (void *)prof_gctx_dump_iter_arg) != NULL) {
		goto label_write_error;
	}

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err)) {
		goto label_write_error;
	}

	if (prof_dump_close(propagate_err)) {
		return true;
	}

	return false;
label_write_error:
	prof_dump_close(propagate_err);
	return true;
}

static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);

	prof_gctx_tree_t gctxs;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck,
	    tdata, &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
	    &prof_gctx_dump_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
	post_reentrancy(tsd);

	if (err) {
		return true;
	}

	if (leakcheck) {
		prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
		    prof_gctx_merge_iter_arg.leak_ngctx, filename);
	}
	return false;
}

#ifdef JEMALLOC_JET
void
prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes) {
	tsd_t *tsd;
	prof_tdata_t *tdata;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	prof_gctx_tree_t gctxs;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		if (curobjs != NULL) {
			*curobjs = 0;
		}
		if (curbytes != NULL) {
			*curbytes = 0;
		}
		if (accumobjs != NULL) {
			*accumobjs = 0;
		}
		if (accumbytes != NULL) {
			*accumbytes = 0;
		}
		return;
	}

	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	if (curobjs != NULL) {
		*curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
	}
	if (curbytes != NULL) {
		*curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
	}
	if (accumobjs != NULL) {
		*accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
	}
	if (accumbytes != NULL) {
		*accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
	}
}
#endif

#define DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
#define VSEQ_INVALID		UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, uint64_t vseq) {
	cassert(config_prof);

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c.heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}

static void
prof_fdump(void) {
	tsd_t *tsd;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);
	assert(opt_prof_final);
	assert(opt_prof_prefix[0] != '\0');

	if (!prof_booted) {
		return;
	}
	tsd = tsd_fetch();
	assert(tsd_reentrancy_level_get(tsd) == 0);

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump_filename(filename, 'f', VSEQ_INVALID);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump(tsd, false, filename, opt_prof_leak);
}

bool
prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
	cassert(config_prof);

#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
	    WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
		return true;
	}
	prof_accum->accumbytes = 0;
#else
	atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
#endif
	return false;
}

void
prof_idump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	if (tsd_reentrancy_level_get(tsd) > 0) {
		return;
	}

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_idump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		char filename[PATH_MAX + 1];
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}

bool
prof_mdump(tsd_t *tsd, const char *filename) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);

	if (!opt_prof || !prof_booted) {
		return true;
	}
	char filename_buf[DUMP_FILENAME_BUFSIZE];
	if (filename == NULL) {
		/* No filename specified, so automatically generate one.
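		 * e.g. with the default opt_prof_prefix "jeprof" this
		 * produces names of the form <prefix>.<pid>.<seq>.m<mseq>.heap
		 * via prof_dump_filename(), such as "jeprof.1234.7.m3.heap".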
*/ if (opt_prof_prefix[0] == '\0') { return true; } malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } return prof_dump(tsd, true, filename, false); } void prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { return; } tsd = tsdn_tsd(tsdn); if (tsd_reentrancy_level_get(tsd) > 0) { return; } tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return; } if (tdata->enq) { tdata->enq_gdump = true; return; } if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } static void prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); } static bool prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); if (bt1->len != bt2->len) { return false; } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } static uint64_t prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; malloc_mutex_lock(tsdn, &next_thr_uid_mtx); thr_uid = next_thr_uid; next_thr_uid++; malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); return thr_uid; } static prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active) { prof_tdata_t *tdata; cassert(config_prof); /* Initialize an empty cache for this thread. 
*/ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) { return NULL; } tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; tdata->thr_discrim = thr_discrim; tdata->thread_name = thread_name; tdata->attached = true; tdata->expired = false; tdata->tctx_uid_next = 0; if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) { idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); return NULL; } tdata->prng_state = (uint64_t)(uintptr_t)tdata; prof_sample_threshold_update(tdata); tdata->enq = false; tdata->enq_idump = false; tdata->enq_gdump = false; tdata->dumping = false; tdata->active = active; malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_insert(&tdatas, tdata); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); return tdata; } prof_tdata_t * prof_tdata_init(tsd_t *tsd) { return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); } static bool prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) { return false; } if (ckh_count(&tdata->bt2tctx) != 0) { return false; } return true; } static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsdn, tdata->lock); return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_remove(&tdatas, tdata); assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); if (tdata->thread_name != NULL) { idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, true); } ckh_delete(tsd, &tdata->bt2tctx); idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); } static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } static void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. */ if (!destroy_tdata) { tdata->attached = false; } tsd_prof_tdata_set(tsd, NULL); } else { destroy_tdata = false; } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, true); } } prof_tdata_t * prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; bool active = tdata->active; prof_tdata_detach(tsd, tdata); return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, active); } static bool prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tsdn, tdata->lock); if (!tdata->expired) { tdata->expired = true; destroy_tdata = tdata->attached ? 
false : prof_tdata_should_destroy(tsdn, tdata, false); } else { destroy_tdata = false; } malloc_mutex_unlock(tsdn, tdata->lock); return destroy_tdata; } static prof_tdata_t * prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); } void prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; assert(lg_sample < (sizeof(uint64_t) << 3)); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); lg_prof_sample = lg_sample; next = NULL; do { prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, prof_tdata_reset_iter, (void *)tsd); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); } else { next = NULL; } } while (next != NULL); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); } void prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; if (!config_prof) { return; } tdata = tsd_prof_tdata_get(tsd); if (tdata != NULL) { prof_tdata_detach(tsd, tdata); } } bool prof_active_get(tsdn_t *tsdn) { bool prof_active_current; malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_current = prof_active; malloc_mutex_unlock(tsdn, &prof_active_mtx); return prof_active_current; } bool prof_active_set(tsdn_t *tsdn, bool active) { bool prof_active_old; malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_old = prof_active; prof_active = active; malloc_mutex_unlock(tsdn, &prof_active_mtx); return prof_active_old; } const char * prof_thread_name_get(tsd_t *tsd) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return ""; } return (tdata->thread_name != NULL ? tdata->thread_name : ""); } static char * prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { char *ret; size_t size; if (thread_name == NULL) { return NULL; } size = strlen(thread_name) + 1; if (size == 1) { return ""; } ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (ret == NULL) { return NULL; } memcpy(ret, thread_name, size); return ret; } int prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; unsigned i; char *s; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return EAGAIN; } /* Validate input. 
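	 * Only printable characters and blanks are accepted: e.g.
	 * "worker 1" (even with an embedded tab) passes, while a name
	 * containing '\n' is rejected with EFAULT below.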
*/ if (thread_name == NULL) { return EFAULT; } for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; if (!isgraph(c) && !isblank(c)) { return EFAULT; } } s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); if (s == NULL) { return EAGAIN; } if (tdata->thread_name != NULL) { idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, true); tdata->thread_name = NULL; } if (strlen(s) > 0) { tdata->thread_name = s; } return 0; } bool prof_thread_active_get(tsd_t *tsd) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return false; } return tdata->active; } bool prof_thread_active_set(tsd_t *tsd, bool active) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return true; } tdata->active = active; return false; } bool prof_thread_active_init_get(tsdn_t *tsdn) { bool active_init; malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init = prof_thread_active_init; malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return active_init; } bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { bool active_init_old; malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return active_init_old; } bool prof_gdump_get(tsdn_t *tsdn) { bool prof_gdump_current; malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_current = prof_gdump_val; malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return prof_gdump_current; } bool prof_gdump_set(tsdn_t *tsdn, bool gdump) { bool prof_gdump_old; malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return prof_gdump_old; } void prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, sizeof(PROF_PREFIX_DEFAULT)); } void prof_boot1(void) { cassert(config_prof); /* * opt_prof must be in its final state before any arenas are * initialized, so this function must be executed early. */ if (opt_prof_leak && !opt_prof) { /* * Enable opt_prof, but in such a way that profiles are never * automatically dumped. 
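		 * e.g. MALLOC_CONF=prof_leak:true,prof_final:true enables
		 * sampling and the atexit() leak report while leaving
		 * interval- and gdump-triggered dumps off.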
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}

bool
prof_boot2(tsd_t *tsd) {
	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
		    WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
		    WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx,
		    "prof_thread_active_init",
		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
		    malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp)) {
			return true;
		}
		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
		    WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
			return true;
		}

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
		    WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
			return true;
		}

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
		    WITNESS_RANK_PROF_NEXT_THR_UID,
		    malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
		    WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
			return true;
		}
		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
		    WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}

		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (gctx_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
			    WITNESS_RANK_PROF_GCTX,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (tdata_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
			    WITNESS_RANK_PROF_TDATA,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
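	 * Otherwise the first real backtrace could have libgcc allocate
	 * that state while jemalloc is itself mid-allocation, recursing
	 * into malloc during a sample.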
*/ _Unwind_Backtrace(prof_unwind_init_callback, NULL); #endif prof_booted = true; return false; } void prof_prefork0(tsdn_t *tsdn) { if (config_prof && opt_prof) { unsigned i; malloc_mutex_prefork(tsdn, &prof_dump_mtx); malloc_mutex_prefork(tsdn, &bt2gctx_mtx); malloc_mutex_prefork(tsdn, &tdatas_mtx); for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_prefork(tsdn, &tdata_locks[i]); } for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_prefork(tsdn, &gctx_locks[i]); } } } void prof_prefork1(tsdn_t *tsdn) { if (config_prof && opt_prof) { malloc_mutex_prefork(tsdn, &prof_active_mtx); malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); malloc_mutex_prefork(tsdn, &prof_gdump_mtx); malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); } } void prof_postfork_parent(tsdn_t *tsdn) { if (config_prof && opt_prof) { unsigned i; malloc_mutex_postfork_parent(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); } malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void prof_postfork_child(tsdn_t *tsdn) { if (config_prof && opt_prof) { unsigned i; malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); malloc_mutex_postfork_child(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); } malloc_mutex_postfork_child(tsdn, &tdatas_mtx); malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } /******************************************************************************/ jemalloc-sys-0.3.2/jemalloc/src/rtree.c010064400007650000024000000211451340421341300161750ustar0000000000000000#define JEMALLOC_RTREE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" /* * Only the most significant bits of keys passed to rtree_{read,write}() are * used. */ bool rtree_new(rtree_t *rtree, bool zeroed) { #ifdef JEMALLOC_JET if (!zeroed) { memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */ } #else assert(zeroed); #endif if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, malloc_mutex_rank_exclusive)) { return true; } return false; } static rtree_node_elm_t * rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * sizeof(rtree_node_elm_t), CACHELINE); } rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; static void rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { /* Nodes are never deleted during normal operation. 
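* (Only the JEMALLOC_JET test harness tears an rtree down, via rtree_delete() with a test-installed dalloc hook; this default implementation simply asserts.)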
*/ not_reached(); } UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = rtree_node_dalloc_impl; static rtree_leaf_elm_t * rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * sizeof(rtree_leaf_elm_t), CACHELINE); } rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; static void rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { /* Leaves are never deleted during normal operation. */ not_reached(); } UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = rtree_leaf_dalloc_impl; #ifdef JEMALLOC_JET # if RTREE_HEIGHT > 1 static void rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, unsigned level) { size_t nchildren = ZU(1) << rtree_levels[level].bits; if (level + 2 < RTREE_HEIGHT) { for (size_t i = 0; i < nchildren; i++) { rtree_node_elm_t *node = (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, ATOMIC_RELAXED); if (node != NULL) { rtree_delete_subtree(tsdn, rtree, node, level + 1); } } } else { for (size_t i = 0; i < nchildren; i++) { rtree_leaf_elm_t *leaf = (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, ATOMIC_RELAXED); if (leaf != NULL) { rtree_leaf_dalloc(tsdn, rtree, leaf); } } } if (subtree != rtree->root) { rtree_node_dalloc(tsdn, rtree, subtree); } } # endif void rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { # if RTREE_HEIGHT > 1 rtree_delete_subtree(tsdn, rtree, rtree->root, 0); # endif } #endif static rtree_node_elm_t * rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, atomic_p_t *elmp) { malloc_mutex_lock(tsdn, &rtree->init_lock); /* * If *elmp is non-null, then it was initialized with the init lock * held, so we can get by with 'relaxed' here. */ rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); if (node == NULL) { node = rtree_node_alloc(tsdn, rtree, ZU(1) << rtree_levels[level].bits); if (node == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; } /* * Even though we hold the lock, a later reader might not; we * need release semantics. */ atomic_store_p(elmp, node, ATOMIC_RELEASE); } malloc_mutex_unlock(tsdn, &rtree->init_lock); return node; } static rtree_leaf_elm_t * rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { malloc_mutex_lock(tsdn, &rtree->init_lock); /* * If *elmp is non-null, then it was initialized with the init lock * held, so we can get by with 'relaxed' here. */ rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); if (leaf == NULL) { leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << rtree_levels[RTREE_HEIGHT-1].bits); if (leaf == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; } /* * Even though we hold the lock, a later reader might not; we * need release semantics. 
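* The release store below pairs with the ATOMIC_ACQUIRE load in rtree_child_leaf_tryread(), so a reader that observes the new pointer also observes the leaf's initialized contents.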
*/ atomic_store_p(elmp, leaf, ATOMIC_RELEASE); } malloc_mutex_unlock(tsdn, &rtree->init_lock); return leaf; } static bool rtree_node_valid(rtree_node_elm_t *node) { return ((uintptr_t)node != (uintptr_t)0); } static bool rtree_leaf_valid(rtree_leaf_elm_t *leaf) { return ((uintptr_t)leaf != (uintptr_t)0); } static rtree_node_elm_t * rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *node; if (dependent) { node = (rtree_node_elm_t *)atomic_load_p(&elm->child, ATOMIC_RELAXED); } else { node = (rtree_node_elm_t *)atomic_load_p(&elm->child, ATOMIC_ACQUIRE); } assert(!dependent || node != NULL); return node; } static rtree_node_elm_t * rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent) { rtree_node_elm_t *node; node = rtree_child_node_tryread(elm, dependent); if (!dependent && unlikely(!rtree_node_valid(node))) { node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); } assert(!dependent || node != NULL); return node; } static rtree_leaf_elm_t * rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_leaf_elm_t *leaf; if (dependent) { leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, ATOMIC_RELAXED); } else { leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, ATOMIC_ACQUIRE); } assert(!dependent || leaf != NULL); return leaf; } static rtree_leaf_elm_t * rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent) { rtree_leaf_elm_t *leaf; leaf = rtree_child_leaf_tryread(elm, dependent); if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { leaf = rtree_leaf_init(tsdn, rtree, &elm->child); } assert(!dependent || leaf != NULL); return leaf; } rtree_leaf_elm_t * rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing) { rtree_node_elm_t *node; rtree_leaf_elm_t *leaf; #if RTREE_HEIGHT > 1 node = rtree->root; #else leaf = rtree->root; #endif if (config_debug) { uintptr_t leafkey = rtree_leafkey(key); for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { assert(rtree_ctx->cache[i].leafkey != leafkey); } for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { assert(rtree_ctx->l2_cache[i].leafkey != leafkey); } } #define RTREE_GET_CHILD(level) { \ assert(level < RTREE_HEIGHT-1); \ if (level != 0 && !dependent && \ unlikely(!rtree_node_valid(node))) { \ return NULL; \ } \ uintptr_t subkey = rtree_subkey(key, level); \ if (level + 2 < RTREE_HEIGHT) { \ node = init_missing ? \ rtree_child_node_read(tsdn, rtree, \ &node[subkey], level, dependent) : \ rtree_child_node_tryread(&node[subkey], \ dependent); \ } else { \ leaf = init_missing ? \ rtree_child_leaf_read(tsdn, rtree, \ &node[subkey], level, dependent) : \ rtree_child_leaf_tryread(&node[subkey], \ dependent); \ } \ } /* * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss): * (1) evict last entry in L2 cache; (2) move the collision slot from L1 * cache down to L2; and 3) fill L1. 
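* Concretely: the memmove below shifts l2_cache[0..RTREE_CTX_NCACHE_L2-2] up by one slot (evicting the last entry), the L1 slot selected by rtree_cache_direct_map(key) is demoted into l2_cache[0], and that L1 slot is refilled with the leaf just found.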
*/ #define RTREE_GET_LEAF(level) { \ assert(level == RTREE_HEIGHT-1); \ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ return NULL; \ } \ if (RTREE_CTX_NCACHE_L2 > 1) { \ memmove(&rtree_ctx->l2_cache[1], \ &rtree_ctx->l2_cache[0], \ sizeof(rtree_ctx_cache_elm_t) * \ (RTREE_CTX_NCACHE_L2 - 1)); \ } \ size_t slot = rtree_cache_direct_map(key); \ rtree_ctx->l2_cache[0].leafkey = \ rtree_ctx->cache[slot].leafkey; \ rtree_ctx->l2_cache[0].leaf = \ rtree_ctx->cache[slot].leaf; \ uintptr_t leafkey = rtree_leafkey(key); \ rtree_ctx->cache[slot].leafkey = leafkey; \ rtree_ctx->cache[slot].leaf = leaf; \ uintptr_t subkey = rtree_subkey(key, level); \ return &leaf[subkey]; \ } if (RTREE_HEIGHT > 1) { RTREE_GET_CHILD(0) } if (RTREE_HEIGHT > 2) { RTREE_GET_CHILD(1) } if (RTREE_HEIGHT > 3) { for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { RTREE_GET_CHILD(i) } } RTREE_GET_LEAF(RTREE_HEIGHT-1) #undef RTREE_GET_CHILD #undef RTREE_GET_LEAF not_reached(); } void rtree_ctx_data_init(rtree_ctx_t *ctx) { for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { rtree_ctx_cache_elm_t *cache = &ctx->cache[i]; cache->leafkey = RTREE_LEAFKEY_INVALID; cache->leaf = NULL; } for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i]; cache->leafkey = RTREE_LEAFKEY_INVALID; cache->leaf = NULL; } } jemalloc-sys-0.3.2/jemalloc/src/stats.c010064400007650000024000001177221340421341300162210ustar0000000000000000#define JEMALLOC_STATS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/emitter.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_prof.h" const char *global_mutex_names[mutex_prof_num_global_mutexes] = { #define OP(mtx) #mtx, MUTEX_PROF_GLOBAL_MUTEXES #undef OP }; const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { #define OP(mtx) #mtx, MUTEX_PROF_ARENA_MUTEXES #undef OP }; #define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_GET(n, i, v, t) do { \ size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_M4_GET(n, i, j, v, t) do { \ size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ mib[4] = (j); \ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) /******************************************************************************/ /* Data. */ bool opt_stats_print = false; char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; /******************************************************************************/ /* Calculate x.yyy and output a string (takes a fixed sized char array). */ static bool get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { if (divisor == 0 || dividend > divisor) { /* The rate is not supposed to be greater than 1. 
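* Both a zero divisor and dividend > divisor are reported as failure; the caller in stats_arena_bins_print() distinguishes the two, treating the latter as a racy read of the counters.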
*/ return true; } if (dividend > 0) { assert(UINT64_MAX / dividend >= 1000); } unsigned n = (unsigned)((dividend * 1000) / divisor); if (n < 10) { malloc_snprintf(str, 6, "0.00%u", n); } else if (n < 100) { malloc_snprintf(str, 6, "0.0%u", n); } else if (n < 1000) { malloc_snprintf(str, 6, "0.%u", n); } else { malloc_snprintf(str, 6, "1"); } return false; } #define MUTEX_CTL_STR_MAX_LENGTH 128 static void gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, const char *mutex, const char *counter) { malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); } static void mutex_stats_init_cols(emitter_row_t *row, const char *table_name, emitter_col_t *name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; emitter_col_t *col; if (name != NULL) { emitter_col_init(name, row); name->justify = emitter_justify_left; name->width = 21; name->type = emitter_type_title; name->str_val = table_name; } #define WIDTH_uint32_t 12 #define WIDTH_uint64_t 16 #define OP(counter, counter_type, human) \ col = &col_##counter_type[k_##counter_type]; \ ++k_##counter_type; \ emitter_col_init(col, row); \ col->justify = emitter_justify_right; \ col->width = WIDTH_##counter_type; \ col->type = emitter_type_title; \ col->str_val = human; MUTEX_PROF_COUNTERS #undef OP #undef WIDTH_uint32_t #undef WIDTH_uint64_t } static void mutex_stats_read_global(const char *name, emitter_col_t *col_name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { char cmd[MUTEX_CTL_STR_MAX_LENGTH]; col_name->str_val = name; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, counter_type, human) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ "mutexes", name, #counter); \ CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); MUTEX_PROF_COUNTERS #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } static void mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind, const char *name, emitter_col_t *col_name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { char cmd[MUTEX_CTL_STR_MAX_LENGTH]; col_name->str_val = name; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, counter_type, human) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\ CTL_M2_GET(cmd, arena_ind, \ (counter_type *)&dst->bool_val, counter_type); MUTEX_PROF_COUNTERS #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } static void mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { char cmd[MUTEX_CTL_STR_MAX_LENGTH]; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, counter_type, human) \ dst = 
&col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ "arenas.0.bins.0","mutex", #counter); \ CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ (counter_type *)&dst->bool_val, counter_type); MUTEX_PROF_COUNTERS #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } /* "row" can be NULL to avoid emitting in table mode. */ static void mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { if (row != NULL) { emitter_table_row(emitter, row); } mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; emitter_col_t *col; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, type, human) \ col = &col_##type[k_##type]; \ ++k_##type; \ emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \ (const void *)&col->bool_val); MUTEX_PROF_COUNTERS; #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } static void stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i) { size_t page; bool in_gap, in_gap_prev; unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); CTL_GET("arenas.nbins", &nbins, unsigned); emitter_row_t header_row; emitter_row_init(&header_row); emitter_row_t row; emitter_row_init(&row); #define COL(name, left_or_right, col_width, etype) \ emitter_col_t col_##name; \ emitter_col_init(&col_##name, &row); \ col_##name.justify = emitter_justify_##left_or_right; \ col_##name.width = col_width; \ col_##name.type = emitter_type_##etype; \ emitter_col_t header_col_##name; \ emitter_col_init(&header_col_##name, &header_row); \ header_col_##name.justify = emitter_justify_##left_or_right; \ header_col_##name.width = col_width; \ header_col_##name.type = emitter_type_title; \ header_col_##name.str_val = #name; COL(size, right, 20, size) COL(ind, right, 4, unsigned) COL(allocated, right, 13, uint64) COL(nmalloc, right, 13, uint64) COL(ndalloc, right, 13, uint64) COL(nrequests, right, 13, uint64) COL(curregs, right, 13, size) COL(curslabs, right, 13, size) COL(regs, right, 5, unsigned) COL(pgs, right, 4, size) /* To buffer a right- and left-justified column. */ COL(justify_spacer, right, 1, title) COL(util, right, 6, title) COL(nfills, right, 13, uint64) COL(nflushes, right, 13, uint64) COL(nslabs, right, 13, uint64) COL(nreslabs, right, 13, uint64) #undef COL /* Don't want to actually print the name. */ header_col_justify_spacer.str_val = " "; col_justify_spacer.str_val = " "; emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters]; emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters]; emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters]; emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters]; if (mutex) { mutex_stats_init_cols(&row, NULL, NULL, col_mutex64, col_mutex32); mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64, header_mutex32); } /* * We print a "bins:" header as part of the table row; we need to adjust * the header size column to compensate. 
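* ("bins:" is five characters, matching the width adjustment of 5 below.)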
*/ header_col_size.width -=5; emitter_table_printf(emitter, "bins:"); emitter_table_row(emitter, &header_row); emitter_json_arr_begin(emitter, "bins"); for (j = 0, in_gap = false; j < nbins; j++) { uint64_t nslabs; size_t reg_size, slab_size, curregs; size_t curslabs; uint32_t nregs; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nreslabs; CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, uint64_t); in_gap_prev = in_gap; in_gap = (nslabs == 0); if (in_gap_prev && !in_gap) { emitter_table_printf(emitter, " ---\n"); } CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, size_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, &nrequests, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, size_t); if (mutex) { mutex_stats_read_arena_bin(i, j, col_mutex64, col_mutex32); } emitter_json_arr_obj_begin(emitter); emitter_json_kv(emitter, "nmalloc", emitter_type_uint64, &nmalloc); emitter_json_kv(emitter, "ndalloc", emitter_type_uint64, &ndalloc); emitter_json_kv(emitter, "curregs", emitter_type_size, &curregs); emitter_json_kv(emitter, "nrequests", emitter_type_uint64, &nrequests); emitter_json_kv(emitter, "nfills", emitter_type_uint64, &nfills); emitter_json_kv(emitter, "nflushes", emitter_type_uint64, &nflushes); emitter_json_kv(emitter, "nreslabs", emitter_type_uint64, &nreslabs); emitter_json_kv(emitter, "curslabs", emitter_type_size, &curslabs); if (mutex) { emitter_json_dict_begin(emitter, "mutex"); mutex_stats_emit(emitter, NULL, col_mutex64, col_mutex32); emitter_json_dict_end(emitter); } emitter_json_arr_obj_end(emitter); size_t availregs = nregs * curslabs; char util[6]; if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util)) { if (availregs == 0) { malloc_snprintf(util, sizeof(util), "1"); } else if (curregs > availregs) { /* * Race detected: the counters were read in * separate mallctl calls and concurrent * operations happened in between. In this case * no meaningful utilization can be computed. */ malloc_snprintf(util, sizeof(util), " race"); } else { not_reached(); } } col_size.size_val = reg_size; col_ind.unsigned_val = j; col_allocated.size_val = curregs * reg_size; col_nmalloc.uint64_val = nmalloc; col_ndalloc.uint64_val = ndalloc; col_nrequests.uint64_val = nrequests; col_curregs.size_val = curregs; col_curslabs.size_val = curslabs; col_regs.unsigned_val = nregs; col_pgs.size_val = slab_size / page; col_util.str_val = util; col_nfills.uint64_val = nfills; col_nflushes.uint64_val = nflushes; col_nslabs.uint64_val = nslabs; col_nreslabs.uint64_val = nreslabs; /* * Note that mutex columns were initialized above, if mutex == * true. */ emitter_table_row(emitter, &row); } emitter_json_arr_end(emitter); /* Close "bins". 
*/ if (in_gap) { emitter_table_printf(emitter, " ---\n"); } } static void stats_arena_lextents_print(emitter_t *emitter, unsigned i) { unsigned nbins, nlextents, j; bool in_gap, in_gap_prev; CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlextents", &nlextents, unsigned); emitter_row_t header_row; emitter_row_init(&header_row); emitter_row_t row; emitter_row_init(&row); #define COL(name, left_or_right, col_width, etype) \ emitter_col_t header_##name; \ emitter_col_init(&header_##name, &header_row); \ header_##name.justify = emitter_justify_##left_or_right; \ header_##name.width = col_width; \ header_##name.type = emitter_type_title; \ header_##name.str_val = #name; \ \ emitter_col_t col_##name; \ emitter_col_init(&col_##name, &row); \ col_##name.justify = emitter_justify_##left_or_right; \ col_##name.width = col_width; \ col_##name.type = emitter_type_##etype; COL(size, right, 20, size) COL(ind, right, 4, unsigned) COL(allocated, right, 13, size) COL(nmalloc, right, 13, uint64) COL(ndalloc, right, 13, uint64) COL(nrequests, right, 13, uint64) COL(curlextents, right, 13, size) #undef COL /* As with bins, we label the large extents table. */ header_size.width -= 6; emitter_table_printf(emitter, "large:"); emitter_table_row(emitter, &header_row); emitter_json_arr_begin(emitter, "lextents"); for (j = 0, in_gap = false; j < nlextents; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t lextent_size, curlextents; CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, &nrequests, uint64_t); in_gap_prev = in_gap; in_gap = (nrequests == 0); if (in_gap_prev && !in_gap) { emitter_table_printf(emitter, " ---\n"); } CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, &curlextents, size_t); emitter_json_arr_obj_begin(emitter); emitter_json_kv(emitter, "curlextents", emitter_type_size, &curlextents); emitter_json_arr_obj_end(emitter); col_size.size_val = lextent_size; col_ind.unsigned_val = nbins + j; col_allocated.size_val = curlextents * lextent_size; col_nmalloc.uint64_val = nmalloc; col_ndalloc.uint64_val = ndalloc; col_nrequests.uint64_val = nrequests; col_curlextents.size_val = curlextents; if (!in_gap) { emitter_table_row(emitter, &row); } } emitter_json_arr_end(emitter); /* Close "lextents". */ if (in_gap) { emitter_table_printf(emitter, " ---\n"); } } static void stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind) { emitter_row_t row; emitter_col_t col_name; emitter_col_t col64[mutex_prof_num_uint64_t_counters]; emitter_col_t col32[mutex_prof_num_uint32_t_counters]; emitter_row_init(&row); mutex_stats_init_cols(&row, "", &col_name, col64, col32); emitter_json_dict_begin(emitter, "mutexes"); emitter_table_row(emitter, &row); for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes; i++) { const char *name = arena_mutex_names[i]; emitter_json_dict_begin(emitter, name); mutex_stats_read_arena(arena_ind, i, name, &col_name, col64, col32); mutex_stats_emit(emitter, &row, col64, col32); emitter_json_dict_end(emitter); /* Close the mutex dict. */ } emitter_json_dict_end(emitter); /* End "mutexes". 
*/ } static void stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, bool mutex) { unsigned nthreads; const char *dss; ssize_t dirty_decay_ms, muzzy_decay_ms; size_t page, pactive, pdirty, pmuzzy, mapped, retained; size_t base, internal, resident, metadata_thp; uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests; size_t tcache_bytes; uint64_t uptime; CTL_GET("arenas.page", &page, size_t); CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); emitter_kv(emitter, "nthreads", "assigned threads", emitter_type_unsigned, &nthreads); CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64, &uptime); CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); emitter_kv(emitter, "dss", "dss allocation precedence", emitter_type_string, &dss); CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, ssize_t); CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, ssize_t); CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t); CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t); CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t); CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t); CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); emitter_row_t decay_row; emitter_row_init(&decay_row); /* JSON-style emission. */ emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &dirty_decay_ms); emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &muzzy_decay_ms); emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive); emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty); emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy); emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64, &dirty_npurge); emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64, &dirty_nmadvise); emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64, &dirty_purged); emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64, &muzzy_npurge); emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64, &muzzy_nmadvise); emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64, &muzzy_purged); /* Table-style emission. 
*/ emitter_col_t decay_type; emitter_col_init(&decay_type, &decay_row); decay_type.justify = emitter_justify_right; decay_type.width = 9; decay_type.type = emitter_type_title; decay_type.str_val = "decaying:"; emitter_col_t decay_time; emitter_col_init(&decay_time, &decay_row); decay_time.justify = emitter_justify_right; decay_time.width = 6; decay_time.type = emitter_type_title; decay_time.str_val = "time"; emitter_col_t decay_npages; emitter_col_init(&decay_npages, &decay_row); decay_npages.justify = emitter_justify_right; decay_npages.width = 13; decay_npages.type = emitter_type_title; decay_npages.str_val = "npages"; emitter_col_t decay_sweeps; emitter_col_init(&decay_sweeps, &decay_row); decay_sweeps.justify = emitter_justify_right; decay_sweeps.width = 13; decay_sweeps.type = emitter_type_title; decay_sweeps.str_val = "sweeps"; emitter_col_t decay_madvises; emitter_col_init(&decay_madvises, &decay_row); decay_madvises.justify = emitter_justify_right; decay_madvises.width = 13; decay_madvises.type = emitter_type_title; decay_madvises.str_val = "madvises"; emitter_col_t decay_purged; emitter_col_init(&decay_purged, &decay_row); decay_purged.justify = emitter_justify_right; decay_purged.width = 13; decay_purged.type = emitter_type_title; decay_purged.str_val = "purged"; /* Title row. */ emitter_table_row(emitter, &decay_row); /* Dirty row. */ decay_type.str_val = "dirty:"; if (dirty_decay_ms >= 0) { decay_time.type = emitter_type_ssize; decay_time.ssize_val = dirty_decay_ms; } else { decay_time.type = emitter_type_title; decay_time.str_val = "N/A"; } decay_npages.type = emitter_type_size; decay_npages.size_val = pdirty; decay_sweeps.type = emitter_type_uint64; decay_sweeps.uint64_val = dirty_npurge; decay_madvises.type = emitter_type_uint64; decay_madvises.uint64_val = dirty_nmadvise; decay_purged.type = emitter_type_uint64; decay_purged.uint64_val = dirty_purged; emitter_table_row(emitter, &decay_row); /* Muzzy row. */ decay_type.str_val = "muzzy:"; if (muzzy_decay_ms >= 0) { decay_time.type = emitter_type_ssize; decay_time.ssize_val = muzzy_decay_ms; } else { decay_time.type = emitter_type_title; decay_time.str_val = "N/A"; } decay_npages.type = emitter_type_size; decay_npages.size_val = pmuzzy; decay_sweeps.type = emitter_type_uint64; decay_sweeps.uint64_val = muzzy_npurge; decay_madvises.type = emitter_type_uint64; decay_madvises.uint64_val = muzzy_nmadvise; decay_purged.type = emitter_type_uint64; decay_purged.uint64_val = muzzy_purged; emitter_table_row(emitter, &decay_row); /* Small / large / total allocation counts. 
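* The columns below are initialized once and reused for the small, large, and total rows; only their values change between emitter_table_row() calls.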
*/ emitter_row_t alloc_count_row; emitter_row_init(&alloc_count_row); emitter_col_t alloc_count_title; emitter_col_init(&alloc_count_title, &alloc_count_row); alloc_count_title.justify = emitter_justify_left; alloc_count_title.width = 25; alloc_count_title.type = emitter_type_title; alloc_count_title.str_val = ""; emitter_col_t alloc_count_allocated; emitter_col_init(&alloc_count_allocated, &alloc_count_row); alloc_count_allocated.justify = emitter_justify_right; alloc_count_allocated.width = 12; alloc_count_allocated.type = emitter_type_title; alloc_count_allocated.str_val = "allocated"; emitter_col_t alloc_count_nmalloc; emitter_col_init(&alloc_count_nmalloc, &alloc_count_row); alloc_count_nmalloc.justify = emitter_justify_right; alloc_count_nmalloc.width = 12; alloc_count_nmalloc.type = emitter_type_title; alloc_count_nmalloc.str_val = "nmalloc"; emitter_col_t alloc_count_ndalloc; emitter_col_init(&alloc_count_ndalloc, &alloc_count_row); alloc_count_ndalloc.justify = emitter_justify_right; alloc_count_ndalloc.width = 12; alloc_count_ndalloc.type = emitter_type_title; alloc_count_ndalloc.str_val = "ndalloc"; emitter_col_t alloc_count_nrequests; emitter_col_init(&alloc_count_nrequests, &alloc_count_row); alloc_count_nrequests.justify = emitter_justify_right; alloc_count_nrequests.width = 12; alloc_count_nrequests.type = emitter_type_title; alloc_count_nrequests.str_val = "nrequests"; emitter_table_row(emitter, &alloc_count_row); #define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \ CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \ &small_or_large##_##name, valtype##_t); \ emitter_json_kv(emitter, #name, emitter_type_##valtype, \ &small_or_large##_##name); \ alloc_count_##name.type = emitter_type_##valtype; \ alloc_count_##name.valtype##_val = small_or_large##_##name; emitter_json_dict_begin(emitter, "small"); alloc_count_title.str_val = "small:"; GET_AND_EMIT_ALLOC_STAT(small, allocated, size) GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64) GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64) GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64) emitter_table_row(emitter, &alloc_count_row); emitter_json_dict_end(emitter); /* Close "small". */ emitter_json_dict_begin(emitter, "large"); alloc_count_title.str_val = "large:"; GET_AND_EMIT_ALLOC_STAT(large, allocated, size) GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64) GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64) GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64) emitter_table_row(emitter, &alloc_count_row); emitter_json_dict_end(emitter); /* Close "large". */ #undef GET_AND_EMIT_ALLOC_STAT /* Aggregated small + large stats are emitted only in table mode.
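* (A JSON consumer can recompute the totals from the "small" and "large" dicts emitted above.)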
*/ alloc_count_title.str_val = "total:"; alloc_count_allocated.size_val = small_allocated + large_allocated; alloc_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc; alloc_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc; alloc_count_nrequests.uint64_val = small_nrequests + large_nrequests; emitter_table_row(emitter, &alloc_count_row); emitter_row_t mem_count_row; emitter_row_init(&mem_count_row); emitter_col_t mem_count_title; emitter_col_init(&mem_count_title, &mem_count_row); mem_count_title.justify = emitter_justify_left; mem_count_title.width = 25; mem_count_title.type = emitter_type_title; mem_count_title.str_val = ""; emitter_col_t mem_count_val; emitter_col_init(&mem_count_val, &mem_count_row); mem_count_val.justify = emitter_justify_right; mem_count_val.width = 12; mem_count_val.type = emitter_type_title; mem_count_val.str_val = ""; emitter_table_row(emitter, &mem_count_row); mem_count_val.type = emitter_type_size; /* Active count in bytes is emitted only in table mode. */ mem_count_title.str_val = "active:"; mem_count_val.size_val = pactive * page; emitter_table_row(emitter, &mem_count_row); #define GET_AND_EMIT_MEM_STAT(stat) \ CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \ emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \ mem_count_title.str_val = #stat":"; \ mem_count_val.size_val = stat; \ emitter_table_row(emitter, &mem_count_row); GET_AND_EMIT_MEM_STAT(mapped) GET_AND_EMIT_MEM_STAT(retained) GET_AND_EMIT_MEM_STAT(base) GET_AND_EMIT_MEM_STAT(internal) GET_AND_EMIT_MEM_STAT(metadata_thp) GET_AND_EMIT_MEM_STAT(tcache_bytes) GET_AND_EMIT_MEM_STAT(resident) #undef GET_AND_EMIT_MEM_STAT if (mutex) { stats_arena_mutexes_print(emitter, i); } if (bins) { stats_arena_bins_print(emitter, mutex, i); } if (large) { stats_arena_lextents_print(emitter, i); } } static void stats_general_print(emitter_t *emitter) { const char *cpv; bool bv, bv2; unsigned uv; uint32_t u32v; uint64_t u64v; ssize_t ssv, ssv2; size_t sv, bsz, usz, ssz, sssz, cpsz; bsz = sizeof(bool); usz = sizeof(unsigned); ssz = sizeof(size_t); sssz = sizeof(ssize_t); cpsz = sizeof(const char *); CTL_GET("version", &cpv, const char *); emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv); /* config. */ emitter_dict_begin(emitter, "config", "Build-time option settings"); #define CONFIG_WRITE_BOOL(name) \ do { \ CTL_GET("config."#name, &bv, bool); \ emitter_kv(emitter, #name, "config."#name, \ emitter_type_bool, &bv); \ } while (0) CONFIG_WRITE_BOOL(cache_oblivious); CONFIG_WRITE_BOOL(debug); CONFIG_WRITE_BOOL(fill); CONFIG_WRITE_BOOL(lazy_lock); emitter_kv(emitter, "malloc_conf", "config.malloc_conf", emitter_type_string, &config_malloc_conf); CONFIG_WRITE_BOOL(prof); CONFIG_WRITE_BOOL(prof_libgcc); CONFIG_WRITE_BOOL(prof_libunwind); CONFIG_WRITE_BOOL(stats); CONFIG_WRITE_BOOL(utrace); CONFIG_WRITE_BOOL(xmalloc); #undef CONFIG_WRITE_BOOL emitter_dict_end(emitter); /* Close "config" dict. */ /* opt. 
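* Each OPT_WRITE* macro probes the named mallctl and emits the value only if the lookup succeeds, so options not present in this build are silently omitted rather than reported as errors.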
*/ #define OPT_WRITE(name, var, size, emitter_type) \ if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \ 0) { \ emitter_kv(emitter, name, "opt."name, emitter_type, \ &var); \ } #define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \ altname) \ if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \ 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \ == 0) { \ emitter_kv_note(emitter, name, "opt."name, \ emitter_type, &var1, altname, emitter_type, \ &var2); \ } #define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool) #define OPT_WRITE_BOOL_MUTABLE(name, altname) \ OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname) #define OPT_WRITE_UNSIGNED(name) \ OPT_WRITE(name, uv, usz, emitter_type_unsigned) #define OPT_WRITE_SSIZE_T(name) \ OPT_WRITE(name, ssv, sssz, emitter_type_ssize) #define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \ OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \ altname) #define OPT_WRITE_CHAR_P(name) \ OPT_WRITE(name, cpv, cpsz, emitter_type_string) emitter_dict_begin(emitter, "opt", "Run-time option settings"); OPT_WRITE_BOOL("abort") OPT_WRITE_BOOL("abort_conf") OPT_WRITE_BOOL("retain") OPT_WRITE_CHAR_P("dss") OPT_WRITE_UNSIGNED("narenas") OPT_WRITE_CHAR_P("percpu_arena") OPT_WRITE_CHAR_P("metadata_thp") OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread") OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms") OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms") OPT_WRITE_UNSIGNED("lg_extent_max_active_fit") OPT_WRITE_CHAR_P("junk") OPT_WRITE_BOOL("zero") OPT_WRITE_BOOL("utrace") OPT_WRITE_BOOL("xmalloc") OPT_WRITE_BOOL("tcache") OPT_WRITE_SSIZE_T("lg_tcache_max") OPT_WRITE_CHAR_P("thp") OPT_WRITE_BOOL("prof") OPT_WRITE_CHAR_P("prof_prefix") OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active") OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init", "prof.thread_active_init") OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample") OPT_WRITE_BOOL("prof_accum") OPT_WRITE_SSIZE_T("lg_prof_interval") OPT_WRITE_BOOL("prof_gdump") OPT_WRITE_BOOL("prof_final") OPT_WRITE_BOOL("prof_leak") OPT_WRITE_BOOL("stats_print") OPT_WRITE_CHAR_P("stats_print_opts") emitter_dict_end(emitter); #undef OPT_WRITE #undef OPT_WRITE_MUTABLE #undef OPT_WRITE_BOOL #undef OPT_WRITE_BOOL_MUTABLE #undef OPT_WRITE_UNSIGNED #undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_SSIZE_T_MUTABLE #undef OPT_WRITE_CHAR_P /* prof. */ if (config_prof) { emitter_dict_begin(emitter, "prof", "Profiling settings"); CTL_GET("prof.thread_active_init", &bv, bool); emitter_kv(emitter, "thread_active_init", "prof.thread_active_init", emitter_type_bool, &bv); CTL_GET("prof.active", &bv, bool); emitter_kv(emitter, "active", "prof.active", emitter_type_bool, &bv); CTL_GET("prof.gdump", &bv, bool); emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool, &bv); CTL_GET("prof.interval", &u64v, uint64_t); emitter_kv(emitter, "interval", "prof.interval", emitter_type_uint64, &u64v); CTL_GET("prof.lg_sample", &ssv, ssize_t); emitter_kv(emitter, "lg_sample", "prof.lg_sample", emitter_type_ssize, &ssv); emitter_dict_end(emitter); /* Close "prof". */ } /* arenas. */ /* * The json output sticks arena info into an "arenas" dict; the table * output puts them at the top-level. 
*/ emitter_json_dict_begin(emitter, "arenas"); CTL_GET("arenas.narenas", &uv, unsigned); emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv); /* * Decay settings are emitted only in json mode; in table mode, they're * emitted as notes with the opt output, above. */ CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t); emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv); CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t); emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv); CTL_GET("arenas.quantum", &sv, size_t); emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv); CTL_GET("arenas.page", &sv, size_t); emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv); if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { emitter_kv(emitter, "tcache_max", "Maximum thread-cached size class", emitter_type_size, &sv); } unsigned nbins; CTL_GET("arenas.nbins", &nbins, unsigned); emitter_kv(emitter, "nbins", "Number of bin size classes", emitter_type_unsigned, &nbins); unsigned nhbins; CTL_GET("arenas.nhbins", &nhbins, unsigned); emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes", emitter_type_unsigned, &nhbins); /* * We do enough mallctls in a loop that we actually want to omit them * (not just omit the printing). */ if (emitter->output == emitter_output_json) { emitter_json_arr_begin(emitter, "bin"); for (unsigned i = 0; i < nbins; i++) { emitter_json_arr_obj_begin(emitter); CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); emitter_json_kv(emitter, "size", emitter_type_size, &sv); CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); emitter_json_kv(emitter, "nregs", emitter_type_uint32, &u32v); CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t); emitter_json_kv(emitter, "slab_size", emitter_type_size, &sv); emitter_json_arr_obj_end(emitter); } emitter_json_arr_end(emitter); /* Close "bin". */ } unsigned nlextents; CTL_GET("arenas.nlextents", &nlextents, unsigned); emitter_kv(emitter, "nlextents", "Number of large size classes", emitter_type_unsigned, &nlextents); if (emitter->output == emitter_output_json) { emitter_json_arr_begin(emitter, "lextent"); for (unsigned i = 0; i < nlextents; i++) { emitter_json_arr_obj_begin(emitter); CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t); emitter_json_kv(emitter, "size", emitter_type_size, &sv); emitter_json_arr_obj_end(emitter); } emitter_json_arr_end(emitter); /* Close "lextent". */ } emitter_json_dict_end(emitter); /* Close "arenas" */ } static void stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, bool unmerged, bool bins, bool large, bool mutex) { /* * These should be deleted. We keep them around for a while, to aid in * the transition to the emitter code. 
*/ size_t allocated, active, metadata, metadata_thp, resident, mapped, retained; size_t num_background_threads; uint64_t background_thread_num_runs, background_thread_run_interval; CTL_GET("stats.allocated", &allocated, size_t); CTL_GET("stats.active", &active, size_t); CTL_GET("stats.metadata", &metadata, size_t); CTL_GET("stats.metadata_thp", &metadata_thp, size_t); CTL_GET("stats.resident", &resident, size_t); CTL_GET("stats.mapped", &mapped, size_t); CTL_GET("stats.retained", &retained, size_t); if (have_background_thread) { CTL_GET("stats.background_thread.num_threads", &num_background_threads, size_t); CTL_GET("stats.background_thread.num_runs", &background_thread_num_runs, uint64_t); CTL_GET("stats.background_thread.run_interval", &background_thread_run_interval, uint64_t); } else { num_background_threads = 0; background_thread_num_runs = 0; background_thread_run_interval = 0; } /* Generic global stats. */ emitter_json_dict_begin(emitter, "stats"); emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated); emitter_json_kv(emitter, "active", emitter_type_size, &active); emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata); emitter_json_kv(emitter, "metadata_thp", emitter_type_size, &metadata_thp); emitter_json_kv(emitter, "resident", emitter_type_size, &resident); emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped); emitter_json_kv(emitter, "retained", emitter_type_size, &retained); emitter_table_printf(emitter, "Allocated: %zu, active: %zu, " "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, " "retained: %zu\n", allocated, active, metadata, metadata_thp, resident, mapped, retained); /* Background thread stats. */ emitter_json_dict_begin(emitter, "background_thread"); emitter_json_kv(emitter, "num_threads", emitter_type_size, &num_background_threads); emitter_json_kv(emitter, "num_runs", emitter_type_uint64, &background_thread_num_runs); emitter_json_kv(emitter, "run_interval", emitter_type_uint64, &background_thread_run_interval); emitter_json_dict_end(emitter); /* Close "background_thread". */ emitter_table_printf(emitter, "Background threads: %zu, " "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n", num_background_threads, background_thread_num_runs, background_thread_run_interval); if (mutex) { emitter_row_t row; emitter_col_t name; emitter_col_t col64[mutex_prof_num_uint64_t_counters]; emitter_col_t col32[mutex_prof_num_uint32_t_counters]; emitter_row_init(&row); mutex_stats_init_cols(&row, "", &name, col64, col32); emitter_table_row(emitter, &row); emitter_json_dict_begin(emitter, "mutexes"); for (int i = 0; i < mutex_prof_num_global_mutexes; i++) { mutex_stats_read_global(global_mutex_names[i], &name, col64, col32); emitter_json_dict_begin(emitter, global_mutex_names[i]); mutex_stats_emit(emitter, &row, col64, col32); emitter_json_dict_end(emitter); } emitter_json_dict_end(emitter); /* Close "mutexes". */ } emitter_json_dict_end(emitter); /* Close "stats". 
*/ if (merged || destroyed || unmerged) { unsigned narenas; emitter_json_dict_begin(emitter, "stats.arenas"); CTL_GET("arenas.narenas", &narenas, unsigned); size_t mib[3]; size_t miblen = sizeof(mib) / sizeof(size_t); size_t sz; VARIABLE_ARRAY(bool, initialized, narenas); bool destroyed_initialized; unsigned i, j, ninitialized; xmallctlnametomib("arena.0.initialized", mib, &miblen); for (i = ninitialized = 0; i < narenas; i++) { mib[1] = i; sz = sizeof(bool); xmallctlbymib(mib, miblen, &initialized[i], &sz, NULL, 0); if (initialized[i]) { ninitialized++; } } mib[1] = MALLCTL_ARENAS_DESTROYED; sz = sizeof(bool); xmallctlbymib(mib, miblen, &destroyed_initialized, &sz, NULL, 0); /* Merged stats. */ if (merged && (ninitialized > 1 || !unmerged)) { /* Print merged arena stats. */ emitter_table_printf(emitter, "Merged arenas stats:\n"); emitter_json_dict_begin(emitter, "merged"); stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins, large, mutex); emitter_json_dict_end(emitter); /* Close "merged". */ } /* Destroyed stats. */ if (destroyed_initialized && destroyed) { /* Print destroyed arena stats. */ emitter_table_printf(emitter, "Destroyed arenas stats:\n"); emitter_json_dict_begin(emitter, "destroyed"); stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED, bins, large, mutex); emitter_json_dict_end(emitter); /* Close "destroyed". */ } /* Unmerged stats. */ if (unmerged) { for (i = j = 0; i < narenas; i++) { if (initialized[i]) { char arena_ind_str[20]; malloc_snprintf(arena_ind_str, sizeof(arena_ind_str), "%u", i); emitter_json_dict_begin(emitter, arena_ind_str); emitter_table_printf(emitter, "arenas[%s]:\n", arena_ind_str); stats_arena_print(emitter, i, bins, large, mutex); /* Close "". */ emitter_json_dict_end(emitter); } } } emitter_json_dict_end(emitter); /* Close "stats.arenas". */ } } void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { int err; uint64_t epoch; size_t u64sz; #define OPTION(o, v, d, s) bool v = d; STATS_PRINT_OPTIONS #undef OPTION /* * Refresh stats, in case mallctl() was called by the application. * * Check for OOM here, since refreshing the ctl cache can trigger * allocation. In practice, none of the subsequent mallctl()-related * calls in this function will cause OOM if this one succeeds. */ epoch = 1; u64sz = sizeof(uint64_t); err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, sizeof(uint64_t)); if (err != 0) { if (err == EAGAIN) { malloc_write("<jemalloc>: Memory allocation failure in " "mallctl(\"epoch\", ...)\n"); return; } malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " "...)\n"); abort(); } if (opts != NULL) { for (unsigned i = 0; opts[i] != '\0'; i++) { switch (opts[i]) { #define OPTION(o, v, d, s) case o: v = s; break; STATS_PRINT_OPTIONS #undef OPTION default:; } } } emitter_t emitter; emitter_init(&emitter, json ? emitter_output_json : emitter_output_table, write_cb, cbopaque); emitter_begin(&emitter); emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n"); emitter_json_dict_begin(&emitter, "jemalloc"); if (general) { stats_general_print(&emitter); } if (config_stats) { stats_print_helper(&emitter, merged, destroyed, unmerged, bins, large, mutex); } emitter_json_dict_end(&emitter); /* Closes the "jemalloc" dict.
*/ emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n"); emitter_end(&emitter); } jemalloc-sys-0.3.2/jemalloc/src/sz.c010064400007650000024000000046401340421341300155110ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/sz.h" JEMALLOC_ALIGNED(CACHELINE) const size_t sz_pind2sz_tab[NPSIZES+1] = { #define PSZ_yes(lg_grp, ndelta, lg_delta) \ (((ZU(1)<next_gc_bin; cache_bin_t *tbin; if (binind < NBINS) { tbin = tcache_small_bin_get(tcache, binind); } else { tbin = tcache_large_bin_get(tcache, binind); } if (tbin->low_water > 0) { /* * Flush (ceiling) 3/4 of the objects below the low water mark. */ if (binind < NBINS) { tcache_bin_flush_small(tsd, tcache, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2)); /* * Reduce fill count by 2X. Limit lg_fill_div such that * the fill count is always at least 1. */ cache_bin_info_t *tbin_info = &tcache_bin_info[binind]; if ((tbin_info->ncached_max >> (tcache->lg_fill_div[binind] + 1)) >= 1) { tcache->lg_fill_div[binind]++; } } else { tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2), tcache); } } else if (tbin->low_water < 0) { /* * Increase fill count by 2X for small bins. Make sure * lg_fill_div stays greater than 0. */ if (binind < NBINS && tcache->lg_fill_div[binind] > 1) { tcache->lg_fill_div[binind]--; } } tbin->low_water = tbin->ncached; tcache->next_gc_bin++; if (tcache->next_gc_bin == nhbins) { tcache->next_gc_bin = 0; } } void * tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, bool *tcache_success) { void *ret; assert(tcache->arena != NULL); arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, config_prof ? tcache->prof_accumbytes : 0); if (config_prof) { tcache->prof_accumbytes = 0; } ret = cache_bin_alloc_easy(tbin, tcache_success); return ret; } void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, unsigned rem) { bool merged_stats = false; assert(binind < NBINS); assert((cache_bin_sz_t)rem <= tbin->ncached); arena_t *arena = tcache->arena; assert(arena != NULL); unsigned nflush = tbin->ncached - rem; VARIABLE_ARRAY(extent_t *, item_extent, nflush); /* Look up extent once per item. */ for (unsigned i = 0 ; i < nflush; i++) { item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); } while (nflush > 0) { /* Lock the arena bin associated with the first object. */ extent_t *extent = item_extent[0]; arena_t *bin_arena = extent_arena_get(extent); bin_t *bin = &bin_arena->bins[binind]; if (config_prof && bin_arena == arena) { if (arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) { prof_idump(tsd_tsdn(tsd)); } tcache->prof_accumbytes = 0; } malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (config_stats && bin_arena == arena) { assert(!merged_stats); merged_stats = true; bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } unsigned ndeferred = 0; for (unsigned i = 0; i < nflush; i++) { void *ptr = *(tbin->avail - 1 - i); extent = item_extent[i]; assert(ptr != NULL && extent != NULL); if (extent_arena_get(extent) == bin_arena) { arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), bin_arena, extent, ptr); } else { /* * This object was allocated via a different * arena bin than the one that is currently * locked. Stash the object, so that it can be * handled in a future pass. 
*/ *(tbin->avail - 1 - ndeferred) = ptr; item_extent[ndeferred] = extent; ndeferred++; } } malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); nflush = ndeferred; } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ bin_t *bin = &arena->bins[binind]; malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); tbin->ncached = rem; if (tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; } } void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache) { bool merged_stats = false; assert(binind < nhbins); assert((cache_bin_sz_t)rem <= tbin->ncached); arena_t *arena = tcache->arena; assert(arena != NULL); unsigned nflush = tbin->ncached - rem; VARIABLE_ARRAY(extent_t *, item_extent, nflush); /* Look up extent once per item. */ for (unsigned i = 0 ; i < nflush; i++) { item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); } while (nflush > 0) { /* Lock the arena associated with the first object. */ extent_t *extent = item_extent[0]; arena_t *locked_arena = extent_arena_get(extent); UNUSED bool idump; if (config_prof) { idump = false; } malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx); for (unsigned i = 0; i < nflush; i++) { void *ptr = *(tbin->avail - 1 - i); assert(ptr != NULL); extent = item_extent[i]; if (extent_arena_get(extent) == locked_arena) { large_dalloc_prep_junked_locked(tsd_tsdn(tsd), extent); } } if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { idump = arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes); tcache->prof_accumbytes = 0; } if (config_stats) { merged_stats = true; arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats, binind, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); unsigned ndeferred = 0; for (unsigned i = 0; i < nflush; i++) { void *ptr = *(tbin->avail - 1 - i); extent = item_extent[i]; assert(ptr != NULL && extent != NULL); if (extent_arena_get(extent) == locked_arena) { large_dalloc_finish(tsd_tsdn(tsd), extent); } else { /* * This object was allocated via a different * arena than the one that is currently locked. * Stash the object, so that it can be handled * in a future pass. */ *(tbin->avail - 1 - ndeferred) = ptr; item_extent[ndeferred] = extent; ndeferred++; } } if (config_prof && idump) { prof_idump(tsd_tsdn(tsd)); } arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - ndeferred); nflush = ndeferred; } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats, binind, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); tbin->ncached = rem; if (tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; } } void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { assert(tcache->arena == NULL); tcache->arena = arena; if (config_stats) { /* Link into list of extant tcaches. 
*/ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); cache_bin_array_descriptor_init( &tcache->cache_bin_array_descriptor, tcache->bins_small, tcache->bins_large); ql_tail_insert(&arena->cache_bin_array_descriptor_ql, &tcache->cache_bin_array_descriptor, link); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } } static void tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { arena_t *arena = tcache->arena; assert(arena != NULL); if (config_stats) { /* Unlink from list of extant tcaches. */ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); if (config_debug) { bool in_ql = false; tcache_t *iter; ql_foreach(iter, &arena->tcache_ql, link) { if (iter == tcache) { in_ql = true; break; } } assert(in_ql); } ql_remove(&arena->tcache_ql, tcache, link); ql_remove(&arena->cache_bin_array_descriptor_ql, &tcache->cache_bin_array_descriptor, link); tcache_stats_merge(tsdn, tcache, arena); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } tcache->arena = NULL; } void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { tcache_arena_dissociate(tsdn, tcache); tcache_arena_associate(tsdn, tcache, arena); } bool tsd_tcache_enabled_data_init(tsd_t *tsd) { /* Called upon tsd initialization. */ tsd_tcache_enabled_set(tsd, opt_tcache); tsd_slow_update(tsd); if (opt_tcache) { /* Trigger tcache init. */ tsd_tcache_data_init(tsd); } return false; } /* Initialize auto tcache (embedded in TSD). */ static void tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) { memset(&tcache->link, 0, sizeof(ql_elm(tcache_t))); tcache->prof_accumbytes = 0; tcache->next_gc_bin = 0; tcache->arena = NULL; ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); size_t stack_offset = 0; assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS); memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS)); unsigned i = 0; for (; i < NBINS; i++) { tcache->lg_fill_div[i] = 1; stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); /* * avail points past the available space. Allocations will * access the slots toward higher addresses (for the benefit of * prefetch). */ tcache_small_bin_get(tcache, i)->avail = (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); } for (; i < nhbins; i++) { stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); tcache_large_bin_get(tcache, i)->avail = (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); } assert(stack_offset == stack_nelms * sizeof(void *)); } /* Initialize auto tcache (embedded in TSD). */ bool tsd_tcache_data_init(tsd_t *tsd) { tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); assert(tcache_small_bin_get(tcache, 0)->avail == NULL); size_t size = stack_nelms * sizeof(void *); /* Avoid false cacheline sharing. */ size = sz_sa2u(size, CACHELINE); void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, arena_get(TSDN_NULL, 0, true)); if (avail_array == NULL) { return true; } tcache_init(tsd, tcache, avail_array); /* * Initialization is a bit tricky here. After malloc init is done, all * threads can rely on arena_choose and associate tcache accordingly. * However, the thread that does actual malloc bootstrapping relies on * functional tsd, and it can only rely on a0. In that case, we * associate its tcache to a0 temporarily, and later on * arena_choose_hard() will re-associate properly. 
*/ tcache->arena = NULL; arena_t *arena; if (!malloc_initialized()) { /* If in initialization, assign to a0. */ arena = arena_get(tsd_tsdn(tsd), 0, false); tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); } else { arena = arena_choose(tsd, NULL); /* This may happen if thread.tcache.enabled is used. */ if (tcache->arena == NULL) { tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); } } assert(arena == tcache->arena); return false; } /* Created manual tcache for tcache.create mallctl. */ tcache_t * tcache_create_explicit(tsd_t *tsd) { tcache_t *tcache; size_t size, stack_offset; size = sizeof(tcache_t); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); stack_offset = size; size += stack_nelms * sizeof(void *); /* Avoid false cacheline sharing. */ size = sz_sa2u(size, CACHELINE); tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, arena_get(TSDN_NULL, 0, true)); if (tcache == NULL) { return NULL; } tcache_init(tsd, tcache, (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); return tcache; } static void tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { assert(tcache->arena != NULL); for (unsigned i = 0; i < NBINS; i++) { cache_bin_t *tbin = tcache_small_bin_get(tcache, i); tcache_bin_flush_small(tsd, tcache, tbin, i, 0); if (config_stats) { assert(tbin->tstats.nrequests == 0); } } for (unsigned i = NBINS; i < nhbins; i++) { cache_bin_t *tbin = tcache_large_bin_get(tcache, i); tcache_bin_flush_large(tsd, tbin, i, 0, tcache); if (config_stats) { assert(tbin->tstats.nrequests == 0); } } if (config_prof && tcache->prof_accumbytes > 0 && arena_prof_accum(tsd_tsdn(tsd), tcache->arena, tcache->prof_accumbytes)) { prof_idump(tsd_tsdn(tsd)); } } void tcache_flush(tsd_t *tsd) { assert(tcache_available(tsd)); tcache_flush_cache(tsd, tsd_tcachep_get(tsd)); } static void tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { tcache_flush_cache(tsd, tcache); tcache_arena_dissociate(tsd_tsdn(tsd), tcache); if (tsd_tcache) { /* Release the avail array for the TSD embedded auto tcache. */ void *avail_array = (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail - (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *)); idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true); } else { /* Release both the tcache struct and avail array. */ idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true); } } /* For auto tcache (embedded in TSD) only. */ void tcache_cleanup(tsd_t *tsd) { tcache_t *tcache = tsd_tcachep_get(tsd); if (!tcache_available(tsd)) { assert(tsd_tcache_enabled_get(tsd) == false); if (config_debug) { assert(tcache_small_bin_get(tcache, 0)->avail == NULL); } return; } assert(tsd_tcache_enabled_get(tsd)); assert(tcache_small_bin_get(tcache, 0)->avail != NULL); tcache_destroy(tsd, tcache, true); if (config_debug) { tcache_small_bin_get(tcache, 0)->avail = NULL; } } void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); /* Merge and reset tcache stats. 
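 *
 * Small-bin request counts are folded into the owning arena's per-bin
 * stats under bin->lock; large-bin counts go through the arena-level
 * large stats, which synchronize internally. In both cases the
 * tcache-local counter is zeroed once merged, so each request is
 * counted exactly once.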
*/ for (i = 0; i < NBINS; i++) { bin_t *bin = &arena->bins[i]; cache_bin_t *tbin = tcache_small_bin_get(tcache, i); malloc_mutex_lock(tsdn, &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(tsdn, &bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { cache_bin_t *tbin = tcache_large_bin_get(tcache, i); arena_stats_large_nrequests_add(tsdn, &arena->stats, i, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } static bool tcaches_create_prep(tsd_t *tsd) { bool err; malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches == NULL) { tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE); if (tcaches == NULL) { err = true; goto label_return; } } if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { err = true; goto label_return; } err = false; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); return err; } bool tcaches_create(tsd_t *tsd, unsigned *r_ind) { witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); bool err; if (tcaches_create_prep(tsd)) { err = true; goto label_return; } tcache_t *tcache = tcache_create_explicit(tsd); if (tcache == NULL) { err = true; goto label_return; } tcaches_t *elm; malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches_avail != NULL) { elm = tcaches_avail; tcaches_avail = tcaches_avail->next; elm->tcache = tcache; *r_ind = (unsigned)(elm - tcaches); } else { elm = &tcaches[tcaches_past]; elm->tcache = tcache; *r_ind = tcaches_past; tcaches_past++; } malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); err = false; label_return: witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); return err; } static tcache_t * tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); if (elm->tcache == NULL) { return NULL; } tcache_t *tcache = elm->tcache; elm->tcache = NULL; return tcache; } void tcaches_flush(tsd_t *tsd, unsigned ind) { malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]); malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); if (tcache != NULL) { tcache_destroy(tsd, tcache, false); } } void tcaches_destroy(tsd_t *tsd, unsigned ind) { malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); tcaches_t *elm = &tcaches[ind]; tcache_t *tcache = tcaches_elm_remove(tsd, elm); elm->next = tcaches_avail; tcaches_avail = elm; malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); if (tcache != NULL) { tcache_destroy(tsd, tcache, false); } } bool tcache_boot(tsdn_t *tsdn) { /* If necessary, clamp opt_lg_tcache_max. */ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS) { tcache_maxclass = SMALL_MAXCLASS; } else { tcache_maxclass = (ZU(1) << opt_lg_tcache_max); } if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, malloc_mutex_rank_exclusive)) { return true; } nhbins = sz_size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. 
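 *
 * For each small bin the capacity chosen below is, in effect,
 *
 *   ncached_max = clamp(2 * bin_infos[i].nregs,
 *       TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX);
 *
 * i.e. room for two slabs' worth of regions, bounded below so that small
 * bins still amortize arena lock acquisition and bounded above so that a
 * single thread cannot pin excessive memory. Large bins get the fixed
 * TCACHE_NSLOTS_LARGE. stack_nelms accumulates the total so that one
 * contiguous avail array per tcache can later be carved up among bins.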
*/ tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins * sizeof(cache_bin_info_t), CACHELINE); if (tcache_bin_info == NULL) { return true; } stack_nelms = 0; unsigned i; for (i = 0; i < NBINS; i++) { if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MIN; } else if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { tcache_bin_info[i].ncached_max = (bin_infos[i].nregs << 1); } else { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MAX; } stack_nelms += tcache_bin_info[i].ncached_max; } for (; i < nhbins; i++) { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; stack_nelms += tcache_bin_info[i].ncached_max; } return false; } void tcache_prefork(tsdn_t *tsdn) { if (!config_prof && opt_tcache) { malloc_mutex_prefork(tsdn, &tcaches_mtx); } } void tcache_postfork_parent(tsdn_t *tsdn) { if (!config_prof && opt_tcache) { malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); } } void tcache_postfork_child(tsdn_t *tsdn) { if (!config_prof && opt_tcache) { malloc_mutex_postfork_child(tsdn, &tcaches_mtx); } } jemalloc-sys-0.3.2/jemalloc/src/ticker.c010064400007650000024000000002061340421340100163250ustar0000000000000000#define JEMALLOC_TICKER_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" jemalloc-sys-0.3.2/jemalloc/src/tsd.c010064400007650000024000000214211340421341300156430ustar0000000000000000#define JEMALLOC_TSD_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" /******************************************************************************/ /* Data. */ static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP __thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; __thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; bool tsd_booted = false; #elif (defined(JEMALLOC_TLS)) __thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; pthread_key_t tsd_tsd; bool tsd_booted = false; #elif (defined(_WIN32)) DWORD tsd_tsd; tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER}; bool tsd_booted = false; #else /* * This contains a mutex, but it's pretty convenient to allow the mutex code to * have a dependency on tsd. So we define the struct here, and only refer to it * by pointer in the header. */ struct tsd_init_head_s { ql_head(tsd_init_block_t) blocks; malloc_mutex_t lock; }; pthread_key_t tsd_tsd; tsd_init_head_t tsd_init_head = { ql_head_initializer(blocks), MALLOC_MUTEX_INITIALIZER }; tsd_wrapper_t tsd_boot_wrapper = { false, TSD_INITIALIZER }; bool tsd_booted = false; #endif /******************************************************************************/ void tsd_slow_update(tsd_t *tsd) { if (tsd_nominal(tsd)) { if (malloc_slow || !tsd_tcache_enabled_get(tsd) || tsd_reentrancy_level_get(tsd) > 0) { tsd->state = tsd_state_nominal_slow; } else { tsd->state = tsd_state_nominal; } } } static bool tsd_data_init(tsd_t *tsd) { /* * We initialize the rtree context first (before the tcache), since the * tcache initialization depends on it. */ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); /* * A nondeterministic seed based on the address of tsd reduces * the likelihood of lockstep non-uniform cache index * utilization among identical concurrent processes, but at the * cost of test repeatability. 
For debug builds, instead use a * deterministic seed. */ *tsd_offset_statep_get(tsd) = config_debug ? 0 : (uint64_t)(uintptr_t)tsd; return tsd_tcache_enabled_data_init(tsd); } static void assert_tsd_data_cleanup_done(tsd_t *tsd) { assert(!tsd_nominal(tsd)); assert(*tsd_arenap_get_unsafe(tsd) == NULL); assert(*tsd_iarenap_get_unsafe(tsd) == NULL); assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true); assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL); assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false); assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL); } static bool tsd_data_init_nocleanup(tsd_t *tsd) { assert(tsd->state == tsd_state_reincarnated || tsd->state == tsd_state_minimal_initialized); /* * During reincarnation, there is no guarantee that the cleanup function * will be called (deallocation may happen after all tsd destructors). * We set up tsd in a way that no cleanup is needed. */ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); *tsd_arenas_tdata_bypassp_get(tsd) = true; *tsd_tcache_enabledp_get_unsafe(tsd) = false; *tsd_reentrancy_levelp_get(tsd) = 1; assert_tsd_data_cleanup_done(tsd); return false; } tsd_t * tsd_fetch_slow(tsd_t *tsd, bool minimal) { assert(!tsd_fast(tsd)); if (tsd->state == tsd_state_nominal_slow) { /* On slow path but no work needed. */ assert(malloc_slow || !tsd_tcache_enabled_get(tsd) || tsd_reentrancy_level_get(tsd) > 0 || *tsd_arenas_tdata_bypassp_get(tsd)); } else if (tsd->state == tsd_state_uninitialized) { if (!minimal) { tsd->state = tsd_state_nominal; tsd_slow_update(tsd); /* Trigger cleanup handler registration. */ tsd_set(tsd); tsd_data_init(tsd); } else { tsd->state = tsd_state_minimal_initialized; tsd_set(tsd); tsd_data_init_nocleanup(tsd); } } else if (tsd->state == tsd_state_minimal_initialized) { if (!minimal) { /* Switch to fully initialized. */ tsd->state = tsd_state_nominal; assert(*tsd_reentrancy_levelp_get(tsd) >= 1); (*tsd_reentrancy_levelp_get(tsd))--; tsd_slow_update(tsd); tsd_data_init(tsd); } else { assert_tsd_data_cleanup_done(tsd); } } else if (tsd->state == tsd_state_purgatory) { tsd->state = tsd_state_reincarnated; tsd_set(tsd); tsd_data_init_nocleanup(tsd); } else { assert(tsd->state == tsd_state_reincarnated); } return tsd; } void * malloc_tsd_malloc(size_t size) { return a0malloc(CACHELINE_CEILING(size)); } void malloc_tsd_dalloc(void *wrapper) { a0dalloc(wrapper); } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) #ifndef _WIN32 JEMALLOC_EXPORT #endif void _malloc_thread_cleanup(void) { bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; for (i = 0; i < ncleanups; i++) { pending[i] = true; } do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); if (pending[i]) { again = true; } } } } while (again); } #endif void malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } static void tsd_do_data_cleanup(tsd_t *tsd) { prof_tdata_cleanup(tsd); iarena_cleanup(tsd); arena_cleanup(tsd); arenas_tdata_cleanup(tsd); tcache_cleanup(tsd); witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); } void tsd_cleanup(void *arg) { tsd_t *tsd = (tsd_t *)arg; switch (tsd->state) { case tsd_state_uninitialized: /* Do nothing. */ break; case tsd_state_minimal_initialized: /* This implies the thread only did free() in its life time. */ /* Fall through. */ case tsd_state_reincarnated: /* * Reincarnated means another destructor deallocated memory * after the destructor was called. 
Cleanup isn't required but * is still called for testing and completeness. */ assert_tsd_data_cleanup_done(tsd); /* Fall through. */ case tsd_state_nominal: case tsd_state_nominal_slow: tsd_do_data_cleanup(tsd); tsd->state = tsd_state_purgatory; tsd_set(tsd); break; case tsd_state_purgatory: /* * The previous time this destructor was called, we set the * state to tsd_state_purgatory so that other destructors * wouldn't cause re-creation of the tsd. This time, do * nothing, and do not request another callback. */ break; default: not_reached(); } #ifdef JEMALLOC_JET test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); int *data = tsd_test_datap_get_unsafe(tsd); if (test_callback != NULL) { test_callback(data); } #endif } tsd_t * malloc_tsd_boot0(void) { tsd_t *tsd; ncleanups = 0; if (tsd_boot0()) { return NULL; } tsd = tsd_fetch(); *tsd_arenas_tdata_bypassp_get(tsd) = true; return tsd; } void malloc_tsd_boot1(void) { tsd_boot1(); tsd_t *tsd = tsd_fetch(); /* malloc_slow has been set properly. Update tsd_slow. */ tsd_slow_update(tsd); *tsd_arenas_tdata_bypassp_get(tsd) = false; } #ifdef _WIN32 static BOOL WINAPI _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { #ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: isthreaded = true; break; #endif case DLL_THREAD_DETACH: _malloc_thread_cleanup(); break; default: break; } return true; } /* * We need to be able to say "read" here (in the "pragma section"), but have * hooked "read". We won't read for the rest of the file, so we can get away * with unhooking. */ #ifdef read # undef read #endif #ifdef _MSC_VER # ifdef _M_IX86 # pragma comment(linker, "/INCLUDE:__tls_used") # pragma comment(linker, "/INCLUDE:_tls_callback") # else # pragma comment(linker, "/INCLUDE:_tls_used") # pragma comment(linker, "/INCLUDE:tls_callback") # endif # pragma section(".CRT$XLY",long,read) #endif JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void * tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ malloc_mutex_lock(TSDN_NULL, &head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { malloc_mutex_unlock(TSDN_NULL, &head->lock); return iter->data; } } /* Insert block into list. 
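 *
 * The calling protocol is check, then initialize, then finish; a sketch
 * (the real caller lives in the tsd bootstrap path, and init_data() here
 * is a hypothetical stand-in for whatever produces the per-thread data):
 *
 *   tsd_init_block_t block;
 *   void *data = tsd_init_check_recursion(&tsd_init_head, &block);
 *   if (data != NULL) {
 *       return data;    // recursed into init on this same thread
 *   }
 *   block.data = init_data();
 *   // ... bootstrap work that may recurse into the allocator ...
 *   tsd_init_finish(&tsd_init_head, &block);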
*/ ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); return NULL; } void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); } #endif jemalloc-sys-0.3.2/jemalloc/src/witness.c010064400007650000024000000046351340421340100165520ustar0000000000000000#define JEMALLOC_WITNESS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/malloc_io.h" void witness_init(witness_t *witness, const char *name, witness_rank_t rank, witness_comp_t *comp, void *opaque) { witness->name = name; witness->rank = rank; witness->comp = comp; witness->opaque = opaque; } static void witness_lock_error_impl(const witness_list_t *witnesses, const witness_t *witness) { witness_t *w; malloc_printf(": Lock rank order reversal:"); ql_foreach(w, witnesses, link) { malloc_printf(" %s(%u)", w->name, w->rank); } malloc_printf(" %s(%u)\n", witness->name, witness->rank); abort(); } witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; static void witness_owner_error_impl(const witness_t *witness) { malloc_printf(": Should own %s(%u)\n", witness->name, witness->rank); abort(); } witness_owner_error_t *JET_MUTABLE witness_owner_error = witness_owner_error_impl; static void witness_not_owner_error_impl(const witness_t *witness) { malloc_printf(": Should not own %s(%u)\n", witness->name, witness->rank); abort(); } witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = witness_not_owner_error_impl; static void witness_depth_error_impl(const witness_list_t *witnesses, witness_rank_t rank_inclusive, unsigned depth) { witness_t *w; malloc_printf(": Should own %u lock%s of rank >= %u:", depth, (depth != 1) ? "s" : "", rank_inclusive); ql_foreach(w, witnesses, link) { malloc_printf(" %s(%u)", w->name, w->rank); } malloc_printf("\n"); abort(); } witness_depth_error_t *JET_MUTABLE witness_depth_error = witness_depth_error_impl; void witnesses_cleanup(witness_tsd_t *witness_tsd) { witness_assert_lockless(witness_tsd_tsdn(witness_tsd)); /* Do nothing. */ } void witness_prefork(witness_tsd_t *witness_tsd) { if (!config_debug) { return; } witness_tsd->forking = true; } void witness_postfork_parent(witness_tsd_t *witness_tsd) { if (!config_debug) { return; } witness_tsd->forking = false; } void witness_postfork_child(witness_tsd_t *witness_tsd) { if (!config_debug) { return; } #ifndef JEMALLOC_MUTEX_INIT_CB witness_list_t *witnesses; witnesses = &witness_tsd->witnesses; ql_new(witnesses); #endif witness_tsd->forking = false; } jemalloc-sys-0.3.2/jemalloc/src/zone.c010064400007650000024000000351411340421340100160250ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #ifndef JEMALLOC_ZONE # error "This source file is for zones on Darwin (OS X)." #endif /* Definitions of the following structs in malloc/malloc.h might be too old * for the built binary to run on newer versions of OSX. So use the newest * possible version of those structs. 
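 *
 * Darwin consults the zone's version field to decide which of the newer
 * members (memalign, free_definite_size, pressure_relief) it may touch;
 * zone_init() below sets jemalloc_zone.version = 9, so every member
 * declared here must be populated.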
*/ typedef struct _malloc_zone_t { void *reserved1; void *reserved2; size_t (*size)(struct _malloc_zone_t *, const void *); void *(*malloc)(struct _malloc_zone_t *, size_t); void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); void *(*valloc)(struct _malloc_zone_t *, size_t); void (*free)(struct _malloc_zone_t *, void *); void *(*realloc)(struct _malloc_zone_t *, void *, size_t); void (*destroy)(struct _malloc_zone_t *); const char *zone_name; unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); struct malloc_introspection_t *introspect; unsigned version; void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); } malloc_zone_t; typedef struct { vm_address_t address; vm_size_t size; } vm_range_t; typedef struct malloc_statistics_t { unsigned blocks_in_use; size_t size_in_use; size_t max_size_in_use; size_t size_allocated; } malloc_statistics_t; typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); typedef struct malloc_introspection_t { kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); size_t (*good_size)(malloc_zone_t *, size_t); boolean_t (*check)(malloc_zone_t *); void (*print)(malloc_zone_t *, boolean_t); void (*log)(malloc_zone_t *, void *); void (*force_lock)(malloc_zone_t *); void (*force_unlock)(malloc_zone_t *); void (*statistics)(malloc_zone_t *, malloc_statistics_t *); boolean_t (*zone_locked)(malloc_zone_t *); boolean_t (*enable_discharge_checking)(malloc_zone_t *); boolean_t (*disable_discharge_checking)(malloc_zone_t *); void (*discharge)(malloc_zone_t *, void *); #ifdef __BLOCKS__ void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); #else void *enumerate_unavailable_without_blocks; #endif void (*reinit_lock)(malloc_zone_t *); } malloc_introspection_t; extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); extern malloc_zone_t *malloc_default_zone(void); extern void malloc_zone_register(malloc_zone_t *zone); extern void malloc_zone_unregister(malloc_zone_t *zone); /* * The malloc_default_purgeable_zone() function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ static malloc_zone_t *default_zone, *purgeable_zone; static malloc_zone_t jemalloc_zone; static struct malloc_introspection_t jemalloc_zone_introspect; static pid_t zone_force_lock_pid = -1; /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ static size_t zone_size(malloc_zone_t *zone, const void *ptr); static void *zone_malloc(malloc_zone_t *zone, size_t size); static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); static void *zone_valloc(malloc_zone_t *zone, size_t size); static void zone_free(malloc_zone_t *zone, void *ptr); static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); static void *zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size); static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size); static void zone_destroy(malloc_zone_t *zone); static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested); static void zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed); static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); static size_t zone_good_size(malloc_zone_t *zone, size_t size); static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); static boolean_t zone_check(malloc_zone_t *zone); static void zone_print(malloc_zone_t *zone, boolean_t verbose); static void zone_log(malloc_zone_t *zone, void *address); static void zone_force_lock(malloc_zone_t *zone); static void zone_force_unlock(malloc_zone_t *zone); static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats); static boolean_t zone_locked(malloc_zone_t *zone); static void zone_reinit_lock(malloc_zone_t *zone); /******************************************************************************/ /* * Functions. */ static size_t zone_size(malloc_zone_t *zone, const void *ptr) { /* * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If * we knew that all pointers were owned by *some* zone, we could split * our zone into two parts, and use one as the default allocator and * the other as the default deallocator/reallocator. Since that will * not work in practice, we must check all pointers to assure that they * reside within a mapped extent before determining size. */ return ivsalloc(tsdn_fetch(), ptr); } static void * zone_malloc(malloc_zone_t *zone, size_t size) { return je_malloc(size); } static void * zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { return je_calloc(num, size); } static void * zone_valloc(malloc_zone_t *zone, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, PAGE, size); return ret; } static void zone_free(malloc_zone_t *zone, void *ptr) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { je_free(ptr); return; } free(ptr); } static void * zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { return je_realloc(ptr, size); } return realloc(ptr, size); } static void * zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, alignment, size); return ret; } static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { size_t alloc_size; alloc_size = ivsalloc(tsdn_fetch(), ptr); if (alloc_size != 0) { assert(alloc_size == size); je_free(ptr); return; } free(ptr); } static void zone_destroy(malloc_zone_t *zone) { /* This function should never be called. 
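 *
 * (The jemalloc zone is a statically allocated singleton that stays
 * registered for the life of the process, so no destroy path exists.)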
*/ not_reached(); } static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) { unsigned i; for (i = 0; i < num_requested; i++) { results[i] = je_malloc(size); if (!results[i]) break; } return i; } static void zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed) { unsigned i; for (i = 0; i < num_to_be_freed; i++) { zone_free(zone, to_be_freed[i]); to_be_freed[i] = NULL; } } static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { return 0; } static size_t zone_good_size(malloc_zone_t *zone, size_t size) { if (size == 0) { size = 1; } return sz_s2u(size); } static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) { return KERN_SUCCESS; } static boolean_t zone_check(malloc_zone_t *zone) { return true; } static void zone_print(malloc_zone_t *zone, boolean_t verbose) { } static void zone_log(malloc_zone_t *zone, void *address) { } static void zone_force_lock(malloc_zone_t *zone) { if (isthreaded) { /* * See the note in zone_force_unlock, below, to see why we need * this. */ assert(zone_force_lock_pid == -1); zone_force_lock_pid = getpid(); jemalloc_prefork(); } } static void zone_force_unlock(malloc_zone_t *zone) { /* * zone_force_lock and zone_force_unlock are the entry points to the * forking machinery on OS X. The tricky thing is, the child is not * allowed to unlock mutexes locked in the parent, even if owned by the * forking thread (and the mutex type we use in OS X will fail an assert * if we try). In the child, we can get away with reinitializing all * the mutexes, which has the effect of unlocking them. In the parent, * doing this would mean we wouldn't wake any waiters blocked on the * mutexes we unlock. So, we record the pid of the current thread in * zone_force_lock, and use that to detect if we're in the parent or * child here, to decide which unlock logic we need. */ if (isthreaded) { assert(zone_force_lock_pid != -1); if (getpid() == zone_force_lock_pid) { jemalloc_postfork_parent(); } else { jemalloc_postfork_child(); } zone_force_lock_pid = -1; } } static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { /* We make no effort to actually fill the values */ stats->blocks_in_use = 0; stats->size_in_use = 0; stats->max_size_in_use = 0; stats->size_allocated = 0; } static boolean_t zone_locked(malloc_zone_t *zone) { /* Pretend no lock is being held */ return false; } static void zone_reinit_lock(malloc_zone_t *zone) { /* As of OSX 10.12, this function is only used when force_unlock would * be used if the zone version were < 9. So just use force_unlock. 
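 *
 * The parent/child detection that zone_force_unlock relies on reduces to
 * the following sketch:
 *
 *   static pid_t locker = -1;
 *
 *   // prefork (zone_force_lock):
 *   locker = getpid();
 *
 *   // postfork (zone_force_unlock, and thus here too):
 *   if (getpid() == locker) {
 *       jemalloc_postfork_parent();    // parent: wake blocked waiters
 *   } else {
 *       jemalloc_postfork_child();     // child: reinitialize mutexes
 *   }
 *   locker = -1;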
*/ zone_force_unlock(zone); } static void zone_init(void) { jemalloc_zone.size = zone_size; jemalloc_zone.malloc = zone_malloc; jemalloc_zone.calloc = zone_calloc; jemalloc_zone.valloc = zone_valloc; jemalloc_zone.free = zone_free; jemalloc_zone.realloc = zone_realloc; jemalloc_zone.destroy = zone_destroy; jemalloc_zone.zone_name = "jemalloc_zone"; jemalloc_zone.batch_malloc = zone_batch_malloc; jemalloc_zone.batch_free = zone_batch_free; jemalloc_zone.introspect = &jemalloc_zone_introspect; jemalloc_zone.version = 9; jemalloc_zone.memalign = zone_memalign; jemalloc_zone.free_definite_size = zone_free_definite_size; jemalloc_zone.pressure_relief = zone_pressure_relief; jemalloc_zone_introspect.enumerator = zone_enumerator; jemalloc_zone_introspect.good_size = zone_good_size; jemalloc_zone_introspect.check = zone_check; jemalloc_zone_introspect.print = zone_print; jemalloc_zone_introspect.log = zone_log; jemalloc_zone_introspect.force_lock = zone_force_lock; jemalloc_zone_introspect.force_unlock = zone_force_unlock; jemalloc_zone_introspect.statistics = zone_statistics; jemalloc_zone_introspect.zone_locked = zone_locked; jemalloc_zone_introspect.enable_discharge_checking = NULL; jemalloc_zone_introspect.disable_discharge_checking = NULL; jemalloc_zone_introspect.discharge = NULL; #ifdef __BLOCKS__ jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; #else jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; #endif jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; } static malloc_zone_t * zone_default_get(void) { malloc_zone_t **zones = NULL; unsigned int num_zones = 0; /* * On OSX 10.12, malloc_default_zone returns a special zone that is not * present in the list of registered zones. That zone uses a "lite zone" * if one is present (apparently enabled when malloc stack logging is * enabled), or the first registered zone otherwise. In practice this * means unless malloc stack logging is enabled, the first registered * zone is the default. So get the list of zones to get the first one, * instead of relying on malloc_default_zone. */ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &num_zones)) { /* * Reset the value in case the failure happened after it was * set. */ num_zones = 0; } if (num_zones) { return zones[0]; } return malloc_default_zone(); } /* As written, this function can only promote jemalloc_zone. */ static void zone_promote(void) { malloc_zone_t *zone; do { /* * Unregister and reregister the default zone. On OSX >= 10.6, * unregistering takes the last registered zone and places it * at the location of the specified zone. Unregistering the * default zone thus makes the last registered one the default. * On OSX < 10.6, unregistering shifts all registered zones. * The first registered zone then becomes the default. */ malloc_zone_unregister(default_zone); malloc_zone_register(default_zone); /* * On OSX 10.6, having the default purgeable zone appear before * the default zone makes some things crash because it thinks it * owns the default zone allocated pointers. We thus * unregister/re-register it in order to ensure it's always * after the default zone. On OSX < 10.6, there is no purgeable * zone, so this does nothing. On OSX >= 10.6, unregistering * replaces the purgeable zone with the last registered zone * above, i.e. the default zone. Registering it again then puts * it at the end, obviously after the default zone. 
*/ if (purgeable_zone != NULL) { malloc_zone_unregister(purgeable_zone); malloc_zone_register(purgeable_zone); } zone = zone_default_get(); } while (zone != &jemalloc_zone); } JEMALLOC_ATTR(constructor) void zone_register(void) { /* * If something else replaced the system default zone allocator, don't * register jemalloc's. */ default_zone = zone_default_get(); if (!default_zone->zone_name || strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { return; } /* * The default purgeable zone is created lazily by OSX's libc. It uses * the default zone when it is created for "small" allocations * (< 15 KiB), but assumes the default zone is a scalable_zone. This * obviously fails when the default zone is the jemalloc zone, so * malloc_default_purgeable_zone() is called beforehand so that the * default purgeable zone is created when the default zone is still * a scalable_zone. As purgeable zones only exist on >= 10.6, we need * to check for the existence of malloc_default_purgeable_zone() at * run time. */ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : malloc_default_purgeable_zone(); /* Register the custom zone. At this point it won't be the default. */ zone_init(); malloc_zone_register(&jemalloc_zone); /* Promote the custom zone to be default. */ zone_promote(); } jemalloc-sys-0.3.2/jemalloc/test/include/test/btalloc.h010064400007650000024000000014561340421340100212730ustar0000000000000000/* btalloc() provides a mechanism for allocating via permuted backtraces. */ void *btalloc(size_t size, unsigned bits); #define btalloc_n_proto(n) \ void *btalloc_##n(size_t size, unsigned bits); btalloc_n_proto(0) btalloc_n_proto(1) #define btalloc_n_gen(n) \ void * \ btalloc_##n(size_t size, unsigned bits) { \ void *p; \ \ if (bits == 0) { \ p = mallocx(size, 0); \ } else { \ switch (bits & 0x1U) { \ case 0: \ p = (btalloc_0(size, bits >> 1)); \ break; \ case 1: \ p = (btalloc_1(size, bits >> 1)); \ break; \ default: not_reached(); \ } \ } \ /* Intentionally sabotage tail call optimization. */ \ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ return p; \ } jemalloc-sys-0.3.2/jemalloc/test/include/test/extent_hooks.h010064400007650000024000000230351340421340100223620ustar0000000000000000/* * Boilerplate code used for testing extent hooks via interception and * passthrough. 
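 *
 * A test typically snapshots the default hooks and installs this
 * interposing table through the arena.<i>.extent_hooks mallctl; a sketch
 * (error handling elided):
 *
 *   extent_hooks_t *new_hooks = &hooks;
 *   extent_hooks_prep();                        // capture default_hooks
 *   mallctl("arena.0.extent_hooks", NULL, NULL,
 *       (void *)&new_hooks, sizeof(extent_hooks_t *));
 *
 *   try_dalloc = false;                         // refuse deallocation
 *   called_dalloc = false;
 *   // ... drive allocation through arena 0 ...
 *   assert(called_dalloc);                      // hook intercepted it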
*/ static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind); static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind); static extent_hooks_t *default_hooks; static extent_hooks_t hooks = { extent_alloc_hook, extent_dalloc_hook, extent_destroy_hook, extent_commit_hook, extent_decommit_hook, extent_purge_lazy_hook, extent_purge_forced_hook, extent_split_hook, extent_merge_hook }; /* Control whether hook functions pass calls through to default hooks. */ static bool try_alloc = true; static bool try_dalloc = true; static bool try_destroy = true; static bool try_commit = true; static bool try_decommit = true; static bool try_purge_lazy = true; static bool try_purge_forced = true; static bool try_split = true; static bool try_merge = true; /* Set to false prior to operations, then introspect after operations. */ static bool called_alloc; static bool called_dalloc; static bool called_destroy; static bool called_commit; static bool called_decommit; static bool called_purge_lazy; static bool called_purge_forced; static bool called_split; static bool called_merge; /* Set to false prior to operations, then introspect after operations. */ static bool did_alloc; static bool did_dalloc; static bool did_destroy; static bool did_commit; static bool did_decommit; static bool did_purge_lazy; static bool did_purge_forced; static bool did_split; static bool did_merge; #if 0 # define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) #else # define TRACE_HOOK(fmt, ...) #endif static void * extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { void *ret; TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, " "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks, new_addr, size, alignment, *zero ? "true" : "false", *commit ? 
"true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook, "Wrong hook function"); called_alloc = true; if (!try_alloc) { return NULL; } ret = default_hooks->alloc(default_hooks, new_addr, size, alignment, zero, commit, 0); did_alloc = (ret != NULL); return ret; } static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook, "Wrong hook function"); called_dalloc = true; if (!try_dalloc) { return true; } err = default_hooks->dalloc(default_hooks, addr, size, committed, 0); did_dalloc = !err; return err; } static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->destroy, extent_destroy_hook, "Wrong hook function"); called_destroy = true; if (!try_destroy) { return; } default_hooks->destroy(default_hooks, addr, size, committed, 0); did_destroy = true; } static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->commit, extent_commit_hook, "Wrong hook function"); called_commit = true; if (!try_commit) { return true; } err = default_hooks->commit(default_hooks, addr, size, offset, length, 0); did_commit = !err; return err; } static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook, "Wrong hook function"); called_decommit = true; if (!try_decommit) { return true; } err = default_hooks->decommit(default_hooks, addr, size, offset, length, 0); did_decommit = !err; return err; } static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook, "Wrong hook function"); called_purge_lazy = true; if (!try_purge_lazy) { return true; } err = default_hooks->purge_lazy == NULL || 
default_hooks->purge_lazy(default_hooks, addr, size, offset, length, 0); did_purge_lazy = !err; return err; } static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook, "Wrong hook function"); called_purge_forced = true; if (!try_purge_forced) { return true; } err = default_hooks->purge_forced == NULL || default_hooks->purge_forced(default_hooks, addr, size, offset, length, 0); did_purge_forced = !err; return err; } static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, " "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, addr, size, size_a, size_b, committed ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->split, extent_split_hook, "Wrong hook function"); called_split = true; if (!try_split) { return true; } err = (default_hooks->split == NULL || default_hooks->split(default_hooks, addr, size, size_a, size_b, committed, 0)); did_split = !err; return err; } static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p " "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, addr_a, size_a, addr_b, size_b, committed ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->merge, extent_merge_hook, "Wrong hook function"); assert_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b, "Extents not mergeable"); called_merge = true; if (!try_merge) { return true; } err = (default_hooks->merge == NULL || default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b, committed, 0)); did_merge = !err; return err; } static void extent_hooks_prep(void) { size_t sz; sz = sizeof(default_hooks); assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz, NULL, 0), 0, "Unexpected mallctl() error"); } jemalloc-sys-0.3.2/jemalloc/test/include/test/jemalloc_test.h.in010064400007650000024000000107551340421341300231120ustar0000000000000000#ifdef __cplusplus extern "C" { #endif #include #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include #include #include #include #include #include #ifdef _WIN32 # include "msvc_compat/strings.h" #endif #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" #else # include #endif #include "test/jemalloc_test_defs.h" #ifdef JEMALLOC_OSSPIN # include #endif #if defined(HAVE_ALTIVEC) && !defined(__APPLE__) # include #endif #ifdef HAVE_SSE2 # include #endif /******************************************************************************/ /* * For unit tests, expose all public and private interfaces. 
*/ #ifdef JEMALLOC_UNIT_TEST # define JEMALLOC_JET # define JEMALLOC_MANGLE # include "jemalloc/internal/jemalloc_preamble.h" # include "jemalloc/internal/jemalloc_internal_includes.h" /******************************************************************************/ /* * For integration tests, expose the public jemalloc interfaces, but only * expose the minimum necessary internal utility code (to avoid re-implementing * essentially identical code within the test infrastructure). */ #elif defined(JEMALLOC_INTEGRATION_TEST) || \ defined(JEMALLOC_INTEGRATION_CPP_TEST) # define JEMALLOC_MANGLE # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/internal/jemalloc_internal_defs.h" # include "jemalloc/internal/jemalloc_internal_macros.h" static const bool config_debug = #ifdef JEMALLOC_DEBUG true #else false #endif ; # define JEMALLOC_N(n) @private_namespace@##n # include "jemalloc/internal/private_namespace.h" # include "jemalloc/internal/hooks.h" /* Hermetic headers. */ # include "jemalloc/internal/assert.h" # include "jemalloc/internal/malloc_io.h" # include "jemalloc/internal/nstime.h" # include "jemalloc/internal/util.h" /* Non-hermetic headers. */ # include "jemalloc/internal/qr.h" # include "jemalloc/internal/ql.h" /******************************************************************************/ /* * For stress tests, expose the public jemalloc interfaces with name mangling * so that they can be tested as e.g. malloc() and free(). Also expose the * public jemalloc interfaces with jet_ prefixes, so that stress tests can use * a separate allocator for their internal data structures. */ #elif defined(JEMALLOC_STRESS_TEST) # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/jemalloc_protos_jet.h" # define JEMALLOC_JET # include "jemalloc/internal/jemalloc_preamble.h" # include "jemalloc/internal/jemalloc_internal_includes.h" # include "jemalloc/internal/public_unnamespace.h" # undef JEMALLOC_JET # include "jemalloc/jemalloc_rename.h" # define JEMALLOC_MANGLE # ifdef JEMALLOC_STRESS_TESTLIB # include "jemalloc/jemalloc_mangle_jet.h" # else # include "jemalloc/jemalloc_mangle.h" # endif /******************************************************************************/ /* * This header does dangerous things, the effects of which only test code * should be subject to. */ #else # error "This header cannot be included outside a testing context" #endif /******************************************************************************/ /* * Common test utilities. */ #include "test/btalloc.h" #include "test/math.h" #include "test/mtx.h" #include "test/mq.h" #include "test/test.h" #include "test/timer.h" #include "test/thd.h" #define MEXP 19937 #include "test/SFMT.h" /******************************************************************************/ /* * Define always-enabled assertion macros, so that test assertions execute even * if assertions are disabled in the library code. 
*/ #undef assert #undef not_reached #undef not_implemented #undef assert_not_implemented #define assert(e) do { \ if (!(e)) { \ malloc_printf( \ ": %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #define not_reached() do { \ malloc_printf( \ ": %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define not_implemented() do { \ malloc_printf(": %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define assert_not_implemented(e) do { \ if (!(e)) { \ not_implemented(); \ } \ } while (0) #ifdef __cplusplus } #endif jemalloc-sys-0.3.2/jemalloc/test/include/test/jemalloc_test_defs.h.in010064400007650000024000000004421340421340100241000ustar0000000000000000#include "jemalloc/internal/jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" /* * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its * dependencies are notoriously unportable in practice. */ #undef HAVE_SSE2 #undef HAVE_ALTIVEC jemalloc-sys-0.3.2/jemalloc/test/include/test/math.h010064400007650000024000000172721340421340100206070ustar0000000000000000/* * Compute the natural log of Gamma(x), accurate to 10 decimal places. * * This implementation is based on: * * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function * [S14]. Communications of the ACM 9(9):684. */ static inline double ln_gamma(double x) { double f, z; assert(x > 0.0); if (x < 7.0) { f = 1.0; z = x; while (z < 7.0) { f *= z; z += 1.0; } x = z; f = -log(f); } else { f = 0.0; } z = 1.0 / (x * x); return f + (x-0.5) * log(x) - x + 0.918938533204673 + (((-0.000595238095238 * z + 0.000793650793651) * z - 0.002777777777778) * z + 0.083333333333333) / x; } /* * Compute the incomplete Gamma ratio for [0..x], where p is the shape * parameter, and ln_gamma_p is ln_gamma(p). * * This implementation is based on: * * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. * Applied Statistics 19:285-287. */ static inline double i_gamma(double x, double p, double ln_gamma_p) { double acu, factor, oflo, gin, term, rn, a, b, an, dif; double pn[6]; unsigned i; assert(p > 0.0); assert(x >= 0.0); if (x == 0.0) { return 0.0; } acu = 1.0e-10; oflo = 1.0e30; gin = 0.0; factor = exp(p * log(x) - x - ln_gamma_p); if (x <= 1.0 || x < p) { /* Calculation by series expansion. */ gin = 1.0; term = 1.0; rn = p; while (true) { rn += 1.0; term *= x / rn; gin += term; if (term <= acu) { gin *= factor / p; return gin; } } } else { /* Calculation by continued fraction. */ a = 1.0 - p; b = a + x + 1.0; term = 0.0; pn[0] = 1.0; pn[1] = x; pn[2] = x + 1.0; pn[3] = x * b; gin = pn[2] / pn[3]; while (true) { a += 1.0; b += 2.0; term += 1.0; an = a * term; for (i = 0; i < 2; i++) { pn[i+4] = b * pn[i+2] - an * pn[i]; } if (pn[5] != 0.0) { rn = pn[4] / pn[5]; dif = fabs(gin - rn); if (dif <= acu && dif <= acu * rn) { gin = 1.0 - factor * gin; return gin; } gin = rn; } for (i = 0; i < 4; i++) { pn[i] = pn[i+2]; } if (fabs(pn[4]) >= oflo) { for (i = 0; i < 4; i++) { pn[i] /= oflo; } } } } } /* * Given a value p in [0..1] of the lower tail area of the normal distribution, * compute the limit on the definite integral from [-inf..z] that satisfies p, * accurate to 16 decimal places. * * This implementation is based on: * * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal * distribution. Applied Statistics 37(3):477-484. 
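 *
 * For example, pt_norm(0.5) == 0.0 by symmetry, and pt_norm(0.975) is
 * approximately 1.959964, the familiar two-sided 95% critical value of
 * the standard normal distribution.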
*/ static inline double pt_norm(double p) { double q, r, ret; assert(p > 0.0 && p < 1.0); q = p - 0.5; if (fabs(q) <= 0.425) { /* p close to 1/2. */ r = 0.180625 - q * q; return q * (((((((2.5090809287301226727e3 * r + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) * r + 3.3871328727963666080e0) / (((((((5.2264952788528545610e3 * r + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) * r + 1.0); } else { if (q < 0.0) { r = p; } else { r = 1.0 - p; } assert(r > 0.0); r = sqrt(-log(r)); if (r <= 5.0) { /* p neither close to 1/2 nor 0 or 1. */ r -= 1.6; ret = ((((((((7.74545014278341407640e-4 * r + 2.27238449892691845833e-2) * r + 2.41780725177450611770e-1) * r + 1.27045825245236838258e0) * r + 3.64784832476320460504e0) * r + 5.76949722146069140550e0) * r + 4.63033784615654529590e0) * r + 1.42343711074968357734e0) / (((((((1.05075007164441684324e-9 * r + 5.47593808499534494600e-4) * r + 1.51986665636164571966e-2) * r + 1.48103976427480074590e-1) * r + 6.89767334985100004550e-1) * r + 1.67638483018380384940e0) * r + 2.05319162663775882187e0) * r + 1.0)); } else { /* p near 0 or 1. */ r -= 5.0; ret = ((((((((2.01033439929228813265e-7 * r + 2.71155556874348757815e-5) * r + 1.24266094738807843860e-3) * r + 2.65321895265761230930e-2) * r + 2.96560571828504891230e-1) * r + 1.78482653991729133580e0) * r + 5.46378491116411436990e0) * r + 6.65790464350110377720e0) / (((((((2.04426310338993978564e-15 * r + 1.42151175831644588870e-7) * r + 1.84631831751005468180e-5) * r + 7.86869131145613259100e-4) * r + 1.48753612908506148525e-2) * r + 1.36929880922735805310e-1) * r + 5.99832206555887937690e-1) * r + 1.0)); } if (q < 0.0) { ret = -ret; } return ret; } } /* * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute * the upper limit on the definite integral from [0..z] that satisfies p, * accurate to 12 decimal places. * * This implementation is based on: * * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of * the Chi^2 distribution. Applied Statistics 24(3):385-388. * * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. */ static inline double pt_chi2(double p, double df, double ln_gamma_df_2) { double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; unsigned i; assert(p >= 0.0 && p < 1.0); assert(df > 0.0); e = 5.0e-7; aa = 0.6931471805; xx = 0.5 * df; c = xx - 1.0; if (df < -1.24 * log(p)) { /* Starting approximation for small Chi^2. */ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); if (ch - e < 0.0) { return ch; } } else { if (df > 0.32) { x = pt_norm(p); /* * Starting approximation using Wilson and Hilferty * estimate. */ p1 = 0.222222 / df; ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); /* Starting approximation for p tending to 1. 
*/ if (ch > 2.2 * df + 6.0) { ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + ln_gamma_df_2); } } else { ch = 0.4; a = log(1.0 - p); while (true) { q = ch; p1 = 1.0 + ch * (4.67 + ch); p2 = ch * (6.73 + ch * (6.66 + ch)); t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch * (13.32 + 3.0 * ch)) / p2; ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + c * aa) * p2 / p1) / t; if (fabs(q / ch - 1.0) - 0.01 <= 0.0) { break; } } } } for (i = 0; i < 20; i++) { /* Calculation of seven-term Taylor series. */ q = ch; p1 = 0.5 * ch; if (p1 < 0.0) { return -1.0; } p2 = p - i_gamma(p1, xx, ln_gamma_df_2); t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; a = 0.5 * t - b * c; s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + 60.0 * a))))) / 420.0; s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * a)))) / 2520.0; s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * (889.0 + 1740.0 * a))) / 5040.0; s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - b * (s4 - b * (s5 - b * s6)))))); if (fabs(q / ch - 1.0) <= e) { break; } } return ch; } /* * Given a value p in [0..1] and Gamma distribution shape and scale parameters, * compute the upper limit on the definite integral from [0..z] that satisfies * p. */ static inline double pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale; } jemalloc-sys-0.3.2/jemalloc/test/include/test/mq.h010064400007650000024000000055031340421340100202650ustar0000000000000000void mq_nanosleep(unsigned ns); /* * Simple templated message queue implementation that relies on only mutexes for * synchronization (which reduces portability issues). Given the following * setup: * * typedef struct mq_msg_s mq_msg_t; * struct mq_msg_s { * mq_msg(mq_msg_t) link; * [message data] * }; * mq_gen(, mq_, mq_t, mq_msg_t, link) * * The API is as follows: * * bool mq_init(mq_t *mq); * void mq_fini(mq_t *mq); * unsigned mq_count(mq_t *mq); * mq_msg_t *mq_tryget(mq_t *mq); * mq_msg_t *mq_get(mq_t *mq); * void mq_put(mq_t *mq, mq_msg_t *msg); * * The message queue linkage embedded in each message is to be treated as * externally opaque (no need to initialize or clean up externally). mq_fini() * does not perform any cleanup of messages, since it knows nothing of their * payloads. 
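 *
 * Putting the pieces together, a queue of integer messages looks roughly
 * like this (a sketch):
 *
 *   typedef struct mq_msg_s mq_msg_t;
 *   struct mq_msg_s {
 *       mq_msg(mq_msg_t) link;
 *       int value;
 *   };
 *   mq_gen(static, mq_, mq_t, mq_msg_t, link)
 *
 *   mq_t mq;
 *   mq_msg_t msg;
 *   if (mq_init(&mq)) {
 *       abort();                 // mutex initialization failed
 *   }
 *   msg.value = 42;
 *   mq_put(&mq, &msg);
 *   assert(mq_count(&mq) == 1);
 *   mq_msg_t *m = mq_get(&mq);   // blocks, with exponential backoff
 *   mq_fini(&mq);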
*/ #define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) #define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ typedef struct { \ mtx_t lock; \ ql_head(a_mq_msg_type) msgs; \ unsigned count; \ } a_mq_type; \ a_attr bool \ a_prefix##init(a_mq_type *mq) { \ \ if (mtx_init(&mq->lock)) { \ return true; \ } \ ql_new(&mq->msgs); \ mq->count = 0; \ return false; \ } \ a_attr void \ a_prefix##fini(a_mq_type *mq) { \ mtx_fini(&mq->lock); \ } \ a_attr unsigned \ a_prefix##count(a_mq_type *mq) { \ unsigned count; \ \ mtx_lock(&mq->lock); \ count = mq->count; \ mtx_unlock(&mq->lock); \ return count; \ } \ a_attr a_mq_msg_type * \ a_prefix##tryget(a_mq_type *mq) { \ a_mq_msg_type *msg; \ \ mtx_lock(&mq->lock); \ msg = ql_first(&mq->msgs); \ if (msg != NULL) { \ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ mq->count--; \ } \ mtx_unlock(&mq->lock); \ return msg; \ } \ a_attr a_mq_msg_type * \ a_prefix##get(a_mq_type *mq) { \ a_mq_msg_type *msg; \ unsigned ns; \ \ msg = a_prefix##tryget(mq); \ if (msg != NULL) { \ return msg; \ } \ \ ns = 1; \ while (true) { \ mq_nanosleep(ns); \ msg = a_prefix##tryget(mq); \ if (msg != NULL) { \ return msg; \ } \ if (ns < 1000*1000*1000) { \ /* Double sleep time, up to max 1 second. */ \ ns <<= 1; \ if (ns > 1000*1000*1000) { \ ns = 1000*1000*1000; \ } \ } \ } \ } \ a_attr void \ a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ \ mtx_lock(&mq->lock); \ ql_elm_new(msg, a_field); \ ql_tail_insert(&mq->msgs, msg, a_field); \ mq->count++; \ mtx_unlock(&mq->lock); \ } jemalloc-sys-0.3.2/jemalloc/test/include/test/mtx.h010064400007650000024000000011101340421340100204460ustar0000000000000000/* * mtx is a slightly simplified version of malloc_mutex. This code duplication * is unfortunate, but there are allocator bootstrapping considerations that * would leak into the test infrastructure if malloc_mutex were used directly * in tests. */ typedef struct { #ifdef _WIN32 CRITICAL_SECTION lock; #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #else pthread_mutex_t lock; #endif } mtx_t; bool mtx_init(mtx_t *mtx); void mtx_fini(mtx_t *mtx); void mtx_lock(mtx_t *mtx); void mtx_unlock(mtx_t *mtx); jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-alti.h010064400007650000024000000134271340421340100213540ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-alti.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) * pseudorandom number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt */ #ifndef SFMT_ALTI_H #define SFMT_ALTI_H /** * This function represents the recursion formula in AltiVec and BIG ENDIAN. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @return output */ JEMALLOC_ALWAYS_INLINE vector unsigned int vec_recursion(vector unsigned int a, vector unsigned int b, vector unsigned int c, vector unsigned int d) { const vector unsigned int sl1 = ALTI_SL1; const vector unsigned int sr1 = ALTI_SR1; #ifdef ONLY64 const vector unsigned int mask = ALTI_MSK64; const vector unsigned char perm_sl = ALTI_SL2_PERM64; const vector unsigned char perm_sr = ALTI_SR2_PERM64; #else const vector unsigned int mask = ALTI_MSK; const vector unsigned char perm_sl = ALTI_SL2_PERM; const vector unsigned char perm_sr = ALTI_SR2_PERM; #endif vector unsigned int v, w, x, y, z; x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); v = a; y = vec_sr(b, sr1); z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); w = vec_sl(d, sl1); z = vec_xor(z, w); y = vec_and(y, mask); v = vec_xor(v, x); z = vec_xor(z, y); z = vec_xor(z, v); return z; } /** * This function fills the internal state array with pseudorandom * integers. */ static inline void gen_rand_all(sfmt_t *ctx) { int i; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated.
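 *
 * @note Annotation added for clarity (a reading of the code below, not
 * upstream documentation): the loops implement a sliding recursion
 * window. The first N output blocks are seeded from the internal state,
 * the main loop then recurs entirely within `array`, and the trailing
 * loops copy the last N generated blocks back into ctx->sfmt so the
 * internal state remains consistent for subsequent calls.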
*/ static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j].s = array[j + size - N].s; } for (; i < size; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; ctx->sfmt[j++].s = r; r1 = r2; r2 = r; } } #ifndef ONLY64 #if defined(__APPLE__) #define ALTI_SWAP (vector unsigned char) \ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) #else #define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} #endif /** * This function swaps the high and low 32-bit halves of the 64-bit * integers in the user-specified array. * * @param array a 128-bit array to be swapped. * @param size size of the 128-bit array. */ static inline void swap(w128_t *array, int size) { int i; const vector unsigned char perm = ALTI_SWAP; for (i = 0; i < size; i++) { array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); } } #endif #endif jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params.h010064400007650000024000000102761340421340100217050ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS_H #define SFMT_PARAMS_H #if !defined(MEXP) #ifdef __GNUC__ #warning "MEXP is not defined. I assume MEXP is 19937."
#endif #define MEXP 19937 #endif /*----------------- BASIC DEFINITIONS -----------------*/ /** Mersenne Exponent. The period of the sequence * is a multiple of 2^MEXP-1. * #define MEXP 19937 */ /** SFMT generator has an internal state array of 128-bit integers, * and N is its size. */ #define N (MEXP / 128 + 1) /** N32 is the size of internal state array when regarded as an array * of 32-bit integers.*/ #define N32 (N * 4) /** N64 is the size of internal state array when regarded as an array * of 64-bit integers.*/ #define N64 (N * 2) /*---------------------- the parameters of SFMT following definitions are in paramsXXXX.h file. ----------------------*/ /** the pick up position of the array. #define POS1 122 */ /** the parameter of shift left as four 32-bit registers. #define SL1 18 */ /** the parameter of shift left as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SL2 1 */ /** the parameter of shift right as four 32-bit registers. #define SR1 11 */ /** the parameter of shift right as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SR2 1 */ /** A bitmask, used in the recursion. These parameters are introduced * to break symmetry of SIMD. #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U */ /** These definitions are part of a 128-bit period certification vector. #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xc98e126aU */ #if MEXP == 607 #include "test/SFMT-params607.h" #elif MEXP == 1279 #include "test/SFMT-params1279.h" #elif MEXP == 2281 #include "test/SFMT-params2281.h" #elif MEXP == 4253 #include "test/SFMT-params4253.h" #elif MEXP == 11213 #include "test/SFMT-params11213.h" #elif MEXP == 19937 #include "test/SFMT-params19937.h" #elif MEXP == 44497 #include "test/SFMT-params44497.h" #elif MEXP == 86243 #include "test/SFMT-params86243.h" #elif MEXP == 132049 #include "test/SFMT-params132049.h" #elif MEXP == 216091 #include "test/SFMT-params216091.h" #else #ifdef __GNUC__ #error "MEXP is not valid." #undef MEXP #else #undef MEXP #endif #endif #endif /* SFMT_PARAMS_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params11213.h010064400007650000024000000067561340421340100223050ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS11213_H #define SFMT_PARAMS11213_H #define POS1 68 #define SL1 14 #define SL2 3 #define SR1 7 #define SR2 3 #define MSK1 0xeffff7fbU #define MSK2 0xffffffefU #define MSK3 0xdfdfbfffU #define MSK4 0x7fffdbfdU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xe8148000U #define PARITY4 0xd0c7afa3U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" #endif /* SFMT_PARAMS11213_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params1279.h010064400007650000024000000067401340421340100222310ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS1279_H #define SFMT_PARAMS1279_H #define POS1 7 #define SL1 14 #define SL2 3 #define SR1 5 #define SR2 1 #define MSK1 0xf7fefffdU #define MSK2 0x7fefcfffU #define MSK3 0xaff3ef3fU #define MSK4 0xb5ffff7fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x20000000U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" #endif /* SFMT_PARAMS1279_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params132049.h010064400007650000024000000067541340421340100223760ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS132049_H #define SFMT_PARAMS132049_H #define POS1 110 #define SL1 19 #define SL2 1 #define SR1 21 #define SR2 1 #define MSK1 0xffffbb5fU #define MSK2 0xfb6ebf95U #define MSK3 0xfffefffaU #define MSK4 0xcff77fffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xcb520000U #define PARITY4 0xc7e91c7dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" #endif /* SFMT_PARAMS132049_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params19937.h010064400007650000024000000067501340421340100223240ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS19937_H #define SFMT_PARAMS19937_H #define POS1 122 #define SL1 18 #define SL2 1 #define SR1 11 #define SR2 1 #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x13c9e684U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" #endif /* SFMT_PARAMS19937_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params216091.h010064400007650000024000000067561340421340100224000ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS216091_H #define SFMT_PARAMS216091_H #define POS1 627 #define SL1 11 #define SL2 3 #define SR1 10 #define SR2 1 #define MSK1 0xbff7bff7U #define MSK2 0xbfffffffU #define MSK3 0xbffffa7fU #define MSK4 0xffddfbfbU #define PARITY1 0xf8000001U #define PARITY2 0x89e80709U #define PARITY3 0x3bd2b64bU #define PARITY4 0x0c64b1e4U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" #endif /* SFMT_PARAMS216091_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params2281.h010064400007650000024000000067401340421340100222230ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS2281_H #define SFMT_PARAMS2281_H #define POS1 12 #define SL1 19 #define SL2 1 #define SR1 5 #define SR2 1 #define MSK1 0xbff7ffbfU #define MSK2 0xfdfffffeU #define MSK3 0xf7ffef7fU #define MSK4 0xf2f7cbbfU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x41dfa600U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" #endif /* SFMT_PARAMS2281_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params4253.h010064400007650000024000000067401340421340100222240ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS4253_H #define SFMT_PARAMS4253_H #define POS1 17 #define SL1 20 #define SL2 1 #define SR1 7 #define SR2 1 #define MSK1 0x9f7bffffU #define MSK2 0x9fffff5fU #define MSK3 0x3efffffbU #define MSK4 0xfffff7bbU #define PARITY1 0xa8000001U #define PARITY2 0xaf5390a3U #define PARITY3 0xb740b3f8U #define PARITY4 0x6c11486dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" #endif /* SFMT_PARAMS4253_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params44497.h010064400007650000024000000067561340421340100223310ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS44497_H #define SFMT_PARAMS44497_H #define POS1 330 #define SL1 5 #define SL2 3 #define SR1 9 #define SR2 3 #define MSK1 0xeffffffbU #define MSK2 0xdfbebfffU #define MSK3 0xbfbf7befU #define MSK4 0x9ffd7bffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xa3ac4000U #define PARITY4 0xecc1327aU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" #endif /* SFMT_PARAMS44497_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params607.h010064400007650000024000000067461340421340100221510ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS607_H #define SFMT_PARAMS607_H #define POS1 2 #define SL1 15 #define SL2 3 #define SR1 13 #define SR2 3 #define MSK1 0xfdff37ffU #define MSK2 0xef7f3f7dU #define MSK3 0xff777b7dU #define MSK4 0x7ff7fb2fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x5986f054U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" #endif /* SFMT_PARAMS607_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-params86243.h010064400007650000024000000067541340421340100223220ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS86243_H #define SFMT_PARAMS86243_H #define POS1 366 #define SL1 6 #define SL2 7 #define SR1 19 #define SR2 1 #define MSK1 0xfdbffbffU #define MSK2 0xbff7ff3fU #define MSK3 0xfd77efffU #define MSK4 0xbf9ff3ffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xe9528d85U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) #define ALTI_SL2_PERM64 \ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" #endif /* SFMT_PARAMS86243_H */ jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT-sse2.h010064400007650000024000000121231340421340100212670ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-sse2.h * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * @note We assume LITTLE ENDIAN in this file * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #ifndef SFMT_SSE2_H #define SFMT_SSE2_H /** * This function represents the recursion formula. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @param mask 128-bit mask * @return output */ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { __m128i v, x, y, z; x = _mm_load_si128(a); y = _mm_srli_epi32(*b, SR1); z = _mm_srli_si128(c, SR2); v = _mm_slli_epi32(d, SL1); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, v); x = _mm_slli_si128(x, SL2); y = _mm_and_si128(y, mask); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, y); return z; } /** * This function fills the internal state array with pseudorandom * integers. */ static inline void gen_rand_all(sfmt_t *ctx) { int i; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated.
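 *
 * @note Illustrative sketch only; the routing through the public
 * fill_array32()/fill_array64() wrappers declared in SFMT.h, the
 * MEXP == 19937 sizing (N = 156, so N32 = N * 4 = 624), and the
 * 16-byte alignment requirement of this SSE2 path are assumptions of
 * this annotation, not guarantees stated in this header:
 * @verbatim
   sfmt_t *ctx = init_gen_rand(1234);
   uint32_t buf[624];            // must be 16-byte aligned for SSE2
   fill_array32(ctx, buf, 624);  // size: a multiple of 4, at least N32
   fini_gen_rand(ctx);
 @endverbatim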
*/ static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { r = _mm_load_si128(&array[j + size - N].si); _mm_store_si128(&ctx->sfmt[j].si, r); } for (; i < size; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); _mm_store_si128(&ctx->sfmt[j++].si, r); r1 = r2; r2 = r; } } #endif jemalloc-sys-0.3.2/jemalloc/test/include/test/SFMT.h010064400007650000024000000123231340421340100204170ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom * number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt * * @note We assume that your system has inttypes.h. 
If your system * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, * and you have to define PRIu64 and PRIx64 in this file as follows: * @verbatim typedef unsigned int uint32_t typedef unsigned long long uint64_t #define PRIu64 "llu" #define PRIx64 "llx" @endverbatim * uint32_t must be exactly 32-bit unsigned integer type (no more, no * less), and uint64_t must be exactly 64-bit unsigned integer type. * PRIu64 and PRIx64 are used for printf function to print 64-bit * unsigned int and 64-bit unsigned int in hexadecimal format. */ #ifndef SFMT_H #define SFMT_H typedef struct sfmt_s sfmt_t; uint32_t gen_rand32(sfmt_t *ctx); uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); uint64_t gen_rand64(sfmt_t *ctx); uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); void fill_array32(sfmt_t *ctx, uint32_t *array, int size); void fill_array64(sfmt_t *ctx, uint64_t *array, int size); sfmt_t *init_gen_rand(uint32_t seed); sfmt_t *init_by_array(uint32_t *init_key, int key_length); void fini_gen_rand(sfmt_t *ctx); const char *get_idstring(void); int get_min_array_size32(void); int get_min_array_size64(void); /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ static inline double to_real1(uint32_t v) { return v * (1.0/4294967295.0); /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ static inline double genrand_real1(sfmt_t *ctx) { return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ static inline double to_real2(uint32_t v) { return v * (1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ static inline double genrand_real2(sfmt_t *ctx) { return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ static inline double to_real3(uint32_t v) { return (((double)v) + 0.5)*(1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ static inline double genrand_real3(sfmt_t *ctx) { return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution*/ static inline double to_res53(uint64_t v) { return v * (1.0/18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32 bit integers */ static inline double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ static inline double genrand_res53(sfmt_t *ctx) { return to_res53(gen_rand64(ctx)); } /** generates a random number on [0,1) with 53-bit resolution using 32bit integer. */ static inline double genrand_res53_mix(sfmt_t *ctx) { uint32_t x, y; x = gen_rand32(ctx); y = gen_rand32(ctx); return to_res53_mix(x, y); } #endif jemalloc-sys-0.3.2/jemalloc/test/include/test/test.h010064400007650000024000000322341340421340100206300ustar0000000000000000#define ASSERT_BUFSIZE 256 #define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ t a_ = (a); \ t b_ = (b); \ if (!(a_ cmp b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) " #cmp " (%s) --> " \ "%" pri " " #neg_cmp " %" pri ": ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_, b_); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_ptr_eq(a, b, ...) 
assert_cmp(void *, a, b, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ ==, "p", __VA_ARGS__) #define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ ==, "p", __VA_ARGS__) #define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) #define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) #define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) #define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) #define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) #define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) #define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) #define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) #define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) #define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) #define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) #define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) #define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) #define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) #define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) #define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) #define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) #define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) #define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) #define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) #define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) #define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) #define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) #define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) #define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ !=, "ld", __VA_ARGS__) #define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ ==, "ld", __VA_ARGS__) #define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ >=, "ld", __VA_ARGS__) #define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ >, "ld", __VA_ARGS__) #define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ <, "ld", __VA_ARGS__) #define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ <=, "ld", __VA_ARGS__) #define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ a, b, ==, !=, "lu", __VA_ARGS__) #define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ a, b, !=, ==, "lu", __VA_ARGS__) #define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ a, b, <, >=, "lu", __VA_ARGS__) #define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ a, b, <=, >, "lu", __VA_ARGS__) #define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ a, b, >=, <, "lu", __VA_ARGS__) #define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ a, b, >, <=, "lu", __VA_ARGS__) #define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ !=, "qd", __VA_ARGS__) #define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ ==, "qd", __VA_ARGS__) #define assert_qd_lt(a, b, ...) 
assert_cmp(long long, a, b, <, \ >=, "qd", __VA_ARGS__) #define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ >, "qd", __VA_ARGS__) #define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ <, "qd", __VA_ARGS__) #define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ <=, "qd", __VA_ARGS__) #define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ a, b, ==, !=, "qu", __VA_ARGS__) #define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ a, b, !=, ==, "qu", __VA_ARGS__) #define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ a, b, <, >=, "qu", __VA_ARGS__) #define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ a, b, <=, >, "qu", __VA_ARGS__) #define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ a, b, >=, <, "qu", __VA_ARGS__) #define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ a, b, >, <=, "qu", __VA_ARGS__) #define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ !=, "jd", __VA_ARGS__) #define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ ==, "jd", __VA_ARGS__) #define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ >=, "jd", __VA_ARGS__) #define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ >, "jd", __VA_ARGS__) #define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ <, "jd", __VA_ARGS__) #define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ <=, "jd", __VA_ARGS__) #define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ !=, "ju", __VA_ARGS__) #define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ ==, "ju", __VA_ARGS__) #define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ >=, "ju", __VA_ARGS__) #define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ >, "ju", __VA_ARGS__) #define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ <, "ju", __VA_ARGS__) #define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ <=, "ju", __VA_ARGS__) #define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ !=, "zd", __VA_ARGS__) #define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ ==, "zd", __VA_ARGS__) #define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ >=, "zd", __VA_ARGS__) #define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ >, "zd", __VA_ARGS__) #define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ <, "zd", __VA_ARGS__) #define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ <=, "zd", __VA_ARGS__) #define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ !=, "zu", __VA_ARGS__) #define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ ==, "zu", __VA_ARGS__) #define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ >=, "zu", __VA_ARGS__) #define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ >, "zu", __VA_ARGS__) #define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ <, "zu", __VA_ARGS__) #define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ <=, "zu", __VA_ARGS__) #define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ !=, FMTd32, __VA_ARGS__) #define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ ==, FMTd32, __VA_ARGS__) #define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ >=, FMTd32, __VA_ARGS__) #define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ >, FMTd32, __VA_ARGS__) #define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ <, FMTd32, __VA_ARGS__) #define assert_d32_gt(a, b, ...) 
assert_cmp(int32_t, a, b, >, \ <=, FMTd32, __VA_ARGS__) #define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ !=, FMTu32, __VA_ARGS__) #define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ ==, FMTu32, __VA_ARGS__) #define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ >=, FMTu32, __VA_ARGS__) #define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ >, FMTu32, __VA_ARGS__) #define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ <, FMTu32, __VA_ARGS__) #define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ <=, FMTu32, __VA_ARGS__) #define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ !=, FMTd64, __VA_ARGS__) #define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ ==, FMTd64, __VA_ARGS__) #define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ >=, FMTd64, __VA_ARGS__) #define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ >, FMTd64, __VA_ARGS__) #define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ <, FMTd64, __VA_ARGS__) #define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ <=, FMTd64, __VA_ARGS__) #define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ !=, FMTu64, __VA_ARGS__) #define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ ==, FMTu64, __VA_ARGS__) #define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ >=, FMTu64, __VA_ARGS__) #define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ >, FMTu64, __VA_ARGS__) #define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ <, FMTu64, __VA_ARGS__) #define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ <=, FMTu64, __VA_ARGS__) #define assert_b_eq(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ == b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) == (%s) --> %s != %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_b_ne(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ != b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) != (%s) --> %s == %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) #define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) #define assert_str_eq(a, b, ...) do { \ if (strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) same as (%s) --> " \ "\"%s\" differs from \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_str_ne(a, b, ...) 
do { \ if (!strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) differs from (%s) --> " \ "\"%s\" same as \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_not_reached(...) do { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Unreachable code reached: ", \ __func__, __FILE__, __LINE__); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } while (0) /* * If this enum changes, corresponding changes in test/test.sh.in are also * necessary. */ typedef enum { test_status_pass = 0, test_status_skip = 1, test_status_fail = 2, test_status_count = 3 } test_status_t; typedef void (test_t)(void); #define TEST_BEGIN(f) \ static void \ f(void) { \ p_test_init(#f); #define TEST_END \ goto label_test_end; \ label_test_end: \ p_test_fini(); \ } #define test(...) \ p_test(__VA_ARGS__, NULL) #define test_no_reentrancy(...) \ p_test_no_reentrancy(__VA_ARGS__, NULL) #define test_no_malloc_init(...) \ p_test_no_malloc_init(__VA_ARGS__, NULL) #define test_skip_if(e) do { \ if (e) { \ test_skip("%s:%s:%d: Test skipped: (%s)", \ __func__, __FILE__, __LINE__, #e); \ goto label_test_end; \ } \ } while (0) bool test_is_reentrant(); void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); /* For private use by macros. */ test_status_t p_test(test_t *t, ...); test_status_t p_test_no_reentrancy(test_t *t, ...); test_status_t p_test_no_malloc_init(test_t *t, ...); void p_test_init(const char *name); void p_test_fini(void); void p_test_fail(const char *prefix, const char *message); jemalloc-sys-0.3.2/jemalloc/test/include/test/thd.h010064400007650000024000000003401340421340100204210ustar0000000000000000/* Abstraction layer for threading in tests. */ #ifdef _WIN32 typedef HANDLE thd_t; #else typedef pthread_t thd_t; #endif void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); void thd_join(thd_t thd, void **ret); jemalloc-sys-0.3.2/jemalloc/test/include/test/timer.h010064400007650000024000000004701340421340100207660ustar0000000000000000/* Simple timer, for use in benchmark reporting. */ typedef struct { nstime_t t0; nstime_t t1; } timedelta_t; void timer_start(timedelta_t *timer); void timer_stop(timedelta_t *timer); uint64_t timer_usec(const timedelta_t *timer); void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); jemalloc-sys-0.3.2/jemalloc/test/integration/aligned_alloc.c010064400007650000024000000057371340421341300223550ustar0000000000000000#include "test/jemalloc_test.h" #define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. 
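 * The identical purge() helper recurs in mallocx.c and posix_memalign.c below, for the same reason.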
*/ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; alignment = 0; set_errno(0); p = aligned_alloc(alignment, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment); for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { set_errno(0); p = aligned_alloc(alignment + 1, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { ps[i] = aligned_alloc(alignment, size); if (ps[i] == NULL) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } purge(); } #undef NITER } TEST_END int main(void) { return test( test_alignment_errors, test_oom_errors, test_alignment_and_size); } jemalloc-sys-0.3.2/jemalloc/test/integration/allocated.c010064400007650000024000000060021340421340100215070ustar0000000000000000#include "test/jemalloc_test.h" static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; void * thd_start(void *arg) { int err; void *p; uint64_t a0, a1, d0, d1; uint64_t *ap0, *ap1, *dp0, *dp1; size_t sz, usize; sz = sizeof(a0); if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*ap0, a0, "\"thread.allocatedp\" should provide a pointer to internal " "storage"); sz = sizeof(d0); if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } 
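	/*
	 * "thread.deallocatedp" mirrors "thread.allocatedp" above: it returns
	 * a pointer into jemalloc's internal per-thread counter storage, so
	 * later values can be read by dereferencing dp0 rather than issuing
	 * another mallctl() call.
	 */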
sz = sizeof(dp0); if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*dp0, d0, "\"thread.deallocatedp\" should provide a pointer to internal " "storage"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() error"); sz = sizeof(a1); mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); sz = sizeof(ap1); mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); assert_u64_eq(*ap1, a1, "Dereferenced \"thread.allocatedp\" value should equal " "\"thread.allocated\" value"); assert_ptr_eq(ap0, ap1, "Pointer returned by \"thread.allocatedp\" should not change"); usize = malloc_usable_size(p); assert_u64_le(a0 + usize, a1, "Allocated memory counter should increase by at least the amount " "explicitly allocated"); free(p); sz = sizeof(d1); mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); sz = sizeof(dp1); mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); assert_u64_eq(*dp1, d1, "Dereferenced \"thread.deallocatedp\" value should equal " "\"thread.deallocated\" value"); assert_ptr_eq(dp0, dp1, "Pointer returned by \"thread.deallocatedp\" should not change"); assert_u64_le(d0 + usize, d1, "Deallocated memory counter should increase by at least the amount " "explicitly deallocated"); return NULL; label_ENOENT: assert_false(config_stats, "ENOENT should only be returned if stats are disabled"); test_skip("\"thread.allocated\" mallctl not available"); return NULL; } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread); } jemalloc-sys-0.3.2/jemalloc/test/integration/cpp/basic.cpp010064400007650000024000000006551340421340100217720ustar0000000000000000#include #include "test/jemalloc_test.h" TEST_BEGIN(test_basic) { auto foo = new long(4); assert_ptr_not_null(foo, "Unexpected new[] failure"); delete foo; // Test nullptr handling. foo = nullptr; delete foo; auto bar = new long; assert_ptr_not_null(bar, "Unexpected new failure"); delete bar; // Test nullptr handling. bar = nullptr; delete bar; } TEST_END int main() { return test( test_basic); } jemalloc-sys-0.3.2/jemalloc/test/integration/extent.c010064400007650000024000000176461340421340100211060ustar0000000000000000#include "test/jemalloc_test.h" #include "test/extent_hooks.h" static bool check_background_thread_enabled(void) { bool enabled; size_t sz = sizeof(bool); int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); if (ret == ENOENT) { return false; } assert_d_eq(ret, 0, "Unexpected mallctl error"); return enabled; } static void test_extent_body(unsigned arena_ind) { void *p; size_t large0, large1, large2, sz; size_t purge_mib[3]; size_t purge_miblen; int flags; bool xallocx_success_a, xallocx_success_b, xallocx_success_c; flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; /* Get large size classes. 
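 * "arenas.lextent.<i>.size" enumerates them in ascending order, so large0 < large1 < large2.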
*/ sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected arenas.lextent.0.size failure"); assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL, 0), 0, "Unexpected arenas.lextent.1.size failure"); assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL, 0), 0, "Unexpected arenas.lextent.2.size failure"); /* Test dalloc/decommit/purge cascade. */ purge_miblen = sizeof(purge_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), 0, "Unexpected mallctlnametomib() failure"); purge_mib[1] = (size_t)arena_ind; called_alloc = false; try_alloc = true; try_dalloc = false; try_decommit = false; p = mallocx(large0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_true(called_alloc, "Expected alloc call"); called_dalloc = false; called_decommit = false; did_purge_lazy = false; did_purge_forced = false; called_split = false; xallocx_success_a = (xallocx(p, large0, 0, flags) == large0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_a) { assert_true(called_dalloc, "Expected dalloc call"); assert_true(called_decommit, "Expected decommit call"); assert_true(did_purge_lazy || did_purge_forced, "Expected purge"); } assert_true(called_split, "Expected split call"); dallocx(p, flags); try_dalloc = true; /* Test decommit/commit and observe split/merge. */ try_dalloc = false; try_decommit = true; p = mallocx(large0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_decommit = false; did_commit = false; called_split = false; did_split = false; did_merge = false; xallocx_success_b = (xallocx(p, large0, 0, flags) == large0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_b) { assert_true(did_split, "Expected split"); } xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2); if (did_split) { assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); } if (xallocx_success_b && xallocx_success_c) { assert_true(did_merge, "Expected merge"); } dallocx(p, flags); try_dalloc = true; try_decommit = false; /* Make sure non-large allocation succeeds. */ p = mallocx(42, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, flags); } static void test_manual_hook_auto_arena(void) { unsigned narenas; size_t old_size, new_size, sz; size_t hooks_mib[3]; size_t hooks_miblen; extent_hooks_t *new_hooks, *old_hooks; extent_hooks_prep(); sz = sizeof(unsigned); /* Get number of auto arenas. */ assert_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); if (narenas == 1) { return; } /* Install custom extent hooks on arena 1 (might not be initialized). 
*/ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); hooks_mib[1] = 1; old_size = sizeof(extent_hooks_t *); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, (void *)&new_hooks, new_size), 0, "Unexpected extent_hooks error"); static bool auto_arena_created = false; if (old_hooks != &hooks) { assert_b_eq(auto_arena_created, false, "Expected auto arena 1 created only once."); auto_arena_created = true; } } static void test_manual_hook_body(void) { unsigned arena_ind; size_t old_size, new_size, sz; size_t hooks_mib[3]; size_t hooks_miblen; extent_hooks_t *new_hooks, *old_hooks; extent_hooks_prep(); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); /* Install custom extent hooks. */ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); hooks_mib[1] = (size_t)arena_ind; old_size = sizeof(extent_hooks_t *); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, (void *)&new_hooks, new_size), 0, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->alloc, extent_alloc_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->commit, extent_commit_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->decommit, extent_decommit_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->split, extent_split_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->merge, extent_merge_hook, "Unexpected extent_hooks error"); if (!check_background_thread_enabled()) { test_extent_body(arena_ind); } /* Restore extent hooks. */ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error"); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, NULL, 0), 0, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->alloc, default_hooks->alloc, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->commit, default_hooks->commit, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->decommit, default_hooks->decommit, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->split, default_hooks->split, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->merge, default_hooks->merge, "Unexpected extent_hooks error"); } TEST_BEGIN(test_extent_manual_hook) { test_manual_hook_auto_arena(); test_manual_hook_body(); /* Test failure paths. 
*/ try_split = false; test_manual_hook_body(); try_merge = false; test_manual_hook_body(); try_purge_lazy = false; try_purge_forced = false; test_manual_hook_body(); try_split = try_merge = try_purge_lazy = try_purge_forced = true; } TEST_END TEST_BEGIN(test_extent_auto_hook) { unsigned arena_ind; size_t new_size, sz; extent_hooks_t *new_hooks; extent_hooks_prep(); sz = sizeof(unsigned); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure"); test_skip_if(check_background_thread_enabled()); test_extent_body(arena_ind); } TEST_END int main(void) { return test( test_extent_manual_hook, test_extent_auto_hook); } jemalloc-sys-0.3.2/jemalloc/test/integration/extent.sh010064400007650000024000000001271340421340100212600ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="junk:false" fi jemalloc-sys-0.3.2/jemalloc/test/integration/mallocx.c010064400007650000024000000131461340421341300212300ustar0000000000000000#include "test/jemalloc_test.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. */ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_overflow) { size_t largemax; largemax = get_large_size(get_nlarge()-1); assert_ptr_null(mallocx(largemax+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", largemax+1); assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(mallocx(SIZE_T_MAX, 0), "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); } TEST_END TEST_BEGIN(test_oom) { size_t largemax; bool oom; void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. 
*/ largemax = get_large_size(get_nlarge()-1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { ptrs[i] = mallocx(largemax, 0); if (ptrs[i] == NULL) { oom = true; } } assert_true(oom, "Expected OOM during series of calls to mallocx(size=%zu, 0)", largemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { if (ptrs[i] != NULL) { dallocx(ptrs[i], 0); } } purge(); #if LG_SIZEOF_PTR == 3 assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x8000000000000000ULL)), "Expected OOM for mallocx()"); assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x80000000)), "Expected OOM for mallocx()"); #else assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), "Expected OOM for mallocx()"); #endif } TEST_END TEST_BEGIN(test_basic) { #define MAXSZ (((size_t)1) << 23) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { size_t nsz, rsz; void *p; nsz = nallocx(sz, 0); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=0) error", sz); rsz = sallocx(p, 0); assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); dallocx(p, 0); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=0) error", sz); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error", nsz); rsz = sallocx(p, 0); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); dallocx(p, 0); purge(); } #undef MAXSZ } TEST_END TEST_BEGIN(test_alignment_and_size) { const char *percpu_arena; size_t sz = sizeof(percpu_arena); if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || strcmp(percpu_arena, "disabled") != 0) { test_skip("test_alignment_and_size skipped: " "not working with percpu arena."); }; #define MAXALIGN (((size_t)1) << 23) #define NITER 4 size_t nsz, rsz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); assert_zu_ne(nsz, 0, "nallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); assert_ptr_not_null(ps[i], "mallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); rsz = sallocx(ps[i], 0); assert_zu_ge(rsz, sz, "Real size smaller than expected for " "alignment=%zu, size=%zu", alignment, sz); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch for " "alignment=%zu, size=%zu", alignment, sz); assert_ptr_null( (void *)((uintptr_t)ps[i] & (alignment-1)), "%p inadequately aligned for" " alignment=%zu, size=%zu", ps[i], alignment, sz); total += rsz; if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { dallocx(ps[i], 0); ps[i] = NULL; } } } purge(); } #undef MAXALIGN #undef NITER } TEST_END int main(void) { return test( test_overflow, test_oom, test_basic, test_alignment_and_size); } jemalloc-sys-0.3.2/jemalloc/test/integration/mallocx.sh010064400007650000024000000001271340421340100214100ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="junk:false" fi 
jemalloc-sys-0.3.2/jemalloc/test/integration/MALLOCX_ARENA.c010064400007650000024000000026411340421340100215710ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 10 static bool have_dss = #ifdef JEMALLOC_DSS true #else false #endif ; void * thd_start(void *arg) { unsigned thread_ind = (unsigned)(uintptr_t)arg; unsigned arena_ind; void *p; size_t sz; sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Error in arenas.create"); if (thread_ind % 4 != 3) { size_t mib[3]; size_t miblen = sizeof(mib) / sizeof(size_t); const char *dss_precs[] = {"disabled", "primary", "secondary"}; unsigned prec_ind = thread_ind % (sizeof(dss_precs)/sizeof(char*)); const char *dss = dss_precs[prec_ind]; int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT; assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Error in mallctlnametomib()"); mib[1] = arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, sizeof(const char *)), expected_err, "Error in mallctlbymib()"); } p = mallocx(1, MALLOCX_ARENA(arena_ind)); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); return NULL; } TEST_BEGIN(test_MALLOCX_ARENA) { thd_t thds[NTHREADS]; unsigned i; for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)(uintptr_t)i); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } } TEST_END int main(void) { return test( test_MALLOCX_ARENA); } jemalloc-sys-0.3.2/jemalloc/test/integration/overflow.c010064400007650000024000000025371340421341300214360ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_overflow) { unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; void *p; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nlextents - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); assert_ptr_null(malloc(max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(malloc(SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); assert_ptr_null(calloc(1, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(calloc(1, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() OOM"); assert_ptr_null(realloc(p, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(realloc(p, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); free(p); } TEST_END int main(void) { return test( test_overflow); } jemalloc-sys-0.3.2/jemalloc/test/integration/posix_memalign.c010064400007650000024000000055021340421340100225760ustar0000000000000000#include "test/jemalloc_test.h" #define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. 
*/ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; for (alignment = 0; alignment < sizeof(void *); alignment++) { assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, "Expected error for invalid alignment %zu", alignment); } for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { err = posix_memalign(&ps[i], alignment, size); if (err) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } purge(); } #undef NITER } TEST_END int main(void) { return test( test_alignment_errors, test_oom_errors, test_alignment_and_size); } jemalloc-sys-0.3.2/jemalloc/test/integration/rallocx.c010064400007650000024000000135201340421341300212310ustar0000000000000000#include "test/jemalloc_test.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return ret; } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } TEST_BEGIN(test_grow_and_shrink) { void *p, *q; size_t tsz; #define NCYCLES 3 unsigned i, j; #define NSZS 1024 size_t szs[NSZS]; #define MAXSZ ZU(12 * 1024 * 1024) p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); szs[0] = sallocx(p, 0); for (i = 0; i < NCYCLES; i++) { for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { q = rallocx(p, szs[j-1]+1, 0); assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j-1], szs[j-1]+1); szs[j] = sallocx(q, 0); assert_zu_ne(szs[j], szs[j-1]+1, "Expected size to be at least: %zu", szs[j-1]+1); p = q; } for (j--; j > 0; j--) { q = rallocx(p, szs[j-1], 0); assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j], szs[j-1]); tsz = sallocx(q, 0); assert_zu_eq(tsz, szs[j-1], "Expected size=%zu, got size=%zu", szs[j-1], tsz); p = q; } } dallocx(p, 0); #undef MAXSZ #undef NSZS #undef NCYCLES } TEST_END static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { bool ret = false; const uint8_t *buf = (const uint8_t *)p; size_t i; for (i = 0; i < len; i++) { uint8_t b = buf[offset+i]; if (b != c) { test_fail("Allocation at %p (len=%zu) contains %#x " "rather than %#x at offset %zu", p, len, b, c, offset+i); ret = true; } } return ret; } TEST_BEGIN(test_zero) { void *p, *q; size_t psz, qsz, i, j; size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; #define FILL_BYTE 0xaaU #define RANGE 2048 for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { size_t start_size = start_sizes[i]; p = mallocx(start_size, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); psz = sallocx(p, 0); assert_false(validate_fill(p, 0, 0, psz), "Expected zeroed memory"); memset(p, FILL_BYTE, psz); assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); for (j = 1; j < RANGE; j++) { q = rallocx(p, start_size+j, MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error"); qsz = sallocx(q, 0); if (q != p || qsz != psz) { assert_false(validate_fill(q, FILL_BYTE, 0, psz), "Expected filled memory"); assert_false(validate_fill(q, 0, psz, qsz-psz), "Expected zeroed memory"); } if (psz != qsz) { memset((void *)((uintptr_t)q+psz), FILL_BYTE, qsz-psz); psz = qsz; } p = q; } assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); dallocx(p, 0); } #undef FILL_BYTE } TEST_END TEST_BEGIN(test_align) { void *p, *q; size_t align; #define MAX_ALIGN (ZU(1) << 25) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { q = rallocx(p, 1, MALLOCX_ALIGN(align)); assert_ptr_not_null(q, "Unexpected rallocx() error for align=%zu", align); assert_ptr_null( (void *)((uintptr_t)q & (align-1)), "%p inadequately aligned for align=%zu", q, align); p = q; } dallocx(p, 0); #undef MAX_ALIGN } TEST_END TEST_BEGIN(test_lg_align_and_zero) { void *p, *q; unsigned lg_align; size_t sz; #define MAX_LG_ALIGN 25 #define MAX_VALIDATE (ZU(1) << 22) lg_align = 0; p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error for lg_align=%u", lg_align); assert_ptr_null( (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), "%p inadequately 
aligned for lg_align=%u", q, lg_align); sz = sallocx(q, 0); if ((sz << 1) <= MAX_VALIDATE) { assert_false(validate_fill(q, 0, 0, sz), "Expected zeroed memory"); } else { assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), "Expected zeroed memory"); assert_false(validate_fill( (void *)((uintptr_t)q+sz-MAX_VALIDATE), 0, 0, MAX_VALIDATE), "Expected zeroed memory"); } p = q; } dallocx(p, 0); #undef MAX_VALIDATE #undef MAX_LG_ALIGN } TEST_END TEST_BEGIN(test_overflow) { size_t largemax; void *p; largemax = get_large_size(get_nlarge()-1); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_ptr_null(rallocx(p, largemax+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); dallocx(p, 0); } TEST_END int main(void) { return test( test_grow_and_shrink, test_zero, test_align, test_lg_align_and_zero, test_overflow); } jemalloc-sys-0.3.2/jemalloc/test/integration/sdallocx.c010064400007650000024000000020311340421340100213660ustar0000000000000000#include "test/jemalloc_test.h" #define MAXALIGN (((size_t)1) << 22) #define NITER 3 TEST_BEGIN(test_basic) { void *ptr = mallocx(64, 0); sdallocx(ptr, 64, 0); } TEST_END TEST_BEGIN(test_alignment_and_size) { size_t nsz, sz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); total += nsz; if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { sdallocx(ps[i], sz, MALLOCX_ALIGN(alignment)); ps[i] = NULL; } } } } } TEST_END int main(void) { return test_no_reentrancy( test_basic, test_alignment_and_size); } jemalloc-sys-0.3.2/jemalloc/test/integration/thread_arena.c010064400007650000024000000034571340421340100222070ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 10 void * thd_start(void *arg) { unsigned main_arena_ind = *(unsigned *)arg; void *p; unsigned arena_ind; size_t size; int err; p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); free(p); size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, (void *)&main_arena_ind, sizeof(main_arena_ind)))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } assert_u_eq(arena_ind, main_arena_ind, "Arena index should be same as for main thread"); return NULL; } static void mallctl_failure(int err) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } TEST_BEGIN(test_thread_arena) { void *p; int err; thd_t thds[NTHREADS]; unsigned i; p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); unsigned arena_ind, old_arena_ind; size_t sz = sizeof(unsigned); 
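	/*
	 * Create a fresh arena and bind the main thread to it via
	 * "thread.arena"; each child thread then binds itself to the same
	 * arena index and verifies in thd_start() that the setting
	 * round-trips.
	 */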
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Arena creation failure"); size_t size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size, (void *)&arena_ind, sizeof(arena_ind))) != 0) { mallctl_failure(err); } for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)&arena_ind); } for (i = 0; i < NTHREADS; i++) { intptr_t join_ret; thd_join(thds[i], (void *)&join_ret); assert_zd_eq(join_ret, 0, "Unexpected thread join error"); } free(p); } TEST_END int main(void) { return test( test_thread_arena); } jemalloc-sys-0.3.2/jemalloc/test/integration/thread_tcache_enabled.c010064400007650000024000000044651340421340100240220ustar0000000000000000#include "test/jemalloc_test.h" void * thd_start(void *arg) { bool e0, e1; size_t sz = sizeof(bool); assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); if (e0) { e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); } e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); return NULL; } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread); } jemalloc-sys-0.3.2/jemalloc/test/integration/xallocx.c010064400007650000024000000234241340421340100212400ustar0000000000000000#include "test/jemalloc_test.h" /* * Use a separate arena for xallocx() extension/contraction tests so that * internal allocation e.g. by heap profiling can't interpose allocations where * xallocx() would ordinarily be able to extend. 
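 * arena_ind() below creates that arena lazily on first use and caches the index in a static variable.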
*/ static unsigned arena_ind(void) { static unsigned ind = 0; if (ind == 0) { size_t sz = sizeof(ind); assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL, 0), 0, "Unexpected mallctl failure creating arena"); } return ind; } TEST_BEGIN(test_same_size) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_no_move) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, sz-42, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_no_move_fail) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz + 5, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nsmall(void) { return get_nsizes_impl("arenas.nbins"); } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } static size_t get_small_size(size_t ind) { return get_size_impl("arenas.bin.0.size", ind); } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } TEST_BEGIN(test_size) { size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test smallest supported size. */ assert_zu_eq(xallocx(p, 1, 0, 0), small0, "Unexpected xallocx() behavior"); /* Test largest supported size. */ assert_zu_le(xallocx(p, largemax, 0, 0), largemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_size_extra_overflow) { size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, largemax, 1, 0), largemax, "Unexpected xallocx() behavior"); /* Test overflow such that largemax-size underflows. 
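 * With size > largemax, the size_t expression largemax - size wraps around, so the clamping logic cannot rely on it.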
*/ assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_small) { size_t small0, small1, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_large) { int flags = MALLOCX_ARENA(arena_ind()); size_t smallmax, large1, large2, large3, largemax; void *p; /* Get size classes. */ smallmax = get_small_size(get_nsmall()-1); large1 = get_large_size(1); large2 = get_large_size(2); large3 = get_large_size(3); largemax = get_large_size(get_nlarge()-1); p = mallocx(large3, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, smallmax, 0, flags), large1, "Unexpected xallocx() behavior"); if (xallocx(p, large3, 0, flags) != large3) { p = rallocx(p, large3, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } /* Test size decrease with non-zero extra. */ assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ assert_zu_le(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, largemax+1, 0, flags), large3, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); if (xallocx(p, large3, 0, flags) != large3) { p = rallocx(p, large3, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } /* Test size+extra overflow. 
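 * Here size + extra exceeds largemax, so xallocx() must clamp the request rather than let the sum wrap.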
*/ assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax, "Unexpected xallocx() behavior"); dallocx(p, flags); } TEST_END static void print_filled_extents(const void *p, uint8_t c, size_t len) { const uint8_t *pc = (const uint8_t *)p; size_t i, range0; uint8_t c0; malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); range0 = 0; c0 = pc[0]; for (i = 0; i < len; i++) { if (pc[i] != c0) { malloc_printf(" %#x[%zu..%zu)", c0, range0, i); range0 = i; c0 = pc[i]; } } malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i); } static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { const uint8_t *pc = (const uint8_t *)p; bool err; size_t i; for (i = offset, err = false; i < offset+len; i++) { if (pc[i] != c) { err = true; } } if (err) { print_filled_extents(p, c, offset + len); } return err; } static void test_zero(size_t szmin, size_t szmax) { int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; size_t sz, nsz; void *p; #define FILL_BYTE 0x7aU sz = szmax; p = mallocx(sz, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", sz); /* * Fill with non-zero so that non-debug builds are more likely to detect * errors. */ memset(p, FILL_BYTE, sz); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); /* Shrink in place so that we can expect growing in place to succeed. */ sz = szmin; if (xallocx(p, sz, 0, flags) != sz) { p = rallocx(p, sz, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); for (sz = szmin; sz < szmax; sz = nsz) { nsz = nallocx(sz+1, flags); if (xallocx(p, sz+1, 0, flags) != nsz) { p = rallocx(p, sz+1, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); assert_false(validate_fill(p, 0x00, sz, nsz-sz), "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); assert_false(validate_fill(p, FILL_BYTE, 0, nsz), "Memory not filled: nsz=%zu", nsz); } dallocx(p, flags); } TEST_BEGIN(test_zero_large) { size_t large0, large1; /* Get size classes. 
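 * test_zero() then grows an allocation from large1 up to large0 * 2, checking that newly extended bytes arrive zeroed.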
*/ large0 = get_large_size(0); large1 = get_large_size(1); test_zero(large1, large0 * 2); } TEST_END int main(void) { return test( test_same_size, test_extra_no_move, test_no_move_fail, test_size, test_size_extra_overflow, test_extra_small, test_extra_large, test_zero_large); } jemalloc-sys-0.3.2/jemalloc/test/integration/xallocx.sh010064400007650000024000000001271340421340100214230ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="junk:false" fi jemalloc-sys-0.3.2/jemalloc/test/src/btalloc.c010064400007650000024000000001571340421340100174500ustar0000000000000000#include "test/jemalloc_test.h" void * btalloc(size_t size, unsigned bits) { return btalloc_0(size, bits); } jemalloc-sys-0.3.2/jemalloc/test/src/btalloc_0.c010064400007650000024000000000621340421340100176620ustar0000000000000000#include "test/jemalloc_test.h" btalloc_n_gen(0) jemalloc-sys-0.3.2/jemalloc/test/src/btalloc_1.c010064400007650000024000000000621340421340100176630ustar0000000000000000#include "test/jemalloc_test.h" btalloc_n_gen(1) jemalloc-sys-0.3.2/jemalloc/test/src/math.c010064400007650000024000000000601340421340100167520ustar0000000000000000#define MATH_C_ #include "test/jemalloc_test.h" jemalloc-sys-0.3.2/jemalloc/test/src/mq.c010064400007650000024000000007121340421340100164420ustar0000000000000000#include "test/jemalloc_test.h" /* * Sleep for approximately ns nanoseconds. No lower *nor* upper bound on sleep * time is guaranteed. */ void mq_nanosleep(unsigned ns) { assert(ns <= 1000*1000*1000); #ifdef _WIN32 Sleep(ns / 1000); #else { struct timespec timeout; if (ns < 1000*1000*1000) { timeout.tv_sec = 0; timeout.tv_nsec = ns; } else { timeout.tv_sec = 1; timeout.tv_nsec = 0; } nanosleep(&timeout, NULL); } #endif } jemalloc-sys-0.3.2/jemalloc/test/src/mtx.c010064400007650000024000000025371340421340100166440ustar0000000000000000#include "test/jemalloc_test.h" #ifndef _CRT_SPINCOUNT #define _CRT_SPINCOUNT 4000 #endif bool mtx_init(mtx_t *mtx) { #ifdef _WIN32 if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) { return true; } #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) mtx->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mtx->lock = 0; #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) { return true; } pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); if (pthread_mutex_init(&mtx->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); return true; } pthread_mutexattr_destroy(&attr); #endif return false; } void mtx_fini(mtx_t *mtx) { #ifdef _WIN32 #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) #elif (defined(JEMALLOC_OSSPIN)) #else pthread_mutex_destroy(&mtx->lock); #endif } void mtx_lock(mtx_t *mtx) { #ifdef _WIN32 EnterCriticalSection(&mtx->lock); #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock_lock(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mtx->lock); #else pthread_mutex_lock(&mtx->lock); #endif } void mtx_unlock(mtx_t *mtx) { #ifdef _WIN32 LeaveCriticalSection(&mtx->lock); #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock_unlock(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mtx->lock); #else pthread_mutex_unlock(&mtx->lock); #endif } jemalloc-sys-0.3.2/jemalloc/test/src/SFMT.c010064400007650000024000000501701340421340100166010ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and 
Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.c * @brief SIMD oriented Fast Mersenne Twister(SFMT) * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #define SFMT_C_ #include "test/jemalloc_test.h" #include "test/SFMT-params.h" #if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(ONLY64) && !defined(BIG_ENDIAN64) #if defined(__GNUC__) #error "-DONLY64 must be specified with -DBIG_ENDIAN64" #endif #undef ONLY64 #endif /*------------------------------------------------------ 128-bit SIMD data type for Altivec, SSE2 or standard C ------------------------------------------------------*/ #if defined(HAVE_ALTIVEC) /** 128-bit data structure */ union W128_T { vector unsigned int s; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #elif defined(HAVE_SSE2) /** 128-bit data structure */ union W128_T { __m128i si; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #else /** 128-bit data structure */ struct W128_T { uint32_t u[4]; }; /** 128-bit data type */ typedef struct W128_T w128_t; #endif struct sfmt_s { /** the 128-bit internal state array */ w128_t sfmt[N]; /** index counter to the 32-bit internal state array */ int idx; /** a flag: it is 0 if and only if the internal state is not yet * initialized. 
*/ int initialized; }; /*-------------------------------------- FILE GLOBAL VARIABLES internal state, index counter and flag --------------------------------------*/ /** a parity check vector which certifies the period of 2^{MEXP} */ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; /*---------------- STATIC FUNCTIONS ----------------*/ static inline int idxof(int i); #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) static inline void rshift128(w128_t *out, w128_t const *in, int shift); static inline void lshift128(w128_t *out, w128_t const *in, int shift); #endif static inline void gen_rand_all(sfmt_t *ctx); static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); static inline uint32_t func1(uint32_t x); static inline uint32_t func2(uint32_t x); static void period_certification(sfmt_t *ctx); #if defined(BIG_ENDIAN64) && !defined(ONLY64) static inline void swap(w128_t *array, int size); #endif #if defined(HAVE_ALTIVEC) #include "test/SFMT-alti.h" #elif defined(HAVE_SSE2) #include "test/SFMT-sse2.h" #endif /** * This function simulates a 64-bit index of LITTLE ENDIAN * on a BIG ENDIAN machine. */ #ifdef ONLY64 static inline int idxof(int i) { return i ^ 1; } #else static inline int idxof(int i) { return i; } #endif /** * This function simulates SIMD 128-bit right shift in standard C. * The 128-bit integer given in \b in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. * @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif /** * This function simulates SIMD 128-bit left shift in standard C. * The 128-bit integer given in \b in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. 
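 * (Mechanism, readable from the bodies above and below: the four 32-bit
 * words are assembled into two 64-bit halves, each half is shifted, and
 * the bits that cross the 64-bit boundary are OR-ed into the other
 * half, as in oh |= tl >> (64 - shift * 8).)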
* @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #ifdef ONLY64 static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif #endif /** * This function represents the recursion formula. * @param r output * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] ^ (d->u[3] << SL1); } #else static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1); } #endif #endif #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) /** * This function fills the internal state array with pseudorandom * integers. */ static inline void gen_rand_all(sfmt_t *ctx) { int i; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } for (; i < N; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. 
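 *
 * The implementation streams recursion results directly into the
 * caller's array, then copies the final N 128-bit words back into
 * ctx->sfmt so that subsequent calls continue from a consistent state.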
*/ static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < N; i++) { do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < size - N; i++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j] = array[j + size - N]; } for (; i < size; i++, j++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; ctx->sfmt[j] = array[i]; } } #endif #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) static inline void swap(w128_t *array, int size) { int i; uint32_t x, y; for (i = 0; i < size; i++) { x = array[i].u[0]; y = array[i].u[2]; array[i].u[0] = array[i].u[1]; array[i].u[2] = array[i].u[3]; array[i].u[1] = x; array[i].u[3] = y; } } #endif /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func1(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1664525UL; } /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func2(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1566083941UL; } /** * This function certifies the period of 2^{MEXP} */ static void period_certification(sfmt_t *ctx) { int inner = 0; int i, j; uint32_t work; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; for (i = 0; i < 4; i++) inner ^= psfmt32[idxof(i)] & parity[i]; for (i = 16; i > 0; i >>= 1) inner ^= inner >> i; inner &= 1; /* check OK */ if (inner == 1) { return; } /* check failed, so modify the state */ for (i = 0; i < 4; i++) { work = 1; for (j = 0; j < 32; j++) { if ((work & parity[i]) != 0) { psfmt32[idxof(i)] ^= work; return; } work = work << 1; } } } /*---------------- PUBLIC FUNCTIONS ----------------*/ /** * This function returns the identification string. * The string shows the word size, the Mersenne exponent, * and all parameters of this generator. */ const char *get_idstring(void) { return IDSTR; } /** * This function returns the minimum size of array used for \b * fill_array32() function. * @return minimum size of array used for fill_array32() function. */ int get_min_array_size32(void) { return N32; } /** * This function returns the minimum size of array used for \b * fill_array64() function. * @return minimum size of array used for fill_array64() function. */ int get_min_array_size64(void) { return N64; } #ifndef ONLY64 /** * This function generates and returns a 32-bit pseudorandom number. * init_gen_rand or init_by_array must be called before this function. * @return 32-bit pseudorandom number */ uint32_t gen_rand32(sfmt_t *ctx) { uint32_t r; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; assert(ctx->initialized); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } r = psfmt32[ctx->idx++]; return r; } /* Generate a random integer in [0..limit). */ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { uint32_t ret, above; above = 0xffffffffU - (0xffffffffU % limit); while (1) { ret = gen_rand32(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #endif /** * This function generates and returns a 64-bit pseudorandom number. 
* init_gen_rand or init_by_array must be called before this function. * The function gen_rand64 should not be called after gen_rand32, * unless an initialization is again executed. * @return 64-bit pseudorandom number */ uint64_t gen_rand64(sfmt_t *ctx) { #if defined(BIG_ENDIAN64) && !defined(ONLY64) uint32_t r1, r2; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; #else uint64_t r; uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; #endif assert(ctx->initialized); assert(ctx->idx % 2 == 0); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } #if defined(BIG_ENDIAN64) && !defined(ONLY64) r1 = psfmt32[ctx->idx]; r2 = psfmt32[ctx->idx + 1]; ctx->idx += 2; return ((uint64_t)r2 << 32) | r1; #else r = psfmt64[ctx->idx / 2]; ctx->idx += 2; return r; #endif } /* Generate a random integer in [0..limit). */ uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { uint64_t ret, above; above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); while (1) { ret = gen_rand64(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #ifndef ONLY64 /** * This function generates pseudorandom 32-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 624 and a * multiple of four. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 32-bit integers are filled * by this function. The pointer to the array must be \b "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 32-bit pseudorandom integers to be * generated. size must be a multiple of 4, and greater than or equal * to (MEXP / 128 + 1) * 4. * * @note \b memalign or \b posix_memalign is available to get aligned * memory. Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 4 == 0); assert(size >= N32); gen_rand_array(ctx, (w128_t *)array, size / 4); ctx->idx = N32; } #endif /** * This function generates pseudorandom 64-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 312 and a * multiple of two. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 64-bit integers are filled * by this function. The pointer to the array must be "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 64-bit pseudorandom integers to be * generated. size must be a multiple of 2, and greater than or equal * to (MEXP / 128 + 1) * 2 * * @note \b memalign or \b posix_memalign is available to get aligned * memory. 
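 *
 * A minimal usage sketch under those constraints (buffer name and
 * error handling are illustrative only, not part of this API):
 *
 *   uint64_t *buf;
 *   int n = get_min_array_size64();
 *   if (posix_memalign((void **)&buf, 16, n * sizeof(uint64_t)) != 0) {
 *       abort();
 *   }
 *   fill_array64(ctx, buf, n);   // ctx freshly returned by init_gen_rand()
 *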
Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 2 == 0); assert(size >= N64); gen_rand_array(ctx, (w128_t *)array, size / 2); ctx->idx = N32; #if defined(BIG_ENDIAN64) && !defined(ONLY64) swap((w128_t *)array, size /2); #endif } /** * This function initializes the internal state array with a 32-bit * integer seed. * * @param seed a 32-bit integer used as the seed. */ sfmt_t *init_gen_rand(uint32_t seed) { void *p; sfmt_t *ctx; int i; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; psfmt32[idxof(0)] = seed; for (i = 1; i < N32; i++) { psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] ^ (psfmt32[idxof(i - 1)] >> 30)) + i; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } /** * This function initializes the internal state array, * with an array of 32-bit integers used as the seeds * @param init_key the array of 32-bit integers, used as a seed. * @param key_length the length of init_key. */ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { void *p; sfmt_t *ctx; int i, j, count; uint32_t r; int lag; int mid; int size = N * 4; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; if (size >= 623) { lag = 11; } else if (size >= 68) { lag = 7; } else if (size >= 39) { lag = 5; } else { lag = 3; } mid = (size - lag) / 2; memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); if (key_length + 1 > N32) { count = key_length + 1; } else { count = N32; } r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] ^ psfmt32[idxof(N32 - 1)]); psfmt32[idxof(mid)] += r; r += key_length; psfmt32[idxof(mid + lag)] += r; psfmt32[idxof(0)] = r; count--; for (i = 1, j = 0; (j < count) && (j < key_length); j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += init_key[j] + i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (; j < count; j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (j = 0; j < N32; j++) { r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] ^= r; r -= i; psfmt32[idxof((i + mid + lag) % N32)] ^= r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } void fini_gen_rand(sfmt_t *ctx) { assert(ctx != NULL); ctx->initialized = 0; free(ctx); } jemalloc-sys-0.3.2/jemalloc/test/src/test.c010064400007650000024000000107551340421341300170170ustar0000000000000000#include "test/jemalloc_test.h" /* Test status state. */ static unsigned test_count = 0; static test_status_t test_counts[test_status_count] = {0, 0, 0}; static test_status_t test_status = test_status_pass; static const char * test_name = ""; /* Reentrancy testing helpers. 
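 * Each hook fires at most once per test run: do_hook() below records
 * that it ran, clears the hook pointer, and then performs
 * NUM_REENTRANT_ALLOCS malloc/free pairs of doubling size (1, 2, 4,
 * ... bytes) so allocator paths are exercised while the allocator is
 * already on the call stack.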
*/ #define NUM_REENTRANT_ALLOCS 20 typedef enum { non_reentrant = 0, libc_reentrant = 1, arena_new_reentrant = 2 } reentrancy_t; static reentrancy_t reentrancy; static bool libc_hook_ran = false; static bool arena_new_hook_ran = false; static const char * reentrancy_t_str(reentrancy_t r) { switch (r) { case non_reentrant: return "non-reentrant"; case libc_reentrant: return "libc-reentrant"; case arena_new_reentrant: return "arena_new-reentrant"; default: unreachable(); } } static void do_hook(bool *hook_ran, void (**hook)()) { *hook_ran = true; *hook = NULL; size_t alloc_size = 1; for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) { free(malloc(alloc_size)); alloc_size *= 2; } } static void libc_reentrancy_hook() { do_hook(&libc_hook_ran, &hooks_libc_hook); } static void arena_new_reentrancy_hook() { do_hook(&arena_new_hook_ran, &hooks_arena_new_hook); } /* Actual test infrastructure. */ bool test_is_reentrant() { return reentrancy != non_reentrant; } JEMALLOC_FORMAT_PRINTF(1, 2) void test_skip(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); malloc_printf("\n"); test_status = test_status_skip; } JEMALLOC_FORMAT_PRINTF(1, 2) void test_fail(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); malloc_printf("\n"); test_status = test_status_fail; } static const char * test_status_string(test_status_t test_status) { switch (test_status) { case test_status_pass: return "pass"; case test_status_skip: return "skip"; case test_status_fail: return "fail"; default: not_reached(); } } void p_test_init(const char *name) { test_count++; test_status = test_status_pass; test_name = name; } void p_test_fini(void) { test_counts[test_status]++; malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy), test_status_string(test_status)); } static test_status_t p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) { test_status_t ret; if (do_malloc_init) { /* * Make sure initialization occurs prior to running tests. * Tests are special because they may use internal facilities * prior to triggering initialization as a side effect of * calling into the public API. */ if (nallocx(1, 0) == 0) { malloc_printf("Initialization error"); return test_status_fail; } } ret = test_status_pass; for (; t != NULL; t = va_arg(ap, test_t *)) { /* Non-reentrant run. */ reentrancy = non_reentrant; hooks_arena_new_hook = hooks_libc_hook = NULL; t(); if (test_status > ret) { ret = test_status; } /* Reentrant run. */ if (do_reentrant) { reentrancy = libc_reentrant; hooks_arena_new_hook = NULL; hooks_libc_hook = &libc_reentrancy_hook; t(); if (test_status > ret) { ret = test_status; } reentrancy = arena_new_reentrant; hooks_libc_hook = NULL; hooks_arena_new_hook = &arena_new_reentrancy_hook; t(); if (test_status > ret) { ret = test_status; } } } malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", test_status_string(test_status_pass), test_counts[test_status_pass], test_count, test_status_string(test_status_skip), test_counts[test_status_skip], test_count, test_status_string(test_status_fail), test_counts[test_status_fail], test_count); return ret; } test_status_t p_test(test_t *t, ...) { test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); ret = p_test_impl(true, true, t, ap); va_end(ap); return ret; } test_status_t p_test_no_reentrancy(test_t *t, ...) 
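/*
 * Like p_test(), but skips the reentrant re-runs. Test drivers reach it
 * through the test_no_reentrancy() wrapper, which is assumed (per the
 * framework headers) to supply the trailing NULL that terminates
 * p_test_impl()'s va_arg loop; e.g. stress/microbench.c's main() is:
 *
 *   return test_no_reentrancy(
 *       test_malloc_vs_mallocx,
 *       test_free_vs_dallocx,
 *       test_dallocx_vs_sdallocx,
 *       test_mus_vs_sallocx,
 *       test_sallocx_vs_nallocx);
 */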
{ test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); ret = p_test_impl(true, false, t, ap); va_end(ap); return ret; } test_status_t p_test_no_malloc_init(test_t *t, ...) { test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); /* * We also omit reentrancy from bootstrapping tests, since we don't * (yet) care about general reentrancy during bootstrapping. */ ret = p_test_impl(false, false, t, ap); va_end(ap); return ret; } void p_test_fail(const char *prefix, const char *message) { malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); test_status = test_status_fail; } jemalloc-sys-0.3.2/jemalloc/test/src/thd.c010064400007650000024000000013671340421340100166130ustar0000000000000000#include "test/jemalloc_test.h" #ifdef _WIN32 void thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); if (*thd == NULL) { test_fail("Error in CreateThread()\n"); } } void thd_join(thd_t thd, void **ret) { if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { DWORD exit_code; GetExitCodeThread(thd, (LPDWORD) &exit_code); *ret = (void *)(uintptr_t)exit_code; } } #else void thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { if (pthread_create(thd, NULL, proc, arg) != 0) { test_fail("Error in pthread_create()\n"); } } void thd_join(thd_t thd, void **ret) { pthread_join(thd, ret); } #endif jemalloc-sys-0.3.2/jemalloc/test/src/timer.c010064400007650000024000000021031340421340100171410ustar0000000000000000#include "test/jemalloc_test.h" void timer_start(timedelta_t *timer) { nstime_init(&timer->t0, 0); nstime_update(&timer->t0); } void timer_stop(timedelta_t *timer) { nstime_copy(&timer->t1, &timer->t0); nstime_update(&timer->t1); } uint64_t timer_usec(const timedelta_t *timer) { nstime_t delta; nstime_copy(&delta, &timer->t1); nstime_subtract(&delta, &timer->t0); return nstime_ns(&delta) / 1000; } void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { uint64_t t0 = timer_usec(a); uint64_t t1 = timer_usec(b); uint64_t mult; size_t i = 0; size_t j, n; /* Whole. */ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); i += n; if (i >= buflen) { return; } mult = 1; for (j = 0; j < n; j++) { mult *= 10; } /* Decimal. */ n = malloc_snprintf(&buf[i], buflen-i, "."); i += n; /* Fraction. */ while (i < buflen-1) { uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 >= 5)) ? 
1 : 0; n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, (t0 * mult / t1) % 10 + round); i += n; mult *= 10; } } jemalloc-sys-0.3.2/jemalloc/test/stress/microbench.c010064400007650000024000000061731340421340100207010ustar0000000000000000#include "test/jemalloc_test.h" static inline void time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void)) { uint64_t i; for (i = 0; i < nwarmup; i++) { func(); } timer_start(timer); for (i = 0; i < niter; i++) { func(); } timer_stop(timer); } void compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, void (*func_a), const char *name_b, void (*func_b)) { timedelta_t timer_a, timer_b; char ratio_buf[6]; void *p; p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); return; } time_func(&timer_a, nwarmup, niter, func_a); time_func(&timer_b, nwarmup, niter, func_b); timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf)); malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, " "%s=%"FMTu64"us, ratio=1:%s\n", niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b), ratio_buf); dallocx(p, 0); } static void malloc_free(void) { /* The compiler can optimize away free(malloc(1))! */ void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } free(p); } static void mallocx_free(void) { void *p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); return; } free(p); } TEST_BEGIN(test_malloc_vs_mallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc", malloc_free, "mallocx", mallocx_free); } TEST_END static void malloc_dallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } dallocx(p, 0); } static void malloc_sdallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } sdallocx(p, 1, 0); } TEST_BEGIN(test_free_vs_dallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, "dallocx", malloc_dallocx); } TEST_END TEST_BEGIN(test_dallocx_vs_sdallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, "sdallocx", malloc_sdallocx); } TEST_END static void malloc_mus_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } malloc_usable_size(p); free(p); } static void malloc_sallocx_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } if (sallocx(p, 0) < 1) { test_fail("Unexpected sallocx() failure"); } free(p); } TEST_BEGIN(test_mus_vs_sallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", malloc_mus_free, "sallocx", malloc_sallocx_free); } TEST_END static void malloc_nallocx_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } if (nallocx(1, 0) < 1) { test_fail("Unexpected nallocx() failure"); } free(p); } TEST_BEGIN(test_sallocx_vs_nallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", malloc_sallocx_free, "nallocx", malloc_nallocx_free); } TEST_END int main(void) { return test_no_reentrancy( test_malloc_vs_mallocx, test_free_vs_dallocx, test_dallocx_vs_sdallocx, test_mus_vs_sallocx, test_sallocx_vs_nallocx); } jemalloc-sys-0.3.2/jemalloc/test/test.sh.in010064400007650000024000000043621340421340100170170ustar0000000000000000#!/bin/sh case @abi@ in macho) export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" ;; pecoff) export PATH="${PATH}:@objroot@lib" ;; *) ;; esac # Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to 
this script, so # it can be repeatedly concatenated with per test settings. export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF} # Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL. export_malloc_conf() { if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}" else export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}" fi } # Corresponds to test_status_t. pass_code=0 skip_code=1 fail_code=2 pass_count=0 skip_count=0 fail_count=0 for t in $@; do if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then echo fi echo "=== ${t} ===" if [ -e "@srcroot@${t}.sh" ] ; then # Source the shell script corresponding to the test in a subshell and # execute the test. This allows the shell script to set MALLOC_CONF, which # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail). enable_fill=@enable_fill@ \ enable_prof=@enable_prof@ \ . @srcroot@${t}.sh && \ export_malloc_conf && \ $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ else export MALLOC_CONF= && \ export_malloc_conf && \ $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ fi result_code=$? case ${result_code} in ${pass_code}) pass_count=$((pass_count+1)) ;; ${skip_code}) skip_count=$((skip_count+1)) ;; ${fail_code}) fail_count=$((fail_count+1)) ;; *) echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2 echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2 exit 1 esac done total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` echo echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" if [ ${fail_count} -eq 0 ] ; then exit 0 else exit 1 fi jemalloc-sys-0.3.2/jemalloc/test/unit/a0.c010064400007650000024000000003451340421340100165170ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_a0) { void *p; p = a0malloc(1); assert_ptr_not_null(p, "Unexpected a0malloc() error"); a0dalloc(p); } TEST_END int main(void) { return test_no_malloc_init( test_a0); } jemalloc-sys-0.3.2/jemalloc/test/unit/arena_reset.c010064400007650000024000000211231340421341300205070ustar0000000000000000#ifndef ARENA_RESET_PROF_C_ #include "test/jemalloc_test.h" #endif #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/rtree.h" #include "test/extent_hooks.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nsmall(void) { return get_nsizes_impl("arenas.nbins"); } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return ret; } static size_t get_small_size(size_t ind) { return get_size_impl("arenas.bin.0.size", ind); } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } /* Like ivsalloc(), but safe to call on discarded allocations. */ static size_t vsalloc(tsdn_t *tsdn, const void *ptr) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent; szind_t szind; if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, false, &extent, &szind)) { return 0; } if (extent == NULL) { return 0; } if (extent_state_get(extent) != extent_state_active) { return 0; } if (szind == NSIZES) { return 0; } return sz_index2size(szind); } static unsigned do_arena_create(extent_hooks_t *h) { unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, "Unexpected mallctl() failure"); return arena_ind; } static void do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { #define NLARGE 32 unsigned nsmall, nlarge, i; size_t sz; int flags; tsdn_t *tsdn; flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; nsmall = get_nsmall(); nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge(); *nptrs = nsmall + nlarge; *ptrs = (void **)malloc(*nptrs * sizeof(void *)); assert_ptr_not_null(*ptrs, "Unexpected malloc() failure"); /* Allocate objects with a wide range of sizes. */ for (i = 0; i < nsmall; i++) { sz = get_small_size(i); (*ptrs)[i] = mallocx(sz, flags); assert_ptr_not_null((*ptrs)[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } for (i = 0; i < nlarge; i++) { sz = get_large_size(i); (*ptrs)[nsmall + i] = mallocx(sz, flags); assert_ptr_not_null((*ptrs)[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } tsdn = tsdn_fetch(); /* Verify allocations. */ for (i = 0; i < *nptrs; i++) { assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0, "Allocation should have queryable size"); } } static void do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { tsdn_t *tsdn; unsigned i; tsdn = tsdn_fetch(); if (have_background_thread) { malloc_mutex_lock(tsdn, &background_thread_info[arena_ind % ncpus].mtx); } /* Verify allocations no longer exist. 
*/ for (i = 0; i < nptrs; i++) { assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0, "Allocation should no longer exist"); } if (have_background_thread) { malloc_mutex_unlock(tsdn, &background_thread_info[arena_ind % ncpus].mtx); } free(ptrs); } static void do_arena_reset_destroy(const char *name, unsigned arena_ind) { size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib(name, mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } static void do_arena_reset(unsigned arena_ind) { do_arena_reset_destroy("arena.0.reset", arena_ind); } static void do_arena_destroy(unsigned arena_ind) { do_arena_reset_destroy("arena.0.destroy", arena_ind); } TEST_BEGIN(test_arena_reset) { unsigned arena_ind; void **ptrs; unsigned nptrs; arena_ind = do_arena_create(NULL); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); do_arena_reset(arena_ind); do_arena_reset_post(ptrs, nptrs, arena_ind); } TEST_END static bool arena_i_initialized(unsigned arena_ind, bool refresh) { bool initialized; size_t mib[3]; size_t miblen, sz; if (refresh) { uint64_t epoch = 1; assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); } miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; sz = sizeof(initialized); assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL, 0), 0, "Unexpected mallctlbymib() failure"); return initialized; } TEST_BEGIN(test_arena_destroy_initial) { assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), "Destroyed arena stats should not be initialized"); } TEST_END TEST_BEGIN(test_arena_destroy_hooks_default) { unsigned arena_ind, arena_ind_another, arena_ind_prev; void **ptrs; unsigned nptrs; arena_ind = do_arena_create(NULL); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); assert_false(arena_i_initialized(arena_ind, false), "Arena stats should not be initialized"); assert_true(arena_i_initialized(arena_ind, true), "Arena stats should be initialized"); /* * Create another arena before destroying one, to better verify arena * index reuse. */ arena_ind_another = do_arena_create(NULL); do_arena_destroy(arena_ind); assert_false(arena_i_initialized(arena_ind, true), "Arena stats should not be initialized"); assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), "Destroyed arena stats should be initialized"); do_arena_reset_post(ptrs, nptrs, arena_ind); arena_ind_prev = arena_ind; arena_ind = do_arena_create(NULL); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); assert_u_eq(arena_ind, arena_ind_prev, "Arena index should have been recycled"); do_arena_destroy(arena_ind); do_arena_reset_post(ptrs, nptrs, arena_ind); do_arena_destroy(arena_ind_another); } TEST_END /* * Actually unmap extents, regardless of opt_retain, so that attempts to access * a destroyed arena's memory will segfault. */ static bool extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? 
"true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap, "Wrong hook function"); called_dalloc = true; if (!try_dalloc) { return true; } pages_unmap(addr, size); did_dalloc = true; return false; } static extent_hooks_t hooks_orig; static extent_hooks_t hooks_unmap = { extent_alloc_hook, extent_dalloc_unmap, /* dalloc */ extent_destroy_hook, extent_commit_hook, extent_decommit_hook, extent_purge_lazy_hook, extent_purge_forced_hook, extent_split_hook, extent_merge_hook }; TEST_BEGIN(test_arena_destroy_hooks_unmap) { unsigned arena_ind; void **ptrs; unsigned nptrs; extent_hooks_prep(); try_decommit = false; memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t)); did_alloc = false; arena_ind = do_arena_create(&hooks); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); assert_true(did_alloc, "Expected alloc"); assert_false(arena_i_initialized(arena_ind, false), "Arena stats should not be initialized"); assert_true(arena_i_initialized(arena_ind, true), "Arena stats should be initialized"); did_dalloc = false; do_arena_destroy(arena_ind); assert_true(did_dalloc, "Expected dalloc"); assert_false(arena_i_initialized(arena_ind, true), "Arena stats should not be initialized"); assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), "Destroyed arena stats should be initialized"); do_arena_reset_post(ptrs, nptrs, arena_ind); memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); } TEST_END int main(void) { return test( test_arena_reset, test_arena_destroy_initial, test_arena_destroy_hooks_default, test_arena_destroy_hooks_unmap); } jemalloc-sys-0.3.2/jemalloc/test/unit/arena_reset_prof.c010064400007650000024000000001261340421340100215320ustar0000000000000000#include "test/jemalloc_test.h" #define ARENA_RESET_PROF_C_ #include "arena_reset.c" jemalloc-sys-0.3.2/jemalloc/test/unit/arena_reset_prof.sh010064400007650000024000000000731340421340100217230ustar0000000000000000#!/bin/sh export MALLOC_CONF="prof:true,lg_prof_sample:0" jemalloc-sys-0.3.2/jemalloc/test/unit/atomic.c010064400007650000024000000156561340421340100175060ustar0000000000000000#include "test/jemalloc_test.h" /* * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for * bool, etc. The one exception is that the short name for void * is "p" in * some places and "ptr" in others. In the long run it would be nice to unify * these, but in the short run we'll use this shim. */ #define assert_p_eq assert_ptr_eq /* * t: the non-atomic type, like "uint32_t". * ta: the short name for the type, like "u32". * val[1,2,3]: Values of the given type. The CAS tests use val2 for expected, * and val3 for desired. */ #define DO_TESTS(t, ta, val1, val2, val3) do { \ t val; \ t expected; \ bool success; \ /* This (along with the load below) also tests ATOMIC_LOAD. */ \ atomic_##ta##_t atom = ATOMIC_INIT(val1); \ \ /* ATOMIC_INIT and load. */ \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, "Load or init failed"); \ \ /* Store. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val2, val, "Store failed"); \ \ /* Exchange. 
*/ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val2, val, "Exchange store invalid value"); \ \ /* \ * Weak CAS. Spurious failures are allowed, so we loop a few \ * times. \ */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ success = false; \ for (int i = 0; i < 10 && !success; i++) { \ expected = val2; \ success = atomic_compare_exchange_weak_##ta(&atom, \ &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, expected, \ "CAS should update expected"); \ } \ assert_b_eq(val1 == val2, success, \ "Weak CAS did the wrong state update"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ if (success) { \ assert_##ta##_eq(val3, val, \ "Successful CAS should update atomic"); \ } else { \ assert_##ta##_eq(val1, val, \ "Unsuccessful CAS should not update atomic"); \ } \ \ /* Strong CAS. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ expected = val2; \ success = atomic_compare_exchange_strong_##ta(&atom, &expected, \ val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ assert_b_eq(val1 == val2, success, \ "Strong CAS did the wrong state update"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ if (success) { \ assert_##ta##_eq(val3, val, \ "Successful CAS should update atomic"); \ } else { \ assert_##ta##_eq(val1, val, \ "Unsuccessful CAS should not update atomic"); \ } \ \ \ } while (0) #define DO_INTEGER_TESTS(t, ta, val1, val2) do { \ atomic_##ta##_t atom; \ t val; \ \ /* Fetch-add. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-add should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 + val2, val, \ "Fetch-add should update atomic"); \ \ /* Fetch-sub. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-sub should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 - val2, val, \ "Fetch-sub should update atomic"); \ \ /* Fetch-and. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-and should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 & val2, val, \ "Fetch-and should update atomic"); \ \ /* Fetch-or. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-or should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 | val2, val, \ "Fetch-or should update atomic"); \ \ /* Fetch-xor. 
*/ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-xor should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 ^ val2, val, \ "Fetch-xor should update atomic"); \ } while (0) #define TEST_STRUCT(t, ta) \ typedef struct { \ t val1; \ t val2; \ t val3; \ } ta##_test_t; #define TEST_CASES(t) { \ {(t)-1, (t)-1, (t)-2}, \ {(t)-1, (t) 0, (t)-2}, \ {(t)-1, (t) 1, (t)-2}, \ \ {(t) 0, (t)-1, (t)-2}, \ {(t) 0, (t) 0, (t)-2}, \ {(t) 0, (t) 1, (t)-2}, \ \ {(t) 1, (t)-1, (t)-2}, \ {(t) 1, (t) 0, (t)-2}, \ {(t) 1, (t) 1, (t)-2}, \ \ {(t)0, (t)-(1 << 22), (t)-2}, \ {(t)0, (t)(1 << 22), (t)-2}, \ {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ {(t)(1 << 22), (t)(1 << 22), (t)-2} \ } #define TEST_BODY(t, ta) do { \ const ta##_test_t tests[] = TEST_CASES(t); \ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ ta##_test_t test = tests[i]; \ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ } \ } while (0) #define INTEGER_TEST_BODY(t, ta) do { \ const ta##_test_t tests[] = TEST_CASES(t); \ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ ta##_test_t test = tests[i]; \ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \ } \ } while (0) TEST_STRUCT(uint64_t, u64); TEST_BEGIN(test_atomic_u64) { #if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) test_skip("64-bit atomic operations not supported"); #else INTEGER_TEST_BODY(uint64_t, u64); #endif } TEST_END TEST_STRUCT(uint32_t, u32); TEST_BEGIN(test_atomic_u32) { INTEGER_TEST_BODY(uint32_t, u32); } TEST_END TEST_STRUCT(void *, p); TEST_BEGIN(test_atomic_p) { TEST_BODY(void *, p); } TEST_END TEST_STRUCT(size_t, zu); TEST_BEGIN(test_atomic_zu) { INTEGER_TEST_BODY(size_t, zu); } TEST_END TEST_STRUCT(ssize_t, zd); TEST_BEGIN(test_atomic_zd) { INTEGER_TEST_BODY(ssize_t, zd); } TEST_END TEST_STRUCT(unsigned, u); TEST_BEGIN(test_atomic_u) { INTEGER_TEST_BODY(unsigned, u); } TEST_END int main(void) { return test( test_atomic_u64, test_atomic_u32, test_atomic_p, test_atomic_zu, test_atomic_zd, test_atomic_u); } jemalloc-sys-0.3.2/jemalloc/test/unit/background_thread.c010064400007650000024000000061461340421340100216720ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/util.h" static void test_switch_background_thread_ctl(bool new_val) { bool e0, e1; size_t sz = sizeof(bool); e1 = new_val; assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, &e1, sz), 0, "Unexpected mallctl() failure"); assert_b_eq(e0, !e1, "background_thread should be %d before.\n", !e1); if (e1) { assert_zu_gt(n_background_threads, 0, "Number of background threads should be non zero.\n"); } else { assert_zu_eq(n_background_threads, 0, "Number of background threads should be zero.\n"); } } static void test_repeat_background_thread_ctl(bool before) { bool e0, e1; size_t sz = sizeof(bool); e1 = before; assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, &e1, sz), 0, "Unexpected mallctl() failure"); assert_b_eq(e0, before, "background_thread should be %d.\n", before); if (e1) { assert_zu_gt(n_background_threads, 0, "Number of background threads should be non zero.\n"); } else { assert_zu_eq(n_background_threads, 0, "Number of background threads should be zero.\n"); } } TEST_BEGIN(test_background_thread_ctl) { test_skip_if(!have_background_thread); bool e0, e1; size_t sz = sizeof(bool); assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz, 
NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("background_thread", (void *)&e1, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_b_eq(e0, e1, "Default and opt.background_thread does not match.\n"); if (e0) { test_switch_background_thread_ctl(false); } assert_zu_eq(n_background_threads, 0, "Number of background threads should be 0.\n"); for (unsigned i = 0; i < 4; i++) { test_switch_background_thread_ctl(true); test_repeat_background_thread_ctl(true); test_repeat_background_thread_ctl(true); test_switch_background_thread_ctl(false); test_repeat_background_thread_ctl(false); test_repeat_background_thread_ctl(false); } } TEST_END TEST_BEGIN(test_background_thread_running) { test_skip_if(!have_background_thread); test_skip_if(!config_stats); #if defined(JEMALLOC_BACKGROUND_THREAD) tsd_t *tsd = tsd_fetch(); background_thread_info_t *info = &background_thread_info[0]; test_repeat_background_thread_ctl(false); test_switch_background_thread_ctl(true); assert_b_eq(info->state, background_thread_started, "Background_thread did not start.\n"); nstime_t start, now; nstime_init(&start, 0); nstime_update(&start); bool ran = false; while (true) { malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); if (info->tot_n_runs > 0) { ran = true; } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); if (ran) { break; } nstime_init(&now, 0); nstime_update(&now); nstime_subtract(&now, &start); assert_u64_lt(nstime_sec(&now), 1000, "Background threads did not run for 1000 seconds."); sleep(1); } test_switch_background_thread_ctl(false); #endif } TEST_END int main(void) { /* Background_thread creation tests reentrancy naturally. */ return test_no_reentrancy( test_background_thread_ctl, test_background_thread_running); } jemalloc-sys-0.3.2/jemalloc/test/unit/background_thread_enable.c010064400007650000024000000052551340421341300232030ustar0000000000000000#include "test/jemalloc_test.h" const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20"; TEST_BEGIN(test_deferred) { test_skip_if(!have_background_thread); unsigned id; size_t sz_u = sizeof(unsigned); /* * 10 here is somewhat arbitrary, except insofar as we want to ensure * that the number of background threads is smaller than the number of * arenas. I'll ragequit long before we have to spin up 10 threads per * cpu to handle background purging, so this is a conservative * approximation. 
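 * (Hence the test name: with more arenas than background threads, some
 * arenas' purging work is necessarily deferred to a shared thread.)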
*/ for (unsigned i = 0; i < 10 * ncpus; i++) { assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, "Failed to create arena"); } bool enable = true; size_t sz_b = sizeof(bool); assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to enable background threads"); enable = false; assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to disable background threads"); } TEST_END TEST_BEGIN(test_max_background_threads) { test_skip_if(!have_background_thread); size_t maxt; size_t opt_maxt; size_t sz_m = sizeof(maxt); assert_d_eq(mallctl("opt.max_background_threads", &opt_maxt, &sz_m, NULL, 0), 0, "Failed to get opt.max_background_threads"); assert_d_eq(mallctl("max_background_threads", &maxt, &sz_m, NULL, 0), 0, "Failed to get max background threads"); assert_zu_eq(20, maxt, "should be ncpus"); assert_zu_eq(opt_maxt, maxt, "max_background_threads and " "opt.max_background_threads should match"); assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m), 0, "Failed to set max background threads"); unsigned id; size_t sz_u = sizeof(unsigned); for (unsigned i = 0; i < 10 * ncpus; i++) { assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, "Failed to create arena"); } bool enable = true; size_t sz_b = sizeof(bool); assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to enable background threads"); assert_zu_eq(n_background_threads, maxt, "Number of background threads should be 3.\n"); maxt = 10; assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m), 0, "Failed to set max background threads"); assert_zu_eq(n_background_threads, maxt, "Number of background threads should be 10.\n"); maxt = 3; assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m), 0, "Failed to set max background threads"); assert_zu_eq(n_background_threads, maxt, "Number of background threads should be 3.\n"); } TEST_END int main(void) { return test_no_reentrancy( test_deferred, test_max_background_threads); } jemalloc-sys-0.3.2/jemalloc/test/unit/base.c010064400007650000024000000150201340421340100171250ustar0000000000000000#include "test/jemalloc_test.h" #include "test/extent_hooks.h" static extent_hooks_t hooks_null = { extent_alloc_hook, NULL, /* dalloc */ NULL, /* destroy */ NULL, /* commit */ NULL, /* decommit */ NULL, /* purge_lazy */ NULL, /* purge_forced */ NULL, /* split */ NULL /* merge */ }; static extent_hooks_t hooks_not_null = { extent_alloc_hook, extent_dalloc_hook, extent_destroy_hook, NULL, /* commit */ extent_decommit_hook, extent_purge_lazy_hook, extent_purge_forced_hook, NULL, /* split */ NULL /* merge */ }; TEST_BEGIN(test_base_hooks_default) { base_t *base; size_t allocated0, allocated1, resident, mapped, n_thp; tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); if (config_stats) { base_stats_get(tsdn, base, &allocated0, &resident, &mapped, &n_thp); assert_zu_ge(allocated0, sizeof(base_t), "Base header should count as allocated"); if (opt_metadata_thp == metadata_thp_always) { assert_zu_gt(n_thp, 0, "Base should have 1 THP at least."); } } assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), "Unexpected base_alloc() failure"); if (config_stats) { base_stats_get(tsdn, base, &allocated1, &resident, &mapped, &n_thp); assert_zu_ge(allocated1 - allocated0, 42, "At least 42 bytes were allocated by base_alloc()"); } base_delete(tsdn, base); } TEST_END TEST_BEGIN(test_base_hooks_null) { extent_hooks_t hooks_orig; base_t *base; 
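/*
 * hooks_null (above) supplies only the alloc hook and leaves every
 * other member NULL; this test verifies that base allocation copes. A
 * user arena installs custom hooks the same way do_arena_create() in
 * unit/arena_reset.c does, e.g. (sketch):
 *
 *   extent_hooks_t *h = &hooks;
 *   unsigned arena_ind;
 *   size_t sz = sizeof(arena_ind);
 *   mallctl("arenas.create", (void *)&arena_ind, &sz,
 *       (void *)&h, sizeof(h));
 */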
size_t allocated0, allocated1, resident, mapped, n_thp; extent_hooks_prep(); try_dalloc = false; try_destroy = true; try_decommit = false; try_purge_lazy = false; try_purge_forced = false; memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t)); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); base = base_new(tsdn, 0, &hooks); assert_ptr_not_null(base, "Unexpected base_new() failure"); if (config_stats) { base_stats_get(tsdn, base, &allocated0, &resident, &mapped, &n_thp); assert_zu_ge(allocated0, sizeof(base_t), "Base header should count as allocated"); if (opt_metadata_thp == metadata_thp_always) { assert_zu_gt(n_thp, 0, "Base should have 1 THP at least."); } } assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), "Unexpected base_alloc() failure"); if (config_stats) { base_stats_get(tsdn, base, &allocated1, &resident, &mapped, &n_thp); assert_zu_ge(allocated1 - allocated0, 42, "At least 42 bytes were allocated by base_alloc()"); } base_delete(tsdn, base); memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); } TEST_END TEST_BEGIN(test_base_hooks_not_null) { extent_hooks_t hooks_orig; base_t *base; void *p, *q, *r, *r_exp; extent_hooks_prep(); try_dalloc = false; try_destroy = true; try_decommit = false; try_purge_lazy = false; try_purge_forced = false; memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t)); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); did_alloc = false; base = base_new(tsdn, 0, &hooks); assert_ptr_not_null(base, "Unexpected base_new() failure"); assert_true(did_alloc, "Expected alloc"); /* * Check for tight packing at specified alignment under simple * conditions. */ { const size_t alignments[] = { 1, QUANTUM, QUANTUM << 1, CACHELINE, CACHELINE << 1, }; unsigned i; for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { size_t alignment = alignments[i]; size_t align_ceil = ALIGNMENT_CEILING(alignment, QUANTUM); p = base_alloc(tsdn, base, 1, alignment); assert_ptr_not_null(p, "Unexpected base_alloc() failure"); assert_ptr_eq(p, (void *)(ALIGNMENT_CEILING((uintptr_t)p, alignment)), "Expected quantum alignment"); q = base_alloc(tsdn, base, alignment, alignment); assert_ptr_not_null(q, "Unexpected base_alloc() failure"); assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q, "Minimal allocation should take up %zu bytes", align_ceil); r = base_alloc(tsdn, base, 1, alignment); assert_ptr_not_null(r, "Unexpected base_alloc() failure"); assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r, "Minimal allocation should take up %zu bytes", align_ceil); } } /* * Allocate an object that cannot fit in the first block, then verify * that the first block's remaining space is considered for subsequent * allocation. */ assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM, "Remainder insufficient for test"); /* Use up all but one quantum of block. 
*/ while (extent_bsize_get(&base->blocks->extent) > QUANTUM) { p = base_alloc(tsdn, base, QUANTUM, QUANTUM); assert_ptr_not_null(p, "Unexpected base_alloc() failure"); } r_exp = extent_addr_get(&base->blocks->extent); assert_zu_eq(base->extent_sn_next, 1, "One extant block expected"); q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM); assert_ptr_not_null(q, "Unexpected base_alloc() failure"); assert_ptr_ne(q, r_exp, "Expected allocation from new block"); assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); r = base_alloc(tsdn, base, QUANTUM, QUANTUM); assert_ptr_not_null(r, "Unexpected base_alloc() failure"); assert_ptr_eq(r, r_exp, "Expected allocation from first block"); assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); /* * Check for proper alignment support when normal blocks are too small. */ { const size_t alignments[] = { HUGEPAGE, HUGEPAGE << 1 }; unsigned i; for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { size_t alignment = alignments[i]; p = base_alloc(tsdn, base, QUANTUM, alignment); assert_ptr_not_null(p, "Unexpected base_alloc() failure"); assert_ptr_eq(p, (void *)(ALIGNMENT_CEILING((uintptr_t)p, alignment)), "Expected %zu-byte alignment", alignment); } } called_dalloc = called_destroy = called_decommit = called_purge_lazy = called_purge_forced = false; base_delete(tsdn, base); assert_true(called_dalloc, "Expected dalloc call"); assert_true(!called_destroy, "Unexpected destroy call"); assert_true(called_decommit, "Expected decommit call"); assert_true(called_purge_lazy, "Expected purge_lazy call"); assert_true(called_purge_forced, "Expected purge_forced call"); try_dalloc = true; try_destroy = true; try_decommit = true; try_purge_lazy = true; try_purge_forced = true; memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); } TEST_END int main(void) { return test( test_base_hooks_default, test_base_hooks_null, test_base_hooks_not_null); } jemalloc-sys-0.3.2/jemalloc/test/unit/bit_util.c010064400007650000024000000025571340421341300200440ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/bit_util.h" #define TEST_POW2_CEIL(t, suf, pri) do { \ unsigned i, pow2; \ t x; \ \ assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ \ for (i = 0; i < sizeof(t) * 8; i++) { \ assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ << i, "Unexpected result"); \ } \ \ for (i = 2; i < sizeof(t) * 8; i++) { \ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ ((t)1) << i, "Unexpected result"); \ } \ \ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ ((t)1) << (i+1), "Unexpected result"); \ } \ \ for (pow2 = 1; pow2 < 25; pow2++) { \ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ x++) { \ assert_##suf##_eq(pow2_ceil_##suf(x), \ ((t)1) << pow2, \ "Unexpected result, x=%"pri, x); \ } \ } \ } while (0) TEST_BEGIN(test_pow2_ceil_u64) { TEST_POW2_CEIL(uint64_t, u64, FMTu64); } TEST_END TEST_BEGIN(test_pow2_ceil_u32) { TEST_POW2_CEIL(uint32_t, u32, FMTu32); } TEST_END TEST_BEGIN(test_pow2_ceil_zu) { TEST_POW2_CEIL(size_t, zu, "zu"); } TEST_END int main(void) { return test( test_pow2_ceil_u64, test_pow2_ceil_u32, test_pow2_ceil_zu); } jemalloc-sys-0.3.2/jemalloc/test/unit/bitmap.c010064400007650000024000000257411340421340100175020ustar0000000000000000#include "test/jemalloc_test.h" #define NBITS_TAB \ NB( 1) \ NB( 2) \ NB( 3) \ NB( 4) \ NB( 5) \ NB( 6) \ NB( 7) \ NB( 8) \ NB( 9) \ NB(10) \ NB(11) \ NB(12) \ NB(13) \ NB(14) \ NB(15) \ NB(16) \ NB(17) \ 
NB(18) \ NB(19) \ NB(20) \ NB(21) \ NB(22) \ NB(23) \ NB(24) \ NB(25) \ NB(26) \ NB(27) \ NB(28) \ NB(29) \ NB(30) \ NB(31) \ NB(32) \ \ NB(33) \ NB(34) \ NB(35) \ NB(36) \ NB(37) \ NB(38) \ NB(39) \ NB(40) \ NB(41) \ NB(42) \ NB(43) \ NB(44) \ NB(45) \ NB(46) \ NB(47) \ NB(48) \ NB(49) \ NB(50) \ NB(51) \ NB(52) \ NB(53) \ NB(54) \ NB(55) \ NB(56) \ NB(57) \ NB(58) \ NB(59) \ NB(60) \ NB(61) \ NB(62) \ NB(63) \ NB(64) \ NB(65) \ \ NB(126) \ NB(127) \ NB(128) \ NB(129) \ NB(130) \ \ NB(254) \ NB(255) \ NB(256) \ NB(257) \ NB(258) \ \ NB(510) \ NB(511) \ NB(512) \ NB(513) \ NB(514) \ \ NB(1024) \ NB(2048) \ NB(4096) \ NB(8192) \ NB(16384) \ static void test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { bitmap_info_t binfo_dyn; bitmap_info_init(&binfo_dyn, nbits); assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn), "Unexpected difference between static and dynamic initialization, " "nbits=%zu", nbits); assert_zu_eq(binfo->nbits, binfo_dyn.nbits, "Unexpected difference between static and dynamic initialization, " "nbits=%zu", nbits); #ifdef BITMAP_USE_TREE assert_u_eq(binfo->nlevels, binfo_dyn.nlevels, "Unexpected difference between static and dynamic initialization, " "nbits=%zu", nbits); { unsigned i; for (i = 0; i < binfo->nlevels; i++) { assert_zu_eq(binfo->levels[i].group_offset, binfo_dyn.levels[i].group_offset, "Unexpected difference between static and dynamic " "initialization, nbits=%zu, level=%u", nbits, i); } } #else assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups, "Unexpected difference between static and dynamic initialization"); #endif } TEST_BEGIN(test_bitmap_initializer) { #define NB(nbits) { \ if (nbits <= BITMAP_MAXBITS) { \ bitmap_info_t binfo = \ BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_initializer_body(&binfo, nbits); \ } \ } NBITS_TAB #undef NB } TEST_END static size_t test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits, size_t prev_size) { size_t size = bitmap_size(binfo); assert_zu_ge(size, (nbits >> 3), "Bitmap size is smaller than expected"); assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected"); return size; } TEST_BEGIN(test_bitmap_size) { size_t nbits, prev_size; prev_size = 0; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); prev_size = test_bitmap_size_body(&binfo, nbits, prev_size); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ prev_size = test_bitmap_size_body(&binfo, nbits, \ prev_size); \ } prev_size = 0; NBITS_TAB #undef NB } TEST_END static void test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); for (i = 0; i < nbits; i++) { assert_false(bitmap_get(bitmap, binfo, i), "Bit should be unset"); } bitmap_init(bitmap, binfo, true); for (i = 0; i < nbits; i++) { assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set"); } free(bitmap); } TEST_BEGIN(test_bitmap_init) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_init_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_init_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END static void test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); 
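/* Starting from an all-clear bitmap, setting each bit exactly once must drive bitmap_full() to true. */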
assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); for (i = 0; i < nbits; i++) { bitmap_set(bitmap, binfo, i); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); free(bitmap); } TEST_BEGIN(test_bitmap_set) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_set_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_set_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END static void test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); for (i = 0; i < nbits; i++) { bitmap_set(bitmap, binfo, i); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); for (i = 0; i < nbits; i++) { bitmap_unset(bitmap, binfo, i); } for (i = 0; i < nbits; i++) { bitmap_set(bitmap, binfo, i); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); free(bitmap); } TEST_BEGIN(test_bitmap_unset) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_unset_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_unset_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END static void test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); /* Iteratively set bits starting at the beginning. */ for (size_t i = 0; i < nbits; i++) { assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be just after previous first unset " "bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, "First unset bit should be just after previous first unset " "bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "First unset bit should be just after previous first unset " "bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "First unset bit should be just after previous first unset " "bit"); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); /* * Iteratively unset bits starting at the end, and verify that * bitmap_sfu() reaches the unset bits. */ for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */ bitmap_unset(bitmap, binfo, i); assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be the bit previously unset"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, "First unset bit should be the bit previously unset"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "First unset bit should be the bit previously unset"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "First unset bit should be the bit previously unset"); bitmap_unset(bitmap, binfo, i); } assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset"); /* * Iteratively set bits starting at the beginning, and verify that * bitmap_sfu() looks past them. */ for (size_t i = 1; i < nbits; i++) { bitmap_set(bitmap, binfo, i - 1); assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be just after the bit previously " "set"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ?
i-1 : i), i, "First unset bit should be just after the bit previously " "set"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "First unset bit should be just after the bit previously " "set"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "First unset bit should be just after the bit previously " "set"); bitmap_unset(bitmap, binfo, i); } assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1, "First unset bit should be the last bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1), nbits - 1, "First unset bit should be the last bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1, "First unset bit should be the last bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1, "First unset bit should be the last bit"); assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); /* * Bubble a "usu" pattern through the bitmap and verify that * bitmap_ffu() finds the correct bit for all five min_bit cases. */ if (nbits >= 3) { for (size_t i = 0; i < nbits-2; i++) { bitmap_unset(bitmap, binfo, i); bitmap_unset(bitmap, binfo, i+2); if (i > 0) { assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, "Unexpected first unset bit"); } assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2, "Unexpected first unset bit"); if (i + 3 < nbits) { assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3), nbits, "Unexpected first unset bit"); } assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "Unexpected first unset bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2, "Unexpected first unset bit"); } } /* * Unset the last bit, bubble another unset bit through the bitmap, and * verify that bitmap_ffu() finds the correct bit for all four min_bit * cases. 
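* The four cases are min_bit just below the unset bit (i-1), at it (i), just past it (i+1, which must skip ahead to the terminal unset bit), and at the terminal bit itself (nbits-1).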
*/ if (nbits >= 3) { bitmap_unset(bitmap, binfo, nbits-1); for (size_t i = 0; i < nbits-1; i++) { bitmap_unset(bitmap, binfo, i); if (i > 0) { assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, "Unexpected first unset bit"); } assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1), nbits-1, "Unexpected first unset bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "Unexpected first unset bit"); } assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1, "Unexpected first unset bit"); } free(bitmap); } TEST_BEGIN(test_bitmap_xfu) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_xfu_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_xfu_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END int main(void) { return test( test_bitmap_initializer, test_bitmap_size, test_bitmap_init, test_bitmap_set, test_bitmap_unset, test_bitmap_xfu); } jemalloc-sys-0.3.2/jemalloc/test/unit/ckh.c010064400007650000024000000125461340421340100167720ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_new_delete) { tsd_t *tsd; ckh_t ckh; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_count_insert_search_remove) { tsd_t *tsd; ckh_t ckh; const char *strs[] = { "a string", "A string", "a string.", "A string." }; const char *missing = "A string not in the hash table."; size_t i; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), "Unexpected ckh_new() error"); assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); /* Insert. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { ckh_insert(tsd, &ckh, strs[i], strs[i]); assert_zu_eq(ckh_count(&ckh), i+1, "ckh_count() should return %zu, but it returned %zu", i+1, ckh_count(&ckh)); } /* Search. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { union { void *p; const char *s; } k, v; void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; vp = (i & 2) ? &v.p : NULL; k.p = NULL; v.p = NULL; assert_false(ckh_search(&ckh, strs[i], kp, vp), "Unexpected ckh_search() error"); ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? strs[i] : (const char *)NULL; assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); } assert_true(ckh_search(&ckh, missing, NULL, NULL), "Unexpected ckh_search() success"); /* Remove. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { union { void *p; const char *s; } k, v; void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; vp = (i & 2) ? &v.p : NULL; k.p = NULL; v.p = NULL; assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp), "Unexpected ckh_remove() error"); ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? 
strs[i] : (const char *)NULL; assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); assert_zu_eq(ckh_count(&ckh), sizeof(strs)/sizeof(const char *) - i - 1, "ckh_count() should return %zu, but it returned %zu", sizeof(strs)/sizeof(const char *) - i - 1, ckh_count(&ckh)); } ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_insert_iter_remove) { #define NITEMS ZU(1000) tsd_t *tsd; ckh_t ckh; void **p[NITEMS]; void *q, *r; size_t i; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); for (i = 0; i < NITEMS; i++) { p[i] = mallocx(i+1, 0); assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); } for (i = 0; i < NITEMS; i++) { size_t j; for (j = i; j < NITEMS; j++) { assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), "Unexpected ckh_insert() failure"); assert_false(ckh_search(&ckh, p[j], &q, &r), "Unexpected ckh_search() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); } assert_zu_eq(ckh_count(&ckh), NITEMS, "ckh_count() should return %zu, but it returned %zu", NITEMS, ckh_count(&ckh)); for (j = i + 1; j < NITEMS; j++) { assert_false(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() success"); } { bool seen[NITEMS]; size_t tabind; memset(seen, 0, sizeof(seen)); for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) { size_t k; assert_ptr_eq(q, r, "Key and val not equal"); for (k = 0; k < NITEMS; k++) { if (p[k] == q) { assert_false(seen[k], "Item %zu already seen", k); seen[k] = true; break; } } } for (j = 0; j < i + 1; j++) { assert_true(seen[j], "Item %zu not seen", j); } for (; j < NITEMS; j++) { assert_false(seen[j], "Item %zu seen", j); } } } for (i = 0; i < NITEMS; i++) { assert_false(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[i], q, "Key pointer mismatch"); assert_ptr_eq(p[i], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() success"); dallocx(p[i], 0); } assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); ckh_delete(tsd, &ckh); #undef NITEMS } TEST_END int main(void) { return test( test_new_delete, test_count_insert_search_remove, test_insert_iter_remove); } jemalloc-sys-0.3.2/jemalloc/test/unit/decay.c010064400007650000024000000434711340421340100173130ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ticker.h" static nstime_monotonic_t *nstime_monotonic_orig; static nstime_update_t *nstime_update_orig; static unsigned nupdates_mock; static nstime_t time_mock; static bool monotonic_mock; static bool check_background_thread_enabled(void) { bool enabled; size_t sz = sizeof(bool); int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); if (ret == ENOENT) { return false; } assert_d_eq(ret, 0, "Unexpected mallctl error"); return enabled; } static bool 
nstime_monotonic_mock(void) { return monotonic_mock; } static bool nstime_update_mock(nstime_t *time) { nupdates_mock++; if (monotonic_mock) { nstime_copy(time, &time_mock); } return !monotonic_mock; } static unsigned do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0, "Unexpected mallctlbymib() failure"); assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0, "Unexpected mallctlbymib() failure"); return arena_ind; } static void do_arena_destroy(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } void do_epoch(void) { uint64_t epoch = 1; assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); } void do_purge(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } void do_decay(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } static uint64_t get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; uint64_t npurge = 0; size_t sz = sizeof(npurge); assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0), config_stats ? 
0 : ENOENT, "Unexpected mallctlbymib() failure"); return npurge; } static uint64_t get_arena_dirty_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind); } static uint64_t get_arena_muzzy_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); } static uint64_t get_arena_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); } static size_t get_arena_pdirty(unsigned arena_ind) { do_epoch(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; size_t pdirty; size_t sz = sizeof(pdirty); assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0, "Unexpected mallctlbymib() failure"); return pdirty; } static size_t get_arena_pmuzzy(unsigned arena_ind) { do_epoch(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; size_t pmuzzy; size_t sz = sizeof(pmuzzy); assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0, "Unexpected mallctlbymib() failure"); return pmuzzy; } static void * do_mallocx(size_t size, int flags) { void *p = mallocx(size, flags); assert_ptr_not_null(p, "Unexpected mallocx() failure"); return p; } static void generate_dirty(unsigned arena_ind, size_t size) { int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; void *p = do_mallocx(size, flags); dallocx(p, flags); } TEST_BEGIN(test_decay_ticks) { test_skip_if(check_background_thread_enabled()); ticker_t *decay_ticker; unsigned tick0, tick1, arena_ind; size_t sz, large0; void *p; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); /* Set up a manually managed arena for test. */ arena_ind = do_arena_create(0, 0); /* Migrate to the new arena, and get the ticker. */ unsigned old_arena_ind; size_t sz_arena_ind = sizeof(old_arena_ind); assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0, "Unexpected mallctl() failure"); decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind); assert_ptr_not_null(decay_ticker, "Unexpected failure getting decay ticker"); /* * Test the standard APIs using a large size class, since we can't * control tcache interactions for small size classes (except by * completely disabling tcache for the entire test program). */ /* malloc(). */ tick0 = ticker_read(decay_ticker); p = malloc(large0); assert_ptr_not_null(p, "Unexpected malloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); /* free(). */ tick0 = ticker_read(decay_ticker); free(p); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()"); /* calloc(). */ tick0 = ticker_read(decay_ticker); p = calloc(1, large0); assert_ptr_not_null(p, "Unexpected calloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); free(p); /* posix_memalign(). 
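* Aligned allocations of a large size class take the same arena path as malloc(), so the decay ticker must tick here as well.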
*/ tick0 = ticker_read(decay_ticker); assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0, "Unexpected posix_memalign() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during posix_memalign()"); free(p); /* aligned_alloc(). */ tick0 = ticker_read(decay_ticker); p = aligned_alloc(sizeof(size_t), large0); assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during aligned_alloc()"); free(p); /* realloc(). */ /* Allocate. */ tick0 = ticker_read(decay_ticker); p = realloc(NULL, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Reallocate. */ tick0 = ticker_read(decay_ticker); p = realloc(p, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Deallocate. */ tick0 = ticker_read(decay_ticker); realloc(p, 0); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* * Test the *allocx() APIs using large and small size classes, with * tcache explicitly disabled. */ { unsigned i; size_t allocx_sizes[2]; allocx_sizes[0] = large0; allocx_sizes[1] = 1; for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { sz = allocx_sizes[i]; /* mallocx(). */ tick0 = ticker_read(decay_ticker); p = mallocx(sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during mallocx() (sz=%zu)", sz); /* rallocx(). */ tick0 = ticker_read(decay_ticker); p = rallocx(p, sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected rallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during rallocx() (sz=%zu)", sz); /* xallocx(). */ tick0 = ticker_read(decay_ticker); xallocx(p, sz, 0, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during xallocx() (sz=%zu)", sz); /* dallocx(). */ tick0 = ticker_read(decay_ticker); dallocx(p, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during dallocx() (sz=%zu)", sz); /* sdallocx(). */ p = mallocx(sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick0 = ticker_read(decay_ticker); sdallocx(p, sz, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during sdallocx() " "(sz=%zu)", sz); } } /* * Test tcache fill/flush interactions for large and small size classes, * using an explicit tcache. */ unsigned tcache_ind, i; size_t tcache_sizes[2]; tcache_sizes[0] = large0; tcache_sizes[1] = 1; size_t tcache_max, sz_tcache_max; sz_tcache_max = sizeof(tcache_max); assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, NULL, 0), 0, "Unexpected mallctl failure"); for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { sz = tcache_sizes[i]; /* tcache fill. 
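* An allocation through an empty tcache misses and falls through to the arena, which must tick the decay ticker.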
*/ tick0 = ticker_read(decay_ticker); p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during tcache fill " "(sz=%zu)", sz); /* tcache flush. */ dallocx(p, MALLOCX_TCACHE(tcache_ind)); tick0 = ticker_read(decay_ticker); assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tcache_ind, sizeof(unsigned)), 0, "Unexpected mallctl failure"); tick1 = ticker_read(decay_ticker); /* Will only tick if it's in tcache. */ if (sz <= tcache_max) { assert_u32_ne(tick1, tick0, "Expected ticker to tick during tcache " "flush (sz=%zu)", sz); } else { assert_u32_eq(tick1, tick0, "Unexpected ticker tick during tcache " "flush (sz=%zu)", sz); } } } TEST_END static void decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt, uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) { #define NINTERVALS 101 nstime_t time, update_interval, decay_ms, deadline; nstime_init(&time, 0); nstime_update(&time); nstime_init2(&decay_ms, dt, 0); nstime_copy(&deadline, &time); nstime_add(&deadline, &decay_ms); nstime_init2(&update_interval, dt, 0); nstime_idivide(&update_interval, NINTERVALS); /* * Keep q's slab from being deallocated during the looping below. If a * cached slab were to repeatedly come and go during looping, it could * prevent the decay backlog ever becoming empty. */ void *p = do_mallocx(1, flags); uint64_t dirty_npurge1, muzzy_npurge1; do { for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) { void *q = do_mallocx(1, flags); dallocx(q, flags); } dirty_npurge1 = get_arena_dirty_npurge(arena_ind); muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind); nstime_add(&time_mock, &update_interval); nstime_update(&time); } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 == dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) || !terminate_asap)); dallocx(p, flags); if (config_stats) { assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 + muzzy_npurge0, "Expected purging to occur"); } #undef NINTERVALS } TEST_BEGIN(test_decay_ticker) { test_skip_if(check_background_thread_enabled()); #define NPS 2048 ssize_t ddt = opt_dirty_decay_ms; ssize_t mdt = opt_muzzy_decay_ms; unsigned arena_ind = do_arena_create(ddt, mdt); int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); void *ps[NPS]; size_t large; /* * Allocate a bunch of large objects, pause the clock, deallocate every * other object (to fragment virtual memory), restore the clock, then * [md]allocx() in a tight loop while advancing time rapidly to verify * the ticker triggers purging. 
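* The nstime_monotonic/nstime_update mocks installed below stand in for the real clock, so time can be advanced deterministically instead of by sleeping.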
*/ size_t tcache_max; size_t sz = sizeof(size_t); assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL, 0), 0, "Unexpected mallctl failure"); large = nallocx(tcache_max + 1, flags); do_purge(arena_ind); uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind); uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind); for (unsigned i = 0; i < NPS; i++) { ps[i] = do_mallocx(large, flags); } nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); monotonic_mock = true; nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (unsigned i = 0; i < NPS; i += 2) { dallocx(ps[i], flags); unsigned nupdates0 = nupdates_mock; do_decay(arena_ind); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0, muzzy_npurge0, true); decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0, muzzy_npurge0, false); do_arena_destroy(arena_ind); nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; #undef NPS } TEST_END TEST_BEGIN(test_decay_nonmonotonic) { test_skip_if(check_background_thread_enabled()); #define NPS (SMOOTHSTEP_NSTEPS + 1) int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); void *ps[NPS]; uint64_t npurge0 = 0; uint64_t npurge1 = 0; size_t sz, large0; unsigned i, nupdates0; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); do_epoch(); sz = sizeof(uint64_t); npurge0 = get_arena_npurge(0); nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); monotonic_mock = false; nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (i = 0; i < NPS; i++) { ps[i] = mallocx(large0, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } for (i = 0; i < NPS; i++) { dallocx(ps[i], flags); nupdates0 = nupdates_mock; assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.decay failure"); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } do_epoch(); sz = sizeof(uint64_t); npurge1 = get_arena_npurge(0); if (config_stats) { assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); } nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; #undef NPS } TEST_END TEST_BEGIN(test_decay_now) { test_skip_if(check_background_thread_enabled()); unsigned arena_ind = do_arena_create(0, 0); assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; /* Verify that dirty/muzzy pages never linger after deallocation. 
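* Both decay times were set to 0 at arena creation, so purging is immediate and pdirty/pmuzzy must read 0 after every allocate/free round trip.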
*/ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { size_t size = sizes[i]; generate_dirty(arena_ind, size); assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); } do_arena_destroy(arena_ind); } TEST_END TEST_BEGIN(test_decay_never) { test_skip_if(check_background_thread_enabled()); unsigned arena_ind = do_arena_create(-1, -1); int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; void *ptrs[sizeof(sizes)/sizeof(size_t)]; for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { ptrs[i] = do_mallocx(sizes[i], flags); } /* Verify that each deallocation generates additional dirty pages. */ size_t pdirty_prev = get_arena_pdirty(arena_ind); size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind); assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages"); assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages"); for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { dallocx(ptrs[i], flags); size_t pdirty = get_arena_pdirty(arena_ind); size_t pmuzzy = get_arena_pmuzzy(arena_ind); assert_zu_gt(pdirty, pdirty_prev, "Expected dirty pages to increase."); assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages"); pdirty_prev = pdirty; } do_arena_destroy(arena_ind); } TEST_END int main(void) { return test( test_decay_ticks, test_decay_ticker, test_decay_nonmonotonic, test_decay_now, test_decay_never); } jemalloc-sys-0.3.2/jemalloc/test/unit/decay.sh010064400007650000024000000001301340421340100174660ustar0000000000000000#!/bin/sh export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0" jemalloc-sys-0.3.2/jemalloc/test/unit/div.c010064400007650000024000000012651340421340100170030ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/div.h" TEST_BEGIN(test_div_exhaustive) { for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) { div_info_t div_info; div_init(&div_info, divisor); size_t max = 1000 * divisor; if (max < 1000 * 1000) { max = 1000 * 1000; } for (size_t dividend = 0; dividend < max; dividend += divisor) { size_t quotient = div_compute( &div_info, dividend); assert_zu_eq(dividend, quotient * divisor, "With divisor = %zu, dividend = %zu, " "got quotient %zu", divisor, dividend, quotient); } } } TEST_END int main(void) { return test_no_reentrancy( test_div_exhaustive); } jemalloc-sys-0.3.2/jemalloc/test/unit/emitter.c010064400007650000024000000241751340421341300177010ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/emitter.h" /* * This is so useful for debugging and feature work, we'll leave printing * functionality committed but disabled by default. */ /* Print the text as it will appear. */ static bool print_raw = false; /* Print the text escaped, so it can be copied back into the test case. */ static bool print_escaped = false; typedef struct buf_descriptor_s buf_descriptor_t; struct buf_descriptor_s { char *buf; size_t len; bool mid_quote; }; /* * Forwards all writes to the passed-in buf_v (which should be cast from a * buf_descriptor_t *).
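* In escaped mode it re-opens a C string literal after each emitted newline (tracked via mid_quote), so captured output can be pasted straight back into the expected-output literals in this file.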
*/ static void forwarding_cb(void *buf_descriptor_v, const char *str) { buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v; if (print_raw) { malloc_printf("%s", str); } if (print_escaped) { const char *it = str; while (*it != '\0') { if (!buf_descriptor->mid_quote) { malloc_printf("\""); buf_descriptor->mid_quote = true; } switch (*it) { case '\\': malloc_printf("\\"); break; case '\"': malloc_printf("\\\""); break; case '\t': malloc_printf("\\t"); break; case '\n': malloc_printf("\\n\"\n"); buf_descriptor->mid_quote = false; break; default: malloc_printf("%c", *it); } it++; } } size_t written = malloc_snprintf(buf_descriptor->buf, buf_descriptor->len, "%s", str); assert_zu_eq(written, strlen(str), "Buffer overflow!"); buf_descriptor->buf += written; buf_descriptor->len -= written; assert_zu_gt(buf_descriptor->len, 0, "Buffer out of space!"); } static void assert_emit_output(void (*emit_fn)(emitter_t *), const char *expected_json_output, const char *expected_table_output) { emitter_t emitter; char buf[MALLOC_PRINTF_BUFSIZE]; buf_descriptor_t buf_descriptor; buf_descriptor.buf = buf; buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; buf_descriptor.mid_quote = false; emitter_init(&emitter, emitter_output_json, &forwarding_cb, &buf_descriptor); (*emit_fn)(&emitter); assert_str_eq(expected_json_output, buf, "json output failure"); buf_descriptor.buf = buf; buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; buf_descriptor.mid_quote = false; emitter_init(&emitter, emitter_output_table, &forwarding_cb, &buf_descriptor); (*emit_fn)(&emitter); assert_str_eq(expected_table_output, buf, "table output failure"); } static void emit_dict(emitter_t *emitter) { bool b_false = false; bool b_true = true; int i_123 = 123; const char *str = "a string"; emitter_begin(emitter); emitter_dict_begin(emitter, "foo", "This is the foo table:"); emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false); emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true); emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123, "note_key1", emitter_type_string, &str); emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str, "note_key2", emitter_type_bool, &b_false); emitter_dict_end(emitter); emitter_end(emitter); } static const char *dict_json = "{\n" "\t\"foo\": {\n" "\t\t\"abc\": false,\n" "\t\t\"def\": true,\n" "\t\t\"ghi\": 123,\n" "\t\t\"jkl\": \"a string\"\n" "\t}\n" "}\n"; static const char *dict_table = "This is the foo table:\n" " ABC: false\n" " DEF: true\n" " GHI: 123 (note_key1: \"a string\")\n" " JKL: \"a string\" (note_key2: false)\n"; TEST_BEGIN(test_dict) { assert_emit_output(&emit_dict, dict_json, dict_table); } TEST_END static void emit_table_printf(emitter_t *emitter) { emitter_begin(emitter); emitter_table_printf(emitter, "Table note 1\n"); emitter_table_printf(emitter, "Table note 2 %s\n", "with format string"); emitter_end(emitter); } static const char *table_printf_json = "{\n" "}\n"; static const char *table_printf_table = "Table note 1\n" "Table note 2 with format string\n"; TEST_BEGIN(test_table_printf) { assert_emit_output(&emit_table_printf, table_printf_json, table_printf_table); } TEST_END static void emit_nested_dict(emitter_t *emitter) { int val = 123; emitter_begin(emitter); emitter_dict_begin(emitter, "json1", "Dict 1"); emitter_dict_begin(emitter, "json2", "Dict 2"); emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val); emitter_dict_end(emitter); /* Close 2 */ emitter_dict_begin(emitter, "json3", "Dict 3"); emitter_dict_end(emitter); /* 
Close 3 */ emitter_dict_end(emitter); /* Close 1 */ emitter_dict_begin(emitter, "json4", "Dict 4"); emitter_kv(emitter, "primitive", "Another primitive", emitter_type_int, &val); emitter_dict_end(emitter); /* Close 4 */ emitter_end(emitter); } static const char *nested_dict_json = "{\n" "\t\"json1\": {\n" "\t\t\"json2\": {\n" "\t\t\t\"primitive\": 123\n" "\t\t},\n" "\t\t\"json3\": {\n" "\t\t}\n" "\t},\n" "\t\"json4\": {\n" "\t\t\"primitive\": 123\n" "\t}\n" "}\n"; static const char *nested_dict_table = "Dict 1\n" " Dict 2\n" " A primitive: 123\n" " Dict 3\n" "Dict 4\n" " Another primitive: 123\n"; TEST_BEGIN(test_nested_dict) { assert_emit_output(&emit_nested_dict, nested_dict_json, nested_dict_table); } TEST_END static void emit_types(emitter_t *emitter) { bool b = false; int i = -123; unsigned u = 123; ssize_t zd = -456; size_t zu = 456; const char *str = "string"; uint32_t u32 = 789; uint64_t u64 = 10000000000ULL; emitter_begin(emitter); emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b); emitter_kv(emitter, "k2", "K2", emitter_type_int, &i); emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u); emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd); emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu); emitter_kv(emitter, "k6", "K6", emitter_type_string, &str); emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32); emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64); /* * We don't test the title type, since it's only used for tables. It's * tested in the emitter_table_row tests. */ emitter_end(emitter); } static const char *types_json = "{\n" "\t\"k1\": false,\n" "\t\"k2\": -123,\n" "\t\"k3\": 123,\n" "\t\"k4\": -456,\n" "\t\"k5\": 456,\n" "\t\"k6\": \"string\",\n" "\t\"k7\": 789,\n" "\t\"k8\": 10000000000\n" "}\n"; static const char *types_table = "K1: false\n" "K2: -123\n" "K3: 123\n" "K4: -456\n" "K5: 456\n" "K6: \"string\"\n" "K7: 789\n" "K8: 10000000000\n"; TEST_BEGIN(test_types) { assert_emit_output(&emit_types, types_json, types_table); } TEST_END static void emit_modal(emitter_t *emitter) { int val = 123; emitter_begin(emitter); emitter_dict_begin(emitter, "j0", "T0"); emitter_json_dict_begin(emitter, "j1"); emitter_kv(emitter, "i1", "I1", emitter_type_int, &val); emitter_json_kv(emitter, "i2", emitter_type_int, &val); emitter_table_kv(emitter, "I3", emitter_type_int, &val); emitter_table_dict_begin(emitter, "T1"); emitter_kv(emitter, "i4", "I4", emitter_type_int, &val); emitter_json_dict_end(emitter); /* Close j1 */ emitter_kv(emitter, "i5", "I5", emitter_type_int, &val); emitter_table_dict_end(emitter); /* Close T1 */ emitter_kv(emitter, "i6", "I6", emitter_type_int, &val); emitter_dict_end(emitter); /* Close j0 / T0 */ emitter_end(emitter); } const char *modal_json = "{\n" "\t\"j0\": {\n" "\t\t\"j1\": {\n" "\t\t\t\"i1\": 123,\n" "\t\t\t\"i2\": 123,\n" "\t\t\t\"i4\": 123\n" "\t\t},\n" "\t\t\"i5\": 123,\n" "\t\t\"i6\": 123\n" "\t}\n" "}\n"; const char *modal_table = "T0\n" " I1: 123\n" " I3: 123\n" " T1\n" " I4: 123\n" " I5: 123\n" " I6: 123\n"; TEST_BEGIN(test_modal) { assert_emit_output(&emit_modal, modal_json, modal_table); } TEST_END static void emit_json_arr(emitter_t *emitter) { int ival = 123; emitter_begin(emitter); emitter_json_dict_begin(emitter, "dict"); emitter_json_arr_begin(emitter, "arr"); emitter_json_arr_obj_begin(emitter); emitter_json_kv(emitter, "foo", emitter_type_int, &ival); emitter_json_arr_obj_end(emitter); /* Close arr[0] */ /* arr[1] and arr[2] are primitives. 
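* Primitive elements are emitted with emitter_json_arr_value() rather than with begin/end object pairs.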
*/ emitter_json_arr_value(emitter, emitter_type_int, &ival); emitter_json_arr_value(emitter, emitter_type_int, &ival); emitter_json_arr_obj_begin(emitter); emitter_json_kv(emitter, "bar", emitter_type_int, &ival); emitter_json_kv(emitter, "baz", emitter_type_int, &ival); emitter_json_arr_obj_end(emitter); /* Close arr[3]. */ emitter_json_arr_end(emitter); /* Close arr. */ emitter_json_dict_end(emitter); /* Close dict. */ emitter_end(emitter); } static const char *json_arr_json = "{\n" "\t\"dict\": {\n" "\t\t\"arr\": [\n" "\t\t\t{\n" "\t\t\t\t\"foo\": 123\n" "\t\t\t},\n" "\t\t\t123,\n" "\t\t\t123,\n" "\t\t\t{\n" "\t\t\t\t\"bar\": 123,\n" "\t\t\t\t\"baz\": 123\n" "\t\t\t}\n" "\t\t]\n" "\t}\n" "}\n"; static const char *json_arr_table = ""; TEST_BEGIN(test_json_arr) { assert_emit_output(&emit_json_arr, json_arr_json, json_arr_table); } TEST_END static void emit_table_row(emitter_t *emitter) { emitter_begin(emitter); emitter_row_t row; emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title}; abc.str_val = "ABC title"; emitter_col_t def = {emitter_justify_right, 15, emitter_type_title}; def.str_val = "DEF title"; emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title}; ghi.str_val = "GHI"; emitter_row_init(&row); emitter_col_init(&abc, &row); emitter_col_init(&def, &row); emitter_col_init(&ghi, &row); emitter_table_row(emitter, &row); abc.type = emitter_type_int; def.type = emitter_type_bool; ghi.type = emitter_type_int; abc.int_val = 123; def.bool_val = true; ghi.int_val = 456; emitter_table_row(emitter, &row); abc.int_val = 789; def.bool_val = false; ghi.int_val = 1011; emitter_table_row(emitter, &row); abc.type = emitter_type_string; abc.str_val = "a string"; def.bool_val = false; ghi.type = emitter_type_title; ghi.str_val = "ghi"; emitter_table_row(emitter, &row); emitter_end(emitter); } static const char *table_row_json = "{\n" "}\n"; static const char *table_row_table = "ABC title DEF title GHI\n" "123 true 456\n" "789 false 1011\n" "\"a string\" false ghi\n"; TEST_BEGIN(test_table_row) { assert_emit_output(&emit_table_row, table_row_json, table_row_table); } TEST_END int main(void) { return test_no_reentrancy( test_dict, test_table_printf, test_nested_dict, test_types, test_modal, test_json_arr, test_table_row); } jemalloc-sys-0.3.2/jemalloc/test/unit/extent_quantize.c010064400007650000024000000103511340421340100214440ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_small_extent_size) { unsigned nbins, i; size_t sz, extent_size; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all small size classes, get their extent sizes, and * verify that the quantized size is the same as the extent size. 
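* That is, extent_size_quantize_floor() and extent_size_quantize_ceil() must both behave as identity functions on exact size-class boundaries. */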
*/ sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); for (i = 0; i < nbins; i++) { mib[2] = i; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); assert_zu_eq(extent_size, extent_size_quantize_floor(extent_size), "Small extent quantization should be a no-op " "(extent_size=%zu)", extent_size); assert_zu_eq(extent_size, extent_size_quantize_ceil(extent_size), "Small extent quantization should be a no-op " "(extent_size=%zu)", extent_size); } } TEST_END TEST_BEGIN(test_large_extent_size) { bool cache_oblivious; unsigned nlextents, i; size_t sz, extent_size_prev, ceil_prev; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all large size classes, get their extent sizes, and * verify that the quantized size is the same as the extent size. */ sz = sizeof(bool); assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, &sz, NULL, 0), 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); for (i = 0; i < nlextents; i++) { size_t lextent_size, extent_size, floor, ceil; mib[2] = i; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); extent_size = cache_oblivious ? lextent_size + PAGE : lextent_size; floor = extent_size_quantize_floor(extent_size); ceil = extent_size_quantize_ceil(extent_size); assert_zu_eq(extent_size, floor, "Extent quantization should be a no-op for precise size " "(lextent_size=%zu, extent_size=%zu)", lextent_size, extent_size); assert_zu_eq(extent_size, ceil, "Extent quantization should be a no-op for precise size " "(lextent_size=%zu, extent_size=%zu)", lextent_size, extent_size); if (i > 0) { assert_zu_eq(extent_size_prev, extent_size_quantize_floor(extent_size - PAGE), "Floor should be a precise size"); if (extent_size_prev < ceil_prev) { assert_zu_eq(ceil_prev, extent_size, "Ceiling should be a precise size " "(extent_size_prev=%zu, ceil_prev=%zu, " "extent_size=%zu)", extent_size_prev, ceil_prev, extent_size); } } if (i + 1 < nlextents) { extent_size_prev = floor; ceil_prev = extent_size_quantize_ceil(extent_size + PAGE); } } } TEST_END TEST_BEGIN(test_monotonic) { #define SZ_MAX ZU(4 * 1024 * 1024) unsigned i; size_t floor_prev, ceil_prev; floor_prev = 0; ceil_prev = 0; for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) { size_t extent_size, floor, ceil; extent_size = i << LG_PAGE; floor = extent_size_quantize_floor(extent_size); ceil = extent_size_quantize_ceil(extent_size); assert_zu_le(floor, extent_size, "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)", floor, extent_size, ceil); assert_zu_ge(ceil, extent_size, "Ceiling should be >= (floor=%zu, extent_size=%zu, " "ceil=%zu)", floor, extent_size, ceil); assert_zu_le(floor_prev, floor, "Floor should be monotonic " "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)", floor_prev, floor, extent_size, ceil); assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)", floor, extent_size, ceil_prev, ceil); floor_prev = floor; ceil_prev = ceil; } } 
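/* Neither quantization function may ever decrease as extent_size grows, page by page, up to SZ_MAX. */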
TEST_END int main(void) { return test( test_small_extent_size, test_large_extent_size, test_monotonic); } jemalloc-sys-0.3.2/jemalloc/test/unit/fork.c010064400007650000024000000056571340421340100171670ustar0000000000000000#include "test/jemalloc_test.h" #ifndef _WIN32 #include <sys/wait.h> #endif #ifndef _WIN32 static void wait_for_child_exit(int pid) { int status; while (true) { if (waitpid(pid, &status, 0) == -1) { test_fail("Unexpected waitpid() failure."); } if (WIFSIGNALED(status)) { test_fail("Unexpected child termination due to " "signal %d", WTERMSIG(status)); break; } if (WIFEXITED(status)) { if (WEXITSTATUS(status) != 0) { test_fail("Unexpected child exit value %d", WEXITSTATUS(status)); } break; } } } #endif TEST_BEGIN(test_fork) { #ifndef _WIN32 void *p; pid_t pid; /* Set up a manually managed arena for test. */ unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); /* Migrate to the new arena. */ unsigned old_arena_ind; sz = sizeof(old_arena_ind); assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&arena_ind, sizeof(arena_ind)), 0, "Unexpected mallctl() failure"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); pid = fork(); free(p); p = malloc(64); assert_ptr_not_null(p, "Unexpected malloc() failure"); free(p); if (pid == -1) { /* Error. */ test_fail("Unexpected fork() failure"); } else if (pid == 0) { /* Child. */ _exit(0); } else { wait_for_child_exit(pid); } #else test_skip("fork(2) is irrelevant to Windows"); #endif } TEST_END #ifndef _WIN32 static void * do_fork_thd(void *arg) { malloc(1); int pid = fork(); if (pid == -1) { /* Error. */ test_fail("Unexpected fork() failure"); } else if (pid == 0) { /* Child. */ char *args[] = {"true", NULL}; execvp(args[0], args); test_fail("Exec failed"); } else { /* Parent */ wait_for_child_exit(pid); } return NULL; } #endif #ifndef _WIN32 static void do_test_fork_multithreaded(void) { thd_t child; thd_create(&child, do_fork_thd, NULL); do_fork_thd(NULL); thd_join(child, NULL); } #endif TEST_BEGIN(test_fork_multithreaded) { #ifndef _WIN32 /* * We've seen bugs involving hanging on arenas_lock (though the same * class of bugs can happen on any mutex). The bugs are intermittent * though, so we want to run the test multiple times. Since we hold the * arenas lock only early in the process lifetime, we can't just run * this test in a loop (since, after all the arenas are initialized, we * won't acquire arenas_lock any further). We therefore repeat the test * with multiple processes. */ for (int i = 0; i < 100; i++) { int pid = fork(); if (pid == -1) { /* Error. */ test_fail("Unexpected fork() failure"); } else if (pid == 0) { /* Child. */ do_test_fork_multithreaded(); _exit(0); } else { wait_for_child_exit(pid); } } #else test_skip("fork(2) is irrelevant to Windows"); #endif } TEST_END int main(void) { return test_no_reentrancy( test_fork, test_fork_multithreaded); } jemalloc-sys-0.3.2/jemalloc/test/unit/hash.c010064400007650000024000000116741340421340100171510ustar0000000000000000/* * This file is based on code that is part of SMHasher * (https://code.google.com/p/smhasher/), and is subject to the MIT license * (http://www.opensource.org/licenses/mit-license.php). Both email addresses * associated with the source code's revision history belong to Austin Appleby, * and the revision history ranges from 2010 to 2012.
Therefore the copyright * and license are here taken to be: * * Copyright (c) 2010-2012 Austin Appleby * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "test/jemalloc_test.h" #include "jemalloc/internal/hash.h" typedef enum { hash_variant_x86_32, hash_variant_x86_128, hash_variant_x64_128 } hash_variant_t; static int hash_variant_bits(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return 32; case hash_variant_x86_128: return 128; case hash_variant_x64_128: return 128; default: not_reached(); } } static const char * hash_variant_string(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return "hash_x86_32"; case hash_variant_x86_128: return "hash_x86_128"; case hash_variant_x64_128: return "hash_x64_128"; default: not_reached(); } } #define KEY_SIZE 256 static void hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { const int hashbytes = hash_variant_bits(variant) / 8; const int hashes_size = hashbytes * 256; VARIABLE_ARRAY(uint8_t, hashes, hashes_size); VARIABLE_ARRAY(uint8_t, final, hashbytes); unsigned i; uint32_t computed, expected; memset(key, 0, KEY_SIZE); memset(hashes, 0, hashes_size); memset(final, 0, hashbytes); /* * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the * seed. */ for (i = 0; i < 256; i++) { key[i] = (uint8_t)i; switch (variant) { case hash_variant_x86_32: { uint32_t out; out = hash_x86_32(key, i, 256-i); memcpy(&hashes[i*hashbytes], &out, hashbytes); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } default: not_reached(); } } /* Hash the result array. 
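* This folds the 256 per-key hashes into a single digest whose low 32 bits are compared against the precomputed SMHasher-style verification constant below.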
*/ switch (variant) { case hash_variant_x86_32: { uint32_t out = hash_x86_32(hashes, hashes_size, 0); memcpy(final, &out, sizeof(out)); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } default: not_reached(); } computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | (final[3] << 24); switch (variant) { #ifdef JEMALLOC_BIG_ENDIAN case hash_variant_x86_32: expected = 0x6213303eU; break; case hash_variant_x86_128: expected = 0x266820caU; break; case hash_variant_x64_128: expected = 0xcc622b6fU; break; #else case hash_variant_x86_32: expected = 0xb0f57ee3U; break; case hash_variant_x86_128: expected = 0xb3ece62aU; break; case hash_variant_x64_128: expected = 0x6384ba69U; break; #endif default: not_reached(); } assert_u32_eq(computed, expected, "Hash mismatch for %s(): expected %#x but got %#x", hash_variant_string(variant), expected, computed); } static void hash_variant_verify(hash_variant_t variant) { #define MAX_ALIGN 16 uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; unsigned i; for (i = 0; i < MAX_ALIGN; i++) { hash_variant_verify_key(variant, &key[i]); } #undef MAX_ALIGN } #undef KEY_SIZE TEST_BEGIN(test_hash_x86_32) { hash_variant_verify(hash_variant_x86_32); } TEST_END TEST_BEGIN(test_hash_x86_128) { hash_variant_verify(hash_variant_x86_128); } TEST_END TEST_BEGIN(test_hash_x64_128) { hash_variant_verify(hash_variant_x64_128); } TEST_END int main(void) { return test( test_hash_x86_32, test_hash_x86_128, test_hash_x64_128); } jemalloc-sys-0.3.2/jemalloc/test/unit/hooks.c010064400007650000024000000013601340421341300173430ustar0000000000000000#include "test/jemalloc_test.h" static bool hook_called = false; static void hook() { hook_called = true; } static int func_to_hook(int arg1, int arg2) { return arg1 + arg2; } #define func_to_hook JEMALLOC_HOOK(func_to_hook, hooks_libc_hook) TEST_BEGIN(unhooked_call) { hooks_libc_hook = NULL; hook_called = false; assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); assert_false(hook_called, "Nulling out hook didn't take."); } TEST_END TEST_BEGIN(hooked_call) { hooks_libc_hook = &hook; hook_called = false; assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); assert_true(hook_called, "Hook should have executed."); } TEST_END int main(void) { return test( unhooked_call, hooked_call); } jemalloc-sys-0.3.2/jemalloc/test/unit/junk.c010064400007650000024000000070151340421341300171720ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/util.h" static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; static large_dalloc_junk_t *large_dalloc_junk_orig; static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig; static void *watch_for_junking; static bool saw_junking; static void watch_junking(void *p) { watch_for_junking = p; saw_junking = false; } static void arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) { size_t i; arena_dalloc_junk_small_orig(ptr, bin_info); for (i = 0; i < bin_info->reg_size; i++) { assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, bin_info->reg_size); } if (ptr == watch_for_junking) { saw_junking = true; } } static void large_dalloc_junk_intercept(void *ptr, size_t usize) { size_t i; large_dalloc_junk_orig(ptr, usize); for (i = 0; i < usize; i++) { 
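/* Every byte of the deallocated region must carry the junk pattern (JEMALLOC_FREE_JUNK, 0x5a in stock builds). */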
assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, usize); } if (ptr == watch_for_junking) { saw_junking = true; } } static void large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) { large_dalloc_maybe_junk_orig(ptr, usize); if (ptr == watch_for_junking) { saw_junking = true; } } static void test_junk(size_t sz_min, size_t sz_max) { uint8_t *s; size_t sz_prev, sz, i; if (opt_junk_free) { arena_dalloc_junk_small_orig = arena_dalloc_junk_small; arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; large_dalloc_junk_orig = large_dalloc_junk; large_dalloc_junk = large_dalloc_junk_intercept; large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk; large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept; } sz_prev = 0; s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { assert_u_eq(s[0], 'a', "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); assert_u_eq(s[sz_prev-1], 'a', "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { if (opt_junk_alloc) { assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK, "Newly allocated byte %zu/%zu isn't " "junk-filled", i, sz); } s[i] = 'a'; } if (xallocx(s, sz+1, 0, 0) == sz) { uint8_t *t; watch_junking(s); t = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)t, "Unexpected rallocx() failure"); assert_zu_ge(sallocx(t, 0), sz+1, "Unexpectedly small rallocx() result"); if (!background_thread_enabled()) { assert_ptr_ne(s, t, "Unexpected in-place rallocx()"); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be " "junk-filled", sz); } s = t; } } watch_junking(s); dallocx(s, 0); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be junk-filled", sz); if (opt_junk_free) { arena_dalloc_junk_small = arena_dalloc_junk_small_orig; large_dalloc_junk = large_dalloc_junk_orig; large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig; } } TEST_BEGIN(test_junk_small) { test_skip_if(!config_fill); test_junk(1, SMALL_MAXCLASS-1); } TEST_END TEST_BEGIN(test_junk_large) { test_skip_if(!config_fill); test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1))); } TEST_END int main(void) { return test( test_junk_small, test_junk_large); } jemalloc-sys-0.3.2/jemalloc/test/unit/junk.sh010064400007650000024000000001551340421340100173550ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,zero:false,junk:true" fi jemalloc-sys-0.3.2/jemalloc/test/unit/junk_alloc.c010064400007650000024000000000221340421340100203300ustar0000000000000000#include "junk.c" jemalloc-sys-0.3.2/jemalloc/test/unit/junk_alloc.sh010064400007650000024000000001561340421340100205300ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,zero:false,junk:alloc" fi jemalloc-sys-0.3.2/jemalloc/test/unit/junk_free.c010064400007650000024000000000221340421340100201570ustar0000000000000000#include "junk.c" jemalloc-sys-0.3.2/jemalloc/test/unit/junk_free.sh010064400007650000024000000001551340421340100203560ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,zero:false,junk:free" fi jemalloc-sys-0.3.2/jemalloc/test/unit/log.c010064400007650000024000000077141340421340100170070ustar0000000000000000#include "test/jemalloc_test.h" #include 
"jemalloc/internal/log.h" static void expect_no_logging(const char *names) { log_var_t log_l1 = LOG_VAR_INIT("l1"); log_var_t log_l2 = LOG_VAR_INIT("l2"); log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); strcpy(log_var_names, names); int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) log_do_begin(log_l2) count++; log_do_end(log_l2) log_do_begin(log_l2_a) count++; log_do_end(log_l2_a) } assert_d_eq(count, 0, "Disabled logging not ignored!"); } TEST_BEGIN(test_log_disabled) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); expect_no_logging(""); expect_no_logging("abc"); expect_no_logging("a.b.c"); expect_no_logging("l12"); expect_no_logging("l123|a456|b789"); expect_no_logging("|||"); } TEST_END TEST_BEGIN(test_log_enabled_direct) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); log_var_t log_l1 = LOG_VAR_INIT("l1"); log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); log_var_t log_l2 = LOG_VAR_INIT("l2"); int count; count = 0; strcpy(log_var_names, "l1"); for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) } assert_d_eq(count, 10, "Mis-logged!"); count = 0; strcpy(log_var_names, "l1.a"); for (int i = 0; i < 10; i++) { log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) } assert_d_eq(count, 10, "Mis-logged!"); count = 0; strcpy(log_var_names, "l1.a|abc|l2|def"); for (int i = 0; i < 10; i++) { log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) log_do_begin(log_l2) count++; log_do_end(log_l2) } assert_d_eq(count, 20, "Mis-logged!"); } TEST_END TEST_BEGIN(test_log_enabled_indirect) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); strcpy(log_var_names, "l0|l1|abc|l2.b|def"); /* On. */ log_var_t log_l1 = LOG_VAR_INIT("l1"); /* Off. */ log_var_t log_l1a = LOG_VAR_INIT("l1a"); /* On. */ log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); /* Off. */ log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); /* On. */ log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a"); /* On. */ log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b"); /* 4 are on total, so should sum to 40. */ int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) log_do_begin(log_l1a) count++; log_do_end(log_l1a) log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) log_do_begin(log_l2_a) count++; log_do_end(log_l2_a) log_do_begin(log_l2_b_a) count++; log_do_end(log_l2_b_a) log_do_begin(log_l2_b_b) count++; log_do_end(log_l2_b_b) } assert_d_eq(count, 40, "Mis-logged!"); } TEST_END TEST_BEGIN(test_log_enabled_global) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); strcpy(log_var_names, "abc|.|def"); log_var_t log_l1 = LOG_VAR_INIT("l1"); log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a"); int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) log_do_begin(log_l2_a_a) count++; log_do_end(log_l2_a_a) } assert_d_eq(count, 20, "Mis-logged!"); } TEST_END TEST_BEGIN(test_logs_if_no_init) { test_skip_if(!config_log); atomic_store_b(&log_init_done, false, ATOMIC_RELAXED); log_var_t l = LOG_VAR_INIT("definitely.not.enabled"); int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(l) count++; log_do_end(l) } assert_d_eq(count, 0, "Logging shouldn't happen if not initialized."); } TEST_END /* * This really just checks to make sure that this usage compiles; we don't have * any test code to run. 
*/ TEST_BEGIN(test_log_only_format_string) { if (false) { LOG("log_str", "No arguments follow this format string."); } } TEST_END int main(void) { return test( test_log_disabled, test_log_enabled_direct, test_log_enabled_indirect, test_log_enabled_global, test_logs_if_no_init, test_log_only_format_string); } jemalloc-sys-0.3.2/jemalloc/test/unit/mallctl.c010064400007650000024000000634471340421341300176660ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/util.h" TEST_BEGIN(test_mallctl_errors) { uint64_t epoch; size_t sz; assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, "mallctl() should return ENOENT for non-existent names"); assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on attempt to write " "read-only value"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)-1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)+1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctlnametomib_errors) { size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, "mallctlnametomib() should return ENOENT for non-existent names"); } TEST_END TEST_BEGIN(test_mallctlbymib_errors) { uint64_t epoch; size_t sz; size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " "attempt to write read-only value"); miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)-1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)+1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctl_read_write) { uint64_t old_epoch, new_epoch; size_t sz = sizeof(old_epoch); /* Blind. */ assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read. */ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Write. */ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read+write. 
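 * A single mallctl() call both reads the old epoch and writes the
 * new one.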
*/ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, (void *)&new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); } TEST_END TEST_BEGIN(test_mallctlnametomib_short_mib) { size_t mib[4]; size_t miblen; miblen = 3; mib[3] = 42; assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_zu_eq(miblen, 3, "Unexpected mib output length"); assert_zu_eq(mib[3], 42, "mallctlnametomib() wrote past the end of the input mib"); } TEST_END TEST_BEGIN(test_mallctl_config) { #define TEST_MALLCTL_CONFIG(config, t) do { \ t oldval; \ size_t sz = sizeof(oldval); \ assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_b_eq(oldval, config_##config, "Incorrect config value"); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_CONFIG(cache_oblivious, bool); TEST_MALLCTL_CONFIG(debug, bool); TEST_MALLCTL_CONFIG(fill, bool); TEST_MALLCTL_CONFIG(lazy_lock, bool); TEST_MALLCTL_CONFIG(malloc_conf, const char *); TEST_MALLCTL_CONFIG(prof, bool); TEST_MALLCTL_CONFIG(prof_libgcc, bool); TEST_MALLCTL_CONFIG(prof_libunwind, bool); TEST_MALLCTL_CONFIG(stats, bool); TEST_MALLCTL_CONFIG(utrace, bool); TEST_MALLCTL_CONFIG(xmalloc, bool); #undef TEST_MALLCTL_CONFIG } TEST_END TEST_BEGIN(test_mallctl_opt) { bool config_always = true; #define TEST_MALLCTL_OPT(t, opt, config) do { \ t oldval; \ size_t sz = sizeof(oldval); \ int expected = config_##config ? 0 : ENOENT; \ int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \ 0); \ assert_d_eq(result, expected, \ "Unexpected mallctl() result for opt."#opt); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_OPT(bool, abort, always); TEST_MALLCTL_OPT(bool, abort_conf, always); TEST_MALLCTL_OPT(const char *, metadata_thp, always); TEST_MALLCTL_OPT(bool, retain, always); TEST_MALLCTL_OPT(const char *, dss, always); TEST_MALLCTL_OPT(unsigned, narenas, always); TEST_MALLCTL_OPT(const char *, percpu_arena, always); TEST_MALLCTL_OPT(bool, background_thread, always); TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always); TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always); TEST_MALLCTL_OPT(bool, stats_print, always); TEST_MALLCTL_OPT(const char *, junk, fill); TEST_MALLCTL_OPT(bool, zero, fill); TEST_MALLCTL_OPT(bool, utrace, utrace); TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); TEST_MALLCTL_OPT(bool, tcache, always); TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always); TEST_MALLCTL_OPT(size_t, lg_tcache_max, always); TEST_MALLCTL_OPT(const char *, thp, always); TEST_MALLCTL_OPT(bool, prof, prof); TEST_MALLCTL_OPT(const char *, prof_prefix, prof); TEST_MALLCTL_OPT(bool, prof_active, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); TEST_MALLCTL_OPT(bool, prof_accum, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); TEST_MALLCTL_OPT(bool, prof_gdump, prof); TEST_MALLCTL_OPT(bool, prof_final, prof); TEST_MALLCTL_OPT(bool, prof_leak, prof); #undef TEST_MALLCTL_OPT } TEST_END TEST_BEGIN(test_manpage_example) { unsigned nbins, i; size_t mib[4]; size_t len, miblen; len = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, "Unexpected mallctl() failure"); miblen = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); for (i = 0; i < nbins; i++) { size_t bin_size; mib[2] = i; len = sizeof(bin_size); 
assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0), 0, "Unexpected mallctlbymib() failure"); /* Do something with bin_size... */ } } TEST_END TEST_BEGIN(test_tcache_none) { test_skip_if(!opt_tcache); /* Allocate p and q. */ void *p0 = mallocx(42, 0); assert_ptr_not_null(p0, "Unexpected mallocx() failure"); void *q = mallocx(42, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); /* Deallocate p and q, but bypass the tcache for q. */ dallocx(p0, 0); dallocx(q, MALLOCX_TCACHE_NONE); /* Make sure that tcache-based allocation returns p, not q. */ void *p1 = mallocx(42, 0); assert_ptr_not_null(p1, "Unexpected mallocx() failure"); assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); /* Clean up. */ dallocx(p1, MALLOCX_TCACHE_NONE); } TEST_END TEST_BEGIN(test_tcache) { #define NTCACHES 10 unsigned tis[NTCACHES]; void *ps[NTCACHES]; void *qs[NTCACHES]; unsigned i; size_t sz, psz, qsz; psz = 42; qsz = nallocx(psz, 0) + 1; /* Create tcaches. */ for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Exercise tcache ID recycling. */ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Flush empty tcaches. */ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Cache some allocations. */ for (i = 0; i < NTCACHES; i++) { ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); dallocx(ps[i], MALLOCX_TCACHE(tis[i])); qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", i); dallocx(qs[i], MALLOCX_TCACHE(tis[i])); } /* Verify that tcaches allocate cached regions. */ for (i = 0; i < NTCACHES; i++) { void *p0 = ps[i]; ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); assert_ptr_eq(ps[i], p0, "Expected mallocx() to allocate cached region, i=%u", i); } /* Verify that reallocation uses cached regions. */ for (i = 0; i < NTCACHES; i++) { void *q0 = qs[i]; qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", i); assert_ptr_eq(qs[i], q0, "Expected rallocx() to allocate cached region, i=%u", i); /* Avoid undefined behavior in case of test failure. */ if (qs[i] == NULL) { qs[i] = ps[i]; } } for (i = 0; i < NTCACHES; i++) { dallocx(qs[i], MALLOCX_TCACHE(tis[i])); } /* Flush some non-empty tcaches. */ for (i = 0; i < NTCACHES/2; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Destroy tcaches. 
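 * (Destruction also flushes any regions still cached, including the
 * half left unflushed above.)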
*/ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } } TEST_END TEST_BEGIN(test_thread_arena) { unsigned old_arena_ind, new_arena_ind, narenas; const char *opa; size_t sz = sizeof(opa); assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); if (strcmp(opa, "disabled") == 0) { new_arena_ind = narenas - 1; assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); new_arena_ind = 0; assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); } else { assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1; if (old_arena_ind != new_arena_ind) { assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, sizeof(unsigned)), EPERM, "thread.arena ctl " "should not be allowed with percpu arena"); } } } TEST_END TEST_BEGIN(test_arena_i_initialized) { unsigned narenas, i; size_t sz; size_t mib[3]; size_t miblen = sizeof(mib) / sizeof(size_t); bool initialized; sz = sizeof(narenas); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); for (i = 0; i < narenas; i++) { mib[1] = i; sz = sizeof(initialized); assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); } mib[1] = MALLCTL_ARENAS_ALL; sz = sizeof(initialized); assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_true(initialized, "Merged arena statistics should always be initialized"); /* Equivalent to the above but using mallctl() directly. */ sz = sizeof(initialized); assert_d_eq(mallctl( "arena." 
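	    /* Adjacent string literals concatenate, so this spells out the
	     * merged-arena node "arena.<MALLCTL_ARENAS_ALL>.initialized". */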
STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized", (void *)&initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_true(initialized, "Merged arena statistics should always be initialized"); } TEST_END TEST_BEGIN(test_arena_i_dirty_decay_ms) { ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arena.0.dirty_decay_ms", (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); dirty_decay_ms = -2; assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); dirty_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms++) { ssize_t old_dirty_decay_ms; assert_d_eq(mallctl("arena.0.dirty_decay_ms", (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, "Unexpected old arena.0.dirty_decay_ms"); } } TEST_END TEST_BEGIN(test_arena_i_muzzy_decay_ms) { ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arena.0.muzzy_decay_ms", (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); muzzy_decay_ms = -2; assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); muzzy_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms++) { ssize_t old_muzzy_decay_ms; assert_d_eq(mallctl("arena.0.muzzy_decay_ms", (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, "Unexpected old arena.0.muzzy_decay_ms"); } } TEST_END TEST_BEGIN(test_arena_i_purge) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; size_t miblen = 3; assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); mib[1] = MALLCTL_ARENAS_ALL; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_END TEST_BEGIN(test_arena_i_decay) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; size_t miblen = 3; assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); mib[1] = MALLCTL_ARENAS_ALL; 
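	/* MALLCTL_ARENAS_ALL applies the decay to every initialized arena. */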
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_END TEST_BEGIN(test_arena_i_dss) { const char *dss_prec_old, *dss_prec_new; size_t sz = sizeof(dss_prec_old); size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, (void *)&dss_prec_old, sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); mib[1] = narenas_total_get(); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, (void *)&dss_prec_old, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); } TEST_END TEST_BEGIN(test_arena_i_retain_grow_limit) { size_t old_limit, new_limit, default_limit; size_t mib[3]; size_t miblen; bool retain_enabled; size_t sz = sizeof(retain_enabled); assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); test_skip_if(!retain_enabled); sz = sizeof(default_limit); miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(default_limit, sz_pind2sz(EXTENT_GROW_MAX_PIND), "Unexpected default for retain_grow_limit"); new_limit = PAGE - 1; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), EFAULT, "Unexpected mallctl() success"); new_limit = PAGE + 1; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(old_limit, PAGE, "Unexpected value for retain_grow_limit"); /* Expect grow less than psize class 10. */ new_limit = sz_pind2sz(10) - 1; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(old_limit, sz_pind2sz(9), "Unexpected value for retain_grow_limit"); /* Restore to default. 
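 * (Keeps later tests from running under a tightened grow limit.)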
*/ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit, sizeof(default_limit)), 0, "Unexpected mallctl() failure"); } TEST_END TEST_BEGIN(test_arenas_dirty_decay_ms) { ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arenas.dirty_decay_ms", (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); dirty_decay_ms = -2; assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); dirty_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Expected mallctl() failure"); for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms++) { ssize_t old_dirty_decay_ms; assert_d_eq(mallctl("arenas.dirty_decay_ms", (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, "Unexpected old arenas.dirty_decay_ms"); } } TEST_END TEST_BEGIN(test_arenas_muzzy_decay_ms) { ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arenas.muzzy_decay_ms", (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); muzzy_decay_ms = -2; assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); muzzy_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Expected mallctl() failure"); for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms++) { ssize_t old_muzzy_decay_ms; assert_d_eq(mallctl("arenas.muzzy_decay_ms", (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, "Unexpected old arenas.muzzy_decay_ms"); } } TEST_END TEST_BEGIN(test_arenas_constants) { #define TEST_ARENAS_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, page, PAGE); TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS); #undef TEST_ARENAS_CONSTANT } TEST_END TEST_BEGIN(test_arenas_bin_constants) { #define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs); TEST_ARENAS_BIN_CONSTANT(size_t, slab_size, bin_infos[0].slab_size); #undef TEST_ARENAS_BIN_CONSTANT } TEST_END TEST_BEGIN(test_arenas_lextent_constants) { #define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \ &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, 
expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS); #undef TEST_ARENAS_LEXTENT_CONSTANT } TEST_END TEST_BEGIN(test_arenas_create) { unsigned narenas_before, arena, narenas_after; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas_before+1, narenas_after, "Unexpected number of arenas before versus after extension"); assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); } TEST_END TEST_BEGIN(test_arenas_lookup) { unsigned arena, arena1; void *ptr; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); assert_ptr_not_null(ptr, "Unexpected mallocx() failure"); assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_eq(arena, arena1, "Unexpected arena index"); dallocx(ptr, 0); } TEST_END TEST_BEGIN(test_stats_arenas) { #define TEST_STATS_ARENAS(t, name) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ } while (0) TEST_STATS_ARENAS(unsigned, nthreads); TEST_STATS_ARENAS(const char *, dss); TEST_STATS_ARENAS(ssize_t, dirty_decay_ms); TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms); TEST_STATS_ARENAS(size_t, pactive); TEST_STATS_ARENAS(size_t, pdirty); #undef TEST_STATS_ARENAS } TEST_END int main(void) { return test( test_mallctl_errors, test_mallctlnametomib_errors, test_mallctlbymib_errors, test_mallctl_read_write, test_mallctlnametomib_short_mib, test_mallctl_config, test_mallctl_opt, test_manpage_example, test_tcache_none, test_tcache, test_thread_arena, test_arena_i_initialized, test_arena_i_dirty_decay_ms, test_arena_i_muzzy_decay_ms, test_arena_i_purge, test_arena_i_decay, test_arena_i_dss, test_arena_i_retain_grow_limit, test_arenas_dirty_decay_ms, test_arenas_muzzy_decay_ms, test_arenas_constants, test_arenas_bin_constants, test_arenas_lextent_constants, test_arenas_create, test_arenas_lookup, test_stats_arenas); } jemalloc-sys-0.3.2/jemalloc/test/unit/malloc_io.c010064400007650000024000000175361340421340100201670ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_malloc_strtoumax_no_endptr) { int err; set_errno(0); assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); err = get_errno(); assert_d_eq(err, 0, "Unexpected failure"); } TEST_END TEST_BEGIN(test_malloc_strtoumax) { struct test_s { const char *input; const char *expected_remainder; int base; int expected_errno; const char *expected_errno_name; uintmax_t expected_x; }; #define ERR(e) e, #e #define KUMAX(x) ((uintmax_t)x##ULL) #define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) struct test_s tests[] = { {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, {"42", "", 0, ERR(0), KUMAX(42)}, {"+42", "", 0, ERR(0), KUMAX(42)}, {"-42", "", 0, ERR(0), KSMAX(-42)}, {"042", "", 0, ERR(0), 
KUMAX(042)}, {"+042", "", 0, ERR(0), KUMAX(042)}, {"-042", "", 0, ERR(0), KSMAX(-042)}, {"0x42", "", 0, ERR(0), KUMAX(0x42)}, {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, {"0", "", 0, ERR(0), KUMAX(0)}, {"1", "", 0, ERR(0), KUMAX(1)}, {"42", "", 0, ERR(0), KUMAX(42)}, {" 42", "", 0, ERR(0), KUMAX(42)}, {"42 ", " ", 0, ERR(0), KUMAX(42)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"42x", "x", 0, ERR(0), KUMAX(42)}, {"07", "", 0, ERR(0), KUMAX(7)}, {"010", "", 0, ERR(0), KUMAX(8)}, {"08", "8", 0, ERR(0), KUMAX(0)}, {"0_", "_", 0, ERR(0), KUMAX(0)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"0X", "X", 0, ERR(0), KUMAX(0)}, {"0xg", "xg", 0, ERR(0), KUMAX(0)}, {"0XA", "", 0, ERR(0), KUMAX(10)}, {"010", "", 10, ERR(0), KUMAX(10)}, {"0x3", "x3", 10, ERR(0), KUMAX(0)}, {"12", "2", 2, ERR(0), KUMAX(1)}, {"78", "8", 8, ERR(0), KUMAX(7)}, {"9a", "a", 10, ERR(0), KUMAX(9)}, {"9A", "A", 10, ERR(0), KUMAX(9)}, {"fg", "g", 16, ERR(0), KUMAX(15)}, {"FG", "G", 16, ERR(0), KUMAX(15)}, {"0xfg", "g", 16, ERR(0), KUMAX(15)}, {"0XFG", "G", 16, ERR(0), KUMAX(15)}, {"z_", "_", 36, ERR(0), KUMAX(35)}, {"Z_", "_", 36, ERR(0), KUMAX(35)} }; #undef ERR #undef KUMAX #undef KSMAX unsigned i; for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { struct test_s *test = &tests[i]; int err; uintmax_t result; char *remainder; set_errno(0); result = malloc_strtoumax(test->input, &remainder, test->base); err = get_errno(); assert_d_eq(err, test->expected_errno, "Expected errno %s for \"%s\", base %d", test->expected_errno_name, test->input, test->base); assert_str_eq(remainder, test->expected_remainder, "Unexpected remainder for \"%s\", base %d", test->input, test->base); if (err == 0) { assert_ju_eq(result, test->expected_x, "Unexpected result for \"%s\", base %d", test->input, test->base); } } } TEST_END TEST_BEGIN(test_malloc_snprintf_truncated) { #define BUFLEN 15 char buf[BUFLEN]; size_t result; size_t len; #define TEST(expected_str_untruncated, ...) do { \ result = malloc_snprintf(buf, len, __VA_ARGS__); \ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ "Unexpected string inequality (\"%s\" vs \"%s\")", \ buf, expected_str_untruncated); \ assert_zu_eq(result, strlen(expected_str_untruncated), \ "Unexpected result"); \ } while (0) for (len = 1; len < BUFLEN; len++) { TEST("012346789", "012346789"); TEST("a0123b", "a%sb", "0123"); TEST("a01234567", "a%s%s", "0123", "4567"); TEST("a0123 ", "a%-6s", "0123"); TEST("a 0123", "a%6s", "0123"); TEST("a 012", "a%6.3s", "0123"); TEST("a 012", "a%*.*s", 6, 3, "0123"); TEST("a 123b", "a% db", 123); TEST("a123b", "a%-db", 123); TEST("a-123b", "a%-db", -123); TEST("a+123b", "a%+db", 123); } #undef BUFLEN #undef TEST } TEST_END TEST_BEGIN(test_malloc_snprintf) { #define BUFLEN 128 char buf[BUFLEN]; size_t result; #define TEST(expected_str, ...) 
do { \ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ assert_str_eq(buf, expected_str, "Unexpected output"); \ assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ } while (0) TEST("hello", "hello"); TEST("50%, 100%", "50%%, %d%%", 100); TEST("a0123b", "a%sb", "0123"); TEST("a 0123b", "a%5sb", "0123"); TEST("a 0123b", "a%*sb", 5, "0123"); TEST("a0123 b", "a%-5sb", "0123"); TEST("a0123b", "a%*sb", -1, "0123"); TEST("a0123 b", "a%*sb", -5, "0123"); TEST("a0123 b", "a%-*sb", -5, "0123"); TEST("a012b", "a%.3sb", "0123"); TEST("a012b", "a%.*sb", 3, "0123"); TEST("a0123b", "a%.*sb", -3, "0123"); TEST("a 012b", "a%5.3sb", "0123"); TEST("a 012b", "a%5.*sb", 3, "0123"); TEST("a 012b", "a%*.3sb", 5, "0123"); TEST("a 012b", "a%*.*sb", 5, 3, "0123"); TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); TEST("_abcd_", "_%x_", 0xabcd); TEST("_0xabcd_", "_%#x_", 0xabcd); TEST("_1234_", "_%o_", 01234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_1234_", "_%d_", 1234); TEST("_ 1234_", "_% d_", 1234); TEST("_+1234_", "_%+d_", 1234); TEST("_-1234_", "_%d_", -1234); TEST("_-1234_", "_% d_", -1234); TEST("_-1234_", "_%+d_", -1234); TEST("_-1234_", "_%d_", -1234); TEST("_1234_", "_%d_", 1234); TEST("_-1234_", "_%i_", -1234); TEST("_1234_", "_%i_", 1234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_0x1234abc_", "_%#x_", 0x1234abc); TEST("_0X1234ABC_", "_%#X_", 0x1234abc); TEST("_c_", "_%c_", 'c'); TEST("_string_", "_%s_", "string"); TEST("_0x42_", "_%p_", ((void *)0x42)); TEST("_-1234_", "_%ld_", ((long)-1234)); TEST("_1234_", "_%ld_", ((long)1234)); TEST("_-1234_", "_%li_", ((long)-1234)); TEST("_1234_", "_%li_", ((long)1234)); TEST("_01234_", "_%#lo_", ((long)01234)); TEST("_1234_", "_%lu_", ((long)1234)); TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); TEST("_-1234_", "_%lld_", ((long long)-1234)); TEST("_1234_", "_%lld_", ((long long)1234)); TEST("_-1234_", "_%lli_", ((long long)-1234)); TEST("_1234_", "_%lli_", ((long long)1234)); TEST("_01234_", "_%#llo_", ((long long)01234)); TEST("_1234_", "_%llu_", ((long long)1234)); TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); TEST("_-1234_", "_%qd_", ((long long)-1234)); TEST("_1234_", "_%qd_", ((long long)1234)); TEST("_-1234_", "_%qi_", ((long long)-1234)); TEST("_1234_", "_%qi_", ((long long)1234)); TEST("_01234_", "_%#qo_", ((long long)01234)); TEST("_1234_", "_%qu_", ((long long)1234)); TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); TEST("_1234_", "_%jd_", ((intmax_t)1234)); TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); TEST("_1234_", "_%ji_", ((intmax_t)1234)); TEST("_01234_", "_%#jo_", ((intmax_t)01234)); TEST("_1234_", "_%ju_", ((intmax_t)1234)); TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); TEST("_1234_", "_%zd_", ((ssize_t)1234)); TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); TEST("_1234_", "_%zi_", ((ssize_t)1234)); TEST("_01234_", "_%#zo_", ((ssize_t)01234)); TEST("_1234_", "_%zu_", ((ssize_t)1234)); TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); TEST("_0X1234ABC_", "_%#zX_", 
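	    /* %z pairs with size_t/ssize_t; # requests the 0X prefix. */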
((ssize_t)0x1234ABC)); #undef BUFLEN } TEST_END int main(void) { return test( test_malloc_strtoumax_no_endptr, test_malloc_strtoumax, test_malloc_snprintf_truncated, test_malloc_snprintf); } jemalloc-sys-0.3.2/jemalloc/test/unit/math.c010064400007650000024000000440701340421340100171530ustar0000000000000000#include "test/jemalloc_test.h" #define MAX_REL_ERR 1.0e-9 #define MAX_ABS_ERR 1.0e-9 #include #ifdef __PGI #undef INFINITY #endif #ifndef INFINITY #define INFINITY (DBL_MAX + DBL_MAX) #endif static bool double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { double rel_err; if (fabs(a - b) < max_abs_err) { return true; } rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); return (rel_err < max_rel_err); } static uint64_t factorial(unsigned x) { uint64_t ret = 1; unsigned i; for (i = 2; i <= x; i++) { ret *= (uint64_t)i; } return ret; } TEST_BEGIN(test_ln_gamma_factorial) { unsigned x; /* exp(ln_gamma(x)) == (x-1)! for integer x. */ for (x = 1; x <= 21; x++) { assert_true(double_eq_rel(exp(ln_gamma(x)), (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), "Incorrect factorial result for x=%u", x); } } TEST_END /* Expected ln_gamma([0.0..100.0] increment=0.25). */ static const double ln_gamma_misc_expected[] = { INFINITY, 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, 
59.62784609588432261, 60.44358357816834371, 61.26170176100199427, 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, 101.14611615586458981, 102.05634043243354370, 102.96819861451382394, 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, 
207.25347005962987623, 208.29754948708190909, 209.34258675253678916, 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, 251.89040220972316320, 252.97185064629374551, 254.05412415488834199, 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, 359.13420536957539753 }; TEST_BEGIN(test_ln_gamma_misc) { unsigned i; for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { double x = (double)i * 0.25; assert_true(double_eq_rel(ln_gamma(x), ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect ln_gamma result for i=%u", i); } } TEST_END /* Expected pt_norm([0.01..0.99] increment=0.01). 
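 * pt_norm(p) is the standard normal quantile (inverse CDF), so the
 * table passes through 0 at p = 0.50 and is antisymmetric around it.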
*/ static const double pt_norm_expected[] = { -INFINITY, -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 }; TEST_BEGIN(test_pt_norm) { unsigned i; for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { double p = (double)i * 0.01; assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_norm result for i=%u", i); } } TEST_END /* * Expected pt_chi2(p=[0.01..0.99] increment=0.07, * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
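 * pt_chi2 is the chi-squared quantile; the test loop precomputes
 * ln_gamma(df/2) once per df and passes it in as ln_gamma_df.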
*/ static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; static const double pt_chi2_expected[] = { 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, 2.606673548632508, 4.602913725294877, 5.646152813924212, 6.488971315540869, 7.249823275816285, 7.977314231410841, 8.700354939944047, 9.441728024225892, 10.224338321374127, 11.076435368801061, 12.039320937038386, 13.183878752697167, 14.657791935084575, 16.885728216339373, 23.361991680031817, 70.14844087392152, 80.92379498849355, 85.53325420085891, 88.94433120715347, 91.83732712857017, 94.46719943606301, 96.96896479994635, 99.43412843510363, 101.94074719829733, 104.57228644307247, 107.43900093448734, 110.71844673417287, 114.76616819871325, 120.57422505959563, 135.92318818757556, 899.0072447849649, 937.9271278858220, 953.8117189560207, 965.3079371501154, 974.8974061207954, 983.4936235182347, 991.5691170518946, 999.4334123954690, 1007.3391826856553, 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 }; TEST_BEGIN(test_pt_chi2) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { double df = pt_chi2_df[i]; double ln_gamma_df = ln_gamma(df * 0.5); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_chi2 result for i=%u, j=%u", i, j); e++; } } } TEST_END /* * Expected pt_gamma(p=[0.1..0.99] increment=0.07, * shape=[0.5..3.0] increment=0.5). 
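 * pt_gamma is the gamma-distribution quantile evaluated at scale 1.0
 * here; scale enters linearly, which test_pt_gamma_scale verifies.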
*/ static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; static const double pt_gamma_expected[] = { 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 }; TEST_BEGIN(test_pt_gamma_shape) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { double shape = pt_gamma_shape[i]; double ln_gamma_shape = ln_gamma(shape); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_gamma result for i=%u, j=%u", i, j); e++; } } } TEST_END TEST_BEGIN(test_pt_gamma_scale) { double shape = 1.0; double ln_gamma_shape = ln_gamma(shape); assert_true(double_eq_rel( pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, MAX_ABS_ERR), "Scale should be trivially equivalent to external multiplication"); } TEST_END int main(void) { return test( test_ln_gamma_factorial, test_ln_gamma_misc, test_pt_norm, test_pt_chi2, test_pt_gamma_shape, test_pt_gamma_scale); } jemalloc-sys-0.3.2/jemalloc/test/unit/mq.c010064400007650000024000000034111340421340100166310ustar0000000000000000#include "test/jemalloc_test.h" #define NSENDERS 3 #define NMSGS 100000 typedef struct mq_msg_s mq_msg_t; struct mq_msg_s { mq_msg(mq_msg_t) link; }; mq_gen(static, mq_, mq_t, mq_msg_t, link) TEST_BEGIN(test_mq_basic) { mq_t mq; mq_msg_t msg; assert_false(mq_init(&mq), "Unexpected mq_init() failure"); assert_u_eq(mq_count(&mq), 0, "mq should be empty"); assert_ptr_null(mq_tryget(&mq), "mq_tryget() should fail when the queue is empty"); mq_put(&mq, &msg); assert_u_eq(mq_count(&mq), 1, "mq should contain one message"); 
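	/* A non-empty queue must hand back the queued message via tryget. */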
assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); mq_put(&mq, &msg); assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); mq_fini(&mq); } TEST_END static void * thd_receiver_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < (NSENDERS * NMSGS); i++) { mq_msg_t *msg = mq_get(mq); assert_ptr_not_null(msg, "mq_get() should never return NULL"); dallocx(msg, 0); } return NULL; } static void * thd_sender_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < NMSGS; i++) { mq_msg_t *msg; void *p; p = mallocx(sizeof(mq_msg_t), 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); msg = (mq_msg_t *)p; mq_put(mq, msg); } return NULL; } TEST_BEGIN(test_mq_threaded) { mq_t mq; thd_t receiver; thd_t senders[NSENDERS]; unsigned i; assert_false(mq_init(&mq), "Unexpected mq_init() failure"); thd_create(&receiver, thd_receiver_start, (void *)&mq); for (i = 0; i < NSENDERS; i++) { thd_create(&senders[i], thd_sender_start, (void *)&mq); } thd_join(receiver, NULL); for (i = 0; i < NSENDERS; i++) { thd_join(senders[i], NULL); } mq_fini(&mq); } TEST_END int main(void) { return test( test_mq_basic, test_mq_threaded); } jemalloc-sys-0.3.2/jemalloc/test/unit/mtx.c010064400007650000024000000017601340421340100170310ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 2 #define NINCRS 2000000 TEST_BEGIN(test_mtx_basic) { mtx_t mtx; assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); mtx_lock(&mtx); mtx_unlock(&mtx); mtx_fini(&mtx); } TEST_END typedef struct { mtx_t mtx; unsigned x; } thd_start_arg_t; static void * thd_start(void *varg) { thd_start_arg_t *arg = (thd_start_arg_t *)varg; unsigned i; for (i = 0; i < NINCRS; i++) { mtx_lock(&arg->mtx); arg->x++; mtx_unlock(&arg->mtx); } return NULL; } TEST_BEGIN(test_mtx_race) { thd_start_arg_t arg; thd_t thds[NTHREADS]; unsigned i; assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); arg.x = 0; for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)&arg); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } assert_u_eq(arg.x, NTHREADS * NINCRS, "Race-related counter corruption"); } TEST_END int main(void) { return test( test_mtx_basic, test_mtx_race); } jemalloc-sys-0.3.2/jemalloc/test/unit/nstime.c010064400007650000024000000142621340421340100175210ustar0000000000000000#include "test/jemalloc_test.h" #define BILLION UINT64_C(1000000000) TEST_BEGIN(test_nstime_init) { nstime_t nst; nstime_init(&nst, 42000000043); assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read"); assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); } TEST_END TEST_BEGIN(test_nstime_init2) { nstime_t nst; nstime_init2(&nst, 42, 43); assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); } TEST_END TEST_BEGIN(test_nstime_copy) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_init(&nstb, 0); nstime_copy(&nstb, &nsta); assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied"); assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied"); } TEST_END TEST_BEGIN(test_nstime_compare) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal"); assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal"); nstime_init2(&nstb, 42, 42); assert_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than 
nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 42, 44); assert_d_eq(nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); nstime_init2(&nstb, 41, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 43, 0); assert_d_eq(nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); } TEST_END TEST_BEGIN(test_nstime_add) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 84, 86); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); nstime_init2(&nsta, 42, BILLION - 1); nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 85, BILLION - 2); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); } TEST_END TEST_BEGIN(test_nstime_iadd) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, BILLION - 1); nstime_iadd(&nsta, 1); nstime_init2(&nstb, 43, 0); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); nstime_init2(&nsta, 42, 1); nstime_iadd(&nsta, BILLION + 1); nstime_init2(&nstb, 43, 2); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); } TEST_END TEST_BEGIN(test_nstime_subtract) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_subtract(&nsta, &nstb); nstime_init(&nstb, 0); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); nstime_init2(&nsta, 42, 43); nstime_init2(&nstb, 41, 44); nstime_subtract(&nsta, &nstb); nstime_init2(&nstb, 0, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); } TEST_END TEST_BEGIN(test_nstime_isubtract) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_isubtract(&nsta, 42*BILLION + 43); nstime_init(&nstb, 0); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); nstime_init2(&nsta, 42, 43); nstime_isubtract(&nsta, 41*BILLION + 44); nstime_init2(&nstb, 0, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); } TEST_END TEST_BEGIN(test_nstime_imultiply) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_imultiply(&nsta, 10); nstime_init2(&nstb, 420, 430); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); nstime_init2(&nsta, 42, 666666666); nstime_imultiply(&nsta, 3); nstime_init2(&nstb, 127, 999999998); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); } TEST_END TEST_BEGIN(test_nstime_idivide) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_idivide(&nsta, 10); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); nstime_init2(&nsta, 42, 666666666); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 3); nstime_idivide(&nsta, 3); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); } TEST_END TEST_BEGIN(test_nstime_divide) { nstime_t nsta, nstb, nstc; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); assert_u64_eq(nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); 
nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_add(&nsta, &nstc); assert_u64_eq(nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_subtract(&nsta, &nstc); assert_u64_eq(nstime_divide(&nsta, &nstb), 9, "Incorrect division result"); } TEST_END TEST_BEGIN(test_nstime_monotonic) { nstime_monotonic(); } TEST_END TEST_BEGIN(test_nstime_update) { nstime_t nst; nstime_init(&nst, 0); assert_false(nstime_update(&nst), "Basic time update failed."); /* Only Rip Van Winkle sleeps this long. */ { nstime_t addend; nstime_init2(&addend, 631152000, 0); nstime_add(&nst, &addend); } { nstime_t nst0; nstime_copy(&nst0, &nst); assert_true(nstime_update(&nst), "Update should detect time roll-back."); assert_d_eq(nstime_compare(&nst, &nst0), 0, "Time should not have been modified"); } } TEST_END int main(void) { return test( test_nstime_init, test_nstime_init2, test_nstime_copy, test_nstime_compare, test_nstime_add, test_nstime_iadd, test_nstime_subtract, test_nstime_isubtract, test_nstime_imultiply, test_nstime_idivide, test_nstime_divide, test_nstime_monotonic, test_nstime_update); } jemalloc-sys-0.3.2/jemalloc/test/unit/pack.c010064400007650000024000000076761340421340100171500ustar0000000000000000#include "test/jemalloc_test.h" /* * Size class that is a divisor of the page size, ideally 4+ regions per run. */ #if LG_PAGE <= 14 #define SZ (ZU(1) << (LG_PAGE - 2)) #else #define SZ ZU(4096) #endif /* * Number of slabs to consume at high water mark. Should be at least 2 so that * if mmap()ed memory grows downward, downward growth of mmap()ed memory is * tested. */ #define NSLABS 8 static unsigned binind_compute(void) { size_t sz; unsigned nbins, i; sz = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); for (i = 0; i < nbins; i++) { size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); size_t size; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); mib[2] = (size_t)i; sz = sizeof(size); assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); if (size == SZ) { return i; } } test_fail("Unable to compute nregs_per_run"); return 0; } static size_t nregs_per_run_compute(void) { uint32_t nregs; size_t sz; unsigned binind = binind_compute(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); mib[2] = (size_t)binind; sz = sizeof(nregs); assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); return nregs; } static unsigned arenas_create_mallctl(void) { unsigned arena_ind; size_t sz; sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Error in arenas.create"); return arena_ind; } static void arena_reset_mallctl(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_BEGIN(test_pack) { bool prof_enabled; size_t sz = sizeof(prof_enabled); if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) { test_skip_if(prof_enabled); }
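/* Note: heap profiling can promote sampled small allocations out of the tested bin, which would perturb the exact slab layout verified below; hence the skip above when profiling is enabled. */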
unsigned arena_ind = arenas_create_mallctl(); size_t nregs_per_run = nregs_per_run_compute(); size_t nregs = nregs_per_run * NSLABS; VARIABLE_ARRAY(void *, ptrs, nregs); size_t i, j, offset; /* Fill matrix. */ for (i = offset = 0; i < NSLABS; i++) { for (j = 0; j < nregs_per_run; j++) { void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |" " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu", SZ, arena_ind, i, j); ptrs[(i * nregs_per_run) + j] = p; } } /* * Free all but one region of each run, but rotate which region is * preserved, so that subsequent allocations exercise the within-run * layout policy. */ offset = 0; for (i = offset = 0; i < NSLABS; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p = ptrs[(i * nregs_per_run) + j]; if (offset == j) { continue; } dallocx(p, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); } } /* * Logically refill matrix, skipping preserved regions and verifying * that the matrix is unmodified. */ offset = 0; for (i = offset = 0; i < NSLABS; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p; if (offset == j) { continue; } p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j], "Unexpected refill discrepancy, run=%zu, reg=%zu\n", i, j); } } /* Clean up. */ arena_reset_mallctl(arena_ind); } TEST_END int main(void) { return test( test_pack); } jemalloc-sys-0.3.2/jemalloc/test/unit/pack.sh010064400007650000024000000001611340421340100173210ustar0000000000000000#!/bin/sh # Immediately purge to minimize fragmentation. export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0" jemalloc-sys-0.3.2/jemalloc/test/unit/pages.c010064400007650000024000000013261340421340100173160ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_pages_huge) { size_t alloc_size; bool commit; void *pages, *hugepage; alloc_size = HUGEPAGE * 2 - PAGE; commit = true; pages = pages_map(NULL, alloc_size, PAGE, &commit); assert_ptr_not_null(pages, "Unexpected pages_map() error"); if (init_system_thp_mode == thp_mode_default) { hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE)); assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge, "Unexpected pages_huge() result"); assert_false(pages_nohuge(hugepage, HUGEPAGE), "Unexpected pages_nohuge() result"); } pages_unmap(pages, alloc_size); } TEST_END int main(void) { return test( test_pages_huge); } jemalloc-sys-0.3.2/jemalloc/test/unit/ph.c010064400007650000024000000163151340421340100166320ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ph.h" typedef struct node_s node_t; struct node_s { #define NODE_MAGIC 0x9823af7e uint32_t magic; phn(node_t) link; uint64_t key; }; static int node_cmp(const node_t *a, const node_t *b) { int ret; ret = (a->key > b->key) - (a->key < b->key); if (ret == 0) { /* * Duplicates are not allowed in the heap, so force an * arbitrary ordering for non-identical items with equal keys. 
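* The address-based comparison below provides that ordering; it is stable for the lifetime of the nodes.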
*/ ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } return ret; } static int node_cmp_magic(const node_t *a, const node_t *b) { assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); return node_cmp(a, b); } typedef ph(node_t) heap_t; ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); static void node_print(const node_t *node, unsigned depth) { unsigned i; node_t *leftmost_child, *sibling; for (i = 0; i < depth; i++) { malloc_printf("\t"); } malloc_printf("%2"FMTu64"\n", node->key); leftmost_child = phn_lchild_get(node_t, link, node); if (leftmost_child == NULL) { return; } node_print(leftmost_child, depth + 1); for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != NULL; sibling = phn_next_get(node_t, link, sibling)) { node_print(sibling, depth + 1); } } static void heap_print(const heap_t *heap) { node_t *auxelm; malloc_printf("vvv heap %p vvv\n", heap); if (heap->ph_root == NULL) { goto label_return; } node_print(heap->ph_root, 0); for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; auxelm = phn_next_get(node_t, link, auxelm)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); node_print(auxelm, 0); } label_return: malloc_printf("^^^ heap %p ^^^\n", heap); } static unsigned node_validate(const node_t *node, const node_t *parent) { unsigned nnodes = 1; node_t *leftmost_child, *sibling; if (parent != NULL) { assert_d_ge(node_cmp_magic(node, parent), 0, "Child is less than parent"); } leftmost_child = phn_lchild_get(node_t, link, node); if (leftmost_child == NULL) { return nnodes; } assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), (void *)node, "Leftmost child does not link to node"); nnodes += node_validate(leftmost_child, node); for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != NULL; sibling = phn_next_get(node_t, link, sibling)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, sibling)), sibling, "sibling's prev doesn't link to sibling"); nnodes += node_validate(sibling, node); } return nnodes; } static unsigned heap_validate(const heap_t *heap) { unsigned nnodes = 0; node_t *auxelm; if (heap->ph_root == NULL) { goto label_return; } nnodes += node_validate(heap->ph_root, NULL); for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; auxelm = phn_next_get(node_t, link, auxelm)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); nnodes += node_validate(auxelm, NULL); } label_return: if (false) { heap_print(heap); } return nnodes; } TEST_BEGIN(test_ph_empty) { heap_t heap; heap_new(&heap); assert_true(heap_empty(&heap), "Heap should be empty"); assert_ptr_null(heap_first(&heap), "Unexpected node"); assert_ptr_null(heap_any(&heap), "Unexpected node"); } TEST_END static void node_remove(heap_t *heap, node_t *node) { heap_remove(heap, node); node->magic = 0; } static node_t * node_remove_first(heap_t *heap) { node_t *node = heap_remove_first(heap); node->magic = 0; return node; } static node_t * node_remove_any(heap_t *heap) { node_t *node = heap_remove_any(heap); node->magic = 0; return node; } TEST_BEGIN(test_ph_random) { #define NNODES 25 #define NBAGS 250 #define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; heap_t heap; node_t nodes[NNODES]; unsigned i, j, k; sfmt = init_gen_rand(SEED); for (i = 0; i < NBAGS; i++) { switch (i) { case 0: /* 
Insert in order. */ for (j = 0; j < NNODES; j++) { bag[j] = j; } break; case 1: /* Insert in reverse order. */ for (j = 0; j < NNODES; j++) { bag[j] = NNODES - j - 1; } break; default: for (j = 0; j < NNODES; j++) { bag[j] = gen_rand64_range(sfmt, NNODES); } } for (j = 1; j <= NNODES; j++) { /* Initialize heap and nodes. */ heap_new(&heap); assert_u_eq(heap_validate(&heap), 0, "Incorrect node count"); for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; } /* Insert nodes. */ for (k = 0; k < j; k++) { heap_insert(&heap, &nodes[k]); if (i % 13 == 12) { assert_ptr_not_null(heap_any(&heap), "Heap should not be empty"); /* Trigger merging. */ assert_ptr_not_null(heap_first(&heap), "Heap should not be empty"); } assert_u_eq(heap_validate(&heap), k + 1, "Incorrect node count"); } assert_false(heap_empty(&heap), "Heap should not be empty"); /* Remove nodes. */ switch (i % 6) { case 0: for (k = 0; k < j; k++) { assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); node_remove(&heap, &nodes[k]); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); } break; case 1: for (k = j; k > 0; k--) { node_remove(&heap, &nodes[k-1]); assert_u_eq(heap_validate(&heap), k - 1, "Incorrect node count"); } break; case 2: { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = node_remove_first(&heap); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); if (prev != NULL) { assert_d_ge(node_cmp(node, prev), 0, "Bad removal order"); } prev = node; } break; } case 3: { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = heap_first(&heap); assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); if (prev != NULL) { assert_d_ge(node_cmp(node, prev), 0, "Bad removal order"); } node_remove(&heap, node); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); prev = node; } break; } case 4: { for (k = 0; k < j; k++) { node_remove_any(&heap); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); } break; } case 5: { for (k = 0; k < j; k++) { node_t *node = heap_any(&heap); assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); node_remove(&heap, node); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); } break; } default: not_reached(); } assert_ptr_null(heap_first(&heap), "Heap should be empty"); assert_ptr_null(heap_any(&heap), "Heap should be empty"); assert_true(heap_empty(&heap), "Heap should be empty"); } } fini_gen_rand(sfmt); #undef NNODES #undef SEED } TEST_END int main(void) { return test( test_ph_empty, test_ph_random); } jemalloc-sys-0.3.2/jemalloc/test/unit/prng.c010064400007650000024000000137271340421340100171750ustar0000000000000000#include "test/jemalloc_test.h" static void test_prng_lg_range_u32(bool atomic) { atomic_u32_t sa, sb; uint32_t ra, rb; unsigned lg_range; atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); atomic_store_u32(&sa, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_eq(ra, rb, "Repeated generation should produce repeated results"); atomic_store_u32(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sb, 32, atomic); assert_u32_eq(ra, rb, "Equivalent generation should produce equivalent results"); atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_ne(ra, rb, "Full-width results must not immediately repeat"); atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); for 
(lg_range = 31; lg_range > 0; lg_range--) { atomic_store_u32(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sb, lg_range, atomic); assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_u32_eq(rb, (ra >> (32 - lg_range)), "Expected high order bits of full-width result, " "lg_range=%u", lg_range); } } static void test_prng_lg_range_u64(void) { uint64_t sa, sb, ra, rb; unsigned lg_range; sa = 42; ra = prng_lg_range_u64(&sa, 64); sa = 42; rb = prng_lg_range_u64(&sa, 64); assert_u64_eq(ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_u64(&sb, 64); assert_u64_eq(ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_u64(&sa, 64); rb = prng_lg_range_u64(&sa, 64); assert_u64_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_u64(&sa, 64); for (lg_range = 63; lg_range > 0; lg_range--) { sb = 42; rb = prng_lg_range_u64(&sb, lg_range); assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_u64_eq(rb, (ra >> (64 - lg_range)), "Expected high order bits of full-width result, " "lg_range=%u", lg_range); } } static void test_prng_lg_range_zu(bool atomic) { atomic_zu_t sa, sb; size_t ra, rb; unsigned lg_range; atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); atomic_store_zu(&sa, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Repeated generation should produce repeated results"); atomic_store_zu(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Equivalent generation should produce equivalent results"); atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_ne(ra, rb, "Full-width results must not immediately repeat"); atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; lg_range--) { atomic_store_zu(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sb, lg_range, atomic); assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range)), "Expected high order bits of full-width " "result, lg_range=%u", lg_range); } } TEST_BEGIN(test_prng_lg_range_u32_nonatomic) { test_prng_lg_range_u32(false); } TEST_END TEST_BEGIN(test_prng_lg_range_u32_atomic) { test_prng_lg_range_u32(true); } TEST_END TEST_BEGIN(test_prng_lg_range_u64_nonatomic) { test_prng_lg_range_u64(); } TEST_END TEST_BEGIN(test_prng_lg_range_zu_nonatomic) { test_prng_lg_range_zu(false); } TEST_END TEST_BEGIN(test_prng_lg_range_zu_atomic) { test_prng_lg_range_zu(true); } TEST_END static void test_prng_range_u32(bool atomic) { uint32_t range; #define MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { atomic_u32_t s; unsigned rep; atomic_store_u32(&s, range, ATOMIC_RELAXED); for (rep = 0; rep < NREPS; rep++) { uint32_t r = prng_range_u32(&s, range, atomic); assert_u32_lt(r, range, "Out of range"); } } } static void test_prng_range_u64(void) { uint64_t range; #define MAX_RANGE 10000000 #define 
RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { uint64_t s; unsigned rep; s = range; for (rep = 0; rep < NREPS; rep++) { uint64_t r = prng_range_u64(&s, range); assert_u64_lt(r, range, "Out of range"); } } } static void test_prng_range_zu(bool atomic) { size_t range; #define MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { atomic_zu_t s; unsigned rep; atomic_store_zu(&s, range, ATOMIC_RELAXED); for (rep = 0; rep < NREPS; rep++) { size_t r = prng_range_zu(&s, range, atomic); assert_zu_lt(r, range, "Out of range"); } } } TEST_BEGIN(test_prng_range_u32_nonatomic) { test_prng_range_u32(false); } TEST_END TEST_BEGIN(test_prng_range_u32_atomic) { test_prng_range_u32(true); } TEST_END TEST_BEGIN(test_prng_range_u64_nonatomic) { test_prng_range_u64(); } TEST_END TEST_BEGIN(test_prng_range_zu_nonatomic) { test_prng_range_zu(false); } TEST_END TEST_BEGIN(test_prng_range_zu_atomic) { test_prng_range_zu(true); } TEST_END int main(void) { return test( test_prng_lg_range_u32_nonatomic, test_prng_lg_range_u32_atomic, test_prng_lg_range_u64_nonatomic, test_prng_lg_range_zu_nonatomic, test_prng_lg_range_zu_atomic, test_prng_range_u32_nonatomic, test_prng_range_u32_atomic, test_prng_range_u64_nonatomic, test_prng_range_zu_nonatomic, test_prng_range_zu_atomic); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_accum.c010064400007650000024000000034121340421340100203330ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 4 #define NALLOCS_PER_THREAD 50 #define DUMP_INTERVAL 1 #define BT_COUNT_CHECK_INTERVAL 5 static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } static void * alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration); } static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; size_t bt_count_prev, bt_count; unsigned i_prev, i; i_prev = 0; bt_count_prev = 0; for (i = 0; i < NALLOCS_PER_THREAD; i++) { void *p = alloc_from_permuted_backtrace(thd_ind, i); dallocx(p, 0); if (i % DUMP_INTERVAL == 0) { assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); } if (i % BT_COUNT_CHECK_INTERVAL == 0 || i+1 == NALLOCS_PER_THREAD) { bt_count = prof_bt_count(); assert_zu_le(bt_count_prev+(i-i_prev), bt_count, "Expected larger backtrace count increase"); i_prev = i; bt_count_prev = bt_count; } } return NULL; } TEST_BEGIN(test_idump) { bool active; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } } TEST_END int main(void) { return test_no_reentrancy( test_idump); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_accum.sh010064400007650000024000000002111340421340100205150ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0" fi 
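# For reference (illustrative, based on how the jemalloc test harness is
# typically driven): the harness sources this script before running the
# matching binary, so an equivalent manual invocation would look roughly like:
#   MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0" test/unit/prof_accum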
jemalloc-sys-0.3.2/jemalloc/test/unit/prof_active.c010064400007650000024000000070111340421340100205150ustar0000000000000000#include "test/jemalloc_test.h" static void mallctl_bool_get(const char *name, bool expected, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading %s", func, line, name); assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_bool_set(const char *name, bool old_expected, bool val_new, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new, sizeof(val_new)), 0, "%s():%d: Unexpected mallctl failure reading/writing %s", func, line, name); assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, int line) { mallctl_bool_get("prof.active", prof_active_old_expected, func, line); } #define mallctl_prof_active_get(a) \ mallctl_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_prof_active_set_impl(bool prof_active_old_expected, bool prof_active_new, const char *func, int line) { mallctl_bool_set("prof.active", prof_active_old_expected, prof_active_new, func, line); } #define mallctl_prof_active_set(a, b) \ mallctl_prof_active_set_impl(a, b, __func__, __LINE__) static void mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, const char *func, int line) { mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, func, line); } #define mallctl_thread_prof_active_get(a) \ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, bool thread_prof_active_new, const char *func, int line) { mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, thread_prof_active_new, func, line); } #define mallctl_thread_prof_active_set(a, b) \ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) static void prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { void *p; size_t expected_backtraces = expect_sample ? 1 : 0; assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, line); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), expected_backtraces, "%s():%d: Unexpected backtrace count", func, line); dallocx(p, 0); } #define prof_sampling_probe(a) \ prof_sampling_probe_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); mallctl_prof_active_get(true); mallctl_thread_prof_active_get(false); mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(false, false); /* prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(true, false); mallctl_thread_prof_active_set(false, false); /* !prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, false); mallctl_thread_prof_active_set(false, true); /* !prof.active, thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, true); mallctl_thread_prof_active_set(true, true); /* prof.active, thread.prof.active. */ prof_sampling_probe(true); /* Restore settings. 
*/ mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(true, false); } TEST_END int main(void) { return test_no_reentrancy( test_prof_active); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_active.sh010064400007650000024000000002051340421340100207030ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0" fi jemalloc-sys-0.3.2/jemalloc/test/unit/prof_gdump.c010064400007650000024000000036751340421341300203730ustar0000000000000000#include "test/jemalloc_test.h" static bool did_prof_dump_open; static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } TEST_BEGIN(test_gdump) { bool active, gdump, gdump_old; void *p, *q, *r, *s; size_t sz; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; p = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); did_prof_dump_open = false; q = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); gdump = false; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; r = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(r, "Unexpected mallocx() failure"); assert_false(did_prof_dump_open, "Unexpected profile dump"); gdump = true; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; s = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(s, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); dallocx(p, 0); dallocx(q, 0); dallocx(r, 0); dallocx(s, 0); } TEST_END int main(void) { return test_no_reentrancy( test_gdump); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_gdump.sh010064400007650000024000000001711340421340100205460ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true" fi jemalloc-sys-0.3.2/jemalloc/test/unit/prof_idump.c010064400007650000024000000014741340421340100203670ustar0000000000000000#include "test/jemalloc_test.h" static bool did_prof_dump_open; static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } TEST_BEGIN(test_idump) { bool active; void *p; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); assert_true(did_prof_dump_open, "Expected a profile dump"); } TEST_END int main(void) { return
test( test_idump); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_idump.sh010064400007650000024000000003171340421340100205520ustar0000000000000000#!/bin/sh export MALLOC_CONF="tcache:false" if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0" fi jemalloc-sys-0.3.2/jemalloc/test/unit/prof_reset.c010064400007650000024000000164671340421340100204030ustar0000000000000000#include "test/jemalloc_test.h" static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } static void set_prof_active(bool active) { assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure"); } static size_t get_lg_prof_sample(void) { size_t lg_prof_sample; size_t sz = sizeof(size_t); assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); return lg_prof_sample; } static void do_prof_reset(size_t lg_prof_sample) { assert_d_eq(mallctl("prof.reset", NULL, NULL, (void *)&lg_prof_sample, sizeof(size_t)), 0, "Unexpected mallctl failure while resetting profile data"); assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), "Expected profile sample rate change"); } TEST_BEGIN(test_prof_reset_basic) { size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; size_t sz; unsigned i; test_skip_if(!config_prof); sz = sizeof(size_t); assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); assert_zu_eq(lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); /* Test simple resets. */ for (i = 0; i < 2; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure while resetting profile data"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected profile sample rate change"); } /* Test resets with prof.lg_sample changes. */ lg_prof_sample_next = 1; for (i = 0; i < 2; i++) { do_prof_reset(lg_prof_sample_next); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample, lg_prof_sample_next, "Expected profile sample rate change"); lg_prof_sample_next = lg_prof_sample_orig; } /* Make sure the test code restored prof.lg_sample. 
*/ lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); } TEST_END bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { prof_dump_header_intercepted = true; memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); return false; } TEST_BEGIN(test_prof_reset_cleanup) { void *p; prof_dump_header_t *prof_dump_header_orig; test_skip_if(!config_prof); set_prof_active(true); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header_orig = prof_dump_header; prof_dump_header = prof_dump_header_intercept; assert_false(prof_dump_header_intercepted, "Unexpected intercept"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_true(prof_dump_header_intercepted, "Expected intercept"); assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation"); assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header = prof_dump_header_orig; dallocx(p, 0); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); set_prof_active(false); } TEST_END #define NTHREADS 4 #define NALLOCS_PER_THREAD (1U << 13) #define OBJ_RING_BUF_COUNT 1531 #define RESET_INTERVAL (1U << 10) #define DUMP_INTERVAL 3677 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; unsigned i; void *objs[OBJ_RING_BUF_COUNT]; memset(objs, 0, sizeof(objs)); for (i = 0; i < NALLOCS_PER_THREAD; i++) { if (i % RESET_INTERVAL == 0) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile " "data"); } if (i % DUMP_INTERVAL == 0) { assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); } { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); assert_ptr_not_null(*pp, "Unexpected btalloc() failure"); } } /* Clean up any remaining objects. 
*/ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } } return NULL; } TEST_BEGIN(test_prof_reset) { size_t lg_prof_sample_orig; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; size_t bt_count, tdata_count; test_skip_if(!config_prof); bt_count = prof_bt_count(); assert_zu_eq(bt_count, 0, "Unexpected pre-existing backtrace structures"); tdata_count = prof_tdata_count(); lg_prof_sample_orig = get_lg_prof_sample(); do_prof_reset(5); set_prof_active(true); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } assert_zu_eq(prof_bt_count(), bt_count, "Unexpected backtrace count change"); assert_zu_eq(prof_tdata_count(), tdata_count, "Unexpected remaining tdata structures"); set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NTHREADS #undef NALLOCS_PER_THREAD #undef OBJ_RING_BUF_COUNT #undef RESET_INTERVAL #undef DUMP_INTERVAL /* Test sampling at the same allocation site across resets. */ #define NITER 10 TEST_BEGIN(test_xallocx) { size_t lg_prof_sample_orig; unsigned i; void *ptrs[NITER]; test_skip_if(!config_prof); lg_prof_sample_orig = get_lg_prof_sample(); set_prof_active(true); /* Reset profiling. */ do_prof_reset(0); for (i = 0; i < NITER; i++) { void *p; size_t sz, nsz; /* Reset profiling. */ do_prof_reset(0); /* Allocate small object (which will be promoted). */ p = ptrs[i] = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); /* Reset profiling. */ do_prof_reset(0); /* Perform successful xallocx(). */ sz = sallocx(p, 0); assert_zu_eq(xallocx(p, sz, 0, 0), sz, "Unexpected xallocx() failure"); /* Perform unsuccessful xallocx(). */ nsz = nallocx(sz+1, 0); assert_zu_eq(xallocx(p, nsz, 0, 0), sz, "Unexpected xallocx() success"); } for (i = 0; i < NITER; i++) { /* dallocx. */ dallocx(ptrs[i], 0); } set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NITER int main(void) { /* Intercept dumping prior to running any tests.
*/ prof_dump_open = prof_dump_open_intercept; return test_no_reentrancy( test_prof_reset_basic, test_prof_reset_cleanup, test_prof_reset, test_xallocx); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_reset.sh010064400007650000024000000001711340421340100205540ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0" fi jemalloc-sys-0.3.2/jemalloc/test/unit/prof_tctx.c010064400007650000024000000024231340421340100202260ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_prof_realloc) { tsdn_t *tsdn; int flags; void *p, *q; prof_tctx_t *tctx_p, *tctx_q; uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3; test_skip_if(!config_prof); tsdn = tsdn_fetch(); flags = MALLOCX_TCACHE_NONE; prof_cnt_all(&curobjs_0, NULL, NULL, NULL); p = mallocx(1024, flags); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tctx_p = prof_tctx_get(tsdn, p, NULL); assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U, "Expected valid tctx"); prof_cnt_all(&curobjs_1, NULL, NULL, NULL); assert_u64_eq(curobjs_0 + 1, curobjs_1, "Allocation should have increased sample size"); q = rallocx(p, 2048, flags); assert_ptr_ne(p, q, "Expected move"); assert_ptr_not_null(q, "Unexpected rallocx() failure"); tctx_q = prof_tctx_get(tsdn, q, NULL); assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U, "Expected valid tctx"); prof_cnt_all(&curobjs_2, NULL, NULL, NULL); assert_u64_eq(curobjs_1, curobjs_2, "Reallocation should not have changed sample size"); dallocx(q, flags); prof_cnt_all(&curobjs_3, NULL, NULL, NULL); assert_u64_eq(curobjs_0, curobjs_3, "Sample size should have returned to base level"); } TEST_END int main(void) { return test_no_reentrancy( test_prof_realloc); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_tctx.sh010064400007650000024000000001471340421340100204170ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,lg_prof_sample:0" fi jemalloc-sys-0.3.2/jemalloc/test/unit/prof_thread_name.c010064400007650000024000000062151340421340100215160ustar0000000000000000#include "test/jemalloc_test.h" static void mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, int line) { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); assert_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected thread.prof.name value", func, line); } #define mallctl_thread_name_get(a) \ mallctl_thread_name_get_impl(a, __func__, __LINE__) static void mallctl_thread_name_set_impl(const char *thread_name, const char *func, int line) { assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), 0, "%s():%d: Unexpected mallctl failure writing thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); } #define mallctl_thread_name_set(a) \ mallctl_thread_name_set_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_thread_name_validation) { const char *thread_name; test_skip_if(!config_prof); mallctl_thread_name_get(""); mallctl_thread_name_set("hi there"); /* NULL input shouldn't be allowed. */ thread_name = NULL; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* '\n' shouldn't be allowed.
*/ thread_name = "hi\nthere"; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* Simultaneous read/write shouldn't be allowed. */ { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, (void *)&thread_name, sizeof(thread_name)), EPERM, "Unexpected mallctl result writing \"%s\" to " "thread.prof.name", thread_name); } mallctl_thread_name_set(""); } TEST_END #define NTHREADS 4 #define NRESET 25 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; char thread_name[16] = ""; unsigned i; malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); mallctl_thread_name_get(""); mallctl_thread_name_set(thread_name); for (i = 0; i < NRESET; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); mallctl_thread_name_get(thread_name); } mallctl_thread_name_set(thread_name); mallctl_thread_name_set(""); return NULL; } TEST_BEGIN(test_prof_thread_name_threaded) { thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } } TEST_END #undef NTHREADS #undef NRESET int main(void) { return test( test_prof_thread_name_validation, test_prof_thread_name_threaded); } jemalloc-sys-0.3.2/jemalloc/test/unit/prof_thread_name.sh010064400007650000024000000001501340421340100216760ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_active:false" fi jemalloc-sys-0.3.2/jemalloc/test/unit/ql.c010064400007650000024000000106671340421340100166430ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ql.h" /* Number of ring entries, in [2..26]. 
*/ #define NENTRIES 9 typedef struct list_s list_t; typedef ql_head(list_t) list_head_t; struct list_s { ql_elm(list_t) link; char id; }; static void test_empty_list(list_head_t *head) { list_t *t; unsigned i; assert_ptr_null(ql_first(head), "Unexpected element for empty list"); assert_ptr_null(ql_last(head, link), "Unexpected element for empty list"); i = 0; ql_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); i = 0; ql_reverse_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); } TEST_BEGIN(test_ql_empty) { list_head_t head; ql_new(&head); test_empty_list(&head); } TEST_END static void init_entries(list_t *entries, unsigned nentries) { unsigned i; for (i = 0; i < nentries; i++) { entries[i].id = 'a' + i; ql_elm_new(&entries[i], link); } } static void test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { list_t *t; unsigned i; assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, "Element id mismatch"); i = 0; ql_foreach(t, head, link) { assert_c_eq(t->id, entries[i].id, "Element id mismatch"); i++; } i = 0; ql_reverse_foreach(t, head, link) { assert_c_eq(t->id, entries[nentries-i-1].id, "Element id mismatch"); i++; } for (i = 0; i < nentries-1; i++) { t = ql_next(head, &entries[i], link); assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); } assert_ptr_null(ql_next(head, &entries[nentries-1], link), "Unexpected element"); assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); for (i = 1; i < nentries; i++) { t = ql_prev(head, &entries[i], link); assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); } } TEST_BEGIN(test_ql_tail_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); } test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_tail_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, entries, NENTRIES-i); ql_tail_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_head_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); } test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_head_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, &entries[i], NENTRIES-i); ql_head_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_insert) { list_head_t head; list_t entries[8]; list_t *a, *b, *c, *d, *e, *f, *g, *h; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); a = &entries[0]; b = &entries[1]; c = &entries[2]; d = &entries[3]; e = &entries[4]; f = &entries[5]; g = &entries[6]; h = &entries[7]; /* * ql_remove(), ql_before_insert(), and ql_after_insert() are used * internally by other macros that are already 
tested, so there's no * need to test them completely. However, insertion/deletion from the * middle of lists is not otherwise tested; do so here. */ ql_tail_insert(&head, f, link); ql_before_insert(&head, f, b, link); ql_before_insert(&head, f, c, link); ql_after_insert(f, h, link); ql_after_insert(f, g, link); ql_before_insert(&head, b, a, link); ql_after_insert(c, d, link); ql_before_insert(&head, f, e, link); test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); } TEST_END int main(void) { return test( test_ql_empty, test_ql_tail_insert, test_ql_tail_remove, test_ql_head_insert, test_ql_head_remove, test_ql_insert); } jemalloc-sys-0.3.2/jemalloc/test/unit/qr.c010064400007650000024000000122301340421340100166350ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/qr.h" /* Number of ring entries, in [2..26]. */ #define NENTRIES 9 /* Split index, in [1..NENTRIES). */ #define SPLIT_INDEX 5 typedef struct ring_s ring_t; struct ring_s { qr(ring_t) link; char id; }; static void init_entries(ring_t *entries) { unsigned i; for (i = 0; i < NENTRIES; i++) { qr_new(&entries[i], link); entries[i].id = 'a' + i; } } static void test_independent_entries(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { j++; } assert_u_eq(j, 1, "Iteration over single-element ring should visit precisely " "one element"); } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { j++; } assert_u_eq(j, 1, "Iteration over single-element ring should visit precisely " "one element"); } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_ptr_eq(t, &entries[i], "Next element in single-element ring should be same as " "current element"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_ptr_eq(t, &entries[i], "Previous element in single-element ring should be same as " "current element"); } } TEST_BEGIN(test_qr_one) { ring_t entries[NENTRIES]; init_entries(entries); test_independent_entries(entries); } TEST_END static void test_entries_ring(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, "Element id mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, "Element id mismatch"); } } TEST_BEGIN(test_qr_after_insert) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); } test_entries_ring(entries); } TEST_END TEST_BEGIN(test_qr_remove) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); } for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[i+j].id, "Element id mismatch"); j++; } j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, "Element id mismatch"); j++; } qr_remove(&entries[i], link); } test_independent_entries(entries); } TEST_END 
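/*
 * Illustrative sketch (not part of the original suite, and unused by the
 * harness): qr_foreach visits every element of a ring exactly once
 * regardless of which entry it starts from, so a ring's length can be
 * counted from an arbitrary element.
 */
static unsigned
ring_count_from(ring_t *start) {
	ring_t *t;
	unsigned count = 0;

	qr_foreach(t, start, link) {
		count++;
	}
	return count;
}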
TEST_BEGIN(test_qr_before_insert) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_before_insert(&entries[i - 1], &entries[i], link); } for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(NENTRIES+i-j) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, "Element id mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, "Element id mismatch"); } } TEST_END static void test_split_entries(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { if (i < SPLIT_INDEX) { assert_c_eq(t->id, entries[(i+j) % SPLIT_INDEX].id, "Element id mismatch"); } else { assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, "Element id mismatch"); } j++; } } } TEST_BEGIN(test_qr_meld_split) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); } qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); qr_split(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); qr_meld(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); } TEST_END int main(void) { return test( test_qr_one, test_qr_after_insert, test_qr_remove, test_qr_before_insert, test_qr_meld_split); } jemalloc-sys-0.3.2/jemalloc/test/unit/rb.c010064400007650000024000000174101340421340100166230ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/rb.h" #define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ a_type *rbp_bh_t; \ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \ NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \ rbp_bh_t)) { \ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ (r_height)++; \ } \ } \ } while (0) typedef struct node_s node_t; struct node_s { #define NODE_MAGIC 0x9823af7e uint32_t magic; rb_node(node_t) link; uint64_t key; }; static int node_cmp(const node_t *a, const node_t *b) { int ret; assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); ret = (a->key > b->key) - (a->key < b->key); if (ret == 0) { /* * Duplicates are not allowed in the tree, so force an * arbitrary ordering for non-identical items with equal keys. 
*/ ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } return ret; } typedef rb_tree(node_t) tree_t; rb_gen(static, tree_, tree_t, node_t, link, node_cmp); TEST_BEGIN(test_rb_empty) { tree_t tree; node_t key; tree_new(&tree); assert_true(tree_empty(&tree), "Tree should be empty"); assert_ptr_null(tree_first(&tree), "Unexpected node"); assert_ptr_null(tree_last(&tree), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); } TEST_END static unsigned tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { unsigned ret = 0; node_t *left_node; node_t *right_node; if (node == NULL) { return ret; } left_node = rbtn_left_get(node_t, link, node); right_node = rbtn_right_get(node_t, link, node); if (!rbtn_red_get(node_t, link, node)) { black_depth++; } /* Red nodes must be interleaved with black nodes. */ if (rbtn_red_get(node_t, link, node)) { if (left_node != NULL) { assert_false(rbtn_red_get(node_t, link, left_node), "Node should be black"); } if (right_node != NULL) { assert_false(rbtn_red_get(node_t, link, right_node), "Node should be black"); } } /* Self. */ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Left subtree. */ if (left_node != NULL) { ret += tree_recurse(left_node, black_height, black_depth); } else { ret += (black_depth != black_height); } /* Right subtree. */ if (right_node != NULL) { ret += tree_recurse(right_node, black_height, black_depth); } else { ret += (black_depth != black_height); } return ret; } static node_t * tree_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; node_t *search_node; assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Test rb_search(). */ search_node = tree_search(tree, node); assert_ptr_eq(search_node, node, "tree_search() returned unexpected node"); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); assert_ptr_eq(search_node, node, "tree_nsearch() returned unexpected node"); /* Test rb_psearch(). */ search_node = tree_psearch(tree, node); assert_ptr_eq(search_node, node, "tree_psearch() returned unexpected node"); (*i)++; return NULL; } static unsigned tree_iterate(tree_t *tree) { unsigned i; i = 0; tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); return i; } static unsigned tree_iterate_reverse(tree_t *tree) { unsigned i; i = 0; tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); return i; } static void node_remove(tree_t *tree, node_t *node, unsigned nnodes) { node_t *search_node; unsigned black_height, imbalances; tree_remove(tree, node); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); if (search_node != NULL) { assert_u64_ge(search_node->key, node->key, "Key ordering error"); } /* Test rb_psearch(). 
*/ search_node = tree_psearch(tree, node); if (search_node != NULL) { assert_u64_le(search_node->key, node->key, "Key ordering error"); } node->magic = 0; rbtn_black_height(node_t, link, tree, black_height); imbalances = tree_recurse(tree->rbt_root, black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(tree), nnodes-1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(tree), nnodes-1, "Unexpected node iteration count"); } static node_t * remove_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_next(tree, node); node_remove(tree, node, *nnodes); return ret; } static node_t * remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_prev(tree, node); node_remove(tree, node, *nnodes); return ret; } static void destroy_cb(node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; assert_u_gt(*nnodes, 0, "Destruction removed too many nodes"); (*nnodes)--; } TEST_BEGIN(test_rb_random) { #define NNODES 25 #define NBAGS 250 #define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; tree_t tree; node_t nodes[NNODES]; unsigned i, j, k, black_height, imbalances; sfmt = init_gen_rand(SEED); for (i = 0; i < NBAGS; i++) { switch (i) { case 0: /* Insert in order. */ for (j = 0; j < NNODES; j++) { bag[j] = j; } break; case 1: /* Insert in reverse order. */ for (j = 0; j < NNODES; j++) { bag[j] = NNODES - j - 1; } break; default: for (j = 0; j < NNODES; j++) { bag[j] = gen_rand64_range(sfmt, NNODES); } } for (j = 1; j <= NNODES; j++) { /* Initialize tree and nodes. */ tree_new(&tree); for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; } /* Insert nodes. */ for (k = 0; k < j; k++) { tree_insert(&tree, &nodes[k]); rbtn_black_height(node_t, link, &tree, black_height); imbalances = tree_recurse(tree.rbt_root, black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(&tree), k+1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(&tree), k+1, "Unexpected node iteration count"); assert_false(tree_empty(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_first(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_last(&tree), "Tree should not be empty"); tree_next(&tree, &nodes[k]); tree_prev(&tree, &nodes[k]); } /* Remove nodes. 
*/ switch (i % 5) { case 0: for (k = 0; k < j; k++) { node_remove(&tree, &nodes[k], j - k); } break; case 1: for (k = j; k > 0; k--) { node_remove(&tree, &nodes[k-1], k); } break; case 2: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_iter(&tree, start, remove_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } case 3: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_reverse_iter(&tree, start, remove_reverse_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } case 4: { unsigned nnodes = j; tree_destroy(&tree, destroy_cb, &nnodes); assert_u_eq(nnodes, 0, "Destruction terminated early"); break; } default: not_reached(); } } } fini_gen_rand(sfmt); #undef NNODES #undef NBAGS #undef SEED } TEST_END int main(void) { return test( test_rb_empty, test_rb_random); } jemalloc-sys-0.3.2/jemalloc/test/unit/retained.c010064400007650000024000000113741340421340100200160ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/spin.h" static unsigned arena_ind; static size_t sz; static size_t esz; #define NEPOCHS 8 #define PER_THD_NALLOCS 1 static atomic_u_t epoch; static atomic_u_t nfinished; static unsigned do_arena_create(extent_hooks_t *h) { unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, "Unexpected mallctl() failure"); return arena_ind; } static void do_arena_destroy(unsigned arena_ind) { size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } static void do_refresh(void) { uint64_t epoch = 1; assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); } static size_t do_get_size_impl(const char *cmd, unsigned arena_ind) { size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); size_t z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = arena_ind; size_t size; assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd); return size; } static size_t do_get_active(unsigned arena_ind) { return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE; } static size_t do_get_mapped(unsigned arena_ind) { return do_get_size_impl("stats.arenas.0.mapped", arena_ind); } static void * thd_start(void *arg) { for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) { /* Busy-wait for next epoch. */ unsigned cur_epoch; spin_t spinner = SPIN_INITIALIZER; while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) != next_epoch) { spin_adaptive(&spinner); } assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch"); /* * Allocate. The main thread will reset the arena, so there's * no need to deallocate. */ for (unsigned i = 0; i < PER_THD_NALLOCS; i++) { void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE ); assert_ptr_not_null(p, "Unexpected mallocx() failure\n"); } /* Let the main thread know we've finished this iteration. 
*/ atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE); } return NULL; } TEST_BEGIN(test_retained) { test_skip_if(!config_stats); arena_ind = do_arena_create(NULL); sz = nallocx(HUGEPAGE, 0); esz = sz + sz_large_pad; atomic_store_u(&epoch, 0, ATOMIC_RELAXED); unsigned nthreads = ncpus * 2; VARIABLE_ARRAY(thd_t, threads, nthreads); for (unsigned i = 0; i < nthreads; i++) { thd_create(&threads[i], thd_start, NULL); } for (unsigned e = 1; e < NEPOCHS; e++) { atomic_store_u(&nfinished, 0, ATOMIC_RELEASE); atomic_store_u(&epoch, e, ATOMIC_RELEASE); /* Wait for threads to finish allocating. */ spin_t spinner = SPIN_INITIALIZER; while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) { spin_adaptive(&spinner); } /* * Assert that retained is no more than the sum of size classes * that should have been used to satisfy the worker threads' * requests, discounting per-growth fragmentation. */ do_refresh(); size_t allocated = esz * nthreads * PER_THD_NALLOCS; size_t active = do_get_active(arena_ind); assert_zu_le(allocated, active, "Unexpected active memory"); size_t mapped = do_get_mapped(arena_ind); assert_zu_le(active, mapped, "Unexpected mapped memory"); arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false); size_t usable = 0; size_t fragmented = 0; for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind < arena->extent_grow_next; pind++) { size_t psz = sz_pind2sz(pind); size_t psz_fragmented = psz % esz; size_t psz_usable = psz - psz_fragmented; /* * Only consider size classes that wouldn't be skipped. */ if (psz_usable > 0) { assert_zu_lt(usable, allocated, "Excessive retained memory " "(%#zx[+%#zx] > %#zx)", usable, psz_usable, allocated); fragmented += psz_fragmented; usable += psz_usable; } } /* * Clean up arena. Destroying and recreating the arena * is simpler than specifying extent hooks that deallocate * (rather than retaining) during reset. */ do_arena_destroy(arena_ind); assert_u_eq(do_arena_create(NULL), arena_ind, "Unexpected arena index"); } for (unsigned i = 0; i < nthreads; i++) { thd_join(threads[i], NULL); } do_arena_destroy(arena_ind); } TEST_END int main(void) { return test( test_retained); } jemalloc-sys-0.3.2/jemalloc/test/unit/rtree.c010064400007650000024000000153071340421341300173470ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/rtree.h" rtree_node_alloc_t *rtree_node_alloc_orig; rtree_node_dalloc_t *rtree_node_dalloc_orig; rtree_leaf_alloc_t *rtree_leaf_alloc_orig; rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig; /* Potentially too large to safely place on the stack.
*/ rtree_t test_rtree; static rtree_node_elm_t * rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { rtree_node_elm_t *node; if (rtree != &test_rtree) { return rtree_node_alloc_orig(tsdn, rtree, nelms); } malloc_mutex_unlock(tsdn, &rtree->init_lock); node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)); assert_ptr_not_null(node, "Unexpected calloc() failure"); malloc_mutex_lock(tsdn, &rtree->init_lock); return node; } static void rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { if (rtree != &test_rtree) { rtree_node_dalloc_orig(tsdn, rtree, node); return; } free(node); } static rtree_leaf_elm_t * rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { rtree_leaf_elm_t *leaf; if (rtree != &test_rtree) { return rtree_leaf_alloc_orig(tsdn, rtree, nelms); } malloc_mutex_unlock(tsdn, &rtree->init_lock); leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t)); assert_ptr_not_null(leaf, "Unexpected calloc() failure"); malloc_mutex_lock(tsdn, &rtree->init_lock); return leaf; } static void rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { if (rtree != &test_rtree) { rtree_leaf_dalloc_orig(tsdn, rtree, leaf); return; } free(leaf); } TEST_BEGIN(test_rtree_read_empty) { tsdn_t *tsdn; tsdn = tsdn_fetch(); rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, false), "rtree_extent_read() should return NULL for empty tree"); rtree_delete(tsdn, rtree); } TEST_END #undef NTHREADS #undef NITERS #undef SEED TEST_BEGIN(test_rtree_extrema) { extent_t extent_a, extent_b; extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false, sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false, false, true); extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0, extent_state_active, false, false, true); tsdn_t *tsdn = tsdn_fetch(); rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a, extent_szind_get(&extent_a), extent_slab_get(&extent_a)), "Unexpected rtree_write() failure"); rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE, extent_szind_get(&extent_a), extent_slab_get(&extent_a)); assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true), &extent_a, "rtree_extent_read() should return previously set value"); assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), &extent_b, extent_szind_get_maybe_invalid(&extent_b), extent_slab_get(&extent_b)), "Unexpected rtree_write() failure"); assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), true), &extent_b, "rtree_extent_read() should return previously set value"); rtree_delete(tsdn, rtree); } TEST_END TEST_BEGIN(test_rtree_bits) { tsdn_t *tsdn = tsdn_fetch(); uintptr_t keys[] = {PAGE, PAGE + 1, PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; extent_t extent; extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0, extent_state_active, false, false, true); rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i], &extent, NSIZES, false), "Unexpected 
rtree_write() failure"); for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[j], true), &extent, "rtree_extent_read() should return previously set " "value and ignore insignificant key bits; i=%u, " "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, j, keys[i], keys[j]); } assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, (((uintptr_t)2) << LG_PAGE), false), "Only leftmost rtree leaf should be set; i=%u", i); rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); } rtree_delete(tsdn, rtree); } TEST_END TEST_BEGIN(test_rtree_random) { #define NSET 16 #define SEED 42 sfmt_t *sfmt = init_gen_rand(SEED); tsdn_t *tsdn = tsdn_fetch(); uintptr_t keys[NSET]; rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); extent_t extent; extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0, extent_state_active, false, false, true); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); for (unsigned i = 0; i < NSET; i++) { keys[i] = (uintptr_t)gen_rand64(sfmt); rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx, keys[i], false, true); assert_ptr_not_null(elm, "Unexpected rtree_leaf_elm_lookup() failure"); rtree_leaf_elm_write(tsdn, rtree, elm, &extent, NSIZES, false); assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), &extent, "rtree_extent_read() should return previously set value"); } for (unsigned i = 0; i < NSET; i++) { assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), &extent, "rtree_extent_read() should return previously set value, " "i=%u", i); } for (unsigned i = 0; i < NSET; i++) { rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), "rtree_extent_read() should return previously set value"); } for (unsigned i = 0; i < NSET; i++) { assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), "rtree_extent_read() should return previously set value"); } rtree_delete(tsdn, rtree); fini_gen_rand(sfmt); #undef NSET #undef SEED } TEST_END int main(void) { rtree_node_alloc_orig = rtree_node_alloc; rtree_node_alloc = rtree_node_alloc_intercept; rtree_node_dalloc_orig = rtree_node_dalloc; rtree_node_dalloc = rtree_node_dalloc_intercept; rtree_leaf_alloc_orig = rtree_leaf_alloc; rtree_leaf_alloc = rtree_leaf_alloc_intercept; rtree_leaf_dalloc_orig = rtree_leaf_dalloc; rtree_leaf_dalloc = rtree_leaf_dalloc_intercept; return test( test_rtree_read_empty, test_rtree_extrema, test_rtree_bits, test_rtree_random); } jemalloc-sys-0.3.2/jemalloc/test/unit/SFMT.c010064400007650000024000002530701340421340100167750ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. 
* * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "test/jemalloc_test.h" #define BLOCK_SIZE 10000 #define BLOCK_SIZE64 (BLOCK_SIZE / 2) #define COUNT_1 1000 #define COUNT_2 700 static const uint32_t init_gen_rand_32_expected[] = { 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, 
2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U, 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, 3034720222U, 1090798544U, 2942170004U, 
4036187520U, 686972531U, 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U, 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, 349178792U, 226482567U, 
3102426060U, 3575998268U, 2103001871U, 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U, 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U, 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U }; static const uint32_t init_by_array_32_expected[] = { 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, 
2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U, 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U, 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, 921459029U, 3198857146U, 705479721U, 
3835966910U, 3603356465U, 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U, 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U, 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, 1596096923U, 610150600U, 
431464457U, 2541325046U, 486478003U, 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U, 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, 1653952120U, 
1039661024U, 2159959078U, 3709040440U, 3564718533U, 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U, 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U, 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U }; static const uint64_t init_gen_rand_64_expected[] = { KQU(16924766246869039260), KQU( 8201438687333352714), KQU( 2265290287015001750), KQU(18397264611805473832), KQU( 3375255223302384358), KQU( 6345559975416828796), KQU(18229739242790328073), KQU( 7596792742098800905), KQU( 255338647169685981), KQU( 2052747240048610300), KQU(18328151576097299343), KQU(12472905421133796567), KQU(11315245349717600863), KQU(16594110197775871209), KQU(15708751964632456450), KQU(10452031272054632535), KQU(11097646720811454386), KQU( 4556090668445745441), KQU(17116187693090663106), KQU(14931526836144510645), KQU( 9190752218020552591), KQU( 9625800285771901401), KQU(13995141077659972832), KQU( 5194209094927829625), KQU( 4156788379151063303), KQU( 8523452593770139494), KQU(14082382103049296727), KQU( 2462601863986088483), KQU( 3030583461592840678), KQU( 5221622077872827681), KQU( 3084210671228981236), KQU(13956758381389953823), KQU(13503889856213423831), KQU(15696904024189836170), KQU( 4612584152877036206), KQU( 6231135538447867881), KQU(10172457294158869468), KQU( 6452258628466708150), KQU(14044432824917330221), KQU( 370168364480044279), KQU(10102144686427193359), KQU( 667870489994776076), KQU( 2732271956925885858), KQU(18027788905977284151), KQU(15009842788582923859), KQU( 7136357960180199542), KQU(15901736243475578127), KQU(16951293785352615701), KQU(10551492125243691632), KQU(17668869969146434804), KQU(13646002971174390445), KQU( 9804471050759613248), KQU( 5511670439655935493), KQU(18103342091070400926), KQU(17224512747665137533), KQU(15534627482992618168), KQU( 1423813266186582647), KQU(15821176807932930024), KQU( 30323369733607156), 
KQU(11599382494723479403), KQU( 653856076586810062), KQU( 3176437395144899659), KQU(14028076268147963917), KQU(16156398271809666195), KQU( 3166955484848201676), KQU( 5746805620136919390), KQU(17297845208891256593), KQU(11691653183226428483), KQU(17900026146506981577), KQU(15387382115755971042), KQU(16923567681040845943), KQU( 8039057517199388606), KQU(11748409241468629263), KQU( 794358245539076095), KQU(13438501964693401242), KQU(14036803236515618962), KQU( 5252311215205424721), KQU(17806589612915509081), KQU( 6802767092397596006), KQU(14212120431184557140), KQU( 1072951366761385712), KQU(13098491780722836296), KQU( 9466676828710797353), KQU(12673056849042830081), KQU(12763726623645357580), KQU(16468961652999309493), KQU(15305979875636438926), KQU(17444713151223449734), KQU( 5692214267627883674), KQU(13049589139196151505), KQU( 880115207831670745), KQU( 1776529075789695498), KQU(16695225897801466485), KQU(10666901778795346845), KQU( 6164389346722833869), KQU( 2863817793264300475), KQU( 9464049921886304754), KQU( 3993566636740015468), KQU( 9983749692528514136), KQU(16375286075057755211), KQU(16042643417005440820), KQU(11445419662923489877), KQU( 7999038846885158836), KQU( 6721913661721511535), KQU( 5363052654139357320), KQU( 1817788761173584205), KQU(13290974386445856444), KQU( 4650350818937984680), KQU( 8219183528102484836), KQU( 1569862923500819899), KQU( 4189359732136641860), KQU(14202822961683148583), KQU( 4457498315309429058), KQU(13089067387019074834), KQU(11075517153328927293), KQU(10277016248336668389), KQU( 7070509725324401122), KQU(17808892017780289380), KQU(13143367339909287349), KQU( 1377743745360085151), KQU( 5749341807421286485), KQU(14832814616770931325), KQU( 7688820635324359492), KQU(10960474011539770045), KQU( 81970066653179790), KQU(12619476072607878022), KQU( 4419566616271201744), KQU(15147917311750568503), KQU( 5549739182852706345), KQU( 7308198397975204770), KQU(13580425496671289278), KQU(17070764785210130301), KQU( 8202832846285604405), KQU( 6873046287640887249), KQU( 6927424434308206114), KQU( 6139014645937224874), KQU(10290373645978487639), KQU(15904261291701523804), KQU( 9628743442057826883), KQU(18383429096255546714), KQU( 4977413265753686967), KQU( 7714317492425012869), KQU( 9025232586309926193), KQU(14627338359776709107), KQU(14759849896467790763), KQU(10931129435864423252), KQU( 4588456988775014359), KQU(10699388531797056724), KQU( 468652268869238792), KQU( 5755943035328078086), KQU( 2102437379988580216), KQU( 9986312786506674028), KQU( 2654207180040945604), KQU( 8726634790559960062), KQU( 100497234871808137), KQU( 2800137176951425819), KQU( 6076627612918553487), KQU( 5780186919186152796), KQU( 8179183595769929098), KQU( 6009426283716221169), KQU( 2796662551397449358), KQU( 1756961367041986764), KQU( 6972897917355606205), KQU(14524774345368968243), KQU( 2773529684745706940), KQU( 4853632376213075959), KQU( 4198177923731358102), KQU( 8271224913084139776), KQU( 2741753121611092226), KQU(16782366145996731181), KQU(15426125238972640790), KQU(13595497100671260342), KQU( 3173531022836259898), KQU( 6573264560319511662), KQU(18041111951511157441), KQU( 2351433581833135952), KQU( 3113255578908173487), KQU( 1739371330877858784), KQU(16046126562789165480), KQU( 8072101652214192925), KQU(15267091584090664910), KQU( 9309579200403648940), KQU( 5218892439752408722), KQU(14492477246004337115), KQU(17431037586679770619), KQU( 7385248135963250480), KQU( 9580144956565560660), KQU( 4919546228040008720), KQU(15261542469145035584), KQU(18233297270822253102), KQU( 
5453248417992302857), KQU( 9309519155931460285), KQU(10342813012345291756), KQU(15676085186784762381), KQU(15912092950691300645), KQU( 9371053121499003195), KQU( 9897186478226866746), KQU(14061858287188196327), KQU( 122575971620788119), KQU(12146750969116317754), KQU( 4438317272813245201), KQU( 8332576791009527119), KQU(13907785691786542057), KQU(10374194887283287467), KQU( 2098798755649059566), KQU( 3416235197748288894), KQU( 8688269957320773484), KQU( 7503964602397371571), KQU(16724977015147478236), KQU( 9461512855439858184), KQU(13259049744534534727), KQU( 3583094952542899294), KQU( 8764245731305528292), KQU(13240823595462088985), KQU(13716141617617910448), KQU(18114969519935960955), KQU( 2297553615798302206), KQU( 4585521442944663362), KQU(17776858680630198686), KQU( 4685873229192163363), KQU( 152558080671135627), KQU(15424900540842670088), KQU(13229630297130024108), KQU(17530268788245718717), KQU(16675633913065714144), KQU( 3158912717897568068), KQU(15399132185380087288), KQU( 7401418744515677872), KQU(13135412922344398535), KQU( 6385314346100509511), KQU(13962867001134161139), KQU(10272780155442671999), KQU(12894856086597769142), KQU(13340877795287554994), KQU(12913630602094607396), KQU(12543167911119793857), KQU(17343570372251873096), KQU(10959487764494150545), KQU( 6966737953093821128), KQU(13780699135496988601), KQU( 4405070719380142046), KQU(14923788365607284982), KQU( 2869487678905148380), KQU( 6416272754197188403), KQU(15017380475943612591), KQU( 1995636220918429487), KQU( 3402016804620122716), KQU(15800188663407057080), KQU(11362369990390932882), KQU(15262183501637986147), KQU(10239175385387371494), KQU( 9352042420365748334), KQU( 1682457034285119875), KQU( 1724710651376289644), KQU( 2038157098893817966), KQU( 9897825558324608773), KQU( 1477666236519164736), KQU(16835397314511233640), KQU(10370866327005346508), KQU(10157504370660621982), KQU(12113904045335882069), KQU(13326444439742783008), KQU(11302769043000765804), KQU(13594979923955228484), KQU(11779351762613475968), KQU( 3786101619539298383), KQU( 8021122969180846063), KQU(15745904401162500495), KQU(10762168465993897267), KQU(13552058957896319026), KQU(11200228655252462013), KQU( 5035370357337441226), KQU( 7593918984545500013), KQU( 5418554918361528700), KQU( 4858270799405446371), KQU( 9974659566876282544), KQU(18227595922273957859), KQU( 2772778443635656220), KQU(14285143053182085385), KQU( 9939700992429600469), KQU(12756185904545598068), KQU( 2020783375367345262), KQU( 57026775058331227), KQU( 950827867930065454), KQU( 6602279670145371217), KQU( 2291171535443566929), KQU( 5832380724425010313), KQU( 1220343904715982285), KQU(17045542598598037633), KQU(15460481779702820971), KQU(13948388779949365130), KQU(13975040175430829518), KQU(17477538238425541763), KQU(11104663041851745725), KQU(15860992957141157587), KQU(14529434633012950138), KQU( 2504838019075394203), KQU( 7512113882611121886), KQU( 4859973559980886617), KQU( 1258601555703250219), KQU(15594548157514316394), KQU( 4516730171963773048), KQU(11380103193905031983), KQU( 6809282239982353344), KQU(18045256930420065002), KQU( 2453702683108791859), KQU( 977214582986981460), KQU( 2006410402232713466), KQU( 6192236267216378358), KQU( 3429468402195675253), KQU(18146933153017348921), KQU(17369978576367231139), KQU( 1246940717230386603), KQU(11335758870083327110), KQU(14166488801730353682), KQU( 9008573127269635732), KQU(10776025389820643815), KQU(15087605441903942962), KQU( 1359542462712147922), KQU(13898874411226454206), KQU(17911176066536804411), KQU( 9435590428600085274), 
KQU( 294488509967864007), KQU( 8890111397567922046), KQU( 7987823476034328778), KQU(13263827582440967651), KQU( 7503774813106751573), KQU(14974747296185646837), KQU( 8504765037032103375), KQU(17340303357444536213), KQU( 7704610912964485743), KQU( 8107533670327205061), KQU( 9062969835083315985), KQU(16968963142126734184), KQU(12958041214190810180), KQU( 2720170147759570200), KQU( 2986358963942189566), KQU(14884226322219356580), KQU( 286224325144368520), KQU(11313800433154279797), KQU(18366849528439673248), KQU(17899725929482368789), KQU( 3730004284609106799), KQU( 1654474302052767205), KQU( 5006698007047077032), KQU( 8196893913601182838), KQU(15214541774425211640), KQU(17391346045606626073), KQU( 8369003584076969089), KQU( 3939046733368550293), KQU(10178639720308707785), KQU( 2180248669304388697), KQU( 62894391300126322), KQU( 9205708961736223191), KQU( 6837431058165360438), KQU( 3150743890848308214), KQU(17849330658111464583), KQU(12214815643135450865), KQU(13410713840519603402), KQU( 3200778126692046802), KQU(13354780043041779313), KQU( 800850022756886036), KQU(15660052933953067433), KQU( 6572823544154375676), KQU(11030281857015819266), KQU(12682241941471433835), KQU(11654136407300274693), KQU( 4517795492388641109), KQU( 9757017371504524244), KQU(17833043400781889277), KQU(12685085201747792227), KQU(10408057728835019573), KQU( 98370418513455221), KQU( 6732663555696848598), KQU(13248530959948529780), KQU( 3530441401230622826), KQU(18188251992895660615), KQU( 1847918354186383756), KQU( 1127392190402660921), KQU(11293734643143819463), KQU( 3015506344578682982), KQU(13852645444071153329), KQU( 2121359659091349142), KQU( 1294604376116677694), KQU( 5616576231286352318), KQU( 7112502442954235625), KQU(11676228199551561689), KQU(12925182803007305359), KQU( 7852375518160493082), KQU( 1136513130539296154), KQU( 5636923900916593195), KQU( 3221077517612607747), KQU(17784790465798152513), KQU( 3554210049056995938), KQU(17476839685878225874), KQU( 3206836372585575732), KQU( 2765333945644823430), KQU(10080070903718799528), KQU( 5412370818878286353), KQU( 9689685887726257728), KQU( 8236117509123533998), KQU( 1951139137165040214), KQU( 4492205209227980349), KQU(16541291230861602967), KQU( 1424371548301437940), KQU( 9117562079669206794), KQU(14374681563251691625), KQU(13873164030199921303), KQU( 6680317946770936731), KQU(15586334026918276214), KQU(10896213950976109802), KQU( 9506261949596413689), KQU( 9903949574308040616), KQU( 6038397344557204470), KQU( 174601465422373648), KQU(15946141191338238030), KQU(17142225620992044937), KQU( 7552030283784477064), KQU( 2947372384532947997), KQU( 510797021688197711), KQU( 4962499439249363461), KQU( 23770320158385357), KQU( 959774499105138124), KQU( 1468396011518788276), KQU( 2015698006852312308), KQU( 4149400718489980136), KQU( 5992916099522371188), KQU(10819182935265531076), KQU(16189787999192351131), KQU( 342833961790261950), KQU(12470830319550495336), KQU(18128495041912812501), KQU( 1193600899723524337), KQU( 9056793666590079770), KQU( 2154021227041669041), KQU( 4963570213951235735), KQU( 4865075960209211409), KQU( 2097724599039942963), KQU( 2024080278583179845), KQU(11527054549196576736), KQU(10650256084182390252), KQU( 4808408648695766755), KQU( 1642839215013788844), KQU(10607187948250398390), KQU( 7076868166085913508), KQU( 730522571106887032), KQU(12500579240208524895), KQU( 4484390097311355324), KQU(15145801330700623870), KQU( 8055827661392944028), KQU( 5865092976832712268), KQU(15159212508053625143), KQU( 3560964582876483341), KQU( 4070052741344438280), KQU( 
6032585709886855634), KQU(15643262320904604873), KQU( 2565119772293371111), KQU( 318314293065348260), KQU(15047458749141511872), KQU( 7772788389811528730), KQU( 7081187494343801976), KQU( 6465136009467253947), KQU(10425940692543362069), KQU( 554608190318339115), KQU(14796699860302125214), KQU( 1638153134431111443), KQU(10336967447052276248), KQU( 8412308070396592958), KQU( 4004557277152051226), KQU( 8143598997278774834), KQU(16413323996508783221), KQU(13139418758033994949), KQU( 9772709138335006667), KQU( 2818167159287157659), KQU(17091740573832523669), KQU(14629199013130751608), KQU(18268322711500338185), KQU( 8290963415675493063), KQU( 8830864907452542588), KQU( 1614839084637494849), KQU(14855358500870422231), KQU( 3472996748392519937), KQU(15317151166268877716), KQU( 5825895018698400362), KQU(16730208429367544129), KQU(10481156578141202800), KQU( 4746166512382823750), KQU(12720876014472464998), KQU( 8825177124486735972), KQU(13733447296837467838), KQU( 6412293741681359625), KQU( 8313213138756135033), KQU(11421481194803712517), KQU( 7997007691544174032), KQU( 6812963847917605930), KQU( 9683091901227558641), KQU(14703594165860324713), KQU( 1775476144519618309), KQU( 2724283288516469519), KQU( 717642555185856868), KQU( 8736402192215092346), KQU(11878800336431381021), KQU( 4348816066017061293), KQU( 6115112756583631307), KQU( 9176597239667142976), KQU(12615622714894259204), KQU(10283406711301385987), KQU( 5111762509485379420), KQU( 3118290051198688449), KQU( 7345123071632232145), KQU( 9176423451688682359), KQU( 4843865456157868971), KQU(12008036363752566088), KQU(12058837181919397720), KQU( 2145073958457347366), KQU( 1526504881672818067), KQU( 3488830105567134848), KQU(13208362960674805143), KQU( 4077549672899572192), KQU( 7770995684693818365), KQU( 1398532341546313593), KQU(12711859908703927840), KQU( 1417561172594446813), KQU(17045191024194170604), KQU( 4101933177604931713), KQU(14708428834203480320), KQU(17447509264469407724), KQU(14314821973983434255), KQU(17990472271061617265), KQU( 5087756685841673942), KQU(12797820586893859939), KQU( 1778128952671092879), KQU( 3535918530508665898), KQU( 9035729701042481301), KQU(14808661568277079962), KQU(14587345077537747914), KQU(11920080002323122708), KQU( 6426515805197278753), KQU( 3295612216725984831), KQU(11040722532100876120), KQU(12305952936387598754), KQU(16097391899742004253), KQU( 4908537335606182208), KQU(12446674552196795504), KQU(16010497855816895177), KQU( 9194378874788615551), KQU( 3382957529567613384), KQU( 5154647600754974077), KQU( 9801822865328396141), KQU( 9023662173919288143), KQU(17623115353825147868), KQU( 8238115767443015816), KQU(15811444159859002560), KQU( 9085612528904059661), KQU( 6888601089398614254), KQU( 258252992894160189), KQU( 6704363880792428622), KQU( 6114966032147235763), KQU(11075393882690261875), KQU( 8797664238933620407), KQU( 5901892006476726920), KQU( 5309780159285518958), KQU(14940808387240817367), KQU(14642032021449656698), KQU( 9808256672068504139), KQU( 3670135111380607658), KQU(11211211097845960152), KQU( 1474304506716695808), KQU(15843166204506876239), KQU( 7661051252471780561), KQU(10170905502249418476), KQU( 7801416045582028589), KQU( 2763981484737053050), KQU( 9491377905499253054), KQU(16201395896336915095), KQU( 9256513756442782198), KQU( 5411283157972456034), KQU( 5059433122288321676), KQU( 4327408006721123357), KQU( 9278544078834433377), KQU( 7601527110882281612), KQU(11848295896975505251), KQU(12096998801094735560), KQU(14773480339823506413), KQU(15586227433895802149), KQU(12786541257830242872), 
KQU( 6904692985140503067), KQU( 5309011515263103959), KQU(12105257191179371066), KQU(14654380212442225037), KQU( 2556774974190695009), KQU( 4461297399927600261), KQU(14888225660915118646), KQU(14915459341148291824), KQU( 2738802166252327631), KQU( 6047155789239131512), KQU(12920545353217010338), KQU(10697617257007840205), KQU( 2751585253158203504), KQU(13252729159780047496), KQU(14700326134672815469), KQU(14082527904374600529), KQU(16852962273496542070), KQU(17446675504235853907), KQU(15019600398527572311), KQU(12312781346344081551), KQU(14524667935039810450), KQU( 5634005663377195738), KQU(11375574739525000569), KQU( 2423665396433260040), KQU( 5222836914796015410), KQU( 4397666386492647387), KQU( 4619294441691707638), KQU( 665088602354770716), KQU(13246495665281593610), KQU( 6564144270549729409), KQU(10223216188145661688), KQU( 3961556907299230585), KQU(11543262515492439914), KQU(16118031437285993790), KQU( 7143417964520166465), KQU(13295053515909486772), KQU( 40434666004899675), KQU(17127804194038347164), KQU( 8599165966560586269), KQU( 8214016749011284903), KQU(13725130352140465239), KQU( 5467254474431726291), KQU( 7748584297438219877), KQU(16933551114829772472), KQU( 2169618439506799400), KQU( 2169787627665113463), KQU(17314493571267943764), KQU(18053575102911354912), KQU(11928303275378476973), KQU(11593850925061715550), KQU(17782269923473589362), KQU( 3280235307704747039), KQU( 6145343578598685149), KQU(17080117031114086090), KQU(18066839902983594755), KQU( 6517508430331020706), KQU( 8092908893950411541), KQU(12558378233386153732), KQU( 4476532167973132976), KQU(16081642430367025016), KQU( 4233154094369139361), KQU( 8693630486693161027), KQU(11244959343027742285), KQU(12273503967768513508), KQU(14108978636385284876), KQU( 7242414665378826984), KQU( 6561316938846562432), KQU( 8601038474994665795), KQU(17532942353612365904), KQU(17940076637020912186), KQU( 7340260368823171304), KQU( 7061807613916067905), KQU(10561734935039519326), KQU(17990796503724650862), KQU( 6208732943911827159), KQU( 359077562804090617), KQU(14177751537784403113), KQU(10659599444915362902), KQU(15081727220615085833), KQU(13417573895659757486), KQU(15513842342017811524), KQU(11814141516204288231), KQU( 1827312513875101814), KQU( 2804611699894603103), KQU(17116500469975602763), KQU(12270191815211952087), KQU(12256358467786024988), KQU(18435021722453971267), KQU( 671330264390865618), KQU( 476504300460286050), KQU(16465470901027093441), KQU( 4047724406247136402), KQU( 1322305451411883346), KQU( 1388308688834322280), KQU( 7303989085269758176), KQU( 9323792664765233642), KQU( 4542762575316368936), KQU(17342696132794337618), KQU( 4588025054768498379), KQU(13415475057390330804), KQU(17880279491733405570), KQU(10610553400618620353), KQU( 3180842072658960139), KQU(13002966655454270120), KQU( 1665301181064982826), KQU( 7083673946791258979), KQU( 190522247122496820), KQU(17388280237250677740), KQU( 8430770379923642945), KQU(12987180971921668584), KQU( 2311086108365390642), KQU( 2870984383579822345), KQU(14014682609164653318), KQU(14467187293062251484), KQU( 192186361147413298), KQU(15171951713531796524), KQU( 9900305495015948728), KQU(17958004775615466344), KQU(14346380954498606514), KQU(18040047357617407096), KQU( 5035237584833424532), KQU(15089555460613972287), KQU( 4131411873749729831), KQU( 1329013581168250330), KQU(10095353333051193949), KQU(10749518561022462716), KQU( 9050611429810755847), KQU(15022028840236655649), KQU( 8775554279239748298), KQU(13105754025489230502), KQU(15471300118574167585), KQU( 89864764002355628), 
KQU( 8776416323420466637), KQU( 5280258630612040891), KQU( 2719174488591862912), KQU( 7599309137399661994), KQU(15012887256778039979), KQU(14062981725630928925), KQU(12038536286991689603), KQU( 7089756544681775245), KQU(10376661532744718039), KQU( 1265198725901533130), KQU(13807996727081142408), KQU( 2935019626765036403), KQU( 7651672460680700141), KQU( 3644093016200370795), KQU( 2840982578090080674), KQU(17956262740157449201), KQU(18267979450492880548), KQU(11799503659796848070), KQU( 9942537025669672388), KQU(11886606816406990297), KQU( 5488594946437447576), KQU( 7226714353282744302), KQU( 3784851653123877043), KQU( 878018453244803041), KQU(12110022586268616085), KQU( 734072179404675123), KQU(11869573627998248542), KQU( 469150421297783998), KQU( 260151124912803804), KQU(11639179410120968649), KQU( 9318165193840846253), KQU(12795671722734758075), KQU(15318410297267253933), KQU( 691524703570062620), KQU( 5837129010576994601), KQU(15045963859726941052), KQU( 5850056944932238169), KQU(12017434144750943807), KQU( 7447139064928956574), KQU( 3101711812658245019), KQU(16052940704474982954), KQU(18195745945986994042), KQU( 8932252132785575659), KQU(13390817488106794834), KQU(11582771836502517453), KQU( 4964411326683611686), KQU( 2195093981702694011), KQU(14145229538389675669), KQU(16459605532062271798), KQU( 866316924816482864), KQU( 4593041209937286377), KQU( 8415491391910972138), KQU( 4171236715600528969), KQU(16637569303336782889), KQU( 2002011073439212680), KQU(17695124661097601411), KQU( 4627687053598611702), KQU( 7895831936020190403), KQU( 8455951300917267802), KQU( 2923861649108534854), KQU( 8344557563927786255), KQU( 6408671940373352556), KQU(12210227354536675772), KQU(14294804157294222295), KQU(10103022425071085127), KQU(10092959489504123771), KQU( 6554774405376736268), KQU(12629917718410641774), KQU( 6260933257596067126), KQU( 2460827021439369673), KQU( 2541962996717103668), KQU( 597377203127351475), KQU( 5316984203117315309), KQU( 4811211393563241961), KQU(13119698597255811641), KQU( 8048691512862388981), KQU(10216818971194073842), KQU( 4612229970165291764), KQU(10000980798419974770), KQU( 6877640812402540687), KQU( 1488727563290436992), KQU( 2227774069895697318), KQU(11237754507523316593), KQU(13478948605382290972), KQU( 1963583846976858124), KQU( 5512309205269276457), KQU( 3972770164717652347), KQU( 3841751276198975037), KQU(10283343042181903117), KQU( 8564001259792872199), KQU(16472187244722489221), KQU( 8953493499268945921), KQU( 3518747340357279580), KQU( 4003157546223963073), KQU( 3270305958289814590), KQU( 3966704458129482496), KQU( 8122141865926661939), KQU(14627734748099506653), KQU(13064426990862560568), KQU( 2414079187889870829), KQU( 5378461209354225306), KQU(10841985740128255566), KQU( 538582442885401738), KQU( 7535089183482905946), KQU(16117559957598879095), KQU( 8477890721414539741), KQU( 1459127491209533386), KQU(17035126360733620462), KQU( 8517668552872379126), KQU(10292151468337355014), KQU(17081267732745344157), KQU(13751455337946087178), KQU(14026945459523832966), KQU( 6653278775061723516), KQU(10619085543856390441), KQU( 2196343631481122885), KQU(10045966074702826136), KQU(10082317330452718282), KQU( 5920859259504831242), KQU( 9951879073426540617), KQU( 7074696649151414158), KQU(15808193543879464318), KQU( 7385247772746953374), KQU( 3192003544283864292), KQU(18153684490917593847), KQU(12423498260668568905), KQU(10957758099756378169), KQU(11488762179911016040), KQU( 2099931186465333782), KQU(11180979581250294432), KQU( 8098916250668367933), KQU( 
3529200436790763465), KQU(12988418908674681745), KQU( 6147567275954808580), KQU( 3207503344604030989), KQU(10761592604898615360), KQU( 229854861031893504), KQU( 8809853962667144291), KQU(13957364469005693860), KQU( 7634287665224495886), KQU(12353487366976556874), KQU( 1134423796317152034), KQU( 2088992471334107068), KQU( 7393372127190799698), KQU( 1845367839871058391), KQU( 207922563987322884), KQU(11960870813159944976), KQU(12182120053317317363), KQU(17307358132571709283), KQU(13871081155552824936), KQU(18304446751741566262), KQU( 7178705220184302849), KQU(10929605677758824425), KQU(16446976977835806844), KQU(13723874412159769044), KQU( 6942854352100915216), KQU( 1726308474365729390), KQU( 2150078766445323155), KQU(15345558947919656626), KQU(12145453828874527201), KQU( 2054448620739726849), KQU( 2740102003352628137), KQU(11294462163577610655), KQU( 756164283387413743), KQU(17841144758438810880), KQU(10802406021185415861), KQU( 8716455530476737846), KQU( 6321788834517649606), KQU(14681322910577468426), KQU(17330043563884336387), KQU(12701802180050071614), KQU(14695105111079727151), KQU( 5112098511654172830), KQU( 4957505496794139973), KQU( 8270979451952045982), KQU(12307685939199120969), KQU(12425799408953443032), KQU( 8376410143634796588), KQU(16621778679680060464), KQU( 3580497854566660073), KQU( 1122515747803382416), KQU( 857664980960597599), KQU( 6343640119895925918), KQU(12878473260854462891), KQU(10036813920765722626), KQU(14451335468363173812), KQU( 5476809692401102807), KQU(16442255173514366342), KQU(13060203194757167104), KQU(14354124071243177715), KQU(15961249405696125227), KQU(13703893649690872584), KQU( 363907326340340064), KQU( 6247455540491754842), KQU(12242249332757832361), KQU( 156065475679796717), KQU( 9351116235749732355), KQU( 4590350628677701405), KQU( 1671195940982350389), KQU(13501398458898451905), KQU( 6526341991225002255), KQU( 1689782913778157592), KQU( 7439222350869010334), KQU(13975150263226478308), KQU(11411961169932682710), KQU(17204271834833847277), KQU( 541534742544435367), KQU( 6591191931218949684), KQU( 2645454775478232486), KQU( 4322857481256485321), KQU( 8477416487553065110), KQU(12902505428548435048), KQU( 971445777981341415), KQU(14995104682744976712), KQU( 4243341648807158063), KQU( 8695061252721927661), KQU( 5028202003270177222), KQU( 2289257340915567840), KQU(13870416345121866007), KQU(13994481698072092233), KQU( 6912785400753196481), KQU( 2278309315841980139), KQU( 4329765449648304839), KQU( 5963108095785485298), KQU( 4880024847478722478), KQU(16015608779890240947), KQU( 1866679034261393544), KQU( 914821179919731519), KQU( 9643404035648760131), KQU( 2418114953615593915), KQU( 944756836073702374), KQU(15186388048737296834), KQU( 7723355336128442206), KQU( 7500747479679599691), KQU(18013961306453293634), KQU( 2315274808095756456), KQU(13655308255424029566), KQU(17203800273561677098), KQU( 1382158694422087756), KQU( 5090390250309588976), KQU( 517170818384213989), KQU( 1612709252627729621), KQU( 1330118955572449606), KQU( 300922478056709885), KQU(18115693291289091987), KQU(13491407109725238321), KQU(15293714633593827320), KQU( 5151539373053314504), KQU( 5951523243743139207), KQU(14459112015249527975), KQU( 5456113959000700739), KQU( 3877918438464873016), KQU(12534071654260163555), KQU(15871678376893555041), KQU(11005484805712025549), KQU(16353066973143374252), KQU( 4358331472063256685), KQU( 8268349332210859288), KQU(12485161590939658075), KQU(13955993592854471343), KQU( 5911446886848367039), KQU(14925834086813706974), KQU( 6590362597857994805), KQU( 
1280544923533661875), KQU( 1637756018947988164), KQU( 4734090064512686329), KQU(16693705263131485912), KQU( 6834882340494360958), KQU( 8120732176159658505), KQU( 2244371958905329346), KQU(10447499707729734021), KQU( 7318742361446942194), KQU( 8032857516355555296), KQU(14023605983059313116), KQU( 1032336061815461376), KQU( 9840995337876562612), KQU( 9869256223029203587), KQU(12227975697177267636), KQU(12728115115844186033), KQU( 7752058479783205470), KQU( 729733219713393087), KQU(12954017801239007622) }; static const uint64_t init_by_array_64_expected[] = { KQU( 2100341266307895239), KQU( 8344256300489757943), KQU(15687933285484243894), KQU( 8268620370277076319), KQU(12371852309826545459), KQU( 8800491541730110238), KQU(18113268950100835773), KQU( 2886823658884438119), KQU( 3293667307248180724), KQU( 9307928143300172731), KQU( 7688082017574293629), KQU( 900986224735166665), KQU( 9977972710722265039), KQU( 6008205004994830552), KQU( 546909104521689292), KQU( 7428471521869107594), KQU(14777563419314721179), KQU(16116143076567350053), KQU( 5322685342003142329), KQU( 4200427048445863473), KQU( 4693092150132559146), KQU(13671425863759338582), KQU( 6747117460737639916), KQU( 4732666080236551150), KQU( 5912839950611941263), KQU( 3903717554504704909), KQU( 2615667650256786818), KQU(10844129913887006352), KQU(13786467861810997820), KQU(14267853002994021570), KQU(13767807302847237439), KQU(16407963253707224617), KQU( 4802498363698583497), KQU( 2523802839317209764), KQU( 3822579397797475589), KQU( 8950320572212130610), KQU( 3745623504978342534), KQU(16092609066068482806), KQU( 9817016950274642398), KQU(10591660660323829098), KQU(11751606650792815920), KQU( 5122873818577122211), KQU(17209553764913936624), KQU( 6249057709284380343), KQU(15088791264695071830), KQU(15344673071709851930), KQU( 4345751415293646084), KQU( 2542865750703067928), KQU(13520525127852368784), KQU(18294188662880997241), KQU( 3871781938044881523), KQU( 2873487268122812184), KQU(15099676759482679005), KQU(15442599127239350490), KQU( 6311893274367710888), KQU( 3286118760484672933), KQU( 4146067961333542189), KQU(13303942567897208770), KQU( 8196013722255630418), KQU( 4437815439340979989), KQU(15433791533450605135), KQU( 4254828956815687049), KQU( 1310903207708286015), KQU(10529182764462398549), KQU(14900231311660638810), KQU( 9727017277104609793), KQU( 1821308310948199033), KQU(11628861435066772084), KQU( 9469019138491546924), KQU( 3145812670532604988), KQU( 9938468915045491919), KQU( 1562447430672662142), KQU(13963995266697989134), KQU( 3356884357625028695), KQU( 4499850304584309747), KQU( 8456825817023658122), KQU(10859039922814285279), KQU( 8099512337972526555), KQU( 348006375109672149), KQU(11919893998241688603), KQU( 1104199577402948826), KQU(16689191854356060289), KQU(10992552041730168078), KQU( 7243733172705465836), KQU( 5668075606180319560), KQU(18182847037333286970), KQU( 4290215357664631322), KQU( 4061414220791828613), KQU(13006291061652989604), KQU( 7140491178917128798), KQU(12703446217663283481), KQU( 5500220597564558267), KQU(10330551509971296358), KQU(15958554768648714492), KQU( 5174555954515360045), KQU( 1731318837687577735), KQU( 3557700801048354857), KQU(13764012341928616198), KQU(13115166194379119043), KQU( 7989321021560255519), KQU( 2103584280905877040), KQU( 9230788662155228488), KQU(16396629323325547654), KQU( 657926409811318051), KQU(15046700264391400727), KQU( 5120132858771880830), KQU( 7934160097989028561), KQU( 6963121488531976245), KQU(17412329602621742089), KQU(15144843053931774092), 
KQU(17204176651763054532), KQU(13166595387554065870), KQU( 8590377810513960213), KQU( 5834365135373991938), KQU( 7640913007182226243), KQU( 3479394703859418425), KQU(16402784452644521040), KQU( 4993979809687083980), KQU(13254522168097688865), KQU(15643659095244365219), KQU( 5881437660538424982), KQU(11174892200618987379), KQU( 254409966159711077), KQU(17158413043140549909), KQU( 3638048789290376272), KQU( 1376816930299489190), KQU( 4622462095217761923), KQU(15086407973010263515), KQU(13253971772784692238), KQU( 5270549043541649236), KQU(11182714186805411604), KQU(12283846437495577140), KQU( 5297647149908953219), KQU(10047451738316836654), KQU( 4938228100367874746), KQU(12328523025304077923), KQU( 3601049438595312361), KQU( 9313624118352733770), KQU(13322966086117661798), KQU(16660005705644029394), KQU(11337677526988872373), KQU(13869299102574417795), KQU(15642043183045645437), KQU( 3021755569085880019), KQU( 4979741767761188161), KQU(13679979092079279587), KQU( 3344685842861071743), KQU(13947960059899588104), KQU( 305806934293368007), KQU( 5749173929201650029), KQU(11123724852118844098), KQU(15128987688788879802), KQU(15251651211024665009), KQU( 7689925933816577776), KQU(16732804392695859449), KQU(17087345401014078468), KQU(14315108589159048871), KQU( 4820700266619778917), KQU(16709637539357958441), KQU( 4936227875177351374), KQU( 2137907697912987247), KQU(11628565601408395420), KQU( 2333250549241556786), KQU( 5711200379577778637), KQU( 5170680131529031729), KQU(12620392043061335164), KQU( 95363390101096078), KQU( 5487981914081709462), KQU( 1763109823981838620), KQU( 3395861271473224396), KQU( 1300496844282213595), KQU( 6894316212820232902), KQU(10673859651135576674), KQU( 5911839658857903252), KQU(17407110743387299102), KQU( 8257427154623140385), KQU(11389003026741800267), KQU( 4070043211095013717), KQU(11663806997145259025), KQU(15265598950648798210), KQU( 630585789434030934), KQU( 3524446529213587334), KQU( 7186424168495184211), KQU(10806585451386379021), KQU(11120017753500499273), KQU( 1586837651387701301), KQU(17530454400954415544), KQU( 9991670045077880430), KQU( 7550997268990730180), KQU( 8640249196597379304), KQU( 3522203892786893823), KQU(10401116549878854788), KQU(13690285544733124852), KQU( 8295785675455774586), KQU(15535716172155117603), KQU( 3112108583723722511), KQU(17633179955339271113), KQU(18154208056063759375), KQU( 1866409236285815666), KQU(13326075895396412882), KQU( 8756261842948020025), KQU( 6281852999868439131), KQU(15087653361275292858), KQU(10333923911152949397), KQU( 5265567645757408500), KQU(12728041843210352184), KQU( 6347959327507828759), KQU( 154112802625564758), KQU(18235228308679780218), KQU( 3253805274673352418), KQU( 4849171610689031197), KQU(17948529398340432518), KQU(13803510475637409167), KQU(13506570190409883095), KQU(15870801273282960805), KQU( 8451286481299170773), KQU( 9562190620034457541), KQU( 8518905387449138364), KQU(12681306401363385655), KQU( 3788073690559762558), KQU( 5256820289573487769), KQU( 2752021372314875467), KQU( 6354035166862520716), KQU( 4328956378309739069), KQU( 449087441228269600), KQU( 5533508742653090868), KQU( 1260389420404746988), KQU(18175394473289055097), KQU( 1535467109660399420), KQU( 8818894282874061442), KQU(12140873243824811213), KQU(15031386653823014946), KQU( 1286028221456149232), KQU( 6329608889367858784), KQU( 9419654354945132725), KQU( 6094576547061672379), KQU(17706217251847450255), KQU( 1733495073065878126), KQU(16918923754607552663), KQU( 8881949849954945044), KQU(12938977706896313891), 
KQU(14043628638299793407), KQU(18393874581723718233), KQU( 6886318534846892044), KQU(14577870878038334081), KQU(13541558383439414119), KQU(13570472158807588273), KQU(18300760537910283361), KQU( 818368572800609205), KQU( 1417000585112573219), KQU(12337533143867683655), KQU(12433180994702314480), KQU( 778190005829189083), KQU(13667356216206524711), KQU( 9866149895295225230), KQU(11043240490417111999), KQU( 1123933826541378598), KQU( 6469631933605123610), KQU(14508554074431980040), KQU(13918931242962026714), KQU( 2870785929342348285), KQU(14786362626740736974), KQU(13176680060902695786), KQU( 9591778613541679456), KQU( 9097662885117436706), KQU( 749262234240924947), KQU( 1944844067793307093), KQU( 4339214904577487742), KQU( 8009584152961946551), KQU(16073159501225501777), KQU( 3335870590499306217), KQU(17088312653151202847), KQU( 3108893142681931848), KQU(16636841767202792021), KQU(10423316431118400637), KQU( 8008357368674443506), KQU(11340015231914677875), KQU(17687896501594936090), KQU(15173627921763199958), KQU( 542569482243721959), KQU(15071714982769812975), KQU( 4466624872151386956), KQU( 1901780715602332461), KQU( 9822227742154351098), KQU( 1479332892928648780), KQU( 6981611948382474400), KQU( 7620824924456077376), KQU(14095973329429406782), KQU( 7902744005696185404), KQU(15830577219375036920), KQU(10287076667317764416), KQU(12334872764071724025), KQU( 4419302088133544331), KQU(14455842851266090520), KQU(12488077416504654222), KQU( 7953892017701886766), KQU( 6331484925529519007), KQU( 4902145853785030022), KQU(17010159216096443073), KQU(11945354668653886087), KQU(15112022728645230829), KQU(17363484484522986742), KQU( 4423497825896692887), KQU( 8155489510809067471), KQU( 258966605622576285), KQU( 5462958075742020534), KQU( 6763710214913276228), KQU( 2368935183451109054), KQU(14209506165246453811), KQU( 2646257040978514881), KQU( 3776001911922207672), KQU( 1419304601390147631), KQU(14987366598022458284), KQU( 3977770701065815721), KQU( 730820417451838898), KQU( 3982991703612885327), KQU( 2803544519671388477), KQU(17067667221114424649), KQU( 2922555119737867166), KQU( 1989477584121460932), KQU(15020387605892337354), KQU( 9293277796427533547), KQU(10722181424063557247), KQU(16704542332047511651), KQU( 5008286236142089514), KQU(16174732308747382540), KQU(17597019485798338402), KQU(13081745199110622093), KQU( 8850305883842258115), KQU(12723629125624589005), KQU( 8140566453402805978), KQU(15356684607680935061), KQU(14222190387342648650), KQU(11134610460665975178), KQU( 1259799058620984266), KQU(13281656268025610041), KQU( 298262561068153992), KQU(12277871700239212922), KQU(13911297774719779438), KQU(16556727962761474934), KQU(17903010316654728010), KQU( 9682617699648434744), KQU(14757681836838592850), KQU( 1327242446558524473), KQU(11126645098780572792), KQU( 1883602329313221774), KQU( 2543897783922776873), KQU(15029168513767772842), KQU(12710270651039129878), KQU(16118202956069604504), KQU(15010759372168680524), KQU( 2296827082251923948), KQU(10793729742623518101), KQU(13829764151845413046), KQU(17769301223184451213), KQU( 3118268169210783372), KQU(17626204544105123127), KQU( 7416718488974352644), KQU(10450751996212925994), KQU( 9352529519128770586), KQU( 259347569641110140), KQU( 8048588892269692697), KQU( 1774414152306494058), KQU(10669548347214355622), KQU(13061992253816795081), KQU(18432677803063861659), KQU( 8879191055593984333), KQU(12433753195199268041), KQU(14919392415439730602), KQU( 6612848378595332963), KQU( 6320986812036143628), KQU(10465592420226092859), KQU( 
4196009278962570808), KQU( 3747816564473572224), KQU(17941203486133732898), KQU( 2350310037040505198), KQU( 5811779859134370113), KQU(10492109599506195126), KQU( 7699650690179541274), KQU( 1954338494306022961), KQU(14095816969027231152), KQU( 5841346919964852061), KQU(14945969510148214735), KQU( 3680200305887550992), KQU( 6218047466131695792), KQU( 8242165745175775096), KQU(11021371934053307357), KQU( 1265099502753169797), KQU( 4644347436111321718), KQU( 3609296916782832859), KQU( 8109807992218521571), KQU(18387884215648662020), KQU(14656324896296392902), KQU(17386819091238216751), KQU(17788300878582317152), KQU( 7919446259742399591), KQU( 4466613134576358004), KQU(12928181023667938509), KQU(13147446154454932030), KQU(16552129038252734620), KQU( 8395299403738822450), KQU(11313817655275361164), KQU( 434258809499511718), KQU( 2074882104954788676), KQU( 7929892178759395518), KQU( 9006461629105745388), KQU( 5176475650000323086), KQU(11128357033468341069), KQU(12026158851559118955), KQU(14699716249471156500), KQU( 448982497120206757), KQU( 4156475356685519900), KQU( 6063816103417215727), KQU(10073289387954971479), KQU( 8174466846138590962), KQU( 2675777452363449006), KQU( 9090685420572474281), KQU( 6659652652765562060), KQU(12923120304018106621), KQU(11117480560334526775), KQU( 937910473424587511), KQU( 1838692113502346645), KQU(11133914074648726180), KQU( 7922600945143884053), KQU(13435287702700959550), KQU( 5287964921251123332), KQU(11354875374575318947), KQU(17955724760748238133), KQU(13728617396297106512), KQU( 4107449660118101255), KQU( 1210269794886589623), KQU(11408687205733456282), KQU( 4538354710392677887), KQU(13566803319341319267), KQU(17870798107734050771), KQU( 3354318982568089135), KQU( 9034450839405133651), KQU(13087431795753424314), KQU( 950333102820688239), KQU( 1968360654535604116), KQU(16840551645563314995), KQU( 8867501803892924995), KQU(11395388644490626845), KQU( 1529815836300732204), KQU(13330848522996608842), KQU( 1813432878817504265), KQU( 2336867432693429560), KQU(15192805445973385902), KQU( 2528593071076407877), KQU( 128459777936689248), KQU( 9976345382867214866), KQU( 6208885766767996043), KQU(14982349522273141706), KQU( 3099654362410737822), KQU(13776700761947297661), KQU( 8806185470684925550), KQU( 8151717890410585321), KQU( 640860591588072925), KQU(14592096303937307465), KQU( 9056472419613564846), KQU(14861544647742266352), KQU(12703771500398470216), KQU( 3142372800384138465), KQU( 6201105606917248196), KQU(18337516409359270184), KQU(15042268695665115339), KQU(15188246541383283846), KQU(12800028693090114519), KQU( 5992859621101493472), KQU(18278043971816803521), KQU( 9002773075219424560), KQU( 7325707116943598353), KQU( 7930571931248040822), KQU( 5645275869617023448), KQU( 7266107455295958487), KQU( 4363664528273524411), KQU(14313875763787479809), KQU(17059695613553486802), KQU( 9247761425889940932), KQU(13704726459237593128), KQU( 2701312427328909832), KQU(17235532008287243115), KQU(14093147761491729538), KQU( 6247352273768386516), KQU( 8268710048153268415), KQU( 7985295214477182083), KQU(15624495190888896807), KQU( 3772753430045262788), KQU( 9133991620474991698), KQU( 5665791943316256028), KQU( 7551996832462193473), KQU(13163729206798953877), KQU( 9263532074153846374), KQU( 1015460703698618353), KQU(17929874696989519390), KQU(18257884721466153847), KQU(16271867543011222991), KQU( 3905971519021791941), KQU(16814488397137052085), KQU( 1321197685504621613), KQU( 2870359191894002181), KQU(14317282970323395450), KQU(13663920845511074366), KQU( 2052463995796539594), 
KQU(14126345686431444337), KQU( 1727572121947022534), KQU(17793552254485594241), KQU( 6738857418849205750), KQU( 1282987123157442952), KQU(16655480021581159251), KQU( 6784587032080183866), KQU(14726758805359965162), KQU( 7577995933961987349), KQU(12539609320311114036), KQU(10789773033385439494), KQU( 8517001497411158227), KQU(10075543932136339710), KQU(14838152340938811081), KQU( 9560840631794044194), KQU(17445736541454117475), KQU(10633026464336393186), KQU(15705729708242246293), KQU( 1117517596891411098), KQU( 4305657943415886942), KQU( 4948856840533979263), KQU(16071681989041789593), KQU(13723031429272486527), KQU( 7639567622306509462), KQU(12670424537483090390), KQU( 9715223453097197134), KQU( 5457173389992686394), KQU( 289857129276135145), KQU(17048610270521972512), KQU( 692768013309835485), KQU(14823232360546632057), KQU(18218002361317895936), KQU( 3281724260212650204), KQU(16453957266549513795), KQU( 8592711109774511881), KQU( 929825123473369579), KQU(15966784769764367791), KQU( 9627344291450607588), KQU(10849555504977813287), KQU( 9234566913936339275), KQU( 6413807690366911210), KQU(10862389016184219267), KQU(13842504799335374048), KQU( 1531994113376881174), KQU( 2081314867544364459), KQU(16430628791616959932), KQU( 8314714038654394368), KQU( 9155473892098431813), KQU(12577843786670475704), KQU( 4399161106452401017), KQU( 1668083091682623186), KQU( 1741383777203714216), KQU( 2162597285417794374), KQU(15841980159165218736), KQU( 1971354603551467079), KQU( 1206714764913205968), KQU( 4790860439591272330), KQU(14699375615594055799), KQU( 8374423871657449988), KQU(10950685736472937738), KQU( 697344331343267176), KQU(10084998763118059810), KQU(12897369539795983124), KQU(12351260292144383605), KQU( 1268810970176811234), KQU( 7406287800414582768), KQU( 516169557043807831), KQU( 5077568278710520380), KQU( 3828791738309039304), KQU( 7721974069946943610), KQU( 3534670260981096460), KQU( 4865792189600584891), KQU(16892578493734337298), KQU( 9161499464278042590), KQU(11976149624067055931), KQU(13219479887277343990), KQU(14161556738111500680), KQU(14670715255011223056), KQU( 4671205678403576558), KQU(12633022931454259781), KQU(14821376219869187646), KQU( 751181776484317028), KQU( 2192211308839047070), KQU(11787306362361245189), KQU(10672375120744095707), KQU( 4601972328345244467), KQU(15457217788831125879), KQU( 8464345256775460809), KQU(10191938789487159478), KQU( 6184348739615197613), KQU(11425436778806882100), KQU( 2739227089124319793), KQU( 461464518456000551), KQU( 4689850170029177442), KQU( 6120307814374078625), KQU(11153579230681708671), KQU( 7891721473905347926), KQU(10281646937824872400), KQU( 3026099648191332248), KQU( 8666750296953273818), KQU(14978499698844363232), KQU(13303395102890132065), KQU( 8182358205292864080), KQU(10560547713972971291), KQU(11981635489418959093), KQU( 3134621354935288409), KQU(11580681977404383968), KQU(14205530317404088650), KQU( 5997789011854923157), KQU(13659151593432238041), KQU(11664332114338865086), KQU( 7490351383220929386), KQU( 7189290499881530378), KQU(15039262734271020220), KQU( 2057217285976980055), KQU( 555570804905355739), KQU(11235311968348555110), KQU(13824557146269603217), KQU(16906788840653099693), KQU( 7222878245455661677), KQU( 5245139444332423756), KQU( 4723748462805674292), KQU(12216509815698568612), KQU(17402362976648951187), KQU(17389614836810366768), KQU( 4880936484146667711), KQU( 9085007839292639880), KQU(13837353458498535449), KQU(11914419854360366677), KQU(16595890135313864103), KQU( 6313969847197627222), 
KQU(18296909792163910431), KQU(10041780113382084042), KQU( 2499478551172884794), KQU(11057894246241189489), KQU( 9742243032389068555), KQU(12838934582673196228), KQU(13437023235248490367), KQU(13372420669446163240), KQU( 6752564244716909224), KQU( 7157333073400313737), KQU(12230281516370654308), KQU( 1182884552219419117), KQU( 2955125381312499218), KQU(10308827097079443249), KQU( 1337648572986534958), KQU(16378788590020343939), KQU( 108619126514420935), KQU( 3990981009621629188), KQU( 5460953070230946410), KQU( 9703328329366531883), KQU(13166631489188077236), KQU( 1104768831213675170), KQU( 3447930458553877908), KQU( 8067172487769945676), KQU( 5445802098190775347), KQU( 3244840981648973873), KQU(17314668322981950060), KQU( 5006812527827763807), KQU(18158695070225526260), KQU( 2824536478852417853), KQU(13974775809127519886), KQU( 9814362769074067392), KQU(17276205156374862128), KQU(11361680725379306967), KQU( 3422581970382012542), KQU(11003189603753241266), KQU(11194292945277862261), KQU( 6839623313908521348), KQU(11935326462707324634), KQU( 1611456788685878444), KQU(13112620989475558907), KQU( 517659108904450427), KQU(13558114318574407624), KQU(15699089742731633077), KQU( 4988979278862685458), KQU( 8111373583056521297), KQU( 3891258746615399627), KQU( 8137298251469718086), KQU(12748663295624701649), KQU( 4389835683495292062), KQU( 5775217872128831729), KQU( 9462091896405534927), KQU( 8498124108820263989), KQU( 8059131278842839525), KQU(10503167994254090892), KQU(11613153541070396656), KQU(18069248738504647790), KQU( 570657419109768508), KQU( 3950574167771159665), KQU( 5514655599604313077), KQU( 2908460854428484165), KQU(10777722615935663114), KQU(12007363304839279486), KQU( 9800646187569484767), KQU( 8795423564889864287), KQU(14257396680131028419), KQU( 6405465117315096498), KQU( 7939411072208774878), KQU(17577572378528990006), KQU(14785873806715994850), KQU(16770572680854747390), KQU(18127549474419396481), KQU(11637013449455757750), KQU(14371851933996761086), KQU( 3601181063650110280), KQU( 4126442845019316144), KQU(10198287239244320669), KQU(18000169628555379659), KQU(18392482400739978269), KQU( 6219919037686919957), KQU( 3610085377719446052), KQU( 2513925039981776336), KQU(16679413537926716955), KQU(12903302131714909434), KQU( 5581145789762985009), KQU(12325955044293303233), KQU(17216111180742141204), KQU( 6321919595276545740), KQU( 3507521147216174501), KQU( 9659194593319481840), KQU(11473976005975358326), KQU(14742730101435987026), KQU( 492845897709954780), KQU(16976371186162599676), KQU(17712703422837648655), KQU( 9881254778587061697), KQU( 8413223156302299551), KQU( 1563841828254089168), KQU( 9996032758786671975), KQU( 138877700583772667), KQU(13003043368574995989), KQU( 4390573668650456587), KQU( 8610287390568126755), KQU(15126904974266642199), KQU( 6703637238986057662), KQU( 2873075592956810157), KQU( 6035080933946049418), KQU(13382846581202353014), KQU( 7303971031814642463), KQU(18418024405307444267), KQU( 5847096731675404647), KQU( 4035880699639842500), KQU(11525348625112218478), KQU( 3041162365459574102), KQU( 2604734487727986558), KQU(15526341771636983145), KQU(14556052310697370254), KQU(12997787077930808155), KQU( 9601806501755554499), KQU(11349677952521423389), KQU(14956777807644899350), KQU(16559736957742852721), KQU(12360828274778140726), KQU( 6685373272009662513), KQU(16932258748055324130), KQU(15918051131954158508), KQU( 1692312913140790144), KQU( 546653826801637367), KQU( 5341587076045986652), KQU(14975057236342585662), KQU(12374976357340622412), 
KQU(10328833995181940552), KQU(12831807101710443149), KQU(10548514914382545716), KQU( 2217806727199715993), KQU(12627067369242845138), KQU( 4598965364035438158), KQU( 150923352751318171), KQU(14274109544442257283), KQU( 4696661475093863031), KQU( 1505764114384654516), KQU(10699185831891495147), KQU( 2392353847713620519), KQU( 3652870166711788383), KQU( 8640653276221911108), KQU( 3894077592275889704), KQU( 4918592872135964845), KQU(16379121273281400789), KQU(12058465483591683656), KQU(11250106829302924945), KQU( 1147537556296983005), KQU( 6376342756004613268), KQU(14967128191709280506), KQU(18007449949790627628), KQU( 9497178279316537841), KQU( 7920174844809394893), KQU(10037752595255719907), KQU(15875342784985217697), KQU(15311615921712850696), KQU( 9552902652110992950), KQU(14054979450099721140), KQU( 5998709773566417349), KQU(18027910339276320187), KQU( 8223099053868585554), KQU( 7842270354824999767), KQU( 4896315688770080292), KQU(12969320296569787895), KQU( 2674321489185759961), KQU( 4053615936864718439), KQU(11349775270588617578), KQU( 4743019256284553975), KQU( 5602100217469723769), KQU(14398995691411527813), KQU( 7412170493796825470), KQU( 836262406131744846), KQU( 8231086633845153022), KQU( 5161377920438552287), KQU( 8828731196169924949), KQU(16211142246465502680), KQU( 3307990879253687818), KQU( 5193405406899782022), KQU( 8510842117467566693), KQU( 6070955181022405365), KQU(14482950231361409799), KQU(12585159371331138077), KQU( 3511537678933588148), KQU( 2041849474531116417), KQU(10944936685095345792), KQU(18303116923079107729), KQU( 2720566371239725320), KQU( 4958672473562397622), KQU( 3032326668253243412), KQU(13689418691726908338), KQU( 1895205511728843996), KQU( 8146303515271990527), KQU(16507343500056113480), KQU( 473996939105902919), KQU( 9897686885246881481), KQU(14606433762712790575), KQU( 6732796251605566368), KQU( 1399778120855368916), KQU( 935023885182833777), KQU(16066282816186753477), KQU( 7291270991820612055), KQU(17530230393129853844), KQU(10223493623477451366), KQU(15841725630495676683), KQU(17379567246435515824), KQU( 8588251429375561971), KQU(18339511210887206423), KQU(17349587430725976100), KQU(12244876521394838088), KQU( 6382187714147161259), KQU(12335807181848950831), KQU(16948885622305460665), KQU(13755097796371520506), KQU(14806740373324947801), KQU( 4828699633859287703), KQU( 8209879281452301604), KQU(12435716669553736437), KQU(13970976859588452131), KQU( 6233960842566773148), KQU(12507096267900505759), KQU( 1198713114381279421), KQU(14989862731124149015), KQU(15932189508707978949), KQU( 2526406641432708722), KQU( 29187427817271982), KQU( 1499802773054556353), KQU(10816638187021897173), KQU( 5436139270839738132), KQU( 6659882287036010082), KQU( 2154048955317173697), KQU(10887317019333757642), KQU(16281091802634424955), KQU(10754549879915384901), KQU(10760611745769249815), KQU( 2161505946972504002), KQU( 5243132808986265107), KQU(10129852179873415416), KQU( 710339480008649081), KQU( 7802129453068808528), KQU(17967213567178907213), KQU(15730859124668605599), KQU(13058356168962376502), KQU( 3701224985413645909), KQU(14464065869149109264), KQU( 9959272418844311646), KQU(10157426099515958752), KQU(14013736814538268528), KQU(17797456992065653951), KQU(17418878140257344806), KQU(15457429073540561521), KQU( 2184426881360949378), KQU( 2062193041154712416), KQU( 8553463347406931661), KQU( 4913057625202871854), KQU( 2668943682126618425), KQU(17064444737891172288), KQU( 4997115903913298637), KQU(12019402608892327416), KQU(17603584559765897352), 
KQU(11367529582073647975), KQU( 8211476043518436050), KQU( 8676849804070323674), KQU(18431829230394475730), KQU(10490177861361247904), KQU( 9508720602025651349), KQU( 7409627448555722700), KQU( 5804047018862729008), KQU(11943858176893142594), KQU(11908095418933847092), KQU( 5415449345715887652), KQU( 1554022699166156407), KQU( 9073322106406017161), KQU( 7080630967969047082), KQU(18049736940860732943), KQU(12748714242594196794), KQU( 1226992415735156741), KQU(17900981019609531193), KQU(11720739744008710999), KQU( 3006400683394775434), KQU(11347974011751996028), KQU( 3316999628257954608), KQU( 8384484563557639101), KQU(18117794685961729767), KQU( 1900145025596618194), KQU(17459527840632892676), KQU( 5634784101865710994), KQU( 7918619300292897158), KQU( 3146577625026301350), KQU( 9955212856499068767), KQU( 1873995843681746975), KQU( 1561487759967972194), KQU( 8322718804375878474), KQU(11300284215327028366), KQU( 4667391032508998982), KQU( 9820104494306625580), KQU(17922397968599970610), KQU( 1784690461886786712), KQU(14940365084341346821), KQU( 5348719575594186181), KQU(10720419084507855261), KQU(14210394354145143274), KQU( 2426468692164000131), KQU(16271062114607059202), KQU(14851904092357070247), KQU( 6524493015693121897), KQU( 9825473835127138531), KQU(14222500616268569578), KQU(15521484052007487468), KQU(14462579404124614699), KQU(11012375590820665520), KQU(11625327350536084927), KQU(14452017765243785417), KQU( 9989342263518766305), KQU( 3640105471101803790), KQU( 4749866455897513242), KQU(13963064946736312044), KQU(10007416591973223791), KQU(18314132234717431115), KQU( 3286596588617483450), KQU( 7726163455370818765), KQU( 7575454721115379328), KQU( 5308331576437663422), KQU(18288821894903530934), KQU( 8028405805410554106), KQU(15744019832103296628), KQU( 149765559630932100), KQU( 6137705557200071977), KQU(14513416315434803615), KQU(11665702820128984473), KQU( 218926670505601386), KQU( 6868675028717769519), KQU(15282016569441512302), KQU( 5707000497782960236), KQU( 6671120586555079567), KQU( 2194098052618985448), KQU(16849577895477330978), KQU(12957148471017466283), KQU( 1997805535404859393), KQU( 1180721060263860490), KQU(13206391310193756958), KQU(12980208674461861797), KQU( 3825967775058875366), KQU(17543433670782042631), KQU( 1518339070120322730), KQU(16344584340890991669), KQU( 2611327165318529819), KQU(11265022723283422529), KQU( 4001552800373196817), KQU(14509595890079346161), KQU( 3528717165416234562), KQU(18153222571501914072), KQU( 9387182977209744425), KQU(10064342315985580021), KQU(11373678413215253977), KQU( 2308457853228798099), KQU( 9729042942839545302), KQU( 7833785471140127746), KQU( 6351049900319844436), KQU(14454610627133496067), KQU(12533175683634819111), KQU(15570163926716513029), KQU(13356980519185762498) }; TEST_BEGIN(test_gen_rand_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; uint32_t r32; sfmt_t *ctx; assert_d_le(get_min_array_size32(), BLOCK_SIZE, "Array size too small"); ctx = init_gen_rand(1234); fill_array32(ctx, array32, BLOCK_SIZE); fill_array32(ctx, array32_2, BLOCK_SIZE); fini_gen_rand(ctx); ctx = init_gen_rand(1234); for (i = 0; i < BLOCK_SIZE; i++) { if (i < COUNT_1) { assert_u32_eq(array32[i], init_gen_rand_32_expected[i], "Output mismatch for i=%d", i); } r32 = gen_rand32(ctx); assert_u32_eq(r32, array32[i], "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); } for (i = 0; i < COUNT_2; i++) { r32 = gen_rand32(ctx); assert_u32_eq(r32, array32_2[i], "Mismatch at 
array32_2[%d]=%x, gen=%x", i, array32_2[i], r32); } fini_gen_rand(ctx); } TEST_END TEST_BEGIN(test_by_array_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; uint32_t r32; sfmt_t *ctx; assert_d_le(get_min_array_size32(), BLOCK_SIZE, "Array size too small"); ctx = init_by_array(ini, 4); fill_array32(ctx, array32, BLOCK_SIZE); fill_array32(ctx, array32_2, BLOCK_SIZE); fini_gen_rand(ctx); ctx = init_by_array(ini, 4); for (i = 0; i < BLOCK_SIZE; i++) { if (i < COUNT_1) { assert_u32_eq(array32[i], init_by_array_32_expected[i], "Output mismatch for i=%d", i); } r32 = gen_rand32(ctx); assert_u32_eq(r32, array32[i], "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); } for (i = 0; i < COUNT_2; i++) { r32 = gen_rand32(ctx); assert_u32_eq(r32, array32_2[i], "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], r32); } fini_gen_rand(ctx); } TEST_END TEST_BEGIN(test_gen_rand_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; uint64_t r; sfmt_t *ctx; assert_d_le(get_min_array_size64(), BLOCK_SIZE64, "Array size too small"); ctx = init_gen_rand(4321); fill_array64(ctx, array64, BLOCK_SIZE64); fill_array64(ctx, array64_2, BLOCK_SIZE64); fini_gen_rand(ctx); ctx = init_gen_rand(4321); for (i = 0; i < BLOCK_SIZE64; i++) { if (i < COUNT_1) { assert_u64_eq(array64[i], init_gen_rand_64_expected[i], "Output mismatch for i=%d", i); } r = gen_rand64(ctx); assert_u64_eq(r, array64[i], "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i, array64[i], r); } for (i = 0; i < COUNT_2; i++) { r = gen_rand64(ctx); assert_u64_eq(r, array64_2[i], "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i, array64_2[i], r); } fini_gen_rand(ctx); } TEST_END TEST_BEGIN(test_by_array_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; uint64_t r; uint32_t ini[] = {5, 4, 3, 2, 1}; sfmt_t *ctx; assert_d_le(get_min_array_size64(), BLOCK_SIZE64, "Array size too small"); ctx = init_by_array(ini, 5); fill_array64(ctx, array64, BLOCK_SIZE64); fill_array64(ctx, array64_2, BLOCK_SIZE64); fini_gen_rand(ctx); ctx = init_by_array(ini, 5); for (i = 0; i < BLOCK_SIZE64; i++) { if (i < COUNT_1) { assert_u64_eq(array64[i], init_by_array_64_expected[i], "Output mismatch for i=%d", i); } r = gen_rand64(ctx); assert_u64_eq(r, array64[i], "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i, array64[i], r); } for (i = 0; i < COUNT_2; i++) { r = gen_rand64(ctx); assert_u64_eq(r, array64_2[i], "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i, array64_2[i], r); } fini_gen_rand(ctx); } TEST_END int main(void) { return test( test_gen_rand_32, test_by_array_32, test_gen_rand_64, test_by_array_64); } jemalloc-sys-0.3.2/jemalloc/test/unit/size_classes.c010064400007650000024000000153111340421341300207100ustar0000000000000000#include "test/jemalloc_test.h" static size_t get_max_size_class(void) { unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nlextents - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 
0), 0, "Unexpected mallctlbymib() error"); return max_size_class; } TEST_BEGIN(test_size_classes) { size_t size_class, max_size_class; szind_t index, max_index; max_size_class = get_max_size_class(); max_index = sz_size2index(max_size_class); for (index = 0, size_class = sz_index2size(index); index < max_index || size_class < max_size_class; index++, size_class = sz_index2size(index)) { assert_true(index < max_index, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_true(size_class < max_size_class, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_u_eq(index, sz_size2index(size_class), "sz_size2index() does not reverse sz_index2size(): index=%u" " --> size_class=%zu --> index=%u --> size_class=%zu", index, size_class, sz_size2index(size_class), sz_index2size(sz_size2index(size_class))); assert_zu_eq(size_class, sz_index2size(sz_size2index(size_class)), "sz_index2size() does not reverse sz_size2index(): index=%u" " --> size_class=%zu --> index=%u --> size_class=%zu", index, size_class, sz_size2index(size_class), sz_index2size(sz_size2index(size_class))); assert_u_eq(index+1, sz_size2index(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (index > 0) ? sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class-1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class), "sz_s2u() does not compute same size class"); assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1), "sz_s2u() does not round up to next size class"); } assert_u_eq(index, sz_size2index(sz_index2size(index)), "sz_size2index() does not reverse sz_index2size()"); assert_zu_eq(max_size_class, sz_index2size( sz_size2index(max_size_class)), "sz_index2size() does not reverse sz_size2index()"); assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class-1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class), "sz_s2u() does not compute same size class"); } TEST_END TEST_BEGIN(test_psize_classes) { size_t size_class, max_psz; pszind_t pind, max_pind; max_psz = get_max_size_class() + PAGE; max_pind = sz_psz2ind(max_psz); for (pind = 0, size_class = sz_pind2sz(pind); pind < max_pind || size_class < max_psz; pind++, size_class = sz_pind2sz(pind)) { assert_true(pind < max_pind, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); assert_true(size_class < max_psz, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); assert_u_eq(pind, sz_psz2ind(size_class), "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, size_class, sz_psz2ind(size_class), sz_pind2sz(sz_psz2ind(size_class))); assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)), "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, size_class, sz_psz2ind(size_class), sz_pind2sz(sz_psz2ind(size_class))); assert_u_eq(pind+1, sz_psz2ind(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (pind > 0) ? 
TEST_BEGIN(test_size_classes) {
    size_t size_class, max_size_class;
    szind_t index, max_index;

    max_size_class = get_max_size_class();
    max_index = sz_size2index(max_size_class);

    for (index = 0, size_class = sz_index2size(index); index < max_index ||
        size_class < max_size_class; index++, size_class =
        sz_index2size(index)) {
        assert_true(index < max_index,
            "Loop conditionals should be equivalent; index=%u, "
            "size_class=%zu (%#zx)", index, size_class, size_class);
        assert_true(size_class < max_size_class,
            "Loop conditionals should be equivalent; index=%u, "
            "size_class=%zu (%#zx)", index, size_class, size_class);

        assert_u_eq(index, sz_size2index(size_class),
            "sz_size2index() does not reverse sz_index2size(): index=%u"
            " --> size_class=%zu --> index=%u --> size_class=%zu", index,
            size_class, sz_size2index(size_class),
            sz_index2size(sz_size2index(size_class)));
        assert_zu_eq(size_class, sz_index2size(sz_size2index(size_class)),
            "sz_index2size() does not reverse sz_size2index(): index=%u"
            " --> size_class=%zu --> index=%u --> size_class=%zu", index,
            size_class, sz_size2index(size_class),
            sz_index2size(sz_size2index(size_class)));

        assert_u_eq(index+1, sz_size2index(size_class+1),
            "Next size_class does not round up properly");

        assert_zu_eq(size_class, (index > 0) ?
            sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
            "sz_s2u() does not round up to size class");
        assert_zu_eq(size_class, sz_s2u(size_class-1),
            "sz_s2u() does not round up to size class");
        assert_zu_eq(size_class, sz_s2u(size_class),
            "sz_s2u() does not compute same size class");
        assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
            "sz_s2u() does not round up to next size class");
    }

    assert_u_eq(index, sz_size2index(sz_index2size(index)),
        "sz_size2index() does not reverse sz_index2size()");
    assert_zu_eq(max_size_class, sz_index2size(
        sz_size2index(max_size_class)),
        "sz_index2size() does not reverse sz_size2index()");

    assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
        "sz_s2u() does not round up to size class");
    assert_zu_eq(size_class, sz_s2u(size_class-1),
        "sz_s2u() does not round up to size class");
    assert_zu_eq(size_class, sz_s2u(size_class),
        "sz_s2u() does not compute same size class");
}
TEST_END

TEST_BEGIN(test_psize_classes) {
    size_t size_class, max_psz;
    pszind_t pind, max_pind;

    max_psz = get_max_size_class() + PAGE;
    max_pind = sz_psz2ind(max_psz);

    for (pind = 0, size_class = sz_pind2sz(pind); pind < max_pind ||
        size_class < max_psz; pind++, size_class = sz_pind2sz(pind)) {
        assert_true(pind < max_pind,
            "Loop conditionals should be equivalent; pind=%u, "
            "size_class=%zu (%#zx)", pind, size_class, size_class);
        assert_true(size_class < max_psz,
            "Loop conditionals should be equivalent; pind=%u, "
            "size_class=%zu (%#zx)", pind, size_class, size_class);

        assert_u_eq(pind, sz_psz2ind(size_class),
            "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
            " size_class=%zu --> pind=%u --> size_class=%zu", pind,
            size_class, sz_psz2ind(size_class),
            sz_pind2sz(sz_psz2ind(size_class)));
        assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
            "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
            " size_class=%zu --> pind=%u --> size_class=%zu", pind,
            size_class, sz_psz2ind(size_class),
            sz_pind2sz(sz_psz2ind(size_class)));

        assert_u_eq(pind+1, sz_psz2ind(size_class+1),
            "Next size_class does not round up properly");

        assert_zu_eq(size_class, (pind > 0) ?
            sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
            "sz_psz2u() does not round up to size class");
        assert_zu_eq(size_class, sz_psz2u(size_class-1),
            "sz_psz2u() does not round up to size class");
        assert_zu_eq(size_class, sz_psz2u(size_class),
            "sz_psz2u() does not compute same size class");
        assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
            "sz_psz2u() does not round up to next size class");
    }

    assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
        "sz_psz2ind() does not reverse sz_pind2sz()");
    assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
        "sz_pind2sz() does not reverse sz_psz2ind()");

    assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
        "sz_psz2u() does not round up to size class");
    assert_zu_eq(size_class, sz_psz2u(size_class-1),
        "sz_psz2u() does not round up to size class");
    assert_zu_eq(size_class, sz_psz2u(size_class),
        "sz_psz2u() does not compute same size class");
}
TEST_END

TEST_BEGIN(test_overflow) {
    size_t max_size_class, max_psz;

    max_size_class = get_max_size_class();
    max_psz = max_size_class + PAGE;

    assert_u_eq(sz_size2index(max_size_class+1), NSIZES,
        "sz_size2index() should return NSIZES on overflow");
    assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
        "sz_size2index() should return NSIZES on overflow");
    assert_u_eq(sz_size2index(SIZE_T_MAX), NSIZES,
        "sz_size2index() should return NSIZES on overflow");

    assert_zu_eq(sz_s2u(max_size_class+1), 0,
        "sz_s2u() should return 0 for unsupported size");
    assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
        "sz_s2u() should return 0 for unsupported size");
    assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
        "sz_s2u() should return 0 on overflow");

    assert_u_eq(sz_psz2ind(max_size_class+1), NPSIZES,
        "sz_psz2ind() should return NPSIZES on overflow");
    assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
        "sz_psz2ind() should return NPSIZES on overflow");
    assert_u_eq(sz_psz2ind(SIZE_T_MAX), NPSIZES,
        "sz_psz2ind() should return NPSIZES on overflow");

    assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
        "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
        " size");
    assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
        "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
        "size");
    assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
        "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
}
TEST_END

int
main(void) {
    return test(
        test_size_classes,
        test_psize_classes,
        test_overflow);
}
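/*
 * The invariants exercised above, in miniature (illustrative only):
 * sz_size2index()/sz_index2size() are inverses on class boundaries, and
 * sz_s2u() rounds an arbitrary request up to its size class. The psz
 * variants checked by test_psize_classes() obey the same algebra at
 * page granularity.
 */
#if 0
static void
size_class_algebra_sketch(size_t size_class) {
    assert_zu_eq(sz_index2size(sz_size2index(size_class)), size_class,
        "index/size maps must be inverses on a class boundary");
    assert_zu_eq(sz_s2u(size_class - 1), size_class,
        "requests must round up to the enclosing class");
    assert_zu_eq(sz_s2u(size_class), size_class,
        "class boundaries must be fixed points of sz_s2u()");
}
#endif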
jemalloc-sys-0.3.2/jemalloc/test/unit/slab.c

#include "test/jemalloc_test.h"

TEST_BEGIN(test_arena_slab_regind) {
    szind_t binind;

    for (binind = 0; binind < NBINS; binind++) {
        size_t regind;
        extent_t slab;
        const bin_info_t *bin_info = &bin_infos[binind];
        extent_init(&slab, NULL, mallocx(bin_info->slab_size,
            MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
            binind, 0, extent_state_active, false, true, true);
        assert_ptr_not_null(extent_addr_get(&slab),
            "Unexpected malloc() failure");
        for (regind = 0; regind < bin_info->nregs; regind++) {
            void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +
                (bin_info->reg_size * regind));
            assert_zu_eq(arena_slab_regind(&slab, binind, reg),
                regind,
                "Incorrect region index computed for size %zu",
                bin_info->reg_size);
        }
        free(extent_addr_get(&slab));
    }
}
TEST_END

int
main(void) {
    return test(
        test_arena_slab_regind);
}
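/*
 * The arithmetic test_arena_slab_regind() verifies, spelled out as an
 * illustrative sketch (not jemalloc code): a slab is a contiguous array
 * of nregs equally sized regions, so a region pointer's index is its
 * byte offset from the slab base divided by reg_size. The test asserts
 * that arena_slab_regind() agrees with this plain formula for every
 * region of every bin.
 */
#if 0
static size_t
regind_by_division(const void *slab_base, size_t reg_size,
    const void *reg) {
    return ((uintptr_t)reg - (uintptr_t)slab_base) / reg_size;
}
#endif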
jemalloc-sys-0.3.2/jemalloc/test/unit/smoothstep.c

#include "test/jemalloc_test.h"

static const uint64_t smoothstep_tab[] = {
#define STEP(step, h, x, y) \
    h,
    SMOOTHSTEP
#undef STEP
};

TEST_BEGIN(test_smoothstep_integral) {
    uint64_t sum, min, max;
    unsigned i;

    /*
     * The integral of smoothstep in the [0..1] range equals 1/2. Verify
     * that the fixed point representation's integral is no more than
     * rounding error distant from 1/2. Regarding rounding, each table
     * element is rounded down to the nearest fixed point value, so the
     * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
     */
    sum = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
        sum += smoothstep_tab[i];
    }

    max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
    min = max - SMOOTHSTEP_NSTEPS;

    assert_u64_ge(sum, min,
        "Integral too small, even accounting for truncation");
    assert_u64_le(sum, max, "Integral exceeds 1/2");
    if (false) {
        malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
            max - sum, SMOOTHSTEP_NSTEPS);
    }
}
TEST_END

TEST_BEGIN(test_smoothstep_monotonic) {
    uint64_t prev_h;
    unsigned i;

    /*
     * The smoothstep function is monotonic in [0..1], i.e. its slope is
     * non-negative. In practice we want to parametrize table generation
     * such that piecewise slope is greater than zero, but do not require
     * that here.
     */
    prev_h = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
        uint64_t h = smoothstep_tab[i];
        assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
        prev_h = h;
    }
    assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
        (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
}
TEST_END

TEST_BEGIN(test_smoothstep_slope) {
    uint64_t prev_h, prev_delta;
    unsigned i;

    /*
     * The smoothstep slope strictly increases until x=0.5, and then
     * strictly decreases until x=1.0. Verify the slightly weaker
     * requirement of monotonicity, so that inadequate table precision
     * does not cause false test failures.
     */
    prev_h = 0;
    prev_delta = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
        uint64_t h = smoothstep_tab[i];
        uint64_t delta = h - prev_h;
        assert_u64_ge(delta, prev_delta,
            "Slope must monotonically increase in 0.0 <= x <= 0.5, "
            "i=%u", i);
        prev_h = h;
        prev_delta = delta;
    }

    prev_h = KQU(1) << SMOOTHSTEP_BFP;
    prev_delta = 0;
    for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
        uint64_t h = smoothstep_tab[i];
        uint64_t delta = prev_h - h;
        assert_u64_ge(delta, prev_delta,
            "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
            "i=%u", i);
        prev_h = h;
        prev_delta = delta;
    }
}
TEST_END

int
main(void) {
    return test(
        test_smoothstep_integral,
        test_smoothstep_monotonic,
        test_smoothstep_slope);
}

jemalloc-sys-0.3.2/jemalloc/test/unit/spin.c

#include "test/jemalloc_test.h"

#include "jemalloc/internal/spin.h"

TEST_BEGIN(test_spin) {
    spin_t spinner = SPIN_INITIALIZER;

    for (unsigned i = 0; i < 100; i++) {
        spin_adaptive(&spinner);
    }
}
TEST_END

int
main(void) {
    return test(
        test_spin);
}

jemalloc-sys-0.3.2/jemalloc/test/unit/stats.c

#include "test/jemalloc_test.h"

TEST_BEGIN(test_stats_summary) {
    size_t sz, allocated, active, resident, mapped;
    int expected = config_stats ? 0 : ENOENT;

    sz = sizeof(size_t);
    assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
        0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
        expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
        expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
        expected, "Unexpected mallctl() result");

    if (config_stats) {
        assert_zu_le(allocated, active,
            "allocated should be no larger than active");
        assert_zu_lt(active, resident,
            "active should be less than resident");
        assert_zu_lt(active, mapped,
            "active should be less than mapped");
    }
}
TEST_END

TEST_BEGIN(test_stats_large) {
    void *p;
    uint64_t epoch;
    size_t allocated;
    uint64_t nmalloc, ndalloc, nrequests;
    size_t sz;
    int expected = config_stats ? 0 : ENOENT;

    p = mallocx(SMALL_MAXCLASS+1, MALLOCX_ARENA(0));
    assert_ptr_not_null(p, "Unexpected mallocx() failure");

    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
        sizeof(epoch)), 0, "Unexpected mallctl() failure");

    sz = sizeof(size_t);
    assert_d_eq(mallctl("stats.arenas.0.large.allocated",
        (void *)&allocated, &sz, NULL, 0), expected,
        "Unexpected mallctl() result");
    sz = sizeof(uint64_t);
    assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
        (void *)&nrequests, &sz, NULL, 0), expected,
        "Unexpected mallctl() result");

    if (config_stats) {
        assert_zu_gt(allocated, 0,
            "allocated should be greater than zero");
        assert_u64_ge(nmalloc, ndalloc,
            "nmalloc should be at least as large as ndalloc");
        assert_u64_le(nmalloc, nrequests,
            "nmalloc should be no larger than nrequests");
    }

    dallocx(p, 0);
}
TEST_END
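/*
 * Stats-reading idiom used throughout this file, as a stand-alone
 * sketch (hypothetical helper, not jemalloc code): jemalloc aggregates
 * statistics lazily, so a reader must write to the "epoch" ctl to
 * refresh the snapshot before reading any stats.* value, or it may
 * observe stale counters.
 */
#if 0
static size_t
snapshot_allocated(void) {
    uint64_t epoch = 1;
    size_t allocated;
    size_t sz = sizeof(allocated);

    mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch));
    mallctl("stats.allocated", (void *)&allocated, &sz, NULL, 0);
    return allocated;
}
#endif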
0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, 0), expected, "Unexepected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.dirty_npurge", (void *)&dirty_npurge, &sz, NULL, 0), expected, "Unexepected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise", (void *)&dirty_nmadvise, &sz, NULL, 0), expected, "Unexepected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.dirty_purged", (void *)&dirty_purged, &sz, NULL, 0), expected, "Unexepected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge", (void *)&muzzy_npurge, &sz, NULL, 0), expected, "Unexepected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise", (void *)&muzzy_nmadvise, &sz, NULL, 0), expected, "Unexepected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.muzzy_purged", (void *)&muzzy_purged, &sz, NULL, 0), expected, "Unexepected mallctl() result"); if (config_stats) { if (!background_thread_enabled()) { assert_u64_gt(dirty_npurge + muzzy_npurge, 0, "At least one purge should have occurred"); } assert_u64_le(dirty_nmadvise, dirty_purged, "dirty_nmadvise should be no greater than dirty_purged"); assert_u64_le(muzzy_nmadvise, muzzy_purged, "muzzy_nmadvise should be no greater than muzzy_purged"); } } TEST_END void * thd_start(void *arg) { return NULL; } static void no_lazy_lock(void) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_BEGIN(test_stats_arenas_small) { void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; int expected = config_stats ? 0 : ENOENT; no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ p = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.small.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be no greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_large) { void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc; int expected = config_stats ? 
0 : ENOENT; p = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.large.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); } dallocx(p, 0); } TEST_END static void gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) { sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name); } TEST_BEGIN(test_stats_arenas_bins) { void *p; size_t sz, curslabs, curregs; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nslabs, nreslabs; int expected = config_stats ? 0 : ENOENT; /* Make sure allocation below isn't satisfied by tcache. */ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); unsigned arena_ind, old_arena_ind; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Arena creation failure"); sz = sizeof(arena_ind); assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&arena_ind, sizeof(arena_ind)), 0, "Unexpected mallctl() failure"); p = malloc(bin_infos[0].reg_size); assert_ptr_not_null(p, "Unexpected malloc() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), opt_tcache ? 
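/* Flushing again after the allocation returns cached regions to the bin and bumps the flush counter, so the nfills/nflushes checks below see nonzero values when the tcache is enabled. */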
0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); char cmd[128]; sz = sizeof(uint64_t); gen_mallctl_str(cmd, "nmalloc", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "ndalloc", arena_ind); assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nrequests", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); gen_mallctl_str(cmd, "curregs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); gen_mallctl_str(cmd, "nfills", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nflushes", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nslabs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nreslabs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); gen_mallctl_str(cmd, "curslabs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); assert_zu_gt(curregs, 0, "allocated should be greater than zero"); if (opt_tcache) { assert_u64_gt(nfills, 0, "At least one fill should have occurred"); assert_u64_gt(nflushes, 0, "At least one flush should have occurred"); } assert_u64_gt(nslabs, 0, "At least one slab should have been allocated"); assert_zu_gt(curslabs, 0, "At least one slab should be currently allocated"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_lextents) { void *p; uint64_t epoch, nmalloc, ndalloc; size_t curlextents, sz, hsize; int expected = config_stats ? 
0 : ENOENT; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); p = mallocx(hsize, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents", (void *)&curlextents, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(curlextents, 0, "At least one extent should be currently allocated"); } dallocx(p, 0); } TEST_END int main(void) { return test_no_reentrancy( test_stats_summary, test_stats_large, test_stats_arenas_summary, test_stats_arenas_small, test_stats_arenas_large, test_stats_arenas_bins, test_stats_arenas_lextents); } jemalloc-sys-0.3.2/jemalloc/test/unit/stats_print.c010064400007650000024000000550511340421340100205750ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/util.h" typedef enum { TOKEN_TYPE_NONE, TOKEN_TYPE_ERROR, TOKEN_TYPE_EOI, TOKEN_TYPE_NULL, TOKEN_TYPE_FALSE, TOKEN_TYPE_TRUE, TOKEN_TYPE_LBRACKET, TOKEN_TYPE_RBRACKET, TOKEN_TYPE_LBRACE, TOKEN_TYPE_RBRACE, TOKEN_TYPE_COLON, TOKEN_TYPE_COMMA, TOKEN_TYPE_STRING, TOKEN_TYPE_NUMBER } token_type_t; typedef struct parser_s parser_t; typedef struct { parser_t *parser; token_type_t token_type; size_t pos; size_t len; size_t line; size_t col; } token_t; struct parser_s { bool verbose; char *buf; /* '\0'-terminated. */ size_t len; /* Number of characters preceding '\0' in buf. */ size_t pos; size_t line; size_t col; token_t token; }; static void token_init(token_t *token, parser_t *parser, token_type_t token_type, size_t pos, size_t len, size_t line, size_t col) { token->parser = parser; token->token_type = token_type; token->pos = pos; token->len = len; token->line = line; token->col = col; } static void token_error(token_t *token) { if (!token->parser->verbose) { return; } switch (token->token_type) { case TOKEN_TYPE_NONE: not_reached(); case TOKEN_TYPE_ERROR: malloc_printf("%zu:%zu: Unexpected character in token: ", token->line, token->col); break; default: malloc_printf("%zu:%zu: Unexpected token: ", token->line, token->col); break; } UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO, &token->parser->buf[token->pos], token->len); malloc_printf("\n"); } static void parser_init(parser_t *parser, bool verbose) { parser->verbose = verbose; parser->buf = NULL; parser->len = 0; parser->pos = 0; parser->line = 1; parser->col = 0; } static void parser_fini(parser_t *parser) { if (parser->buf != NULL) { dallocx(parser->buf, MALLOCX_TCACHE_NONE); } } static bool parser_append(parser_t *parser, const char *str) { size_t len = strlen(str); char *buf = (parser->buf == NULL) ? 
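/* First append allocates the '\0'-terminated buffer; later appends grow it in place with rallocx(). MALLOCX_TCACHE_NONE keeps the parser's own allocations out of the thread cache. */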
mallocx(len + 1, MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1, MALLOCX_TCACHE_NONE); if (buf == NULL) { return true; } memcpy(&buf[parser->len], str, len + 1); parser->buf = buf; parser->len += len; return false; } static bool parser_tokenize(parser_t *parser) { enum { STATE_START, STATE_EOI, STATE_N, STATE_NU, STATE_NUL, STATE_NULL, STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE, STATE_T, STATE_TR, STATE_TRU, STATE_TRUE, STATE_LBRACKET, STATE_RBRACKET, STATE_LBRACE, STATE_RBRACE, STATE_COLON, STATE_COMMA, STATE_CHARS, STATE_CHAR_ESCAPE, STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD, STATE_STRING, STATE_MINUS, STATE_LEADING_ZERO, STATE_DIGITS, STATE_DECIMAL, STATE_FRAC_DIGITS, STATE_EXP, STATE_EXP_SIGN, STATE_EXP_DIGITS, STATE_ACCEPT } state = STATE_START; size_t token_pos JEMALLOC_CC_SILENCE_INIT(0); size_t token_line JEMALLOC_CC_SILENCE_INIT(1); size_t token_col JEMALLOC_CC_SILENCE_INIT(0); assert_zu_le(parser->pos, parser->len, "Position is past end of buffer"); while (state != STATE_ACCEPT) { char c = parser->buf[parser->pos]; switch (state) { case STATE_START: token_pos = parser->pos; token_line = parser->line; token_col = parser->col; switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': break; case '\0': state = STATE_EOI; break; case 'n': state = STATE_N; break; case 'f': state = STATE_F; break; case 't': state = STATE_T; break; case '[': state = STATE_LBRACKET; break; case ']': state = STATE_RBRACKET; break; case '{': state = STATE_LBRACE; break; case '}': state = STATE_RBRACE; break; case ':': state = STATE_COLON; break; case ',': state = STATE_COMMA; break; case '"': state = STATE_CHARS; break; case '-': state = STATE_MINUS; break; case '0': state = STATE_LEADING_ZERO; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_EOI: token_init(&parser->token, parser, TOKEN_TYPE_EOI, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_N: switch (c) { case 'u': state = STATE_NU; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_NU: switch (c) { case 'l': state = STATE_NUL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_NUL: switch (c) { case 'l': state = STATE_NULL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_NULL: switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': case '\0': case '[': case ']': case '{': case '}': case ':': case ',': break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_NULL, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_F: switch (c) { case 'a': state = STATE_FA; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FA: switch (c) { case 'l': state = STATE_FAL; break; 
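/* Keyword literals (null/false/true) are matched one character per state; any other character yields a TOKEN_TYPE_ERROR token spanning the partial keyword. */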
default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FAL: switch (c) { case 's': state = STATE_FALS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FALS: switch (c) { case 'e': state = STATE_FALSE; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FALSE: switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': case '\0': case '[': case ']': case '{': case '}': case ':': case ',': break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_FALSE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_T: switch (c) { case 'r': state = STATE_TR; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_TR: switch (c) { case 'u': state = STATE_TRU; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_TRU: switch (c) { case 'e': state = STATE_TRUE; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_TRUE: switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': case '\0': case '[': case ']': case '{': case '}': case ':': case ',': break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_TRUE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_LBRACKET: token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_RBRACKET: token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_LBRACE: token_init(&parser->token, parser, TOKEN_TYPE_LBRACE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_RBRACE: token_init(&parser->token, parser, TOKEN_TYPE_RBRACE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_COLON: token_init(&parser->token, parser, TOKEN_TYPE_COLON, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_COMMA: token_init(&parser->token, parser, TOKEN_TYPE_COMMA, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_CHARS: switch (c) { case '\\': state = STATE_CHAR_ESCAPE; break; case '"': state = STATE_STRING; break; case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: 
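/* Unescaped control characters (0x00..0x1f) are invalid inside a JSON string, so they terminate tokenization with an error. */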
token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; default: break; } break; case STATE_CHAR_ESCAPE: switch (c) { case '"': case '\\': case '/': case 'b': case 'n': case 'r': case 't': state = STATE_CHARS; break; case 'u': state = STATE_CHAR_U; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_U: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHAR_UD; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_UD: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHAR_UDD; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_UDD: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHAR_UDDD; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_UDDD: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHARS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_STRING: token_init(&parser->token, parser, TOKEN_TYPE_STRING, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_MINUS: switch (c) { case '0': state = STATE_LEADING_ZERO; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_LEADING_ZERO: switch (c) { case '.': state = STATE_DECIMAL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; case STATE_DIGITS: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case '.': state = STATE_DECIMAL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; case STATE_DECIMAL: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_FRAC_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - 
token_pos, token_line, token_col); return true; } break; case STATE_FRAC_DIGITS: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case 'e': case 'E': state = STATE_EXP; break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; case STATE_EXP: switch (c) { case '-': case '+': state = STATE_EXP_SIGN; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_EXP_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_EXP_SIGN: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_EXP_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_EXP_DIGITS: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; default: not_reached(); } if (state != STATE_ACCEPT) { if (c == '\n') { parser->line++; parser->col = 0; } else { parser->col++; } parser->pos++; } } return false; } static bool parser_parse_array(parser_t *parser); static bool parser_parse_object(parser_t *parser); static bool parser_parse_value(parser_t *parser) { switch (parser->token.token_type) { case TOKEN_TYPE_NULL: case TOKEN_TYPE_FALSE: case TOKEN_TYPE_TRUE: case TOKEN_TYPE_STRING: case TOKEN_TYPE_NUMBER: return false; case TOKEN_TYPE_LBRACE: return parser_parse_object(parser); case TOKEN_TYPE_LBRACKET: return parser_parse_array(parser); default: return true; } not_reached(); } static bool parser_parse_pair(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, "Pair should start with string"); if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_COLON: if (parser_tokenize(parser)) { return true; } return parser_parse_value(parser); default: return true; } } static bool parser_parse_values(parser_t *parser) { if (parser_parse_value(parser)) { return true; } while (true) { if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_COMMA: if (parser_tokenize(parser)) { return true; } if (parser_parse_value(parser)) { return true; } break; case TOKEN_TYPE_RBRACKET: return false; default: return true; } } } static bool parser_parse_array(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET, "Array should start with ["); if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_RBRACKET: return false; default: return parser_parse_values(parser); } not_reached(); } static bool parser_parse_pairs(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, "Object should start with string"); if (parser_parse_pair(parser)) { return true; } while (true) { if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_COMMA: if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_STRING: if (parser_parse_pair(parser)) { 
return true; } break; default: return true; } break; case TOKEN_TYPE_RBRACE: return false; default: return true; } } } static bool parser_parse_object(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE, "Object should start with {"); if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_STRING: return parser_parse_pairs(parser); case TOKEN_TYPE_RBRACE: return false; default: return true; } not_reached(); } static bool parser_parse(parser_t *parser) { if (parser_tokenize(parser)) { goto label_error; } if (parser_parse_value(parser)) { goto label_error; } if (parser_tokenize(parser)) { goto label_error; } switch (parser->token.token_type) { case TOKEN_TYPE_EOI: return false; default: goto label_error; } not_reached(); label_error: token_error(&parser->token); return true; } TEST_BEGIN(test_json_parser) { size_t i; const char *invalid_inputs[] = { /* Tokenizer error case tests. */ "{ \"string\": X }", "{ \"string\": nXll }", "{ \"string\": nuXl }", "{ \"string\": nulX }", "{ \"string\": nullX }", "{ \"string\": fXlse }", "{ \"string\": faXse }", "{ \"string\": falXe }", "{ \"string\": falsX }", "{ \"string\": falseX }", "{ \"string\": tXue }", "{ \"string\": trXe }", "{ \"string\": truX }", "{ \"string\": trueX }", "{ \"string\": \"\n\" }", "{ \"string\": \"\\z\" }", "{ \"string\": \"\\uX000\" }", "{ \"string\": \"\\u0X00\" }", "{ \"string\": \"\\u00X0\" }", "{ \"string\": \"\\u000X\" }", "{ \"string\": -X }", "{ \"string\": 0.X }", "{ \"string\": 0.0eX }", "{ \"string\": 0.0e+X }", /* Parser error test cases. */ "{\"string\": }", "{\"string\" }", "{\"string\": [ 0 }", "{\"string\": {\"a\":0, 1 } }", "{\"string\": {\"a\":0: } }", "{", "{}{", }; const char *valid_inputs[] = { /* Token tests. */ "null", "false", "true", "{}", "{\"a\": 0}", "[]", "[0, 1]", "0", "1", "10", "-10", "10.23", "10.23e4", "10.23e-4", "10.23e+4", "10.23E4", "10.23E-4", "10.23E+4", "-10.23", "-10.23e4", "-10.23e-4", "-10.23e+4", "-10.23E4", "-10.23E-4", "-10.23E+4", "\"value\"", "\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"", /* Parser test with various nesting. 
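The nested input below exercises the mutually recursive parser_parse_object() / parser_parse_array() paths: arrays inside objects and objects inside arrays.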
*/ "{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}", }; for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) { const char *input = invalid_inputs[i]; parser_t parser; parser_init(&parser, false); assert_false(parser_append(&parser, input), "Unexpected input appending failure"); assert_true(parser_parse(&parser), "Unexpected parse success for input: %s", input); parser_fini(&parser); } for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) { const char *input = valid_inputs[i]; parser_t parser; parser_init(&parser, true); assert_false(parser_append(&parser, input), "Unexpected input appending failure"); assert_false(parser_parse(&parser), "Unexpected parse error for input: %s", input); parser_fini(&parser); } } TEST_END void write_cb(void *opaque, const char *str) { parser_t *parser = (parser_t *)opaque; if (parser_append(parser, str)) { test_fail("Unexpected input appending failure"); } } TEST_BEGIN(test_stats_print_json) { const char *opts[] = { "J", "Jg", "Jm", "Jd", "Jmd", "Jgd", "Jgm", "Jgmd", "Ja", "Jb", "Jl", "Jx", "Jbl", "Jal", "Jab", "Jabl", "Jax", "Jbx", "Jlx", "Jablx", "Jgmdablx", }; unsigned arena_ind, i; for (i = 0; i < 3; i++) { unsigned j; switch (i) { case 0: break; case 1: { size_t sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl failure"); break; } case 2: { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); mib[1] = arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib failure"); break; } default: not_reached(); } for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) { parser_t parser; parser_init(&parser, true); malloc_stats_print(write_cb, (void *)&parser, opts[j]); assert_false(parser_parse(&parser), "Unexpected parse error, opts=\"%s\"", opts[j]); parser_fini(&parser); } } } TEST_END int main(void) { return test( test_json_parser, test_stats_print_json); } jemalloc-sys-0.3.2/jemalloc/test/unit/ticker.c010064400007650000024000000037721340421340100175070ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ticker.h" TEST_BEGIN(test_ticker_tick) { #define NREPS 2 #define NTICKS 3 ticker_t ticker; int32_t i, j; ticker_init(&ticker, NTICKS); for (i = 0; i < NREPS; i++) { for (j = 0; j < NTICKS; j++) { assert_u_eq(ticker_read(&ticker), NTICKS - j, "Unexpected ticker value (i=%d, j=%d)", i, j); assert_false(ticker_tick(&ticker), "Unexpected ticker fire (i=%d, j=%d)", i, j); } assert_u32_eq(ticker_read(&ticker), 0, "Expected ticker depletion"); assert_true(ticker_tick(&ticker), "Expected ticker fire (i=%d)", i); assert_u32_eq(ticker_read(&ticker), NTICKS, "Expected ticker reset"); } #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_ticks) { #define NTICKS 3 ticker_t ticker; ticker_init(&ticker, NTICKS); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_copy) { #define NTICKS 3 ticker_t ta, tb; ticker_init(&ta, 
NTICKS); ticker_copy(&tb, &ta); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); ticker_tick(&ta); ticker_copy(&tb, &ta); assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END int main(void) { return test( test_ticker_tick, test_ticker_ticks, test_ticker_copy); } jemalloc-sys-0.3.2/jemalloc/test/unit/tsd.c010064400007650000024000000062151340421341300170160ustar0000000000000000#include "test/jemalloc_test.h" static int data_cleanup_count; void data_cleanup(int *data) { if (data_cleanup_count == 0) { assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT, "Argument passed into cleanup function should match tsd " "value"); } ++data_cleanup_count; /* * Allocate during cleanup for two rounds, in order to ensure that * jemalloc's internal tsd reinitialization happens. */ bool reincarnate = false; switch (*data) { case MALLOC_TSD_TEST_DATA_INIT: *data = 1; reincarnate = true; break; case 1: *data = 2; reincarnate = true; break; case 2: return; default: not_reached(); } if (reincarnate) { void *p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); } } static void * thd_start(void *arg) { int d = (int)(uintptr_t)arg; void *p; tsd_t *tsd = tsd_fetch(); assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT, "Initial tsd get should return initialization value"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); tsd_test_data_set(tsd, d); assert_x_eq(tsd_test_data_get(tsd), d, "After tsd set, tsd get should return value that was set"); d = 0; assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg, "Resetting local data should have no effect on tsd"); tsd_test_callback_set(tsd, &data_cleanup); free(p); return NULL; } TEST_BEGIN(test_tsd_main_thread) { thd_start((void *)(uintptr_t)0xa5f3e329); } TEST_END TEST_BEGIN(test_tsd_sub_thread) { thd_t thd; data_cleanup_count = 0; thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT); thd_join(thd, NULL); /* * We reincarnate twice in the data cleanup, so it should execute at * least 3 times. */ assert_x_ge(data_cleanup_count, 3, "Cleanup function should have executed multiple times."); } TEST_END static void * thd_start_reincarnated(void *arg) { tsd_t *tsd = tsd_fetch(); assert(tsd); void *p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); /* Manually trigger reincarnation. */ assert_ptr_not_null(tsd_arena_get(tsd), "Should have tsd arena set."); tsd_cleanup((void *)tsd); assert_ptr_null(*tsd_arenap_get_unsafe(tsd), "TSD arena should have been cleared."); assert_u_eq(tsd->state, tsd_state_purgatory, "TSD state should be purgatory\n"); free(p); assert_u_eq(tsd->state, tsd_state_reincarnated, "TSD state should be reincarnated\n"); p = mallocx(1, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected malloc() failure"); assert_ptr_null(*tsd_arenap_get_unsafe(tsd), "Should not have tsd arena set after reincarnation."); free(p); tsd_cleanup((void *)tsd); assert_ptr_null(*tsd_arenap_get_unsafe(tsd), "TSD arena should have been cleared after 2nd cleanup."); return NULL; } TEST_BEGIN(test_tsd_reincarnation) { thd_t thd; thd_create(&thd, thd_start_reincarnated, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Ensure tsd bootstrapped.
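Calling any allocation entry point (here nallocx()) triggers jemalloc's lazy initialization; a zero return indicates that initialization failed.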
*/ if (nallocx(1, 0) == 0) { malloc_printf("Initialization error"); return test_status_fail; } return test_no_reentrancy( test_tsd_main_thread, test_tsd_sub_thread, test_tsd_reincarnation); } jemalloc-sys-0.3.2/jemalloc/test/unit/witness.c010064400007650000024000000176421340421340100177230ustar0000000000000000#include "test/jemalloc_test.h" static witness_lock_error_t *witness_lock_error_orig; static witness_owner_error_t *witness_owner_error_orig; static witness_not_owner_error_t *witness_not_owner_error_orig; static witness_depth_error_t *witness_depth_error_orig; static bool saw_lock_error; static bool saw_owner_error; static bool saw_not_owner_error; static bool saw_depth_error; static void witness_lock_error_intercept(const witness_list_t *witnesses, const witness_t *witness) { saw_lock_error = true; } static void witness_owner_error_intercept(const witness_t *witness) { saw_owner_error = true; } static void witness_not_owner_error_intercept(const witness_t *witness) { saw_not_owner_error = true; } static void witness_depth_error_intercept(const witness_list_t *witnesses, witness_rank_t rank_inclusive, unsigned depth) { saw_depth_error = true; } static int witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) { assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); assert(oa == (void *)a); assert(ob == (void *)b); return strcmp(a->name, b->name); } static int witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, void *ob) { assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); assert(oa == (void *)a); assert(ob == (void *)b); return -strcmp(a->name, b->name); } TEST_BEGIN(test_witness) { witness_t a, b; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); witness_init(&a, "a", 1, NULL, NULL); witness_assert_not_owner(&witness_tsdn, &a); witness_lock(&witness_tsdn, &a); witness_assert_owner(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0); witness_init(&b, "b", 2, NULL, NULL); witness_assert_not_owner(&witness_tsdn, &b); witness_lock(&witness_tsdn, &b); witness_assert_owner(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 2); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); witness_unlock(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); witness_unlock(&witness_tsdn, &b); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); } TEST_END TEST_BEGIN(test_witness_comp) { witness_t a, b, c, d; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, witness_comp, &a); witness_assert_not_owner(&witness_tsdn, &a); witness_lock(&witness_tsdn, &a); witness_assert_owner(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_init(&b, "b", 1, 
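/* Same rank as "a"; ordering within a rank is delegated to the witness_comp comparison function. */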
witness_comp, &b); witness_assert_not_owner(&witness_tsdn, &b); witness_lock(&witness_tsdn, &b); witness_assert_owner(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 2); witness_unlock(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 1); witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_init(&c, "c", 1, witness_comp_reverse, &c); witness_assert_not_owner(&witness_tsdn, &c); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(&witness_tsdn, &c); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(&witness_tsdn, &c); witness_assert_depth(&witness_tsdn, 1); saw_lock_error = false; witness_init(&d, "d", 1, NULL, NULL); witness_assert_not_owner(&witness_tsdn, &d); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(&witness_tsdn, &d); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(&witness_tsdn, &d); witness_assert_depth(&witness_tsdn, 1); witness_unlock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_reversal) { witness_t a, b; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, NULL, NULL); witness_init(&b, "b", 2, NULL, NULL); witness_lock(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 1); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(&witness_tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_unlock(&witness_tsdn, &b); witness_assert_lockless(&witness_tsdn); witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_recursive) { witness_t a; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_not_owner_error_orig = witness_not_owner_error; witness_not_owner_error = witness_not_owner_error_intercept; saw_not_owner_error = false; witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, NULL, NULL); witness_lock(&witness_tsdn, &a); assert_false(saw_lock_error, "Unexpected witness lock error"); assert_false(saw_not_owner_error, "Unexpected witness not owner error"); witness_lock(&witness_tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); assert_true(saw_not_owner_error, "Expected witness not owner error"); witness_unlock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_owner_error = witness_owner_error_orig; witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_unlock_not_owned) { witness_t a; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_owner_error_orig = witness_owner_error; witness_owner_error = witness_owner_error_intercept; saw_owner_error = false; witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, NULL, NULL); assert_false(saw_owner_error, "Unexpected owner error"); witness_unlock(&witness_tsdn, &a); assert_true(saw_owner_error, "Expected owner error"); witness_assert_lockless(&witness_tsdn); witness_owner_error = 
witness_owner_error_orig; } TEST_END TEST_BEGIN(test_witness_depth) { witness_t a; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_depth_error_orig = witness_depth_error; witness_depth_error = witness_depth_error_intercept; saw_depth_error = false; witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_init(&a, "a", 1, NULL, NULL); assert_false(saw_depth_error, "Unexpected depth error"); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_lock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); assert_true(saw_depth_error, "Expected depth error"); witness_unlock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_depth_error = witness_depth_error_orig; } TEST_END int main(void) { return test( test_witness, test_witness_comp, test_witness_reversal, test_witness_recursive, test_witness_unlock_not_owned, test_witness_depth); } jemalloc-sys-0.3.2/jemalloc/test/unit/zero.c010064400007650000024000000023641340421341300172040ustar0000000000000000#include "test/jemalloc_test.h" static void test_zero(size_t sz_min, size_t sz_max) { uint8_t *s; size_t sz_prev, sz, i; #define MAGIC ((uint8_t)0x61) sz_prev = 0; s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { assert_u_eq(s[0], MAGIC, "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); assert_u_eq(s[sz_prev-1], MAGIC, "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { assert_u_eq(s[i], 0x0, "Newly allocated byte %zu/%zu isn't zero-filled", i, sz); s[i] = MAGIC; } if (xallocx(s, sz+1, 0, 0) == sz) { s = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); } } dallocx(s, 0); #undef MAGIC } TEST_BEGIN(test_zero_small) { test_skip_if(!config_fill); test_zero(1, SMALL_MAXCLASS-1); } TEST_END TEST_BEGIN(test_zero_large) { test_skip_if(!config_fill); test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1))); } TEST_END int main(void) { return test( test_zero_small, test_zero_large); } jemalloc-sys-0.3.2/jemalloc/test/unit/zero.sh010064400007650000024000000001551340421340100173650ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,junk:false,zero:true" fi jemalloc-sys-0.3.2/jemalloc/TUNING.md010064400007650000024000000134401340421340100154430ustar0000000000000000This document summarizes the common approaches for performance fine tuning with jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work reasonably well in practice, and most applications should not have to tune any options. However, in order to cover a wide range of applications and avoid pathological cases, the default setting is sometimes kept conservative and suboptimal, even for many common workloads. When jemalloc is properly tuned for a specific application / workload, it is common to improve system level metrics by a few percent, or make favorable trade-offs. ## Notable runtime options for performance tuning Runtime options can be set via [malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning). 
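Many of these options are also exposed through jemalloc's `mallctl` interface, so the writable ones can be toggled at run time as well. A minimal sketch using the Rust bindings from this crate (assuming a build with background-thread run-time support; `jemalloc_sys::mallctl` is the only API used, and the helper name is ours):

```rust
use libc::{c_char, c_void};

/// Enable jemalloc background threads at run time via the
/// writable "background_thread" mallctl.
fn enable_background_threads() {
    let name = b"background_thread\0".as_ptr() as *const c_char;
    let mut enable: u8 = 1; // C bool
    let ret = unsafe {
        jemalloc_sys::mallctl(
            name,
            std::ptr::null_mut(), // oldp: previous value not read
            std::ptr::null_mut(), // oldlenp
            &mut enable as *mut u8 as *mut c_void,
            std::mem::size_of::<u8>(),
        )
    };
    assert_eq!(ret, 0, "failed to enable background threads");
}
```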
* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread) Enabling jemalloc background threads generally improves the tail latency for application threads, since unused memory purging is shifted to the dedicated background threads. In addition, unintended purging delay caused by application inactivity is avoided with background threads. Suggested: `background_thread:true` when jemalloc managed threads can be allowed. * [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp) Allowing jemalloc to utilize transparent huge pages for its internal metadata usually reduces TLB misses significantly, especially for programs with a large memory footprint and frequent allocation / deallocation activities. Metadata memory usage may increase due to the use of huge pages. Suggested for allocation intensive programs: `metadata_thp:auto` or `metadata_thp:always`, which is expected to improve CPU utilization at a small memory cost. * [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms) Decay time determines how fast jemalloc returns unused pages back to the operating system, and therefore provides a fairly straightforward trade-off between CPU and memory usage. A shorter decay time purges unused pages faster, reducing memory usage (usually at the cost of more CPU cycles spent on purging), and vice versa. Suggested: tune the values based on the desired trade-offs. * [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas) By default jemalloc uses multiple arenas to reduce internal lock contention. However, a high arena count may also increase overall memory fragmentation, since arenas manage memory independently. When a high degree of parallelism is not expected at the allocator level, a lower number of arenas often improves memory usage. Suggested: if low parallelism is expected, try a lower arena count while monitoring CPU and memory usage. * [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena) Enable dynamic thread-to-arena association based on the running CPU. This has the potential to improve locality, e.g. when thread-to-CPU affinity is present. Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if thread migration between processors is expected to be infrequent. Examples: * High resource consumption application, prioritizing CPU utilization: `background_thread:true,metadata_thp:auto` combined with relaxed decay time (increased `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`). * High resource consumption application, prioritizing memory usage: `background_thread:true` combined with shorter decay time (decreased `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count (e.g. number of CPUs). * Low resource consumption application: `narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g. `dirty_decay_ms:1000,muzzy_decay_ms:0`). * Extremely conservative -- minimize memory usage at all costs, only suitable when allocation activity is very rare: `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0` Note that it is recommended to combine the options with `abort_conf:true`, which aborts immediately on illegal options. ## Beyond runtime options In addition to the runtime options, there are a number of programmatic ways to improve application performance with jemalloc.
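As a concrete illustration of the first item below (explicit arenas), here is a hedged sketch using this crate's Rust bindings; `arenas.create`, `mallocx`, and `MALLOCX_ARENA` are the jemalloc APIs involved, while the helper name and the requested size are ours:

```rust
use libc::c_void;
use std::{mem, ptr};

/// Create a dedicated arena and allocate `size` bytes from it.
unsafe fn alloc_in_new_arena(size: usize) -> *mut c_void {
    // "arenas.create" returns the new arena's index through `oldp`.
    let mut arena_ind: u32 = 0;
    let mut sz = mem::size_of::<u32>();
    let ret = jemalloc_sys::mallctl(
        b"arenas.create\0".as_ptr() as *const _,
        &mut arena_ind as *mut u32 as *mut c_void,
        &mut sz,         // in: capacity of `arena_ind`; out: bytes written
        ptr::null_mut(), // newp: no new value supplied
        0,
    );
    assert_eq!(ret, 0, "arenas.create failed");
    // Allocations tagged with MALLOCX_ARENA come from the new arena.
    let p = jemalloc_sys::mallocx(size, jemalloc_sys::MALLOCX_ARENA(arena_ind as usize));
    assert!(!p.is_null());
    p // release later with jemalloc_sys::dallocx(p, 0)
}
```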
* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create) Manually created arenas can help performance in various ways, e.g. by managing locality and contention for specific usages. For example, applications can explicitly allocate frequently accessed objects from a dedicated arena with [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve locality. In addition, explicit arenas often benefit from individually tuned options, e.g. relaxed [decay time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if frequent reuse is expected. * [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks) Extent hooks allow customization for managing underlying memory. One use case for performance purpose is to utilize huge pages -- for example, [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp) uses explicit arenas with customized extent hooks to manage 1GB huge pages for frequently accessed data, which reduces TLB misses significantly. * [Explicit thread-to-arena binding](http://jemalloc.net/jemalloc.3.html#thread.arena) It is common for some threads in an application to have different memory access / allocation patterns. Threads with heavy workloads often benefit from explicit binding, e.g. binding very active threads to dedicated arenas may reduce contention at the allocator level. jemalloc-sys-0.3.2/README.md010064400007650000024000000206621350066755100136320ustar0000000000000000# jemalloc-sys - Rust bindings to the `jemalloc` C library [![Travis-CI Status]][travis] [![Appveyor Status]][appveyor] [![Latest Version]][crates.io] [![docs]][docs.rs] > Note: the Rust allocator API is implemented for `jemalloc` in the > [`jemallocator`](https://crates.io/crates/jemallocator) crate. ## Documentation * [Latest release (docs.rs)][docs.rs] * [master branch`][master_docs] `jemalloc` is a general purpose memory allocator, its documentation can be found here: * [API documentation][jemalloc_docs] * [Wiki][jemalloc_wiki] (design documents, presentations, profiling, debugging, tuning, ...) [jemalloc_docs]: http://jemalloc.net/jemalloc.3.html [jemalloc_wiki]: https://github.com/jemalloc/jemalloc/wiki **Current jemalloc version**: 5.1. ## Platform support See the platform support of the [`jemallocator`](https://crates.io/crates/jemallocator) crate. ## Features Most features correspond to `jemalloc` features - the reference is [`jemalloc/INSTALL.md`][jemalloc_install]. ### Cargo features This crate provides following cargo feature flags: * `profiling` (configure `jemalloc` with `--enable-prof`): Enable heap profiling and leak detection functionality. See jemalloc's "opt.prof" option documentation for usage details. When enabled, there are several approaches to backtracing, and the configure script chooses the first one in the following list that appears to function correctly: * `libunwind` (requires --enable-prof-libunwind) * `libgcc` (unless --disable-prof-libgcc) * `gcc intrinsics` (unless --disable-prof-gcc) * `stats` (configure `jemalloc` with `--enable-stats`): Enable statistics gathering functionality. See the `jemalloc`'s "`opt.stats_print`" option documentation for usage details. * `debug` (configure `jemalloc` with `--enable-debug`): Enable assertions and validation code. This incurs a substantial performance hit, but is very useful during application development. 
* `background_threads_runtime_support` (enabled by default): enables background-threads run-time support when building `jemalloc-sys` on some POSIX targets supported by `jemalloc`. Background threads are disabled at run-time by default. This option allows dynamically enabling them at run-time. * `background_threads` (disabled by default): enables background threads by default at run-time. When set to true, background threads are created on demand (the number of background threads will be no more than the number of CPUs or active arenas). Threads run periodically, and handle purging asynchronously. When switching off, background threads are terminated synchronously. Note that after a `fork(2)` call, the state in the child process will be disabled regardless of the state in the parent process. See `stats.background_thread` for related stats. `opt.background_thread` can be used to set the default option. The background thread is only available on selected pthread-based platforms. * `unprefixed_malloc_on_supported_platforms`: when disabled, configure `jemalloc` with `--with-jemalloc-prefix=_rjem_`. Enabling this causes symbols like `malloc` to be emitted without a prefix, overriding the ones defined by libc. This usually causes C and C++ code linked in the same program to use `jemalloc` as well. On some platforms prefixes are always used because unprefixing is known to cause segfaults due to allocator mismatches. * `disable_initial_exec_tls` (disabled by default): when enabled, jemalloc is built with the `--disable-initial-exec-tls` option. It disables the initial-exec TLS model for jemalloc's internal thread-local storage (on those platforms that support explicit settings). This can allow jemalloc to be dynamically loaded after program startup (e.g. using dlopen). If you encounter the error `yourlib.so: cannot allocate memory in static TLS block`, you'll likely want to enable this. ### Environment variables `jemalloc` options taking values are passed via environment variables using the schema `JEMALLOC_SYS_{KEY}=VALUE` where the `KEY` names correspond to the `./configure` options of `jemalloc` where the words are capitalized and the hyphens `-` are replaced with underscores `_` (see [`jemalloc/INSTALL.md`][jemalloc_install]): * `JEMALLOC_SYS_WITH_MALLOC_CONF=<malloc_conf>`: Embed `<malloc_conf>` as a run-time options string that is processed prior to the `malloc_conf` global variable, the `/etc/malloc.conf` symlink, and the `MALLOC_CONF` environment variable (note: this variable might be prefixed as `_RJEM_MALLOC_CONF`). For example, to change the default decay time to 30 seconds: ``` JEMALLOC_SYS_WITH_MALLOC_CONF=decay_ms:30000 ``` * `JEMALLOC_SYS_WITH_LG_PAGE=<lg-page>`: Specify the base 2 log of the allocator page size, which must in turn be at least as large as the system page size. By default the configure script determines the host's page size and sets the allocator page size equal to the system page size, so this option need not be specified unless the system page size may change between configuration and execution, e.g. when cross compiling. * `JEMALLOC_SYS_WITH_LG_HUGEPAGE=<lg-hugepage>`: Specify the base 2 log of the system huge page size. This option is useful when cross compiling, or when overriding the default for systems that do not explicitly support huge pages. * `JEMALLOC_SYS_WITH_LG_QUANTUM=<lg-quantum>`: Specify the base 2 log of the minimum allocation alignment.
jemalloc needs to know the minimum alignment that meets the following C standard requirement (quoted from the April 12, 2011 draft of the C11 standard): > The pointer returned if the allocation succeeds is suitably aligned so that > it may be assigned to a pointer to any type of object with a fundamental > alignment requirement and then used to access such an object or an array of > such objects in the space allocated [...] This setting is architecture-specific, and although jemalloc includes known safe values for the most commonly used modern architectures, there is a wrinkle related to GNU libc (glibc) that may impact your choice of `<lg-quantum>`. On most modern architectures, this mandates 16-byte alignment (`<lg-quantum>`=4), but the glibc developers chose not to meet this requirement for performance reasons. An old discussion can be found at https://sourceware.org/bugzilla/show_bug.cgi?id=206 . Unlike glibc, jemalloc does follow the C standard by default (caveat: jemalloc technically cheats for size classes smaller than the quantum), but the fact that Linux systems already work around this allocator noncompliance means that it is generally safe in practice to let jemalloc's minimum alignment follow glibc's lead. If you specify `JEMALLOC_SYS_WITH_LG_QUANTUM=3` during configuration, jemalloc will provide additional size classes that are not 16-byte-aligned (24, 40, and 56). * `JEMALLOC_SYS_WITH_LG_VADDR=<lg-vaddr>`: Specify the number of significant virtual address bits. By default, the configure script attempts to detect virtual address size on those platforms where it knows how, and picks a default otherwise. This option may be useful when cross-compiling. * `JEMALLOC_SYS_GIT_DEV_BRANCH`: when this environment variable is defined, the latest commit from `jemalloc`'s dev branch is fetched from `https://github.com/jemalloc/jemalloc` and built. [jemalloc_install]: https://github.com/jemalloc/jemalloc/blob/dev/INSTALL.md#advanced-configuration ## License This project is licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in `jemalloc-sys` by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. [travis]: https://travis-ci.org/gnzlbg/jemallocator [Travis-CI Status]: https://travis-ci.org/gnzlbg/jemallocator.svg?branch=master [appveyor]: https://ci.appveyor.com/project/gnzlbg/jemallocator/branch/master [Appveyor Status]: https://ci.appveyor.com/api/projects/status/github/gnzlbg/jemallocator?branch=master&svg=true [Latest Version]: https://img.shields.io/crates/v/jemalloc-sys.svg [crates.io]: https://crates.io/crates/jemalloc-sys [docs]: https://docs.rs/jemalloc-sys/badge.svg [docs.rs]: https://docs.rs/jemalloc-sys/ [master_docs]: https://gnzlbg.github.io/jemallocator/jemalloc-sys jemalloc-sys-0.3.2/src/lib.rs010064400007650000024000001056271344440640100142510ustar0000000000000000//! Rust bindings to the `jemalloc` C library. //! //! `jemalloc` is a general purpose memory allocator; its documentation //! can be found here: //! //! * [API documentation][jemalloc_docs] //! * [Wiki][jemalloc_wiki] (design documents, presentations, profiling, debugging, tuning, ...) //! //! `jemalloc` exposes both a standard and a non-standard API. //! //! # Standard API //! //!
//! The standard API includes [`malloc`], [`calloc`], [`realloc`], and
//! [`free`], which conform to ISO/IEC 9899:1990 (“ISO C90”);
//! [`posix_memalign`], which conforms to POSIX.1-2016; and [`aligned_alloc`].
//!
//! Note that these standards leave some details as _implementation defined_.
//! These docs document that behavior for `jemalloc`, but keep in mind that
//! other standard-conforming implementations of these functions in other
//! allocators might behave slightly differently.
//!
//! # Non-Standard API
//!
//! The non-standard API includes: [`mallocx`], [`rallocx`], [`xallocx`],
//! [`sallocx`], [`dallocx`], [`sdallocx`], and [`nallocx`]. These functions
//! all have a `flags` argument that can be used to specify options. Use
//! bitwise or `|` to specify one or more of the following:
//! [`MALLOCX_LG_ALIGN`], [`MALLOCX_ALIGN`], [`MALLOCX_ZERO`],
//! [`MALLOCX_TCACHE`], [`MALLOCX_TCACHE_NONE`], and [`MALLOCX_ARENA`].
//!
//! # Environment variables
//!
//! The `MALLOC_CONF` environment variable affects the execution of the
//! allocation functions.
//!
//! For the documentation of the [`MALLCTL` namespace, visit the jemalloc
//! documentation][jemalloc_mallctl].
//!
//! [jemalloc_docs]: http://jemalloc.net/jemalloc.3.html
//! [jemalloc_wiki]: https://github.com/jemalloc/jemalloc/wiki
//! [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace

#![no_std]
#![allow(non_snake_case, non_camel_case_types)]
#![cfg_attr(
    feature = "cargo-clippy",
    allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)
)]
#![deny(missing_docs, intra_doc_link_resolution_failure)]

extern crate libc;

use libc::{c_char, c_int, c_uint, c_void, size_t};

type c_bool = c_int;

/// Align the memory allocation to start at an address that is a
/// multiple of `1 << la`.
///
/// # Safety
///
/// It does not validate that `la` is within the valid range.
#[inline]
pub fn MALLOCX_LG_ALIGN(la: usize) -> c_int {
    la as c_int
}

/// Align the memory allocation to start at an address that is a multiple of
/// `align`, where `align` is a power of two.
///
/// # Safety
///
/// This function does not validate that `align` is a power of 2.
#[inline]
pub fn MALLOCX_ALIGN(align: usize) -> c_int {
    align.trailing_zeros() as c_int
}

/// Initialize newly allocated memory to contain zero bytes.
///
/// In the growing reallocation case, the real size prior to reallocation
/// defines the boundary between untouched bytes and those that are
/// initialized to contain zero bytes.
///
/// If this option is not set, newly allocated memory is uninitialized.
pub const MALLOCX_ZERO: c_int = 0x40;

/// Use the thread-specific cache (_tcache_) specified by the identifier `tc`.
///
/// # Safety
///
/// `tc` must have been acquired via the `tcache.create` mallctl. This
/// function does not validate that `tc` specifies a valid identifier.
#[inline]
pub fn MALLOCX_TCACHE(tc: usize) -> c_int {
    tc.wrapping_add(2).wrapping_shl(8) as c_int
}

/// Do not use a thread-specific cache (_tcache_).
///
/// Unless `MALLOCX_TCACHE(tc)` or `MALLOCX_TCACHE_NONE` is specified, an
/// automatically managed _tcache_ will be used under many circumstances.
///
/// # Safety
///
/// This option cannot be used in the same `flags` argument as
/// `MALLOCX_TCACHE(tc)`.
// FIXME: This should just be a const.
#[inline]
pub fn MALLOCX_TCACHE_NONE() -> c_int {
    MALLOCX_TCACHE(!0)
}

/// Use the arena specified by the index `a`.
///
/// This option has no effect for regions that were allocated via an arena
/// other than the one specified.
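///
/// # Example
///
/// A minimal sketch (not part of the upstream docs) of pairing
/// `MALLOCX_ARENA` with an arena index obtained via the `arenas.create`
/// mallctl; error handling is reduced to assertions:
///
/// ```no_run
/// extern crate jemalloc_sys;
/// extern crate libc;
///
/// use jemalloc_sys::{dallocx, mallctl, mallocx, MALLOCX_ARENA};
/// use libc::{c_uint, c_void, size_t};
///
/// fn main() {
///     unsafe {
///         // Create a new arena and read back its index.
///         let mut arena: c_uint = 0;
///         let mut len = std::mem::size_of::<c_uint>() as size_t;
///         let ret = mallctl(
///             b"arenas.create\0".as_ptr() as *const _,
///             &mut arena as *mut c_uint as *mut c_void,
///             &mut len,
///             std::ptr::null_mut(),
///             0,
///         );
///         assert_eq!(ret, 0);
///
///         // Allocate 64 bytes from that arena, then deallocate them.
///         let ptr = mallocx(64, MALLOCX_ARENA(arena as usize));
///         assert!(!ptr.is_null());
///         dallocx(ptr, 0);
///     }
/// }
/// ```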
///
/// # Safety
///
/// This function does not validate that `a` specifies an arena index in the
/// valid range.
#[inline]
pub fn MALLOCX_ARENA(a: usize) -> c_int {
    (a as c_int).wrapping_add(1).wrapping_shl(20)
}

extern "C" {
    /// Allocates `size` bytes of uninitialized memory.
    ///
    /// It returns a pointer to the start (lowest byte address) of the
    /// allocated space. This pointer is suitably aligned so that it may be
    /// assigned to a pointer to any type of object and then used to access
    /// such an object in the space allocated until the space is explicitly
    /// deallocated. Each yielded pointer points to an object disjoint from
    /// any other object.
    ///
    /// If the `size` of the space requested is zero, either a null pointer
    /// is returned, or the behavior is as if the `size` were some nonzero
    /// value, except that the returned pointer shall not be used to access
    /// an object.
    ///
    /// # Errors
    ///
    /// If the space cannot be allocated, a null pointer is returned and
    /// `errno` is set to `ENOMEM`.
    #[cfg_attr(prefixed, link_name = "_rjem_malloc")]
    pub fn malloc(size: size_t) -> *mut c_void;

    /// Allocates zero-initialized space for an array of `number` objects,
    /// each of whose size is `size`.
    ///
    /// The result is identical to calling [`malloc`] with an argument of
    /// `number * size`, with the exception that the allocated memory is
    /// explicitly initialized to _zero_ bytes.
    ///
    /// Note: zero-initialized memory need not be the same as the
    /// representation of floating-point zero or a null pointer constant.
    #[cfg_attr(prefixed, link_name = "_rjem_calloc")]
    pub fn calloc(number: size_t, size: size_t) -> *mut c_void;

    /// Allocates `size` bytes of memory at an address which is a multiple of
    /// `alignment`, and places that address in `*ptr`.
    ///
    /// If `size` is zero, then the value placed in `*ptr` is either null, or
    /// the behavior is as if the `size` were some nonzero value, except that
    /// the returned pointer shall not be used to access an object.
    ///
    /// # Errors
    ///
    /// On success, it returns zero. On error, the value of `errno` is _not_
    /// set, `*ptr` is not modified, and the return values can be:
    ///
    /// - `EINVAL`: the `alignment` argument was not a power-of-two or was
    ///   not a multiple of `mem::size_of::<*const c_void>()`.
    /// - `ENOMEM`: there was insufficient memory to fulfill the allocation
    ///   request.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `ptr` is null.
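    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the upstream docs) of requesting a
    /// 64-byte-aligned region and releasing it with [`free`]:
    ///
    /// ```no_run
    /// extern crate jemalloc_sys;
    /// extern crate libc;
    ///
    /// use libc::c_void;
    ///
    /// fn main() {
    ///     unsafe {
    ///         let mut ptr: *mut c_void = std::ptr::null_mut();
    ///         let ret = jemalloc_sys::posix_memalign(&mut ptr, 64, 256);
    ///         assert_eq!(ret, 0);
    ///         assert!(!ptr.is_null());
    ///         // The returned address honors the requested alignment.
    ///         assert_eq!(ptr as usize % 64, 0);
    ///         jemalloc_sys::free(ptr);
    ///     }
    /// }
    /// ```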
    #[cfg_attr(prefixed, link_name = "_rjem_posix_memalign")]
    pub fn posix_memalign(ptr: *mut *mut c_void, alignment: size_t, size: size_t) -> c_int;

    /// Allocates `size` bytes of memory at an address which is a multiple of
    /// `alignment`.
    ///
    /// If the `size` of the space requested is zero, either a null pointer
    /// is returned, or the behavior is as if the `size` were some nonzero
    /// value, except that the returned pointer shall not be used to access
    /// an object.
    ///
    /// # Errors
    ///
    /// Returns null if the request fails.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `alignment` is not a power-of-two
    /// * `size` is not an integral multiple of `alignment`
    #[cfg_attr(prefixed, link_name = "_rjem_aligned_alloc")]
    pub fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void;

    /// Resizes the previously-allocated memory region referenced by `ptr` to
    /// `size` bytes.
    ///
    /// Deallocates the old object pointed to by `ptr` and returns a pointer
    /// to a new object that has the size specified by `size`. The contents
    /// of the new object are the same as that of the old object prior to
    /// deallocation, up to the lesser of the new and old sizes.
    ///
    /// The memory in the new object beyond the size of the old object is
    /// uninitialized.
    ///
    /// The returned pointer to a new object may have the same value as a
    /// pointer to the old object, but [`realloc`] may move the memory
    /// allocation, resulting in a different return value than `ptr`.
    ///
    /// If `ptr` is null, [`realloc`] behaves identically to [`malloc`] for
    /// the specified size.
    ///
    /// If the size of the space requested is zero, the behavior is
    /// implementation-defined: either a null pointer is returned, or the
    /// behavior is as if the size were some nonzero value, except that the
    /// returned pointer shall not be used to access an object.
    ///
    /// # Errors
    ///
    /// If memory for the new object cannot be allocated, the old object is
    /// not deallocated, its value is unchanged, [`realloc`] returns null,
    /// and `errno` is set to `ENOMEM`.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `ptr` does not match a pointer previously returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_realloc")]
    pub fn realloc(ptr: *mut c_void, size: size_t) -> *mut c_void;

    /// Deallocates the previously-allocated memory region referenced by
    /// `ptr`.
    ///
    /// This makes the space available for future allocations.
    ///
    /// If `ptr` is null, no action occurs.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `ptr` does not match a pointer earlier returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_free")]
    pub fn free(ptr: *mut c_void);

    /// Allocates at least `size` bytes of memory according to `flags`.
    ///
    /// It returns a pointer to the start (lowest byte address) of the
    /// allocated space. This pointer is suitably aligned so that it may be
    /// assigned to a pointer to any type of object and then used to access
    /// such an object in the space allocated until the space is explicitly
    /// deallocated. Each yielded pointer points to an object disjoint from
    /// any other object.
    ///
    /// # Errors
    ///
    /// On success it returns a non-null pointer. A null pointer return value
    /// indicates that insufficient contiguous memory was available to
    /// service the allocation request.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if `size == 0`.
    #[cfg_attr(prefixed, link_name = "_rjem_mallocx")]
    pub fn mallocx(size: size_t, flags: c_int) -> *mut c_void;

    /// Resizes the previously-allocated memory region referenced by `ptr` to
    /// be at least `size` bytes.
    ///
    /// Deallocates the old object pointed to by `ptr` and returns a pointer
    /// to a new object that has the size specified by `size`. The contents
    /// of the new object are the same as that of the old object prior to
    /// deallocation, up to the lesser of the new and old sizes.
    ///
    /// The memory in the new object beyond the size of the old object is
    /// obtained according to `flags` (it might be uninitialized).
    ///
    /// The returned pointer to a new object may have the same value as a
    /// pointer to the old object, but [`rallocx`] may move the memory
    /// allocation, resulting in a different return value than `ptr`.
    ///
    /// # Errors
    ///
    /// On success it returns a non-null pointer.
    /// A null pointer return value indicates that insufficient contiguous
    /// memory was available to service the allocation request. In this case,
    /// the old object is not deallocated, and its value is unchanged.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `size == 0`, or
    /// * `ptr` does not match a pointer earlier returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_rallocx")]
    pub fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;

    /// Resizes the previously-allocated memory region referenced by `ptr`
    /// _in place_ to be at least `size` bytes, returning the real size of
    /// the allocation.
    ///
    /// Deallocates the old object pointed to by `ptr` and sets `ptr` to a
    /// new object that has the size returned; the old and new objects share
    /// the same base address. The contents of the new object are the same as
    /// that of the old object prior to deallocation, up to the lesser of the
    /// new and old sizes.
    ///
    /// If `extra` is non-zero, an attempt is made to resize the allocation
    /// to be at least `size + extra` bytes. Inability to allocate the
    /// `extra` bytes will not by itself result in failure to resize.
    ///
    /// The memory in the new object beyond the size of the old object is
    /// obtained according to `flags` (it might be uninitialized).
    ///
    /// # Errors
    ///
    /// If the allocation cannot be adequately grown in place up to `size`,
    /// the size returned is smaller than `size`.
    ///
    /// Note:
    ///
    /// * the size value returned can be larger than the size requested
    ///   during allocation
    /// * when shrinking an allocation, use the size returned to determine
    ///   whether the allocation was shrunk sufficiently or not.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `size == 0`, or
    /// * `size + extra > size_t::max_value()`, or
    /// * `ptr` does not match a pointer earlier returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_xallocx")]
    pub fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;

    /// Returns the real size of the previously-allocated memory region
    /// referenced by `ptr`.
    ///
    /// The value may be larger than the size requested on allocation.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `ptr` does not match a pointer earlier returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_sallocx")]
    pub fn sallocx(ptr: *const c_void, flags: c_int) -> size_t;

    /// Deallocates the previously-allocated memory region referenced by
    /// `ptr`.
    ///
    /// This makes the space available for future allocations.
    ///
    /// If `ptr` is null, no action occurs.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `ptr` does not match a pointer earlier returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_dallocx")]
    pub fn dallocx(ptr: *mut c_void, flags: c_int);

    /// Deallocates the previously-allocated memory region referenced by
    /// `ptr` with a `size` hint.
    ///
    /// This makes the space available for future allocations.
    ///
    /// If `ptr` is null, no action occurs.
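    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the upstream docs) of the intended
    /// [`mallocx`]/[`sdallocx`] pairing, passing the originally requested
    /// size back as the hint:
    ///
    /// ```no_run
    /// use jemalloc_sys::{mallocx, sdallocx};
    ///
    /// unsafe {
    ///     let size = 128;
    ///     let ptr = mallocx(size, 0);
    ///     assert!(!ptr.is_null());
    ///     // The requested size lies in `[req_size, alloc_size]`, so it is
    ///     // a valid `size` hint for the deallocation below.
    ///     sdallocx(ptr, size, 0);
    /// }
    /// ```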
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `size` is not in the range `[req_size, alloc_size]`, where
    ///   `req_size` is the size requested when performing the allocation,
    ///   and `alloc_size` is the allocation size returned by [`nallocx`],
    ///   [`sallocx`], or [`xallocx`], or
    /// * `ptr` does not match a pointer earlier returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_sdallocx")]
    pub fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);

    /// Returns the real size of the allocation that would result from a
    /// [`mallocx`] function call with the same arguments.
    ///
    /// # Errors
    ///
    /// If the inputs exceed the maximum supported size class and/or
    /// alignment, it returns zero.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if `size == 0`.
    #[cfg_attr(prefixed, link_name = "_rjem_nallocx")]
    pub fn nallocx(size: size_t, flags: c_int) -> size_t;

    /// Returns the real size of the previously-allocated memory region
    /// referenced by `ptr`.
    ///
    /// The value may be larger than the size requested on allocation.
    ///
    /// Although the excess bytes can be overwritten by the application
    /// without ill effects, this is not good programming practice: the
    /// number of excess bytes in an allocation depends on the underlying
    /// implementation.
    ///
    /// The main use of this function is for debugging and introspection.
    ///
    /// # Errors
    ///
    /// If `ptr` is null, 0 is returned.
    ///
    /// # Safety
    ///
    /// The behavior is _undefined_ if:
    ///
    /// * `ptr` does not match a pointer earlier returned by the memory
    ///   allocation functions of this crate, or
    /// * the memory region referenced by `ptr` has been deallocated.
    #[cfg_attr(prefixed, link_name = "_rjem_malloc_usable_size")]
    pub fn malloc_usable_size(ptr: *const c_void) -> size_t;

    /// General interface for introspecting the memory allocator, as well as
    /// setting modifiable parameters and triggering actions.
    ///
    /// The period-separated name argument specifies a location in a
    /// tree-structured namespace ([see jemalloc's `MALLCTL`
    /// documentation][jemalloc_mallctl]).
    ///
    /// To read a value, pass a pointer via `oldp` to adequate space to
    /// contain the value, and a pointer to its length via `oldlenp`;
    /// otherwise pass null and null. Similarly, to write a value, pass a
    /// pointer to the value via `newp`, and its length via `newlen`;
    /// otherwise pass null and 0.
    ///
    /// # Errors
    ///
    /// Returns `0` on success, otherwise returns:
    ///
    /// * `EINVAL`: if `newp` is not null, and `newlen` is too large or too
    ///   small. Alternatively, `*oldlenp` is too large or too small; in this
    ///   case as much data as possible are read despite the error.
    ///
    /// * `ENOENT`: `name` or `mib` specifies an unknown/invalid value.
    ///
    /// * `EPERM`: Attempt to read or write a void value, or attempt to write
    ///   a read-only value.
    ///
    /// * `EAGAIN`: A memory allocation failure occurred.
    ///
    /// * `EFAULT`: An interface with side effects failed in some way not
    ///   directly related to `mallctl` read/write processing.
    ///
    /// [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace
    #[cfg_attr(prefixed, link_name = "_rjem_mallctl")]
    pub fn mallctl(
        name: *const c_char,
        oldp: *mut c_void,
        oldlenp: *mut size_t,
        newp: *mut c_void,
        newlen: size_t,
    ) -> c_int;

    /// Translates a name to a “Management Information Base” (MIB) that can
    /// be passed repeatedly to [`mallctlbymib`].
    ///
    /// This avoids repeated name lookups for applications that repeatedly
    /// query the same portion of the namespace.
    ///
    /// On success, `mibp` contains an array of `*miblenp` integers, where
    /// `*miblenp` is the lesser of the number of components in name and the
    /// input value of `*miblenp`. Thus it is possible to pass a `*miblenp`
    /// that is smaller than the number of period-separated name components,
    /// which results in a partial MIB that can be used as the basis for
    /// constructing a complete MIB. For name components that are integers
    /// (e.g. the 2 in `arenas.bin.2.size`), the corresponding MIB component
    /// will always be that integer.
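    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the upstream docs) that caches the MIB
    /// for jemalloc's `"epoch"` control once and then queries it through
    /// [`mallctlbymib`]; error handling is reduced to assertions:
    ///
    /// ```no_run
    /// extern crate jemalloc_sys;
    /// extern crate libc;
    ///
    /// use jemalloc_sys::{mallctlbymib, mallctlnametomib};
    /// use libc::{c_void, size_t};
    ///
    /// fn main() {
    ///     unsafe {
    ///         // Translate the name into a MIB once...
    ///         let mut mib = [0 as size_t; 1];
    ///         let mut miblen = mib.len() as size_t;
    ///         let ret = mallctlnametomib(
    ///             b"epoch\0".as_ptr() as *const _,
    ///             mib.as_mut_ptr(),
    ///             &mut miblen,
    ///         );
    ///         assert_eq!(ret, 0);
    ///
    ///         // ...then read the current epoch without further lookups.
    ///         let mut epoch: u64 = 0;
    ///         let mut len = std::mem::size_of::<u64>() as size_t;
    ///         let ret = mallctlbymib(
    ///             mib.as_ptr(),
    ///             miblen,
    ///             &mut epoch as *mut u64 as *mut c_void,
    ///             &mut len,
    ///             std::ptr::null_mut(),
    ///             0,
    ///         );
    ///         assert_eq!(ret, 0);
    ///     }
    /// }
    /// ```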
    #[cfg_attr(prefixed, link_name = "_rjem_mallctlnametomib")]
    pub fn mallctlnametomib(
        name: *const c_char,
        mibp: *mut size_t,
        miblenp: *mut size_t,
    ) -> c_int;

    /// Like [`mallctl`] but taking a `mib` as input instead of a name.
    #[cfg_attr(prefixed, link_name = "_rjem_mallctlbymib")]
    pub fn mallctlbymib(
        mib: *const size_t,
        miblen: size_t,
        oldp: *mut c_void,
        oldlenp: *mut size_t,
        newp: *mut c_void,
        newlen: size_t,
    ) -> c_int;

    /// Writes summary statistics via the `write_cb` callback function
    /// pointer and `cbopaque` data passed to `write_cb`, or
    /// [`malloc_message`] if `write_cb` is null.
    ///
    /// The statistics are presented in human-readable form unless `J` is
    /// specified as a character within the `opts` string, in which case the
    /// statistics are presented in JSON format.
    ///
    /// This function can be called repeatedly.
    ///
    /// General information that never changes during execution can be
    /// omitted by specifying `g` as a character within the `opts` string.
    ///
    /// Note that [`malloc_stats_print`] uses the `mallctl*` functions
    /// internally, so inconsistent statistics can be reported if multiple
    /// threads use these functions simultaneously.
    ///
    /// If the Cargo feature `stats` is enabled, `m`, `d`, and `a` can be
    /// specified to omit merged arena, destroyed merged arena, and per arena
    /// statistics, respectively; `b` and `l` can be specified to omit per
    /// size class statistics for bins and large objects, respectively; `x`
    /// can be specified to omit all mutex statistics. Unrecognized
    /// characters are silently ignored.
    ///
    /// Note that thread caching may prevent some statistics from being
    /// completely up to date, since extra locking would be required to merge
    /// counters that track thread cache operations.
    #[cfg_attr(prefixed, link_name = "_rjem_malloc_stats_print")]
    pub fn malloc_stats_print(
        write_cb: Option<unsafe extern "C" fn(*mut c_void, *const c_char)>,
        cbopaque: *mut c_void,
        opts: *const c_char,
    );

    /// Allows overriding the function which emits the text strings forming
    /// the errors and warnings if for some reason the `STDERR_FILENO` file
    /// descriptor is not suitable for this.
    ///
    /// [`malloc_message`] is passed a `cbopaque` pointer argument that is
    /// null (unless overridden by the arguments in a call to
    /// [`malloc_stats_print`]), followed by a string pointer.
    ///
    /// Please note that doing anything which tries to allocate memory in
    /// this function is likely to result in a crash or deadlock.
    #[cfg_attr(prefixed, link_name = "_rjem_malloc_message")]
    pub static mut malloc_message:
        Option<unsafe extern "C" fn(cbopaque: *mut c_void, s: *const c_char)>;

    /// Compile-time string of configuration options.
    ///
    /// Once, when the first call is made to one of the memory allocation
    /// routines, the allocator initializes its internals based in part on
    /// various options that can be specified at compile- or run-time.
    ///
    /// The string specified via `--with-malloc-conf`, the string pointed to
    /// by the global variable `malloc_conf`, the “name” of the file
    /// referenced by the symbolic link named `/etc/malloc.conf`, and the
    /// value of the environment variable `MALLOC_CONF`, will be interpreted,
    /// in that order, from left to right as options. Note that `malloc_conf`
    /// may be read before `main()` is entered, so the declaration of
    /// `malloc_conf` should specify an initializer that contains the final
    /// value to be read by `jemalloc`.
    ///
    /// `--with-malloc-conf` and `malloc_conf` are compile-time mechanisms,
    /// whereas `/etc/malloc.conf` and `MALLOC_CONF` can be safely set any
    /// time prior to program invocation.
    ///
    /// An options string is a comma-separated list of `option:value` pairs.
    /// There is one key corresponding to each `opt.*` mallctl (see the
    /// `MALLCTL NAMESPACE` section for options documentation). For example,
    /// `abort:true,narenas:1` sets the `opt.abort` and `opt.narenas`
    /// options. Some options have boolean values (`true`/`false`), others
    /// have integer values (base `8`, `10`, or `16`, depending on prefix),
    /// and yet others have raw string values.
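    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the upstream docs) of inspecting the
    /// compile-time options string, if one was embedded:
    ///
    /// ```no_run
    /// unsafe {
    ///     if let Some(conf) = jemalloc_sys::malloc_conf {
    ///         // `conf` references the first byte of a NUL-terminated
    ///         // C string.
    ///         let s = std::ffi::CStr::from_ptr(conf);
    ///         println!("malloc_conf: {:?}", s);
    ///     }
    /// }
    /// ```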
    #[cfg_attr(prefixed, link_name = "_rjem_malloc_conf")]
    pub static malloc_conf: Option<&'static c_char>;
}

/// Extent lifetime management functions.
pub type extent_hooks_t = extent_hooks_s;

// note: there are two structs here, one is used when compiling the crate
// normally, and the other one is behind the `--cfg jemallocator_docs` flag
// and used only when generating docs.
//
// For the docs we want to use type aliases here, but `ctest` does not see
// through them when generating the code to verify the FFI bindings, and it
// needs to be able to tell that these are `fn` types so that `Option<fn>`
// gets lowered to C function pointers.
#[repr(C)]
#[cfg(not(jemallocator_docs))]
#[derive(Copy, Clone, Default)]
#[doc(hidden)]
#[allow(missing_docs)]
pub struct extent_hooks_s {
    pub alloc: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            *mut c_bool,
            *mut c_bool,
            c_uint,
        ) -> *mut c_void,
    >,
    pub dalloc: Option<
        unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint) -> c_bool,
    >,
    pub destroy:
        Option<unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint)>,
    pub commit: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    pub decommit: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    pub purge_lazy: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    pub purge_forced: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    pub split: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_bool,
            c_uint,
        ) -> c_bool,
    >,
    pub merge: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            *mut c_void,
            size_t,
            c_bool,
            c_uint,
        ) -> c_bool,
    >,
}

/// Extent lifetime management functions.
///
/// The extent_hooks_t structure comprises function pointers which are
/// described individually below. `jemalloc` uses these functions to manage
/// extent lifetime, which starts off with allocation of mapped committed
/// memory, in the simplest case followed by deallocation. However, there are
/// performance and platform reasons to retain extents for later reuse.
/// Cleanup attempts cascade from deallocation to decommit to forced purging
/// to lazy purging, which gives the extent management functions
/// opportunities to reject the most permanent cleanup operations in favor of
/// less permanent (and often less costly) operations. All operations except
/// allocation can be universally opted out of by setting the hook pointers
/// to `NULL`, or selectively opted out of by returning failure. Note that
/// once the extent hook is set, the structure is accessed directly by the
/// associated arenas, so it must remain valid for the entire lifetime of the
/// arenas.
#[repr(C)]
#[cfg(jemallocator_docs)]
#[derive(Copy, Clone, Default)]
pub struct extent_hooks_s {
    #[allow(missing_docs)]
    pub alloc: Option<extent_alloc_t>,
    #[allow(missing_docs)]
    pub dalloc: Option<extent_dalloc_t>,
    #[allow(missing_docs)]
    pub destroy: Option<extent_destroy_t>,
    #[allow(missing_docs)]
    pub commit: Option<extent_commit_t>,
    #[allow(missing_docs)]
    pub decommit: Option<extent_decommit_t>,
    #[allow(missing_docs)]
    pub purge_lazy: Option<extent_purge_t>,
    #[allow(missing_docs)]
    pub purge_forced: Option<extent_purge_t>,
    #[allow(missing_docs)]
    pub split: Option<extent_split_t>,
    #[allow(missing_docs)]
    pub merge: Option<extent_merge_t>,
}

/// Extent allocation function.
///
/// On success returns a pointer to `size` bytes of mapped memory on behalf
/// of arena `arena_ind` such that the extent's base address is a multiple of
/// `alignment`, as well as setting `*zero` to indicate whether the extent is
/// zeroed and `*commit` to indicate whether the extent is committed.
///
/// Zeroing is mandatory if `*zero` is `true` upon function entry. Committing
/// is mandatory if `*commit` is `true` upon function entry. If `new_addr` is
/// not null, the returned pointer must be `new_addr` on success or null on
/// error.
///
/// Committed memory may be committed in absolute terms as on a system that
/// does not overcommit, or in implicit terms as on a system that overcommits
/// and satisfies physical memory needs on demand via soft page faults. Note
/// that replacing the default extent allocation function makes the arena's
/// `arena.<i>.dss` setting irrelevant.
///
/// # Errors
///
/// On error the function returns null and leaves `*zero` and `*commit`
/// unmodified.
///
/// # Safety
///
/// The behavior is _undefined_ if:
///
/// * the `size` parameter is not a multiple of the page size
/// * the `alignment` parameter is not a power of two at least as large as
///   the page size
pub type extent_alloc_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    new_addr: *mut c_void,
    size: size_t,
    alignment: size_t,
    zero: *mut c_bool,
    commit: *mut c_bool,
    arena_ind: c_uint,
) -> *mut c_void;

/// Extent deallocation function.
///
/// Deallocates an extent at given `addr` and `size` with
/// `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates opt-out from deallocation;
/// the virtual memory mapping associated with the extent remains mapped, in
/// the same commit state, and available for future use, in which case it
/// will be automatically retained for later reuse.
pub type extent_dalloc_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;

/// Extent destruction function.
///
/// Unconditionally destroys an extent at given `addr` and `size` with
/// `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`.
///
/// This function may be called to destroy retained extents during arena
/// destruction (see `arena.<i>.destroy`).
pub type extent_destroy_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    committed: c_bool,
    arena_ind: c_uint,
);

/// Extent commit function.
///
/// Commits zeroed physical memory to back pages within an extent at given
/// `addr` and `size` at `offset` bytes, extending for `length` on behalf of
/// arena `arena_ind`, returning `false` upon success.
///
/// Committed memory may be committed in absolute terms as on a system that
/// does not overcommit, or in implicit terms as on a system that overcommits
/// and satisfies physical memory needs on demand via soft page faults. If
/// the function returns `true`, this indicates insufficient physical memory
/// to satisfy the request.
pub type extent_commit_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;

/// Extent decommit function.
///
/// Decommits any physical memory that is backing pages within an extent at
/// given `addr` and `size` at `offset` bytes, extending for `length` on
/// behalf of arena `arena_ind`, returning `false` upon success, in which
/// case the pages will be committed via the extent commit function before
/// being reused.
///
/// If the function returns `true`, this indicates opt-out from decommit; the
/// memory remains committed and available for future use, in which case it
/// will be automatically retained for later reuse.
pub type extent_decommit_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;

/// Extent purge function.
///
/// Discards physical pages within the virtual memory mapping associated with
/// an extent at given `addr` and `size` at `offset` bytes, extending for
/// `length` on behalf of arena `arena_ind`.
///
/// A lazy extent purge function (e.g. implemented via
/// `madvise(...MADV_FREE)`) can delay purging indefinitely and leave the
/// pages within the purged virtual memory range in an indeterminate state,
/// whereas a forced extent purge function immediately purges, and the pages
/// within the virtual memory range will be zero-filled the next time they
/// are accessed. If the function returns `true`, this indicates failure to
/// purge.
pub type extent_purge_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;

/// Extent split function.
///
/// Optionally splits an extent at given `addr` and `size` into two adjacent
/// extents, the first of `size_a` bytes, and the second of `size_b` bytes,
/// operating on `committed`/decommitted memory as indicated, on behalf of
/// arena `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extent remains
/// unsplit and therefore should continue to be operated on as a whole.
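///
/// # Example
///
/// A minimal sketch (not part of the upstream docs) of a split hook that
/// always opts out, so extents are only ever operated on as a whole;
/// installing it via the `arena.<i>.extent_hooks` mallctl is not shown:
///
/// ```no_run
/// extern crate jemalloc_sys;
/// extern crate libc;
///
/// use jemalloc_sys::{extent_hooks_t, extent_split_t};
/// use libc::{c_int, c_uint, c_void, size_t};
///
/// unsafe extern "C" fn no_split(
///     _extent_hooks: *mut extent_hooks_t,
///     _addr: *mut c_void,
///     _size: size_t,
///     _size_a: size_t,
///     _size_b: size_t,
///     _committed: c_int,
///     _arena_ind: c_uint,
/// ) -> c_int {
///     1 // returning `true` declines the split
/// }
///
/// fn main() {
///     // The signature matches `extent_split_t` (`c_bool` is `c_int`).
///     let hook: extent_split_t = no_split;
///     let _ = hook;
/// }
/// ```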
pub type extent_split_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    size_a: size_t,
    size_b: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;

/// Extent merge function.
///
/// Optionally merges adjacent extents, at given `addr_a` and `size_a` with
/// given `addr_b` and `size_b` into one contiguous extent, operating on
/// `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extents remain
/// distinct mappings and therefore should continue to be operated on
/// independently.
pub type extent_merge_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr_a: *mut c_void,
    size_a: size_t,
    addr_b: *mut c_void,
    size_b: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;

// These symbols are used by jemalloc on android but the really old android
// we're building on doesn't have them defined, so just make sure the symbols
// are available.
#[no_mangle]
#[cfg(target_os = "android")]
#[doc(hidden)]
pub extern "C" fn pthread_atfork(
    _prefork: *mut u8,
    _postfork_parent: *mut u8,
    _postfork_child: *mut u8,
) -> i32 {
    0
}

jemalloc-sys-0.3.2/tests/malloc_conf_empty.rs

extern crate jemalloc_sys;

#[test]
fn malloc_conf_empty() {
    unsafe {
        assert!(jemalloc_sys::malloc_conf.is_none());
    }
}

jemalloc-sys-0.3.2/tests/malloc_conf_set.rs

extern crate jemalloc_sys;
extern crate libc;

union U {
    x: &'static u8,
    y: &'static libc::c_char,
}

#[allow(non_upper_case_globals)]
#[cfg_attr(prefixed, export_name = "_rjem_malloc_conf")]
#[cfg_attr(not(prefixed), no_mangle)]
pub static malloc_conf: Option<&'static libc::c_char> = Some(unsafe {
    U {
        x: &b"stats_print_opts:mdal\0"[0],
    }
    .y
});

#[test]
fn malloc_conf_set() {
    unsafe {
        assert_eq!(jemalloc_sys::malloc_conf, malloc_conf);

        let mut ptr: *const libc::c_char = std::ptr::null();
        let mut ptr_len: libc::size_t =
            std::mem::size_of::<*const libc::c_char>() as libc::size_t;
        let r = jemalloc_sys::mallctl(
            &b"opt.stats_print_opts\0"[0] as *const _ as *const libc::c_char,
            &mut ptr as *mut *const _ as *mut libc::c_void,
            &mut ptr_len as *mut _,
            std::ptr::null_mut(),
            0,
        );
        assert_eq!(r, 0);
        assert!(!ptr.is_null());

        let s = std::ffi::CStr::from_ptr(ptr).to_string_lossy().into_owned();
        assert!(
            s.contains("mdal"),
            "opt.stats_print_opts: \"{}\" (len = {})",
            s,
            s.len()
        );
    }
}

jemalloc-sys-0.3.2/tests/unprefixed_malloc.rs

extern crate jemalloc_sys;
extern crate libc;

#[cfg(prefixed)]
#[test]
fn malloc_is_prefixed() {
    assert_ne!(jemalloc_sys::malloc as usize, libc::malloc as usize)
}

#[cfg(not(prefixed))]
#[test]
fn malloc_is_overridden() {
    assert_eq!(jemalloc_sys::malloc as usize, libc::malloc as usize)
}

jemalloc-sys-0.3.2/update_jemalloc.md

# Updating jemalloc

Updating the `jemalloc` version requires generating new `configure` files,
which requires `autoconf` to be installed. To generate the configuration
files, go to the `jemalloc` source directory and run:

```shell
autoconf
```

jemalloc-sys-0.3.2/rep/COPYING

Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:

--------------------------------------------------------------------------------

Copyright (C) 2002-present Jason Evans . All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
Copyright (C) 2009-present Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice(s),
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice(s), this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

--------------------------------------------------------------------------------

jemalloc-sys-0.3.2/rep/ChangeLog

Following are change highlights associated with official releases. Important bug fixes are all mentioned, but some internal enhancements are omitted here for brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc * 5.1.0 (May 4th, 2018) This release is primarily about fine-tuning, ranging from several new features to numerous notable performance and portability enhancements. The release and prior dev versions have been running in multiple large scale applications for months, and the cumulative improvements are substantial in many cases. Given the long and successful production runs, this release is likely a good candidate for applications to upgrade, from both jemalloc 5.0 and before. For performance-critical applications, the newly added TUNING.md provides guidelines on jemalloc tuning. New features: - Implement transparent huge page support for internal metadata. (@interwq) - Add opt.thp to allow enabling / disabling transparent huge pages for all mappings. (@interwq) - Add maximum background thread count option. (@djwatson) - Allow prof_active to control opt.lg_prof_interval and prof.gdump. (@interwq) - Allow arena index lookup based on allocation addresses via mallctl. (@lionkov) - Allow disabling initial-exec TLS model. (@davidtgoldblatt, @KenMacD) - Add opt.lg_extent_max_active_fit to set the max ratio between the size of the active extent selected (to split off from) and the size of the requested allocation. (@interwq, @davidtgoldblatt) - Add retain_grow_limit to set the max size when growing virtual address space. (@interwq) - Add mallctl interfaces: + arena..retain_grow_limit (@interwq) + arenas.lookup (@lionkov) + max_background_threads (@djwatson) + opt.lg_extent_max_active_fit (@interwq) + opt.max_background_threads (@djwatson) + opt.metadata_thp (@interwq) + opt.thp (@interwq) + stats.metadata_thp (@interwq) Portability improvements: - Support GNU/kFreeBSD configuration. (@paravoid) - Support m68k, nios2 and SH3 architectures. (@paravoid) - Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo) - Fix symbol listing for cross-compiling. (@tamird) - Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid) - Disable the CPU_SPINWAIT macro for Power.
(@davidtgoldblatt, @marxin) - Fix MSVC 2015 & 2017 builds. (@rustyx) - Improve RISC-V support. (@EdSchouten) - Set name mangling script in strict mode. (@nicolov) - Avoid MADV_HUGEPAGE on ARM. (@marxin) - Modify configure to determine return value of strerror_r. (@davidtgoldblatt, @cferris1000) - Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani) - Fix 32-bit build on MSVC. (@rustyx) - Fix external symbol on MSVC. (@maksqwe) - Avoid a printf format specifier warning. (@jasone) - Add configure option --disable-initial-exec-tls which can allow jemalloc to be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD) - AArch64: Add ILP32 support. (@cmuellner) - Add --with-lg-vaddr configure option to support cross compiling. (@cmuellner, @davidtgoldblatt) Optimizations and refactors: - Improve active extent fit with extent_max_active_fit. This considerably reduces fragmentation over time and improves virtual memory and metadata usage. (@davidtgoldblatt, @interwq) - Eagerly coalesce large extents to reduce fragmentation. (@interwq) - sdallocx: only read size info when page aligned (i.e. possibly sampled), which speeds up the sized deallocation path significantly. (@interwq) - Avoid attempting new mappings for in place expansion with retain, since it rarely succeeds in practice and causes high overhead. (@interwq) - Refactor OOM handling in newImpl. (@wqfish) - Add internal fine-grained logging functionality for debugging use. (@davidtgoldblatt) - Refactor arena / tcache interactions. (@davidtgoldblatt) - Refactor extent management with dumpable flag. (@davidtgoldblatt) - Add runtime detection of lazy purging. (@interwq) - Use pairing heap instead of red-black tree for extents_avail. (@djwatson) - Use sysctl on startup in FreeBSD. (@trasz) - Use thread local prng state instead of atomic. (@djwatson) - Make decay to always purge one more extent than before, because in practice large extents are usually the ones that cross the decay threshold. Purging the additional extent helps save memory as well as reduce VM fragmentation. (@interwq) - Fast division by dynamic values. (@davidtgoldblatt) - Improve the fit for aligned allocation. (@interwq, @edwinsmith) - Refactor extent_t bitpacking. (@rkmisra) - Optimize the generated assembly for ticker operations. (@davidtgoldblatt) - Convert stats printing to use a structured text emitter. (@davidtgoldblatt) - Remove preserve_lru feature for extents management. (@djwatson) - Consolidate two memory loads into one on the fast deallocation path. (@davidtgoldblatt, @interwq) Bug fixes (most of the issues are only relevant to jemalloc 5.0): - Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt) - Validate returned file descriptor before use. (@zonyitoo) - Fix a few background thread initialization and shutdown issues. (@interwq) - Fix an extent coalesce + decay race by taking both coalescing extents off the LRU list. (@interwq) - Fix potentially unbound increase during decay, caused by one thread keep stashing memory to purge while other threads generating new pages. The number of pages to purge is checked to prevent this. (@interwq) - Fix a FreeBSD bootstrap assertion. (@strejda, @interwq) - Handle 32 bit mutex counters. (@rkmisra) - Fix a indexing bug when creating background threads. (@davidtgoldblatt, @binliu19) - Fix arguments passed to extent_init. (@yuleniwo, @interwq) - Fix addresses used for ordering mutexes. (@rkmisra) - Fix abort_conf processing during bootstrap. 
(@interwq) - Fix include path order for out-of-tree builds. (@cmuellner) Incompatible changes: - Remove --disable-thp. (@interwq) - Remove mallctl interfaces: + config.thp (@interwq) Documentation: - Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson) * 5.0.1 (July 1, 2017) This bugfix release fixes several issues, most of which are obscure enough that typical applications are not impacted. Bug fixes: - Update decay->nunpurged before purging, in order to avoid potential update races and subsequent incorrect purging volume. (@interwq) - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy locking and/or background threads). This mitigates an initialization failure bug for which we still do not have a clear reproduction test case. (@interwq) - Modify tsd management so that it neither crashes nor leaks if a thread's only allocation activity is to call free() after TLS destructors have been executed. This behavior was observed when operating with GNU libc, and is unlikely to be an issue with other libc implementations. (@interwq) - Mask signals during background thread creation. This prevents signals from being inadvertently delivered to background threads. (@jasone, @davidtgoldblatt, @interwq) - Avoid inactivity checks within background threads, in order to prevent recursive mutex acquisition. (@interwq) - Fix extent_grow_retained() to use the specified hooks when the arena..extent_hooks mallctl is used to override the default hooks. (@interwq) - Add missing reentrancy support for custom extent hooks which allocate. (@interwq) - Post-fork(2), re-initialize the list of tcaches associated with each arena to contain no tcaches except the forking thread's. (@interwq) - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This fixes potential deadlocks after fork(2). (@interwq) - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to generate corrupt configure scripts. (@jasone) - Ensure that the configured page size (--with-lg-page) is no larger than the configured huge page size (--with-lg-hugepage). (@jasone) * 5.0.0 (June 13, 2017) Unlike all previous jemalloc releases, this release does not use naturally aligned "chunks" for virtual memory management, and instead uses page-aligned "extents". This change has few externally visible effects, but the internal impacts are... extensive. Many other internal changes combine to make this the most cohesively designed version of jemalloc so far, with ample opportunity for further enhancements. Continuous integration is now an integral aspect of development thanks to the efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a side effect the official release frequency may decrease over time. New features: - Implement optional per-CPU arena support; threads choose which arena to use based on current CPU rather than on fixed thread-->arena associations. (@interwq) - Implement two-phase decay of unused dirty pages. Pages transition from dirty-->muzzy-->clean, where the first phase transition relies on madvise(... MADV_FREE) semantics, and the second phase transition discards pages such that they are replaced with demand-zeroed pages on next access. (@jasone) - Increase decay time resolution from seconds to milliseconds. (@jasone) - Implement opt-in per CPU background threads, and use them for asynchronous decay-driven unused dirty page purging. 
(@interwq) - Add mutex profiling, which collects a variety of statistics useful for diagnosing overhead/contention issues. (@interwq) - Add C++ new/delete operator bindings. (@djwatson) - Support manually created arena destruction, such that all data and metadata are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats associated with destroyed arenas. (@jasone) - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing merged/destroyed arena statistics via mallctl. (@jasone) - Add opt.abort_conf to optionally abort if invalid configuration options are detected during initialization. (@interwq) - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the stats dumped during exit if opt.stats_print is true. (@jasone) - Add --with-version=VERSION for use when embedding jemalloc into another project's git repository. (@jasone) - Add --disable-thp to support cross compiling. (@jasone) - Add --with-lg-hugepage to support cross compiling. (@jasone) - Add mallctl interfaces (various authors): + background_thread + opt.abort_conf + opt.retain + opt.percpu_arena + opt.background_thread + opt.{dirty,muzzy}_decay_ms + opt.stats_print_opts + arena..initialized + arena..destroy + arena..{dirty,muzzy}_decay_ms + arena..extent_hooks + arenas.{dirty,muzzy}_decay_ms + arenas.bin..slab_size + arenas.nlextents + arenas.lextent..size + arenas.create + stats.background_thread.{num_threads,num_runs,run_interval} + stats.mutexes.{ctl,background_thread,prof,reset}. {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, num_owner_switch} + stats.arenas..{dirty,muzzy}_decay_ms + stats.arenas..uptime + stats.arenas..{pmuzzy,base,internal,resident} + stats.arenas..{dirty,muzzy}_{npurge,nmadvise,purged} + stats.arenas..bins..{nslabs,reslabs,curslabs} + stats.arenas..bins..mutex. {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, num_owner_switch} + stats.arenas..lextents..{nmalloc,ndalloc,nrequests,curlextents} + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy, extents_retained,decay_dirty,decay_muzzy,base,tcache_list}. {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, num_owner_switch} Portability improvements: - Improve reentrant allocation support, such that deadlock is less likely if e.g. a system library call in turn allocates memory. (@davidtgoldblatt, @interwq) - Support static linking of jemalloc with glibc. (@djwatson) Optimizations and refactors: - Organize virtual memory as "extents" of virtual memory pages, rather than as naturally aligned "chunks", and store all metadata in arbitrarily distant locations. This reduces virtual memory external fragmentation, and will interact better with huge pages (not yet explicitly supported). (@jasone) - Fold large and huge size classes together; only small and large size classes remain. (@jasone) - Unify the allocation paths, and merge most fast-path branching decisions. (@davidtgoldblatt, @interwq) - Embed per thread automatic tcache into thread-specific data, which reduces conditional branches and dereferences. Also reorganize tcache to increase fast-path data locality. (@interwq) - Rewrite atomics to closely model the C11 API, convert various synchronization from mutex-based to atomic, and use the explicit memory ordering control to resolve various hypothetical races without increasing synchronization overhead. 
(@davidtgoldblatt) - Extensively optimize rtree via various methods: + Add multiple layers of rtree lookup caching, since rtree lookups are now part of fast-path deallocation. (@interwq) + Determine rtree layout at compile time. (@jasone) + Make the tree shallower for common configurations. (@jasone) + Embed the root node in the top-level rtree data structure, thus avoiding one level of indirection. (@jasone) + Further specialize leaf elements as compared to internal node elements, and directly embed extent metadata needed for fast-path deallocation. (@jasone) + Ignore leading always-zero address bits (architecture-specific). (@jasone) - Reorganize headers (ongoing work) to make them hermetic, and disentangle various module dependencies. (@davidtgoldblatt) - Convert various internal data structures such as size class metadata from boot-time-initialized to compile-time-initialized. Propagate resulting data structure simplifications, such as making arena metadata fixed-size. (@jasone) - Simplify size class lookups when constrained to size classes that are multiples of the page size. This speeds lookups, but the primary benefit is complexity reduction in code that was the source of numerous regressions. (@jasone) - Lock individual extents when possible for localized extent operations, rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone) - Use first fit layout policy instead of best fit, in order to improve packing. (@jasone) - If munmap(2) is not in use, use an exponential series to grow each arena's virtual memory, so that the number of disjoint virtual memory mappings remains low. (@jasone) - Implement per arena base allocators, so that arenas never share any virtual memory pages. (@jasone) - Automatically generate private symbol name mangling macros. (@jasone) Incompatible changes: - Replace chunk hooks with an expanded/normalized set of extent hooks. (@jasone) - Remove ratio-based purging. (@jasone) - Remove --disable-tcache. (@jasone) - Remove --disable-tls. (@jasone) - Remove --enable-ivsalloc. (@jasone) - Remove --with-lg-size-class-group. (@jasone) - Remove --with-lg-tiny-min. (@jasone) - Remove --disable-cc-silence. (@jasone) - Remove --enable-code-coverage. (@jasone) - Remove --disable-munmap (replaced by opt.retain). (@jasone) - Remove Valgrind support. (@jasone) - Remove quarantine support. (@jasone) - Remove redzone support. (@jasone) - Remove mallctl interfaces (various authors): + config.munmap + config.tcache + config.tls + config.valgrind + opt.lg_chunk + opt.purge + opt.lg_dirty_mult + opt.decay_time + opt.quarantine + opt.redzone + opt.thp + arena..lg_dirty_mult + arena..decay_time + arena..chunk_hooks + arenas.initialized + arenas.lg_dirty_mult + arenas.decay_time + arenas.bin..run_size + arenas.nlruns + arenas.lrun..size + arenas.nhchunks + arenas.hchunk..size + arenas.extend + stats.cactive + stats.arenas..lg_dirty_mult + stats.arenas..decay_time + stats.arenas..metadata.{mapped,allocated} + stats.arenas..{npurge,nmadvise,purged} + stats.arenas..huge.{allocated,nmalloc,ndalloc,nrequests} + stats.arenas..bins..{nruns,reruns,curruns} + stats.arenas..lruns..{nmalloc,ndalloc,nrequests,curruns} + stats.arenas..hchunks..{nmalloc,ndalloc,nrequests,curhchunks} Bug fixes: - Improve interval-based profile dump triggering to dump only one profile when a single allocation's size exceeds the interval. (@jasone) - Use prefixed function names (as controlled by --with-jemalloc-prefix) when pruning backtrace frames in jeprof. 
(@jasone) * 4.5.0 (February 28, 2017) This is the first release to benefit from much broader continuous integration testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure in place for prior releases, it would have caught all of the most serious regressions fixed by this release. New features: - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for transparent huge page integration. (@jasone) - Update zone allocator integration to work with macOS 10.12. (@glandium) - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not during configuration. (@jasone, @ronawho) Bug fixes: - Fix DSS (sbrk(2)-based) allocation. This regression was first released in 4.3.0. (@jasone) - Handle race in per size class utilization computation. This functionality was first released in 4.0.0. (@interwq) - Fix lock order reversal during gdump. (@jasone) - Fix/refactor tcache synchronization. This regression was first released in 4.0.0. (@jasone) - Fix various JSON-formatted malloc_stats_print() bugs. This functionality was first released in 4.3.0. (@jasone) - Fix huge-aligned allocation. This regression was first released in 4.4.0. (@jasone) - When transparent huge page integration is enabled, detect what state pages start in according to the kernel's current operating mode, and only convert arena chunks to non-huge during purging if that is not their initial state. This functionality was first released in 4.4.0. (@jasone) - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case. This regression was first released in 4.0.0. (@jasone, @428desmo) - Properly detect sparc64 when building for Linux. (@glaubitz) * 4.4.0 (December 3, 2016) New features: - Add configure support for *-*-linux-android. (@cferris1000, @jasone) - Add the --disable-syscall configure option, for use on systems that place security-motivated limitations on syscall(2). (@jasone) - Add support for Debian GNU/kFreeBSD. (@thesam) Optimizations: - Add extent serial numbers and use them where appropriate as a sort key that is higher priority than address, so that the allocation policy prefers older extents. This tends to improve locality (decrease fragmentation) when memory grows downward. (@jasone) - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized on Linux 4.5 and newer. (@jasone) - Mark partially purged arena chunks as non-huge-page. This improves interaction with Linux's transparent huge page functionality. (@jasone) Bug fixes: - Fix size class computations for edge conditions involving extremely large allocations. This regression was first released in 4.0.0. (@jasone, @ingvarha) - Remove overly restrictive assertions related to the cactive statistic. This regression was first released in 4.1.0. (@jasone) - Implement a more reliable detection scheme for os_unfair_lock on macOS. (@jszakmeister) * 4.3.1 (November 7, 2016) Bug fixes: - Fix a severe virtual memory leak. This regression was first released in 4.3.0. (@interwq, @jasone) - Refactor atomic and prng APIs to restore support for 32-bit platforms that use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) * 4.3.0 (November 4, 2016) This is the first release that passes the test suite for multiple Windows configurations, thanks in large part to @glandium setting up continuous integration via AppVeyor (and Travis CI for Linux and OS X). New features: - Add "J" (JSON) support to malloc_stats_print(). 
(@jasone) - Add Cray compiler support. (@ronawho) Optimizations: - Add/use adaptive spinning for bootstrapping and radix tree node initialization. (@jasone) Bug fixes: - Fix large allocation to search starting in the optimal size class heap, which can substantially reduce virtual memory churn and fragmentation. This regression was first released in 4.0.0. (@mjp41, @jasone) - Fix stats.arenas..nthreads accounting. (@interwq) - Fix and simplify decay-based purging. (@jasone) - Make DSS (sbrk(2)-related) operations lockless, which resolves potential deadlocks during thread exit. (@jasone) - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, @jasone) - Fix over-sized allocation of arena_t (plus associated stats) data structures. (@jasone, @interwq) - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) - Fix a Valgrind integration bug. (@ronawho) - Disallow 0x5a junk filling when running in Valgrind. (@jasone) - Fix a file descriptor leak on Linux. This regression was first released in 4.2.0. (@vsarunas, @jasone) - Fix static linking of jemalloc with glibc. (@djwatson) - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This works around other libraries' system call wrappers performing reentrant allocation. (@kspinka, @Whissi, @jasone) - Fix OS X default zone replacement to work with OS X 10.12. (@glandium, @jasone) - Fix cached memory management to avoid needless commit/decommit operations during purging, which resolves permanent virtual memory map fragmentation issues on Windows. (@mjp41, @jasone) - Fix TSD fetches to avoid (recursive) allocation. This is relevant to non-TLS and Windows configurations. (@jasone) - Fix malloc_conf overriding to work on Windows. (@jasone) - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) * 4.2.1 (June 8, 2016) Bug fixes: - Fix bootstrapping issues for configurations that require allocation during tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) - Fix gettimeofday() version of nstime_update(). (@ronawho) - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) - Fix potential VM map fragmentation regression. (@jasone) - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) - Fix heap profiling context leaks in reallocation edge cases. (@jasone) * 4.2.0 (May 12, 2016) New features: - Add the arena..reset mallctl, which makes it possible to discard all of an arena's allocations in a single operation. (@jasone) - Add the stats.retained and stats.arenas..retained statistics. (@jasone) - Add the --with-version configure option. (@jasone) - Support --with-lg-page values larger than actual page size. (@jasone) Optimizations: - Use pairing heaps rather than red-black trees for various hot data structures. (@djwatson, @jasone) - Streamline fast paths of rtree operations. (@jasone) - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) - Decommit unused virtual memory if the OS does not overcommit. (@jasone) - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order to avoid unfortunate interactions during fork(2). (@jasone) Bug fixes: - Fix chunk accounting related to triggering gdump profiles. (@jasone) - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) - Scale leak report summary according to sampling probability. (@jasone) * 4.1.1 (May 3, 2016) This bugfix release resolves a variety of mostly minor issues, though the bitmap fix is critical for 64-bit Windows. 
Bug fixes: - Fix the linear scan version of bitmap_sfu() to shift by the proper amount even when sizeof(long) is not the same as sizeof(void *), as on 64-bit Windows. (@jasone) - Fix hashing functions to avoid unaligned memory accesses (and resulting crashes). This is relevant at least to some ARM-based platforms. (@rkmisra) - Fix fork()-related lock rank ordering reversals. These reversals were unlikely to cause deadlocks in practice except when heap profiling was enabled and active. (@jasone) - Fix various chunk leaks in OOM code paths. (@jasone) - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) - Fix a variety of test failures that were due to test fragility rather than core bugs. (@jasone) * 4.1.0 (February 28, 2016) This release is primarily about optimizations, but it also incorporates a lot of portability-motivated refactoring and enhancements. Many people worked on this release, to an extent that even with the omission here of minor changes (see the git revision history), and of the people who reported and diagnosed issues, so much of the work was contributed that starting with this release, changes are annotated with author credits to help reflect the collaborative effort involved. New features: - Implement decay-based unused dirty page purging, a major optimization with mallctl API impact. This is an alternative to the existing ratio-based unused dirty page purging, and is intended to eventually become the sole purging mechanism. New mallctls: + opt.purge + opt.decay_time + arena.<i>.decay + arena.<i>.decay_time + arenas.decay_time + stats.arenas.<i>.decay_time (@jasone, @cevans87) - Add --with-malloc-conf, which makes it possible to embed a default options string during configuration. This was motivated by the desire to specify --with-malloc-conf=purge:decay, since the default must remain purge:ratio until the 5.0.0 release. (@jasone) - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) - Make *allocx() size class overflow behavior defined. The maximum size class is now less than PTRDIFF_MAX to protect applications against numerical overflow, and all allocation functions are guaranteed to indicate errors rather than potentially crashing if the request size exceeds the maximum size class. (@jasone) - jeprof: + Add raw heap profile support. (@jasone) + Add --retain and --exclude for backtrace symbol filtering. (@jasone) Optimizations: - Optimize the fast path to combine various bootstrapping and configuration checks and execute more streamlined code in the common case. (@interwq) - Use linear scan for small bitmaps (used for small object tracking). In addition to speeding up bitmap operations on 64-bit systems, this reduces allocator metadata overhead by approximately 0.2%. (@djwatson) - Separate arena_avail trees, which substantially speeds up run tree operations. (@djwatson) - Use memoization (boot-time-computed table) for run quantization. Separate arena_avail trees reduced the importance of this optimization. (@jasone) - Attempt mmap-based in-place huge reallocation. This can dramatically speed up incremental huge reallocation. (@jasone) Incompatible changes: - Make opt.narenas unsigned rather than size_t. (@jasone) Bug fixes: - Fix stats.cactive accounting regression. (@rustyx, @jasone) - Handle unaligned keys in hash(). This caused problems for some ARM systems. (@jasone, @cferris1000) - Refactor arenas array.
In addition to fixing a fork-related deadlock, this makes arena lookups faster and simpler. (@jasone) - Move retained memory allocation out of the default chunk allocation function, to a location that gets executed even if the application installs a custom chunk allocation function. This resolves a virtual memory leak. (@buchgr) - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) - Fix run quantization. In practice this bug had no impact unless applications requested memory with alignment exceeding one page. (@jasone, @djwatson) - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) - jeprof: + Don't discard curl options if timeout is not defined. (@djwatson) + Detect failed profile fetches. (@djwatson) - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for --disable-stats case. (@jasone) * 4.0.4 (October 24, 2015) This bugfix release fixes another xallocx() regression. No other regressions have come to light in over a month, so this is likely a good starting point for people who prefer to wait for "dot one" releases with all the major issues shaken out. Bug fixes: - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large allocations that have been randomly assigned an offset of 0 when the --enable-cache-oblivious configure option is enabled. * 4.0.3 (September 24, 2015) This bugfix release continues the trend of xallocx() and heap profiling fixes. Bug fixes: - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large allocations when the --enable-cache-oblivious configure option is enabled. - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations when resizing from/to a size class that is not a multiple of the chunk size. - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap profile dumping started. - Work around a potentially bad thread-specific data initialization interaction with NPTL (glibc's pthreads implementation). * 4.0.2 (September 21, 2015) This bugfix release addresses a few bugs specific to heap profiling. Bug fixes: - Fix ixallocx_prof_sample() to never modify nor create sampled small allocations. xallocx() is in general incapable of moving small allocations, so this fix removes buggy code without loss of generality. - Fix irallocx_prof_sample() to always allocate large regions, even when alignment is non-zero. - Fix prof_alloc_rollback() to read tdata from thread-specific data rather than dereferencing a potentially invalid tctx. * 4.0.1 (September 15, 2015) This is a bugfix release that is somewhat high risk due to the amount of refactoring required to address deep xallocx() problems. As a side effect of these fixes, xallocx() now tries harder to partially fulfill requests for optional extra space. Note that a couple of minor heap profiling optimizations are included, but these are better thought of as performance fixes that were integral to discovering most of the other bugs. Optimizations: - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the fast path when heap profiling is enabled. Additionally, split a special case out into arena_prof_tctx_reset(), which also avoids chunk metadata reads. - Optimize irallocx_prof() to optimistically update the sampler state. The prior implementation appears to have been a holdover from when rallocx()/xallocx() functionality was combined as rallocm(). Bug fixes: - Fix TLS configuration such that it is enabled by default for platforms on which it works correctly.
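  As a usage sketch of the xallocx() semantics discussed in the 4.0.x
  entries above (a minimal example; it assumes unprefixed symbols from
  <jemalloc/jemalloc.h> and hypothetical sizes):

      #include <jemalloc/jemalloc.h>

      void grow_in_place(void) {
          void *p = mallocx(4096, 0);
          if (p == NULL) return;
          /* Resize to at least 8192 bytes, accepting up to 4096 extra
           * bytes, without moving the object; MALLOCX_ZERO requests that
           * any newly usable space be zeroed. */
          size_t usize = xallocx(p, 8192, 4096, MALLOCX_ZERO);
          if (usize < 8192) {
              /* Could not grow in place; fall back to a moving rallocx(). */
              void *q = rallocx(p, 8192, MALLOCX_ZERO);
              if (q != NULL) p = q;
          }
          dallocx(p, 0);
      }

  xallocx() never moves the object; it returns the resulting usable size,
  which is simply the current usable size when in-place resizing fails.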
- Fix arenas_cache_cleanup() and arena_get_hard() to handle allocation/deallocation within the application's thread-specific data cleanup functions even after arenas_cache is torn down. - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS. - Fix chunk purge hook calls for in-place huge shrinking reallocation to specify the old chunk size rather than the new chunk size. This bug caused no correctness issues for the default chunk purge function, but was visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl. - Fix heap profiling bugs: + Fix heap profiling to distinguish among otherwise identical sample sites with interposed resets (triggered via the "prof.reset" mallctl). This bug could cause data structure corruption that would most likely result in a segfault. + Fix irealloc_prof() to prof_alloc_rollback() on OOM. + Make one call to prof_active_get_unlocked() per allocation event, and use the result throughout the relevant functions that handle an allocation event. Also add a missing check in prof_realloc(). These fixes protect allocation events against concurrent prof_active changes. + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample() in the correct order. + Fix prof_realloc() to call prof_free_sampled_object() after calling prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were the same, the tctx could have been prematurely destroyed. - Fix portability bugs: + Don't bitshift by negative amounts when encoding/decoding run sizes in chunk header maps. This affected systems with page sizes greater than 8 KiB. + Rename index_t to szind_t to avoid an existing type on Solaris. + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to match glibc and avoid compilation errors when including both jemalloc/jemalloc.h and malloc.h in C++ code. + Don't assume that /bin/sh is appropriate when running size_classes.sh during configuration. + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM. + Link tests to librt if it contains clock_gettime(2). * 4.0.0 (August 17, 2015) This version contains many speed and space optimizations, both minor and major. The major themes are generalization, unification, and simplification. Although many of these optimizations cause no visible behavior change, their cumulative effect is substantial. New features: - Normalize size class spacing to be consistent across the complete size range. By default there are four size classes per size doubling, but this is now configurable via the --with-lg-size-class-group option. Also add the --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and --with-lg-tiny-min options, which can be used to tweak page and size class settings. Impacts: + Worst case performance for incrementally growing/shrinking reallocation is improved because there are far fewer size classes, and therefore copying happens less often. + Internal fragmentation is limited to 20% for all but the smallest size classes (those less than four times the quantum). (1B + 4 KiB) and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation. + Chunk fragmentation tends to be lower because there are fewer distinct run sizes to pack. - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and "tcache.destroy" mallctls control tcache lifetime and flushing, and the MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API control which tcache is used for each operation.
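  An illustrative sketch of this explicit tcache control (mallctl names as
  added in 4.0.0; assumes unprefixed symbols from <jemalloc/jemalloc.h>):

      #include <jemalloc/jemalloc.h>

      void explicit_tcache(void) {
          unsigned tc;
          size_t sz = sizeof(tc);
          /* Create an explicit tcache; its index is returned through oldp. */
          if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) return;
          /* Allocate and deallocate through that tcache... */
          void *p = mallocx(128, MALLOCX_TCACHE(tc));
          if (p != NULL) dallocx(p, MALLOCX_TCACHE(tc));
          /* ...or bypass tcaches entirely. */
          void *q = mallocx(128, MALLOCX_TCACHE_NONE);
          if (q != NULL) dallocx(q, MALLOCX_TCACHE_NONE);
          /* Destroy the tcache; the index is passed in through newp. */
          mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
      }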
- Implement per thread heap profiling, as well as the ability to enable/disable heap profiling on a per thread basis. Add the "prof.reset", "prof.lg_sample", "thread.prof.name", "thread.prof.active", "opt.prof_thread_active_init", "prof.thread_active_init", and "prof.active" mallctls. - Add support for per arena application-specified chunk allocators, configured via the "arena.<i>.chunk_hooks" mallctl. - Refactor huge allocation to be managed by arenas, so that arenas now function as general purpose independent allocators. This is important in the context of user-specified chunk allocators, aside from the scalability benefits. Related new statistics: + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc", "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests" mallctls provide high level per arena huge allocation statistics. + The "arenas.nhchunks", "arenas.hchunk.<i>.size", "stats.arenas.<i>.hchunks.<j>.nmalloc", "stats.arenas.<i>.hchunks.<j>.ndalloc", "stats.arenas.<i>.hchunks.<j>.nrequests", and "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class statistics. - Add the 'util' column to malloc_stats_print() output, which reports the proportion of available regions that are currently in use for each small size class. - Add "alloc" and "free" modes for junk filling (see the "opt.junk" mallctl), so that it is possible to separately enable junk filling for allocation versus deallocation. - Add the jemalloc-config script, which provides information about how jemalloc was configured, and how to integrate it into application builds. - Add metadata statistics, which are accessible via the "stats.metadata", "stats.arenas.<i>.metadata.mapped", and "stats.arenas.<i>.metadata.allocated" mallctls. - Add the "stats.resident" mallctl, which reports the upper limit of physically resident memory mapped by the allocator. - Add per arena control over unused dirty page purging, via the "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and "stats.arenas.<i>.lg_dirty_mult" mallctls. - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump feature on/off during program execution. - Add sdallocx(), which implements sized deallocation. The primary optimization over dallocx() is the removal of a metadata read, which often suffers an L1 cache miss. - Add missing header includes in jemalloc/jemalloc.h, so that applications only have to #include <jemalloc/jemalloc.h>. - Add support for additional platforms: + Bitrig + Cygwin + DragonFlyBSD + iOS + OpenBSD + OpenRISC/or1k Optimizations: - Maintain dirty runs in per arena LRUs rather than in per arena trees of dirty-run-containing chunks. In practice this change significantly reduces dirty page purging volume. - Integrate whole chunks into the unused dirty page purging machinery. This reduces the cost of repeated huge allocation/deallocation, because it effectively introduces a cache of chunks. - Split the arena chunk map into two separate arrays, in order to increase cache locality for the frequently accessed bits. - Move small run metadata out of runs, into arena chunk headers. This reduces run fragmentation, smaller runs reduce external fragmentation for small size classes, and packed (less uniformly aligned) metadata layout improves CPU cache set distribution. - Randomly distribute large allocation base pointer alignment relative to page boundaries in order to more uniformly utilize CPU cache sets. This can be disabled via the --disable-cache-oblivious configure option, and queried via the "config.cache_oblivious" mallctl.
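  Such compile-time settings can be read back at run time with a boolean
  mallctl query; a minimal sketch, assuming unprefixed symbols:

      #include <stdbool.h>
      #include <stdio.h>
      #include <jemalloc/jemalloc.h>

      void query_cache_oblivious(void) {
          bool enabled;
          size_t sz = sizeof(enabled);
          /* Read-only query: oldp/oldlenp receive the value, newp is NULL. */
          if (mallctl("config.cache_oblivious", &enabled, &sz, NULL, 0) == 0)
              printf("cache-oblivious: %s\n", enabled ? "true" : "false");
      }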
- Micro-optimize the fast paths for the public API functions. - Refactor thread-specific data to reside in a single structure. This assures that only a single TLS read is necessary per call into the public API. - Implement in-place huge allocation growing and shrinking. - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make additional optimizations that reduce maximum lookup depth to one or two levels. This resolves what was a concurrency bottleneck for per arena huge allocation, because a global data structure is critical for determining which arenas own which huge allocations. Incompatible changes: - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious warnings by default. - Assure that the constness of malloc_usable_size()'s return type matches that of the system implementation. - Change the heap profile dump format to support per thread heap profiling, rename pprof to jeprof, and enhance it with the --thread=<n> option. As a result, the bundled jeprof must now be used rather than the upstream (gperftools) pprof. - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can internally deadlock on some platforms. - Change the "arenas.nlruns" mallctl type from size_t to unsigned. - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with "stats.arenas.<i>.bins.<j>.curregs". - Ignore MALLOC_CONF in set{uid,gid,cap} binaries. - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage. Removed features: - Remove the *allocm() API, which is superseded by the *allocx() API. - Remove the --enable-dss options, and make dss non-optional on all platforms which support sbrk(2). - Remove the "arenas.purge" mallctl, which was obsoleted by the "arena.<i>.purge" mallctl in 3.1.0. - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically detects whether it is running inside Valgrind. - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and "stats.huge.ndalloc" mallctls. - Remove the --enable-mremap option. - Remove the "stats.chunks.current", "stats.chunks.total", and "stats.chunks.high" mallctls. Bug fixes: - Fix the cactive statistic to decrease (rather than increase) when active memory decreases. This regression was first released in 3.5.0. - Fix OOM handling in memalign() and valloc(). A variant of this bug existed in all releases since 2.0.0, which introduced these functions. - Fix an OOM-related regression in arena_tcache_fill_small(), which could cause cache corruption on OOM. This regression was present in all releases from 2.2.0 through 3.6.0. - Fix size class overflow handling for malloc(), posix_memalign(), memalign(), calloc(), and realloc() when profiling is enabled. - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or "secondary" precedence is specified, but sbrk(2) is not supported. - Fix fallback lg_floor() implementations to handle extremely large inputs. - Ensure the default purgeable zone is after the default zone on OS X. - Fix latent bugs in atomic_*(). - Fix the "arena.<i>.dss" mallctl to handle read-only calls. - Fix tls_model configuration to enable the initial-exec model when possible. - Mark malloc_conf as a weak symbol so that the application can override it. - Correctly detect glibc's adaptive pthread mutexes. - Fix the --without-export configure option. * 3.6.0 (March 31, 2014) This version contains a critical bug fix for a regression present in 3.5.0 and 3.5.1.
Bug fixes: - Fix a regression in arena_chunk_alloc() that caused crashes during small/large allocation if chunk allocation failed. In the absence of this bug, chunk allocation failure would result in allocation failure, e.g. NULL return from malloc(). This regression was introduced in 3.5.0. - Fix backtracing for gcc intrinsics-based backtracing by specifying -fno-omit-frame-pointer to gcc. Note that the application (and all the libraries it links to) must also be compiled with this option for backtracing to be reliable. - Use dss allocation precedence for huge allocations as well as small/large allocations. - Fix test assertion failure message formatting. This bug did not manifest on x86_64 systems because of implementation subtleties in va_list. - Fix inconsequential test failures for hash and SFMT code. New features: - Support heap profiling on FreeBSD. This feature depends on the proc filesystem being mounted during heap profile dumping. * 3.5.1 (February 25, 2014) This version primarily addresses minor bugs in test code. Bug fixes: - Configure Solaris/Illumos to use MADV_FREE. - Fix junk filling for mremap(2)-based huge reallocation. This is only relevant if configuring with the --enable-mremap option specified. - Avoid compilation failure if 'restrict' C99 keyword is not supported by the compiler. - Add a configure test for SSE2 rather than assuming it is usable on i686 systems. This fixes test compilation errors, especially on 32-bit Linux systems. - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit test. - Fix/remove flawed alignment-related overflow tests. - Prevent compiler optimizations that could change backtraces in the prof_accum unit test. * 3.5.0 (January 22, 2014) This version focuses on refactoring and automated testing, though it also includes some non-trivial heap profiling optimizations not mentioned below. New features: - Add the *allocx() API, which is a successor to the experimental *allocm() API. The *allocx() functions are slightly simpler to use because they have fewer parameters, they directly return the results of primary interest, and mallocx()/rallocx() avoid the strict aliasing pitfall that allocm()/rallocm() share with posix_memalign(). Note that *allocm() is slated for removal in the next non-bugfix release. - Add support for LinuxThreads. Bug fixes: - Unless heap profiling is enabled, disable floating point code and don't link with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 systems, makes it possible to completely disable floating point register use. Some versions of glibc neglect to save/restore caller-saved floating point registers during dynamic lazy symbol loading, and the symbol loading code uses whatever malloc the application happens to have linked/loaded with, the result being potential floating point register corruption. - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling backtrace creation in imemalign(). This bug impacted posix_memalign() and aligned_alloc(). - Fix a file descriptor leak in a prof_dump_maps() error path. - Fix prof_dump() to close the dump file descriptor for all relevant error paths. - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for allocation, not just deallocation. - Fix a data race for large allocation stats counters. - Fix a potential infinite loop during thread exit. This bug occurred on Solaris, and could affect other platforms with similar pthreads TSD implementations. 
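  For reference, the *allocx() calling convention introduced in 3.5.0 above
  looks like this (a minimal sketch, assuming unprefixed symbols from
  <jemalloc/jemalloc.h>):

      #include <jemalloc/jemalloc.h>

      void allocx_usage(void) {
          /* 4 KiB-aligned, zeroed allocation; flags combine via bitwise OR. */
          void *p = mallocx(1000, MALLOCX_ALIGN(4096) | MALLOCX_ZERO);
          if (p == NULL) return;   /* Errors are reported as NULL returns. */
          /* Reallocate, preserving the alignment request. */
          void *q = rallocx(p, 2000, MALLOCX_ALIGN(4096));
          if (q != NULL) p = q;
          dallocx(p, 0);
      }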
- Don't junk-fill reallocations unless usable size changes. This fixes a violation of the *allocx()/*allocm() semantics. - Fix growing large reallocation to junk fill new space. - Fix huge deallocation to junk fill when munmap is disabled. - Change the default private namespace prefix from empty to je_, and change --with-private-namespace-prefix so that it prepends an additional prefix rather than replacing je_. This reduces the likelihood of applications which statically link jemalloc experiencing symbol name collisions. - Add missing private namespace mangling (relevant when --with-private-namespace is specified). - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as static even for debug builds. - Add a missing mutex unlock in a malloc_init_hard() error path. In practice this error path is never executed. - Fix numerous bugs in malloc_strtoumax() error handling/reporting. These bugs had no impact except for malformed inputs. - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by existing calls, so they had no impact. * 3.4.1 (October 20, 2013) Bug fixes: - Fix a race in the "arenas.extend" mallctl that could cause memory corruption of internal data structures and subsequent crashes. - Fix Valgrind integration flaws that caused Valgrind warnings about reads of uninitialized memory in: + arena chunk headers + internal zero-initialized data structures (relevant to tcache and prof code) - Preserve errno during the first allocation. A readlink(2) call during initialization fails unless /etc/malloc.conf exists, so errno was typically set during the first allocation prior to this fix. - Fix compilation warnings reported by gcc 4.8.1. * 3.4.0 (June 2, 2013) This version is essentially a small bugfix release, but the addition of aarch64 support requires that the minor version be incremented. Bug fixes: - Fix race-triggered deadlocks in chunk_record(). These deadlocks were typically triggered by multiple threads concurrently deallocating huge objects. New features: - Add support for the aarch64 architecture. * 3.3.1 (March 6, 2013) This version fixes bugs that are typically encountered only when utilizing custom run-time options. Bug fixes: - Fix a locking order bug that could cause deadlock during fork if heap profiling were enabled. - Fix a chunk recycling bug that could cause the allocator to lose track of whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause corruption if allocating via sbrk(2) (unlikely unless running with the "dss:primary" option specified). This was completely harmless on Linux unless using mlockall(2) (and unlikely even then, unless the --disable-munmap configure option or the "dss:primary" option was specified). This regression was introduced in 3.1.0 by the mlockall(2)/madvise(2) interaction fix. - Fix TLS-related memory corruption that could occur during thread exit if the thread never allocated memory. Only the quarantine and prof facilities were susceptible. - Fix two quarantine bugs: + Internal reallocation of the quarantined object array leaked the old array. + Reallocation failure for internal reallocation of the quarantined object array (very unlikely) resulted in memory corruption. - Fix Valgrind integration to annotate all internally allocated memory in a way that keeps Valgrind happy about internal data structure access. - Fix building for s390 systems. * 3.3.0 (January 23, 2013) This version includes a few minor performance improvements in addition to the listed new features and bug fixes.
New features: - Add clipping support to lg_chunk option processing. - Add the --enable-ivsalloc option. - Add the --without-export option. - Add the --disable-zone-allocator option. Bug fixes: - Fix "arenas.extend" mallctl to output the number of arenas. - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory is undefined. - Fix build break on FreeBSD related to alloca.h. * 3.2.0 (November 9, 2012) In addition to a couple of bug fixes, this version modifies page run allocation and dirty page purging algorithms in order to better control page-level virtual memory fragmentation. Incompatible changes: - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). Bug fixes: - Fix dss/mmap allocation precedence code to use recyclable mmap memory only after primary dss allocation fails. - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced in 3.1.0 by the addition of the "arena.<i>.purge" mallctl. * 3.1.0 (October 16, 2012) New features: - Auto-detect whether running inside Valgrind, thus removing the need to manually specify MALLOC_CONF=valgrind:true. - Add the "arenas.extend" mallctl, which allows applications to create manually managed arenas. - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls, which provide control over dss/mmap precedence. - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge". - Define LG_QUANTUM for hppa. Incompatible changes: - Disable tcache by default if running inside Valgrind, in order to avoid making unallocated objects appear reachable to Valgrind. - Drop const from malloc_usable_size() argument on Linux. Bug fixes: - Fix heap profiling crash if sampled object is freed via realloc(p, 0). - Remove const from __*_hook variable declarations, so that glibc can modify them during process forking. - Fix mlockall(2)/madvise(2) interaction. - Fix fork(2)-related deadlocks. - Fix error return value for "thread.tcache.enabled" mallctl. * 3.0.0 (May 11, 2012) Although this version adds some major new features, the primary focus is on internal code cleanup that facilitates maintainability and portability, most of which is not reflected in the ChangeLog. This is the first release to incorporate substantial contributions from numerous other developers, and the result is a more broadly useful allocator (see the git revision history for contribution details). Note that the license has been unified, thanks to Facebook granting a license under the same terms as the other copyright holders (see COPYING). New features: - Implement Valgrind support, redzones, and quarantine. - Add support for additional platforms: + FreeBSD + Mac OS X Lion + MinGW + Windows (no support yet for replacing the system malloc) - Add support for additional architectures: + MIPS + SH4 + Tilera - Add support for cross compiling. - Add nallocm(), which rounds a request size up to the nearest size class without actually allocating. - Implement aligned_alloc() (blame C11). - Add the "thread.tcache.enabled" mallctl. - Add the "opt.prof_final" mallctl. - Update pprof (from gperftools 2.0). - Add the --with-mangling option. - Add the --disable-experimental option. - Add the --disable-munmap option, and make it the default on Linux. - Add the --enable-mremap option, which disables use of mremap(2) by default. Incompatible changes: - Enable stats by default. - Enable fill by default. - Disable lazy locking by default. - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
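  Void-typed mallctls such as the renamed "thread.tcache.flush" take
  neither input nor output arguments; a minimal sketch, assuming unprefixed
  symbols from <jemalloc/jemalloc.h>:

      #include <jemalloc/jemalloc.h>

      void flush_thread_tcache(void) {
          /* Release this thread's cached objects back to the arenas. */
          mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
      }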
- Rename the "arenas.pagesize" mallctl to "arenas.page". - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). - Change the "opt.prof_accum" default from true to false. Removed features: - Remove the swap feature, including the "config.swap", "swap.avail", "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. - Remove highruns statistics, including the "stats.arenas..bins..highruns" and "stats.arenas..lruns..highruns" mallctls. - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and "arenas.[tqcs]bins" mallctls. - Remove the "arenas.chunksize" mallctl. - Remove the "opt.lg_prof_tcmax" option. - Remove the "opt.lg_prof_bt_max" option. - Remove the "opt.lg_tcache_gc_sweep" option. - Remove the --disable-tiny option, including the "config.tiny" mallctl. - Remove the --enable-dynamic-page-shift configure option. - Remove the --enable-sysv configure option. Bug fixes: - Fix a statistics-related bug in the "thread.arena" mallctl that could cause invalid statistics and crashes. - Work around TLS deallocation via free() on Linux. This bug could cause write-after-free memory corruption. - Fix a potential deadlock that could occur during interval- and growth-triggered heap profile dumps. - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags. - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could cause memory corruption and crashes with --enable-dss specified. - Fix fork-related bugs that could cause deadlock in children between fork and exec. - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. - Fix realloc(p, 0) to act like free(p). - Do not enforce minimum alignment in memalign(). - Check for NULL pointer in malloc_usable_size(). - Fix an off-by-one heap profile statistics bug that could be observed in interval- and growth-triggered heap profiles. - Fix the "epoch" mallctl to update cached stats even if the passed in epoch is 0. - Fix bin->runcur management to fix a layout policy bug. This bug did not affect correctness. - Fix a bug in choose_arena_hard() that potentially caused more arenas to be initialized than necessary. - Add missing "opt.lg_tcache_max" mallctl implementation. - Use glibc allocator hooks to make mixed allocator usage less likely. - Fix build issues for --disable-tcache. - Don't mangle pthread_create() when --with-private-namespace is specified. * 2.2.5 (November 14, 2011) Bug fixes: - Fix huge_ralloc() race when using mremap(2). This is a serious bug that could cause memory corruption and/or crashes. - Fix huge_ralloc() to maintain chunk statistics. - Fix malloc_stats_print(..., "a") output. * 2.2.4 (November 5, 2011) Bug fixes: - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as well as for --disable-tls builds in earlier releases. - Do not assume a 4 KiB page size in test/rallocm.c. * 2.2.3 (August 31, 2011) This version fixes numerous bugs related to heap profiling. Bug fixes: - Fix a prof-related race condition. This bug could cause memory corruption, but only occurred in non-default configurations (prof_accum:false). - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is excluded from backtraces). - Fix a prof-related bug in realloc() (only triggered by OOM errors). - Fix prof-related bugs in allocm() and rallocm(). - Fix prof_tdata_cleanup() for --disable-tls builds. - Fix a relative include path, to fix objdir builds. 
* 2.2.2 (July 30, 2011) Bug fixes: - Fix a build error for --disable-tcache. - Fix assertions in arena_purge() (for real this time). - Add the --with-private-namespace option. This is a workaround for symbol conflicts that can inadvertently arise when using static libraries. * 2.2.1 (March 30, 2011) Bug fixes: - Implement atomic operations for x86/x64. This fixes compilation failures for versions of gcc that are still in wide use. - Fix an assertion in arena_purge(). * 2.2.0 (March 22, 2011) This version incorporates several improvements to algorithms and data structures that tend to reduce fragmentation and increase speed. New features: - Add the "stats.cactive" mallctl. - Update pprof (from google-perftools 1.7). - Improve backtracing-related configuration logic, and add the --disable-prof-libgcc option. Bug fixes: - Change default symbol visibility from "internal", to "hidden", which decreases the overhead of library-internal function calls. - Fix symbol visibility so that it is also set on OS X. - Fix a build dependency regression caused by the introduction of the .pic.o suffix for PIC object files. - Add missing checks for mutex initialization failures. - Don't use libgcc-based backtracing except on x64, where it is known to work. - Fix deadlocks on OS X that were due to memory allocation in pthread_mutex_lock(). - Heap profiling-specific fixes: + Fix memory corruption due to integer overflow in small region index computation, when using a small enough sample interval that profiling context pointers are stored in small run headers. + Fix a bootstrap ordering bug that only occurred with TLS disabled. + Fix a rallocm() rsize bug. + Fix error detection bugs for aligned memory allocation. * 2.1.3 (March 14, 2011) Bug fixes: - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix for OS X in 2.1.2). - Fix a "thread.arena" mallctl bug. - Fix a thread cache stats merging bug. * 2.1.2 (March 2, 2011) Bug fixes: - Fix "thread.{de,}allocatedp" mallctl for OS X. - Add missing jemalloc.a to build system. * 2.1.1 (January 31, 2011) Bug fixes: - Fix aligned huge reallocation (affected allocm()). - Fix the ALLOCM_LG_ALIGN macro definition. - Fix a heap dumping deadlock. - Fix a "thread.arena" mallctl bug. * 2.1.0 (December 3, 2010) This version incorporates some optimizations that can't quite be considered bug fixes. New features: - Use Linux's mremap(2) for huge object reallocation when possible. - Avoid locking in mallctl*() when possible. - Add the "thread.[de]allocatedp" mallctl's. - Convert the manual page source from roff to DocBook, and generate both roff and HTML manuals. Bug fixes: - Fix a crash due to incorrect bootstrap ordering. This only impacted --enable-debug --enable-dss configurations. - Fix a minor statistics bug for mallctl("swap.avail", ...). * 2.0.1 (October 29, 2010) Bug fixes: - Fix a race condition in heap profiling that could cause undefined behavior if "opt.prof_accum" were disabled. - Add missing mutex unlocks for some OOM error paths in the heap profiling code. - Fix a compilation error for non-C99 builds. * 2.0.0 (October 24, 2010) This version focuses on the experimental *allocm() API, and on improved run-time configuration/introspection. Nonetheless, numerous performance improvements are also included. New features: - Implement the experimental {,r,s,d}allocm() API, which provides a superset of the functionality available via malloc(), calloc(), posix_memalign(), realloc(), malloc_usable_size(), and free(). 
These functions can be used to allocate/reallocate aligned zeroed memory, ask for optional extra memory during reallocation, prevent object movement during reallocation, etc. - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is more human-readable, and more flexible. For example: JEMALLOC_OPTIONS=AJP is now: MALLOC_CONF=abort:true,fill:true,stats_print:true - Port to Apple OS X. Sponsored by Mozilla. - Make it possible for the application to control thread-->arena mappings via the "thread.arena" mallctl. - Add compile-time support for all TLS-related functionality via pthreads TSD. This is mainly of interest for OS X, which does not support TLS, but has a TSD implementation with similar performance. - Override memalign() and valloc() if they are provided by the system. - Add the "arenas.purge" mallctl, which can be used to synchronously purge all dirty unused pages. - Make cumulative heap profiling data optional, so that it is possible to limit the amount of memory consumed by heap profiling data structures. - Add per thread allocation counters that can be accessed via the "thread.allocated" and "thread.deallocated" mallctls. Incompatible changes: - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). - Increase default backtrace depth from 4 to 128 for heap profiling. - Disable interval-based profile dumps by default. Bug fixes: - Remove bad assertions in fork handler functions. These assertions could cause aborts for some combinations of configure settings. - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. - Fix leak context reporting. This bug tended to cause the number of contexts to be underreported (though the reported number of objects and bytes were correct). - Fix a realloc() bug for large in-place growing reallocation. This bug could cause memory corruption, but it was hard to trigger. - Fix an allocation bug for small allocations that could be triggered if multiple threads raced to create a new run of backing pages. - Enhance the heap profiler to trigger samples based on usable size, rather than request size. - Fix a heap profiling bug due to sometimes losing track of requested object size for sampled objects. * 1.0.3 (August 12, 2010) Bug fixes: - Fix the libunwind-based implementation of stack backtracing (used for heap profiling). This bug could cause zero-length backtraces to be reported. - Add a missing mutex unlock in library initialization code. If multiple threads raced to initialize malloc, some of them could end up permanently blocked. * 1.0.2 (May 11, 2010) Bug fixes: - Fix junk filling of large objects, which could cause memory corruption. - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual memory limits could cause swap file configuration to fail. Contributed by Jordan DeLong. * 1.0.1 (April 14, 2010) Bug fixes: - Fix compilation when --enable-fill is specified. - Fix threads-related profiling bugs that affected accuracy and caused memory to be leaked during thread exit. - Fix dirty page purging race conditions that could cause crashes. - Fix crash in tcache flushing code during thread destruction. * 1.0.0 (April 11, 2010) This release focuses on speed and run-time introspection. Numerous algorithmic improvements make this release substantially faster than its predecessors. New features: - Implement autoconf-based configuration system. - Add mallctl*(), for the purposes of introspection and run-time configuration. 
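  All mallctl*() reads and writes go through a single entry point keyed by
  period-separated names; a minimal sketch using the long-standing
  "arenas.narenas" name, assuming unprefixed symbols:

      #include <stdio.h>
      #include <jemalloc/jemalloc.h>

      void introspect(void) {
          unsigned narenas;
          size_t sz = sizeof(narenas);
          /* Read a value: oldp/oldlenp receive it; newp is NULL for reads. */
          if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0)
              printf("arenas: %u\n", narenas);
      }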
- Make it possible for the application to manually flush a thread's cache, via the "tcache.flush" mallctl. - Base maximum dirty page count on proportion of active memory. - Compute various additional run-time statistics, including per size class statistics for large objects. - Expose malloc_stats_print(), which can be called repeatedly by the application. - Simplify the malloc_message() signature to only take one string argument, and incorporate an opaque data pointer argument for use by the application in combination with malloc_stats_print(). - Add support for allocation backed by one or more swap files, and allow the application to disable over-commit if swap files are in use. - Implement allocation profiling and leak checking. Removed features: - Remove the dynamic arena rebalancing code, since thread-specific caching reduces its utility. Bug fixes: - Modify chunk allocation to work when address space layout randomization (ASLR) is in use. - Fix thread cleanup bugs related to TLS destruction. - Handle 0-size allocation requests in posix_memalign(). - Fix a chunk leak. The leaked chunks were never touched, so this impacted virtual memory usage, but not physical memory usage. * linux_2008082[78]a (August 27/28, 2008) These snapshot releases are the simple result of incorporating Linux-specific support into the FreeBSD malloc sources. -------------------------------------------------------------------------------- vim:filetype=text:textwidth=80 jemalloc-sys-0.3.2/rep/INSTALL.md010064400007650000024000000347141344617474000145760ustar0000000000000000Building and installing a packaged release of jemalloc can be as simple as typing the following while in the root directory of the source tree: ./configure make make install If building from unpackaged developer sources, the simplest command sequence that might work is: ./autogen.sh make dist make make install Note that documentation is not built by the default target because doing so would create a dependency on xsltproc in packaged releases, hence the requirement to either run 'make dist' or avoid installing docs via the various install_* targets documented below. ## Advanced configuration The 'configure' script supports numerous options that allow control of which functionality is enabled, where jemalloc is installed, etc. Optionally, pass any of the following arguments (not a definitive list) to 'configure': * `--help` Print a definitive list of options. * `--prefix=<install-root-dir>` Set the base directory in which to install. For example: ./configure --prefix=/usr/local will cause files to be installed into /usr/local/include, /usr/local/lib, and /usr/local/man. * `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)` The VERSION file is mandatory for successful configuration, and the following steps are taken to assure its presence: 1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified, generate VERSION using the specified value. 2) If --with-version is not specified in either form and the source directory is inside a git repository, try to generate VERSION via 'git describe' invocations that pattern-match release tags. 3) If VERSION is missing, generate it with a bogus version: 0.0.0-0-g0000000000000000000000000000000000000000 Note that --with-version=VERSION bypasses (1) and (2), which simplifies VERSION configuration when embedding a jemalloc release into another project's git repository. * `--with-rpath=<colon-separated-rpath>` Embed one or more library paths, so that libjemalloc can find the libraries it is linked to. This works only on ELF-based systems.
* `--with-mangling=<map>` Mangle public symbols specified in <map> which is a comma-separated list of name:mangled pairs. For example, to use ld's --wrap option as an alternative method for overriding libc's malloc implementation, specify something like: --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...] Note that mangling happens prior to application of the prefix specified by --with-jemalloc-prefix, and mangled symbols are then ignored when applying the prefix. * `--with-jemalloc-prefix=<prefix>` Prefix all public APIs with <prefix>. For example, if <prefix> is "prefix_", API changes like the following occur: malloc() --> prefix_malloc() malloc_conf --> prefix_malloc_conf /etc/malloc.conf --> /etc/prefix_malloc.conf MALLOC_CONF --> PREFIX_MALLOC_CONF This makes it possible to use jemalloc at the same time as the system allocator, or even to use multiple copies of jemalloc simultaneously. By default, the prefix is "", except on OS X, where it is "je_". On OS X, jemalloc overlays the default malloc zone, but makes no attempt to actually replace the "malloc", "calloc", etc. symbols. * `--without-export` Don't export public APIs. This can be useful when building jemalloc as a static library, or to avoid exporting public APIs when using the zone allocator on OSX. * `--with-private-namespace=<prefix>` Prefix all library-private APIs with <prefix>je_. For shared libraries, symbol visibility mechanisms prevent these symbols from being exported, but for static libraries, naming collisions are a real possibility. By default, <prefix> is empty, which results in a symbol prefix of je_ . * `--with-install-suffix=<suffix>` Append <suffix> to the base name of all installed files, such that multiple versions of jemalloc can coexist in the same installation directory. For example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0. * `--with-malloc-conf=<malloc-conf>` Embed `<malloc-conf>` as a run-time options string that is processed prior to the malloc_conf global variable, the /etc/malloc.conf symlink, and the MALLOC_CONF environment variable. For example, to change the default decay time to 30 seconds: --with-malloc-conf=decay_ms:30000 * `--enable-debug` Enable assertions and validation code. This incurs a substantial performance hit, but is very useful during application development. * `--disable-stats` Disable statistics gathering functionality. See the "opt.stats_print" option documentation for usage details. * `--enable-prof` Enable heap profiling and leak detection functionality. See the "opt.prof" option documentation for usage details. When enabled, there are several approaches to backtracing, and the configure script chooses the first one in the following list that appears to function correctly: + libunwind (requires --enable-prof-libunwind) + libgcc (unless --disable-prof-libgcc) + gcc intrinsics (unless --disable-prof-gcc) * `--enable-prof-libunwind` Use the libunwind library (http://www.nongnu.org/libunwind/) for stack backtracing. * `--disable-prof-libgcc` Disable the use of libgcc's backtracing functionality. * `--disable-prof-gcc` Disable the use of gcc intrinsics for backtracing. * `--with-static-libunwind=<libunwind.a>` Statically link against the specified libunwind.a rather than dynamically linking with -lunwind. * `--disable-fill` Disable support for junk/zero filling of memory. See the "opt.junk" and "opt.zero" option documentation for usage details. * `--disable-zone-allocator` Disable zone allocator for Darwin. This means jemalloc won't be hooked as the default allocator on OSX/iOS. * `--enable-utrace` Enable utrace(2)-based allocation tracing.
This feature is not broadly portable (FreeBSD has it, but Linux and OS X do not). * `--enable-xmalloc` Enable support for optional immediate termination due to out-of-memory errors, as is commonly implemented by "xmalloc" wrapper functions for malloc. See the "opt.xmalloc" option documentation for usage details. * `--enable-lazy-lock` Enable code that wraps pthread_create() to detect when an application switches from single-threaded to multi-threaded mode, so that it can avoid mutex locking/unlocking operations while in single-threaded mode. In practice, this feature usually has little impact on performance unless thread-specific caching is disabled. * `--disable-cache-oblivious` Disable cache-oblivious large allocation alignment for large allocation requests with no alignment constraints. If this feature is disabled, all large allocations are page-aligned as an implementation artifact, which can severely harm CPU cache utilization. However, the cache-oblivious layout comes at the cost of one extra page per large allocation, which in the most extreme case increases physical memory usage for the 16 KiB size class to 20 KiB. * `--disable-syscall` Disable use of syscall(2) rather than {open,read,write,close}(2). This is intended as a workaround for systems that place security limitations on syscall(2). * `--disable-cxx` Disable C++ integration. This will cause new and delete operator implementations to be omitted. * `--with-xslroot=<path>` Specify where to find DocBook XSL stylesheets when building the documentation. * `--with-lg-page=<lg-page>` Specify the base 2 log of the allocator page size, which must in turn be at least as large as the system page size. By default the configure script determines the host's page size and sets the allocator page size equal to the system page size, so this option need not be specified unless the system page size may change between configuration and execution, e.g. when cross compiling. * `--with-lg-hugepage=<lg-hugepage>` Specify the base 2 log of the system huge page size. This option is useful when cross compiling, or when overriding the default for systems that do not explicitly support huge pages. * `--with-lg-quantum=<lg-quantum>` Specify the base 2 log of the minimum allocation alignment. jemalloc needs to know the minimum alignment that meets the following C standard requirement (quoted from the April 12, 2011 draft of the C11 standard): > The pointer returned if the allocation succeeds is suitably aligned so that it may be assigned to a pointer to any type of object with a fundamental alignment requirement and then used to access such an object or an array of such objects in the space allocated [...] This setting is architecture-specific, and although jemalloc includes known safe values for the most commonly used modern architectures, there is a wrinkle related to GNU libc (glibc) that may impact your choice of <lg-quantum>. On most modern architectures, this mandates 16-byte alignment (<lg-quantum>=4), but the glibc developers chose not to meet this requirement for performance reasons. An old discussion can be found at https://sourceware.org/bugzilla/show_bug.cgi?id=206 . Unlike glibc, jemalloc does follow the C standard by default (caveat: jemalloc technically cheats for size classes smaller than the quantum), but the fact that Linux systems already work around this allocator noncompliance means that it is generally safe in practice to let jemalloc's minimum alignment follow glibc's lead. If you specify `--with-lg-quantum=3` during configuration, jemalloc will provide additional size classes that are not 16-byte-aligned (24, 40, and 56).
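The quantum actually chosen at configure time can be verified at run time via the "arenas.quantum" mallctl; a minimal C sketch, assuming jemalloc was built without --with-jemalloc-prefix so that symbols are unprefixed:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t quantum;
        size_t sz = sizeof(quantum);
        /* Read the minimum allocation alignment the allocator was built with. */
        if (mallctl("arenas.quantum", &quantum, &sz, NULL, 0) == 0)
            printf("quantum: %zu bytes\n", quantum);
        return 0;
    }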
* `--with-lg-vaddr=<lg-vaddr>` Specify the number of significant virtual address bits. By default, the configure script attempts to detect virtual address size on those platforms where it knows how, and picks a default otherwise. This option may be useful when cross-compiling. * `--disable-initial-exec-tls` Disable the initial-exec TLS model for jemalloc's internal thread-local storage (on those platforms that support explicit settings). This can allow jemalloc to be dynamically loaded after program startup (e.g. using dlopen). Note that in this case, there will be two malloc implementations operating in the same process, which will almost certainly result in confusing runtime crashes if pointers leak from one implementation to the other. * `--disable-libdl` Disable the usage of libdl, namely dlsym(3) which is required by the lazy lock option. This can allow building static binaries. The following environment variables (not a definitive list) impact configure's behavior: * `CFLAGS="?"` * `CXXFLAGS="?"` Pass these flags to the C/C++ compiler. Any flags set by the configure script are prepended, which means explicitly set flags generally take precedence. Take care when specifying flags such as -Werror, because configure tests may be affected in undesirable ways. * `EXTRA_CFLAGS="?"` * `EXTRA_CXXFLAGS="?"` Append these flags to CFLAGS/CXXFLAGS, without passing them to the compiler(s) during configuration. This makes it possible to add flags such as -Werror, while allowing the configure script to determine what other flags are appropriate for the specified configuration. * `CPPFLAGS="?"` Pass these flags to the C preprocessor. Note that CFLAGS is not passed to 'cpp' when 'configure' is looking for include files, so you must use CPPFLAGS instead if you need to help 'configure' find header files. * `LD_LIBRARY_PATH="?"` 'ld' uses this colon-separated list to find libraries. * `LDFLAGS="?"` Pass these flags when linking. * `PATH="?"` 'configure' uses this to find programs. In some cases it may be necessary to work around configuration results that do not match reality. For example, Linux 4.5 added support for the MADV_FREE flag to madvise(2), which can cause problems if building on a host with MADV_FREE support and deploying to a target without. To work around this, use a cache file to override the relevant configuration variable defined in configure.ac, e.g.: echo "je_cv_madv_free=no" > config.cache && ./configure -C ## Advanced compilation To build only parts of jemalloc, use the following targets: build_lib_shared build_lib_static build_lib build_doc_html build_doc_man build_doc To install only parts of jemalloc, use the following targets: install_bin install_include install_lib_shared install_lib_static install_lib_pc install_lib install_doc_html install_doc_man install_doc To clean up build results to varying degrees, use the following make targets: clean distclean relclean ## Advanced installation Optionally, define make variables when invoking make, including (not exclusively): * `INCLUDEDIR="?"` Use this as the installation prefix for header files. * `LIBDIR="?"` Use this as the installation prefix for libraries. * `MANDIR="?"` Use this as the installation prefix for man pages. * `DESTDIR="?"` Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful when installing to a different path than was specified via --prefix. * `CC="?"` Use this to invoke the C compiler. * `CFLAGS="?"` Pass these flags to the compiler. * `CPPFLAGS="?"` Pass these flags to the C preprocessor.
* `LDFLAGS="?"` Pass these flags when linking. * `PATH="?"` Use this to search for programs used during configuration and building. ## Development If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh' script rather than 'configure'. This re-generates 'configure', enables configuration dependency rules, and enables re-generation of automatically generated source files. The build system supports using an object directory separate from the source tree. For example, you can create an 'obj' directory, and from within that directory, issue configuration and build commands: autoconf mkdir obj cd obj ../configure --enable-autogen make ## Documentation The manual page is generated in both html and roff formats. Any web browser can be used to view the html manual. The roff manual page can be formatted prior to installation via the following command: nroff -man -t doc/jemalloc.3 jemalloc-sys-0.3.2/rep/Makefile010064400007650000024000000564771344617502700146170ustar0000000000000000# Clear out all vpaths, then set just one (default vpath) for the main build # directory. vpath vpath % . # Clear the default suffixes, so that built-in rules are not used. .SUFFIXES : SHELL := /bin/sh CC := /usr/bin/clang CXX := /usr/bin/clang++ -std=c++14 # Configuration parameters. DESTDIR = BINDIR := $(DESTDIR)/usr/local/bin INCLUDEDIR := $(DESTDIR)/usr/local/include LIBDIR := $(DESTDIR)/usr/local/lib DATADIR := $(DESTDIR)/usr/local/share MANDIR := $(DESTDIR)/usr/local/share/man srcroot := objroot := abs_srcroot := /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ abs_objroot := /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ # Build parameters. CPPFLAGS := -D_REENTRANT -I$(objroot)include -I$(srcroot)include CONFIGURE_CFLAGS := -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops SPECIFIED_CFLAGS := EXTRA_CFLAGS := CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS)) CONFIGURE_CXXFLAGS := -Wall -Wextra -g3 -O3 SPECIFIED_CXXFLAGS := EXTRA_CXXFLAGS := CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS)) LDFLAGS := EXTRA_LDFLAGS := LIBS := -lstdc++ -pthread RPATH_EXTRA := SO := dylib IMPORTLIB := dylib O := o A := a EXE := LIBPREFIX := lib REV := 2 install_suffix := ABI := macho XSLTPROC := /usr/bin/xsltproc XSLROOT := AUTOCONF := /usr/local/bin/autoconf _RPATH = RPATH = $(if $(1),$(call _RPATH,$(1))) cfghdrs_in := $(addprefix $(srcroot),include/jemalloc/jemalloc_defs.h.in include/jemalloc/internal/jemalloc_internal_defs.h.in include/jemalloc/internal/private_symbols.sh include/jemalloc/internal/private_namespace.sh include/jemalloc/internal/public_namespace.sh include/jemalloc/internal/public_unnamespace.sh include/jemalloc/jemalloc_rename.sh include/jemalloc/jemalloc_mangle.sh include/jemalloc/jemalloc.sh test/include/test/jemalloc_test_defs.h.in) cfghdrs_out := include/jemalloc/jemalloc_defs.h include/jemalloc/jemalloc.h include/jemalloc/internal/private_symbols.awk include/jemalloc/internal/private_symbols_jet.awk include/jemalloc/internal/public_symbols.txt include/jemalloc/internal/public_namespace.h include/jemalloc/internal/public_unnamespace.h include/jemalloc/jemalloc_protos_jet.h include/jemalloc/jemalloc_rename.h include/jemalloc/jemalloc_mangle.h include/jemalloc/jemalloc_mangle_jet.h include/jemalloc/internal/jemalloc_internal_defs.h test/include/test/jemalloc_test_defs.h cfgoutputs_in := $(addprefix $(srcroot),Makefile.in 
jemalloc.pc.in doc/html.xsl.in doc/manpages.xsl.in doc/jemalloc.xml.in include/jemalloc/jemalloc_macros.h.in include/jemalloc/jemalloc_protos.h.in include/jemalloc/jemalloc_typedefs.h.in include/jemalloc/internal/jemalloc_preamble.h.in test/test.sh.in test/include/test/jemalloc_test.h.in) cfgoutputs_out := Makefile jemalloc.pc doc/html.xsl doc/manpages.xsl doc/jemalloc.xml include/jemalloc/jemalloc_macros.h include/jemalloc/jemalloc_protos.h include/jemalloc/jemalloc_typedefs.h include/jemalloc/internal/jemalloc_preamble.h test/test.sh test/include/test/jemalloc_test.h enable_autogen := 0 enable_shared := 1 enable_static := 1 enable_prof := 0 enable_zone_allocator := 1 enable_experimental_smallocx := 0 MALLOC_CONF := JE_MALLOC_CONF link_whole_archive := 0 DSO_LDFLAGS = -shared -Wl,-install_name,$(LIBDIR)/$(@F) SOREV = 2.dylib PIC_CFLAGS = -fPIC -DPIC CTARGET = -o $@ LDTARGET = -o $@ TEST_LD_MODE = MKLIB = AR = ar ARFLAGS = crus DUMP_SYMS = nm -a AWK := gawk CC_MM = 1 LM := INSTALL = /usr/local/bin/ginstall -c ifeq (macho, $(ABI)) TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" else ifeq (pecoff, $(ABI)) TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" else TEST_LIBRARY_PATH := endif endif LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) # Lists of files. BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/arena.c \ $(srcroot)src/background_thread.c \ $(srcroot)src/base.c \ $(srcroot)src/bin.c \ $(srcroot)src/bitmap.c \ $(srcroot)src/ckh.c \ $(srcroot)src/ctl.c \ $(srcroot)src/div.c \ $(srcroot)src/extent.c \ $(srcroot)src/extent_dss.c \ $(srcroot)src/extent_mmap.c \ $(srcroot)src/hash.c \ $(srcroot)src/hook.c \ $(srcroot)src/large.c \ $(srcroot)src/log.c \ $(srcroot)src/malloc_io.c \ $(srcroot)src/mutex.c \ $(srcroot)src/mutex_pool.c \ $(srcroot)src/nstime.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ $(srcroot)src/rtree.c \ $(srcroot)src/stats.c \ $(srcroot)src/sc.c \ $(srcroot)src/sz.c \ $(srcroot)src/tcache.c \ $(srcroot)src/test_hooks.c \ $(srcroot)src/ticker.c \ $(srcroot)src/tsd.c \ $(srcroot)src/witness.c ifeq ($(enable_zone_allocator), 1) C_SRCS += $(srcroot)src/zone.c endif ifeq ($(IMPORTLIB),$(SO)) STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) endif ifdef PIC_CFLAGS STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) else STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) endif DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) ifneq ($(SOREV),$(SO)) DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) endif ifeq (1, $(link_whole_archive)) LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive else LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) endif PC := $(objroot)jemalloc.pc MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html) DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3) DOCS := $(DOCS_HTML) $(DOCS_MAN3) C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c ifeq (1, $(link_whole_archive)) C_UTIL_INTEGRATION_SRCS := C_UTIL_CPP_SRCS := else C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c 
$(srcroot)src/malloc_io.c C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c endif TESTS_UNIT := \ $(srcroot)test/unit/a0.c \ $(srcroot)test/unit/arena_reset.c \ $(srcroot)test/unit/atomic.c \ $(srcroot)test/unit/background_thread.c \ $(srcroot)test/unit/background_thread_enable.c \ $(srcroot)test/unit/base.c \ $(srcroot)test/unit/bitmap.c \ $(srcroot)test/unit/bit_util.c \ $(srcroot)test/unit/binshard.c \ $(srcroot)test/unit/ckh.c \ $(srcroot)test/unit/decay.c \ $(srcroot)test/unit/div.c \ $(srcroot)test/unit/emitter.c \ $(srcroot)test/unit/extent_quantize.c \ $(srcroot)test/unit/fork.c \ $(srcroot)test/unit/hash.c \ $(srcroot)test/unit/hook.c \ $(srcroot)test/unit/huge.c \ $(srcroot)test/unit/junk.c \ $(srcroot)test/unit/junk_alloc.c \ $(srcroot)test/unit/junk_free.c \ $(srcroot)test/unit/log.c \ $(srcroot)test/unit/mallctl.c \ $(srcroot)test/unit/malloc_io.c \ $(srcroot)test/unit/math.c \ $(srcroot)test/unit/mq.c \ $(srcroot)test/unit/mtx.c \ $(srcroot)test/unit/pack.c \ $(srcroot)test/unit/pages.c \ $(srcroot)test/unit/ph.c \ $(srcroot)test/unit/prng.c \ $(srcroot)test/unit/prof_accum.c \ $(srcroot)test/unit/prof_active.c \ $(srcroot)test/unit/prof_gdump.c \ $(srcroot)test/unit/prof_idump.c \ $(srcroot)test/unit/prof_log.c \ $(srcroot)test/unit/prof_reset.c \ $(srcroot)test/unit/prof_tctx.c \ $(srcroot)test/unit/prof_thread_name.c \ $(srcroot)test/unit/ql.c \ $(srcroot)test/unit/qr.c \ $(srcroot)test/unit/rb.c \ $(srcroot)test/unit/retained.c \ $(srcroot)test/unit/rtree.c \ $(srcroot)test/unit/seq.c \ $(srcroot)test/unit/SFMT.c \ $(srcroot)test/unit/sc.c \ $(srcroot)test/unit/size_classes.c \ $(srcroot)test/unit/slab.c \ $(srcroot)test/unit/smoothstep.c \ $(srcroot)test/unit/spin.c \ $(srcroot)test/unit/stats.c \ $(srcroot)test/unit/stats_print.c \ $(srcroot)test/unit/test_hooks.c \ $(srcroot)test/unit/ticker.c \ $(srcroot)test/unit/nstime.c \ $(srcroot)test/unit/tsd.c \ $(srcroot)test/unit/witness.c \ $(srcroot)test/unit/zero.c ifeq (0, 1) TESTS_UNIT += \ $(srcroot)test/unit/arena_reset_prof.c endif TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ $(srcroot)test/integration/allocated.c \ $(srcroot)test/integration/extent.c \ $(srcroot)test/integration/malloc.c \ $(srcroot)test/integration/mallocx.c \ $(srcroot)test/integration/MALLOCX_ARENA.c \ $(srcroot)test/integration/overflow.c \ $(srcroot)test/integration/posix_memalign.c \ $(srcroot)test/integration/rallocx.c \ $(srcroot)test/integration/sdallocx.c \ $(srcroot)test/integration/slab_sizes.c \ $(srcroot)test/integration/thread_arena.c \ $(srcroot)test/integration/thread_tcache_enabled.c \ $(srcroot)test/integration/xallocx.c ifeq (0, 1) TESTS_INTEGRATION += \ $(srcroot)test/integration/smallocx.c endif ifeq (1, 1) CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp else CPP_SRCS := TESTS_INTEGRATION_CPP := endif TESTS_STRESS := $(srcroot)test/stress/microbench.c \ $(srcroot)test/stress/hookbench.c TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS) PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h) C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O)) C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym) C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O)) C_PIC_OBJS := 
$(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O)) C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O)) C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym) C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O)) C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS) TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O)) TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS) .PHONY: all dist build_doc_html build_doc_man build_doc .PHONY: install_bin install_include install_lib .PHONY: install_doc_html install_doc_man install_doc install .PHONY: tests check clean distclean relclean .SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS) # Default target. all: build_lib dist: build_doc $(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl ifneq ($(XSLROOT),) $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< else ifeq ($(wildcard $(DOCS_HTML)),) @echo "
<p>Missing xsltproc. Doc not built.</p>
" > $@ endif @echo "Missing xsltproc. "$@" not (re)built." endif $(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl ifneq ($(XSLROOT),) $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< else ifeq ($(wildcard $(DOCS_MAN3)),) @echo "Missing xsltproc. Doc not built." > $@ endif @echo "Missing xsltproc. "$@" not (re)built." endif build_doc_html: $(DOCS_HTML) build_doc_man: $(DOCS_MAN3) build_doc: $(DOCS) # # Include generated dependency files. # ifdef CC_MM -include $(C_SYM_OBJS:%.$(O)=%.d) -include $(C_OBJS:%.$(O)=%.d) -include $(CPP_OBJS:%.$(O)=%.d) -include $(C_PIC_OBJS:%.$(O)=%.d) -include $(CPP_PIC_OBJS:%.$(O)=%.d) -include $(C_JET_SYM_OBJS:%.$(O)=%.d) -include $(C_JET_OBJS:%.$(O)=%.d) -include $(C_TESTLIB_OBJS:%.$(O)=%.d) -include $(TESTS_OBJS:%.$(O)=%.d) -include $(TESTS_CPP_OBJS:%.$(O)=%.d) endif $(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c $(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE $(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O) $(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c $(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp $(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c $(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) $(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp $(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS) $(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c $(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE $(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O) $(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c $(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET $(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c $(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB $(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST $(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST $(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c $(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp $(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include ifneq ($(IMPORTLIB),$(SO)) $(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT endif # Dependencies. 
ifndef CC_MM HEADER_DIRS = $(srcroot)include/jemalloc/internal \ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))) $(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h endif $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h $(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): @mkdir -p $(@D) $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif $(C_SYMS): %.sym: @mkdir -p $(@D) $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@ $(C_JET_SYMS): %.sym: @mkdir -p $(@D) $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@ $(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS) $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ $(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS) $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ %.h: %.gen.h @if ! `cmp -s $< $@` ; then echo "cp $< $<"; cp $< $@ ; fi $(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O): @mkdir -p $(@D) $(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif ifneq ($(SOREV),$(SO)) %.$(SO) : %.$(SOREV) @mkdir -p $(@D) ln -sf $( $(srcroot)config.stamp.in $(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure ./$(objroot)config.status @touch $@ # There must be some action in order for make to re-read Makefile when it is # out of date. $(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp @true endif jemalloc-sys-0.3.2/rep/Makefile.in010064400007650000024000000537301344617474000152120ustar0000000000000000# Clear out all vpaths, then set just one (default vpath) for the main build # directory. vpath vpath % . # Clear the default suffixes, so that built-in rules are not used. .SUFFIXES : SHELL := /bin/sh CC := @CC@ CXX := @CXX@ # Configuration parameters. DESTDIR = BINDIR := $(DESTDIR)@BINDIR@ INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@ LIBDIR := $(DESTDIR)@LIBDIR@ DATADIR := $(DESTDIR)@DATADIR@ MANDIR := $(DESTDIR)@MANDIR@ srcroot := @srcroot@ objroot := @objroot@ abs_srcroot := @abs_srcroot@ abs_objroot := @abs_objroot@ # Build parameters. 
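# Each @name@ token in this template is an autoconf placeholder that
# config.status substitutes when generating rep/Makefile from this file.
# For example, on the recorded macOS build above, `CC := @CC@` became
# `CC := /usr/bin/clang` and `SO := @so@` became `SO := dylib`.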
CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@ SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@ EXTRA_CFLAGS := @EXTRA_CFLAGS@ CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS)) CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@ SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@ EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@ CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS)) LDFLAGS := @LDFLAGS@ EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ LIBS := @LIBS@ RPATH_EXTRA := @RPATH_EXTRA@ SO := @so@ IMPORTLIB := @importlib@ O := @o@ A := @a@ EXE := @exe@ LIBPREFIX := @libprefix@ REV := @rev@ install_suffix := @install_suffix@ ABI := @abi@ XSLTPROC := @XSLTPROC@ XSLROOT := @XSLROOT@ AUTOCONF := @AUTOCONF@ _RPATH = @RPATH@ RPATH = $(if $(1),$(call _RPATH,$(1))) cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@) cfghdrs_out := @cfghdrs_out@ cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@) cfgoutputs_out := @cfgoutputs_out@ enable_autogen := @enable_autogen@ enable_shared := @enable_shared@ enable_static := @enable_static@ enable_prof := @enable_prof@ enable_zone_allocator := @enable_zone_allocator@ enable_experimental_smallocx := @enable_experimental_smallocx@ MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF link_whole_archive := @link_whole_archive@ DSO_LDFLAGS = @DSO_LDFLAGS@ SOREV = @SOREV@ PIC_CFLAGS = @PIC_CFLAGS@ CTARGET = @CTARGET@ LDTARGET = @LDTARGET@ TEST_LD_MODE = @TEST_LD_MODE@ MKLIB = @MKLIB@ AR = @AR@ ARFLAGS = @ARFLAGS@ DUMP_SYMS = @DUMP_SYMS@ AWK := @AWK@ CC_MM = @CC_MM@ LM := @LM@ INSTALL = @INSTALL@ ifeq (macho, $(ABI)) TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" else ifeq (pecoff, $(ABI)) TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" else TEST_LIBRARY_PATH := endif endif LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) # Lists of files. 
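# The ifeq conditionals in this section compare literal 0/1 values
# substituted for @enable_prof@, @enable_experimental_smallocx@, and
# @enable_cxx@ at configure time; this is why the generated rep/Makefile
# above reads e.g. `ifeq (0, 1)`, statically dropping the prof-only tests.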
BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/arena.c \ $(srcroot)src/background_thread.c \ $(srcroot)src/base.c \ $(srcroot)src/bin.c \ $(srcroot)src/bitmap.c \ $(srcroot)src/ckh.c \ $(srcroot)src/ctl.c \ $(srcroot)src/div.c \ $(srcroot)src/extent.c \ $(srcroot)src/extent_dss.c \ $(srcroot)src/extent_mmap.c \ $(srcroot)src/hash.c \ $(srcroot)src/hook.c \ $(srcroot)src/large.c \ $(srcroot)src/log.c \ $(srcroot)src/malloc_io.c \ $(srcroot)src/mutex.c \ $(srcroot)src/mutex_pool.c \ $(srcroot)src/nstime.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ $(srcroot)src/rtree.c \ $(srcroot)src/stats.c \ $(srcroot)src/sc.c \ $(srcroot)src/sz.c \ $(srcroot)src/tcache.c \ $(srcroot)src/test_hooks.c \ $(srcroot)src/ticker.c \ $(srcroot)src/tsd.c \ $(srcroot)src/witness.c ifeq ($(enable_zone_allocator), 1) C_SRCS += $(srcroot)src/zone.c endif ifeq ($(IMPORTLIB),$(SO)) STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) endif ifdef PIC_CFLAGS STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) else STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) endif DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) ifneq ($(SOREV),$(SO)) DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) endif ifeq (1, $(link_whole_archive)) LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive else LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) endif PC := $(objroot)jemalloc.pc MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html) DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3) DOCS := $(DOCS_HTML) $(DOCS_MAN3) C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c ifeq (1, $(link_whole_archive)) C_UTIL_INTEGRATION_SRCS := C_UTIL_CPP_SRCS := else C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c endif TESTS_UNIT := \ $(srcroot)test/unit/a0.c \ $(srcroot)test/unit/arena_reset.c \ $(srcroot)test/unit/atomic.c \ $(srcroot)test/unit/background_thread.c \ $(srcroot)test/unit/background_thread_enable.c \ $(srcroot)test/unit/base.c \ $(srcroot)test/unit/bitmap.c \ $(srcroot)test/unit/bit_util.c \ $(srcroot)test/unit/binshard.c \ $(srcroot)test/unit/ckh.c \ $(srcroot)test/unit/decay.c \ $(srcroot)test/unit/div.c \ $(srcroot)test/unit/emitter.c \ $(srcroot)test/unit/extent_quantize.c \ $(srcroot)test/unit/fork.c \ $(srcroot)test/unit/hash.c \ $(srcroot)test/unit/hook.c \ $(srcroot)test/unit/huge.c \ $(srcroot)test/unit/junk.c \ $(srcroot)test/unit/junk_alloc.c \ $(srcroot)test/unit/junk_free.c \ $(srcroot)test/unit/log.c \ $(srcroot)test/unit/mallctl.c \ $(srcroot)test/unit/malloc_io.c \ $(srcroot)test/unit/math.c \ $(srcroot)test/unit/mq.c \ $(srcroot)test/unit/mtx.c \ $(srcroot)test/unit/pack.c \ $(srcroot)test/unit/pages.c \ $(srcroot)test/unit/ph.c \ $(srcroot)test/unit/prng.c \ $(srcroot)test/unit/prof_accum.c \ $(srcroot)test/unit/prof_active.c \ $(srcroot)test/unit/prof_gdump.c \ $(srcroot)test/unit/prof_idump.c \ $(srcroot)test/unit/prof_log.c \ $(srcroot)test/unit/prof_reset.c \ 
$(srcroot)test/unit/prof_tctx.c \ $(srcroot)test/unit/prof_thread_name.c \ $(srcroot)test/unit/ql.c \ $(srcroot)test/unit/qr.c \ $(srcroot)test/unit/rb.c \ $(srcroot)test/unit/retained.c \ $(srcroot)test/unit/rtree.c \ $(srcroot)test/unit/seq.c \ $(srcroot)test/unit/SFMT.c \ $(srcroot)test/unit/sc.c \ $(srcroot)test/unit/size_classes.c \ $(srcroot)test/unit/slab.c \ $(srcroot)test/unit/smoothstep.c \ $(srcroot)test/unit/spin.c \ $(srcroot)test/unit/stats.c \ $(srcroot)test/unit/stats_print.c \ $(srcroot)test/unit/test_hooks.c \ $(srcroot)test/unit/ticker.c \ $(srcroot)test/unit/nstime.c \ $(srcroot)test/unit/tsd.c \ $(srcroot)test/unit/witness.c \ $(srcroot)test/unit/zero.c ifeq (@enable_prof@, 1) TESTS_UNIT += \ $(srcroot)test/unit/arena_reset_prof.c endif TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ $(srcroot)test/integration/allocated.c \ $(srcroot)test/integration/extent.c \ $(srcroot)test/integration/malloc.c \ $(srcroot)test/integration/mallocx.c \ $(srcroot)test/integration/MALLOCX_ARENA.c \ $(srcroot)test/integration/overflow.c \ $(srcroot)test/integration/posix_memalign.c \ $(srcroot)test/integration/rallocx.c \ $(srcroot)test/integration/sdallocx.c \ $(srcroot)test/integration/slab_sizes.c \ $(srcroot)test/integration/thread_arena.c \ $(srcroot)test/integration/thread_tcache_enabled.c \ $(srcroot)test/integration/xallocx.c ifeq (@enable_experimental_smallocx@, 1) TESTS_INTEGRATION += \ $(srcroot)test/integration/smallocx.c endif ifeq (@enable_cxx@, 1) CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp else CPP_SRCS := TESTS_INTEGRATION_CPP := endif TESTS_STRESS := $(srcroot)test/stress/microbench.c \ $(srcroot)test/stress/hookbench.c TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS) PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h) C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O)) C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym) C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O)) C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O)) C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O)) C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym) C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O)) C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS) TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O)) TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS) .PHONY: all dist build_doc_html build_doc_man build_doc .PHONY: install_bin 
install_include install_lib .PHONY: install_doc_html install_doc_man install_doc install .PHONY: tests check clean distclean relclean .SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS) # Default target. all: build_lib dist: build_doc $(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl ifneq ($(XSLROOT),) $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< else ifeq ($(wildcard $(DOCS_HTML)),) @echo "
<p>Missing xsltproc. Doc not built.</p>
" > $@ endif @echo "Missing xsltproc. "$@" not (re)built." endif $(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl ifneq ($(XSLROOT),) $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< else ifeq ($(wildcard $(DOCS_MAN3)),) @echo "Missing xsltproc. Doc not built." > $@ endif @echo "Missing xsltproc. "$@" not (re)built." endif build_doc_html: $(DOCS_HTML) build_doc_man: $(DOCS_MAN3) build_doc: $(DOCS) # # Include generated dependency files. # ifdef CC_MM -include $(C_SYM_OBJS:%.$(O)=%.d) -include $(C_OBJS:%.$(O)=%.d) -include $(CPP_OBJS:%.$(O)=%.d) -include $(C_PIC_OBJS:%.$(O)=%.d) -include $(CPP_PIC_OBJS:%.$(O)=%.d) -include $(C_JET_SYM_OBJS:%.$(O)=%.d) -include $(C_JET_OBJS:%.$(O)=%.d) -include $(C_TESTLIB_OBJS:%.$(O)=%.d) -include $(TESTS_OBJS:%.$(O)=%.d) -include $(TESTS_CPP_OBJS:%.$(O)=%.d) endif $(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c $(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE $(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O) $(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c $(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp $(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c $(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) $(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp $(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS) $(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c $(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE $(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O) $(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c $(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET $(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c $(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB $(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST $(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST $(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c $(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp $(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include ifneq ($(IMPORTLIB),$(SO)) $(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT endif # Dependencies. 
ifndef CC_MM HEADER_DIRS = $(srcroot)include/jemalloc/internal \ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))) $(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h endif $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h $(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): @mkdir -p $(@D) $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif $(C_SYMS): %.sym: @mkdir -p $(@D) $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@ $(C_JET_SYMS): %.sym: @mkdir -p $(@D) $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@ $(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS) $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ $(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS) $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ %.h: %.gen.h @if ! `cmp -s $< $@` ; then echo "cp $< $<"; cp $< $@ ; fi $(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O): @mkdir -p $(@D) $(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif ifneq ($(SOREV),$(SO)) %.$(SO) : %.$(SOREV) @mkdir -p $(@D) ln -sf $( $(srcroot)config.stamp.in $(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure ./$(objroot)config.status @touch $@ # There must be some action in order for make to re-read Makefile when it is # out of date. $(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp @true endif jemalloc-sys-0.3.2/rep/README010064400007650000024000000020271344617474000140160ustar0000000000000000jemalloc is a general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. jemalloc first came into use as the FreeBSD libc allocator in 2005, and since then it has found its way into numerous applications that rely on its predictable behavior. In 2010 jemalloc development efforts broadened to include developer support features such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc releases continue to be integrated back into FreeBSD, and therefore versatility remains critical. Ongoing development efforts trend toward making jemalloc among the best allocators for a broad range of demanding applications, and eliminating/mitigating weaknesses that have practical repercussions for real world applications. The COPYING file contains copyright and licensing information. The INSTALL file contains information on how to configure, build, and install jemalloc. The ChangeLog file contains a brief summary of changes for each release. 
URL: http://jemalloc.net/ jemalloc-sys-0.3.2/rep/TUNING.md010064400007650000024000000134401344617474000144650ustar0000000000000000This document summarizes the common approaches for performance fine tuning with jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work reasonably well in practice, and most applications should not have to tune any options. However, in order to cover a wide range of applications and avoid pathological cases, the default setting is sometimes kept conservative and suboptimal, even for many common workloads. When jemalloc is properly tuned for a specific application / workload, it is common to improve system level metrics by a few percent, or make favorable trade-offs. ## Notable runtime options for performance tuning Runtime options can be set via [malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning). * [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread) Enabling jemalloc background threads generally improves the tail latency for application threads, since unused memory purging is shifted to the dedicated background threads. In addition, unintended purging delay caused by application inactivity is avoided with background threads. Suggested: `background_thread:true` when jemalloc managed threads can be allowed. * [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp) Allowing jemalloc to utilize transparent huge pages for its internal metadata usually reduces TLB misses significantly, especially for programs with large memory footprint and frequent allocation / deallocation activities. Metadata memory usage may increase due to the use of huge pages. Suggested for allocation intensive programs: `metadata_thp:auto` or `metadata_thp:always`, which is expected to improve CPU utilization at a small memory cost. * [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms) Decay time determines how fast jemalloc returns unused pages back to the operating system, and therefore provides a fairly straightforward trade-off between CPU and memory usage. Shorter decay time purges unused pages faster to reduces memory usage (usually at the cost of more CPU cycles spent on purging), and vice versa. Suggested: tune the values based on the desired trade-offs. * [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas) By default jemalloc uses multiple arenas to reduce internal lock contention. However high arena count may also increase overall memory fragmentation, since arenas manage memory independently. When high degree of parallelism is not expected at the allocator level, lower number of arenas often improves memory usage. Suggested: if low parallelism is expected, try lower arena count while monitoring CPU and memory usage. * [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena) Enable dynamic thread to arena association based on running CPU. This has the potential to improve locality, e.g. when thread to CPU affinity is present. Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if thread migration between processors is expected to be infrequent. Examples: * High resource consumption application, prioritizing CPU utilization: `background_thread:true,metadata_thp:auto` combined with relaxed decay time (increased `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`). 
* High resource consumption application, prioritizing memory usage: `background_thread:true` combined with shorter decay time (decreased `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count (e.g. number of CPUs). * Low resource consumption application: `narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased `dirty_decay_ms` and / or `muzzy_decay_ms`,e.g. `dirty_decay_ms:1000,muzzy_decay_ms:0`). * Extremely conservative -- minimize memory usage at all costs, only suitable when allocation activity is very rare: `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0` Note that it is recommended to combine the options with `abort_conf:true` which aborts immediately on illegal options. ## Beyond runtime options In addition to the runtime options, there are a number of programmatic ways to improve application performance with jemalloc. * [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create) Manually created arenas can help performance in various ways, e.g. by managing locality and contention for specific usages. For example, applications can explicitly allocate frequently accessed objects from a dedicated arena with [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve locality. In addition, explicit arenas often benefit from individually tuned options, e.g. relaxed [decay time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if frequent reuse is expected. * [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks) Extent hooks allow customization for managing underlying memory. One use case for performance purpose is to utilize huge pages -- for example, [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp) uses explicit arenas with customized extent hooks to manage 1GB huge pages for frequently accessed data, which reduces TLB misses significantly. * [Explicit thread-to-arena binding](http://jemalloc.net/jemalloc.3.html#thread.arena) It is common for some threads in an application to have different memory access / allocation patterns. Threads with heavy workloads often benefit from explicit binding, e.g. binding very active threads to dedicated arenas may reduce contention at the allocator level. jemalloc-sys-0.3.2/rep/VERSION010064400007650000024000000000621344617501700142010ustar00000000000000000.0.0-0-g0000000000000000000000000000000000000000 jemalloc-sys-0.3.2/rep/autogen.sh010075500007650000024000000004121344617474000151330ustar0000000000000000#!/bin/sh for i in autoconf; do echo "$i" $i if [ $? -ne 0 ]; then echo "Error $? in $i" exit 1 fi done echo "./configure --enable-autogen $@" ./configure --enable-autogen $@ if [ $? -ne 0 ]; then echo "Error $? in ./configure" exit 1 fi jemalloc-sys-0.3.2/rep/bin/jemalloc-config010064400007650000024000000034341344617503000166560ustar0000000000000000#!/bin/sh usage() { cat < Options: --help | -h : Print usage. --version : Print jemalloc version. --revision : Print shared library revision number. --config : Print configure options used to build jemalloc. --prefix : Print installation directory prefix. --bindir : Print binary installation directory. --datadir : Print data installation directory. --includedir : Print include installation directory. --libdir : Print library installation directory. --mandir : Print manual page installation directory. --cc : Print compiler used to build jemalloc. --cflags : Print compiler flags used to build jemalloc. 
--cppflags : Print preprocessor flags used to build jemalloc. --cxxflags : Print C++ compiler flags used to build jemalloc. --ldflags : Print library flags used to build jemalloc. --libs : Print libraries jemalloc was linked against. EOF } prefix="/usr/local" exec_prefix="/usr/local" case "$1" in --help | -h) usage exit 0 ;; --version) echo "0.0.0-0-g0000000000000000000000000000000000000000" ;; --revision) echo "2" ;; --config) echo "CC=/usr/bin/clang CXX=/usr/bin/clang++" ;; --prefix) echo "/usr/local" ;; --bindir) echo "/usr/local/bin" ;; --datadir) echo "/usr/local/share" ;; --includedir) echo "/usr/local/include" ;; --libdir) echo "/usr/local/lib" ;; --mandir) echo "/usr/local/share/man" ;; --cc) echo "/usr/bin/clang" ;; --cflags) echo "-std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops" ;; --cppflags) echo "-D_REENTRANT" ;; --cxxflags) echo "-Wall -Wextra -g3 -O3" ;; --ldflags) echo " " ;; --libs) echo "-lstdc++ -pthread" ;; *) usage exit 1 esac jemalloc-sys-0.3.2/rep/bin/jemalloc-config.in010064400007650000024000000030761344617474000172740ustar0000000000000000#!/bin/sh usage() { cat < Options: --help | -h : Print usage. --version : Print jemalloc version. --revision : Print shared library revision number. --config : Print configure options used to build jemalloc. --prefix : Print installation directory prefix. --bindir : Print binary installation directory. --datadir : Print data installation directory. --includedir : Print include installation directory. --libdir : Print library installation directory. --mandir : Print manual page installation directory. --cc : Print compiler used to build jemalloc. --cflags : Print compiler flags used to build jemalloc. --cppflags : Print preprocessor flags used to build jemalloc. --cxxflags : Print C++ compiler flags used to build jemalloc. --ldflags : Print library flags used to build jemalloc. --libs : Print libraries jemalloc was linked against. EOF } prefix="@prefix@" exec_prefix="@exec_prefix@" case "$1" in --help | -h) usage exit 0 ;; --version) echo "@jemalloc_version@" ;; --revision) echo "@rev@" ;; --config) echo "@CONFIG@" ;; --prefix) echo "@PREFIX@" ;; --bindir) echo "@BINDIR@" ;; --datadir) echo "@DATADIR@" ;; --includedir) echo "@INCLUDEDIR@" ;; --libdir) echo "@LIBDIR@" ;; --mandir) echo "@MANDIR@" ;; --cc) echo "@CC@" ;; --cflags) echo "@CFLAGS@" ;; --cppflags) echo "@CPPFLAGS@" ;; --cxxflags) echo "@CXXFLAGS@" ;; --ldflags) echo "@LDFLAGS@ @EXTRA_LDFLAGS@" ;; --libs) echo "@LIBS@" ;; *) usage exit 1 esac jemalloc-sys-0.3.2/rep/bin/jemalloc.sh010064400007650000024000000002521344617503000160170ustar0000000000000000#!/bin/sh prefix=/usr/local exec_prefix=/usr/local libdir=${exec_prefix}/lib DYLD_INSERT_LIBRARIES=${libdir}/libjemalloc.2.dylib export DYLD_INSERT_LIBRARIES exec "$@" jemalloc-sys-0.3.2/rep/bin/jemalloc.sh.in010064400007650000024000000002271344617474000164350ustar0000000000000000#!/bin/sh prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ @LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ export @LD_PRELOAD_VAR@ exec "$@" jemalloc-sys-0.3.2/rep/bin/jeprof010064400007650000024000005357541344617503000151310ustar0000000000000000#! /usr/bin/env perl # Copyright (c) 1998-2007, Google Inc. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # --- # Program for printing the profile generated by common/profiler.cc, # or by the heap profiler (common/debugallocation.cc) # # The profile contains a sequence of entries of the form: # # This program parses the profile, and generates user-readable # output. # # Examples: # # % tools/jeprof "program" "profile" # Enters "interactive" mode # # % tools/jeprof --text "program" "profile" # Generates one line per procedure # # % tools/jeprof --gv "program" "profile" # Generates annotated call-graph and displays via "gv" # # % tools/jeprof --gv --focus=Mutex "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # # % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # and does not match "string" # # % tools/jeprof --list=IBF_CheckDocid "program" "profile" # Generates disassembly listing of all routines with at least one # sample that match the --list= pattern. The listing is # annotated with the flat and cumulative sample counts at each line. # # % tools/jeprof --disasm=IBF_CheckDocid "program" "profile" # Generates disassembly listing of all routines with at least one # sample that match the --disasm= pattern. The listing is # annotated with the flat and cumulative sample counts at each PC value. # # TODO: Use color to indicate files? use strict; use warnings; use Getopt::Long; use Cwd; my $JEPROF_VERSION = "0.0.0-0-g0000000000000000000000000000000000000000"; my $PPROF_VERSION = "2.0"; # These are the object tools we use which can come from a # user-specified location using --tools, from the JEPROF_TOOLS # environment variable, or from the environment. 
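# For example (hypothetical toolchain path), running
#   JEPROF_TOOLS=/opt/cross/bin/arm-none-eabi- jeprof --text ./prog ./prof.heap
# would make jeprof invoke /opt/cross/bin/arm-none-eabi-objdump,
# /opt/cross/bin/arm-none-eabi-nm, etc., rather than the tools on $PATH.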
my %obj_tool_map = ( "objdump" => "objdump", "nm" => "nm", "addr2line" => "addr2line", "c++filt" => "c++filt", ## ConfigureObjTools may add architecture-specific entries: #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables #"addr2line_pdb" => "addr2line-pdb", # ditto #"otool" => "otool", # equivalent of objdump on OS X ); # NOTE: these are lists, so you can put in commandline flags if you want. my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local my @GV = ("gv"); my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread my @KCACHEGRIND = ("kcachegrind"); my @PS2PDF = ("ps2pdf"); # These are used for dynamic profiles my @URL_FETCHER = ("curl", "-s", "--fail"); # These are the web pages that servers need to support for dynamic profiles my $HEAP_PAGE = "/pprof/heap"; my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#" my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param # ?seconds=#&event=x&period=n my $GROWTH_PAGE = "/pprof/growth"; my $CONTENTION_PAGE = "/pprof/contention"; my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?"; my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param # "?seconds=#", # "?tags_regexp=#" and # "?type=#". my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST my $PROGRAM_NAME_PAGE = "/pprof/cmdline"; # These are the web pages that can be named on the command line. # All the alternatives must begin with /. my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" . "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" . "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)"; # default binary name my $UNKNOWN_BINARY = "(unknown)"; # There is a pervasive dependency on the length (in hex characters, # i.e., nibbles) of an address, distinguishing between 32-bit and # 64-bit profiles. To err on the safe size, default to 64-bit here: my $address_length = 16; my $dev_null = "/dev/null"; if (! -e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for $dev_null = "nul"; } # A list of paths to search for shared object files my @prefix_list = (); # Special routine name that should not have any symbols. # Used as separator to parse "addr2line -i" output. my $sep_symbol = '_fini'; my $sep_address = undef; ##### Argument parsing ##### sub usage_string { return < is a space separated list of profile names. jeprof [options] is a list of profile files where each file contains the necessary symbol mappings as well as profile data (likely generated with --raw). jeprof [options] is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE Each name can be: /path/to/profile - a path to a profile file host:port[/] - a location of a service to get profile from The / can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile, $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall, $CENSUSPROFILE_PAGE, or /pprof/filteredprofile. For instance: jeprof http://myserver.com:80$HEAP_PAGE If / is omitted, the service defaults to $PROFILE_PAGE (cpu profiling). jeprof --symbols Maps addresses to symbol names. In this mode, stdin should be a list of library mappings, in the same format as is found in the heap- and cpu-profile files (this loosely matches that of /proc/self/maps on linux), followed by a list of hex addresses to map, one per line. 
For more help with querying remote servers, including how to add the necessary server-side support code, see this filename (or one like it): /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html Options: --cum Sort by cumulative data --base= Subtract from before display --interactive Run in interactive mode (interactive "help" gives help) [default] --seconds= Length of time for dynamic profiles [default=30 secs] --add_lib= Read additional symbols and line info from the given library --lib_prefix= Comma separated list of library path prefixes Reporting Granularity: --addresses Report at address level --lines Report at source line level --functions Report at function level [default] --files Report at source file level Output type: --text Generate text report --callgrind Generate callgrind format to stdout --gv Generate Postscript and display --evince Generate PDF and display --web Generate SVG and display --list= Generate source listing of matching routines --disasm= Generate disassembly of matching routines --symbols Print demangled symbol names found at given addresses --dot Generate DOT file to stdout --ps Generate Postcript to stdout --pdf Generate PDF to stdout --svg Generate SVG to stdout --gif Generate GIF to stdout --raw Generate symbolized jeprof data (useful with remote fetch) Heap-Profile Options: --inuse_space Display in-use (mega)bytes [default] --inuse_objects Display in-use objects --alloc_space Display allocated (mega)bytes --alloc_objects Display allocated objects --show_bytes Display space in bytes --drop_negative Ignore negative differences Contention-profile options: --total_delay Display total delay at each region [default] --contentions Display number of delays at each region --mean_delay Display mean delay at each region Call-graph Options: --nodecount= Show at most so many nodes [default=80] --nodefraction= Hide nodes below *total [default=.005] --edgefraction= Hide edges below *total [default=.001] --maxdegree= Max incoming/outgoing edges per node [default=8] --focus= Focus on backtraces with nodes matching --thread= Show profile for thread --ignore= Ignore backtraces with nodes matching --scale= Set GV scaling [default=0] --heapcheck Make nodes with non-0 object counts (i.e. direct leak generators) more visible --retain= Retain only nodes that match --exclude= Exclude all nodes that match Miscellaneous: --tools=[,...] \$PATH for object tool pathnames --test Run unit tests --help This message --version Version information Environment Variables: JEPROF_TMPDIR Profiles directory. Defaults to \$HOME/jeprof JEPROF_TOOLS Prefix for object tools pathnames Examples: jeprof /bin/ls ls.prof Enters "interactive" mode jeprof --text /bin/ls ls.prof Outputs one line per procedure jeprof --web /bin/ls ls.prof Displays annotated call-graph in web browser jeprof --gv /bin/ls ls.prof Displays annotated call-graph via 'gv' jeprof --gv --focus=Mutex /bin/ls ls.prof Restricts to code paths including a .*Mutex.* entry jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof Code paths including Mutex but not string jeprof --list=getdir /bin/ls ls.prof (Per-line) annotated source listing for getdir() jeprof --disasm=getdir /bin/ls ls.prof (Per-PC) annotated disassembly for getdir() jeprof http://localhost:1234/ Enters "interactive" mode jeprof --text localhost:1234 Outputs one line per procedure for localhost:1234 jeprof --raw localhost:1234 > ./local.raw jeprof --text ./local.raw Fetches a remote profile for later analysis and then analyzes it in text mode. 
EOF } sub version_string { return < \$main::opt_help, "version!" => \$main::opt_version, "cum!" => \$main::opt_cum, "base=s" => \$main::opt_base, "seconds=i" => \$main::opt_seconds, "add_lib=s" => \$main::opt_lib, "lib_prefix=s" => \$main::opt_lib_prefix, "functions!" => \$main::opt_functions, "lines!" => \$main::opt_lines, "addresses!" => \$main::opt_addresses, "files!" => \$main::opt_files, "text!" => \$main::opt_text, "callgrind!" => \$main::opt_callgrind, "list=s" => \$main::opt_list, "disasm=s" => \$main::opt_disasm, "symbols!" => \$main::opt_symbols, "gv!" => \$main::opt_gv, "evince!" => \$main::opt_evince, "web!" => \$main::opt_web, "dot!" => \$main::opt_dot, "ps!" => \$main::opt_ps, "pdf!" => \$main::opt_pdf, "svg!" => \$main::opt_svg, "gif!" => \$main::opt_gif, "raw!" => \$main::opt_raw, "interactive!" => \$main::opt_interactive, "nodecount=i" => \$main::opt_nodecount, "nodefraction=f" => \$main::opt_nodefraction, "edgefraction=f" => \$main::opt_edgefraction, "maxdegree=i" => \$main::opt_maxdegree, "focus=s" => \$main::opt_focus, "thread=s" => \$main::opt_thread, "ignore=s" => \$main::opt_ignore, "scale=i" => \$main::opt_scale, "heapcheck" => \$main::opt_heapcheck, "retain=s" => \$main::opt_retain, "exclude=s" => \$main::opt_exclude, "inuse_space!" => \$main::opt_inuse_space, "inuse_objects!" => \$main::opt_inuse_objects, "alloc_space!" => \$main::opt_alloc_space, "alloc_objects!" => \$main::opt_alloc_objects, "show_bytes!" => \$main::opt_show_bytes, "drop_negative!" => \$main::opt_drop_negative, "total_delay!" => \$main::opt_total_delay, "contentions!" => \$main::opt_contentions, "mean_delay!" => \$main::opt_mean_delay, "tools=s" => \$main::opt_tools, "test!" => \$main::opt_test, "debug!" => \$main::opt_debug, # Undocumented flags used only by unittests: "test_stride=i" => \$main::opt_test_stride, ) || usage("Invalid option(s)"); # Deal with the standard --help and --version if ($main::opt_help) { print usage_string(); exit(0); } if ($main::opt_version) { print version_string(); exit(0); } # Disassembly/listing/symbols mode requires address-level info if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) { $main::opt_functions = 0; $main::opt_lines = 0; $main::opt_addresses = 1; $main::opt_files = 0; } # Check heap-profiling flags if ($main::opt_inuse_space + $main::opt_inuse_objects + $main::opt_alloc_space + $main::opt_alloc_objects > 1) { usage("Specify at most on of --inuse/--alloc options"); } # Check output granularities my $grains = $main::opt_functions + $main::opt_lines + $main::opt_addresses + $main::opt_files + 0; if ($grains > 1) { usage("Only specify one output granularity option"); } if ($grains == 0) { $main::opt_functions = 1; } # Check output modes my $modes = $main::opt_text + $main::opt_callgrind + ($main::opt_list eq '' ? 0 : 1) + ($main::opt_disasm eq '' ? 0 : 1) + ($main::opt_symbols == 0 ? 
0 : 1) + $main::opt_gv + $main::opt_evince + $main::opt_web + $main::opt_dot + $main::opt_ps + $main::opt_pdf + $main::opt_svg + $main::opt_gif + $main::opt_raw + $main::opt_interactive + 0; if ($modes > 1) { usage("Only specify one output mode"); } if ($modes == 0) { if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode $main::opt_interactive = 1; } else { $main::opt_text = 1; } } if ($main::opt_test) { RunUnitTests(); # Should not return exit(1); } # Binary name and profile arguments list $main::prog = ""; @main::pfile_args = (); # Remote profiling without a binary (using $SYMBOL_PAGE instead) if (@ARGV > 0) { if (IsProfileURL($ARGV[0])) { $main::use_symbol_page = 1; } elsif (IsSymbolizedProfileFile($ARGV[0])) { $main::use_symbolized_profile = 1; $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file } } if ($main::use_symbol_page || $main::use_symbolized_profile) { # We don't need a binary! my %disabled = ('--lines' => $main::opt_lines, '--disasm' => $main::opt_disasm); for my $option (keys %disabled) { usage("$option cannot be used without a binary") if $disabled{$option}; } # Set $main::prog later... scalar(@ARGV) || usage("Did not specify profile file"); } elsif ($main::opt_symbols) { # --symbols needs a binary-name (to run nm on, etc) but not profiles $main::prog = shift(@ARGV) || usage("Did not specify program"); } else { $main::prog = shift(@ARGV) || usage("Did not specify program"); scalar(@ARGV) || usage("Did not specify profile file"); } # Parse profile file/location arguments foreach my $farg (@ARGV) { if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) { my $machine = $1; my $num_machines = $2; my $path = $3; for (my $i = 0; $i < $num_machines; $i++) { unshift(@main::pfile_args, "$i.$machine$path"); } } else { unshift(@main::pfile_args, $farg); } } if ($main::use_symbol_page) { unless (IsProfileURL($main::pfile_args[0])) { error("The first profile should be a remote form to use $SYMBOL_PAGE\n"); } CheckSymbolPage(); $main::prog = FetchProgramName(); } elsif (!$main::use_symbolized_profile) { # may not need objtools! ConfigureObjTools($main::prog) } # Break the opt_lib_prefix into the prefix_list array @prefix_list = split (',', $main::opt_lib_prefix); # Remove trailing / from the prefixes, in the list to prevent # searching things like /my/path//lib/mylib.so foreach (@prefix_list) { s|/+$||; } } sub FilterAndPrint { my ($profile, $symbols, $libs, $thread) = @_; # Get total data in profile my $total = TotalProfile($profile); # Remove uniniteresting stack items $profile = RemoveUninterestingFrames($symbols, $profile); # Focus? if ($main::opt_focus ne '') { $profile = FocusProfile($symbols, $profile, $main::opt_focus); } # Ignore? if ($main::opt_ignore ne '') { $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore); } my $calls = ExtractCalls($symbols, $profile); # Reduce profiles to required output granularity, and also clean # each stack trace so a given entry exists at most once. 
my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); # Print if (!$main::opt_interactive) { if ($main::opt_disasm) { PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm); } elsif ($main::opt_list) { PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0); } elsif ($main::opt_text) { # Make sure the output is empty when have nothing to report # (only matters when --heapcheck is given but we must be # compatible with old branches that did not pass --heapcheck always): if ($total != 0) { printf("Total%s: %s %s\n", (defined($thread) ? " (t$thread)" : ""), Unparse($total), Units()); } PrintText($symbols, $flat, $cumulative, -1); } elsif ($main::opt_raw) { PrintSymbolizedProfile($symbols, $profile, $main::prog); } elsif ($main::opt_callgrind) { PrintCallgrind($calls); } else { if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { if ($main::opt_gv) { RunGV(TempName($main::next_tmpfile, "ps"), ""); } elsif ($main::opt_evince) { RunEvince(TempName($main::next_tmpfile, "pdf"), ""); } elsif ($main::opt_web) { my $tmp = TempName($main::next_tmpfile, "svg"); RunWeb($tmp); # The command we run might hand the file name off # to an already running browser instance and then exit. # Normally, we'd remove $tmp on exit (right now), # but fork a child to remove $tmp a little later, so that the # browser has time to load it first. delete $main::tempnames{$tmp}; if (fork() == 0) { sleep 5; unlink($tmp); exit(0); } } } else { cleanup(); exit(1); } } } else { InteractiveMode($profile, $symbols, $libs, $total); } } sub Main() { Init(); $main::collected_profile = undef; @main::profile_files = (); $main::op_time = time(); # Printing symbols is special and requires a lot less info that most. if ($main::opt_symbols) { PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin return; } # Fetch all profile data FetchDynamicProfiles(); # this will hold symbols that we read from the profile files my $symbol_map = {}; # Read one profile, pick the last item on the list my $data = ReadProfile($main::prog, pop(@main::profile_files)); my $profile = $data->{profile}; my $pcs = $data->{pcs}; my $libs = $data->{libs}; # Info about main program and shared libraries $symbol_map = MergeSymbols($symbol_map, $data->{symbols}); # Add additional profiles, if available. if (scalar(@main::profile_files) > 0) { foreach my $pname (@main::profile_files) { my $data2 = ReadProfile($main::prog, $pname); $profile = AddProfile($profile, $data2->{profile}); $pcs = AddPcs($pcs, $data2->{pcs}); $symbol_map = MergeSymbols($symbol_map, $data2->{symbols}); } } # Subtract base from profile, if specified if ($main::opt_base ne '') { my $base = ReadProfile($main::prog, $main::opt_base); $profile = SubtractProfile($profile, $base->{profile}); $pcs = AddPcs($pcs, $base->{pcs}); $symbol_map = MergeSymbols($symbol_map, $base->{symbols}); } # Collect symbols my $symbols; if ($main::use_symbolized_profile) { $symbols = FetchSymbols($pcs, $symbol_map); } elsif ($main::use_symbol_page) { $symbols = FetchSymbols($pcs); } else { # TODO(csilvers): $libs uses the /proc/self/maps data from profile1, # which may differ from the data from subsequent profiles, especially # if they were run on different machines. Use appropriate libs for # each pc somehow. 
$symbols = ExtractSymbols($libs, $pcs); } if (!defined($main::opt_thread)) { FilterAndPrint($profile, $symbols, $libs); } if (defined($data->{threads})) { foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) { if (defined($main::opt_thread) && ($main::opt_thread eq '*' || $main::opt_thread == $thread)) { my $thread_profile = $data->{threads}{$thread}; FilterAndPrint($thread_profile, $symbols, $libs, $thread); } } } cleanup(); exit(0); } ##### Entry Point ##### Main(); # Temporary code to detect if we're running on a Goobuntu system. # These systems don't have the right stuff installed for the special # Readline libraries to work, so as a temporary workaround, we default # to using the normal stdio code, rather than the fancier readline-based # code sub ReadlineMightFail { if (-e '/lib/libtermcap.so.2') { return 0; # libtermcap exists, so readline should be okay } else { return 1; } } sub RunGV { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) { # Options using double dash are supported by this gv version. # Also, turn on noantialias to better handle bug in gv for # postscript files with large dimensions. # TODO: Maybe we should not pass the --noantialias flag # if the gv version is known to work properly without the flag. system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname) . $bg); } else { # Old gv version - only supports options that use single dash. print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n"; system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg); } } sub RunEvince { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background system(ShellEscape(@EVINCE, $fname) . $bg); } sub RunWeb { my $fname = shift; print STDERR "Loading web page file:///$fname\n"; if (`uname` =~ /Darwin/) { # OS X: open will use standard preference for SVG files. system("/usr/bin/open", $fname); return; } # Some kind of Unix; try generic symlinks, then specific browsers. # (Stop once we find one.) # Works best if the browser is already running. my @alt = ( "/etc/alternatives/gnome-www-browser", "/etc/alternatives/x-www-browser", "google-chrome", "firefox", ); foreach my $b (@alt) { if (system($b, $fname) == 0) { return; } } print STDERR "Could not load web browser.\n"; } sub RunKcachegrind { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n"; system(ShellEscape(@KCACHEGRIND, $fname) . $bg); } ##### Interactive helper routines ##### sub InteractiveMode { $| = 1; # Make output unbuffered for interactive mode my ($orig_profile, $symbols, $libs, $total) = @_; print STDERR "Welcome to jeprof! For help, type 'help'.\n"; # Use ReadLine if it's installed and input comes from a console. if ( -t STDIN && !ReadlineMightFail() && defined(eval {require Term::ReadLine}) ) { my $term = new Term::ReadLine 'jeprof'; while ( defined ($_ = $term->readline('(jeprof) '))) { $term->addhistory($_) if /\S/; if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { last; # exit when we get an interactive command to quit } } } else { # don't have readline while (1) { print STDERR "(jeprof) "; $_ = ; last if ! 
defined $_ ; s/\r//g; # turn windows-looking lines into unix-looking lines # Save some flags that might be reset by InteractiveCommand() my $save_opt_lines = $main::opt_lines; if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { last; # exit when we get an interactive command to quit } # Restore flags $main::opt_lines = $save_opt_lines; } } } # Takes two args: orig profile, and command to run. # Returns 1 if we should keep going, or 0 if we were asked to quit sub InteractiveCommand { my($orig_profile, $symbols, $libs, $total, $command) = @_; $_ = $command; # just to make future m//'s easier if (!defined($_)) { print STDERR "\n"; return 0; } if (m/^\s*quit/) { return 0; } if (m/^\s*help/) { InteractiveHelpMessage(); return 1; } # Clear all the mode options -- mode is controlled by "$command" $main::opt_text = 0; $main::opt_callgrind = 0; $main::opt_disasm = 0; $main::opt_list = 0; $main::opt_gv = 0; $main::opt_evince = 0; $main::opt_cum = 0; if (m/^\s*(text|top)(\d*)\s*(.*)/) { $main::opt_text = 1; my $line_limit = ($2 ne "") ? int($2) : 10; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($3); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintText($symbols, $flat, $cumulative, $line_limit); return 1; } if (m/^\s*callgrind\s*([^ \n]*)/) { $main::opt_callgrind = 1; # Get derived profiles my $calls = ExtractCalls($symbols, $orig_profile); my $filename = $1; if ( $1 eq '' ) { $filename = TempName($main::next_tmpfile, "callgrind"); } PrintCallgrind($calls, $filename); if ( $1 eq '' ) { RunKcachegrind($filename, " & "); $main::next_tmpfile++; } return 1; } if (m/^\s*(web)?list\s*(.+)/) { my $html = (defined($1) && ($1 eq "web")); $main::opt_list = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($2); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintListing($total, $libs, $flat, $cumulative, $routine, $html); return 1; } if (m/^\s*disasm\s*(.+)/) { $main::opt_disasm = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($1); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintDisassembly($libs, $flat, $cumulative, $routine); return 1; } if (m/^\s*(gv|web|evince)\s*(.*)/) { $main::opt_gv = 0; $main::opt_evince = 0; $main::opt_web = 0; if ($1 eq "gv") { $main::opt_gv = 1; } elsif ($1 eq "evince") { $main::opt_evince = 1; } elsif ($1 eq "web") { $main::opt_web = 1; } my $focus; my $ignore; ($focus, $ignore) = ParseInteractiveArgs($2); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, $focus, $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { if ($main::opt_gv) { RunGV(TempName($main::next_tmpfile, "ps"), " &"); } elsif ($main::opt_evince) { 
RunEvince(TempName($main::next_tmpfile, "pdf"), " &"); } elsif ($main::opt_web) { RunWeb(TempName($main::next_tmpfile, "svg")); } $main::next_tmpfile++; } return 1; } if (m/^\s*$/) { return 1; } print STDERR "Unknown command: try 'help'.\n"; return 1; } sub ProcessProfile { my $total_count = shift; my $orig_profile = shift; my $symbols = shift; my $focus = shift; my $ignore = shift; # Process current profile to account for various settings my $profile = $orig_profile; printf("Total: %s %s\n", Unparse($total_count), Units()); if ($focus ne '') { $profile = FocusProfile($symbols, $profile, $focus); my $focus_count = TotalProfile($profile); printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n", $focus, Unparse($focus_count), Units(), Unparse($total_count), ($focus_count*100.0) / $total_count); } if ($ignore ne '') { $profile = IgnoreProfile($symbols, $profile, $ignore); my $ignore_count = TotalProfile($profile); printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n", $ignore, Unparse($ignore_count), Units(), Unparse($total_count), ($ignore_count*100.0) / $total_count); } return $profile; } sub InteractiveHelpMessage { print STDERR <{$k}; my @addrs = split(/\n/, $k); if ($#addrs >= 0) { my $depth = $#addrs + 1; # int(foo / 2**32) is the only reliable way to get rid of bottom # 32 bits on both 32- and 64-bit systems. print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32)); print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32)); foreach my $full_addr (@addrs) { my $addr = $full_addr; $addr =~ s/0x0*//; # strip off leading 0x, zeroes if (length($addr) > 16) { print STDERR "Invalid address in profile: $full_addr\n"; next; } my $low_addr = substr($addr, -8); # get last 8 hex chars my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr)); } } } } # Print symbols and profile data sub PrintSymbolizedProfile { my $symbols = shift; my $profile = shift; my $prog = shift; $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash my $symbol_marker = $&; print '--- ', $symbol_marker, "\n"; if (defined($prog)) { print 'binary=', $prog, "\n"; } while (my ($pc, $name) = each(%{$symbols})) { my $sep = ' '; print '0x', $pc; # We have a list of function names, which include the inlined # calls. They are separated (and terminated) by --, which is # illegal in function names. for (my $j = 2; $j <= $#{$name}; $j += 3) { print $sep, $name->[$j]; $sep = '--'; } print "\n"; } print '---', "\n"; my $profile_marker; if ($main::profile_type eq 'heap') { $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } elsif ($main::profile_type eq 'growth') { $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } elsif ($main::profile_type eq 'contention') { $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } else { # elsif ($main::profile_type eq 'cpu') $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } print '--- ', $profile_marker, "\n"; if (defined($main::collected_profile)) { # if used with remote fetch, simply dump the collected profile to output. 
open(SRC, "<$main::collected_profile"); while () { print $_; } close(SRC); } else { # --raw/http: For everything to work correctly for non-remote profiles, we # would need to extend PrintProfileData() to handle all possible profile # types, re-enable the code that is currently disabled in ReadCPUProfile() # and FixCallerAddresses(), and remove the remote profile dumping code in # the block above. die "--raw/http: jeprof can only dump remote profiles for --raw\n"; # dump a cpu-format profile to standard out PrintProfileData($profile); } } # Print text output sub PrintText { my $symbols = shift; my $flat = shift; my $cumulative = shift; my $line_limit = shift; my $total = TotalProfile($flat); # Which profile to sort by? my $s = $main::opt_cum ? $cumulative : $flat; my $running_sum = 0; my $lines = 0; foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b } keys(%{$cumulative})) { my $f = GetEntry($flat, $k); my $c = GetEntry($cumulative, $k); $running_sum += $f; my $sym = $k; if (exists($symbols->{$k})) { $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1]; if ($main::opt_addresses) { $sym = $k . " " . $sym; } } if ($f != 0 || $c != 0) { printf("%8s %6s %6s %8s %6s %s\n", Unparse($f), Percent($f, $total), Percent($running_sum, $total), Unparse($c), Percent($c, $total), $sym); } $lines++; last if ($line_limit >= 0 && $lines >= $line_limit); } } # Callgrind format has a compression for repeated function and file # names. You show the name the first time, and just use its number # subsequently. This can cut down the file to about a third or a # quarter of its uncompressed size. $key and $val are the key/value # pair that would normally be printed by callgrind; $map is a map from # value to number. sub CompressedCGName { my($key, $val, $map) = @_; my $idx = $map->{$val}; # For very short keys, providing an index hurts rather than helps. if (length($val) <= 3) { return "$key=$val\n"; } elsif (defined($idx)) { return "$key=($idx)\n"; } else { # scalar(keys $map) gives the number of items in the map. $idx = scalar(keys(%{$map})) + 1; $map->{$val} = $idx; return "$key=($idx) $val\n"; } } # Print the call graph in a way that's suiteable for callgrind. sub PrintCallgrind { my $calls = shift; my $filename; my %filename_to_index_map; my %fnname_to_index_map; if ($main::opt_interactive) { $filename = shift; print STDERR "Writing callgrind file to '$filename'.\n" } else { $filename = "&STDOUT"; } open(CG, ">$filename"); printf CG ("events: Hits\n\n"); foreach my $call ( map { $_->[0] } sort { $a->[1] cmp $b ->[1] || $a->[2] <=> $b->[2] } map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; [$_, $1, $2] } keys %$calls ) { my $count = int($calls->{$call}); $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; my ( $caller_file, $caller_line, $caller_function, $callee_file, $callee_line, $callee_function ) = ( $1, $2, $3, $5, $6, $7 ); # TODO(csilvers): for better compression, collect all the # caller/callee_files and functions first, before printing # anything, and only compress those referenced more than once. 
printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map); printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map); if (defined $6) { printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map); printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map); printf CG ("calls=$count $callee_line\n"); } printf CG ("$caller_line $count\n\n"); } } # Print disassembly for all all routines that match $main::opt_disasm sub PrintDisassembly { my $libs = shift; my $flat = shift; my $cumulative = shift; my $disasm_opts = shift; my $total = TotalProfile($flat); foreach my $lib (@{$libs}) { my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts); my $offset = AddressSub($lib->[1], $lib->[3]); foreach my $routine (sort ByName keys(%{$symbol_table})) { my $start_addr = $symbol_table->{$routine}->[0]; my $end_addr = $symbol_table->{$routine}->[1]; # See if there are any samples in this routine my $length = hex(AddressSub($end_addr, $start_addr)); my $addr = AddressAdd($start_addr, $offset); for (my $i = 0; $i < $length; $i++) { if (defined($cumulative->{$addr})) { PrintDisassembledFunction($lib->[0], $offset, $routine, $flat, $cumulative, $start_addr, $end_addr, $total); last; } $addr = AddressInc($addr); } } } } # Return reference to array of tuples of the form: # [start_address, filename, linenumber, instruction, limit_address] # E.g., # ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"] sub Disassemble { my $prog = shift; my $offset = shift; my $start_addr = shift; my $end_addr = shift; my $objdump = $obj_tool_map{"objdump"}; my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn", "--start-address=0x$start_addr", "--stop-address=0x$end_addr", $prog); open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); my @result = (); my $filename = ""; my $linenumber = -1; my $last = ["", "", "", ""]; while () { s/\r//g; # turn windows-looking lines into unix-looking lines chop; if (m|\s*([^:\s]+):(\d+)\s*$|) { # Location line of the form: # : $filename = $1; $linenumber = $2; } elsif (m/^ +([0-9a-f]+):\s*(.*)/) { # Disassembly line -- zero-extend address to full length my $addr = HexExtend($1); my $k = AddressAdd($addr, $offset); $last->[4] = $k; # Store ending address for previous instruction $last = [$k, $filename, $linenumber, $2, $end_addr]; push(@result, $last); } } close(OBJDUMP); return @result; } # The input file should contain lines of the form /proc/maps-like # output (same format as expected from the profiles) or that looks # like hex addresses (like "0xDEADBEEF"). We will parse all # /proc/maps output, and for all the hex addresses, we will output # "short" symbol names, one per line, in the same order as the input. sub PrintSymbols { my $maps_and_symbols_file = shift; # ParseLibraries expects pcs to be in a set. Fine by us... my @pclist = (); # pcs in sorted order my $pcs = {}; my $map = ""; foreach my $line (<$maps_and_symbols_file>) { $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines if ($line =~ /\b(0x[0-9a-f]+)\b/i) { push(@pclist, HexExtend($1)); $pcs->{$pclist[-1]} = 1; } else { $map .= $line; } } my $libs = ParseLibraries($main::prog, $map, $pcs); my $symbols = ExtractSymbols($libs, $pcs); foreach my $pc (@pclist) { # ->[0] is the shortname, ->[2] is the full name print(($symbols->{$pc}->[0] || "??") . 
"\n"); } } # For sorting functions by name sub ByName { return ShortFunctionName($a) cmp ShortFunctionName($b); } # Print source-listing for all all routines that match $list_opts sub PrintListing { my $total = shift; my $libs = shift; my $flat = shift; my $cumulative = shift; my $list_opts = shift; my $html = shift; my $output = \*STDOUT; my $fname = ""; if ($html) { # Arrange to write the output to a temporary file $fname = TempName($main::next_tmpfile, "html"); $main::next_tmpfile++; if (!open(TEMP, ">$fname")) { print STDERR "$fname: $!\n"; return; } $output = \*TEMP; print $output HtmlListingHeader(); printf $output ("
%s
Total: %s %s
\n", $main::prog, Unparse($total), Units()); } my $listed = 0; foreach my $lib (@{$libs}) { my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts); my $offset = AddressSub($lib->[1], $lib->[3]); foreach my $routine (sort ByName keys(%{$symbol_table})) { # Print if there are any samples in this routine my $start_addr = $symbol_table->{$routine}->[0]; my $end_addr = $symbol_table->{$routine}->[1]; my $length = hex(AddressSub($end_addr, $start_addr)); my $addr = AddressAdd($start_addr, $offset); for (my $i = 0; $i < $length; $i++) { if (defined($cumulative->{$addr})) { $listed += PrintSource( $lib->[0], $offset, $routine, $flat, $cumulative, $start_addr, $end_addr, $html, $output); last; } $addr = AddressInc($addr); } } } if ($html) { if ($listed > 0) { print $output HtmlListingFooter(); close($output); RunWeb($fname); } else { close($output); unlink($fname); } } } sub HtmlListingHeader { return <<'EOF'; Pprof listing EOF } sub HtmlListingFooter { return <<'EOF'; EOF } sub HtmlEscape { my $text = shift; $text =~ s/&/&/g; $text =~ s//>/g; return $text; } # Returns the indentation of the line, if it has any non-whitespace # characters. Otherwise, returns -1. sub Indentation { my $line = shift; if (m/^(\s*)\S/) { return length($1); } else { return -1; } } # If the symbol table contains inlining info, Disassemble() may tag an # instruction with a location inside an inlined function. But for # source listings, we prefer to use the location in the function we # are listing. So use MapToSymbols() to fetch full location # information for each instruction and then pick out the first # location from a location list (location list contains callers before # callees in case of inlining). # # After this routine has run, each entry in $instructions contains: # [0] start address # [1] filename for function we are listing # [2] line number for function we are listing # [3] disassembly # [4] limit address # [5] most specific filename (may be different from [1] due to inlining) # [6] most specific line number (may be different from [2] due to inlining) sub GetTopLevelLineNumbers { my ($lib, $offset, $instructions) = @_; my $pcs = []; for (my $i = 0; $i <= $#{$instructions}; $i++) { push(@{$pcs}, $instructions->[$i]->[0]); } my $symbols = {}; MapToSymbols($lib, $offset, $pcs, $symbols); for (my $i = 0; $i <= $#{$instructions}; $i++) { my $e = $instructions->[$i]; push(@{$e}, $e->[1]); push(@{$e}, $e->[2]); my $addr = $e->[0]; my $sym = $symbols->{$addr}; if (defined($sym)) { if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { $e->[1] = $1; # File name $e->[2] = $2; # Line number } } } } # Print source-listing for one routine sub PrintSource { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $html = shift; my $output = shift; # Disassemble all instructions (just to get line numbers) my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); GetTopLevelLineNumbers($prog, $offset, \@instructions); # Hack 1: assume that the first source file encountered in the # disassembly contains the routine my $filename = undef; for (my $i = 0; $i <= $#instructions; $i++) { if ($instructions[$i]->[2] >= 0) { $filename = $instructions[$i]->[1]; last; } } if (!defined($filename)) { print STDERR "no filename found in $routine\n"; return 0; } # Hack 2: assume that the largest line number from $filename is the # end of the procedure. 
  # This is typically safe since if P1 contains an inlined call to P2,
  # then P2 usually occurs earlier in the source file.  If this does not
  # work, we might have to compute a density profile or just print all
  # regions we find.
  my $lastline = 0;
  for (my $i = 0; $i <= $#instructions; $i++) {
    my $f = $instructions[$i]->[1];
    my $l = $instructions[$i]->[2];
    if (($f eq $filename) && ($l > $lastline)) {
      $lastline = $l;
    }
  }

  # Hack 3: assume the first source location from "filename" is the start of
  # the source code.
  my $firstline = 1;
  for (my $i = 0; $i <= $#instructions; $i++) {
    if ($instructions[$i]->[1] eq $filename) {
      $firstline = $instructions[$i]->[2];
      last;
    }
  }

  # Hack 4: Extend last line forward until its indentation is less than
  # the indentation we saw on $firstline
  my $oldlastline = $lastline;
  {
    if (!open(FILE, "<$filename")) {
      print STDERR "$filename: $!\n";
      return 0;
    }
    my $l = 0;
    my $first_indentation = -1;
    while (<FILE>) {
      s/\r//g;         # turn windows-looking lines into unix-looking lines
      $l++;
      my $indent = Indentation($_);
      if ($l >= $firstline) {
        if ($first_indentation < 0 && $indent >= 0) {
          $first_indentation = $indent;
          last if ($first_indentation == 0);
        }
      }
      if ($l >= $lastline && $indent >= 0) {
        if ($indent >= $first_indentation) {
          $lastline = $l+1;
        } else {
          last;
        }
      }
    }
    close(FILE);
  }

  # Assign all samples to the range $firstline,$lastline,
  # Hack 5: If an instruction does not occur in the range, its samples
  # are moved to the next instruction that occurs in the range.
  my $samples1 = {};        # Map from line number to flat count
  my $samples2 = {};        # Map from line number to cumulative count
  my $running1 = 0;         # Unassigned flat counts
  my $running2 = 0;         # Unassigned cumulative counts
  my $total1 = 0;           # Total flat counts
  my $total2 = 0;           # Total cumulative counts
  my %disasm = ();          # Map from line number to disassembly
  my $running_disasm = "";  # Unassigned disassembly
  my $skip_marker = "---\n";
  if ($html) {
    $skip_marker = "";
    for (my $l = $firstline; $l <= $lastline; $l++) {
      $disasm{$l} = "";
    }
  }
  my $last_dis_filename = '';
  my $last_dis_linenum = -1;
  my $last_touched_line = -1;  # To detect gaps in disassembly for a line
  foreach my $e (@instructions) {
    # Add up counts for all addresses that fall inside this instruction
    my $c1 = 0;
    my $c2 = 0;
    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
      $c1 += GetEntry($flat, $a);
      $c2 += GetEntry($cumulative, $a);
    }

    if ($html) {
      my $dis = sprintf("      %6s %6s \t\t%8s: %s ",
                        HtmlPrintNumber($c1),
                        HtmlPrintNumber($c2),
                        UnparseAddress($offset, $e->[0]),
                        CleanDisassembly($e->[3]));

      # Append the most specific source line associated with this instruction
      if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
      $dis = HtmlEscape($dis);
      my $f = $e->[5];
      my $l = $e->[6];
      if ($f ne $last_dis_filename) {
        $dis .= sprintf("%s:%d",
                        HtmlEscape(CleanFileName($f)), $l);
      } elsif ($l ne $last_dis_linenum) {
        # De-emphasize the unchanged file name portion
        $dis .= sprintf("%s" .
":%d", HtmlEscape(CleanFileName($f)), $l); } else { # De-emphasize the entire location $dis .= sprintf("%s:%d", HtmlEscape(CleanFileName($f)), $l); } $last_dis_filename = $f; $last_dis_linenum = $l; $running_disasm .= $dis; $running_disasm .= "\n"; } $running1 += $c1; $running2 += $c2; $total1 += $c1; $total2 += $c2; my $file = $e->[1]; my $line = $e->[2]; if (($file eq $filename) && ($line >= $firstline) && ($line <= $lastline)) { # Assign all accumulated samples to this line AddEntry($samples1, $line, $running1); AddEntry($samples2, $line, $running2); $running1 = 0; $running2 = 0; if ($html) { if ($line != $last_touched_line && $disasm{$line} ne '') { $disasm{$line} .= "\n"; } $disasm{$line} .= $running_disasm; $running_disasm = ''; $last_touched_line = $line; } } } # Assign any leftover samples to $lastline AddEntry($samples1, $lastline, $running1); AddEntry($samples2, $lastline, $running2); if ($html) { if ($lastline != $last_touched_line && $disasm{$lastline} ne '') { $disasm{$lastline} .= "\n"; } $disasm{$lastline} .= $running_disasm; } if ($html) { printf $output ( "

%s

%s\n
\n" .
      "Total:%6s %6s (flat / cumulative %s)\n",
      HtmlEscape(ShortFunctionName($routine)),
      HtmlEscape(CleanFileName($filename)),
      Unparse($total1),
      Unparse($total2),
      Units());
  } else {
    printf $output (
      "ROUTINE ====================== %s in %s\n" .
      "%6s %6s Total %s (flat / cumulative)\n",
      ShortFunctionName($routine),
      CleanFileName($filename),
      Unparse($total1),
      Unparse($total2),
      Units());
  }
  if (!open(FILE, "<$filename")) {
    print STDERR "$filename: $!\n";
    return 0;
  }
  my $l = 0;
  while () {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    $l++;
    if ($l >= $firstline - 5 &&
        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
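      # Print ~5 lines of context around the routine; $oldlastline is the
      # end before the indentation-based extension, $lastline after it.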
      chop;
      my $text = $_;
      if ($l == $firstline) { print $output $skip_marker; }
      my $n1 = GetEntry($samples1, $l);
      my $n2 = GetEntry($samples2, $l);
      if ($html) {
        # Emit a span that has one of the following classes:
        #    livesrc -- has samples
        #    deadsrc -- has disassembly, but with no samples
        #    nop     -- has no matching disassembly
        # Also emit an optional span containing disassembly.
        my $dis = $disasm{$l};
        my $asm = "";
        if (defined($dis) && $dis ne '') {
          $asm = "" . $dis . "";
        }
        my $source_class = (($n1 + $n2 > 0)
                            ? "livesrc"
                            : (($asm ne "") ? "deadsrc" : "nop"));
        printf $output (
          "%5d " .
          "%6s %6s %s%s\n",
          $l, $source_class,
          HtmlPrintNumber($n1),
          HtmlPrintNumber($n2),
          HtmlEscape($text),
          $asm);
      } else {
        printf $output(
          "%6s %6s %4d: %s\n",
          UnparseAlt($n1),
          UnparseAlt($n2),
          $l,
          $text);
      }
      if ($l == $lastline)  { print $output $skip_marker; }
    };
  }
  close(FILE);
  if ($html) {
    print $output "
\n"; } return 1; } # Return the source line for the specified file/linenumber. # Returns undef if not found. sub SourceLine { my $file = shift; my $line = shift; # Look in cache if (!defined($main::source_cache{$file})) { if (100 < scalar keys(%main::source_cache)) { # Clear the cache when it gets too big $main::source_cache = (); } # Read all lines from the file if (!open(FILE, "<$file")) { print STDERR "$file: $!\n"; $main::source_cache{$file} = []; # Cache the negative result return undef; } my $lines = []; push(@{$lines}, ""); # So we can use 1-based line numbers as indices while () { push(@{$lines}, $_); } close(FILE); # Save the lines in the cache $main::source_cache{$file} = $lines; } my $lines = $main::source_cache{$file}; if (($line < 0) || ($line > $#{$lines})) { return undef; } else { return $lines->[$line]; } } # Print disassembly for one routine with interspersed source if available sub PrintDisassembledFunction { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $total = shift; # Disassemble all instructions my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); # Make array of counts per instruction my @flat_count = (); my @cum_count = (); my $flat_total = 0; my $cum_total = 0; foreach my $e (@instructions) { # Add up counts for all address that fall inside this instruction my $c1 = 0; my $c2 = 0; for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { $c1 += GetEntry($flat, $a); $c2 += GetEntry($cumulative, $a); } push(@flat_count, $c1); push(@cum_count, $c2); $flat_total += $c1; $cum_total += $c2; } # Print header with total counts printf("ROUTINE ====================== %s\n" . "%6s %6s %s (flat, cumulative) %.1f%% of total\n", ShortFunctionName($routine), Unparse($flat_total), Unparse($cum_total), Units(), ($cum_total * 100.0) / $total); # Process instructions in order my $current_file = ""; for (my $i = 0; $i <= $#instructions; ) { my $e = $instructions[$i]; # Print the new file name whenever we switch files if ($e->[1] ne $current_file) { $current_file = $e->[1]; my $fname = $current_file; $fname =~ s|^\./||; # Trim leading "./" # Shorten long file names if (length($fname) >= 58) { $fname = "..." . substr($fname, -55); } printf("-------------------- %s\n", $fname); } # TODO: Compute range of lines to print together to deal with # small reorderings. 
my $first_line = $e->[2]; my $last_line = $first_line; my %flat_sum = (); my %cum_sum = (); for (my $l = $first_line; $l <= $last_line; $l++) { $flat_sum{$l} = 0; $cum_sum{$l} = 0; } # Find run of instructions for this range of source lines my $first_inst = $i; while (($i <= $#instructions) && ($instructions[$i]->[2] >= $first_line) && ($instructions[$i]->[2] <= $last_line)) { $e = $instructions[$i]; $flat_sum{$e->[2]} += $flat_count[$i]; $cum_sum{$e->[2]} += $cum_count[$i]; $i++; } my $last_inst = $i - 1; # Print source lines for (my $l = $first_line; $l <= $last_line; $l++) { my $line = SourceLine($current_file, $l); if (!defined($line)) { $line = "?\n"; next; } else { $line =~ s/^\s+//; } printf("%6s %6s %5d: %s", UnparseAlt($flat_sum{$l}), UnparseAlt($cum_sum{$l}), $l, $line); } # Print disassembly for (my $x = $first_inst; $x <= $last_inst; $x++) { my $e = $instructions[$x]; printf("%6s %6s %8s: %6s\n", UnparseAlt($flat_count[$x]), UnparseAlt($cum_count[$x]), UnparseAddress($offset, $e->[0]), CleanDisassembly($e->[3])); } } } # Print DOT graph sub PrintDot { my $prog = shift; my $symbols = shift; my $raw = shift; my $flat = shift; my $cumulative = shift; my $overall_total = shift; # Get total my $local_total = TotalProfile($flat); my $nodelimit = int($main::opt_nodefraction * $local_total); my $edgelimit = int($main::opt_edgefraction * $local_total); my $nodecount = $main::opt_nodecount; # Find nodes to include my @list = (sort { abs(GetEntry($cumulative, $b)) <=> abs(GetEntry($cumulative, $a)) || $a cmp $b } keys(%{$cumulative})); my $last = $nodecount - 1; if ($last > $#list) { $last = $#list; } while (($last >= 0) && (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { $last--; } if ($last < 0) { print STDERR "No nodes to print\n"; return 0; } if ($nodelimit > 0 || $edgelimit > 0) { printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", Unparse($nodelimit), Units(), Unparse($edgelimit), Units()); } # Open DOT output file my $output; my $escaped_dot = ShellEscape(@DOT); my $escaped_ps2pdf = ShellEscape(@PS2PDF); if ($main::opt_gv) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); $output = "| $escaped_dot -Tps2 >$escaped_outfile"; } elsif ($main::opt_evince) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; } elsif ($main::opt_ps) { $output = "| $escaped_dot -Tps2"; } elsif ($main::opt_pdf) { $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; } elsif ($main::opt_web || $main::opt_svg) { # We need to post-process the SVG, so write to a temporary file always. my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); $output = "| $escaped_dot -Tsvg >$escaped_outfile"; } elsif ($main::opt_gif) { $output = "| $escaped_dot -Tgif"; } else { $output = ">&STDOUT"; } open(DOT, $output) || error("$output: $!\n"); # Title printf DOT ("digraph \"%s; %s %s\" {\n", $prog, Unparse($overall_total), Units()); if ($main::opt_pdf) { # The output is more printable if we set the page size for dot. printf DOT ("size=\"8,11\"\n"); } printf DOT ("node [width=0.375,height=0.25];\n"); # Print legend printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . 
"label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", $prog, sprintf("Total %s: %s", Units(), Unparse($overall_total)), sprintf("Focusing on: %s", Unparse($local_total)), sprintf("Dropped nodes with <= %s abs(%s)", Unparse($nodelimit), Units()), sprintf("Dropped edges with <= %s %s", Unparse($edgelimit), Units()) ); # Print nodes my %node = (); my $nextnode = 1; foreach my $a (@list[0..$last]) { # Pick font size my $f = GetEntry($flat, $a); my $c = GetEntry($cumulative, $a); my $fs = 8; if ($local_total > 0) { $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); } $node{$a} = $nextnode++; my $sym = $a; $sym =~ s/\s+/\\n/g; $sym =~ s/::/\\n/g; # Extra cumulative info to print for non-leaves my $extra = ""; if ($f != $c) { $extra = sprintf("\\rof %s (%s)", Unparse($c), Percent($c, $local_total)); } my $style = ""; if ($main::opt_heapcheck) { if ($f > 0) { # make leak-causing nodes more visible (add a background) $style = ",style=filled,fillcolor=gray" } elsif ($f < 0) { # make anti-leak-causing nodes (which almost never occur) # stand out as well (triple border) $style = ",peripheries=3" } } printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . "\",shape=box,fontsize=%.1f%s];\n", $node{$a}, $sym, Unparse($f), Percent($f, $local_total), $extra, $fs, $style, ); } # Get edges and counts per edge my %edge = (); my $n; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$raw})) { # TODO: omit low %age edges $n = $raw->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); for (my $i = 1; $i <= $#translated; $i++) { my $src = $translated[$i]; my $dst = $translated[$i-1]; #next if ($src eq $dst); # Avoid self-edges? if (exists($node{$src}) && exists($node{$dst})) { my $edge_label = "$src\001$dst"; if (!exists($edge{$edge_label})) { $edge{$edge_label} = 0; } $edge{$edge_label} += $n; } } } # Print edges (process in order of decreasing counts) my %indegree = (); # Number of incoming edges added per node so far my %outdegree = (); # Number of outgoing edges added per node so far foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { my @x = split(/\001/, $e); $n = $edge{$e}; # Initialize degree of kept incoming and outgoing edges if necessary my $src = $x[0]; my $dst = $x[1]; if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } my $keep; if ($indegree{$dst} == 0) { # Keep edge if needed for reachability $keep = 1; } elsif (abs($n) <= $edgelimit) { # Drop if we are below --edgefraction $keep = 0; } elsif ($outdegree{$src} >= $main::opt_maxdegree || $indegree{$dst} >= $main::opt_maxdegree) { # Keep limited number of in/out edges per node $keep = 0; } else { $keep = 1; } if ($keep) { $outdegree{$src}++; $indegree{$dst}++; # Compute line width based on edge count my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); if ($fraction > 1) { $fraction = 1; } my $w = $fraction * 2; if ($w < 1 && ($main::opt_web || $main::opt_svg)) { # SVG output treats line widths < 1 poorly. 
        $w = 1;
      }

      # Dot sometimes segfaults if given edge weights that are too large, so
      # we cap the weights at a large value
      my $edgeweight = abs($n) ** 0.7;
      if ($edgeweight > 100000) { $edgeweight = 100000; }
      $edgeweight = int($edgeweight);

      my $style = sprintf("setlinewidth(%f)", $w);
      if ($x[1] =~ m/\(inline\)/) { $style .= ",dashed"; }

      # Use a slightly squashed function of the edge count as the weight
      printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
                  $node{$x[0]},
                  $node{$x[1]},
                  Unparse($n),
                  $edgeweight,
                  $style);
    }
  }

  print DOT ("}\n");
  close(DOT);

  if ($main::opt_web || $main::opt_svg) {
    # Rewrite SVG to be more usable inside web browser.
    RewriteSvg(TempName($main::next_tmpfile, "svg"));
  }

  return 1;
}

sub RewriteSvg {
  my $svgfile = shift;

  open(SVG, $svgfile) || die "open temp svg: $!";
  my @svg = <SVG>;
  close(SVG);
  unlink $svgfile;
  my $svg = join('', @svg);

  # Dot's SVG output is # # # # ... # # # # Change it to # # # $svg_javascript # # # ... # # #
  # Fix width, height; drop viewBox.
  $svg =~ s/(?s) above first my $svg_javascript = SvgJavascript(); my $viewport = "\n"; $svg =~ s/ above . $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/; $svg =~ s/$svgfile") || die "open $svgfile: $!";
  print SVG $svg;
  close(SVG);
  }
}

sub SvgJavascript {
  return <<'EOF';
EOF
}

# Provides a map from fullname to shortname for cases where the
# shortname is ambiguous.  The symlist has both the fullname and
# shortname for all symbols, which is usually fine, but sometimes --
# such as overloaded functions -- two different fullnames can map to
# the same shortname.  In that case, we use the address of the
# function to disambiguate the two.  This function fills in a map that
# maps fullnames to modified shortnames in such cases.  If a fullname
# is not present in the map, the 'normal' shortname provided by the
# symlist is the appropriate one to use.
sub FillFullnameToShortnameMap {
  my $symbols = shift;
  my $fullname_to_shortname_map = shift;
  my $shortnames_seen_once = {};
  my $shortnames_seen_more_than_once = {};

  foreach my $symlist (values(%{$symbols})) {
    # TODO(csilvers): deal with inlined symbols too.
    my $shortname = $symlist->[0];
    my $fullname = $symlist->[2];
    if ($fullname !~ /<[0-9a-fA-F]+>$/) {  # fullname doesn't end in an address
      next;    # the only collisions we care about are when addresses differ
    }
    if (defined($shortnames_seen_once->{$shortname}) &&
        $shortnames_seen_once->{$shortname} ne $fullname) {
      $shortnames_seen_more_than_once->{$shortname} = 1;
    } else {
      $shortnames_seen_once->{$shortname} = $fullname;
    }
  }

  foreach my $symlist (values(%{$symbols})) {
    my $shortname = $symlist->[0];
    my $fullname = $symlist->[2];
    # TODO(csilvers): take in a list of addresses we care about, and only
    # store in the map if $symlist->[1] is in that list.  Saves space.
    next if defined($fullname_to_shortname_map->{$fullname});
    if (defined($shortnames_seen_more_than_once->{$shortname})) {
      if ($fullname =~ /<0*([^>]*)>$/) {  # fullname has address at end of it
        $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
      }
    }
  }
}

# Return a small number that identifies the argument.
# Multiple calls with the same argument will return the same number.
# Calls with different arguments will return different numbers.
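# E.g. the first distinct key gets id 1, the next distinct key id 2, and
# so on.  TranslateStack uses this to give "Callback::Run" frames a
# per-caller identity such as "Run#1" / "Run#2" (ids illustrative).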
sub ShortIdFor { my $key = shift; my $id = $main::uniqueid{$key}; if (!defined($id)) { $id = keys(%main::uniqueid) + 1; $main::uniqueid{$key} = $id; } return $id; } # Translate a stack of addresses into a stack of symbols sub TranslateStack { my $symbols = shift; my $fullname_to_shortname_map = shift; my $k = shift; my @addrs = split(/\n/, $k); my @result = (); for (my $i = 0; $i <= $#addrs; $i++) { my $a = $addrs[$i]; # Skip large addresses since they sometimes show up as fake entries on RH9 if (length($a) > 8 && $a gt "7fffffffffffffff") { next; } if ($main::opt_disasm || $main::opt_list) { # We want just the address for the key push(@result, $a); next; } my $symlist = $symbols->{$a}; if (!defined($symlist)) { $symlist = [$a, "", $a]; } # We can have a sequence of symbols for a particular entry # (more than one symbol in the case of inlining). Callers # come before callees in symlist, so walk backwards since # the translated stack should contain callees before callers. for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { my $func = $symlist->[$j-2]; my $fileline = $symlist->[$j-1]; my $fullfunc = $symlist->[$j]; if (defined($fullname_to_shortname_map->{$fullfunc})) { $func = $fullname_to_shortname_map->{$fullfunc}; } if ($j > 2) { $func = "$func (inline)"; } # Do not merge nodes corresponding to Callback::Run since that # causes confusing cycles in dot display. Instead, we synthesize # a unique name for this frame per caller. if ($func =~ m/Callback.*::Run$/) { my $caller = ($i > 0) ? $addrs[$i-1] : 0; $func = "Run#" . ShortIdFor($caller); } if ($main::opt_addresses) { push(@result, "$a $func $fileline"); } elsif ($main::opt_lines) { if ($func eq '??' && $fileline eq '??:0') { push(@result, "$a"); } else { push(@result, "$func $fileline"); } } elsif ($main::opt_functions) { if ($func eq '??') { push(@result, "$a"); } else { push(@result, $func); } } elsif ($main::opt_files) { if ($fileline eq '??:0' || $fileline eq '') { push(@result, "$a"); } else { my $f = $fileline; $f =~ s/:\d+$//; push(@result, $f); } } else { push(@result, $a); last; # Do not print inlined info } } } # print join(",", @addrs), " => ", join(",", @result), "\n"; return @result; } # Generate percent string for a number and a total sub Percent { my $num = shift; my $tot = shift; if ($tot != 0) { return sprintf("%.1f%%", $num * 100.0 / $tot); } else { return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); } } # Generate pretty-printed form of number sub Unparse { my $num = shift; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return sprintf("%d", $num); } else { if ($main::opt_show_bytes) { return sprintf("%d", $num); } else { return sprintf("%.1f", $num / 1048576.0); } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds } else { return sprintf("%d", $num); } } # Alternate pretty-printed form: 0 maps to "." 
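# (e.g. UnparseAlt(0) is "." while UnparseAlt(7) is whatever Unparse(7)
# yields for the current profile type -- "7" for samples/objects, "0.0"
# for a heap profile rendered in MB.)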
sub UnparseAlt { my $num = shift; if ($num == 0) { return "."; } else { return Unparse($num); } } # Alternate pretty-printed form: 0 maps to "" sub HtmlPrintNumber { my $num = shift; if ($num == 0) { return ""; } else { return Unparse($num); } } # Return output units sub Units { if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return "objects"; } else { if ($main::opt_show_bytes) { return "B"; } else { return "MB"; } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return "seconds"; } else { return "samples"; } } ##### Profile manipulation code ##### # Generate flattened profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a] sub FlatProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); if ($#addrs >= 0) { AddEntry($result, $addrs[0], $count); } } return $result; } # Generate cumulative profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a], [b], [c], [d] sub CumulativeProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { AddEntry($result, $a, $count); } } return $result; } # If the second-youngest PC on the stack is always the same, returns # that pc. Otherwise, returns undef. sub IsSecondPcAlwaysTheSame { my $profile = shift; my $second_pc = undef; foreach my $k (keys(%{$profile})) { my @addrs = split(/\n/, $k); if ($#addrs < 1) { return undef; } if (not defined $second_pc) { $second_pc = $addrs[1]; } else { if ($second_pc ne $addrs[1]) { return undef; } } } return $second_pc; } sub ExtractSymbolLocation { my $symbols = shift; my $address = shift; # 'addr2line' outputs "??:0" for unknown locations; we do the # same to be consistent. my $location = "??:0:unknown"; if (exists $symbols->{$address}) { my $file = $symbols->{$address}->[1]; if ($file eq "?") { $file = "??:0" } $location = $file . ":" . $symbols->{$address}->[0]; } return $location; } # Extracts a graph of calls. 
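# Keys are "file:line:function" locations (see ExtractSymbolLocation) or
# "source -> destination" pairs; values are sample counts.  Illustrative
# entry: "/src/a.cc:10:main -> /src/b.cc:22:helper" => 57.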
sub ExtractCalls { my $symbols = shift; my $profile = shift; my $calls = {}; while( my ($stack_trace, $count) = each %$profile ) { my @address = split(/\n/, $stack_trace); my $destination = ExtractSymbolLocation($symbols, $address[0]); AddEntry($calls, $destination, $count); for (my $i = 1; $i <= $#address; $i++) { my $source = ExtractSymbolLocation($symbols, $address[$i]); my $call = "$source -> $destination"; AddEntry($calls, $call, $count); $destination = $source; } } return $calls; } sub FilterFrames { my $symbols = shift; my $profile = shift; if ($main::opt_retain eq '' && $main::opt_exclude eq '') { return $profile; } my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my @path = (); foreach my $a (@addrs) { my $sym; if (exists($symbols->{$a})) { $sym = $symbols->{$a}->[0]; } else { $sym = $a; } if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) { next; } if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) { next; } push(@path, $a); } if (scalar(@path) > 0) { my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } } return $result; } sub RemoveUninterestingFrames { my $symbols = shift; my $profile = shift; # List of function names to skip my %skip = (); my $skip_regexp = 'NOMATCH'; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { foreach my $name ('je_calloc', 'cfree', 'je_malloc', 'newImpl', 'void* newImpl', 'je_free', 'je_memalign', 'je_posix_memalign', 'je_aligned_alloc', 'pvalloc', 'je_valloc', 'je_realloc', 'je_mallocx', 'je_rallocx', 'je_xallocx', 'je_dallocx', 'je_sdallocx', 'tc_calloc', 'tc_cfree', 'tc_malloc', 'tc_free', 'tc_memalign', 'tc_posix_memalign', 'tc_pvalloc', 'tc_valloc', 'tc_realloc', 'tc_new', 'tc_delete', 'tc_newarray', 'tc_deletearray', 'tc_new_nothrow', 'tc_newarray_nothrow', 'do_malloc', '::do_malloc', # new name -- got moved to an unnamed ns '::do_malloc_or_cpp_alloc', 'DoSampledAllocation', 'simple_alloc::allocate', '__malloc_alloc_template::allocate', '__builtin_delete', '__builtin_new', '__builtin_vec_delete', '__builtin_vec_new', 'operator new', 'operator new[]', # The entry to our memory-allocation routines on OS X 'malloc_zone_malloc', 'malloc_zone_calloc', 'malloc_zone_valloc', 'malloc_zone_realloc', 'malloc_zone_memalign', 'malloc_zone_free', # These mark the beginning/end of our custom sections '__start_google_malloc', '__stop_google_malloc', '__start_malloc_hook', '__stop_malloc_hook') { $skip{$name} = 1; $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything } # TODO: Remove TCMalloc once everything has been # moved into the tcmalloc:: namespace and we have flushed # old code out of the system. $skip_regexp = "TCMalloc|^tcmalloc::"; } elsif ($main::profile_type eq 'contention') { foreach my $vname ('base::RecordLockProfileData', 'base::SubmitMutexProfileData', 'base::SubmitSpinLockProfileData', 'Mutex::Unlock', 'Mutex::UnlockSlow', 'Mutex::ReaderUnlock', 'MutexLock::~MutexLock', 'SpinLock::Unlock', 'SpinLock::SlowUnlock', 'SpinLockHolder::~SpinLockHolder') { $skip{$vname} = 1; } } elsif ($main::profile_type eq 'cpu') { # Drop signal handlers used for CPU profile collection # TODO(dpeng): this should not be necessary; it's taken # care of by the general 2nd-pc mechanism below. 
foreach my $name ('ProfileData::Add', # historical 'ProfileData::prof_handler', # historical 'CpuProfiler::prof_handler', '__FRAME_END__', '__pthread_sighandler', '__restore') { $skip{$name} = 1; } } else { # Nothing skipped for unknown types } if ($main::profile_type eq 'cpu') { # If all the second-youngest program counters are the same, # this STRONGLY suggests that it is an artifact of measurement, # i.e., stack frames pushed by the CPU profiler signal handler. # Hence, we delete them. # (The topmost PC is read from the signal structure, not from # the stack, so it does not get involved.) while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { my $result = {}; my $func = ''; if (exists($symbols->{$second_pc})) { $second_pc = $symbols->{$second_pc}->[0]; } print STDERR "Removing $second_pc from all stack traces.\n"; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); splice @addrs, 1, 1; my $reduced_path = join("\n", @addrs); AddEntry($result, $reduced_path, $count); } $profile = $result; } } my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my @path = (); foreach my $a (@addrs) { if (exists($symbols->{$a})) { my $func = $symbols->{$a}->[0]; if ($skip{$func} || ($func =~ m/$skip_regexp/)) { # Throw away the portion of the backtrace seen so far, under the # assumption that previous frames were for functions internal to the # allocator. @path = (); next; } } push(@path, $a); } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } $result = FilterFrames($symbols, $result); return $result; } # Reduce profile to granularity given by user sub ReduceProfile { my $symbols = shift; my $profile = shift; my $result = {}; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); my @path = (); my %seen = (); $seen{''} = 1; # So that empty keys are skipped foreach my $e (@translated) { # To avoid double-counting due to recursion, skip a stack-trace # entry if it has already been seen if (!$seen{$e}) { $seen{$e} = 1; push(@path, $e); } } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } return $result; } # Does the specified symbol array match the regexp? 
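# ($sym is a flat array of [shortname, fileline, fullname] triples, as
# built by FetchSymbols/ExtractSymbols, hence the step of 3 below.)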
sub SymbolMatches { my $sym = shift; my $re = shift; if (defined($sym)) { for (my $i = 0; $i < $#{$sym}; $i += 3) { if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { return 1; } } } return 0; } # Focus only on paths involving specified regexps sub FocusProfile { my $symbols = shift; my $profile = shift; my $focus = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { AddEntry($result, $k, $count); last; } } } return $result; } # Focus only on paths not involving specified regexps sub IgnoreProfile { my $symbols = shift; my $profile = shift; my $ignore = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my $matched = 0; foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { $matched = 1; last; } } if (!$matched) { AddEntry($result, $k, $count); } } return $result; } # Get total count in profile sub TotalProfile { my $profile = shift; my $result = 0; foreach my $k (keys(%{$profile})) { $result += $profile->{$k}; } return $result; } # Add A to B sub AddProfile { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { my $v = $A->{$k}; AddEntry($R, $k, $v); } # add all keys in B foreach my $k (keys(%{$B})) { my $v = $B->{$k}; AddEntry($R, $k, $v); } return $R; } # Merges symbol maps sub MergeSymbols { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { $R->{$k} = $A->{$k}; } if (defined($B)) { foreach my $k (keys(%{$B})) { $R->{$k} = $B->{$k}; } } return $R; } # Add A to B sub AddPcs { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { $R->{$k} = 1 } # add all keys in B foreach my $k (keys(%{$B})) { $R->{$k} = 1 } return $R; } # Subtract B from A sub SubtractProfile { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { my $v = $A->{$k} - GetEntry($B, $k); if ($v < 0 && $main::opt_drop_negative) { $v = 0; } AddEntry($R, $k, $v); } if (!$main::opt_drop_negative) { # Take care of when subtracted profile has more entries foreach my $k (keys(%{$B})) { if (!exists($A->{$k})) { AddEntry($R, $k, 0 - $B->{$k}); } } } return $R; } # Get entry from profile; zero if not present sub GetEntry { my $profile = shift; my $k = shift; if (exists($profile->{$k})) { return $profile->{$k}; } else { return 0; } } # Add entry to specified profile sub AddEntry { my $profile = shift; my $k = shift; my $n = shift; if (!exists($profile->{$k})) { $profile->{$k} = 0; } $profile->{$k} += $n; } # Add a stack of entries to specified profile, and add them to the $pcs # list. 
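# Profile keys are newline-joined, zero-extended hex addresses with the
# leaf frame first; e.g. (illustrative) AddEntries($prof, $pcs,
# "4004f6 401230", 7) charges 7 samples to the two-frame stack whose
# leaf is 0x...4004f6.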
sub AddEntries {
  my $profile = shift;
  my $pcs = shift;
  my $stack = shift;
  my $count = shift;
  my @k = ();

  foreach my $e (split(/\s+/, $stack)) {
    my $pc = HexExtend($e);
    $pcs->{$pc} = 1;
    push @k, $pc;
  }
  AddEntry($profile, (join "\n", @k), $count);
}

##### Code to profile a server dynamically #####

sub CheckSymbolPage {
  my $url = SymbolPageURL();
  my $command = ShellEscape(@URL_FETCHER, $url);
  open(SYMBOL, "$command |") or error($command);
  my $line = <SYMBOL>;
  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
  close(SYMBOL);
  unless (defined($line)) {
    error("$url doesn't exist\n");
  }

  if ($line =~ /^num_symbols:\s+(\d+)$/) {
    if ($1 == 0) {
      error("Stripped binary. No symbols available.\n");
    }
  } else {
    error("Failed to get the number of symbols from $url\n");
  }
}

sub IsProfileURL {
  my $profile_name = shift;
  if (-f $profile_name) {
    printf STDERR "Using local file $profile_name.\n";
    return 0;
  }
  return 1;
}

sub ParseProfileURL {
  my $profile_name = shift;

  if (!defined($profile_name) || $profile_name eq "") {
    return ();
  }

  # Split profile URL - matches all non-empty strings, so no test.
  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;

  my $proto = $1 || "http://";
  my $hostport = $2;
  my $prefix = $3;
  my $profile = $4 || "/";

  my $host = $hostport;
  $host =~ s/:.*//;

  my $baseurl = "$proto$hostport$prefix";
  return ($host, $baseurl, $profile);
}

# We fetch symbols from the first profile argument.
sub SymbolPageURL {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  return "$baseURL$SYMBOL_PAGE";
}

sub FetchProgramName() {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  my $url = "$baseURL$PROGRAM_NAME_PAGE";
  my $command_line = ShellEscape(@URL_FETCHER, $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  my $cmdline = <CMDLINE>;
  $cmdline =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
  close(CMDLINE);
  error("Failed to get program name from $url\n") unless defined($cmdline);
  $cmdline =~ s/\x00.+//;  # Remove argv[1] and later arguments.
  $cmdline =~ s!\n!!g;  # Remove LFs.
  return $cmdline;
}

# Gee, curl's -L (--location) option isn't reliable at least
# with its 7.12.3 version.  Curl will forget to post data if
# there is a redirection.  This function is a workaround for
# curl.  Redirection happens on borg hosts.
sub ResolveRedirectionForCurl {
  my $url = shift;
  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  while (<CMDLINE>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    if (/^Location: (.*)/) {
      $url = $1;
    }
  }
  close(CMDLINE);
  return $url;
}

# Add a timeout flag to URL_FETCHER.  Returns a new list.
sub AddFetchTimeout {
  my $timeout = shift;
  my @fetcher = @_;
  if (defined($timeout)) {
    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
      push(@fetcher, "--max-time", sprintf("%d", $timeout));
    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
      push(@fetcher, sprintf("--deadline=%d", $timeout));
    }
  }
  return @fetcher;
}

# Reads a symbol map from the file handle name given as $1, returning
# the resulting symbol map.  Also processes variables relating to symbols.
# Currently, the only variable processed is 'binary=' which updates
# $main::prog to have the correct program name.
sub ReadSymbols {
  my $in = shift;
  my $map = {};
  while (<$in>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    # Removes all the leading zeroes from the symbols, see comment below.
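    # Illustrative input lines, as emitted by PrintSymbolizedProfile
    # (symbol names hypothetical):
    #   0x00000000004004f6 main--inlined_helper
    #   binary=/usr/local/bin/myprog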
if (m/^0x0*([0-9a-f]+)\s+(.+)/) { $map->{$1} = $2; } elsif (m/^---/) { last; } elsif (m/^([a-z][^=]*)=(.*)$/ ) { my ($variable, $value) = ($1, $2); for ($variable, $value) { s/^\s+//; s/\s+$//; } if ($variable eq "binary") { if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", $main::prog, $value); } $main::prog = $value; } else { printf STDERR ("Ignoring unknown variable in symbols list: " . "'%s' = '%s'\n", $variable, $value); } } } return $map; } sub URLEncode { my $str = shift; $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg; return $str; } sub AppendSymbolFilterParams { my $url = shift; my @params = (); if ($main::opt_retain ne '') { push(@params, sprintf("retain=%s", URLEncode($main::opt_retain))); } if ($main::opt_exclude ne '') { push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude))); } if (scalar @params > 0) { $url = sprintf("%s?%s", $url, join("&", @params)); } return $url; } # Fetches and processes symbols to prepare them for use in the profile output # code. If the optional 'symbol_map' arg is not given, fetches symbols from # $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols # are assumed to have already been fetched into 'symbol_map' and are simply # extracted and processed. sub FetchSymbols { my $pcset = shift; my $symbol_map = shift; my %seen = (); my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq if (!defined($symbol_map)) { my $post_data = join("+", sort((map {"0x" . "$_"} @pcs))); open(POSTFILE, ">$main::tmpfile_sym"); print POSTFILE $post_data; close(POSTFILE); my $url = SymbolPageURL(); my $command_line; if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { $url = ResolveRedirectionForCurl($url); $url = AppendSymbolFilterParams($url); $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", $url); } else { $url = AppendSymbolFilterParams($url); $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) . " < " . ShellEscape($main::tmpfile_sym)); } # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); $symbol_map = ReadSymbols(*SYMBOL{IO}); close(SYMBOL); } my $symbols = {}; foreach my $pc (@pcs) { my $fullname; # For 64 bits binaries, symbols are extracted with 8 leading zeroes. # Then /symbol reads the long symbols in as uint64, and outputs # the result with a "0x%08llx" format which get rid of the zeroes. # By removing all the leading zeroes in both $pc and the symbols from # /symbol, the symbols match and are retrievable from the map. my $shortpc = $pc; $shortpc =~ s/^0*//; # Each line may have a list of names, which includes the function # and also other functions it has inlined. They are separated (in # PrintSymbolizedProfile), by --, which is illegal in function names. my $fullnames; if (defined($symbol_map->{$shortpc})) { $fullnames = $symbol_map->{$shortpc}; } else { $fullnames = "0x" . 
$pc; # Just use addresses } my $sym = []; $symbols->{$pc} = $sym; foreach my $fullname (split("--", $fullnames)) { my $name = ShortFunctionName($fullname); push(@{$sym}, $name, "?", $fullname); } } return $symbols; } sub BaseName { my $file_name = shift; $file_name =~ s!^.*/!!; # Remove directory name return $file_name; } sub MakeProfileBaseName { my ($binary_name, $profile_name) = @_; my ($host, $baseURL, $path) = ParseProfileURL($profile_name); my $binary_shortname = BaseName($binary_name); return sprintf("%s.%s.%s", $binary_shortname, $main::op_time, $host); } sub FetchDynamicProfile { my $binary_name = shift; my $profile_name = shift; my $fetch_name_only = shift; my $encourage_patience = shift; if (!IsProfileURL($profile_name)) { return $profile_name; } else { my ($host, $baseURL, $path) = ParseProfileURL($profile_name); if ($path eq "" || $path eq "/") { # Missing type specifier defaults to cpu-profile $path = $PROFILE_PAGE; } my $profile_file = MakeProfileBaseName($binary_name, $profile_name); my $url = "$baseURL$path"; my $fetch_timeout = undef; if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { if ($path =~ m/[?]/) { $url .= "&"; } else { $url .= "?"; } $url .= sprintf("seconds=%d", $main::opt_seconds); $fetch_timeout = $main::opt_seconds * 1.01 + 60; # Set $profile_type for consumption by PrintSymbolizedProfile. $main::profile_type = 'cpu'; } else { # For non-CPU profiles, we add a type-extension to # the target profile file name. my $suffix = $path; $suffix =~ s,/,.,g; $profile_file .= $suffix; # Set $profile_type for consumption by PrintSymbolizedProfile. if ($path =~ m/$HEAP_PAGE/) { $main::profile_type = 'heap'; } elsif ($path =~ m/$GROWTH_PAGE/) { $main::profile_type = 'growth'; } elsif ($path =~ m/$CONTENTION_PAGE/) { $main::profile_type = 'contention'; } } my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof"); if (! -d $profile_dir) { mkdir($profile_dir) || die("Unable to create profile directory $profile_dir: $!\n"); } my $tmp_profile = "$profile_dir/.tmp.$profile_file"; my $real_profile = "$profile_dir/$profile_file"; if ($fetch_name_only > 0) { return $real_profile; } my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; if ($encourage_patience) { print STDERR "Be patient...\n"; } } else { print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; } (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); print STDERR "Wrote profile to $real_profile\n"; $main::collected_profile = $real_profile; return $main::collected_profile; } } # Collect profiles in parallel sub FetchDynamicProfiles { my $items = scalar(@main::pfile_args); my $levels = log($items) / log(2); if ($items == 1) { $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); } else { # math rounding issues if ((2 ** $levels) < $items) { $levels++; } my $count = scalar(@main::pfile_args); for (my $i = 0; $i < $count; $i++) { $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); } print STDERR "Fetching $count profiles, Be patient...\n"; FetchDynamicProfilesRecurse($levels, 0, 0); $main::collected_profile = join(" \\\n ", @main::profile_files); } } # Recursively fork a process to get enough processes # collecting profiles sub FetchDynamicProfilesRecurse { my $maxlevel = shift; my $level = shift; my $position = shift; if (my $pid = fork()) { $position = 0 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); wait; } else { $position = 1 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); cleanup(); exit(0); } } # Collect a single profile sub TryCollectProfile { my $maxlevel = shift; my $level = shift; my $position = shift; if ($level >= ($maxlevel - 1)) { if ($position < scalar(@main::pfile_args)) { FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); } } else { FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); } } ##### Parsing code ##### # Provide a small streaming-read module to handle very large # cpu-profile files. Stream in chunks along a sliding window. # Provides an interface to get one 'slot', correctly handling # endian-ness differences. A slot is one 32-bit or 64-bit word # (depending on the input profile). We tell endianness and bit-size # for the profile by looking at the first 8 bytes: in cpu profiles, # the second slot is always 3 (we'll accept anything that's not 0). BEGIN { package CpuProfileStream; sub new { my ($class, $file, $fname) = @_; my $self = { file => $file, base => 0, stride => 512 * 1024, # must be a multiple of bitsize/8 slots => [], unpack_code => "", # N for big-endian, V for little perl_is_64bit => 1, # matters if profile is 64-bit }; bless $self, $class; # Let unittests adjust the stride if ($main::opt_test_stride > 0) { $self->{stride} = $main::opt_test_stride; } # Read the first two slots to figure out bitsize and endianness. my $slots = $self->{slots}; my $str; read($self->{file}, $str, 8); # Set the global $address_length based on what we see here. # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). $address_length = ($str eq (chr(0)x8)) ? 16 : 8; if ($address_length == 8) { if (substr($str, 6, 2) eq chr(0)x2) { $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 4, 2) eq chr(0)x2) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**16\n"); } @$slots = unpack($self->{unpack_code} . 
"*", $str); } else { # If we're a 64-bit profile, check if we're a 64-bit-capable # perl. Otherwise, each slot will be represented as a float # instead of an int64, losing precision and making all the # 64-bit addresses wrong. We won't complain yet, but will # later if we ever see a value that doesn't fit in 32 bits. my $has_q = 0; eval { $has_q = pack("Q", "1") ? 1 : 1; }; if (!$has_q) { $self->{perl_is_64bit} = 0; } read($self->{file}, $str, 8); if (substr($str, 4, 4) eq chr(0)x4) { # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 0, 4) eq chr(0)x4) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**32\n"); } my @pair = unpack($self->{unpack_code} . "*", $str); # Since we know one of the pair is 0, it's fine to just add them. @$slots = (0, $pair[0] + $pair[1]); } return $self; } # Load more data when we access slots->get(X) which is not yet in memory. sub overflow { my ($self) = @_; my $slots = $self->{slots}; $self->{base} += $#$slots + 1; # skip over data we're replacing my $str; read($self->{file}, $str, $self->{stride}); if ($address_length == 8) { # the 32-bit case # This is the easy case: unpack provides 32-bit unpacking primitives. @$slots = unpack($self->{unpack_code} . "*", $str); } else { # We need to unpack 32 bits at a time and combine. my @b32_values = unpack($self->{unpack_code} . "*", $str); my @b64_values = (); for (my $i = 0; $i < $#b32_values; $i += 2) { # TODO(csilvers): if this is a 32-bit perl, the math below # could end up in a too-large int, which perl will promote # to a double, losing necessary precision. Deal with that. # Right now, we just die. my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); if ($self->{unpack_code} eq 'N') { # big-endian ($lo, $hi) = ($hi, $lo); } my $value = $lo + $hi * (2**32); if (!$self->{perl_is_64bit} && # check value is exactly represented (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { ::error("Need a 64-bit perl to process this 64-bit profile.\n"); } push(@b64_values, $value); } @$slots = @b64_values; } } # Access the i-th long in the file (logically), or -1 at EOF. sub get { my ($self, $idx) = @_; my $slots = $self->{slots}; while ($#$slots >= 0) { if ($idx < $self->{base}) { # The only time we expect a reference to $slots[$i - something] # after referencing $slots[$i] is reading the very first header. # Since $stride > |header|, that shouldn't cause any lookback # errors. And everything after the header is sequential. print STDERR "Unexpected look-back reading CPU profile"; return -1; # shrug, don't know what better to return } elsif ($idx > $self->{base} + $#$slots) { $self->overflow(); } else { return $slots->[$idx - $self->{base}]; } } # If we get here, $slots is [], which means we've reached EOF return -1; # unique since slots is supposed to hold unsigned numbers } } # Reads the top, 'header' section of a profile, and returns the last # line of the header, commonly called a 'header line'. The header # section of a profile consists of zero or more 'command' lines that # are instructions to jeprof, which jeprof executes when reading the # header. All 'command' lines start with a %. After the command # lines is the 'header line', which is a profile-specific line that # indicates what type of profile it is, and perhaps other global # information about the profile. 
For instance, here's a header line
# for a heap profile:
#     heap profile:   53:    38236 [  5525:  1284029] @ heapprofile
# For historical reasons, the CPU profile does not contain a text-
# readable header line.  If the profile looks like a CPU profile,
# this function returns "".  If no header line could be found, this
# function returns undef.
#
# The following commands are recognized:
#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
#
# The input file should be in binmode.
sub ReadProfileHeader {
  local *PROFILE = shift;
  my $firstchar = "";
  my $line = "";
  read(PROFILE, $firstchar, 1);
  seek(PROFILE, -1, 1);                    # unread the firstchar
  if ($firstchar !~ /[[:print:]]/) {       # is not a text character
    return "";
  }
  while (defined($line = <PROFILE>)) {
    $line =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
    if ($line =~ /^%warn\s+(.*)/) {        # 'warn' command
      # Note this matches both '%warn blah\n' and '%warn\n'.
      print STDERR "WARNING: $1\n";        # print the rest of the line
    } elsif ($line =~ /^%/) {
      print STDERR "Ignoring unknown command from profile header: $line";
    } else {
      # End of commands, must be the header line.
      return $line;
    }
  }
  return undef;     # got to EOF without seeing a header line
}

sub IsSymbolizedProfileFile {
  my $file_name = shift;
  if (!(-e $file_name) || !(-r $file_name)) {
    return 0;
  }
  # Check if the file contains a symbol-section marker.
  open(TFILE, "<$file_name");
  binmode TFILE;
  my $firstline = ReadProfileHeader(*TFILE);
  close(TFILE);
  if (!$firstline) {
    return 0;
  }
  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $symbol_marker = $&;
  return $firstline =~ /^--- *$symbol_marker/;
}

# Parse profile generated by common/profiler.cc and return a reference
# to a map:
#      $result->{version}  Version number of profile file
#      $result->{period}   Sampling period (in microseconds)
#      $result->{profile}  Profile object
#      $result->{threads}  Map of thread IDs to profile objects
#      $result->{map}      Memory map info from profile
#      $result->{pcs}      Hash of all PC values seen, key is hex address
sub ReadProfile {
  my $prog = shift;
  my $fname = shift;
  my $result;            # return value

  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $contention_marker = $&;
  $GROWTH_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $growth_marker = $&;
  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $symbol_marker = $&;
  $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $profile_marker = $&;
  $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $heap_marker = $&;

  # Look at first line to see if it is a heap or a CPU profile.
  # CPU profile may start with no header at all, and just binary data
  # (starting with \0\0\0\0) -- in that case, don't try to read the
  # whole firstline, since it may be gigabytes(!) of data.
  open(PROFILE, "<$fname") || error("$fname: $!\n");
  binmode PROFILE;      # New perls do UTF-8 processing
  my $header = ReadProfileHeader(*PROFILE);
  if (!defined($header)) {   # means "at EOF"
    error("Profile is empty.\n");
  }

  my $symbols;
  if ($header =~ m/^--- *$symbol_marker/o) {
    # Verify that the user asked for a symbolized profile
    if (!$main::use_symbolized_profile) {
      # we have both a binary and a symbolized profile; abort
      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
            "a binary arg. Try again without passing\n   $prog\n");
    }
    # Read the symbol section of the symbolized profile file.
    $symbols = ReadSymbols(*PROFILE{IO});
    # Read the next line to get the header for the remaining profile.
$header = ReadProfileHeader(*PROFILE) || ""; } if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) { # Skip "--- ..." line for profile types that have their own headers. $header = ReadProfileHeader(*PROFILE) || ""; } $main::profile_type = ''; if ($header =~ m/^heap profile:.*$growth_marker/o) { $main::profile_type = 'growth'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap profile:/) { $main::profile_type = 'heap'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap/) { $main::profile_type = 'heap'; $result = ReadThreadedHeapProfile($prog, $fname, $header); } elsif ($header =~ m/^--- *$contention_marker/o) { $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *Stacks:/) { print STDERR "Old format contention profile: mistakenly reports " . "condition variable signals as lock contentions.\n"; $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *$profile_marker/) { # the binary cpu profile data starts immediately after this line $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } else { if (defined($symbols)) { # a symbolized profile contains a format we don't recognize, bail out error("$fname: Cannot recognize profile section after symbols.\n"); } # no ascii header present -- must be a CPU profile $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } close(PROFILE); # if we got symbols along with the profile, return those as well if (defined($symbols)) { $result->{symbols} = $symbols; } return $result; } # Subtract one from caller pc so we map back to call instr. # However, don't do this if we're reading a symbolized profile # file, in which case the subtract-one was done when the file # was written. # # We apply the same logic to all readers, though ReadCPUProfile uses an # independent implementation. sub FixCallerAddresses { my $stack = shift; # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile() # dumps unadjusted profiles. { $stack =~ /(\s)/; my $delimiter = $1; my @addrs = split(' ', $stack); my @fixedaddrs; $#fixedaddrs = $#addrs; if ($#addrs >= 0) { $fixedaddrs[0] = $addrs[0]; } for (my $i = 1; $i <= $#addrs; $i++) { $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); } return join $delimiter, @fixedaddrs; } } # CPU profile reader sub ReadCPUProfile { my $prog = shift; my $fname = shift; # just used for logging local *PROFILE = shift; my $version; my $period; my $i; my $profile = {}; my $pcs = {}; # Parse string into array of slots. my $slots = CpuProfileStream->new(*PROFILE, $fname); # Read header. The current header version is a 5-element structure # containing: # 0: header count (always 0) # 1: header "words" (after this one: 3) # 2: format version (0) # 3: sampling period (usec) # 4: unused padding (always 0) if ($slots->get(0) != 0 ) { error("$fname: not a profile file, or old format profile file\n"); } $i = 2 + $slots->get(1); $version = $slots->get(2); $period = $slots->get(3); # Do some sanity checking on these header values. if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { error("$fname: not a profile file, or corrupted profile file\n"); } # Parse profile while ($slots->get($i) != -1) { my $n = $slots->get($i++); my $d = $slots->get($i++); if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? my $addr = sprintf("0%o", $i * ($address_length == 8 ? 
4 : 8));
      print STDERR "At index $i (address $addr):\n";
      error("$fname: stack trace depth >= 2**16\n");
    }
    if ($slots->get($i) == 0) {
      # End of profile data marker
      $i += $d;
      last;
    }

    # Make key out of the stack entries
    my @k = ();
    for (my $j = 0; $j < $d; $j++) {
      my $pc = $slots->get($i+$j);
      # Subtract one from caller pc so we map back to call instr.
      $pc--;
      $pc = sprintf("%0*x", $address_length, $pc);
      $pcs->{$pc} = 1;
      push @k, $pc;
    }

    AddEntry($profile, (join "\n", @k), $n);
    $i += $d;
  }

  # Parse map
  my $map = '';
  seek(PROFILE, $i * 4, 0);
  read(PROFILE, $map, (stat PROFILE)[7]);

  my $r = {};
  $r->{version} = $version;
  $r->{period} = $period;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;

  return $r;
}

sub HeapProfileIndex {
  my $index = 1;
  if ($main::opt_inuse_space) {
    $index = 1;
  } elsif ($main::opt_inuse_objects) {
    $index = 0;
  } elsif ($main::opt_alloc_space) {
    $index = 3;
  } elsif ($main::opt_alloc_objects) {
    $index = 2;
  }
  return $index;
}

sub ReadMappedLibraries {
  my $fh = shift;
  my $map = "";
  # Read the /proc/self/maps data
  while (<$fh>) {
    s/\r//g;    # turn windows-looking lines into unix-looking lines
    $map .= $_;
  }
  return $map;
}

sub ReadMemoryMap {
  my $fh = shift;
  my $map = "";
  # Read /proc/self/maps data as formatted by DumpAddressMap()
  my $buildvar = "";
  while (<PROFILE>) {
    s/\r//g;    # turn windows-looking lines into unix-looking lines
    # Parse "build=<base-address>" specification if supplied
    if (m/^\s*build=(.*)\n/) {
      $buildvar = $1;
    }
    # Expand "$build" variable if available
    $_ =~ s/\$build\b/$buildvar/g;
    $map .= $_;
  }
  return $map;
}

sub AdjustSamples {
  my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
  if ($sample_adjustment) {
    if ($sampling_algorithm == 2) {
      # Remote-heap version 2
      # The sampling frequency is the rate of a Poisson process.
      # This means that the probability of sampling an allocation of
      # size X with sampling rate Y is 1 - exp(-X/Y)
      if ($n1 != 0) {
        my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
        my $scale_factor = 1/(1 - exp(-$ratio));
        $n1 *= $scale_factor;
        $s1 *= $scale_factor;
      }
      if ($n2 != 0) {
        my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
        my $scale_factor = 1/(1 - exp(-$ratio));
        $n2 *= $scale_factor;
        $s2 *= $scale_factor;
      }
    } else {
      # Remote-heap version 1
      my $ratio;
      $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
      if ($ratio < 1) {
        $n1 /= $ratio;
        $s1 /= $ratio;
      }
      $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
      if ($ratio < 1) {
        $n2 /= $ratio;
        $s2 /= $ratio;
      }
    }
  }
  return ($n1, $s1, $n2, $s2);
}

sub ReadHeapProfile {
  my $prog = shift;
  local *PROFILE = shift;
  my $header = shift;

  my $index = HeapProfileIndex();

  # Find the type of this profile.  The header line looks like:
  #    heap profile:   1246:  8800744 [  1246:  8800744] @ <heap-url>/266053
  # There are two pairs <count>: <bytes>, the first inuse objects/space, and
  # the second allocated objects/space.  This is followed optionally by a
  # profile type, and if that is present, optionally by a sampling frequency.
  # For remote heap profiles (v1):
  # The interpretation of the sampling frequency is that the profiler, for
  # each sample, calculates a uniformly distributed random integer less than
  # the given value, and records the next sample after that many bytes have
  # been allocated.  Therefore, the expected sample interval is half of the
  # given frequency.  By default, if not specified, the expected sample
  # interval is 128KB.  Only remote-heap-page profiles are adjusted for
  # sample size.
  # For remote heap profiles (v2):
  # The sampling frequency is the rate of a Poisson process.  This means that
  # the probability of sampling an allocation of size X with sampling rate Y
  # is 1 - exp(-X/Y)
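  #
  # (Worked example of the arithmetic in AdjustSamples above; the numbers
  # here are hypothetical: at a v2 sampling rate of Y = 524288 bytes, a
  # bucket whose average allocation size is also 524288 bytes has X/Y = 1,
  # so its counts are scaled by 1/(1 - exp(-1)), roughly 1.582.)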
  # For version 2, a typical header line might look like this:
  # heap profile:   1922: 127792360 [  1922: 127792360] @ <heap-url>_v2/524288
  # the trailing number (524288) is the sampling rate. (Version 1 showed
  # double the 'rate' here)
  my $sampling_algorithm = 0;
  my $sample_adjustment = 0;
  chomp($header);
  my $type = "unknown";
  if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
    if (defined($6) && ($6 ne '')) {
      $type = $6;
      my $sample_period = $8;
      # $type is "heapprofile" for profiles generated by the
      # heap-profiler, and either "heap" or "heap_v2" for profiles
      # generated by sampling directly within tcmalloc.  It can also
      # be "growth" for heap-growth profiles.  The first is typically
      # found for profiles generated locally, and the others for
      # remote profiles.
      if (($type eq "heapprofile") || ($type !~ /heap/) ) {
        # No need to adjust for the sampling rate with heap-profiler-derived data
        $sampling_algorithm = 0;
      } elsif ($type =~ /_v2/) {
        $sampling_algorithm = 2;     # version 2 sampling
        if (defined($sample_period) && ($sample_period ne '')) {
          $sample_adjustment = int($sample_period);
        }
      } else {
        $sampling_algorithm = 1;     # version 1 sampling
        if (defined($sample_period) && ($sample_period ne '')) {
          $sample_adjustment = int($sample_period)/2;
        }
      }
    } else {
      # We detect whether or not this is a remote-heap profile by checking
      # that the total-allocated stats ($n2,$s2) are exactly the
      # same as the in-use stats ($n1,$s1).  It is remotely conceivable
      # that a non-remote-heap profile may pass this check, but it is hard
      # to imagine how that could happen.
      # In this case it's so old it's guaranteed to be remote-heap version 1.
      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
      if (($n1 == $n2) && ($s1 == $s2)) {
        # This is likely to be a remote-heap based sample profile
        $sampling_algorithm = 1;
      }
    }
  }

  if ($sampling_algorithm > 0) {
    # For remote-heap generated profiles, adjust the counts and sizes to
    # account for the sample rate (we sample once every 128KB by default).
    if ($sample_adjustment == 0) {
      # Turn on profile adjustment.
      $sample_adjustment = 128*1024;
      print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
    } else {
      printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
                     $sample_adjustment);
    }
    if ($sampling_algorithm > 1) {
      # We don't bother printing anything for the original version (version 1)
      printf STDERR "Heap version $sampling_algorithm\n";
    }
  }

  my $profile = {};
  my $pcs = {};
  my $map = "";

  while (<PROFILE>) {
    s/\r//g;    # turn windows-looking lines into unix-looking lines
    if (/^MAPPED_LIBRARIES:/) {
      $map .= ReadMappedLibraries(*PROFILE);
      last;
    }

    if (/^--- Memory map:/) {
      $map .= ReadMemoryMap(*PROFILE);
      last;
    }

    # Read entry of the form:
    #  <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
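    # e.g. (hypothetical entry, addresses shortened for readability):
    #     12:     4096 [    31:    65536] @ 0x4004f1 0x400521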
    s/^\s*//;
    s/\s*$//;
    if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
      my $stack = $5;
      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
                                 $n1, $s1, $n2, $s2);
      AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
    }
  }

  my $r = {};
  $r->{version} = "heap";
  $r->{period} = 1;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

sub ReadThreadedHeapProfile {
  my ($prog, $fname, $header) = @_;

  my $index = HeapProfileIndex();
  my $sampling_algorithm = 0;
  my $sample_adjustment = 0;
  chomp($header);
  my $type = "unknown";
  # Assuming a very specific type of header for now.
  if ($header =~ m"^heap_v2/(\d+)") {
    $type = "_v2";
    $sampling_algorithm = 2;
    $sample_adjustment = int($1);
  }
  if ($type ne "_v2" || !defined($sample_adjustment)) {
    die "Threaded heap profiles require v2 sampling with a sample rate\n";
  }

  my $profile = {};
  my $thread_profiles = {};
  my $pcs = {};
  my $map = "";
  my $stack = "";

  while (<PROFILE>) {
    s/\r//g;
    if (/^MAPPED_LIBRARIES:/) {
      $map .= ReadMappedLibraries(*PROFILE);
      last;
    }

    if (/^--- Memory map:/) {
      $map .= ReadMemoryMap(*PROFILE);
      last;
    }

    # Read entry of the form:
    # @ a1 a2 ... an
    #   t*: <count1>: <bytes1> [<count2>: <bytes2>]
    #   t1: <count1>: <bytes1> [<count2>: <bytes2>]
    #   ...
    #   tn: <count1>: <bytes1> [<count2>: <bytes2>]
    s/^\s*//;
    s/\s*$//;
    if (m/^@\s+(.*)$/) {
      $stack = $1;
    } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
      if ($stack eq "") {
        # Still in the header, so this is just a per-thread summary.
        next;
      }
      my $thread = $2;
      my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
                                 $n1, $s1, $n2, $s2);
      if ($thread eq "*") {
        AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
      } else {
        if (!exists($thread_profiles->{$thread})) {
          $thread_profiles->{$thread} = {};
        }
        AddEntries($thread_profiles->{$thread}, $pcs,
                   FixCallerAddresses($stack), $counts[$index]);
      }
    }
  }

  my $r = {};
  $r->{version} = "heap";
  $r->{period} = 1;
  $r->{profile} = $profile;
  $r->{threads} = $thread_profiles;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

sub ReadSynchProfile {
  my $prog = shift;
  local *PROFILE = shift;
  my $header = shift;

  my $map = '';
  my $profile = {};
  my $pcs = {};
  my $sampling_period = 1;
  my $cyclespernanosec = 2.8;   # Default assumption for old binaries
  my $seen_clockrate = 0;
  my $line;

  my $index = 0;
  if ($main::opt_total_delay) {
    $index = 0;
  } elsif ($main::opt_contentions) {
    $index = 1;
  } elsif ($main::opt_mean_delay) {
    $index = 2;
  }

  while ( $line = <PROFILE> ) {
    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
    if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
      my ($cycles, $count, $stack) = ($1, $2, $3);

      # Convert cycles to nanoseconds
      $cycles /= $cyclespernanosec;

      # Adjust for sampling done by application
      $cycles *= $sampling_period;
      $count *= $sampling_period;

      my @values = ($cycles, $count, $cycles / $count);
      AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
    } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ ||
              $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
      my ($cycles, $stack) = ($1, $2);
      if ($cycles !~ /^\d+$/) {
        next;
      }

      # Convert cycles to nanoseconds
      $cycles /= $cyclespernanosec;

      # Adjust for sampling done by application
      $cycles *= $sampling_period;

      AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);

    } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
      my ($variable, $value) = ($1,$2);
      for ($variable, $value) {
        s/^\s+//;
        s/\s+$//;
      }
      if ($variable eq "cycles/second") {
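        # For example, a 2.8 GHz machine reports cycles/second = 2800000000,
        # i.e. 2.8 cycles per nanosecond -- the same figure used above as
        # the default for old binaries.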
        $cyclespernanosec = $value / 1e9;
        $seen_clockrate = 1;
      } elsif ($variable eq "sampling period") {
        $sampling_period = $value;
      } elsif ($variable eq "ms since reset") {
        # Currently nothing is done with this value in jeprof
        # So we just silently ignore it for now
      } elsif ($variable eq "discarded samples") {
        # Currently nothing is done with this value in jeprof
        # So we just silently ignore it for now
      } else {
        printf STDERR ("Ignoring unknown variable in /contention output: " .
                       "'%s' = '%s'\n",$variable,$value);
      }
    } else {
      # Memory map entry
      $map .= $line;
    }
  }

  if (!$seen_clockrate) {
    printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
                   $cyclespernanosec);
  }

  my $r = {};
  $r->{version} = 0;
  $r->{period} = $sampling_period;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

# Given a hex value in the form "0x1abcd" or "1abcd", return either
# "0001abcd" or "000000000001abcd", depending on the current (global)
# address length.
sub HexExtend {
  my $addr = shift;

  $addr =~ s/^(0x)?0*//;
  my $zeros_needed = $address_length - length($addr);
  if ($zeros_needed < 0) {
    printf STDERR "Warning: address $addr is longer than address length $address_length\n";
    return $addr;
  }
  return ("0" x $zeros_needed) . $addr;
}

##### Symbol extraction #####

# Aggressively search the lib_prefix values for the given library
# If all else fails, just return the name of the library unmodified.
# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
# it will search the following locations in this order, until it finds a file:
#   /my/path/lib/dir/mylib.so
#   /other/path/lib/dir/mylib.so
#   /my/path/dir/mylib.so
#   /other/path/dir/mylib.so
#   /my/path/mylib.so
#   /other/path/mylib.so
#   /lib/dir/mylib.so   (returned as last resort)
sub FindLibrary {
  my $file = shift;
  my $suffix = $file;

  # Search for the library as described above
  do {
    foreach my $prefix (@prefix_list) {
      my $fullpath = $prefix . $suffix;
      if (-e $fullpath) {
        return $fullpath;
      }
    }
  } while ($suffix =~ s|^/[^/]+/|/|);
  return $file;
}

# Return path to library with debugging symbols.
# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
sub DebuggingLibrary {
  my $file = shift;
  if ($file =~ m|^/|) {
    if (-f "/usr/lib/debug$file") {
      return "/usr/lib/debug$file";
    } elsif (-f "/usr/lib/debug$file.debug") {
      return "/usr/lib/debug$file.debug";
    }
  }
  return undef;
}

# Parse text section header of a library using objdump
sub ParseTextSectionHeaderFromObjdump {
  my $lib = shift;

  my $size = undef;
  my $vma;
  my $file_offset;
  # Get objdump output from the library file to figure out how to
  # map between mapped addresses and addresses in the library.
  my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
  while (<OBJDUMP>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    # Idx Name          Size      VMA       LMA       File off  Algn
    #  10 .text         00104b2c  420156f0  420156f0  000156f0  2**4
    # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
    # offset may still be 8.  But AddressSub below will still handle that.
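    # (For the sample line above, split yields
    #    @x = ("10", ".text", "00104b2c", "420156f0", "420156f0",
    #          "000156f0", "2**4"),
    # so $size = $x[2], $vma = $x[3], and $file_offset = $x[5].)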
    my @x = split;
    if (($#x >= 6) && ($x[1] eq '.text')) {
      $size = $x[2];
      $vma = $x[3];
      $file_offset = $x[5];
      last;
    }
  }
  close(OBJDUMP);

  if (!defined($size)) {
    return undef;
  }

  my $r = {};
  $r->{size} = $size;
  $r->{vma} = $vma;
  $r->{file_offset} = $file_offset;

  return $r;
}

# Parse text section header of a library using otool (on OS X)
sub ParseTextSectionHeaderFromOtool {
  my $lib = shift;

  my $size = undef;
  my $vma = undef;
  my $file_offset = undef;
  # Get otool output from the library file to figure out how to
  # map between mapped addresses and addresses in the library.
  my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
  open(OTOOL, "$command |") || error("$command: $!\n");
  my $cmd = "";
  my $sectname = "";
  my $segname = "";
  foreach my $line (<OTOOL>) {
    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
    # Load command <#>
    #       cmd LC_SEGMENT
    # [...]
    # Section
    #   sectname __text
    #    segname __TEXT
    #       addr 0x000009f8
    #       size 0x00018b9e
    #     offset 2552
    #      align 2^2 (4)
    # We will need to strip off the leading 0x from the hex addresses,
    # and convert the offset into hex.
    if ($line =~ /Load command/) {
      $cmd = "";
      $sectname = "";
      $segname = "";
    } elsif ($line =~ /Section/) {
      $sectname = "";
      $segname = "";
    } elsif ($line =~ /cmd (\w+)/) {
      $cmd = $1;
    } elsif ($line =~ /sectname (\w+)/) {
      $sectname = $1;
    } elsif ($line =~ /segname (\w+)/) {
      $segname = $1;
    } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
               $sectname eq "__text" &&
               $segname eq "__TEXT")) {
      next;
    } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
      $vma = $1;
    } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
      $size = $1;
    } elsif ($line =~ /\boffset ([0-9]+)/) {
      $file_offset = sprintf("%016x", $1);
    }
    if (defined($vma) && defined($size) && defined($file_offset)) {
      last;
    }
  }
  close(OTOOL);

  if (!defined($vma) || !defined($size) || !defined($file_offset)) {
    return undef;
  }

  my $r = {};
  $r->{size} = $size;
  $r->{vma} = $vma;
  $r->{file_offset} = $file_offset;

  return $r;
}

sub ParseTextSectionHeader {
  # obj_tool_map("otool") is only defined if we're in a Mach-O environment
  if (defined($obj_tool_map{"otool"})) {
    my $r = ParseTextSectionHeaderFromOtool(@_);
    if (defined($r)){
      return $r;
    }
  }
  # If otool doesn't work, or we don't have it, fall back to objdump
  return ParseTextSectionHeaderFromObjdump(@_);
}

# Split /proc/pid/maps dump into a list of libraries
sub ParseLibraries {
  return if $main::use_symbol_page;  # We don't need libraries info.
  my $prog = Cwd::abs_path(shift);
  my $map = shift;
  my $pcs = shift;

  my $result = [];
  my $h = "[a-f0-9]+";
  my $zero_offset = HexExtend("0");

  my $buildvar = "";
  foreach my $l (split("\n", $map)) {
    if ($l =~ m/^\s*build=(.*)$/) {
      $buildvar = $1;
    }

    my $start;
    my $finish;
    my $offset;
    my $lib;
    if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
      # Full line from /proc/self/maps.  Example:
      #   40000000-40015000 r-xp 00000000 03:01 12845071   /lib/ld-2.3.2.so
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = HexExtend($3);
      $lib = $4;
      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
    } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
      # Cooked line from DumpAddressMap.  Example:
      #   40000000-40015000: /lib/ld-2.3.2.so
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = $zero_offset;
      $lib = $3;
    } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) {
      # PIEs and address space randomization do not play well with our
      # default assumption that main executable is at lowest
      # addresses.
So we're detecting main executable in # /proc/self/maps as well. $start = HexExtend($1); $finish = HexExtend($2); $offset = HexExtend($3); $lib = $4; $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths } # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in # function procfs_doprocmap (sys/fs/procfs/procfs_map.c) # # Example: # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s # o.1 NCH -1 elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) { $start = HexExtend($1); $finish = HexExtend($2); $offset = $zero_offset; $lib = FindLibrary($5); } else { next; } # Expand "$build" variable if available $lib =~ s/\$build\b/$buildvar/g; $lib = FindLibrary($lib); # Check for pre-relocated libraries, which use pre-relocated symbol tables # and thus require adjusting the offset that we'll use to translate # VM addresses into symbol table addresses. # Only do this if we're not going to fetch the symbol table from a # debugging copy of the library. if (!DebuggingLibrary($lib)) { my $text = ParseTextSectionHeader($lib); if (defined($text)) { my $vma_offset = AddressSub($text->{vma}, $text->{file_offset}); $offset = AddressAdd($offset, $vma_offset); } } if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; } push(@{$result}, [$lib, $start, $finish, $offset]); } # Append special entry for additional library (not relocated) if ($main::opt_lib ne "") { my $text = ParseTextSectionHeader($main::opt_lib); if (defined($text)) { my $start = $text->{vma}; my $finish = AddressAdd($start, $text->{size}); push(@{$result}, [$main::opt_lib, $start, $finish, $start]); } } # Append special entry for the main program. This covers # 0..max_pc_value_seen, so that we assume pc values not found in one # of the library ranges will be treated as coming from the main # program binary. my $min_pc = HexExtend("0"); my $max_pc = $min_pc; # find the maximal PC value in any sample foreach my $pc (keys(%{$pcs})) { if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); } } push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]); return $result; } # Add two hex addresses of length $address_length. # Run jeprof --test for unit test if this is changed. sub AddressAdd { my $addr1 = shift; my $addr2 = shift; my $sum; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16); return sprintf("%08x", $sum); } else { # Do the addition in 7-nibble chunks to trivialize carry handling. if ($main::opt_debug and $main::opt_test) { print STDERR "AddressAdd $addr1 + $addr2 = "; } my $a1 = substr($addr1,-7); $addr1 = substr($addr1,0,-7); my $a2 = substr($addr2,-7); $addr2 = substr($addr2,0,-7); $sum = hex($a1) + hex($a2); my $c = 0; if ($sum > 0xfffffff) { $c = 1; $sum -= 0x10000000; } my $r = sprintf("%07x", $sum); $a1 = substr($addr1,-7); $addr1 = substr($addr1,0,-7); $a2 = substr($addr2,-7); $addr2 = substr($addr2,0,-7); $sum = hex($a1) + hex($a2) + $c; $c = 0; if ($sum > 0xfffffff) { $c = 1; $sum -= 0x10000000; } $r = sprintf("%07x", $sum) . $r; $sum = hex($addr1) + hex($addr2) + $c; if ($sum > 0xff) { $sum -= 0x100; } $r = sprintf("%02x", $sum) . $r; if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; } return $r; } } # Subtract two hex addresses of length $address_length. # Run jeprof --test for unit test if this is changed. 
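# Worked example (drawn from the unit-test vectors in RunUnitTests below):
# with $address_length == 8,
#   AddressAdd("ffffffff", "aaaaaaaa") wraps around to "aaaaaaa9", and
#   AddressSub("ffffffff", "aaaaaaaa") yields "55555555".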
sub AddressSub { my $addr1 = shift; my $addr2 = shift; my $diff; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); return sprintf("%08x", $diff); } else { # Do the addition in 7-nibble chunks to trivialize borrow handling. # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } my $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); my $a2 = hex(substr($addr2,-7)); $addr2 = substr($addr2,0,-7); my $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; my $r = sprintf("%07x", $diff); $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); $a2 = hex(substr($addr2,-7)) + $b; $addr2 = substr($addr2,0,-7); $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; $r = sprintf("%07x", $diff) . $r; $a1 = hex($addr1); $a2 = hex($addr2) + $b; if ($a2 > $a1) { $a1 += 0x100; } $diff = $a1 - $a2; $r = sprintf("%02x", $diff) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Increment a hex addresses of length $address_length. # Run jeprof --test for unit test if this is changed. sub AddressInc { my $addr = shift; my $sum; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $sum = (hex($addr)+1) % (0x10000000 * 16); return sprintf("%08x", $sum); } else { # Do the addition in 7-nibble chunks to trivialize carry handling. # We are always doing this to step through the addresses in a function, # and will almost never overflow the first chunk, so we check for this # case and exit early. # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; } my $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; my $r = sprintf("%07x", $sum); if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "0000000"; } $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; $r = sprintf("%07x", $sum) . $r; if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "00000000000000"; } $sum = hex($addr) + 1; if ($sum > 0xff) { $sum -= 0x100; } $r = sprintf("%02x", $sum) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Extract symbols for all PC values found in profile sub ExtractSymbols { my $libs = shift; my $pcset = shift; my $symbols = {}; # Map each PC value to the containing library. To make this faster, # we sort libraries by their starting pc value (highest first), and # advance through the libraries as we advance the pc. Sometimes the # addresses of libraries may overlap with the addresses of the main # binary, so to make sure the libraries 'win', we iterate over the # libraries in reverse order (which assumes the binary doesn't start # in the middle of a library, which seems a fair assumption). my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { my $libname = $lib->[0]; my $start = $lib->[1]; my $finish = $lib->[2]; my $offset = $lib->[3]; # Use debug library if it exists my $debug_libname = DebuggingLibrary($libname); if ($debug_libname) { $libname = $debug_libname; } # Get list of pcs that belong in this library. my $contained = []; my ($start_pc_index, $finish_pc_index); # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index]. 
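    # (Illustration with hypothetical, shortened addresses: with sorted
    # @pcs = ("0040", "0050", "0060", "0070") and a library spanning
    # $start = "0050" .. $finish = "0065", the two loops below settle on
    # $finish_pc_index == 3 and $start_pc_index == 1, so the splice hands
    # ("0050", "0060") to MapToSymbols and leaves "0040" and "0070" in
    # @pcs for other ranges.)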
for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; $finish_pc_index--) { last if $pcs[$finish_pc_index - 1] le $finish; } # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; $start_pc_index--) { last if $pcs[$start_pc_index - 1] lt $start; } # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, # in case there are overlaps in libraries and the main binary. @{$contained} = splice(@pcs, $start_pc_index, $finish_pc_index - $start_pc_index); # Map to symbols MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); } return $symbols; } # Map list of PC values to symbols for a given image sub MapToSymbols { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; my $debug = 0; # Ignore empty binaries if ($#{$pclist} < 0) { return; } # Figure out the addr2line command to use my $addr2line = $obj_tool_map{"addr2line"}; my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); if (exists $obj_tool_map{"addr2line_pdb"}) { $addr2line = $obj_tool_map{"addr2line_pdb"}; $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); } # If "addr2line" isn't installed on the system at all, just use # nm to get what info we can (function names, but not line numbers). if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { MapSymbolsWithNM($image, $offset, $pclist, $symbols); return; } # "addr2line -i" can produce a variable number of lines per input # address, with no separator that allows us to tell when data for # the next address starts. So we find the address for a special # symbol (_fini) and interleave this address between all real # addresses passed to addr2line. The name of this special symbol # can then be used as a separator. $sep_address = undef; # May be filled in by MapSymbolsWithNM() my $nm_symbols = {}; MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); if (defined($sep_address)) { # Only add " -i" to addr2line if the binary supports it. # addr2line --help returns 0, but not if it sees an unknown flag first. if (system("$cmd -i --help >$dev_null 2>&1") == 0) { $cmd .= " -i"; } else { $sep_address = undef; # no need for sep_address if we don't support -i } } # Make file with all PC values with intervening 'sep_address' so # that we can reliably detect the end of inlined function list open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); if ($debug) { print("---- $image ---\n"); } for (my $i = 0; $i <= $#{$pclist}; $i++) { # addr2line always reads hex addresses, and does not need '0x' prefix. if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); if (defined($sep_address)) { printf ADDRESSES ("%s\n", $sep_address); } } close(ADDRESSES); if ($debug) { print("----\n"); system("cat", $main::tmpfile_sym); print("----\n"); system("$cmd < " . ShellEscape($main::tmpfile_sym)); print("----\n"); } open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . 
" |") || error("$cmd: $!\n"); my $count = 0; # Index in pclist while () { # Read fullfunction and filelineinfo from next pair of lines s/\r?\n$//g; my $fullfunction = $_; $_ = ; s/\r?\n$//g; my $filelinenum = $_; if (defined($sep_address) && $fullfunction eq $sep_symbol) { # Terminating marker for data for this address $count++; next; } $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths my $pcstr = $pclist->[$count]; my $function = ShortFunctionName($fullfunction); my $nms = $nm_symbols->{$pcstr}; if (defined($nms)) { if ($fullfunction eq '??') { # nm found a symbol for us. $function = $nms->[0]; $fullfunction = $nms->[2]; } else { # MapSymbolsWithNM tags each routine with its starting address, # useful in case the image has multiple occurrences of this # routine. (It uses a syntax that resembles template paramters, # that are automatically stripped out by ShortFunctionName().) # addr2line does not provide the same information. So we check # if nm disambiguated our symbol, and if so take the annotated # (nm) version of the routine-name. TODO(csilvers): this won't # catch overloaded, inlined symbols, which nm doesn't see. # Better would be to do a check similar to nm's, in this fn. if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn $function = $nms->[0]; $fullfunction = $nms->[2]; } } } # Prepend to accumulated symbols for pcstr # (so that caller comes before callee) my $sym = $symbols->{$pcstr}; if (!defined($sym)) { $sym = []; $symbols->{$pcstr} = $sym; } unshift(@{$sym}, $function, $filelinenum, $fullfunction); if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } if (!defined($sep_address)) { # Inlining is off, so this entry ends immediately $count++; } } close(SYMBOLS); } # Use nm to map the list of referenced PCs to symbols. Return true iff we # are able to read procedure information via nm. sub MapSymbolsWithNM { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; # Get nm output sorted by increasing address my $symbol_table = GetProcedureBoundaries($image, "."); if (!%{$symbol_table}) { return 0; } # Start addresses are already the right length (8 or 16 hex digits). my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } keys(%{$symbol_table}); if ($#names < 0) { # No symbols: just use addresses foreach my $pc (@{$pclist}) { my $pcstr = "0x" . $pc; $symbols->{$pc} = [$pcstr, "?", $pcstr]; } return 0; } # Sort addresses so we can do a join against nm output my $index = 0; my $fullname = $names[0]; my $name = ShortFunctionName($fullname); foreach my $pc (sort { $a cmp $b } @{$pclist}) { # Adjust for mapped offset my $mpc = AddressSub($pc, $offset); while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ $index++; $fullname = $names[$index]; $name = ShortFunctionName($fullname); } if ($mpc lt $symbol_table->{$fullname}->[1]) { $symbols->{$pc} = [$name, "?", $fullname]; } else { my $pcstr = "0x" . 
      my $pcstr = "0x" . $pc;
      $symbols->{$pc} = [$pcstr, "?", $pcstr];
    }
  }
  return 1;
}

sub ShortFunctionName {
  my $function = shift;
  while ($function =~ s/\([^()]*\)(\s*const)?//g) { }   # Argument types
  while ($function =~ s/<[^<>]*>//g)  { }    # Remove template arguments
  $function =~ s/^.*\s+(\w+::)/$1/;          # Remove leading type
  return $function;
}

# Trim overly long symbols found in disassembler output
sub CleanDisassembly {
  my $d = shift;
  while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
  while ($d =~ s/(\w+)<[^<>]*>/$1/g)  { }       # Remove template arguments
  return $d;
}

# Clean file name for display
sub CleanFileName {
  my ($f) = @_;
  $f =~ s|^/proc/self/cwd/||;
  $f =~ s|^\./||;
  return $f;
}

# Make address relative to section and clean up for display
sub UnparseAddress {
  my ($offset, $address) = @_;
  $address = AddressSub($address, $offset);
  $address =~ s/^0x//;
  $address =~ s/^0*//;
  return $address;
}

##### Miscellaneous #####

# Find the right versions of the above object tools to use.  The
# argument is the program file being analyzed, and should be an ELF
# 32-bit or ELF 64-bit executable file.  The location of the tools
# is determined by considering the following options in this order:
#   1) --tools option, if set
#   2) JEPROF_TOOLS environment variable, if set
#   3) the environment
sub ConfigureObjTools {
  my $prog_file = shift;

  # Check for the existence of $prog_file because /usr/bin/file does not
  # predictably return error status in prod.
  (-e $prog_file)  || error("$prog_file does not exist.\n");

  my $file_type = undef;
  if (-e "/usr/bin/file") {
    # Follow symlinks (at least for systems where "file" supports that).
    my $escaped_prog_file = ShellEscape($prog_file);
    $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || /usr/bin/file $escaped_prog_file`;
  } elsif ($^O eq "MSWin32") {
    $file_type = "MS Windows";
  } else {
    print STDERR "WARNING: Can't determine the file type of $prog_file\n";
  }

  if ($file_type =~ /64-bit/) {
    # Change $address_length to 16 if the program file is ELF 64-bit.
    # We can't detect this from many (most?) heap or lock contention
    # profiles, since the actual addresses referenced are generally in low
    # memory even for 64-bit programs.
    $address_length = 16;
  }

  if ($file_type =~ /MS Windows/) {
    # For windows, we provide a version of nm and addr2line as part of
    # the opensource release, which is capable of parsing
    # Windows-style PDB executables.  It should live in the path, or
    # in the same directory as jeprof.
    $obj_tool_map{"nm_pdb"} = "nm-pdb";
    $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
  }

  if ($file_type =~ /Mach-O/) {
    # OS X uses otool to examine Mach-O files, rather than objdump.
    $obj_tool_map{"otool"} = "otool";
    $obj_tool_map{"addr2line"} = "false";  # no addr2line
    $obj_tool_map{"objdump"} = "false";  # no objdump
  }

  # Go fill in %obj_tool_map with the pathnames to use:
  foreach my $tool (keys %obj_tool_map) {
    $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
  }
}

# Returns the path of a caller-specified object tool.  If --tools or
# JEPROF_TOOLS are specified, then returns the full path to the tool
# with that prefix.  Otherwise, returns the path unmodified (which
# means we will look for it on PATH).
sub ConfigureTool {
  my $tool = shift;
  my $path;

  # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
  # item is either a) a pathname prefix, or b) a map of the form
  # <tool>:<path>.  First we look for an entry of type (b) for our
  # tool.  If one is found, we use it.  Otherwise, we consider all the
  # pathname prefixes in turn, until one yields an existing file.  If
  # none does, we use a default path.
  my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
  if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
    $path = $2;
    # TODO(csilvers): sanity-check that $path exists?  Hard if it's relative.
  } elsif ($tools ne '') {
    foreach my $prefix (split(',', $tools)) {
      next if ($prefix =~ /:/);    # ignore "tool:fullpath" entries in the list
      if (-x $prefix . $tool) {
        $path = $prefix . $tool;
        last;
      }
    }
    if (!$path) {
      error("No '$tool' found with prefix specified by " .
            "--tools (or \$JEPROF_TOOLS) '$tools'\n");
    }
  } else {
    # ... otherwise use the version that exists in the same directory as
    # jeprof.  If there's nothing there, use $PATH.
    $0 =~ m,[^/]*$,;     # this is everything after the last slash
    my $dirname = $`;    # this is everything up to and including the last slash
    if (-x "$dirname$tool") {
      $path = "$dirname$tool";
    } else {
      $path = $tool;
    }
  }
  if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
  return $path;
}

sub ShellEscape {
  my @escaped_words = ();
  foreach my $word (@_) {
    my $escaped_word = $word;
    if ($word =~ m![^a-zA-Z0-9/.,_=-]!) {  # check for anything not in whitelist
      $escaped_word =~ s/'/'\\''/g;        # escape each embedded single quote
      $escaped_word = "'$escaped_word'";
    }
    push(@escaped_words, $escaped_word);
  }
  return join(" ", @escaped_words);
}

sub cleanup {
  unlink($main::tmpfile_sym);
  unlink(keys %main::tempnames);

  # We leave any collected profiles in $HOME/jeprof in case the user wants
  # to look at them later.  We print a message informing them of this.
  if ((scalar(@main::profile_files) > 0) &&
      defined($main::collected_profile)) {
    if (scalar(@main::profile_files) == 1) {
      print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
    }
    print STDERR "If you want to investigate this profile further, you can do:\n";
    print STDERR "\n";
    print STDERR "  jeprof \\\n";
    print STDERR "    $main::prog \\\n";
    print STDERR "    $main::collected_profile\n";
    print STDERR "\n";
  }
}

sub sighandler {
  cleanup();
  exit(1);
}

sub error {
  my $msg = shift;
  print STDERR $msg;
  cleanup();
  exit(1);
}

# Run $nm_command and get all the resulting procedure boundaries whose
# names match "$regexp" and returns them in a hashtable mapping from
# procedure name to a two-element vector of [start address, end address]
sub GetProcedureBoundariesViaNm {
  my $escaped_nm_command = shift;    # shell-escaped
  my $regexp = shift;
  my $symbol_table = {};

  open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
  my $last_start = "0";
  my $routine = "";
  while (<NM>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    if (m/^\s*([0-9a-f]+) (.) (..*)/) {
      my $start_val = $1;
      my $type = $2;
      my $this_routine = $3;

      # It's possible for two symbols to share the same address, if
      # one is a zero-length variable (like __start_google_malloc) or
      # one symbol is a weak alias to another (like __libc_malloc).
      # In such cases, we want to ignore all values except for the
      # actual symbol, which in nm-speak has type "T".  The logic
      # below does this, though it's a bit tricky: what happens when
      # we have a series of lines with the same address, is the first
      # one gets queued up to be processed.  However, it won't
      # *actually* be processed until later, when we read a line with
      # a different address.  That means that as long as we're reading
      # lines with the same address, we have a chance to replace that
      # item in the queue, which we do whenever we see a 'T' entry --
      # that is, a line with type 'T'.  If we never see a 'T' entry,
      # we'll just go ahead and process the first entry (which never
      # got touched in the queue), and ignore the others.
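      # (Example with hypothetical symbols: given consecutive nm lines
      #    00001000 W __libc_malloc
      #    00001000 T malloc
      # the weak alias is queued first and then replaced when the 'T'
      # line for the same address arrives, so "malloc" is what gets
      # recorded once a line with a new address flushes the queue.)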
      if ($start_val eq $last_start && $type =~ /t/i) {
        # We are the 'T' symbol at this address, replace previous symbol.
        $routine = $this_routine;
        next;
      } elsif ($start_val eq $last_start) {
        # We're not the 'T' symbol at this address, so ignore us.
        next;
      }

      if ($this_routine eq $sep_symbol) {
        $sep_address = HexExtend($start_val);
      }

      # Tag this routine with the starting address in case the image
      # has multiple occurrences of this routine.  We use a syntax
      # that resembles template parameters that are automatically
      # stripped out by ShortFunctionName()
      $this_routine .= "<$start_val>";

      if (defined($routine) && $routine =~ m/$regexp/) {
        $symbol_table->{$routine} = [HexExtend($last_start),
                                     HexExtend($start_val)];
      }
      $last_start = $start_val;
      $routine = $this_routine;
    } elsif (m/^Loaded image name: (.+)/) {
      # The win32 nm workalike emits information about the binary it is using.
      if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
    } elsif (m/^PDB file name: (.+)/) {
      # The win32 nm workalike emits information about the pdb it is using.
      if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
    }
  }
  close(NM);
  # Handle the last line in the nm output.  Unfortunately, we don't know
  # how big this last symbol is, because we don't know how big the file
  # is.  For now, we just give it a size of 0.
  # TODO(csilvers): do better here.
  if (defined($routine) && $routine =~ m/$regexp/) {
    $symbol_table->{$routine} = [HexExtend($last_start),
                                 HexExtend($last_start)];
  }
  return $symbol_table;
}

# Gets the procedure boundaries for all routines in "$image" whose names
# match "$regexp" and returns them in a hashtable mapping from procedure
# name to a two-element vector of [start address, end address].
# Will return an empty map if nm is not installed or not working properly.
sub GetProcedureBoundaries {
  my $image = shift;
  my $regexp = shift;

  # If $image doesn't start with /, then put ./ in front of it.  This works
  # around an obnoxious bug in our probing of nm -f behavior.
  # "nm -f $image" is supposed to fail on GNU nm, but if:
  #
  # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
  # b. you have a.out in your current directory (a not uncommon occurrence)
  #
  # then "nm -f $image" succeeds because -f only looks at the first letter of
  # the argument, which looks valid because it's [BbSsPp], and then since
  # there's no image provided, it looks for a.out and finds it.
  #
  # This regex makes sure that $image starts with . or /, forcing the -f
  # parsing to fail since . and / are not valid formats.
  $image =~ s#^[^/]#./$&#;

  # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
  my $debugging = DebuggingLibrary($image);
  if ($debugging) {
    $image = $debugging;
  }

  my $nm = $obj_tool_map{"nm"};
  my $cppfilt = $obj_tool_map{"c++filt"};

  # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
  # binary doesn't support --demangle.  In addition, for OS X we need
  # to use the -f flag to get 'flat' nm output (otherwise we don't sort
  # properly and get incorrect results).  Unfortunately, GNU nm uses -f
  # in an incompatible way.  So first we test whether our nm supports
  # --demangle and -f.
  my $demangle_flag = "";
  my $cppfilt_flag = "";
  my $to_devnull = ">$dev_null 2>&1";
  if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) {
    # In this mode, we do "nm --demangle <image>"
    $demangle_flag = "--demangle";
    $cppfilt_flag = "";
  } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
    # In this mode, we do "nm <image> | c++filt"
    $cppfilt_flag = " | " . ShellEscape($cppfilt);
  };
  my $flatten_flag = "";
  if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
    $flatten_flag = "-f";
  }

  # Finally, in the case $image isn't a debug library, we try again with
  # -D to at least get *exported* symbols.  If we can't use --demangle,
  # we use c++filt instead, if it exists on this system.
  my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
                                 $image) . " 2>$dev_null $cppfilt_flag",
                     ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
                                 $image) . " 2>$dev_null $cppfilt_flag",
                     # 6nm is for Go binaries
                     ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
                     );

  # If the executable is an MS Windows PDB-format executable, we'll
  # have set up obj_tool_map("nm_pdb").  In this case, we actually
  # want to use both unix nm and windows-specific nm_pdb, since
  # PDB-format executables can apparently include dwarf .o files.
  if (exists $obj_tool_map{"nm_pdb"}) {
    push(@nm_commands,
         ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) .
         " 2>$dev_null");
  }

  foreach my $nm_command (@nm_commands) {
    my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
    return $symbol_table if (%{$symbol_table});
  }
  my $symbol_table = {};
  return $symbol_table;
}

# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings.
# To make them more readable, we add underscores at interesting places.
# This routine removes the underscores, producing the canonical representation
# used by jeprof to represent addresses, particularly in the tested routines.
sub CanonicalHex {
  my $arg = shift;
  return join '', (split '_',$arg);
}

# Unit test for AddressAdd:
sub AddressAddUnitTest {
  my $test_data_8 = shift;
  my $test_data_16 = shift;
  my $error_count = 0;
  my $fail_count = 0;
  my $pass_count = 0;
  # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";

  # First a few 8-nibble addresses.  Note that this implementation uses
  # plain old arithmetic, so a quick sanity check along with verifying what
  # happens to overflow (we want it to wrap):
  $address_length = 8;
  foreach my $row (@{$test_data_8}) {
    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
    my $sum = AddressAdd ($row->[0], $row->[1]);
    if ($sum ne $row->[2]) {
      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
             $row->[0], $row->[1], $row->[2];
      ++$fail_count;
    } else {
      ++$pass_count;
    }
  }
  printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
         $pass_count, $fail_count;
  $error_count = $fail_count;
  $fail_count = 0;
  $pass_count = 0;

  # Now 16-nibble addresses.
$address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); my $expected = join '', (split '_',$row->[2]); if ($sum ne CanonicalHex($row->[2])) { printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, $row->[0], $row->[1], $row->[2]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressSub: sub AddressSubUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub ($row->[0], $row->[1]); if ($sum ne $row->[3]) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); if ($sum ne CanonicalHex($row->[3])) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressInc: sub AddressIncUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc ($row->[0]); if ($sum ne $row->[4]) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc (CanonicalHex($row->[0])); if ($sum ne CanonicalHex($row->[4])) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Driver for unit tests. # Currently just the address add/subtract/increment routines for 64-bit. 
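# The tests are reached via "jeprof --test"; the driver exits with the
# number of failures, so a zero exit status means PASS.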
sub RunUnitTests {
  my $error_count = 0;

  # This is a list of tuples [a, b, a+b, a-b, a+1]
  my $unit_test_data_8 = [
    [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
    [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
    [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
    [qw(00000001 ffffffff 00000000 00000002 00000002)],
    [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
  ];
  my $unit_test_data_16 = [
    # The implementation handles data in 7-nibble chunks, so those are the
    # interesting boundaries.
    [qw(aaaaaaaa 50505050
        00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
    [qw(50505050 aaaaaaaa
        00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
    [qw(ffffffff aaaaaaaa
        00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
    [qw(00000001 ffffffff
        00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
    [qw(00000001 fffffff0
        00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
    [qw(00_a00000a_aaaaaaa 50505050
        00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
    [qw(0f_fff0005_0505050 aaaaaaaa
        0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
    [qw(00_000000f_fffffff 01_800000a_aaaaaaa
        01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
    [qw(00_0000000_0000001 ff_fffffff_fffffff
        00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
    [qw(00_0000000_0000001 ff_fffffff_ffffff0
        ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
  ];
  $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
  $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
  $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
  if ($error_count > 0) {
    print STDERR $error_count, " errors: FAILED\n";
  } else {
    print STDERR "PASS\n";
  }
  exit ($error_count);
}
jemalloc-sys-0.3.2/rep/bin/jeprof.in01006440000765000002400000536203134461747400155330ustar0000000000000000
#! /usr/bin/env perl

# Copyright (c) 1998-2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # --- # Program for printing the profile generated by common/profiler.cc, # or by the heap profiler (common/debugallocation.cc) # # The profile contains a sequence of entries of the form: # # This program parses the profile, and generates user-readable # output. # # Examples: # # % tools/jeprof "program" "profile" # Enters "interactive" mode # # % tools/jeprof --text "program" "profile" # Generates one line per procedure # # % tools/jeprof --gv "program" "profile" # Generates annotated call-graph and displays via "gv" # # % tools/jeprof --gv --focus=Mutex "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # # % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # and does not match "string" # # % tools/jeprof --list=IBF_CheckDocid "program" "profile" # Generates disassembly listing of all routines with at least one # sample that match the --list= pattern. The listing is # annotated with the flat and cumulative sample counts at each line. # # % tools/jeprof --disasm=IBF_CheckDocid "program" "profile" # Generates disassembly listing of all routines with at least one # sample that match the --disasm= pattern. The listing is # annotated with the flat and cumulative sample counts at each PC value. # # TODO: Use color to indicate files? use strict; use warnings; use Getopt::Long; use Cwd; my $JEPROF_VERSION = "@jemalloc_version@"; my $PPROF_VERSION = "2.0"; # These are the object tools we use which can come from a # user-specified location using --tools, from the JEPROF_TOOLS # environment variable, or from the environment. my %obj_tool_map = ( "objdump" => "objdump", "nm" => "nm", "addr2line" => "addr2line", "c++filt" => "c++filt", ## ConfigureObjTools may add architecture-specific entries: #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables #"addr2line_pdb" => "addr2line-pdb", # ditto #"otool" => "otool", # equivalent of objdump on OS X ); # NOTE: these are lists, so you can put in commandline flags if you want. my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local my @GV = ("gv"); my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread my @KCACHEGRIND = ("kcachegrind"); my @PS2PDF = ("ps2pdf"); # These are used for dynamic profiles my @URL_FETCHER = ("curl", "-s", "--fail"); # These are the web pages that servers need to support for dynamic profiles my $HEAP_PAGE = "/pprof/heap"; my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#" my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param # ?seconds=#&event=x&period=n my $GROWTH_PAGE = "/pprof/growth"; my $CONTENTION_PAGE = "/pprof/contention"; my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?"; my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param # "?seconds=#", # "?tags_regexp=#" and # "?type=#". 
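# Illustration (hypothetical host): "jeprof http://myserver:8080$HEAP_PAGE"
# makes @URL_FETCHER retrieve a heap profile from that server.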
my $SYMBOL_PAGE = "/pprof/symbol";     # must support symbol lookup via POST
my $PROGRAM_NAME_PAGE = "/pprof/cmdline";

# These are the web pages that can be named on the command line.
# All the alternatives must begin with /.
my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
               "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
               "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";

# default binary name
my $UNKNOWN_BINARY = "(unknown)";

# There is a pervasive dependency on the length (in hex characters,
# i.e., nibbles) of an address, distinguishing between 32-bit and
# 64-bit profiles.  To err on the safe side, default to 64-bit here:
my $address_length = 16;

my $dev_null = "/dev/null";
if (! -e $dev_null && $^O =~ /MSWin/) {    # $^O is the OS perl was built for
  $dev_null = "nul";
}

# A list of paths to search for shared object files
my @prefix_list = ();

# Special routine name that should not have any symbols.
# Used as separator to parse "addr2line -i" output.
my $sep_symbol = '_fini';
my $sep_address = undef;

##### Argument parsing #####

sub usage_string {
  return <<EOF;
Usage:
jeprof [options] <program> <profiles>
   <profiles> is a space separated list of profile names.
jeprof [options] <symbolized-profiles>
   <symbolized-profiles> is a list of profile files where each file contains
   the necessary symbol mappings as well as profile data (likely generated
   with --raw).
jeprof [options] <profile>
   <profile> is a remote form.  Symbols are obtained from host:port$SYMBOL_PAGE

   Each name can be:
   /path/to/profile        - a path to a profile file
   host:port[/<service>]   - a location of a service to get profile from

   The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
                         $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
                         $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
   For instance:
     jeprof http://myserver.com:80$HEAP_PAGE
   If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
jeprof --symbols <program>
   Maps addresses to symbol names.  In this mode, stdin should be a list
   of library mappings, in the same format as is found in the heap-
   and cpu-profile files (this loosely matches that of /proc/self/maps
   on linux), followed by a list of hex addresses to map, one per line.
   For more help with querying remote servers, including how to add the
   necessary server-side support code, see this filename (or one like it):

   /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html

Options:
   --cum               Sort by cumulative data
   --base=<base>       Subtract <base> from <profile> before display
   --interactive       Run in interactive mode (interactive "help" gives help) [default]
   --seconds=<n>       Length of time for dynamic profiles [default=30 secs]
   --add_lib=<library> Read additional symbols and line info from the given library
   --lib_prefix=<dir>  Comma separated list of library path prefixes

Reporting Granularity:
   --addresses         Report at address level
   --lines             Report at source line level
   --functions         Report at function level [default]
   --files             Report at source file level

Output type:
   --text              Generate text report
   --callgrind         Generate callgrind format to stdout
   --gv                Generate Postscript and display
   --evince            Generate PDF and display
   --web               Generate SVG and display
   --list=<regexp>     Generate source listing of matching routines
   --disasm=<regexp>   Generate disassembly of matching routines
   --symbols           Print demangled symbol names found at given addresses
   --dot               Generate DOT file to stdout
   --ps                Generate Postscript to stdout
   --pdf               Generate PDF to stdout
   --svg               Generate SVG to stdout
   --gif               Generate GIF to stdout
   --raw               Generate symbolized jeprof data (useful with remote fetch)

Heap-Profile Options:
   --inuse_space       Display in-use (mega)bytes [default]
   --inuse_objects     Display in-use objects
   --alloc_space       Display allocated (mega)bytes
   --alloc_objects     Display allocated objects
   --show_bytes        Display space in bytes
   --drop_negative     Ignore negative differences

Contention-profile options:
   --total_delay       Display total delay at each region [default]
   --contentions       Display number of delays at each region
   --mean_delay        Display mean delay at each region

Call-graph Options:
   --nodecount=<n>     Show at most so many nodes [default=80]
   --nodefraction=<f>  Hide nodes below <f>*total [default=.005]
   --edgefraction=<f>  Hide edges below <f>*total [default=.001]
   --maxdegree=<n>     Max incoming/outgoing edges per node [default=8]
   --focus=<regexp>    Focus on backtraces with nodes matching <regexp>
   --thread=<n>        Show profile for thread <n>
   --ignore=<regexp>   Ignore backtraces with nodes matching <regexp>
   --scale=<n>         Set GV scaling [default=0]
   --heapcheck         Make nodes with non-0 object counts
                       (i.e. direct leak generators) more visible
   --retain=<regexp>   Retain only nodes that match <regexp>
   --exclude=<regexp>  Exclude all nodes that match <regexp>

Miscellaneous:
   --tools=<prefix or binary:fullpath>[,...]   \$PATH for object tool pathnames
   --test              Run unit tests
   --help              This message
   --version           Version information

Environment Variables:
   JEPROF_TMPDIR       Profiles directory. Defaults to \$HOME/jeprof
   JEPROF_TOOLS        Prefix for object tools pathnames

Examples:

jeprof /bin/ls ls.prof
                       Enters "interactive" mode
jeprof --text /bin/ls ls.prof
                       Outputs one line per procedure
jeprof --web /bin/ls ls.prof
                       Displays annotated call-graph in web browser
jeprof --gv /bin/ls ls.prof
                       Displays annotated call-graph via 'gv'
jeprof --gv --focus=Mutex /bin/ls ls.prof
                       Restricts to code paths including a .*Mutex.* entry
jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
                       Code paths including Mutex but not string
jeprof --list=getdir /bin/ls ls.prof
                       (Per-line) annotated source listing for getdir()
jeprof --disasm=getdir /bin/ls ls.prof
                       (Per-PC) annotated disassembly for getdir()

jeprof http://localhost:1234/
                       Enters "interactive" mode
jeprof --text localhost:1234
                       Outputs one line per procedure for localhost:1234
jeprof --raw localhost:1234 > ./local.raw
jeprof --text ./local.raw
                       Fetches a remote profile for later analysis and then
                       analyzes it in text mode.
EOF
}

sub version_string {
  return <<EOF
jeprof (part of jemalloc $JEPROF_VERSION)
based on pprof (part of gperftools $PPROF_VERSION)

Copyright 1998-2007 Google Inc.

This is BSD licensed software; see the source for copying conditions
and license information.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE.
EOF
}

sub usage {
  my $msg = shift;
  print STDERR "$msg\n\n";
  print STDERR usage_string();
  exit(1);
}

sub Init() {
  # Setup tmp-file name and handler to clean it up.
  # We do this in the very beginning so that we can use
  # error() and cleanup() function anytime here after.
  $main::tmpfile_sym = "/tmp/jeprof$$.sym";
  $main::tmpfile_ps = "/tmp/jeprof$$";
  $main::next_tmpfile = 0;
  $SIG{'INT'} = \&sighandler;

  # Default option settings; the numeric defaults match the usage text.
  $main::opt_help = 0;
  $main::opt_version = 0;
  $main::opt_cum = 0;
  $main::opt_base = '';
  $main::opt_seconds = 30;
  $main::opt_lib = "";
  $main::opt_lib_prefix = "";
  $main::opt_functions = 0;
  $main::opt_lines = 0;
  $main::opt_addresses = 0;
  $main::opt_files = 0;
  $main::opt_text = 0;
  $main::opt_callgrind = 0;
  $main::opt_list = "";
  $main::opt_disasm = "";
  $main::opt_symbols = 0;
  $main::opt_gv = 0;
  $main::opt_evince = 0;
  $main::opt_web = 0;
  $main::opt_dot = 0;
  $main::opt_ps = 0;
  $main::opt_pdf = 0;
  $main::opt_svg = 0;
  $main::opt_gif = 0;
  $main::opt_raw = 0;
  $main::opt_interactive = 0;
  $main::opt_nodecount = 80;
  $main::opt_nodefraction = 0.005;
  $main::opt_edgefraction = 0.001;
  $main::opt_maxdegree = 8;
  $main::opt_focus = '';
  $main::opt_thread = undef;
  $main::opt_ignore = '';
  $main::opt_scale = 0;
  $main::opt_heapcheck = 0;
  $main::opt_retain = '';
  $main::opt_exclude = '';
  $main::opt_inuse_space = 0;
  $main::opt_inuse_objects = 0;
  $main::opt_alloc_space = 0;
  $main::opt_alloc_objects = 0;
  $main::opt_show_bytes = 0;
  $main::opt_drop_negative = 0;
  $main::opt_total_delay = 0;
  $main::opt_contentions = 0;
  $main::opt_mean_delay = 0;
  $main::opt_tools = "";
  $main::opt_debug = 0;
  $main::opt_test = 0;
  # Undocumented flags used only by unittests:
  $main::opt_test_stride = 0;

  GetOptions("help!"          => \$main::opt_help,
             "version!"       => \$main::opt_version,
             "cum!"           => \$main::opt_cum,
             "base=s"         => \$main::opt_base,
             "seconds=i"      => \$main::opt_seconds,
             "add_lib=s"      => \$main::opt_lib,
             "lib_prefix=s"   => \$main::opt_lib_prefix,
             "functions!"     => \$main::opt_functions,
             "lines!"         => \$main::opt_lines,
             "addresses!"     => \$main::opt_addresses,
             "files!"         => \$main::opt_files,
             "text!"          => \$main::opt_text,
             "callgrind!"     => \$main::opt_callgrind,
             "list=s"         => \$main::opt_list,
             "disasm=s"       => \$main::opt_disasm,
             "symbols!"       => \$main::opt_symbols,
             "gv!"            => \$main::opt_gv,
             "evince!"        => \$main::opt_evince,
             "web!"           => \$main::opt_web,
             "dot!"           => \$main::opt_dot,
             "ps!"            => \$main::opt_ps,
             "pdf!"           => \$main::opt_pdf,
             "svg!"           => \$main::opt_svg,
             "gif!"           => \$main::opt_gif,
             "raw!"           => \$main::opt_raw,
             "interactive!"   => \$main::opt_interactive,
             "nodecount=i"    => \$main::opt_nodecount,
             "nodefraction=f" => \$main::opt_nodefraction,
             "edgefraction=f" => \$main::opt_edgefraction,
             "maxdegree=i"    => \$main::opt_maxdegree,
             "focus=s"        => \$main::opt_focus,
             "thread=s"       => \$main::opt_thread,
             "ignore=s"       => \$main::opt_ignore,
             "scale=i"        => \$main::opt_scale,
             "heapcheck"      => \$main::opt_heapcheck,
             "retain=s"       => \$main::opt_retain,
             "exclude=s"      => \$main::opt_exclude,
             "inuse_space!"   => \$main::opt_inuse_space,
             "inuse_objects!" => \$main::opt_inuse_objects,
             "alloc_space!"   => \$main::opt_alloc_space,
             "alloc_objects!" => \$main::opt_alloc_objects,
             "show_bytes!"    => \$main::opt_show_bytes,
             "drop_negative!" => \$main::opt_drop_negative,
             "total_delay!"   => \$main::opt_total_delay,
             "contentions!"   => \$main::opt_contentions,
             "mean_delay!"    => \$main::opt_mean_delay,
             "tools=s"        => \$main::opt_tools,
             "test!"          => \$main::opt_test,
             "debug!"         => \$main::opt_debug,
             # Undocumented flags used only by unittests:
             "test_stride=i"  => \$main::opt_test_stride,
            ) || usage("Invalid option(s)");

  # Deal with the standard --help and --version
  if ($main::opt_help) {
    print usage_string();
    exit(0);
  }

  if ($main::opt_version) {
    print version_string();
    exit(0);
  }

  # Disassembly/listing/symbols mode requires address-level info
  if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
    $main::opt_functions = 0;
    $main::opt_lines = 0;
    $main::opt_addresses = 1;
    $main::opt_files = 0;
  }

  # Check heap-profiling flags
  if ($main::opt_inuse_space +
      $main::opt_inuse_objects +
      $main::opt_alloc_space +
      $main::opt_alloc_objects > 1) {
    usage("Specify at most one of --inuse/--alloc options");
  }

  # Check output granularities
  my $grains =
      $main::opt_functions +
      $main::opt_lines +
      $main::opt_addresses +
      $main::opt_files +
      0;
  if ($grains > 1) {
    usage("Only specify one output granularity option");
  }
  if ($grains == 0) {
    $main::opt_functions = 1;
  }

  # Check output modes
  my $modes =
      $main::opt_text +
      $main::opt_callgrind +
      ($main::opt_list eq '' ? 0 : 1) +
      ($main::opt_disasm eq '' ? 0 : 1) +
      ($main::opt_symbols == 0 ?
0 : 1) +
      $main::opt_gv +
      $main::opt_evince +
      $main::opt_web +
      $main::opt_dot +
      $main::opt_ps +
      $main::opt_pdf +
      $main::opt_svg +
      $main::opt_gif +
      $main::opt_raw +
      $main::opt_interactive +
      0;
  if ($modes > 1) {
    usage("Only specify one output mode");
  }
  if ($modes == 0) {
    if (-t STDOUT) {  # If STDOUT is a tty, activate interactive mode
      $main::opt_interactive = 1;
    } else {
      $main::opt_text = 1;
    }
  }

  if ($main::opt_test) {
    RunUnitTests();
    # Should not return
    exit(1);
  }

  # Binary name and profile arguments list
  $main::prog = "";
  @main::pfile_args = ();

  # Remote profiling without a binary (using $SYMBOL_PAGE instead)
  if (@ARGV > 0) {
    if (IsProfileURL($ARGV[0])) {
      $main::use_symbol_page = 1;
    } elsif (IsSymbolizedProfileFile($ARGV[0])) {
      $main::use_symbolized_profile = 1;
      $main::prog = $UNKNOWN_BINARY;  # will be set later from the profile file
    }
  }

  if ($main::use_symbol_page || $main::use_symbolized_profile) {
    # We don't need a binary!
    my %disabled = ('--lines' => $main::opt_lines,
                    '--disasm' => $main::opt_disasm);
    for my $option (keys %disabled) {
      usage("$option cannot be used without a binary") if $disabled{$option};
    }
    # Set $main::prog later...
    scalar(@ARGV) || usage("Did not specify profile file");
  } elsif ($main::opt_symbols) {
    # --symbols needs a binary-name (to run nm on, etc) but not profiles
    $main::prog = shift(@ARGV) || usage("Did not specify program");
  } else {
    $main::prog = shift(@ARGV) || usage("Did not specify program");
    scalar(@ARGV) || usage("Did not specify profile file");
  }

  # Parse profile file/location arguments
  foreach my $farg (@ARGV) {
    if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
      my $machine = $1;
      my $num_machines = $2;
      my $path = $3;
      for (my $i = 0; $i < $num_machines; $i++) {
        unshift(@main::pfile_args, "$i.$machine$path");
      }
    } else {
      unshift(@main::pfile_args, $farg);
    }
  }

  if ($main::use_symbol_page) {
    unless (IsProfileURL($main::pfile_args[0])) {
      error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
    }
    CheckSymbolPage();
    $main::prog = FetchProgramName();
  } elsif (!$main::use_symbolized_profile) {  # may not need objtools!
    ConfigureObjTools($main::prog)
  }

  # Break the opt_lib_prefix into the prefix_list array
  @prefix_list = split (',', $main::opt_lib_prefix);

  # Remove trailing / from the prefixes in the list to prevent
  # searching things like /my/path//lib/mylib.so
  foreach (@prefix_list) {
    s|/+$||;
  }
}

sub FilterAndPrint {
  my ($profile, $symbols, $libs, $thread) = @_;

  # Get total data in profile
  my $total = TotalProfile($profile);

  # Remove uninteresting stack items
  $profile = RemoveUninterestingFrames($symbols, $profile);

  # Focus?
  if ($main::opt_focus ne '') {
    $profile = FocusProfile($symbols, $profile, $main::opt_focus);
  }

  # Ignore?
  if ($main::opt_ignore ne '') {
    $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
  }

  my $calls = ExtractCalls($symbols, $profile);

  # Reduce profiles to required output granularity, and also clean
  # each stack trace so a given entry exists at most once.
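  # For example, at the default --functions granularity a stack keyed by
  # raw addresses (say "0x400100\n0x400200", hypothetical values) is
  # re-keyed by the translated function names, and a frame repeated by
  # recursion is kept only once per stack.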
  my $reduced = ReduceProfile($symbols, $profile);

  # Get derived profiles
  my $flat = FlatProfile($reduced);
  my $cumulative = CumulativeProfile($reduced);

  # Print
  if (!$main::opt_interactive) {
    if ($main::opt_disasm) {
      PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
    } elsif ($main::opt_list) {
      PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
    } elsif ($main::opt_text) {
      # Make sure the output is empty when we have nothing to report
      # (only matters when --heapcheck is given but we must be
      # compatible with old branches that did not pass --heapcheck always):
      if ($total != 0) {
        printf("Total%s: %s %s\n",
               (defined($thread) ? " (t$thread)" : ""),
               Unparse($total), Units());
      }
      PrintText($symbols, $flat, $cumulative, -1);
    } elsif ($main::opt_raw) {
      PrintSymbolizedProfile($symbols, $profile, $main::prog);
    } elsif ($main::opt_callgrind) {
      PrintCallgrind($calls);
    } else {
      if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
        if ($main::opt_gv) {
          RunGV(TempName($main::next_tmpfile, "ps"), "");
        } elsif ($main::opt_evince) {
          RunEvince(TempName($main::next_tmpfile, "pdf"), "");
        } elsif ($main::opt_web) {
          my $tmp = TempName($main::next_tmpfile, "svg");
          RunWeb($tmp);
          # The command we run might hand the file name off
          # to an already running browser instance and then exit.
          # Normally, we'd remove $tmp on exit (right now),
          # but fork a child to remove $tmp a little later, so that the
          # browser has time to load it first.
          delete $main::tempnames{$tmp};
          if (fork() == 0) {
            sleep 5;
            unlink($tmp);
            exit(0);
          }
        }
      } else {
        cleanup();
        exit(1);
      }
    }
  } else {
    InteractiveMode($profile, $symbols, $libs, $total);
  }
}

sub Main() {
  Init();
  $main::collected_profile = undef;
  @main::profile_files = ();
  $main::op_time = time();

  # Printing symbols is special and requires a lot less info than most.
  if ($main::opt_symbols) {
    PrintSymbols(*STDIN);   # Get /proc/maps and symbols output from stdin
    return;
  }

  # Fetch all profile data
  FetchDynamicProfiles();

  # this will hold symbols that we read from the profile files
  my $symbol_map = {};

  # Read one profile, pick the last item on the list
  my $data = ReadProfile($main::prog, pop(@main::profile_files));
  my $profile = $data->{profile};
  my $pcs = $data->{pcs};
  my $libs = $data->{libs};   # Info about main program and shared libraries
  $symbol_map = MergeSymbols($symbol_map, $data->{symbols});

  # Add additional profiles, if available.
  if (scalar(@main::profile_files) > 0) {
    foreach my $pname (@main::profile_files) {
      my $data2 = ReadProfile($main::prog, $pname);
      $profile = AddProfile($profile, $data2->{profile});
      $pcs = AddPcs($pcs, $data2->{pcs});
      $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
    }
  }

  # Subtract base from profile, if specified
  if ($main::opt_base ne '') {
    my $base = ReadProfile($main::prog, $main::opt_base);
    $profile = SubtractProfile($profile, $base->{profile});
    $pcs = AddPcs($pcs, $base->{pcs});
    $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
  }

  # Collect symbols
  my $symbols;
  if ($main::use_symbolized_profile) {
    $symbols = FetchSymbols($pcs, $symbol_map);
  } elsif ($main::use_symbol_page) {
    $symbols = FetchSymbols($pcs);
  } else {
    # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
    # which may differ from the data from subsequent profiles, especially
    # if they were run on different machines.  Use appropriate libs for
    # each pc somehow.
$symbols = ExtractSymbols($libs, $pcs); } if (!defined($main::opt_thread)) { FilterAndPrint($profile, $symbols, $libs); } if (defined($data->{threads})) { foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) { if (defined($main::opt_thread) && ($main::opt_thread eq '*' || $main::opt_thread == $thread)) { my $thread_profile = $data->{threads}{$thread}; FilterAndPrint($thread_profile, $symbols, $libs, $thread); } } } cleanup(); exit(0); } ##### Entry Point ##### Main(); # Temporary code to detect if we're running on a Goobuntu system. # These systems don't have the right stuff installed for the special # Readline libraries to work, so as a temporary workaround, we default # to using the normal stdio code, rather than the fancier readline-based # code sub ReadlineMightFail { if (-e '/lib/libtermcap.so.2') { return 0; # libtermcap exists, so readline should be okay } else { return 1; } } sub RunGV { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) { # Options using double dash are supported by this gv version. # Also, turn on noantialias to better handle bug in gv for # postscript files with large dimensions. # TODO: Maybe we should not pass the --noantialias flag # if the gv version is known to work properly without the flag. system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname) . $bg); } else { # Old gv version - only supports options that use single dash. print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n"; system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg); } } sub RunEvince { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background system(ShellEscape(@EVINCE, $fname) . $bg); } sub RunWeb { my $fname = shift; print STDERR "Loading web page file:///$fname\n"; if (`uname` =~ /Darwin/) { # OS X: open will use standard preference for SVG files. system("/usr/bin/open", $fname); return; } # Some kind of Unix; try generic symlinks, then specific browsers. # (Stop once we find one.) # Works best if the browser is already running. my @alt = ( "/etc/alternatives/gnome-www-browser", "/etc/alternatives/x-www-browser", "google-chrome", "firefox", ); foreach my $b (@alt) { if (system($b, $fname) == 0) { return; } } print STDERR "Could not load web browser.\n"; } sub RunKcachegrind { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n"; system(ShellEscape(@KCACHEGRIND, $fname) . $bg); } ##### Interactive helper routines ##### sub InteractiveMode { $| = 1; # Make output unbuffered for interactive mode my ($orig_profile, $symbols, $libs, $total) = @_; print STDERR "Welcome to jeprof! For help, type 'help'.\n"; # Use ReadLine if it's installed and input comes from a console. if ( -t STDIN && !ReadlineMightFail() && defined(eval {require Term::ReadLine}) ) { my $term = new Term::ReadLine 'jeprof'; while ( defined ($_ = $term->readline('(jeprof) '))) { $term->addhistory($_) if /\S/; if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { last; # exit when we get an interactive command to quit } } } else { # don't have readline while (1) { print STDERR "(jeprof) "; $_ = ; last if ! 
defined $_ ; s/\r//g; # turn windows-looking lines into unix-looking lines # Save some flags that might be reset by InteractiveCommand() my $save_opt_lines = $main::opt_lines; if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { last; # exit when we get an interactive command to quit } # Restore flags $main::opt_lines = $save_opt_lines; } } } # Takes two args: orig profile, and command to run. # Returns 1 if we should keep going, or 0 if we were asked to quit sub InteractiveCommand { my($orig_profile, $symbols, $libs, $total, $command) = @_; $_ = $command; # just to make future m//'s easier if (!defined($_)) { print STDERR "\n"; return 0; } if (m/^\s*quit/) { return 0; } if (m/^\s*help/) { InteractiveHelpMessage(); return 1; } # Clear all the mode options -- mode is controlled by "$command" $main::opt_text = 0; $main::opt_callgrind = 0; $main::opt_disasm = 0; $main::opt_list = 0; $main::opt_gv = 0; $main::opt_evince = 0; $main::opt_cum = 0; if (m/^\s*(text|top)(\d*)\s*(.*)/) { $main::opt_text = 1; my $line_limit = ($2 ne "") ? int($2) : 10; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($3); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintText($symbols, $flat, $cumulative, $line_limit); return 1; } if (m/^\s*callgrind\s*([^ \n]*)/) { $main::opt_callgrind = 1; # Get derived profiles my $calls = ExtractCalls($symbols, $orig_profile); my $filename = $1; if ( $1 eq '' ) { $filename = TempName($main::next_tmpfile, "callgrind"); } PrintCallgrind($calls, $filename); if ( $1 eq '' ) { RunKcachegrind($filename, " & "); $main::next_tmpfile++; } return 1; } if (m/^\s*(web)?list\s*(.+)/) { my $html = (defined($1) && ($1 eq "web")); $main::opt_list = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($2); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintListing($total, $libs, $flat, $cumulative, $routine, $html); return 1; } if (m/^\s*disasm\s*(.+)/) { $main::opt_disasm = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($1); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintDisassembly($libs, $flat, $cumulative, $routine); return 1; } if (m/^\s*(gv|web|evince)\s*(.*)/) { $main::opt_gv = 0; $main::opt_evince = 0; $main::opt_web = 0; if ($1 eq "gv") { $main::opt_gv = 1; } elsif ($1 eq "evince") { $main::opt_evince = 1; } elsif ($1 eq "web") { $main::opt_web = 1; } my $focus; my $ignore; ($focus, $ignore) = ParseInteractiveArgs($2); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, $focus, $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { if ($main::opt_gv) { RunGV(TempName($main::next_tmpfile, "ps"), " &"); } elsif ($main::opt_evince) { 
RunEvince(TempName($main::next_tmpfile, "pdf"), " &"); } elsif ($main::opt_web) { RunWeb(TempName($main::next_tmpfile, "svg")); } $main::next_tmpfile++; } return 1; } if (m/^\s*$/) { return 1; } print STDERR "Unknown command: try 'help'.\n"; return 1; } sub ProcessProfile { my $total_count = shift; my $orig_profile = shift; my $symbols = shift; my $focus = shift; my $ignore = shift; # Process current profile to account for various settings my $profile = $orig_profile; printf("Total: %s %s\n", Unparse($total_count), Units()); if ($focus ne '') { $profile = FocusProfile($symbols, $profile, $focus); my $focus_count = TotalProfile($profile); printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n", $focus, Unparse($focus_count), Units(), Unparse($total_count), ($focus_count*100.0) / $total_count); } if ($ignore ne '') { $profile = IgnoreProfile($symbols, $profile, $ignore); my $ignore_count = TotalProfile($profile); printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n", $ignore, Unparse($ignore_count), Units(), Unparse($total_count), ($ignore_count*100.0) / $total_count); } return $profile; } sub InteractiveHelpMessage { print STDERR <{$k}; my @addrs = split(/\n/, $k); if ($#addrs >= 0) { my $depth = $#addrs + 1; # int(foo / 2**32) is the only reliable way to get rid of bottom # 32 bits on both 32- and 64-bit systems. print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32)); print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32)); foreach my $full_addr (@addrs) { my $addr = $full_addr; $addr =~ s/0x0*//; # strip off leading 0x, zeroes if (length($addr) > 16) { print STDERR "Invalid address in profile: $full_addr\n"; next; } my $low_addr = substr($addr, -8); # get last 8 hex chars my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr)); } } } } # Print symbols and profile data sub PrintSymbolizedProfile { my $symbols = shift; my $profile = shift; my $prog = shift; $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash my $symbol_marker = $&; print '--- ', $symbol_marker, "\n"; if (defined($prog)) { print 'binary=', $prog, "\n"; } while (my ($pc, $name) = each(%{$symbols})) { my $sep = ' '; print '0x', $pc; # We have a list of function names, which include the inlined # calls. They are separated (and terminated) by --, which is # illegal in function names. for (my $j = 2; $j <= $#{$name}; $j += 3) { print $sep, $name->[$j]; $sep = '--'; } print "\n"; } print '---', "\n"; my $profile_marker; if ($main::profile_type eq 'heap') { $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } elsif ($main::profile_type eq 'growth') { $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } elsif ($main::profile_type eq 'contention') { $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } else { # elsif ($main::profile_type eq 'cpu') $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash $profile_marker = $&; } print '--- ', $profile_marker, "\n"; if (defined($main::collected_profile)) { # if used with remote fetch, simply dump the collected profile to output. 
    open(SRC, "<$main::collected_profile");
    while (<SRC>) {
      print $_;
    }
    close(SRC);
  } else {
    # --raw/http: For everything to work correctly for non-remote profiles, we
    # would need to extend PrintProfileData() to handle all possible profile
    # types, re-enable the code that is currently disabled in ReadCPUProfile()
    # and FixCallerAddresses(), and remove the remote profile dumping code in
    # the block above.
    die "--raw/http: jeprof can only dump remote profiles for --raw\n";
    # dump a cpu-format profile to standard out
    PrintProfileData($profile);
  }
}

# Print text output
sub PrintText {
  my $symbols = shift;
  my $flat = shift;
  my $cumulative = shift;
  my $line_limit = shift;

  my $total = TotalProfile($flat);

  # Which profile to sort by?
  my $s = $main::opt_cum ? $cumulative : $flat;

  my $running_sum = 0;
  my $lines = 0;
  foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
                 keys(%{$cumulative})) {
    my $f = GetEntry($flat, $k);
    my $c = GetEntry($cumulative, $k);
    $running_sum += $f;

    my $sym = $k;
    if (exists($symbols->{$k})) {
      $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
      if ($main::opt_addresses) {
        $sym = $k . " " . $sym;
      }
    }

    if ($f != 0 || $c != 0) {
      printf("%8s %6s %6s %8s %6s %s\n",
             Unparse($f),
             Percent($f, $total),
             Percent($running_sum, $total),
             Unparse($c),
             Percent($c, $total),
             $sym);
    }
    $lines++;
    last if ($line_limit >= 0 && $lines >= $line_limit);
  }
}

# Callgrind format has a compression for repeated function and file
# names.  You show the name the first time, and just use its number
# subsequently.  This can cut down the file to about a third or a
# quarter of its uncompressed size.  $key and $val are the key/value
# pair that would normally be printed by callgrind; $map is a map from
# value to number.
sub CompressedCGName {
  my($key, $val, $map) = @_;
  my $idx = $map->{$val};
  # For very short values, providing an index hurts rather than helps.
  if (length($val) <= 3) {
    return "$key=$val\n";
  } elsif (defined($idx)) {
    return "$key=($idx)\n";
  } else {
    # scalar(keys(%{$map})) gives the number of items in the map.
    $idx = scalar(keys(%{$map})) + 1;
    $map->{$val} = $idx;
    return "$key=($idx) $val\n";
  }
}

# Print the call graph in a way that's suitable for callgrind.
sub PrintCallgrind {
  my $calls = shift;
  my $filename;
  my %filename_to_index_map;
  my %fnname_to_index_map;

  if ($main::opt_interactive) {
    $filename = shift;
    print STDERR "Writing callgrind file to '$filename'.\n"
  } else {
    $filename = "&STDOUT";
  }
  open(CG, ">$filename");
  printf CG ("events: Hits\n\n");
  foreach my $call ( map { $_->[0] }
                     sort { $a->[1] cmp $b->[1] ||
                            $a->[2] <=> $b->[2] }
                     map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
                           [$_, $1, $2] }
                     keys %$calls ) {
    my $count = int($calls->{$call});
    $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
    my ( $caller_file, $caller_line, $caller_function,
         $callee_file, $callee_line, $callee_function ) =
       ( $1, $2, $3, $5, $6, $7 );

    # TODO(csilvers): for better compression, collect all the
    # caller/callee_files and functions first, before printing
    # anything, and only compress those referenced more than once.
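    # Illustration of the compression (hypothetical names): the first
    # reference prints "fl=(1) /src/a.cc"; later references to the same
    # file emit just "fl=(1)", per CompressedCGName() above.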
printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map); printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map); if (defined $6) { printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map); printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map); printf CG ("calls=$count $callee_line\n"); } printf CG ("$caller_line $count\n\n"); } } # Print disassembly for all all routines that match $main::opt_disasm sub PrintDisassembly { my $libs = shift; my $flat = shift; my $cumulative = shift; my $disasm_opts = shift; my $total = TotalProfile($flat); foreach my $lib (@{$libs}) { my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts); my $offset = AddressSub($lib->[1], $lib->[3]); foreach my $routine (sort ByName keys(%{$symbol_table})) { my $start_addr = $symbol_table->{$routine}->[0]; my $end_addr = $symbol_table->{$routine}->[1]; # See if there are any samples in this routine my $length = hex(AddressSub($end_addr, $start_addr)); my $addr = AddressAdd($start_addr, $offset); for (my $i = 0; $i < $length; $i++) { if (defined($cumulative->{$addr})) { PrintDisassembledFunction($lib->[0], $offset, $routine, $flat, $cumulative, $start_addr, $end_addr, $total); last; } $addr = AddressInc($addr); } } } } # Return reference to array of tuples of the form: # [start_address, filename, linenumber, instruction, limit_address] # E.g., # ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"] sub Disassemble { my $prog = shift; my $offset = shift; my $start_addr = shift; my $end_addr = shift; my $objdump = $obj_tool_map{"objdump"}; my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn", "--start-address=0x$start_addr", "--stop-address=0x$end_addr", $prog); open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); my @result = (); my $filename = ""; my $linenumber = -1; my $last = ["", "", "", ""]; while () { s/\r//g; # turn windows-looking lines into unix-looking lines chop; if (m|\s*([^:\s]+):(\d+)\s*$|) { # Location line of the form: # : $filename = $1; $linenumber = $2; } elsif (m/^ +([0-9a-f]+):\s*(.*)/) { # Disassembly line -- zero-extend address to full length my $addr = HexExtend($1); my $k = AddressAdd($addr, $offset); $last->[4] = $k; # Store ending address for previous instruction $last = [$k, $filename, $linenumber, $2, $end_addr]; push(@result, $last); } } close(OBJDUMP); return @result; } # The input file should contain lines of the form /proc/maps-like # output (same format as expected from the profiles) or that looks # like hex addresses (like "0xDEADBEEF"). We will parse all # /proc/maps output, and for all the hex addresses, we will output # "short" symbol names, one per line, in the same order as the input. sub PrintSymbols { my $maps_and_symbols_file = shift; # ParseLibraries expects pcs to be in a set. Fine by us... my @pclist = (); # pcs in sorted order my $pcs = {}; my $map = ""; foreach my $line (<$maps_and_symbols_file>) { $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines if ($line =~ /\b(0x[0-9a-f]+)\b/i) { push(@pclist, HexExtend($1)); $pcs->{$pclist[-1]} = 1; } else { $map .= $line; } } my $libs = ParseLibraries($main::prog, $map, $pcs); my $symbols = ExtractSymbols($libs, $pcs); foreach my $pc (@pclist) { # ->[0] is the shortname, ->[2] is the full name print(($symbols->{$pc}->[0] || "??") . 
"\n"); } } # For sorting functions by name sub ByName { return ShortFunctionName($a) cmp ShortFunctionName($b); } # Print source-listing for all all routines that match $list_opts sub PrintListing { my $total = shift; my $libs = shift; my $flat = shift; my $cumulative = shift; my $list_opts = shift; my $html = shift; my $output = \*STDOUT; my $fname = ""; if ($html) { # Arrange to write the output to a temporary file $fname = TempName($main::next_tmpfile, "html"); $main::next_tmpfile++; if (!open(TEMP, ">$fname")) { print STDERR "$fname: $!\n"; return; } $output = \*TEMP; print $output HtmlListingHeader(); printf $output ("
<div class=\"legend\">%s<br>Total: %s %s</div>
\n", $main::prog, Unparse($total), Units()); } my $listed = 0; foreach my $lib (@{$libs}) { my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts); my $offset = AddressSub($lib->[1], $lib->[3]); foreach my $routine (sort ByName keys(%{$symbol_table})) { # Print if there are any samples in this routine my $start_addr = $symbol_table->{$routine}->[0]; my $end_addr = $symbol_table->{$routine}->[1]; my $length = hex(AddressSub($end_addr, $start_addr)); my $addr = AddressAdd($start_addr, $offset); for (my $i = 0; $i < $length; $i++) { if (defined($cumulative->{$addr})) { $listed += PrintSource( $lib->[0], $offset, $routine, $flat, $cumulative, $start_addr, $end_addr, $html, $output); last; } $addr = AddressInc($addr); } } } if ($html) { if ($listed > 0) { print $output HtmlListingFooter(); close($output); RunWeb($fname); } else { close($output); unlink($fname); } } } sub HtmlListingHeader { return <<'EOF'; Pprof listing EOF } sub HtmlListingFooter { return <<'EOF'; EOF } sub HtmlEscape { my $text = shift; $text =~ s/&/&/g; $text =~ s//>/g; return $text; } # Returns the indentation of the line, if it has any non-whitespace # characters. Otherwise, returns -1. sub Indentation { my $line = shift; if (m/^(\s*)\S/) { return length($1); } else { return -1; } } # If the symbol table contains inlining info, Disassemble() may tag an # instruction with a location inside an inlined function. But for # source listings, we prefer to use the location in the function we # are listing. So use MapToSymbols() to fetch full location # information for each instruction and then pick out the first # location from a location list (location list contains callers before # callees in case of inlining). # # After this routine has run, each entry in $instructions contains: # [0] start address # [1] filename for function we are listing # [2] line number for function we are listing # [3] disassembly # [4] limit address # [5] most specific filename (may be different from [1] due to inlining) # [6] most specific line number (may be different from [2] due to inlining) sub GetTopLevelLineNumbers { my ($lib, $offset, $instructions) = @_; my $pcs = []; for (my $i = 0; $i <= $#{$instructions}; $i++) { push(@{$pcs}, $instructions->[$i]->[0]); } my $symbols = {}; MapToSymbols($lib, $offset, $pcs, $symbols); for (my $i = 0; $i <= $#{$instructions}; $i++) { my $e = $instructions->[$i]; push(@{$e}, $e->[1]); push(@{$e}, $e->[2]); my $addr = $e->[0]; my $sym = $symbols->{$addr}; if (defined($sym)) { if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { $e->[1] = $1; # File name $e->[2] = $2; # Line number } } } } # Print source-listing for one routine sub PrintSource { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $html = shift; my $output = shift; # Disassemble all instructions (just to get line numbers) my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); GetTopLevelLineNumbers($prog, $offset, \@instructions); # Hack 1: assume that the first source file encountered in the # disassembly contains the routine my $filename = undef; for (my $i = 0; $i <= $#instructions; $i++) { if ($instructions[$i]->[2] >= 0) { $filename = $instructions[$i]->[1]; last; } } if (!defined($filename)) { print STDERR "no filename found in $routine\n"; return 0; } # Hack 2: assume that the largest line number from $filename is the # end of the procedure. 
This is typically safe since if P1 contains # an inlined call to P2, then P2 usually occurs earlier in the # source file. If this does not work, we might have to compute a # density profile or just print all regions we find. my $lastline = 0; for (my $i = 0; $i <= $#instructions; $i++) { my $f = $instructions[$i]->[1]; my $l = $instructions[$i]->[2]; if (($f eq $filename) && ($l > $lastline)) { $lastline = $l; } } # Hack 3: assume the first source location from "filename" is the start of # the source code. my $firstline = 1; for (my $i = 0; $i <= $#instructions; $i++) { if ($instructions[$i]->[1] eq $filename) { $firstline = $instructions[$i]->[2]; last; } } # Hack 4: Extend last line forward until its indentation is less than # the indentation we saw on $firstline my $oldlastline = $lastline; { if (!open(FILE, "<$filename")) { print STDERR "$filename: $!\n"; return 0; } my $l = 0; my $first_indentation = -1; while () { s/\r//g; # turn windows-looking lines into unix-looking lines $l++; my $indent = Indentation($_); if ($l >= $firstline) { if ($first_indentation < 0 && $indent >= 0) { $first_indentation = $indent; last if ($first_indentation == 0); } } if ($l >= $lastline && $indent >= 0) { if ($indent >= $first_indentation) { $lastline = $l+1; } else { last; } } } close(FILE); } # Assign all samples to the range $firstline,$lastline, # Hack 4: If an instruction does not occur in the range, its samples # are moved to the next instruction that occurs in the range. my $samples1 = {}; # Map from line number to flat count my $samples2 = {}; # Map from line number to cumulative count my $running1 = 0; # Unassigned flat counts my $running2 = 0; # Unassigned cumulative counts my $total1 = 0; # Total flat counts my $total2 = 0; # Total cumulative counts my %disasm = (); # Map from line number to disassembly my $running_disasm = ""; # Unassigned disassembly my $skip_marker = "---\n"; if ($html) { $skip_marker = ""; for (my $l = $firstline; $l <= $lastline; $l++) { $disasm{$l} = ""; } } my $last_dis_filename = ''; my $last_dis_linenum = -1; my $last_touched_line = -1; # To detect gaps in disassembly for a line foreach my $e (@instructions) { # Add up counts for all address that fall inside this instruction my $c1 = 0; my $c2 = 0; for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { $c1 += GetEntry($flat, $a); $c2 += GetEntry($cumulative, $a); } if ($html) { my $dis = sprintf(" %6s %6s \t\t%8s: %s ", HtmlPrintNumber($c1), HtmlPrintNumber($c2), UnparseAddress($offset, $e->[0]), CleanDisassembly($e->[3])); # Append the most specific source line associated with this instruction if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) }; $dis = HtmlEscape($dis); my $f = $e->[5]; my $l = $e->[6]; if ($f ne $last_dis_filename) { $dis .= sprintf("%s:%d", HtmlEscape(CleanFileName($f)), $l); } elsif ($l ne $last_dis_linenum) { # De-emphasize the unchanged file name portion $dis .= sprintf("%s" . 
":%d", HtmlEscape(CleanFileName($f)), $l); } else { # De-emphasize the entire location $dis .= sprintf("%s:%d", HtmlEscape(CleanFileName($f)), $l); } $last_dis_filename = $f; $last_dis_linenum = $l; $running_disasm .= $dis; $running_disasm .= "\n"; } $running1 += $c1; $running2 += $c2; $total1 += $c1; $total2 += $c2; my $file = $e->[1]; my $line = $e->[2]; if (($file eq $filename) && ($line >= $firstline) && ($line <= $lastline)) { # Assign all accumulated samples to this line AddEntry($samples1, $line, $running1); AddEntry($samples2, $line, $running2); $running1 = 0; $running2 = 0; if ($html) { if ($line != $last_touched_line && $disasm{$line} ne '') { $disasm{$line} .= "\n"; } $disasm{$line} .= $running_disasm; $running_disasm = ''; $last_touched_line = $line; } } } # Assign any leftover samples to $lastline AddEntry($samples1, $lastline, $running1); AddEntry($samples2, $lastline, $running2); if ($html) { if ($lastline != $last_touched_line && $disasm{$lastline} ne '') { $disasm{$lastline} .= "\n"; } $disasm{$lastline} .= $running_disasm; } if ($html) { printf $output ( "
<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
      "Total:%6s %6s (flat / cumulative %s)\n",
      HtmlEscape(ShortFunctionName($routine)),
      HtmlEscape(CleanFileName($filename)),
      Unparse($total1),
      Unparse($total2),
      Units());
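    # e.g., for a (hypothetical) routine this renders as
    # "<h1>MyFunc</h1>/src/foo.cc" followed by a <pre> whose onClick
    # handler toggles the per-line disassembly spans emitted below.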
  } else {
    printf $output (
      "ROUTINE ====================== %s in %s\n" .
      "%6s %6s Total %s (flat / cumulative)\n",
      ShortFunctionName($routine),
      CleanFileName($filename),
      Unparse($total1),
      Unparse($total2),
      Units());
  }
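  # In text mode the header looks like, e.g. (hypothetical numbers):
  #   ROUTINE ====================== MyFunc in /src/foo.cc
  #     10.0   35.5 Total MB (flat / cumulative)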
  if (!open(FILE, "<$filename")) {
    print STDERR "$filename: $!\n";
    return 0;
  }
  my $l = 0;
  while (<FILE>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    $l++;
    if ($l >= $firstline - 5 &&
        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
      chop;
      my $text = $_;
      if ($l == $firstline) { print $output $skip_marker; }
      my $n1 = GetEntry($samples1, $l);
      my $n2 = GetEntry($samples2, $l);
      if ($html) {
        # Emit a span that has one of the following classes:
        #    livesrc -- has samples
        #    deadsrc -- has disassembly, but with no samples
        #    nop     -- has no matching disassembly
        # Also emit an optional span containing disassembly.
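        # e.g., a sampled source line renders as (hypothetical values):
        #   <span class="line">   42</span> <span class="livesrc">    10     12 free(ptr);</span>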
        my $dis = $disasm{$l};
        my $asm = "";
        if (defined($dis) && $dis ne '') {
          $asm = "" . $dis . "";
        }
        my $source_class = (($n1 + $n2 > 0)
                            ? "livesrc"
                            : (($asm ne "") ? "deadsrc" : "nop"));
        printf $output (
          "%5d " .
          "%6s %6s %s%s\n",
          $l, $source_class,
          HtmlPrintNumber($n1),
          HtmlPrintNumber($n2),
          HtmlEscape($text),
          $asm);
      } else {
        printf $output(
          "%6s %6s %4d: %s\n",
          UnparseAlt($n1),
          UnparseAlt($n2),
          $l,
          $text);
      }
      if ($l == $lastline)  { print $output $skip_marker; }
    };
  }
  close(FILE);
  if ($html) {
    print $output "
\n"; } return 1; } # Return the source line for the specified file/linenumber. # Returns undef if not found. sub SourceLine { my $file = shift; my $line = shift; # Look in cache if (!defined($main::source_cache{$file})) { if (100 < scalar keys(%main::source_cache)) { # Clear the cache when it gets too big $main::source_cache = (); } # Read all lines from the file if (!open(FILE, "<$file")) { print STDERR "$file: $!\n"; $main::source_cache{$file} = []; # Cache the negative result return undef; } my $lines = []; push(@{$lines}, ""); # So we can use 1-based line numbers as indices while () { push(@{$lines}, $_); } close(FILE); # Save the lines in the cache $main::source_cache{$file} = $lines; } my $lines = $main::source_cache{$file}; if (($line < 0) || ($line > $#{$lines})) { return undef; } else { return $lines->[$line]; } } # Print disassembly for one routine with interspersed source if available sub PrintDisassembledFunction { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $total = shift; # Disassemble all instructions my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); # Make array of counts per instruction my @flat_count = (); my @cum_count = (); my $flat_total = 0; my $cum_total = 0; foreach my $e (@instructions) { # Add up counts for all address that fall inside this instruction my $c1 = 0; my $c2 = 0; for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { $c1 += GetEntry($flat, $a); $c2 += GetEntry($cumulative, $a); } push(@flat_count, $c1); push(@cum_count, $c2); $flat_total += $c1; $cum_total += $c2; } # Print header with total counts printf("ROUTINE ====================== %s\n" . "%6s %6s %s (flat, cumulative) %.1f%% of total\n", ShortFunctionName($routine), Unparse($flat_total), Unparse($cum_total), Units(), ($cum_total * 100.0) / $total); # Process instructions in order my $current_file = ""; for (my $i = 0; $i <= $#instructions; ) { my $e = $instructions[$i]; # Print the new file name whenever we switch files if ($e->[1] ne $current_file) { $current_file = $e->[1]; my $fname = $current_file; $fname =~ s|^\./||; # Trim leading "./" # Shorten long file names if (length($fname) >= 58) { $fname = "..." . substr($fname, -55); } printf("-------------------- %s\n", $fname); } # TODO: Compute range of lines to print together to deal with # small reorderings. 
my $first_line = $e->[2]; my $last_line = $first_line; my %flat_sum = (); my %cum_sum = (); for (my $l = $first_line; $l <= $last_line; $l++) { $flat_sum{$l} = 0; $cum_sum{$l} = 0; } # Find run of instructions for this range of source lines my $first_inst = $i; while (($i <= $#instructions) && ($instructions[$i]->[2] >= $first_line) && ($instructions[$i]->[2] <= $last_line)) { $e = $instructions[$i]; $flat_sum{$e->[2]} += $flat_count[$i]; $cum_sum{$e->[2]} += $cum_count[$i]; $i++; } my $last_inst = $i - 1; # Print source lines for (my $l = $first_line; $l <= $last_line; $l++) { my $line = SourceLine($current_file, $l); if (!defined($line)) { $line = "?\n"; next; } else { $line =~ s/^\s+//; } printf("%6s %6s %5d: %s", UnparseAlt($flat_sum{$l}), UnparseAlt($cum_sum{$l}), $l, $line); } # Print disassembly for (my $x = $first_inst; $x <= $last_inst; $x++) { my $e = $instructions[$x]; printf("%6s %6s %8s: %6s\n", UnparseAlt($flat_count[$x]), UnparseAlt($cum_count[$x]), UnparseAddress($offset, $e->[0]), CleanDisassembly($e->[3])); } } } # Print DOT graph sub PrintDot { my $prog = shift; my $symbols = shift; my $raw = shift; my $flat = shift; my $cumulative = shift; my $overall_total = shift; # Get total my $local_total = TotalProfile($flat); my $nodelimit = int($main::opt_nodefraction * $local_total); my $edgelimit = int($main::opt_edgefraction * $local_total); my $nodecount = $main::opt_nodecount; # Find nodes to include my @list = (sort { abs(GetEntry($cumulative, $b)) <=> abs(GetEntry($cumulative, $a)) || $a cmp $b } keys(%{$cumulative})); my $last = $nodecount - 1; if ($last > $#list) { $last = $#list; } while (($last >= 0) && (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { $last--; } if ($last < 0) { print STDERR "No nodes to print\n"; return 0; } if ($nodelimit > 0 || $edgelimit > 0) { printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", Unparse($nodelimit), Units(), Unparse($edgelimit), Units()); } # Open DOT output file my $output; my $escaped_dot = ShellEscape(@DOT); my $escaped_ps2pdf = ShellEscape(@PS2PDF); if ($main::opt_gv) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); $output = "| $escaped_dot -Tps2 >$escaped_outfile"; } elsif ($main::opt_evince) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; } elsif ($main::opt_ps) { $output = "| $escaped_dot -Tps2"; } elsif ($main::opt_pdf) { $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; } elsif ($main::opt_web || $main::opt_svg) { # We need to post-process the SVG, so write to a temporary file always. my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); $output = "| $escaped_dot -Tsvg >$escaped_outfile"; } elsif ($main::opt_gif) { $output = "| $escaped_dot -Tgif"; } else { $output = ">&STDOUT"; } open(DOT, $output) || error("$output: $!\n"); # Title printf DOT ("digraph \"%s; %s %s\" {\n", $prog, Unparse($overall_total), Units()); if ($main::opt_pdf) { # The output is more printable if we set the page size for dot. printf DOT ("size=\"8,11\"\n"); } printf DOT ("node [width=0.375,height=0.25];\n"); # Print legend printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . 
"label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", $prog, sprintf("Total %s: %s", Units(), Unparse($overall_total)), sprintf("Focusing on: %s", Unparse($local_total)), sprintf("Dropped nodes with <= %s abs(%s)", Unparse($nodelimit), Units()), sprintf("Dropped edges with <= %s %s", Unparse($edgelimit), Units()) ); # Print nodes my %node = (); my $nextnode = 1; foreach my $a (@list[0..$last]) { # Pick font size my $f = GetEntry($flat, $a); my $c = GetEntry($cumulative, $a); my $fs = 8; if ($local_total > 0) { $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); } $node{$a} = $nextnode++; my $sym = $a; $sym =~ s/\s+/\\n/g; $sym =~ s/::/\\n/g; # Extra cumulative info to print for non-leaves my $extra = ""; if ($f != $c) { $extra = sprintf("\\rof %s (%s)", Unparse($c), Percent($c, $local_total)); } my $style = ""; if ($main::opt_heapcheck) { if ($f > 0) { # make leak-causing nodes more visible (add a background) $style = ",style=filled,fillcolor=gray" } elsif ($f < 0) { # make anti-leak-causing nodes (which almost never occur) # stand out as well (triple border) $style = ",peripheries=3" } } printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . "\",shape=box,fontsize=%.1f%s];\n", $node{$a}, $sym, Unparse($f), Percent($f, $local_total), $extra, $fs, $style, ); } # Get edges and counts per edge my %edge = (); my $n; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$raw})) { # TODO: omit low %age edges $n = $raw->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); for (my $i = 1; $i <= $#translated; $i++) { my $src = $translated[$i]; my $dst = $translated[$i-1]; #next if ($src eq $dst); # Avoid self-edges? if (exists($node{$src}) && exists($node{$dst})) { my $edge_label = "$src\001$dst"; if (!exists($edge{$edge_label})) { $edge{$edge_label} = 0; } $edge{$edge_label} += $n; } } } # Print edges (process in order of decreasing counts) my %indegree = (); # Number of incoming edges added per node so far my %outdegree = (); # Number of outgoing edges added per node so far foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { my @x = split(/\001/, $e); $n = $edge{$e}; # Initialize degree of kept incoming and outgoing edges if necessary my $src = $x[0]; my $dst = $x[1]; if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } my $keep; if ($indegree{$dst} == 0) { # Keep edge if needed for reachability $keep = 1; } elsif (abs($n) <= $edgelimit) { # Drop if we are below --edgefraction $keep = 0; } elsif ($outdegree{$src} >= $main::opt_maxdegree || $indegree{$dst} >= $main::opt_maxdegree) { # Keep limited number of in/out edges per node $keep = 0; } else { $keep = 1; } if ($keep) { $outdegree{$src}++; $indegree{$dst}++; # Compute line width based on edge count my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); if ($fraction > 1) { $fraction = 1; } my $w = $fraction * 2; if ($w < 1 && ($main::opt_web || $main::opt_svg)) { # SVG output treats line widths < 1 poorly. 
          $w = 1;
        }
        # Dot sometimes segfaults if given edge weights that are too large, so
        # we cap the weights at a large value
        my $edgeweight = abs($n) ** 0.7;
        if ($edgeweight > 100000) { $edgeweight = 100000; }
        $edgeweight = int($edgeweight);

        my $style = sprintf("setlinewidth(%f)", $w);
        if ($x[1] =~ m/\(inline\)/) { $style .= ",dashed"; }

        # Use a slightly squashed function of the edge count as the weight
        printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
                    $node{$x[0]},
                    $node{$x[1]},
                    Unparse($n),
                    $edgeweight,
                    $style);
      }
    }

  print DOT ("}\n");
  close(DOT);

  if ($main::opt_web || $main::opt_svg) {
    # Rewrite SVG to be more usable inside web browser.
    RewriteSvg(TempName($main::next_tmpfile, "svg"));
  }

  return 1;
}

sub RewriteSvg {
  my $svgfile = shift;

  open(SVG, $svgfile) || die "open temp svg: $!";
  my @svg = <SVG>;
  close(SVG);
  unlink $svgfile;
  my $svg = join('', @svg);

  # Dot's SVG output is
  #
  #    <svg width="___" height="___"
  #     viewBox="___" xmlns=...>
  #    <g id="graph0" transform="...">
  #    ...
  #    </g>
  #    </svg>
  #
  # Change it to
  #
  #    <svg width="100%" height="100%"
  #     xmlns=...>
  #    $svg_javascript
  #    <g id="viewport" transform="translate(0,0)">
  #    <g id="graph0" transform="...">
  #    ...
  #    </g>
  #    </g>
  #    </svg>

  # Fix width, height; drop viewBox.
  $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;

  # Insert script, viewport <g> above first <g>
  my $svg_javascript = SvgJavascript();
  my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
  $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;

  # Insert </g> (for viewport) just before </svg>.
  $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
  $svg =~ s/<svg /<svg onload="handleLoad(evt)" /;

  if ($main::opt_svg) {
    # --svg: write to standard output.
    print $svg;
  } else {
    # Write back to temporary file.
    open(SVG, ">$svgfile") || die "open $svgfile: $!";
    print SVG $svg;
    close(SVG);
  }
}

sub SvgJavascript {
  return <<'EOF';
EOF
}

# Provides a map from fullname to shortname for cases where the
# shortname is ambiguous.  The symlist has both the fullname and
# shortname for all symbols, which is usually fine, but sometimes --
# such as overloaded functions -- two different fullnames can map to
# the same shortname.  In that case, we use the address of the
# function to disambiguate the two.  This function fills in a map that
# maps fullnames to modified shortnames in such cases.  If a fullname
# is not present in the map, the 'normal' shortname provided by the
# symlist is the appropriate one to use.
sub FillFullnameToShortnameMap {
  my $symbols = shift;
  my $fullname_to_shortname_map = shift;
  my $shortnames_seen_once = {};
  my $shortnames_seen_more_than_once = {};

  foreach my $symlist (values(%{$symbols})) {
    # TODO(csilvers): deal with inlined symbols too.
    my $shortname = $symlist->[0];
    my $fullname = $symlist->[2];
    if ($fullname !~ /<[0-9a-fA-F]+>$/) {  # fullname doesn't end in an address
      next;       # the only collisions we care about are when addresses differ
    }
    if (defined($shortnames_seen_once->{$shortname}) &&
        $shortnames_seen_once->{$shortname} ne $fullname) {
      $shortnames_seen_more_than_once->{$shortname} = 1;
    } else {
      $shortnames_seen_once->{$shortname} = $fullname;
    }
  }

  foreach my $symlist (values(%{$symbols})) {
    my $shortname = $symlist->[0];
    my $fullname = $symlist->[2];
    # TODO(csilvers): take in a list of addresses we care about, and only
    # store in the map if $symlist->[1] is in that list.  Saves space.
    next if defined($fullname_to_shortname_map->{$fullname});
    if (defined($shortnames_seen_more_than_once->{$shortname})) {
      if ($fullname =~ /<0*([^>]*)>$/) {   # fullname has address at end of it
        $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
      }
    }
  }
}

# Return a small number that identifies the argument.
# Multiple calls with the same argument will return the same number.
# Calls with different arguments will return different numbers.
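# A quick illustration with hypothetical keys: the first distinct key gets
# id 1, the next distinct key gets id 2, and repeats are stable, e.g.
#   ShortIdFor("0x4004f0") == 1
#   ShortIdFor("0x400600") == 2
#   ShortIdFor("0x4004f0") == 1   # same key, same id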
sub ShortIdFor { my $key = shift; my $id = $main::uniqueid{$key}; if (!defined($id)) { $id = keys(%main::uniqueid) + 1; $main::uniqueid{$key} = $id; } return $id; } # Translate a stack of addresses into a stack of symbols sub TranslateStack { my $symbols = shift; my $fullname_to_shortname_map = shift; my $k = shift; my @addrs = split(/\n/, $k); my @result = (); for (my $i = 0; $i <= $#addrs; $i++) { my $a = $addrs[$i]; # Skip large addresses since they sometimes show up as fake entries on RH9 if (length($a) > 8 && $a gt "7fffffffffffffff") { next; } if ($main::opt_disasm || $main::opt_list) { # We want just the address for the key push(@result, $a); next; } my $symlist = $symbols->{$a}; if (!defined($symlist)) { $symlist = [$a, "", $a]; } # We can have a sequence of symbols for a particular entry # (more than one symbol in the case of inlining). Callers # come before callees in symlist, so walk backwards since # the translated stack should contain callees before callers. for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { my $func = $symlist->[$j-2]; my $fileline = $symlist->[$j-1]; my $fullfunc = $symlist->[$j]; if (defined($fullname_to_shortname_map->{$fullfunc})) { $func = $fullname_to_shortname_map->{$fullfunc}; } if ($j > 2) { $func = "$func (inline)"; } # Do not merge nodes corresponding to Callback::Run since that # causes confusing cycles in dot display. Instead, we synthesize # a unique name for this frame per caller. if ($func =~ m/Callback.*::Run$/) { my $caller = ($i > 0) ? $addrs[$i-1] : 0; $func = "Run#" . ShortIdFor($caller); } if ($main::opt_addresses) { push(@result, "$a $func $fileline"); } elsif ($main::opt_lines) { if ($func eq '??' && $fileline eq '??:0') { push(@result, "$a"); } else { push(@result, "$func $fileline"); } } elsif ($main::opt_functions) { if ($func eq '??') { push(@result, "$a"); } else { push(@result, $func); } } elsif ($main::opt_files) { if ($fileline eq '??:0' || $fileline eq '') { push(@result, "$a"); } else { my $f = $fileline; $f =~ s/:\d+$//; push(@result, $f); } } else { push(@result, $a); last; # Do not print inlined info } } } # print join(",", @addrs), " => ", join(",", @result), "\n"; return @result; } # Generate percent string for a number and a total sub Percent { my $num = shift; my $tot = shift; if ($tot != 0) { return sprintf("%.1f%%", $num * 100.0 / $tot); } else { return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); } } # Generate pretty-printed form of number sub Unparse { my $num = shift; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return sprintf("%d", $num); } else { if ($main::opt_show_bytes) { return sprintf("%d", $num); } else { return sprintf("%.1f", $num / 1048576.0); } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds } else { return sprintf("%d", $num); } } # Alternate pretty-printed form: 0 maps to "." 
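# For example, in a heap profile displayed in the default MB units,
# UnparseAlt(0) yields "." while UnparseAlt(2097152) yields "2.0".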
sub UnparseAlt { my $num = shift; if ($num == 0) { return "."; } else { return Unparse($num); } } # Alternate pretty-printed form: 0 maps to "" sub HtmlPrintNumber { my $num = shift; if ($num == 0) { return ""; } else { return Unparse($num); } } # Return output units sub Units { if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return "objects"; } else { if ($main::opt_show_bytes) { return "B"; } else { return "MB"; } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return "seconds"; } else { return "samples"; } } ##### Profile manipulation code ##### # Generate flattened profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a] sub FlatProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); if ($#addrs >= 0) { AddEntry($result, $addrs[0], $count); } } return $result; } # Generate cumulative profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a], [b], [c], [d] sub CumulativeProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { AddEntry($result, $a, $count); } } return $result; } # If the second-youngest PC on the stack is always the same, returns # that pc. Otherwise, returns undef. sub IsSecondPcAlwaysTheSame { my $profile = shift; my $second_pc = undef; foreach my $k (keys(%{$profile})) { my @addrs = split(/\n/, $k); if ($#addrs < 1) { return undef; } if (not defined $second_pc) { $second_pc = $addrs[1]; } else { if ($second_pc ne $addrs[1]) { return undef; } } } return $second_pc; } sub ExtractSymbolLocation { my $symbols = shift; my $address = shift; # 'addr2line' outputs "??:0" for unknown locations; we do the # same to be consistent. my $location = "??:0:unknown"; if (exists $symbols->{$address}) { my $file = $symbols->{$address}->[1]; if ($file eq "?") { $file = "??:0" } $location = $file . ":" . $symbols->{$address}->[0]; } return $location; } # Extracts a graph of calls. 
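# The returned map is keyed by "file:line:function" locations (as built by
# ExtractSymbolLocation above) and by "source -> destination" edges; each
# value is the sample count charged to that node or edge.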
sub ExtractCalls { my $symbols = shift; my $profile = shift; my $calls = {}; while( my ($stack_trace, $count) = each %$profile ) { my @address = split(/\n/, $stack_trace); my $destination = ExtractSymbolLocation($symbols, $address[0]); AddEntry($calls, $destination, $count); for (my $i = 1; $i <= $#address; $i++) { my $source = ExtractSymbolLocation($symbols, $address[$i]); my $call = "$source -> $destination"; AddEntry($calls, $call, $count); $destination = $source; } } return $calls; } sub FilterFrames { my $symbols = shift; my $profile = shift; if ($main::opt_retain eq '' && $main::opt_exclude eq '') { return $profile; } my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my @path = (); foreach my $a (@addrs) { my $sym; if (exists($symbols->{$a})) { $sym = $symbols->{$a}->[0]; } else { $sym = $a; } if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) { next; } if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) { next; } push(@path, $a); } if (scalar(@path) > 0) { my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } } return $result; } sub RemoveUninterestingFrames { my $symbols = shift; my $profile = shift; # List of function names to skip my %skip = (); my $skip_regexp = 'NOMATCH'; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { foreach my $name ('@JEMALLOC_PREFIX@calloc', 'cfree', '@JEMALLOC_PREFIX@malloc', 'newImpl', 'void* newImpl', '@JEMALLOC_PREFIX@free', '@JEMALLOC_PREFIX@memalign', '@JEMALLOC_PREFIX@posix_memalign', '@JEMALLOC_PREFIX@aligned_alloc', 'pvalloc', '@JEMALLOC_PREFIX@valloc', '@JEMALLOC_PREFIX@realloc', '@JEMALLOC_PREFIX@mallocx', '@JEMALLOC_PREFIX@rallocx', '@JEMALLOC_PREFIX@xallocx', '@JEMALLOC_PREFIX@dallocx', '@JEMALLOC_PREFIX@sdallocx', 'tc_calloc', 'tc_cfree', 'tc_malloc', 'tc_free', 'tc_memalign', 'tc_posix_memalign', 'tc_pvalloc', 'tc_valloc', 'tc_realloc', 'tc_new', 'tc_delete', 'tc_newarray', 'tc_deletearray', 'tc_new_nothrow', 'tc_newarray_nothrow', 'do_malloc', '::do_malloc', # new name -- got moved to an unnamed ns '::do_malloc_or_cpp_alloc', 'DoSampledAllocation', 'simple_alloc::allocate', '__malloc_alloc_template::allocate', '__builtin_delete', '__builtin_new', '__builtin_vec_delete', '__builtin_vec_new', 'operator new', 'operator new[]', # The entry to our memory-allocation routines on OS X 'malloc_zone_malloc', 'malloc_zone_calloc', 'malloc_zone_valloc', 'malloc_zone_realloc', 'malloc_zone_memalign', 'malloc_zone_free', # These mark the beginning/end of our custom sections '__start_google_malloc', '__stop_google_malloc', '__start_malloc_hook', '__stop_malloc_hook') { $skip{$name} = 1; $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything } # TODO: Remove TCMalloc once everything has been # moved into the tcmalloc:: namespace and we have flushed # old code out of the system. $skip_regexp = "TCMalloc|^tcmalloc::"; } elsif ($main::profile_type eq 'contention') { foreach my $vname ('base::RecordLockProfileData', 'base::SubmitMutexProfileData', 'base::SubmitSpinLockProfileData', 'Mutex::Unlock', 'Mutex::UnlockSlow', 'Mutex::ReaderUnlock', 'MutexLock::~MutexLock', 'SpinLock::Unlock', 'SpinLock::SlowUnlock', 'SpinLockHolder::~SpinLockHolder') { $skip{$vname} = 1; } } elsif ($main::profile_type eq 'cpu') { # Drop signal handlers used for CPU profile collection # TODO(dpeng): this should not be necessary; it's taken # care of by the general 2nd-pc mechanism below. 
foreach my $name ('ProfileData::Add', # historical 'ProfileData::prof_handler', # historical 'CpuProfiler::prof_handler', '__FRAME_END__', '__pthread_sighandler', '__restore') { $skip{$name} = 1; } } else { # Nothing skipped for unknown types } if ($main::profile_type eq 'cpu') { # If all the second-youngest program counters are the same, # this STRONGLY suggests that it is an artifact of measurement, # i.e., stack frames pushed by the CPU profiler signal handler. # Hence, we delete them. # (The topmost PC is read from the signal structure, not from # the stack, so it does not get involved.) while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { my $result = {}; my $func = ''; if (exists($symbols->{$second_pc})) { $second_pc = $symbols->{$second_pc}->[0]; } print STDERR "Removing $second_pc from all stack traces.\n"; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); splice @addrs, 1, 1; my $reduced_path = join("\n", @addrs); AddEntry($result, $reduced_path, $count); } $profile = $result; } } my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my @path = (); foreach my $a (@addrs) { if (exists($symbols->{$a})) { my $func = $symbols->{$a}->[0]; if ($skip{$func} || ($func =~ m/$skip_regexp/)) { # Throw away the portion of the backtrace seen so far, under the # assumption that previous frames were for functions internal to the # allocator. @path = (); next; } } push(@path, $a); } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } $result = FilterFrames($symbols, $result); return $result; } # Reduce profile to granularity given by user sub ReduceProfile { my $symbols = shift; my $profile = shift; my $result = {}; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); my @path = (); my %seen = (); $seen{''} = 1; # So that empty keys are skipped foreach my $e (@translated) { # To avoid double-counting due to recursion, skip a stack-trace # entry if it has already been seen if (!$seen{$e}) { $seen{$e} = 1; push(@path, $e); } } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } return $result; } # Does the specified symbol array match the regexp? 
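# ($sym is a flat list of (shortname, fileline, fullname) triples, e.g. as
# built by FetchSymbols; the regexp is tried against the shortname and
# fileline of each triple.)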
sub SymbolMatches { my $sym = shift; my $re = shift; if (defined($sym)) { for (my $i = 0; $i < $#{$sym}; $i += 3) { if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { return 1; } } } return 0; } # Focus only on paths involving specified regexps sub FocusProfile { my $symbols = shift; my $profile = shift; my $focus = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { AddEntry($result, $k, $count); last; } } } return $result; } # Focus only on paths not involving specified regexps sub IgnoreProfile { my $symbols = shift; my $profile = shift; my $ignore = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my $matched = 0; foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { $matched = 1; last; } } if (!$matched) { AddEntry($result, $k, $count); } } return $result; } # Get total count in profile sub TotalProfile { my $profile = shift; my $result = 0; foreach my $k (keys(%{$profile})) { $result += $profile->{$k}; } return $result; } # Add A to B sub AddProfile { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { my $v = $A->{$k}; AddEntry($R, $k, $v); } # add all keys in B foreach my $k (keys(%{$B})) { my $v = $B->{$k}; AddEntry($R, $k, $v); } return $R; } # Merges symbol maps sub MergeSymbols { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { $R->{$k} = $A->{$k}; } if (defined($B)) { foreach my $k (keys(%{$B})) { $R->{$k} = $B->{$k}; } } return $R; } # Add A to B sub AddPcs { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { $R->{$k} = 1 } # add all keys in B foreach my $k (keys(%{$B})) { $R->{$k} = 1 } return $R; } # Subtract B from A sub SubtractProfile { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { my $v = $A->{$k} - GetEntry($B, $k); if ($v < 0 && $main::opt_drop_negative) { $v = 0; } AddEntry($R, $k, $v); } if (!$main::opt_drop_negative) { # Take care of when subtracted profile has more entries foreach my $k (keys(%{$B})) { if (!exists($A->{$k})) { AddEntry($R, $k, 0 - $B->{$k}); } } } return $R; } # Get entry from profile; zero if not present sub GetEntry { my $profile = shift; my $k = shift; if (exists($profile->{$k})) { return $profile->{$k}; } else { return 0; } } # Add entry to specified profile sub AddEntry { my $profile = shift; my $k = shift; my $n = shift; if (!exists($profile->{$k})) { $profile->{$k} = 0; } $profile->{$k} += $n; } # Add a stack of entries to specified profile, and add them to the $pcs # list. 
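# For example (hypothetical addresses), AddEntries($profile, $pcs,
# "4004f0 400600", 5) zero-extends each address to $address_length hex
# digits via HexExtend, marks both in $pcs, and charges 5 samples to the
# joined "addr1\naddr2" stack key.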
sub AddEntries {
  my $profile = shift;
  my $pcs = shift;
  my $stack = shift;
  my $count = shift;
  my @k = ();

  foreach my $e (split(/\s+/, $stack)) {
    my $pc = HexExtend($e);
    $pcs->{$pc} = 1;
    push @k, $pc;
  }
  AddEntry($profile, (join "\n", @k), $count);
}

##### Code to profile a server dynamically #####

sub CheckSymbolPage {
  my $url = SymbolPageURL();
  my $command = ShellEscape(@URL_FETCHER, $url);
  open(SYMBOL, "$command |") or error($command);
  my $line = <SYMBOL>;
  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
  close(SYMBOL);
  unless (defined($line)) {
    error("$url doesn't exist\n");
  }

  if ($line =~ /^num_symbols:\s+(\d+)$/) {
    if ($1 == 0) {
      error("Stripped binary. No symbols available.\n");
    }
  } else {
    error("Failed to get the number of symbols from $url\n");
  }
}

sub IsProfileURL {
  my $profile_name = shift;
  if (-f $profile_name) {
    printf STDERR "Using local file $profile_name.\n";
    return 0;
  }
  return 1;
}

sub ParseProfileURL {
  my $profile_name = shift;

  if (!defined($profile_name) || $profile_name eq "") {
    return ();
  }

  # Split profile URL - matches all non-empty strings, so no test.
  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;

  my $proto = $1 || "http://";
  my $hostport = $2;
  my $prefix = $3;
  my $profile = $4 || "/";

  my $host = $hostport;
  $host =~ s/:.*//;

  my $baseurl = "$proto$hostport$prefix";
  return ($host, $baseurl, $profile);
}

# We fetch symbols from the first profile argument.
sub SymbolPageURL {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  return "$baseURL$SYMBOL_PAGE";
}

sub FetchProgramName() {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  my $url = "$baseURL$PROGRAM_NAME_PAGE";
  my $command_line = ShellEscape(@URL_FETCHER, $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  my $cmdline = <CMDLINE>;
  $cmdline =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
  close(CMDLINE);
  error("Failed to get program name from $url\n") unless defined($cmdline);
  $cmdline =~ s/\x00.+//;   # Remove argv[1] and the following arguments.
  $cmdline =~ s!\n!!g;      # Remove LFs.
  return $cmdline;
}

# Gee, curl's -L (--location) option isn't reliable at least
# with its 7.12.3 version.  Curl will forget to post data if
# there is a redirection.  This function is a workaround for
# curl.  Redirection happens on borg hosts.
sub ResolveRedirectionForCurl {
  my $url = shift;
  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  while (<CMDLINE>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    if (/^Location: (.*)/) {
      $url = $1;
    }
  }
  close(CMDLINE);
  return $url;
}

# Add a timeout flag to URL_FETCHER.  Returns a new list.
sub AddFetchTimeout {
  my $timeout = shift;
  my @fetcher = @_;
  if (defined($timeout)) {
    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
      push(@fetcher, "--max-time", sprintf("%d", $timeout));
    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
      push(@fetcher, sprintf("--deadline=%d", $timeout));
    }
  }
  return @fetcher;
}

# Reads a symbol map from the file handle name given as $1, returning
# the resulting symbol map.  Also processes variables relating to symbols.
# Currently, the only variable processed is 'binary=' which updates
# $main::prog to have the correct program name.
sub ReadSymbols {
  my $in = shift;
  my $map = {};
  while (<$in>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    # Removes all the leading zeroes from the symbols, see comment below.
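    # (e.g. a line "0x00000000004004f0 main" is stored as
    # $map->{"4004f0"} = "main", matching the zero-stripped form that
    # FetchSymbols looks up.)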
if (m/^0x0*([0-9a-f]+)\s+(.+)/) { $map->{$1} = $2; } elsif (m/^---/) { last; } elsif (m/^([a-z][^=]*)=(.*)$/ ) { my ($variable, $value) = ($1, $2); for ($variable, $value) { s/^\s+//; s/\s+$//; } if ($variable eq "binary") { if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", $main::prog, $value); } $main::prog = $value; } else { printf STDERR ("Ignoring unknown variable in symbols list: " . "'%s' = '%s'\n", $variable, $value); } } } return $map; } sub URLEncode { my $str = shift; $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg; return $str; } sub AppendSymbolFilterParams { my $url = shift; my @params = (); if ($main::opt_retain ne '') { push(@params, sprintf("retain=%s", URLEncode($main::opt_retain))); } if ($main::opt_exclude ne '') { push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude))); } if (scalar @params > 0) { $url = sprintf("%s?%s", $url, join("&", @params)); } return $url; } # Fetches and processes symbols to prepare them for use in the profile output # code. If the optional 'symbol_map' arg is not given, fetches symbols from # $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols # are assumed to have already been fetched into 'symbol_map' and are simply # extracted and processed. sub FetchSymbols { my $pcset = shift; my $symbol_map = shift; my %seen = (); my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq if (!defined($symbol_map)) { my $post_data = join("+", sort((map {"0x" . "$_"} @pcs))); open(POSTFILE, ">$main::tmpfile_sym"); print POSTFILE $post_data; close(POSTFILE); my $url = SymbolPageURL(); my $command_line; if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { $url = ResolveRedirectionForCurl($url); $url = AppendSymbolFilterParams($url); $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", $url); } else { $url = AppendSymbolFilterParams($url); $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) . " < " . ShellEscape($main::tmpfile_sym)); } # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); $symbol_map = ReadSymbols(*SYMBOL{IO}); close(SYMBOL); } my $symbols = {}; foreach my $pc (@pcs) { my $fullname; # For 64 bits binaries, symbols are extracted with 8 leading zeroes. # Then /symbol reads the long symbols in as uint64, and outputs # the result with a "0x%08llx" format which get rid of the zeroes. # By removing all the leading zeroes in both $pc and the symbols from # /symbol, the symbols match and are retrievable from the map. my $shortpc = $pc; $shortpc =~ s/^0*//; # Each line may have a list of names, which includes the function # and also other functions it has inlined. They are separated (in # PrintSymbolizedProfile), by --, which is illegal in function names. my $fullnames; if (defined($symbol_map->{$shortpc})) { $fullnames = $symbol_map->{$shortpc}; } else { $fullnames = "0x" . 
$pc; # Just use addresses } my $sym = []; $symbols->{$pc} = $sym; foreach my $fullname (split("--", $fullnames)) { my $name = ShortFunctionName($fullname); push(@{$sym}, $name, "?", $fullname); } } return $symbols; } sub BaseName { my $file_name = shift; $file_name =~ s!^.*/!!; # Remove directory name return $file_name; } sub MakeProfileBaseName { my ($binary_name, $profile_name) = @_; my ($host, $baseURL, $path) = ParseProfileURL($profile_name); my $binary_shortname = BaseName($binary_name); return sprintf("%s.%s.%s", $binary_shortname, $main::op_time, $host); } sub FetchDynamicProfile { my $binary_name = shift; my $profile_name = shift; my $fetch_name_only = shift; my $encourage_patience = shift; if (!IsProfileURL($profile_name)) { return $profile_name; } else { my ($host, $baseURL, $path) = ParseProfileURL($profile_name); if ($path eq "" || $path eq "/") { # Missing type specifier defaults to cpu-profile $path = $PROFILE_PAGE; } my $profile_file = MakeProfileBaseName($binary_name, $profile_name); my $url = "$baseURL$path"; my $fetch_timeout = undef; if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { if ($path =~ m/[?]/) { $url .= "&"; } else { $url .= "?"; } $url .= sprintf("seconds=%d", $main::opt_seconds); $fetch_timeout = $main::opt_seconds * 1.01 + 60; # Set $profile_type for consumption by PrintSymbolizedProfile. $main::profile_type = 'cpu'; } else { # For non-CPU profiles, we add a type-extension to # the target profile file name. my $suffix = $path; $suffix =~ s,/,.,g; $profile_file .= $suffix; # Set $profile_type for consumption by PrintSymbolizedProfile. if ($path =~ m/$HEAP_PAGE/) { $main::profile_type = 'heap'; } elsif ($path =~ m/$GROWTH_PAGE/) { $main::profile_type = 'growth'; } elsif ($path =~ m/$CONTENTION_PAGE/) { $main::profile_type = 'contention'; } } my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof"); if (! -d $profile_dir) { mkdir($profile_dir) || die("Unable to create profile directory $profile_dir: $!\n"); } my $tmp_profile = "$profile_dir/.tmp.$profile_file"; my $real_profile = "$profile_dir/$profile_file"; if ($fetch_name_only > 0) { return $real_profile; } my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; if ($encourage_patience) { print STDERR "Be patient...\n"; } } else { print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; } (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); print STDERR "Wrote profile to $real_profile\n"; $main::collected_profile = $real_profile; return $main::collected_profile; } } # Collect profiles in parallel sub FetchDynamicProfiles { my $items = scalar(@main::pfile_args); my $levels = log($items) / log(2); if ($items == 1) { $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); } else { # math rounding issues if ((2 ** $levels) < $items) { $levels++; } my $count = scalar(@main::pfile_args); for (my $i = 0; $i < $count; $i++) { $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); } print STDERR "Fetching $count profiles, Be patient...\n"; FetchDynamicProfilesRecurse($levels, 0, 0); $main::collected_profile = join(" \\\n ", @main::profile_files); } } # Recursively fork a process to get enough processes # collecting profiles sub FetchDynamicProfilesRecurse { my $maxlevel = shift; my $level = shift; my $position = shift; if (my $pid = fork()) { $position = 0 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); wait; } else { $position = 1 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); cleanup(); exit(0); } } # Collect a single profile sub TryCollectProfile { my $maxlevel = shift; my $level = shift; my $position = shift; if ($level >= ($maxlevel - 1)) { if ($position < scalar(@main::pfile_args)) { FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); } } else { FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); } } ##### Parsing code ##### # Provide a small streaming-read module to handle very large # cpu-profile files. Stream in chunks along a sliding window. # Provides an interface to get one 'slot', correctly handling # endian-ness differences. A slot is one 32-bit or 64-bit word # (depending on the input profile). We tell endianness and bit-size # for the profile by looking at the first 8 bytes: in cpu profiles, # the second slot is always 3 (we'll accept anything that's not 0). BEGIN { package CpuProfileStream; sub new { my ($class, $file, $fname) = @_; my $self = { file => $file, base => 0, stride => 512 * 1024, # must be a multiple of bitsize/8 slots => [], unpack_code => "", # N for big-endian, V for little perl_is_64bit => 1, # matters if profile is 64-bit }; bless $self, $class; # Let unittests adjust the stride if ($main::opt_test_stride > 0) { $self->{stride} = $main::opt_test_stride; } # Read the first two slots to figure out bitsize and endianness. my $slots = $self->{slots}; my $str; read($self->{file}, $str, 8); # Set the global $address_length based on what we see here. # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). $address_length = ($str eq (chr(0)x8)) ? 16 : 8; if ($address_length == 8) { if (substr($str, 6, 2) eq chr(0)x2) { $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 4, 2) eq chr(0)x2) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**16\n"); } @$slots = unpack($self->{unpack_code} . 
"*", $str); } else { # If we're a 64-bit profile, check if we're a 64-bit-capable # perl. Otherwise, each slot will be represented as a float # instead of an int64, losing precision and making all the # 64-bit addresses wrong. We won't complain yet, but will # later if we ever see a value that doesn't fit in 32 bits. my $has_q = 0; eval { $has_q = pack("Q", "1") ? 1 : 1; }; if (!$has_q) { $self->{perl_is_64bit} = 0; } read($self->{file}, $str, 8); if (substr($str, 4, 4) eq chr(0)x4) { # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 0, 4) eq chr(0)x4) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**32\n"); } my @pair = unpack($self->{unpack_code} . "*", $str); # Since we know one of the pair is 0, it's fine to just add them. @$slots = (0, $pair[0] + $pair[1]); } return $self; } # Load more data when we access slots->get(X) which is not yet in memory. sub overflow { my ($self) = @_; my $slots = $self->{slots}; $self->{base} += $#$slots + 1; # skip over data we're replacing my $str; read($self->{file}, $str, $self->{stride}); if ($address_length == 8) { # the 32-bit case # This is the easy case: unpack provides 32-bit unpacking primitives. @$slots = unpack($self->{unpack_code} . "*", $str); } else { # We need to unpack 32 bits at a time and combine. my @b32_values = unpack($self->{unpack_code} . "*", $str); my @b64_values = (); for (my $i = 0; $i < $#b32_values; $i += 2) { # TODO(csilvers): if this is a 32-bit perl, the math below # could end up in a too-large int, which perl will promote # to a double, losing necessary precision. Deal with that. # Right now, we just die. my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); if ($self->{unpack_code} eq 'N') { # big-endian ($lo, $hi) = ($hi, $lo); } my $value = $lo + $hi * (2**32); if (!$self->{perl_is_64bit} && # check value is exactly represented (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { ::error("Need a 64-bit perl to process this 64-bit profile.\n"); } push(@b64_values, $value); } @$slots = @b64_values; } } # Access the i-th long in the file (logically), or -1 at EOF. sub get { my ($self, $idx) = @_; my $slots = $self->{slots}; while ($#$slots >= 0) { if ($idx < $self->{base}) { # The only time we expect a reference to $slots[$i - something] # after referencing $slots[$i] is reading the very first header. # Since $stride > |header|, that shouldn't cause any lookback # errors. And everything after the header is sequential. print STDERR "Unexpected look-back reading CPU profile"; return -1; # shrug, don't know what better to return } elsif ($idx > $self->{base} + $#$slots) { $self->overflow(); } else { return $slots->[$idx - $self->{base}]; } } # If we get here, $slots is [], which means we've reached EOF return -1; # unique since slots is supposed to hold unsigned numbers } } # Reads the top, 'header' section of a profile, and returns the last # line of the header, commonly called a 'header line'. The header # section of a profile consists of zero or more 'command' lines that # are instructions to jeprof, which jeprof executes when reading the # header. All 'command' lines start with a %. After the command # lines is the 'header line', which is a profile-specific line that # indicates what type of profile it is, and perhaps other global # information about the profile. 
For instance, here's a header line
# for a heap profile:
#     heap profile:     53:    38236 [  5525:  1284029] @ heapprofile
# For historical reasons, the CPU profile does not contain a text-
# readable header line.  If the profile looks like a CPU profile,
# this function returns "".  If no header line could be found, this
# function returns undef.
#
# The following commands are recognized:
#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
#
# The input file should be in binmode.
sub ReadProfileHeader {
  local *PROFILE = shift;
  my $firstchar = "";
  my $line = "";
  read(PROFILE, $firstchar, 1);
  seek(PROFILE, -1, 1);                    # unread the firstchar
  if ($firstchar !~ /[[:print:]]/) {       # is not a text character
    return "";
  }
  while (defined($line = <PROFILE>)) {
    $line =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
    if ($line =~ /^%warn\s+(.*)/) {
      # 'warn' command
      # Note this matches both '%warn blah\n' and '%warn\n'.
      print STDERR "WARNING: $1\n";   # print the rest of the line
    } elsif ($line =~ /^%/) {
      print STDERR "Ignoring unknown command from profile header: $line";
    } else {
      # End of commands, must be the header line.
      return $line;
    }
  }
  return undef;     # got to EOF without seeing a header line
}

sub IsSymbolizedProfileFile {
  my $file_name = shift;
  if (!(-e $file_name) || !(-r $file_name)) {
    return 0;
  }
  # Check if the file contains a symbol-section marker.
  open(TFILE, "<$file_name");
  binmode TFILE;
  my $firstline = ReadProfileHeader(*TFILE);
  close(TFILE);
  if (!$firstline) {
    return 0;
  }
  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $symbol_marker = $&;
  return $firstline =~ /^--- *$symbol_marker/;
}

# Parse profile generated by common/profiler.cc and return a reference
# to a map:
#      $result->{version}     Version number of profile file
#      $result->{period}      Sampling period (in microseconds)
#      $result->{profile}     Profile object
#      $result->{threads}     Map of thread IDs to profile objects
#      $result->{map}         Memory map info from profile
#      $result->{pcs}         Hash of all PC values seen, key is hex address
sub ReadProfile {
  my $prog = shift;
  my $fname = shift;
  my $result;            # return value

  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $contention_marker = $&;
  $GROWTH_PAGE =~ m,[^/]+$,;        # matches everything after the last slash
  my $growth_marker = $&;
  $SYMBOL_PAGE =~ m,[^/]+$,;        # matches everything after the last slash
  my $symbol_marker = $&;
  $PROFILE_PAGE =~ m,[^/]+$,;       # matches everything after the last slash
  my $profile_marker = $&;
  $HEAP_PAGE =~ m,[^/]+$,;          # matches everything after the last slash
  my $heap_marker = $&;

  # Look at first line to see if it is a heap or a CPU profile.
  # CPU profile may start with no header at all, and just binary data
  # (starting with \0\0\0\0) -- in that case, don't try to read the
  # whole firstline, since it may be gigabytes(!) of data.
  open(PROFILE, "<$fname") || error("$fname: $!\n");
  binmode PROFILE;      # New perls do UTF-8 processing
  my $header = ReadProfileHeader(*PROFILE);
  if (!defined($header)) {   # means "at EOF"
    error("Profile is empty.\n");
  }

  my $symbols;
  if ($header =~ m/^--- *$symbol_marker/o) {
    # Verify that the user asked for a symbolized profile
    if (!$main::use_symbolized_profile) {
      # we have both a binary and symbolized profiles, abort
      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
            "a binary arg. Try again without passing\n   $prog\n");
    }
    # Read the symbol section of the symbolized profile file.
    $symbols = ReadSymbols(*PROFILE{IO});
    # Read the next line to get the header for the remaining profile.
$header = ReadProfileHeader(*PROFILE) || ""; } if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) { # Skip "--- ..." line for profile types that have their own headers. $header = ReadProfileHeader(*PROFILE) || ""; } $main::profile_type = ''; if ($header =~ m/^heap profile:.*$growth_marker/o) { $main::profile_type = 'growth'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap profile:/) { $main::profile_type = 'heap'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap/) { $main::profile_type = 'heap'; $result = ReadThreadedHeapProfile($prog, $fname, $header); } elsif ($header =~ m/^--- *$contention_marker/o) { $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *Stacks:/) { print STDERR "Old format contention profile: mistakenly reports " . "condition variable signals as lock contentions.\n"; $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *$profile_marker/) { # the binary cpu profile data starts immediately after this line $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } else { if (defined($symbols)) { # a symbolized profile contains a format we don't recognize, bail out error("$fname: Cannot recognize profile section after symbols.\n"); } # no ascii header present -- must be a CPU profile $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } close(PROFILE); # if we got symbols along with the profile, return those as well if (defined($symbols)) { $result->{symbols} = $symbols; } return $result; } # Subtract one from caller pc so we map back to call instr. # However, don't do this if we're reading a symbolized profile # file, in which case the subtract-one was done when the file # was written. # # We apply the same logic to all readers, though ReadCPUProfile uses an # independent implementation. sub FixCallerAddresses { my $stack = shift; # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile() # dumps unadjusted profiles. { $stack =~ /(\s)/; my $delimiter = $1; my @addrs = split(' ', $stack); my @fixedaddrs; $#fixedaddrs = $#addrs; if ($#addrs >= 0) { $fixedaddrs[0] = $addrs[0]; } for (my $i = 1; $i <= $#addrs; $i++) { $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); } return join $delimiter, @fixedaddrs; } } # CPU profile reader sub ReadCPUProfile { my $prog = shift; my $fname = shift; # just used for logging local *PROFILE = shift; my $version; my $period; my $i; my $profile = {}; my $pcs = {}; # Parse string into array of slots. my $slots = CpuProfileStream->new(*PROFILE, $fname); # Read header. The current header version is a 5-element structure # containing: # 0: header count (always 0) # 1: header "words" (after this one: 3) # 2: format version (0) # 3: sampling period (usec) # 4: unused padding (always 0) if ($slots->get(0) != 0 ) { error("$fname: not a profile file, or old format profile file\n"); } $i = 2 + $slots->get(1); $version = $slots->get(2); $period = $slots->get(3); # Do some sanity checking on these header values. if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { error("$fname: not a profile file, or corrupted profile file\n"); } # Parse profile while ($slots->get($i) != -1) { my $n = $slots->get($i++); my $d = $slots->get($i++); if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? my $addr = sprintf("0%o", $i * ($address_length == 8 ? 
4 : 8));
      print STDERR "At index $i (address $addr):\n";
      error("$fname: stack trace depth >= 2**16\n");
    }
    if ($slots->get($i) == 0) {
      # End of profile data marker
      $i += $d;
      last;
    }

    # Make key out of the stack entries
    my @k = ();
    for (my $j = 0; $j < $d; $j++) {
      my $pc = $slots->get($i+$j);
      # Subtract one from caller pc so we map back to call instr.
      $pc--;
      $pc = sprintf("%0*x", $address_length, $pc);
      $pcs->{$pc} = 1;
      push @k, $pc;
    }

    AddEntry($profile, (join "\n", @k), $n);
    $i += $d;
  }

  # Parse map
  my $map = '';
  seek(PROFILE, $i * 4, 0);
  read(PROFILE, $map, (stat PROFILE)[7]);

  my $r = {};
  $r->{version} = $version;
  $r->{period} = $period;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;

  return $r;
}

sub HeapProfileIndex {
  my $index = 1;
  if ($main::opt_inuse_space) {
    $index = 1;
  } elsif ($main::opt_inuse_objects) {
    $index = 0;
  } elsif ($main::opt_alloc_space) {
    $index = 3;
  } elsif ($main::opt_alloc_objects) {
    $index = 2;
  }
  return $index;
}

sub ReadMappedLibraries {
  my $fh = shift;
  my $map = "";
  # Read the /proc/self/maps data
  while (<$fh>) {
    s/\r//g;  # turn windows-looking lines into unix-looking lines
    $map .= $_;
  }
  return $map;
}

sub ReadMemoryMap {
  my $fh = shift;
  my $map = "";
  # Read /proc/self/maps data as formatted by DumpAddressMap()
  my $buildvar = "";
  while (<$fh>) {
    s/\r//g;  # turn windows-looking lines into unix-looking lines
    # Parse "build=<dir>" specification if supplied
    if (m/^\s*build=(.*)\n/) {
      $buildvar = $1;
    }
    # Expand "$build" variable if available
    $_ =~ s/\$build\b/$buildvar/g;
    $map .= $_;
  }
  return $map;
}

sub AdjustSamples {
  my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
  if ($sample_adjustment) {
    if ($sampling_algorithm == 2) {
      # Remote-heap version 2
      # The sampling frequency is the rate of a Poisson process.
      # This means that the probability of sampling an allocation of
      # size X with sampling rate Y is 1 - exp(-X/Y)
      if ($n1 != 0) {
        my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
        my $scale_factor = 1/(1 - exp(-$ratio));
        $n1 *= $scale_factor;
        $s1 *= $scale_factor;
      }
      if ($n2 != 0) {
        my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
        my $scale_factor = 1/(1 - exp(-$ratio));
        $n2 *= $scale_factor;
        $s2 *= $scale_factor;
      }
    } else {
      # Remote-heap version 1
      my $ratio;
      $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
      if ($ratio < 1) {
        $n1 /= $ratio;
        $s1 /= $ratio;
      }
      $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
      if ($ratio < 1) {
        $n2 /= $ratio;
        $s2 /= $ratio;
      }
    }
  }
  return ($n1, $s1, $n2, $s2);
}

sub ReadHeapProfile {
  my $prog = shift;
  local *PROFILE = shift;
  my $header = shift;

  my $index = HeapProfileIndex();

  # Find the type of this profile.  The header line looks like:
  #    heap profile:   1246:  8800744 [  1246:  8800744] @ <heap-url>/266053
  # There are two pairs <count: size>, the first inuse objects/space, and the
  # second allocated objects/space.  This is followed optionally by a profile
  # type, and if that is present, optionally by a sampling frequency.
  # For remote heap profiles (v1):
  # The interpretation of the sampling frequency is that the profiler, for
  # each sample, calculates a uniformly distributed random integer less than
  # the given value, and records the next sample after that many bytes have
  # been allocated.  Therefore, the expected sample interval is half of the
  # given frequency.  By default, if not specified, the expected sample
  # interval is 128KB.  Only remote-heap-page profiles are adjusted for
  # sample size.
  # For remote heap profiles (v2):
  # The sampling frequency is the rate of a Poisson process.
This means that
  # the probability of sampling an allocation of size X with sampling rate Y
  # is 1 - exp(-X/Y)
  # For version 2, a typical header line might look like this:
  # heap profile:   1922: 127792360 [  1922: 127792360] @ heap_v2/524288
  # the trailing number (524288) is the sampling rate. (Version 1 showed
  # double the 'rate' here)
  my $sampling_algorithm = 0;
  my $sample_adjustment = 0;
  chomp($header);
  my $type = "unknown";
  if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
    if (defined($6) && ($6 ne '')) {
      $type = $6;
      my $sample_period = $8;
      # $type is "heapprofile" for profiles generated by the
      # heap-profiler, and either "heap" or "heap_v2" for profiles
      # generated by sampling directly within tcmalloc.  It can also
      # be "growth" for heap-growth profiles.  The first is typically
      # found for profiles generated locally, and the others for
      # remote profiles.
      if (($type eq "heapprofile") || ($type !~ /heap/) ) {
        # No need to adjust for the sampling rate with heap-profiler-derived data
        $sampling_algorithm = 0;
      } elsif ($type =~ /_v2/) {
        $sampling_algorithm = 2;     # version 2 sampling
        if (defined($sample_period) && ($sample_period ne '')) {
          $sample_adjustment = int($sample_period);
        }
      } else {
        $sampling_algorithm = 1;     # version 1 sampling
        if (defined($sample_period) && ($sample_period ne '')) {
          $sample_adjustment = int($sample_period)/2;
        }
      }
    } else {
      # We detect whether or not this is a remote-heap profile by checking
      # that the total-allocated stats ($n2,$s2) are exactly the
      # same as the in-use stats ($n1,$s1).  It is remotely conceivable
      # that a non-remote-heap profile may pass this check, but it is hard
      # to imagine how that could happen.
      # In this case it's so old it's guaranteed to be remote-heap version 1.
      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
      if (($n1 == $n2) && ($s1 == $s2)) {
        # This is likely to be a remote-heap based sample profile
        $sampling_algorithm = 1;
      }
    }
  }

  if ($sampling_algorithm > 0) {
    # For remote-heap generated profiles, adjust the counts and sizes to
    # account for the sample rate (we sample once every 128KB by default).
    if ($sample_adjustment == 0) {
      # Turn on profile adjustment.
      $sample_adjustment = 128*1024;
      print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
    } else {
      printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
                     $sample_adjustment);
    }
    if ($sampling_algorithm > 1) {
      # We don't bother printing anything for the original version (version 1)
      printf STDERR "Heap version $sampling_algorithm\n";
    }
  }

  my $profile = {};
  my $pcs = {};
  my $map = "";

  while (<PROFILE>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    if (/^MAPPED_LIBRARIES:/) {
      $map .= ReadMappedLibraries(*PROFILE);
      last;
    }

    if (/^--- Memory map:/) {
      $map .= ReadMemoryMap(*PROFILE);
      last;
    }

    # Read entry of the form:
    #   <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
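    # e.g. (hypothetical values):
    #     12:     1024 [   48:     4096] @ 0x4004f0 0x400600
    # where the first pair is in-use objects/bytes and the second is
    # cumulative allocated objects/bytes.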
    s/^\s*//;
    s/\s*$//;
    if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
      my $stack = $5;
      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
                                 $n1, $s1, $n2, $s2);
      AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
    }
  }

  my $r = {};
  $r->{version} = "heap";
  $r->{period} = 1;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

sub ReadThreadedHeapProfile {
  my ($prog, $fname, $header) = @_;

  my $index = HeapProfileIndex();
  my $sampling_algorithm = 0;
  my $sample_adjustment = 0;
  chomp($header);
  my $type = "unknown";
  # Assuming a very specific type of header for now.
  if ($header =~ m"^heap_v2/(\d+)") {
    $type = "_v2";
    $sampling_algorithm = 2;
    $sample_adjustment = int($1);
  }
  if ($type ne "_v2" || !defined($sample_adjustment)) {
    die "Threaded heap profiles require v2 sampling with a sample rate\n";
  }

  my $profile = {};
  my $thread_profiles = {};
  my $pcs = {};
  my $map = "";
  my $stack = "";

  while (<PROFILE>) {
    s/\r//g;
    if (/^MAPPED_LIBRARIES:/) {
      $map .= ReadMappedLibraries(*PROFILE);
      last;
    }

    if (/^--- Memory map:/) {
      $map .= ReadMemoryMap(*PROFILE);
      last;
    }

    # Read entry of the form:
    # @ a1 a2 ... an
    #   t*: <count1>: <bytes1> [<count2>: <bytes2>]
    #   t1: <count1>: <bytes1> [<count2>: <bytes2>]
    #     ...
    #   tn: <count1>: <bytes1> [<count2>: <bytes2>]
    s/^\s*//;
    s/\s*$//;
    if (m/^@\s+(.*)$/) {
      $stack = $1;
    } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
      if ($stack eq "") {
        # Still in the header, so this is just a per-thread summary.
        next;
      }
      my $thread = $2;
      my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
                                 $n1, $s1, $n2, $s2);
      if ($thread eq "*") {
        AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
      } else {
        if (!exists($thread_profiles->{$thread})) {
          $thread_profiles->{$thread} = {};
        }
        AddEntries($thread_profiles->{$thread}, $pcs,
                   FixCallerAddresses($stack), $counts[$index]);
      }
    }
  }

  my $r = {};
  $r->{version} = "heap";
  $r->{period} = 1;
  $r->{profile} = $profile;
  $r->{threads} = $thread_profiles;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

sub ReadSynchProfile {
  my $prog = shift;
  local *PROFILE = shift;
  my $header = shift;

  my $map = '';
  my $profile = {};
  my $pcs = {};
  my $sampling_period = 1;
  my $cyclespernanosec = 2.8;   # Default assumption for old binaries
  my $seen_clockrate = 0;
  my $line;

  my $index = 0;
  if ($main::opt_total_delay) {
    $index = 0;
  } elsif ($main::opt_contentions) {
    $index = 1;
  } elsif ($main::opt_mean_delay) {
    $index = 2;
  }

  while ( $line = <PROFILE> ) {
    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
    if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
      my ($cycles, $count, $stack) = ($1, $2, $3);

      # Convert cycles to nanoseconds
      $cycles /= $cyclespernanosec;

      # Adjust for sampling done by application
      $cycles *= $sampling_period;
      $count *= $sampling_period;

      my @values = ($cycles, $count, $cycles / $count);
      AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
    } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ ||
              $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
      my ($cycles, $stack) = ($1, $2);
      if ($cycles !~ /^\d+$/) {
        next;
      }

      # Convert cycles to nanoseconds
      $cycles /= $cyclespernanosec;

      # Adjust for sampling done by application
      $cycles *= $sampling_period;

      AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);

    } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
      my ($variable, $value) = ($1,$2);
      for ($variable, $value) {
        s/^\s+//;
        s/\s+$//;
      }
      if ($variable eq "cycles/second") {
        $cyclespernanosec = $value / 1e9;
        $seen_clockrate = 1;
      } elsif ($variable eq "sampling period") {
        $sampling_period = $value;
      } elsif ($variable eq "ms since reset") {
        # Currently nothing is done with this value in jeprof
        # So we just silently ignore it for now
      } elsif ($variable eq "discarded samples") {
        # Currently nothing is done with this value in jeprof
        # So we just silently ignore it for now
      } else {
        printf STDERR ("Ignoring unknown variable in /contention output: " .
                       "'%s' = '%s'\n", $variable, $value);
      }
    } else {
      # Memory map entry
      $map .= $line;
    }
  }

  if (!$seen_clockrate) {
    printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
                   $cyclespernanosec);
  }

  my $r = {};
  $r->{version} = 0;
  $r->{period} = $sampling_period;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

# Given a hex value in the form "0x1abcd" or "1abcd", return either
# "0001abcd" or "000000000001abcd", depending on the current (global)
# address length.
sub HexExtend {
  my $addr = shift;

  $addr =~ s/^(0x)?0*//;
  my $zeros_needed = $address_length - length($addr);
  if ($zeros_needed < 0) {
    printf STDERR "Warning: address $addr is longer than address length $address_length\n";
    return $addr;
  }

  return ("0" x $zeros_needed) . $addr;
}

##### Symbol extraction #####

# Aggressively search the lib_prefix values for the given library
# If all else fails, just return the name of the library unmodified.
# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
# it will search the following locations in this order, until it finds a file:
#   /my/path/lib/dir/mylib.so
#   /other/path/lib/dir/mylib.so
#   /my/path/dir/mylib.so
#   /other/path/dir/mylib.so
#   /my/path/mylib.so
#   /other/path/mylib.so
#   /lib/dir/mylib.so              (returned as last resort)
sub FindLibrary {
  my $file = shift;
  my $suffix = $file;

  # Search for the library as described above
  do {
    foreach my $prefix (@prefix_list) {
      my $fullpath = $prefix . $suffix;
      if (-e $fullpath) {
        return $fullpath;
      }
    }
  } while ($suffix =~ s|^/[^/]+/|/|);
  return $file;
}

# Return path to library with debugging symbols.
# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
sub DebuggingLibrary {
  my $file = shift;
  if ($file =~ m|^/|) {
    if (-f "/usr/lib/debug$file") {
      return "/usr/lib/debug$file";
    } elsif (-f "/usr/lib/debug$file.debug") {
      return "/usr/lib/debug$file.debug";
    }
  }
  return undef;
}

# Parse text section header of a library using objdump
sub ParseTextSectionHeaderFromObjdump {
  my $lib = shift;
  my $size = undef;
  my $vma;
  my $file_offset;
  # Get objdump output from the library file to figure out how to
  # map between mapped addresses and addresses in the library.
  my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
  while (<OBJDUMP>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    # Idx Name          Size      VMA       LMA       File off  Algn
    #  10 .text         00104b2c  420156f0  420156f0  000156f0  2**4
    # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
    # offset may still be 8.  But AddressSub below will still handle that.
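    # With that layout, for the ".text" row the fields of interest are
    # Size ($x[2]), VMA ($x[3]) and "File off" ($x[5]).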
    my @x = split;
    if (($#x >= 6) && ($x[1] eq '.text')) {
      $size = $x[2];
      $vma = $x[3];
      $file_offset = $x[5];
      last;
    }
  }
  close(OBJDUMP);

  if (!defined($size)) {
    return undef;
  }

  my $r = {};
  $r->{size} = $size;
  $r->{vma} = $vma;
  $r->{file_offset} = $file_offset;

  return $r;
}

# Parse text section header of a library using otool (on OS X)
sub ParseTextSectionHeaderFromOtool {
  my $lib = shift;
  my $size = undef;
  my $vma = undef;
  my $file_offset = undef;
  # Get otool output from the library file to figure out how to
  # map between mapped addresses and addresses in the library.
  my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
  open(OTOOL, "$command |") || error("$command: $!\n");
  my $cmd = "";
  my $sectname = "";
  my $segname = "";
  foreach my $line (<OTOOL>) {
    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
    # Load command <#>
    #       cmd LC_SEGMENT
    # [...]
    # Section
    #   sectname __text
    #    segname __TEXT
    #       addr 0x000009f8
    #       size 0x00018b9e
    #     offset 2552
    #      align 2^2 (4)
    # We will need to strip off the leading 0x from the hex addresses,
    # and convert the offset into hex.
    if ($line =~ /Load command/) {
      $cmd = "";
      $sectname = "";
      $segname = "";
    } elsif ($line =~ /Section/) {
      $sectname = "";
      $segname = "";
    } elsif ($line =~ /cmd (\w+)/) {
      $cmd = $1;
    } elsif ($line =~ /sectname (\w+)/) {
      $sectname = $1;
    } elsif ($line =~ /segname (\w+)/) {
      $segname = $1;
    } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
               $sectname eq "__text" &&
               $segname eq "__TEXT")) {
      next;
    } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
      $vma = $1;
    } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
      $size = $1;
    } elsif ($line =~ /\boffset ([0-9]+)/) {
      $file_offset = sprintf("%016x", $1);
    }
    if (defined($vma) && defined($size) && defined($file_offset)) {
      last;
    }
  }
  close(OTOOL);

  if (!defined($vma) || !defined($size) || !defined($file_offset)) {
    return undef;
  }

  my $r = {};
  $r->{size} = $size;
  $r->{vma} = $vma;
  $r->{file_offset} = $file_offset;

  return $r;
}

sub ParseTextSectionHeader {
  # obj_tool_map("otool") is only defined if we're in a Mach-O environment
  if (defined($obj_tool_map{"otool"})) {
    my $r = ParseTextSectionHeaderFromOtool(@_);
    if (defined($r)){
      return $r;
    }
  }
  # If otool doesn't work, or we don't have it, fall back to objdump
  return ParseTextSectionHeaderFromObjdump(@_);
}

# Split /proc/pid/maps dump into a list of libraries
sub ParseLibraries {
  return if $main::use_symbol_page;  # We don't need libraries info.
  my $prog = Cwd::abs_path(shift);
  my $map = shift;
  my $pcs = shift;

  my $result = [];
  my $h = "[a-f0-9]+";
  my $zero_offset = HexExtend("0");

  my $buildvar = "";
  foreach my $l (split("\n", $map)) {
    if ($l =~ m/^\s*build=(.*)$/) {
      $buildvar = $1;
    }

    my $start;
    my $finish;
    my $offset;
    my $lib;
    if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
      # Full line from /proc/self/maps.  Example:
      #   40000000-40015000 r-xp 00000000 03:01 12845071   /lib/ld-2.3.2.so
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = HexExtend($3);
      $lib = $4;
      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
    } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
      # Cooked line from DumpAddressMap.  Example:
      #   40000000-40015000: /lib/ld-2.3.2.so
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = $zero_offset;
      $lib = $3;
    } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) {
      # PIEs and address space randomization do not play well with our
      # default assumption that main executable is at lowest
      # addresses.
So we're detecting main executable in
      # /proc/self/maps as well.
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = HexExtend($3);
      $lib = $4;
      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
    }
    # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
    # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
    #
    # Example:
    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.so.1 NCH -1
    elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = $zero_offset;
      $lib = FindLibrary($5);
    } else {
      next;
    }

    # Expand "$build" variable if available
    $lib =~ s/\$build\b/$buildvar/g;

    $lib = FindLibrary($lib);

    # Check for pre-relocated libraries, which use pre-relocated symbol tables
    # and thus require adjusting the offset that we'll use to translate
    # VM addresses into symbol table addresses.
    # Only do this if we're not going to fetch the symbol table from a
    # debugging copy of the library.
    if (!DebuggingLibrary($lib)) {
      my $text = ParseTextSectionHeader($lib);
      if (defined($text)) {
        my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
        $offset = AddressAdd($offset, $vma_offset);
      }
    }

    if ($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
    push(@{$result}, [$lib, $start, $finish, $offset]);
  }

  # Append special entry for additional library (not relocated)
  if ($main::opt_lib ne "") {
    my $text = ParseTextSectionHeader($main::opt_lib);
    if (defined($text)) {
      my $start = $text->{vma};
      my $finish = AddressAdd($start, $text->{size});

      push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
    }
  }

  # Append special entry for the main program.  This covers
  # 0..max_pc_value_seen, so that we assume pc values not found in one
  # of the library ranges will be treated as coming from the main
  # program binary.
  my $min_pc = HexExtend("0");
  my $max_pc = $min_pc;          # find the maximal PC value in any sample
  foreach my $pc (keys(%{$pcs})) {
    if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
  }
  push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);

  return $result;
}

# Add two hex addresses of length $address_length.
# Run jeprof --test for unit test if this is changed.
sub AddressAdd {
  my $addr1 = shift;
  my $addr2 = shift;
  my $sum;

  if ($address_length == 8) {
    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
    $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
    return sprintf("%08x", $sum);

  } else {
    # Do the addition in 7-nibble chunks to trivialize carry handling.

    if ($main::opt_debug and $main::opt_test) {
      print STDERR "AddressAdd $addr1 + $addr2 = ";
    }

    my $a1 = substr($addr1,-7);
    $addr1 = substr($addr1,0,-7);
    my $a2 = substr($addr2,-7);
    $addr2 = substr($addr2,0,-7);
    $sum = hex($a1) + hex($a2);
    my $c = 0;
    if ($sum > 0xfffffff) {
      $c = 1;
      $sum -= 0x10000000;
    }
    my $r = sprintf("%07x", $sum);

    $a1 = substr($addr1,-7);
    $addr1 = substr($addr1,0,-7);
    $a2 = substr($addr2,-7);
    $addr2 = substr($addr2,0,-7);
    $sum = hex($a1) + hex($a2) + $c;
    $c = 0;
    if ($sum > 0xfffffff) {
      $c = 1;
      $sum -= 0x10000000;
    }
    $r = sprintf("%07x", $sum) . $r;

    $sum = hex($addr1) + hex($addr2) + $c;
    if ($sum > 0xff) { $sum -= 0x100; }
    $r = sprintf("%02x", $sum) . $r;

    if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }

    return $r;
  }
}

# Subtract two hex addresses of length $address_length.
# Run jeprof --test for unit test if this is changed.
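# For example, with $address_length == 8,
#   AddressSub("00000010", "00000001") returns "0000000f",
# and wraparound is handled modulo 2**32.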
sub AddressSub { my $addr1 = shift; my $addr2 = shift; my $diff; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); return sprintf("%08x", $diff); } else { # Do the subtraction in 7-nibble chunks to trivialize borrow handling. # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } my $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); my $a2 = hex(substr($addr2,-7)); $addr2 = substr($addr2,0,-7); my $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; my $r = sprintf("%07x", $diff); $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); $a2 = hex(substr($addr2,-7)) + $b; $addr2 = substr($addr2,0,-7); $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; $r = sprintf("%07x", $diff) . $r; $a1 = hex($addr1); $a2 = hex($addr2) + $b; if ($a2 > $a1) { $a1 += 0x100; } $diff = $a1 - $a2; $r = sprintf("%02x", $diff) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Increment a hex address of length $address_length. # Run jeprof --test for unit test if this is changed. sub AddressInc { my $addr = shift; my $sum; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $sum = (hex($addr)+1) % (0x10000000 * 16); return sprintf("%08x", $sum); } else { # Do the addition in 7-nibble chunks to trivialize carry handling. # We are always doing this to step through the addresses in a function, # and will almost never overflow the first chunk, so we check for this # case and exit early. # if ($main::opt_debug) { print STDERR "AddressInc $addr = "; } my $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; my $r = sprintf("%07x", $sum); if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "0000000"; } $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; $r = sprintf("%07x", $sum) . $r; if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "00000000000000"; } $sum = hex($addr) + 1; if ($sum > 0xff) { $sum -= 0x100; } $r = sprintf("%02x", $sum) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Extract symbols for all PC values found in profile sub ExtractSymbols { my $libs = shift; my $pcset = shift; my $symbols = {}; # Map each PC value to the containing library. To make this faster, # we sort libraries by their starting pc value (highest first), and # advance through the libraries as we advance the pc. Sometimes the # addresses of libraries may overlap with the addresses of the main # binary, so to make sure the libraries 'win', we iterate over the # libraries in reverse order (which assumes the binary doesn't start # in the middle of a library, which seems a fair assumption). my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { my $libname = $lib->[0]; my $start = $lib->[1]; my $finish = $lib->[2]; my $offset = $lib->[3]; # Use debug library if it exists my $debug_libname = DebuggingLibrary($libname); if ($debug_libname) { $libname = $debug_libname; } # Get list of pcs that belong in this library. my $contained = []; my ($start_pc_index, $finish_pc_index); # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index].
for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; $finish_pc_index--) { last if $pcs[$finish_pc_index - 1] le $finish; } # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; $start_pc_index--) { last if $pcs[$start_pc_index - 1] lt $start; } # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, # in case there are overlaps in libraries and the main binary. @{$contained} = splice(@pcs, $start_pc_index, $finish_pc_index - $start_pc_index); # Map to symbols MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); } return $symbols; } # Map list of PC values to symbols for a given image sub MapToSymbols { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; my $debug = 0; # Ignore empty binaries if ($#{$pclist} < 0) { return; } # Figure out the addr2line command to use my $addr2line = $obj_tool_map{"addr2line"}; my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); if (exists $obj_tool_map{"addr2line_pdb"}) { $addr2line = $obj_tool_map{"addr2line_pdb"}; $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); } # If "addr2line" isn't installed on the system at all, just use # nm to get what info we can (function names, but not line numbers). if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { MapSymbolsWithNM($image, $offset, $pclist, $symbols); return; } # "addr2line -i" can produce a variable number of lines per input # address, with no separator that allows us to tell when data for # the next address starts. So we find the address for a special # symbol (_fini) and interleave this address between all real # addresses passed to addr2line. The name of this special symbol # can then be used as a separator. $sep_address = undef; # May be filled in by MapSymbolsWithNM() my $nm_symbols = {}; MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); if (defined($sep_address)) { # Only add " -i" to addr2line if the binary supports it. # addr2line --help returns 0, but not if it sees an unknown flag first. if (system("$cmd -i --help >$dev_null 2>&1") == 0) { $cmd .= " -i"; } else { $sep_address = undef; # no need for sep_address if we don't support -i } } # Make file with all PC values with intervening 'sep_address' so # that we can reliably detect the end of inlined function list open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); if ($debug) { print("---- $image ---\n"); } for (my $i = 0; $i <= $#{$pclist}; $i++) { # addr2line always reads hex addresses, and does not need '0x' prefix. if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); if (defined($sep_address)) { printf ADDRESSES ("%s\n", $sep_address); } } close(ADDRESSES); if ($debug) { print("----\n"); system("cat", $main::tmpfile_sym); print("----\n"); system("$cmd < " . ShellEscape($main::tmpfile_sym)); print("----\n"); } open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . 
" |") || error("$cmd: $!\n"); my $count = 0; # Index in pclist while () { # Read fullfunction and filelineinfo from next pair of lines s/\r?\n$//g; my $fullfunction = $_; $_ = ; s/\r?\n$//g; my $filelinenum = $_; if (defined($sep_address) && $fullfunction eq $sep_symbol) { # Terminating marker for data for this address $count++; next; } $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths my $pcstr = $pclist->[$count]; my $function = ShortFunctionName($fullfunction); my $nms = $nm_symbols->{$pcstr}; if (defined($nms)) { if ($fullfunction eq '??') { # nm found a symbol for us. $function = $nms->[0]; $fullfunction = $nms->[2]; } else { # MapSymbolsWithNM tags each routine with its starting address, # useful in case the image has multiple occurrences of this # routine. (It uses a syntax that resembles template paramters, # that are automatically stripped out by ShortFunctionName().) # addr2line does not provide the same information. So we check # if nm disambiguated our symbol, and if so take the annotated # (nm) version of the routine-name. TODO(csilvers): this won't # catch overloaded, inlined symbols, which nm doesn't see. # Better would be to do a check similar to nm's, in this fn. if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn $function = $nms->[0]; $fullfunction = $nms->[2]; } } } # Prepend to accumulated symbols for pcstr # (so that caller comes before callee) my $sym = $symbols->{$pcstr}; if (!defined($sym)) { $sym = []; $symbols->{$pcstr} = $sym; } unshift(@{$sym}, $function, $filelinenum, $fullfunction); if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } if (!defined($sep_address)) { # Inlining is off, so this entry ends immediately $count++; } } close(SYMBOLS); } # Use nm to map the list of referenced PCs to symbols. Return true iff we # are able to read procedure information via nm. sub MapSymbolsWithNM { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; # Get nm output sorted by increasing address my $symbol_table = GetProcedureBoundaries($image, "."); if (!%{$symbol_table}) { return 0; } # Start addresses are already the right length (8 or 16 hex digits). my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } keys(%{$symbol_table}); if ($#names < 0) { # No symbols: just use addresses foreach my $pc (@{$pclist}) { my $pcstr = "0x" . $pc; $symbols->{$pc} = [$pcstr, "?", $pcstr]; } return 0; } # Sort addresses so we can do a join against nm output my $index = 0; my $fullname = $names[0]; my $name = ShortFunctionName($fullname); foreach my $pc (sort { $a cmp $b } @{$pclist}) { # Adjust for mapped offset my $mpc = AddressSub($pc, $offset); while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ $index++; $fullname = $names[$index]; $name = ShortFunctionName($fullname); } if ($mpc lt $symbol_table->{$fullname}->[1]) { $symbols->{$pc} = [$name, "?", $fullname]; } else { my $pcstr = "0x" . 
# Use nm to map the list of referenced PCs to symbols. Return true iff we # are able to read procedure information via nm. sub MapSymbolsWithNM { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; # Get nm output sorted by increasing address my $symbol_table = GetProcedureBoundaries($image, "."); if (!%{$symbol_table}) { return 0; } # Start addresses are already the right length (8 or 16 hex digits). my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } keys(%{$symbol_table}); if ($#names < 0) { # No symbols: just use addresses foreach my $pc (@{$pclist}) { my $pcstr = "0x" . $pc; $symbols->{$pc} = [$pcstr, "?", $pcstr]; } return 0; } # Sort addresses so we can do a join against nm output my $index = 0; my $fullname = $names[0]; my $name = ShortFunctionName($fullname); foreach my $pc (sort { $a cmp $b } @{$pclist}) { # Adjust for mapped offset my $mpc = AddressSub($pc, $offset); while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])) { $index++; $fullname = $names[$index]; $name = ShortFunctionName($fullname); } if ($mpc lt $symbol_table->{$fullname}->[1]) { $symbols->{$pc} = [$name, "?", $fullname]; } else { my $pcstr = "0x" . $pc; $symbols->{$pc} = [$pcstr, "?", $pcstr]; } } return 1; } sub ShortFunctionName { my $function = shift; while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type return $function; } # Trim overly long symbols found in disassembler output sub CleanDisassembly { my $d = shift; while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax) while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments return $d; } # Clean file name for display sub CleanFileName { my ($f) = @_; $f =~ s|^/proc/self/cwd/||; $f =~ s|^\./||; return $f; } # Make address relative to section and clean up for display sub UnparseAddress { my ($offset, $address) = @_; $address = AddressSub($address, $offset); $address =~ s/^0x//; $address =~ s/^0*//; return $address; } ##### Miscellaneous ##### # Find the right versions of the above object tools to use. The # argument is the program file being analyzed, and should be an ELF # 32-bit or ELF 64-bit executable file. The location of the tools # is determined by considering the following options in this order: # 1) --tools option, if set # 2) JEPROF_TOOLS environment variable, if set # 3) the environment sub ConfigureObjTools { my $prog_file = shift; # Check for the existence of $prog_file because /usr/bin/file does not # predictably return error status in prod. (-e $prog_file) || error("$prog_file does not exist.\n"); my $file_type = undef; if (-e "/usr/bin/file") { # Follow symlinks (at least for systems where "file" supports that). my $escaped_prog_file = ShellEscape($prog_file); $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || /usr/bin/file $escaped_prog_file`; } elsif ($^O eq "MSWin32") { $file_type = "MS Windows"; } else { print STDERR "WARNING: Can't determine the file type of $prog_file\n"; } if ($file_type =~ /64-bit/) { # Change $address_length to 16 if the program file is ELF 64-bit. # We can't detect this from many (most?) heap or lock contention # profiles, since the actual addresses referenced are generally in low # memory even for 64-bit programs. $address_length = 16; } if ($file_type =~ /MS Windows/) { # For windows, we provide a version of nm and addr2line as part of # the opensource release, which is capable of parsing # Windows-style PDB executables. It should live in the path, or # in the same directory as jeprof. $obj_tool_map{"nm_pdb"} = "nm-pdb"; $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb"; } if ($file_type =~ /Mach-O/) { # OS X uses otool to examine Mach-O files, rather than objdump. $obj_tool_map{"otool"} = "otool"; $obj_tool_map{"addr2line"} = "false"; # no addr2line $obj_tool_map{"objdump"} = "false"; # no objdump } # Go fill in %obj_tool_map with the pathnames to use: foreach my $tool (keys %obj_tool_map) { $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool}); } }
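# NOTE (editor's example, using only the syntax documented below; the paths
# are invented): --tools and $JEPROF_TOOLS may mix both entry forms, e.g.
#
#   JEPROF_TOOLS="nm:/opt/llvm/bin/llvm-nm,/usr/local/gnu-" jeprof ...
#
# resolves "nm" through its tool:path entry and every other tool through the
# prefix, e.g. addr2line becomes /usr/local/gnu-addr2line.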
# Returns the path of a caller-specified object tool. If --tools or # JEPROF_TOOLS are specified, then returns the full path to the tool # with that prefix. Otherwise, returns the path unmodified (which # means we will look for it on PATH). sub ConfigureTool { my $tool = shift; my $path; # --tools (or $JEPROF_TOOLS) is a comma separated list, where each # item is either a) a pathname prefix, or b) a map of the form # <tool>:<path>. First we look for an entry of type (b) for our # tool. If one is found, we use it. Otherwise, we consider all the # pathname prefixes in turn, until one yields an existing file. If # none does, we use a default path. my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || ""; if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) { $path = $2; # TODO(csilvers): sanity-check that $path exists? Hard if it's relative. } elsif ($tools ne '') { foreach my $prefix (split(',', $tools)) { next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list if (-x $prefix . $tool) { $path = $prefix . $tool; last; } } if (!$path) { error("No '$tool' found with prefix specified by " . "--tools (or \$JEPROF_TOOLS) '$tools'\n"); } } else { # ... otherwise use the version that exists in the same directory as # jeprof. If there's nothing there, use $PATH. $0 =~ m,[^/]*$,; # this is everything after the last slash my $dirname = $`; # this is everything up to and including the last slash if (-x "$dirname$tool") { $path = "$dirname$tool"; } else { $path = $tool; } } if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; } return $path; } sub ShellEscape { my @escaped_words = (); foreach my $word (@_) { my $escaped_word = $word; if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist $escaped_word =~ s/'/'\\''/g; $escaped_word = "'$escaped_word'"; } push(@escaped_words, $escaped_word); } return join(" ", @escaped_words); }
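# NOTE (editor's illustration of ShellEscape above, not original jeprof text):
# only words containing characters outside the whitelist get quoted, so
# typical commands stay readable:
#
#   ShellEscape("nm", "-n", "/tmp/my binary")  # returns: nm -n '/tmp/my binary'
#   ShellEscape("echo", "it's")                # returns: echo 'it'\''s'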
sub cleanup { unlink($main::tmpfile_sym); unlink(keys %main::tempnames); # We leave any collected profiles in $HOME/jeprof in case the user wants # to look at them later. We print a message informing them of this. if ((scalar(@main::profile_files) > 0) && defined($main::collected_profile)) { if (scalar(@main::profile_files) == 1) { print STDERR "Dynamically gathered profile is in $main::collected_profile\n"; } print STDERR "If you want to investigate this profile further, you can do:\n"; print STDERR "\n"; print STDERR " jeprof \\\n"; print STDERR " $main::prog \\\n"; print STDERR " $main::collected_profile\n"; print STDERR "\n"; } } sub sighandler { cleanup(); exit(1); } sub error { my $msg = shift; print STDERR $msg; cleanup(); exit(1); } # Run $nm_command and collect all the resulting procedure boundaries whose # names match "$regexp"; returns them in a hashtable mapping from # procedure name to a two-element vector of [start address, end address]. sub GetProcedureBoundariesViaNm { my $escaped_nm_command = shift; # shell-escaped my $regexp = shift; my $symbol_table = {}; open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n"); my $last_start = "0"; my $routine = ""; while (<NM>) { s/\r//g; # turn windows-looking lines into unix-looking lines if (m/^\s*([0-9a-f]+) (.) (..*)/) { my $start_val = $1; my $type = $2; my $this_routine = $3; # It's possible for two symbols to share the same address, if # one is a zero-length variable (like __start_google_malloc) or # one symbol is a weak alias to another (like __libc_malloc). # In such cases, we want to ignore all values except for the # actual symbol, which in nm-speak has type "T". The logic # below does this, though it's a bit tricky: what happens when # we have a series of lines with the same address, is the first # one gets queued up to be processed. However, it won't # *actually* be processed until later, when we read a line with # a different address. That means that as long as we're reading # lines with the same address, we have a chance to replace that # item in the queue, which we do whenever we see a 'T' entry -- # that is, a line with type 'T'. If we never see a 'T' entry, # we'll just go ahead and process the first entry (which never # got touched in the queue), and ignore the others. if ($start_val eq $last_start && $type =~ /t/i) { # We are the 'T' symbol at this address, replace previous symbol. $routine = $this_routine; next; } elsif ($start_val eq $last_start) { # We're not the 'T' symbol at this address, so ignore us. next; } if ($this_routine eq $sep_symbol) { $sep_address = HexExtend($start_val); } # Tag this routine with the starting address in case the image # has multiple occurrences of this routine. We use a syntax # that resembles template parameters that are automatically # stripped out by ShortFunctionName() $this_routine .= "<$start_val>"; if (defined($routine) && $routine =~ m/$regexp/) { $symbol_table->{$routine} = [HexExtend($last_start), HexExtend($start_val)]; } $last_start = $start_val; $routine = $this_routine; } elsif (m/^Loaded image name: (.+)/) { # The win32 nm workalike emits information about the binary it is using. if ($main::opt_debug) { print STDERR "Using Image $1\n"; } } elsif (m/^PDB file name: (.+)/) { # The win32 nm workalike emits information about the pdb it is using. if ($main::opt_debug) { print STDERR "Using PDB $1\n"; } } } close(NM); # Handle the last line in the nm output. Unfortunately, we don't know # how big this last symbol is, because we don't know how big the file # is. For now, we just give it a size of 0. # TODO(csilvers): do better here. if (defined($routine) && $routine =~ m/$regexp/) { $symbol_table->{$routine} = [HexExtend($last_start), HexExtend($last_start)]; } return $symbol_table; }
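# NOTE (editor's illustration, with invented addresses): GetProcedureBoundariesViaNm
# above derives each routine's [start, end) range by pairing its own address
# with the address on the *next* "nm -n" line. Given
#
#   0000000000400460 T _init
#   0000000000400500 T main
#   00000000004006a0 T _fini
#
# it records $symbol_table->{"main<0000000000400500>"} =
# ["0000000000400500", "00000000004006a0"]; the "<address>" tag is the
# disambiguator later stripped by ShortFunctionName().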
# Gets the procedure boundaries for all routines in "$image" whose names # match "$regexp" and returns them in a hashtable mapping from procedure # name to a two-element vector of [start address, end address]. # Will return an empty map if nm is not installed or not working properly. sub GetProcedureBoundaries { my $image = shift; my $regexp = shift; # If $image doesn't start with /, then put ./ in front of it. This works # around an obnoxious bug in our probing of nm -f behavior. # "nm -f $image" is supposed to fail on GNU nm, but if: # # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND # b. you have a.out in your current directory (a not uncommon occurrence) # # then "nm -f $image" succeeds because -f only looks at the first letter of # the argument, which looks valid because it's [BbSsPp], and then since # there's no image provided, it looks for a.out and finds it. # # This regex makes sure that $image starts with . or /, forcing the -f # parsing to fail since . and / are not valid formats. $image =~ s#^[^/]#./$&#; # For libc libraries, the copy in /usr/lib/debug contains debugging symbols my $debugging = DebuggingLibrary($image); if ($debugging) { $image = $debugging; } my $nm = $obj_tool_map{"nm"}; my $cppfilt = $obj_tool_map{"c++filt"}; # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm # binary doesn't support --demangle. In addition, for OS X we need # to use the -f flag to get 'flat' nm output (otherwise we don't sort # properly and get incorrect results). Unfortunately, GNU nm uses -f # in an incompatible way. So first we test whether our nm supports # --demangle and -f. my $demangle_flag = ""; my $cppfilt_flag = ""; my $to_devnull = ">$dev_null 2>&1"; if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) { # In this mode, we do "nm --demangle <foo>" $demangle_flag = "--demangle"; $cppfilt_flag = ""; } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) { # In this mode, we do "nm <foo> | c++filt" $cppfilt_flag = " | " . ShellEscape($cppfilt); }; my $flatten_flag = ""; if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) { $flatten_flag = "-f"; } # Finally, in the case $image isn't a debug library, we try again with # -D to at least get *exported* symbols. If we can't use --demangle, # we use c++filt instead, if it exists on this system. my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag, $image) . " 2>$dev_null $cppfilt_flag", ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag, $image) . " 2>$dev_null $cppfilt_flag", # 6nm is for Go binaries ShellEscape("6nm", "$image") . " 2>$dev_null | sort", ); # If the executable is an MS Windows PDB-format executable, we'll # have set up obj_tool_map("nm_pdb"). In this case, we actually # want to use both unix nm and windows-specific nm_pdb, since # PDB-format executables can apparently include dwarf .o files. if (exists $obj_tool_map{"nm_pdb"}) { push(@nm_commands, ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) . " 2>$dev_null"); } foreach my $nm_command (@nm_commands) { my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp); return $symbol_table if (%{$symbol_table}); } my $symbol_table = {}; return $symbol_table; } # The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings. # To make them more readable, we add underscores at interesting places. # This routine removes the underscores, producing the canonical representation # used by jeprof to represent addresses, particularly in the tested routines. sub CanonicalHex { my $arg = shift; return join '', (split '_',$arg); } # Unit test for AddressAdd: sub AddressAddUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressAdd ($row->[0], $row->[1]); if ($sum ne $row->[2]) { printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, $row->[0], $row->[1], $row->[2]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses.
$address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); my $expected = join '', (split '_',$row->[2]); if ($sum ne CanonicalHex($row->[2])) { printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, $row->[0], $row->[1], $row->[2]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressSub: sub AddressSubUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub ($row->[0], $row->[1]); if ($sum ne $row->[3]) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); if ($sum ne CanonicalHex($row->[3])) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressInc: sub AddressIncUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc ($row->[0]); if ($sum ne $row->[4]) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc (CanonicalHex($row->[0])); if ($sum ne CanonicalHex($row->[4])) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Driver for unit tests. # Currently just the address add/subtract/increment routines for 64-bit. 
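# NOTE (editor's gloss, not original jeprof text): each row below is the
# five-tuple [a, b, a+b, a-b, a+1], and the underscores in the 64-bit vectors
# sit at the 7-nibble chunk boundaries that the implementations care about;
# CanonicalHex() simply deletes them, e.g.
#
#   CanonicalHex("00_000000f_afafafa")   # => "00000000fafafafa"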
sub RunUnitTests { my $error_count = 0; # This is a list of tuples [a, b, a+b, a-b, a+1] my $unit_test_data_8 = [ [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)], [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)], [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)], [qw(00000001 ffffffff 00000000 00000002 00000002)], [qw(00000001 fffffff0 fffffff1 00000011 00000002)], ]; my $unit_test_data_16 = [ # The implementation handles data in 7-nibble chunks, so those are the # interesting boundaries. [qw(aaaaaaaa 50505050 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)], [qw(50505050 aaaaaaaa 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)], [qw(ffffffff aaaaaaaa 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)], [qw(00000001 ffffffff 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)], [qw(00000001 fffffff0 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)], [qw(00_a00000a_aaaaaaa 50505050 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)], [qw(0f_fff0005_0505050 aaaaaaaa 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)], [qw(00_000000f_fffffff 01_800000a_aaaaaaa 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)], [qw(00_0000000_0000001 ff_fffffff_fffffff 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)], [qw(00_0000000_0000001 ff_fffffff_ffffff0 ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)], ]; $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16); $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16); $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16); if ($error_count > 0) { print STDERR $error_count, " errors: FAILED\n"; } else { print STDERR "PASS\n"; } exit ($error_count); }

jemalloc-sys-0.3.2/rep/build-aux/config.guess

#! /bin/sh # Attempt to guess a canonical system name. # Copyright 1992-2016 Free Software Foundation, Inc. timestamp='2016-10-02' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>.
# # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess # # Please send patches to <config-patches@gnu.org>. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to <config-patches@gnu.org>." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright 1992-2016 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;'
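# NOTE (editor's sketch, not part of the distributed config.guess): the
# detection logic below runs "eval $set_cc_for_build" lazily, inside just the
# case arms that need a compile probe, so the traps and temp directory above
# are only created when a probe is actually compiled. The usage pattern looks
# like this (the case arm itself is invented):
#
#   some-cpu:SomeOS:*:*)
#       eval $set_cc_for_build
#       if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
#           echo "int main(){return 0;}" > $dummy.c    # write a probe
#           $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && $dummy
#       fi
#       exit ;;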
# This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown case "${UNAME_SYSTEM}" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu eval $set_cc_for_build cat <<-EOF > $dummy.c #include <features.h> #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #else LIBC=gnu #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` ;; esac # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ /sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || \ echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; earmv*) arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` machine=${arch}${endian}-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. case "${UNAME_MACHINE_ARCH}" in earm*) os=netbsdelf ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # Determine ABI tags. case "${UNAME_MACHINE_ARCH}" in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
echo "${machine}-${os}${release}${abi}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; *:Sortix:*:*) echo ${UNAME_MACHINE}-unknown-sortix exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") UNAME_MACHINE=alpha ;; "EV5 (21164)") UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH=x86_64 fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. 
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __cplusplus #include <stdio.h> /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include <sys/systemcfg.h> main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/lslpp ] ; then IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? ) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include <stdlib.h> #include <unistd.h> int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = hppa2.0w ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code.
GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH=hppa2.0w else HP_ARCH=hppa64 fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include <unistd.h> int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. */ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo
${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case ${UNAME_PROCESSOR} in amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW64*:*) echo ${UNAME_MACHINE}-pc-mingw64 exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; *:MSYS*:*) echo ${UNAME_MACHINE}-pc-msys exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; aarch64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-${LIBC} else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi else echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf fi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; cris:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; crisv32:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; e2k:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; frv:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; hexagon:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; k1om:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; mips64el:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-${LIBC} exit ;; or32:Linux:*:* | or1k*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; padre:Linux:*:*) echo sparc-unknown-linux-${LIBC} exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-${LIBC} exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; *) echo hppa-unknown-linux-${LIBC} ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-${LIBC} exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-${LIBC} exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-${LIBC} exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-${LIBC} exit ;; riscv32:Linux:*:* | riscv64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; tile*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-${LIBC} exit ;; x86_64:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. 
echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. echo ${UNAME_MACHINE}-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo ${UNAME_MACHINE}-unknown-stop exit ;; i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit ;; i*86:syllable:*:*) echo ${UNAME_MACHINE}-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit ;; i*86:*DOS:*:*) echo ${UNAME_MACHINE}-pc-msdosdjgpp exit ;; i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} else echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` echo ${UNAME_MACHINE}-pc-isc$UNAME_REL elif /bin/uname -X 2>/dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo ${UNAME_MACHINE}-pc-sco$UNAME_REL else echo ${UNAME_MACHINE}-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered.
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; SX-ACE:SUPER-UX:*:*) echo sxace-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown eval $set_cc_for_build if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi fi elif test "$UNAME_PROCESSOR" = i386 ; then # Avoid executing cc on OS X 10.9, as it ships with a stub # that puts up a graphical alert prompting to install # developer tools. Any system running Mac OS X 10.7 or # later (Darwin 11 and later) is required to have a 64-bit # processor. This is not true of the ARM version of Darwin # that Apple uses in portable devices. UNAME_PROCESSOR=x86_64 fi echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-?:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk${UNAME_RELEASE} exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
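# Annotation (not part of the original script): a worked example of the
# Darwin case above, using the values recorded in the config.log later in
# this archive. With `uname -s` = Darwin, `uname -r` = 18.2.0 and
# `/usr/bin/uname -p` = i386, the release major (18) is greater than 10, so
# the elif branch promotes i386 to x86_64 without invoking cc, and the case
# prints: x86_64-apple-darwin18.2.0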
if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; x86_64:VMkernel:*:*) echo ${UNAME_MACHINE}-unknown-esx exit ;; amd64:Isilon\ OneFS:*:*) echo x86_64-unknown-onefs exit ;; esac cat >&2 </dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: ��������������������������������������������������������������������������������������������jemalloc-sys-0.3.2/rep/build-aux/config.sub���������������������������������������������������������0100755�0000765�0000024�00000106763�13446174740�0017027�0����������������������������������������������������������������������������������������������������ustar�00����������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#! /bin/sh # Configuration validation subroutine script. # Copyright 1992-2016 Free Software Foundation, Inc. timestamp='2016-11-04' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . 
# # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright 1992-2016 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. echo $1 exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. 
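# Illustrative invocations (annotation, not part of the original script);
# the results follow from the alias and default-OS tables further down:
#
#   sh config.sub x86_64-linux   # => x86_64-pc-linux-gnu
#   sh config.sub sun4           # => sparc-sun-sunos4.1.1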
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ kopensolaris*-gnu* | cloudabi*-eabi* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; android-linux) os=-linux-android basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown ;; *) basic_machine=`echo $1 | sed 's/-[^-]*$//'` if [ $basic_machine != $1 ] then os=`echo $1 | sed 's/.*-/-/'` else os=; fi ;; esac ### Let's recognize common machines as not being operating systems so ### that things like config.sub decstation-3100 work. We also ### recognize some manufacturers as not being operating systems, so we ### can provide default operating systems below. case $os in -sun*os*) # Prevent following clause from handling this invalid input. ;; -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -apple | -axis | -knuth | -cray | -microblaze*) os= basic_machine=$1 ;; -bluegene*) os=-cnk ;; -sim | -cisco | -oki | -wec | -winbond) os= basic_machine=$1 ;; -scout) ;; -wrs) os=-vxworks basic_machine=$1 ;; -chorusos*) os=-chorusos basic_machine=$1 ;; -chorusrdb) os=-chorusrdb basic_machine=$1 ;; -hiux*) os=-hiuxwe2 ;; -sco6) os=-sco5v6 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -udk*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -lynx*178) os=-lynxos178 ;; -lynx*5) os=-lynxos5 ;; -lynx*) os=-lynxos ;; -ptx*) basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` ;; -windowsnt*) os=`echo $os | sed -e 's/windowsnt/winnt/'` ;; -psos*) os=-psos ;; -mint | -mint[0-9]*) basic_machine=m68k-atari os=-mint ;; esac # Decode aliases for certain CPU-COMPANY combinations. case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 
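# Worked example of the CPU-COMPANY / KERNEL-OS split above (annotation, not
# original code): for "x86_64-unknown-linux-gnu" the first sed keeps the last
# two components, so maybe_os="linux-gnu"; that matches linux-gnu*, giving
# os=-linux-gnu and basic_machine="x86_64-unknown". A name like
# "decstation-3100" instead hits the manufacturer list just above, which
# clears $os and leaves the whole string in $basic_machine.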
1750a | 580 \ | a29k \ | aarch64 | aarch64_be \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arceb \ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ | avr | avr32 \ | ba \ | be32 | be64 \ | bfin \ | c4x | c8051 | clipper \ | d10v | d30v | dlx | dsp16xx \ | e2k | epiphany \ | fido | fr30 | frv | ft32 \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ | mips64octeon | mips64octeonel \ | mips64orion | mips64orionel \ | mips64r5900 | mips64r5900el \ | mips64vr | mips64vrel \ | mips64vr4100 | mips64vr4100el \ | mips64vr4300 | mips64vr4300el \ | mips64vr5000 | mips64vr5000el \ | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa32r6 | mipsisa32r6el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ | mipsisa64r6 | mipsisa64r6el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ | open8 | or1k | or1knd | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pru \ | pyramid \ | riscv32 | riscv64 \ | rl78 | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ | spu \ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ | visium \ | we32k \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; c54x) basic_machine=tic54x-unknown ;; c55x) basic_machine=tic55x-unknown ;; c6x) basic_machine=tic6x-unknown ;; leon|leon[3-9]) basic_machine=sparc-$basic_machine ;; m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown os=-none ;; m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) ;; ms1) basic_machine=mt-unknown ;; strongarm | thumb | xscale) basic_machine=arm-unknown ;; xgate) basic_machine=$basic_machine-unknown os=-none ;; xscaleeb) basic_machine=armeb-unknown ;; xscaleel) basic_machine=armel-unknown ;; # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) basic_machine=$basic_machine-pc ;; # Object if more than one company name word. *-*-*) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. 
580-* \ | a29k-* \ | aarch64-* | aarch64_be-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | ba-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ | c8051-* | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | e2k-* | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | hexagon-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ | mips64octeon-* | mips64octeonel-* \ | mips64orion-* | mips64orionel-* \ | mips64r5900-* | mips64r5900el-* \ | mips64vr-* | mips64vrel-* \ | mips64vr4100-* | mips64vr4100el-* \ | mips64vr4300-* | mips64vr4300el-* \ | mips64vr5000-* | mips64vr5000el-* \ | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa32r6-* | mipsisa32r6el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64r6-* | mipsisa64r6el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ | or1k*-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ | pru-* \ | pyramid-* \ | riscv32-* | riscv64-* \ | rl78-* | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ | tile*-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ | visium-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) ;; # Recognize the basic CPU types without company name, with glob match. xtensa*) basic_machine=$basic_machine-unknown ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
386bsd) basic_machine=i386-unknown os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; a29khif) basic_machine=a29k-amd os=-udi ;; abacus) basic_machine=abacus-unknown ;; adobe68k) basic_machine=m68010-adobe os=-scout ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; am29k) basic_machine=a29k-none os=-bsd ;; amd64) basic_machine=x86_64-pc ;; amd64-*) basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl os=-sysv ;; amiga | amiga-*) basic_machine=m68k-unknown ;; amigaos | amigados) basic_machine=m68k-unknown os=-amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=-sysv4 ;; apollo68) basic_machine=m68k-apollo os=-sysv ;; apollo68bsd) basic_machine=m68k-apollo os=-bsd ;; aros) basic_machine=i386-pc os=-aros ;; asmjs) basic_machine=asmjs-unknown ;; aux) basic_machine=m68k-apple os=-aux ;; balance) basic_machine=ns32k-sequent os=-dynix ;; blackfin) basic_machine=bfin-unknown os=-linux ;; blackfin-*) basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) basic_machine=powerpc-ibm os=-cnk ;; c54x-*) basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c55x-*) basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c6x-*) basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray os=-unicos ;; cegcc) basic_machine=arm-unknown os=-cegcc ;; convex-c1) basic_machine=c1-convex os=-bsd ;; convex-c2) basic_machine=c2-convex os=-bsd ;; convex-c32) basic_machine=c32-convex os=-bsd ;; convex-c34) basic_machine=c34-convex os=-bsd ;; convex-c38) basic_machine=c38-convex os=-bsd ;; cray | j90) basic_machine=j90-cray os=-unicos ;; craynv) basic_machine=craynv-cray os=-unicosmp ;; cr16 | cr16-*) basic_machine=cr16-unknown os=-elf ;; crds | unos) basic_machine=m68k-crds ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; crx) basic_machine=crx-unknown os=-elf ;; da30 | da30-*) basic_machine=m68k-da30 ;; decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) basic_machine=mips-dec ;; decsystem10* | dec10*) basic_machine=pdp10-dec os=-tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec os=-tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; delta88) basic_machine=m88k-motorola os=-sysv3 ;; dicos) basic_machine=i686-pc os=-dicos ;; djgpp) basic_machine=i586-pc os=-msdosdjgpp ;; dpx20 | dpx20-*) basic_machine=rs6000-bull os=-bosx ;; dpx2* | dpx2*-bull) basic_machine=m68k-bull os=-sysv3 ;; e500v[12]) basic_machine=powerpc-unknown os=$os"spe" ;; e500v[12]-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` os=$os"spe" ;; ebmon29k) basic_machine=a29k-amd os=-ebmon ;; elxsi) basic_machine=elxsi-elxsi os=-bsd ;; encore | umax | mmax) basic_machine=ns32k-encore ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=-ose ;; fx2800) basic_machine=i860-alliant ;; genix) basic_machine=ns32k-ns ;; gmicro) basic_machine=tron-gmicro os=-sysv ;; go32) basic_machine=i386-pc os=-go32 ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; h8300hms) basic_machine=h8300-hitachi os=-hms ;; h8300xray) basic_machine=h8300-hitachi os=-xray ;; h8500hms) basic_machine=h8500-hitachi os=-hms ;; harris) basic_machine=m88k-harris os=-sysv3 ;; hp300-*) basic_machine=m68k-hp ;; hp300bsd) 
basic_machine=m68k-hp os=-bsd ;; hp300hpux) basic_machine=m68k-hp os=-hpux ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) basic_machine=m68000-hp ;; hp9k3[2-9][0-9]) basic_machine=m68k-hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) basic_machine=hppa1.1-hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) basic_machine=hppa1.1-hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; hppa-next) os=-nextstep3 ;; hppaosf) basic_machine=hppa1.1-hp os=-osf ;; hppro) basic_machine=hppa1.1-hp os=-proelf ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; i*86v32) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv32 ;; i*86v4*) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; i386-vsta | vsta) basic_machine=i386-unknown os=-vsta ;; iris | iris4d) basic_machine=mips-sgi case $os in -irix*) ;; *) os=-irix4 ;; esac ;; isi68 | isi) basic_machine=m68k-isi os=-sysv ;; leon-*|leon[3-9]-*) basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; m88k-omron*) basic_machine=m88k-omron ;; magnum | m3230) basic_machine=mips-mips os=-sysv ;; merlin) basic_machine=ns32k-utek os=-sysv ;; microblaze*) basic_machine=microblaze-xilinx ;; mingw64) basic_machine=x86_64-pc os=-mingw64 ;; mingw32) basic_machine=i686-pc os=-mingw32 ;; mingw32ce) basic_machine=arm-unknown os=-mingw32ce ;; miniframe) basic_machine=m68000-convergent ;; *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari os=-mint ;; mips3*-*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` ;; mips3*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k os=-coff ;; morphos) basic_machine=powerpc-unknown os=-morphos ;; moxiebox) basic_machine=moxie-unknown os=-moxiebox ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; msys) basic_machine=i686-pc os=-msys ;; mvs) basic_machine=i370-ibm os=-mvs ;; nacl) basic_machine=le32-unknown os=-nacl ;; ncr3000) basic_machine=i486-ncr os=-sysv4 ;; netbsd386) basic_machine=i386-unknown os=-netbsd ;; netwinder) basic_machine=armv4l-rebel os=-linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=-newsos ;; news1000) basic_machine=m68030-sony os=-newsos ;; news-3600 | risc-news) basic_machine=mips-sony os=-newsos ;; necv70) basic_machine=v70-nec os=-sysv ;; next | m*-next ) basic_machine=m68k-next case $os in -nextstep* ) ;; -ns2*) os=-nextstep2 ;; *) os=-nextstep3 ;; esac ;; nh3000) basic_machine=m68k-harris os=-cxux ;; nh[45]000) basic_machine=m88k-harris os=-cxux ;; nindy960) basic_machine=i960-intel os=-nindy ;; mon960) basic_machine=i960-intel os=-mon960 ;; nonstopux) basic_machine=mips-compaq os=-nonstopux ;; np1) basic_machine=np1-gould ;; neo-tandem) basic_machine=neo-tandem ;; nse-tandem) basic_machine=nse-tandem ;; nsr-tandem) basic_machine=nsr-tandem ;; op50n-* | op60c-*) 
basic_machine=hppa1.1-oki os=-proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; os400) basic_machine=powerpc-ibm os=-os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose ;; os68k) basic_machine=m68k-none os=-os68k ;; pa-hitachi) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; paragon) basic_machine=i860-intel os=-osf ;; parisc) basic_machine=hppa-unknown os=-linux ;; parisc-*) basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; pbd) basic_machine=sparc-tti ;; pbb) basic_machine=m68k-tti ;; pc532 | pc532-*) basic_machine=ns32k-pc532 ;; pc98) basic_machine=i386-pc ;; pc98-*) basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc ;; pentiumpro | p6 | 6x86 | athlon | athlon_*) basic_machine=i686-pc ;; pentiumii | pentium2 | pentiumiii | pentium3) basic_machine=i686-pc ;; pentium4) basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium4-*) basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould ;; power) basic_machine=power-ibm ;; ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; pw32) basic_machine=i586-unknown os=-pw32 ;; rdos | rdos64) basic_machine=x86_64-pc os=-rdos ;; rdos32) basic_machine=i386-pc os=-rdos ;; rom68k) basic_machine=m68k-rom68k os=-coff ;; rm[46]00) basic_machine=mips-siemens ;; rtpc | rtpc-*) basic_machine=romp-ibm ;; s390 | s390-*) basic_machine=s390-ibm ;; s390x | s390x-*) basic_machine=s390x-ibm ;; sa29200) basic_machine=a29k-amd os=-udi ;; sb1) basic_machine=mipsisa64sb1-unknown ;; sb1el) basic_machine=mipsisa64sb1el-unknown ;; sde) basic_machine=mipsisa32-sde os=-elf ;; sei) basic_machine=mips-sei os=-seiux ;; sequent) basic_machine=i386-sequent ;; sh) basic_machine=sh-hitachi os=-hms ;; sh5el) basic_machine=sh5le-unknown ;; sh64) basic_machine=sh64-unknown ;; sparclite-wrs | simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; sps7) basic_machine=m68k-bull os=-sysv2 ;; spur) basic_machine=spur-unknown ;; st2000) basic_machine=m68k-tandem ;; stratus) basic_machine=i860-stratus os=-sysv4 ;; strongarm-* | thumb-*) basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` ;; sun2) basic_machine=m68000-sun ;; sun2os3) basic_machine=m68000-sun os=-sunos3 ;; sun2os4) basic_machine=m68000-sun os=-sunos4 ;; sun3os3) basic_machine=m68k-sun os=-sunos3 ;; sun3os4) basic_machine=m68k-sun os=-sunos4 ;; sun4os3) basic_machine=sparc-sun os=-sunos3 ;; sun4os4) basic_machine=sparc-sun os=-sunos4 ;; sun4sol2) basic_machine=sparc-sun os=-solaris2 ;; sun3 | sun3-*) basic_machine=m68k-sun ;; sun4) basic_machine=sparc-sun ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; 
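# Annotation (not original code): sample decodes from the alias entries
# above: "amd64" becomes x86_64-pc, "pentium4" becomes i786-pc, and
# "sun4os4" sets both the machine (sparc-sun) and the os (-sunos4).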
sv1) basic_machine=sv1-cray os=-unicos ;; symmetry) basic_machine=i386-sequent os=-dynix ;; t3e) basic_machine=alphaev5-cray os=-unicos ;; t90) basic_machine=t90-cray os=-unicos ;; tile*) basic_machine=$basic_machine-unknown os=-linux-gnu ;; tx39) basic_machine=mipstx39-unknown ;; tx39el) basic_machine=mipstx39el-unknown ;; toad1) basic_machine=pdp10-xkl os=-tops20 ;; tower | tower-32) basic_machine=m68k-ncr ;; tpf) basic_machine=s390x-ibm os=-tpf ;; udi29k) basic_machine=a29k-amd os=-udi ;; ultra3) basic_machine=a29k-nyu os=-sym1 ;; v810 | necv810) basic_machine=v810-nec os=-none ;; vaxv) basic_machine=vax-dec os=-sysv ;; vms) basic_machine=vax-dec os=-vms ;; vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; vxworks960) basic_machine=i960-wrs os=-vxworks ;; vxworks68) basic_machine=m68k-wrs os=-vxworks ;; vxworks29k) basic_machine=a29k-wrs os=-vxworks ;; w65*) basic_machine=w65-wdc os=-none ;; w89k-*) basic_machine=hppa1.1-winbond os=-proelf ;; xbox) basic_machine=i686-pc os=-mingw32 ;; xps | xps100) basic_machine=xps100-honeywell ;; xscale-* | xscalee[bl]-*) basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` ;; ymp) basic_machine=ymp-cray os=-unicos ;; z8k-*-coff) basic_machine=z8k-unknown os=-sim ;; z80-*-coff) basic_machine=z80-unknown os=-sim ;; none) basic_machine=none-none os=-none ;; # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) basic_machine=hppa1.1-winbond ;; op50n) basic_machine=hppa1.1-oki ;; op60c) basic_machine=hppa1.1-oki ;; romp) basic_machine=romp-ibm ;; mmix) basic_machine=mmix-knuth ;; rs6000) basic_machine=rs6000-ibm ;; vax) basic_machine=vax-dec ;; pdp10) # there are many clones, so DEC is not a safe bet basic_machine=pdp10-unknown ;; pdp11) basic_machine=pdp11-dec ;; we32k) basic_machine=we32k-att ;; sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) basic_machine=sparc-sun ;; cydra) basic_machine=cydra-cydrome ;; orion) basic_machine=orion-highlevel ;; orion105) basic_machine=clipper-highlevel ;; mac | mpw | mac-mpw) basic_machine=m68k-apple ;; pmac | pmac-mpw) basic_machine=powerpc-apple ;; *-unknown) # Make sure to match an already-canonicalized machine name. ;; *) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` ;; *-commodore*) basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if [ x"$os" != x"" ] then case $os in # First match some system type aliases # that might get confused with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux ;; -solaris1 | -solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; -solaris) os=-solaris2 ;; -svr4*) os=-sysv4 ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # First accept the basic system types. # The portable systems comes first. # Each alternative MUST END IN A *, to match a version number. # -sysv* is not here because it comes later, after sysvr4. 
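# Annotation (not original code): at this point "-solaris" has already been
# rewritten to -solaris2 and "-gnu/linux" to -linux-gnu by the aliases above;
# the list below then accepts the canonical names, each alternative ending in
# a * so that trailing version numbers (e.g. -solaris2.11) still match.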
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* | -cloudabi* | -sortix* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ | -onefs* | -tirtos* | -phoenix* | -fuchsia*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) case $basic_machine in x86-* | i*86-*) ;; *) os=-nto$os ;; esac ;; -nto-qnx*) ;; -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) os=`echo $os | sed -e 's|mac|macos|'` ;; -linux-dietlibc) os=-linux-dietlibc ;; -linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) os=`echo $os | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) os=`echo $os | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition ;; -os400*) os=-os400 ;; -wince*) os=-wince ;; -osfrose*) os=-osfrose ;; -osf*) os=-osf ;; -utek*) os=-bsd ;; -dynix*) os=-bsd ;; -acis*) os=-aos ;; -atheos*) os=-atheos ;; -syllable*) os=-syllable ;; -386bsd) os=-bsd ;; -ctix* | -uts*) os=-sysv ;; -nova*) os=-rtmk-nova ;; -ns2 ) os=-nextstep2 ;; -nsk*) os=-nsk ;; # Preserve the version number of sinix5. -sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; -sinix*) os=-sysv4 ;; -tpf*) os=-tpf ;; -triton*) os=-sysv3 ;; -oss*) os=-sysv3 ;; -svr4) os=-sysv4 ;; -svr3) os=-sysv3 ;; -sysvr4) os=-sysv4 ;; # This must come after -sysvr4. -sysv*) ;; -ose*) os=-ose ;; -es1800*) os=-ose ;; -xenix) os=-xenix ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; -aros*) os=-aros ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; -nacl*) ;; -ios) ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. 
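# Annotation (not original code): example of this defaulting: a bare
# "sparc-sun" carries no OS, so the "sparc-* | *-sun" entry below supplies
# -sunos4.1.1 and config.sub prints sparc-sun-sunos4.1.1.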
# Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $basic_machine in score-*) os=-elf ;; spu-*) os=-elf ;; *-acorn) os=-riscix1.2 ;; arm*-rebel) os=-linux ;; arm*-semi) os=-aout ;; c4x-* | tic4x-*) os=-coff ;; c8051-*) os=-elf ;; hexagon-*) os=-elf ;; tic54x-*) os=-coff ;; tic55x-*) os=-coff ;; tic6x-*) os=-coff ;; # This must come before the *-dec entry. pdp10-*) os=-tops20 ;; pdp11-*) os=-none ;; *-dec | vax-*) os=-ultrix4.2 ;; m68*-apollo) os=-domain ;; i386-sun) os=-sunos4.0.2 ;; m68000-sun) os=-sunos3 ;; m68*-cisco) os=-aout ;; mep-*) os=-elf ;; mips*-cisco) os=-elf ;; mips*-*) os=-elf ;; or32-*) os=-coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=-sysv3 ;; sparc-* | *-sun) os=-sunos4.1.1 ;; *-be) os=-beos ;; *-haiku) os=-haiku ;; *-ibm) os=-aix ;; *-knuth) os=-mmixware ;; *-wec) os=-proelf ;; *-winbond) os=-proelf ;; *-oki) os=-proelf ;; *-hp) os=-hpux ;; *-hitachi) os=-hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=-sysv ;; *-cbm) os=-amigaos ;; *-dg) os=-dgux ;; *-dolphin) os=-sysv3 ;; m68k-ccur) os=-rtu ;; m88k-omron*) os=-luna ;; *-next ) os=-nextstep ;; *-sequent) os=-ptx ;; *-crds) os=-unos ;; *-ns) os=-genix ;; i370-*) os=-mvs ;; *-next) os=-nextstep3 ;; *-gould) os=-sysv ;; *-highlevel) os=-bsd ;; *-encore) os=-bsd ;; *-sgi) os=-irix ;; *-siemens) os=-sysv4 ;; *-masscomp) os=-rtu ;; f30[01]-fujitsu | f700-fujitsu) os=-uxpv ;; *-rom68k) os=-coff ;; *-*bug) os=-coff ;; *-apple) os=-macos ;; *-atari*) os=-mint ;; *) os=-none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. vendor=unknown case $basic_machine in *-unknown) case $os in -riscix*) vendor=acorn ;; -sunos*) vendor=sun ;; -cnk*|-aix*) vendor=ibm ;; -beos*) vendor=be ;; -hpux*) vendor=hp ;; -mpeix*) vendor=hp ;; -hiux*) vendor=hitachi ;; -unos*) vendor=crds ;; -dgux*) vendor=dg ;; -luna*) vendor=omron ;; -genix*) vendor=ns ;; -mvs* | -opened*) vendor=ibm ;; -os400*) vendor=ibm ;; -ptx*) vendor=sequent ;; -tpf*) vendor=ibm ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; -aux*) vendor=apple ;; -hms*) vendor=hitachi ;; -mpw* | -macos*) vendor=apple ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) vendor=atari ;; -vos*) vendor=stratus ;; esac basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` ;; esac echo $basic_machine$os exit # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End:
jemalloc-sys-0.3.2/rep/build-aux/install-sh
#! /bin/sh # # install - install a program, script, or datafile # This comes from X11R5 (mit/util/scripts/install.sh).
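# Illustrative uses (annotation, not part of the original script; both
# destination paths are hypothetical):
#
#   ./install-sh -d /usr/local/lib/myproj                 # create a directory
#   ./install-sh -c -m 0644 header.h /usr/local/include   # copy with mode 0644
#
# By default (without -c) the source file is moved rather than copied.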
# # Copyright 1991 by the Massachusetts Institute of Technology # # Permission to use, copy, modify, distribute, and sell this software and its # documentation for any purpose is hereby granted without fee, provided that # the above copyright notice appear in all copies and that both that # copyright notice and this permission notice appear in supporting # documentation, and that the name of M.I.T. not be used in advertising or # publicity pertaining to distribution of the software without specific, # written prior permission. M.I.T. makes no representations about the # suitability of this software for any purpose. It is provided "as is" # without express or implied warranty. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. It can only install one file at a time, a restriction # shared with many OS's install programs. # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit="${DOITPROG-}" # put in absolute paths if you don't have them in your path; or use env. vars. mvprog="${MVPROG-mv}" cpprog="${CPPROG-cp}" chmodprog="${CHMODPROG-chmod}" chownprog="${CHOWNPROG-chown}" chgrpprog="${CHGRPPROG-chgrp}" stripprog="${STRIPPROG-strip}" rmprog="${RMPROG-rm}" mkdirprog="${MKDIRPROG-mkdir}" transformbasename="" transform_arg="" instcmd="$mvprog" chmodcmd="$chmodprog 0755" chowncmd="" chgrpcmd="" stripcmd="" rmcmd="$rmprog -f" mvcmd="$mvprog" src="" dst="" dir_arg="" while [ x"$1" != x ]; do case $1 in -c) instcmd="$cpprog" shift continue;; -d) dir_arg=true shift continue;; -m) chmodcmd="$chmodprog $2" shift shift continue;; -o) chowncmd="$chownprog $2" shift shift continue;; -g) chgrpcmd="$chgrpprog $2" shift shift continue;; -s) stripcmd="$stripprog" shift continue;; -t=*) transformarg=`echo $1 | sed 's/-t=//'` shift continue;; -b=*) transformbasename=`echo $1 | sed 's/-b=//'` shift continue;; *) if [ x"$src" = x ] then src=$1 else # this colon is to work around a 386BSD /bin/sh bug : dst=$1 fi shift continue;; esac done if [ x"$src" = x ] then echo "install: no input file specified" exit 1 else true fi if [ x"$dir_arg" != x ]; then dst=$src src="" if [ -d $dst ]; then instcmd=: else instcmd=mkdir fi else # Waiting for this to be detected by the "$instcmd $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if [ -f $src -o -d $src ] then true else echo "install: $src does not exist" exit 1 fi if [ x"$dst" = x ] then echo "install: no destination specified" exit 1 else true fi # If destination is a directory, append the input filename; if your system # does not like double slashes in filenames, you may need to add some logic if [ -d $dst ] then dst="$dst"/`basename $src` else true fi fi ## this sed command emulates the dirname command dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` # Make sure that the destination directory exists. # this part is taken from Noah Friedman's mkinstalldirs script # Skip lots of stat calls in the usual case. if [ ! -d "$dstdir" ]; then defaultIFS=' ' IFS="${IFS-${defaultIFS}}" oIFS="${IFS}" # Some sh's can't handle IFS=/ for some reason. IFS='%' set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` IFS="${oIFS}" pathcomp='' while [ $# -ne 0 ] ; do pathcomp="${pathcomp}${1}" shift if [ ! 
-d "${pathcomp}" ] ; then $mkdirprog "${pathcomp}" else true fi pathcomp="${pathcomp}/" done fi if [ x"$dir_arg" != x ] then $doit $instcmd $dst && if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi else # If we're going to rename the final executable, determine the name now. if [ x"$transformarg" = x ] then dstfile=`basename $dst` else dstfile=`basename $dst $transformbasename | sed $transformarg`$transformbasename fi # don't allow the sed command to completely eliminate the filename if [ x"$dstfile" = x ] then dstfile=`basename $dst` else true fi # Make a temp file name in the proper directory. dsttmp=$dstdir/#inst.$$# # Move or copy the file name to the temp name $doit $instcmd $src $dsttmp && trap "rm -f ${dsttmp}" 0 && # and set any options; do chmod last to preserve setuid bits # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $instcmd $src $dsttmp" command. if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && # Now rename the file to the real destination. $doit $rmcmd -f $dstdir/$dstfile && $doit $mvcmd $dsttmp $dstdir/$dstfile fi && exit 0 �����������������������������������������������jemalloc-sys-0.3.2/rep/config.log�������������������������������������������������������������������0100644�0000765�0000024�00000407710�13446175033�0015112�0����������������������������������������������������������������������������������������������������ustar�00����������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by configure, which was generated by GNU Autoconf 2.69. Invocation command line was $ ./configure ## --------- ## ## Platform. ## ## --------- ## hostname = air uname -m = x86_64 uname -r = 18.2.0 uname -s = Darwin uname -v = Darwin Kernel Version 18.2.0: Thu Dec 20 20:46:53 PST 2018; root:xnu-4903.241.1~1/RELEASE_X86_64 /usr/bin/uname -p = i386 /bin/uname -X = unknown /bin/arch = unknown /usr/bin/arch -k = unknown /usr/convex/getsysinfo = unknown /usr/bin/hostinfo = Mach kernel version: Darwin Kernel Version 18.2.0: Thu Dec 20 20:46:53 PST 2018; root:xnu-4903.241.1~1/RELEASE_X86_64 Kernel configured for up to 4 processors. 2 processors are physically available. 4 processors are logically available. 
Processor type: i486 (Intel 80486)
Processors active: 0 1 2 3
Primary memory available: 8.00 gigabytes
Default processor set: 425 tasks, 2005 threads, 4 processors
Load average: 2.08, Mach factor: 1.91
/bin/machine = unknown
/usr/bin/oslevel = unknown
/bin/universe = unknown

PATH: /usr/local/bin
PATH: /Users/gnzlbg/.cargo/bin
PATH: /usr/local/bin
PATH: /usr/local/sbin
PATH: /usr/sbin
PATH: /Users/gnzlbg/.cargo/bin
PATH: /usr/local/bin
PATH: /usr/local/sbin
PATH: /usr/local/opt
PATH: /usr/local/bin
PATH: /usr/sbin
PATH: /usr/bin
PATH: /bin
PATH: /Library/TeX/texbin
PATH: /opt/X11/bin


## ----------- ##
## Core tests. ##
## ----------- ##

configure:2658: checking for xsltproc
configure:2676: found /usr/bin/xsltproc
configure:2689: result: /usr/bin/xsltproc
configure:2773: checking for gcc
configure:2800: result: /usr/bin/clang
configure:3029: checking for C compiler version
configure:3038: /usr/bin/clang --version >&5
Apple LLVM version 10.0.0 (clang-1000.11.45.5)
Target: x86_64-apple-darwin18.2.0
Thread model: posix
InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin
configure:3049: $? = 0
configure:3038: /usr/bin/clang -v >&5
Apple LLVM version 10.0.0 (clang-1000.11.45.5)
Target: x86_64-apple-darwin18.2.0
Thread model: posix
InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin
Found CUDA installation: /usr/local/cuda, version unknown
configure:3049: $? = 0
configure:3038: /usr/bin/clang -V >&5
clang: error: argument to '-V' is missing (expected 1 value)
clang: error: no input files
configure:3049: $? = 1
configure:3038: /usr/bin/clang -qversion >&5
clang: error: unknown argument: '-qversion'
clang: error: no input files
configure:3049: $? = 1
configure:3069: checking whether the C compiler works
configure:3091: /usr/bin/clang conftest.c >&5
configure:3095: $? = 0
configure:3143: result: yes
configure:3146: checking for C compiler default output file name
configure:3148: result: a.out
configure:3154: checking for suffix of executables
configure:3161: /usr/bin/clang -o conftest conftest.c >&5
configure:3165: $? = 0
configure:3187: result:
configure:3209: checking whether we are cross compiling
configure:3217: /usr/bin/clang -o conftest conftest.c >&5
configure:3221: $? = 0
configure:3228: ./conftest
configure:3232: $? = 0
configure:3247: result: no
configure:3252: checking for suffix of object files
configure:3274: /usr/bin/clang -c conftest.c >&5
configure:3278: $? = 0
configure:3299: result: o
configure:3303: checking whether we are using the GNU C compiler
configure:3322: /usr/bin/clang -c conftest.c >&5
configure:3322: $? = 0
configure:3331: result: yes
configure:3340: checking whether /usr/bin/clang accepts -g
configure:3360: /usr/bin/clang -c -g conftest.c >&5
configure:3360: $? = 0
configure:3401: result: yes
configure:3418: checking for /usr/bin/clang option to accept ISO C89
configure:3481: /usr/bin/clang -c conftest.c >&5
configure:3481: $? = 0
configure:3494: result: none needed
configure:3559: checking whether compiler is cray
configure:3579: /usr/bin/clang -c conftest.c >&5
conftest.c:15:11: error: expected ';' at end of declaration
int fail-1;
          ^
          ;
1 error generated.
configure:3579: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| /* end confdefs.h. */
|
| int
| main ()
| {
|
| #ifndef _CRAYC
| int fail-1;
| #endif
|
| ;
| return 0;
| }
configure:3586: result: no
configure:3623: checking whether compiler supports -std=gnu11
configure:3654: /usr/bin/clang -c -std=gnu11 conftest.c >&5
configure:3654: $? = 0
configure:3656: result: yes
configure:3738: checking whether compiler supports -Wall
configure:3769: /usr/bin/clang -c -std=gnu11 -Wall conftest.c >&5
configure:3769: $? = 0
configure:3771: result: yes
configure:3789: checking whether compiler supports -Wextra
configure:3820: /usr/bin/clang -c -std=gnu11 -Wall -Wextra conftest.c >&5
configure:3820: $? = 0
configure:3822: result: yes
configure:3840: checking whether compiler supports -Wshorten-64-to-32
configure:3871: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 conftest.c >&5
configure:3871: $? = 0
configure:3873: result: yes
configure:3891: checking whether compiler supports -Wsign-compare
configure:3922: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare conftest.c >&5
configure:3922: $? = 0
configure:3924: result: yes
configure:3942: checking whether compiler supports -Wundef
configure:3973: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef conftest.c >&5
configure:3973: $? = 0
configure:3975: result: yes
configure:3993: checking whether compiler supports -Wno-format-zero-length
configure:4024: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length conftest.c >&5
configure:4024: $? = 0
configure:4026: result: yes
configure:4044: checking whether compiler supports -pipe
configure:4075: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe conftest.c >&5
configure:4075: $? = 0
configure:4077: result: yes
configure:4095: checking whether compiler supports -g3
configure:4126: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:4126: $? = 0
configure:4128: result: yes
configure:4576: checking how to run the C preprocessor
configure:4607: /usr/bin/clang -E conftest.c
configure:4607: $? = 0
configure:4621: /usr/bin/clang -E conftest.c
conftest.c:10:10: fatal error: 'ac_nonexistent.h' file not found
#include <ac_nonexistent.h>
         ^~~~~~~~~~~~~~~~~~
1 error generated.
configure:4621: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| /* end confdefs.h. */
| #include <ac_nonexistent.h>
configure:4646: result: /usr/bin/clang -E
configure:4666: /usr/bin/clang -E conftest.c
configure:4666: $? = 0
configure:4680: /usr/bin/clang -E conftest.c
conftest.c:10:10: fatal error: 'ac_nonexistent.h' file not found
#include <ac_nonexistent.h>
         ^~~~~~~~~~~~~~~~~~
1 error generated.
configure:4680: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| /* end confdefs.h. */
| #include <ac_nonexistent.h>
configure:4897: checking for C++ compiler version
configure:4906: /usr/bin/clang++ --version >&5
Apple LLVM version 10.0.0 (clang-1000.11.45.5)
Target: x86_64-apple-darwin18.2.0
Thread model: posix
InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin
configure:4917: $? = 0
configure:4906: /usr/bin/clang++ -v >&5
Apple LLVM version 10.0.0 (clang-1000.11.45.5)
Target: x86_64-apple-darwin18.2.0
Thread model: posix
InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin
Found CUDA installation: /usr/local/cuda, version unknown
configure:4917: $? = 0
configure:4906: /usr/bin/clang++ -V >&5
clang: error: argument to '-V' is missing (expected 1 value)
clang: error: no input files
configure:4917: $? = 1
configure:4906: /usr/bin/clang++ -qversion >&5
clang: error: unknown argument: '-qversion'
clang: error: no input files
configure:4917: $? = 1
configure:4921: checking whether we are using the GNU C++ compiler
configure:4940: /usr/bin/clang++ -c conftest.cpp >&5
configure:4940: $? = 0
configure:4949: result: yes
configure:4958: checking whether /usr/bin/clang++ accepts -g
configure:4978: /usr/bin/clang++ -c -g conftest.cpp >&5
configure:4978: $? = 0
configure:5019: result: yes
configure:5050: checking whether /usr/bin/clang++ supports C++14 features by default
configure:5464: /usr/bin/clang++ -c -g -O2 conftest.cpp >&5
conftest.cpp:21:2: error: "This is not a C++11 compiler"
#error "This is not a C++11 compiler"
 ^
conftest.cpp:306:2: error: "This is not a C++14 compiler"
#error "This is not a C++14 compiler"
 ^
2 errors generated.
configure:5464: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| /* end confdefs.h. */
|
|
| // If the compiler admits that it is not ready for C++11, why torture it?
| // Hopefully, this will speed up the test.
|
| #ifndef __cplusplus
|
| #error "This is not a C++ compiler"
|
| #elif __cplusplus < 201103L
|
| #error "This is not a C++11 compiler"
|
| #else
|
| namespace cxx11
| {
|
| namespace test_static_assert
| {
|
| template <typename T>
| struct check
| {
| static_assert(sizeof(int) <= sizeof(T), "not big enough");
| };
|
| }
|
| namespace test_final_override
| {
|
| struct Base
| {
| virtual void f() {}
| };
|
| struct Derived : public Base
| {
| virtual void f() override {}
| };
|
| }
|
| namespace test_double_right_angle_brackets
| {
|
| template < typename T >
| struct check {};
|
| typedef check<void> single_type;
| typedef check<check<void>> double_type;
| typedef check<check<check<void>>> triple_type;
| typedef check<check<check<check<void>>>> quadruple_type;
|
| }
|
| namespace test_decltype
| {
|
| int
| f()
| {
| int a = 1;
| decltype(a) b = 2;
| return a + b;
| }
|
| }
|
| namespace test_type_deduction
| {
|
| template < typename T1, typename T2 >
| struct is_same
| {
| static const bool value = false;
| };
|
| template < typename T >
| struct is_same<T, T>
| {
| static const bool value = true;
| };
|
| template < typename T1, typename T2 >
| auto
| add(T1 a1, T2 a2) -> decltype(a1 + a2)
| {
| return a1 + a2;
| }
|
| int
| test(const int c, volatile int v)
| {
| static_assert(is_same<int, int>::value == true, "");
| static_assert(is_same<int, const int>::value == false, "");
| static_assert(is_same<int, volatile int>::value == false, "");
| auto ac = c;
| auto av = v;
| auto sumi = ac + av + 'x';
| auto sumf = ac + av + 1.0;
| static_assert(is_same<decltype(ac), int>::value == true, "");
| static_assert(is_same<decltype(av), int>::value == true, "");
| static_assert(is_same<decltype(sumi), int>::value == true, "");
| static_assert(is_same<decltype(sumf), int>::value == false, "");
| static_assert(is_same<decltype(add(c, v)), int>::value == true, "");
| return (sumf > 0.0) ? sumi : add(c, v);
| }
|
| }
|
| namespace test_noexcept
| {
|
| int f() { return 0; }
| int g() noexcept { return 0; }
|
| static_assert(noexcept(f()) == false, "");
| static_assert(noexcept(g()) == true, "");
|
| }
|
| namespace test_constexpr
| {
|
| template < typename CharT >
| unsigned long constexpr
| strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
| {
| return *s ? strlen_c_r(s + 1, acc + 1) : acc;
| }
|
| template < typename CharT >
| unsigned long constexpr
| strlen_c(const CharT *const s) noexcept
| {
| return strlen_c_r(s, 0UL);
| }
|
| static_assert(strlen_c("") == 0UL, "");
| static_assert(strlen_c("1") == 1UL, "");
| static_assert(strlen_c("example") == 7UL, "");
| static_assert(strlen_c("another\0example") == 7UL, "");
|
| }
|
| namespace test_rvalue_references
| {
|
| template < int N >
| struct answer
| {
| static constexpr int value = N;
| };
|
| answer<1> f(int&) { return answer<1>(); }
| answer<2> f(const int&) { return answer<2>(); }
| answer<3> f(int&&) { return answer<3>(); }
|
| void
| test()
| {
| int i = 0;
| const int c = 0;
| static_assert(decltype(f(i))::value == 1, "");
| static_assert(decltype(f(c))::value == 2, "");
| static_assert(decltype(f(0))::value == 3, "");
| }
|
| }
|
| namespace test_uniform_initialization
| {
|
| struct test
| {
| static const int zero {};
| static const int one {1};
| };
|
| static_assert(test::zero == 0, "");
| static_assert(test::one == 1, "");
|
| }
|
| namespace test_lambdas
| {
|
| void
| test1()
| {
| auto lambda1 = [](){};
| auto lambda2 = lambda1;
| lambda1();
| lambda2();
| }
|
| int
| test2()
| {
| auto a = [](int i, int j){ return i + j; }(1, 2);
| auto b = []() -> int { return '0'; }();
| auto c = [=](){ return a + b; }();
| auto d = [&](){ return c; }();
| auto e = [a, &b](int x) mutable {
| const auto identity = [](int y){ return y; };
| for (auto i = 0; i < a; ++i)
| a += b--;
| return x + identity(a + b);
| }(0);
| return a + b + c + d + e;
| }
|
| int
| test3()
| {
| const auto nullary = [](){ return 0; };
| const auto unary = [](int x){ return x; };
| using nullary_t = decltype(nullary);
| using unary_t = decltype(unary);
| const auto higher1st = [](nullary_t f){ return f(); };
| const auto higher2nd = [unary](nullary_t f1){
| return [unary, f1](unary_t f2){ return f2(unary(f1())); };
| };
| return higher1st(nullary) + higher2nd(nullary)(unary);
| }
|
| }
|
| namespace test_variadic_templates
| {
|
| template <int...>
| struct sum;
|
| template <int N0, int... N1toN>
| struct sum<N0, N1toN...>
| {
| static constexpr auto value = N0 + sum<N1toN...>::value;
| };
|
| template <>
| struct sum<>
| {
| static constexpr auto value = 0;
| };
|
| static_assert(sum<>::value == 0, "");
| static_assert(sum<1>::value == 1, "");
| static_assert(sum<23>::value == 23, "");
| static_assert(sum<1, 2>::value == 3, "");
| static_assert(sum<5, 5, 11>::value == 21, "");
| static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
|
| }
|
| // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
| // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
| // because of this.
| namespace test_template_alias_sfinae
| {
|
| struct foo {};
|
| template <typename T>
| using member = typename T::member_type;
|
| template <typename T>
| void func(...) {}
|
| template <typename T>
| void func(member<T>*) {}
|
| void test();
|
| void test() { func<foo>(0); }
|
| }
|
| } // namespace cxx11
|
| #endif // __cplusplus >= 201103L
|
|
|
|
| // If the compiler admits that it is not ready for C++14, why torture it?
| // Hopefully, this will speed up the test.
|
| #ifndef __cplusplus
|
| #error "This is not a C++ compiler"
|
| #elif __cplusplus < 201402L
|
| #error "This is not a C++14 compiler"
|
| #else
|
| namespace cxx14
| {
|
| namespace test_polymorphic_lambdas
| {
|
| int
| test()
| {
| const auto lambda = [](auto&&... args){
| const auto istiny = [](auto x){
| return (sizeof(x) == 1UL) ? 1 : 0;
| };
| const int aretiny[] = { istiny(args)... };
| return aretiny[0];
| };
| return lambda(1, 1L, 1.0f, '1');
| }
|
| }
|
| namespace test_binary_literals
| {
|
| constexpr auto ivii = 0b0000000000101010;
| static_assert(ivii == 42, "wrong value");
|
| }
|
| namespace test_generalized_constexpr
| {
|
| template < typename CharT >
| constexpr unsigned long
| strlen_c(const CharT *const s) noexcept
| {
| auto length = 0UL;
| for (auto p = s; *p; ++p)
| ++length;
| return length;
| }
|
| static_assert(strlen_c("") == 0UL, "");
| static_assert(strlen_c("x") == 1UL, "");
| static_assert(strlen_c("test") == 4UL, "");
| static_assert(strlen_c("another\0test") == 7UL, "");
|
| }
|
| namespace test_lambda_init_capture
| {
|
| int
| test()
| {
| auto x = 0;
| const auto lambda1 = [a = x](int b){ return a + b; };
| const auto lambda2 = [a = lambda1(x)](){ return a; };
| return lambda2();
| }
|
| }
|
| namespace test_digit_seperators
| {
|
| constexpr auto ten_million = 100'000'000;
| static_assert(ten_million == 100000000, "");
|
| }
|
| namespace test_return_type_deduction
| {
|
| auto f(int& x) { return x; }
| decltype(auto) g(int& x) { return x; }
|
| template < typename T1, typename T2 >
| struct is_same
| {
| static constexpr auto value = false;
| };
|
| template < typename T >
| struct is_same<T, T>
| {
| static constexpr auto value = true;
| };
|
| int
| test()
| {
| auto x = 0;
| static_assert(is_same<int, decltype(f(x))>::value, "");
| static_assert(is_same<int&, decltype(g(x))>::value, "");
| return x;
| }
|
| }
|
| } // namespace cxx14
|
| #endif // __cplusplus >= 201402L
|
|
configure:5471: result: no
configure:5482: checking whether /usr/bin/clang++ supports C++14 features with -std=c++14
configure:5898: /usr/bin/clang++ -std=c++14 -c -g -O2 conftest.cpp >&5
configure:5898: $? = 0
configure:5907: result: yes
configure:5944: checking whether compiler supports -Wall
configure:5981: /usr/bin/clang++ -std=c++14 -c -Wall conftest.cpp >&5
configure:5981: $? = 0
configure:5983: result: yes
configure:6007: checking whether compiler supports -Wextra
configure:6044: /usr/bin/clang++ -std=c++14 -c -Wall -Wextra conftest.cpp >&5
configure:6044: $? = 0
configure:6046: result: yes
configure:6070: checking whether compiler supports -g3
configure:6107: /usr/bin/clang++ -std=c++14 -c -Wall -Wextra -g3 conftest.cpp >&5
configure:6107: $? = 0
configure:6109: result: yes
configure:6143: checking whether libstdc++ linkage is compilable
configure:6165: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:6165: $? = 0
configure:6173: result: yes
configure:6189: checking for grep that handles long lines and -e
configure:6247: result: /usr/local/bin/ggrep
configure:6252: checking for egrep
configure:6314: result: /usr/local/bin/ggrep -E
configure:6319: checking for ANSI C header files
configure:6339: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6339: $? = 0
configure:6412: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:6412: $? = 0
configure:6412: ./conftest
configure:6412: $? = 0
configure:6423: result: yes
configure:6436: checking for sys/types.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for sys/stat.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for stdlib.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for string.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for memory.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for strings.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for inttypes.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for stdint.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6436: checking for unistd.h
configure:6436: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6436: $? = 0
configure:6436: result: yes
configure:6448: checking whether byte ordering is bigendian
configure:6463: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6463: $? = 0
configure:6508: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
configure:6508: $? = 0
configure:6526: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
conftest.c:28:4: error: use of undeclared identifier 'not'
not big endian
 ^
1 error generated.
configure:6526: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| #define HAVE_CXX14 1
| #define STDC_HEADERS 1
| #define HAVE_SYS_TYPES_H 1
| #define HAVE_SYS_STAT_H 1
| #define HAVE_STDLIB_H 1
| #define HAVE_STRING_H 1
| #define HAVE_MEMORY_H 1
| #define HAVE_STRINGS_H 1
| #define HAVE_INTTYPES_H 1
| #define HAVE_STDINT_H 1
| #define HAVE_UNISTD_H 1
| /* end confdefs.h. */
| #include <sys/types.h>
| #include <sys/param.h>
|
| int
| main ()
| {
| #if BYTE_ORDER != BIG_ENDIAN
| not big endian
| #endif
|
| ;
| return 0;
| }
configure:6654: result: no
configure:6698: checking size of void *
configure:6703: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:6703: $? = 0
configure:6703: ./conftest
configure:6703: $? = 0
configure:6717: result: 8
configure:6744: checking size of int
configure:6749: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:6749: $? = 0
configure:6749: ./conftest
configure:6749: $? = 0
configure:6763: result: 4
configure:6789: checking size of long
configure:6794: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:6794: $? = 0
configure:6794: ./conftest
configure:6794: $? = 0
configure:6808: result: 8
configure:6834: checking size of long long
configure:6839: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:6839: $? = 0
configure:6839: ./conftest
configure:6839: $? = 0
configure:6853: result: 8
configure:6879: checking size of intmax_t
configure:6884: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:6884: $? = 0
configure:6884: ./conftest
configure:6884: $? = 0
configure:6898: result: 8
configure:6926: checking build system type
configure:6940: result: x86_64-apple-darwin18.2.0
configure:6960: checking host system type
configure:6973: result: x86_64-apple-darwin18.2.0
configure:7039: checking whether pause instruction is compilable
configure:7055: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:7055: $? = 0
configure:7063: result: yes
configure:7113: checking number of significant virtual address bits
configure:7164: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:7164: $? = 0
configure:7164: ./conftest
configure:7164: $? = 0
configure:7174: result: 48
configure:7286: checking for ar
configure:7302: found /usr/local/bin/ar
configure:7313: result: ar
configure:7382: checking for nm
configure:7398: found /usr/local/bin/nm
configure:7409: result: nm
configure:7436: checking for gawk
configure:7452: found /usr/local/bin/gawk
configure:7463: result: gawk
configure:7512: result: Missing VERSION file, and unable to generate it; creating bogus VERSION
configure:7732: checking malloc.h usability
configure:7732: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c >&5
conftest.c:68:10: fatal error: 'malloc.h' file not found
#include <malloc.h>
         ^~~~~~~~~~
1 error generated.
configure:7732: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| #define HAVE_CXX14 1
| #define STDC_HEADERS 1
| #define HAVE_SYS_TYPES_H 1
| #define HAVE_SYS_STAT_H 1
| #define HAVE_STDLIB_H 1
| #define HAVE_STRING_H 1
| #define HAVE_MEMORY_H 1
| #define HAVE_STRINGS_H 1
| #define HAVE_INTTYPES_H 1
| #define HAVE_STDINT_H 1
| #define HAVE_UNISTD_H 1
| #define SIZEOF_VOID_P 8
| #define LG_SIZEOF_PTR 3
| #define SIZEOF_INT 4
| #define LG_SIZEOF_INT 2
| #define SIZEOF_LONG 8
| #define LG_SIZEOF_LONG 3
| #define SIZEOF_LONG_LONG 8
| #define LG_SIZEOF_LONG_LONG 3
| #define SIZEOF_INTMAX_T 8
| #define LG_SIZEOF_INTMAX_T 3
| #define HAVE_CPU_SPINWAIT 1
| #define CPU_SPINWAIT __asm__ volatile("pause")
| #define LG_VADDR 48
| #define LG_VADDR 48
| /* end confdefs.h. */
| #include <stdio.h>
| #ifdef HAVE_SYS_TYPES_H
| # include <sys/types.h>
| #endif
| #ifdef HAVE_SYS_STAT_H
| # include <sys/stat.h>
| #endif
| #ifdef STDC_HEADERS
| # include <stdlib.h>
| # include <stddef.h>
| #else
| # ifdef HAVE_STDLIB_H
| # include <stdlib.h>
| # endif
| #endif
| #ifdef HAVE_STRING_H
| # if !defined STDC_HEADERS && defined HAVE_MEMORY_H
| # include <memory.h>
| # endif
| # include <string.h>
| #endif
| #ifdef HAVE_STRINGS_H
| # include <strings.h>
| #endif
| #ifdef HAVE_INTTYPES_H
| # include <inttypes.h>
| #endif
| #ifdef HAVE_STDINT_H
| # include <stdint.h>
| #endif
| #ifdef HAVE_UNISTD_H
| # include <unistd.h>
| #endif
| #include <malloc.h>
configure:7732: result: no
configure:7732: checking malloc.h presence
configure:7732: /usr/bin/clang -E conftest.c
conftest.c:35:10: fatal error: 'malloc.h' file not found
#include <malloc.h>
         ^~~~~~~~~~
1 error generated.
configure:7732: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
| #include <malloc.h>
configure:7732: result: no
configure:7732: checking for malloc.h
configure:7732: result: no
configure:7799: checking for library containing log
configure:7830: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
conftest.c:43:6: warning: incompatible redeclaration of library function 'log' [-Wincompatible-library-redeclaration]
char log ();
     ^
conftest.c:43:6: note: 'log' is a builtin with type 'double (double)'
1 warning generated.
configure:7830: $? = 0
configure:7847: result: none required
configure:7865: checking whether __attribute__ syntax is compilable
configure:7881: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:7881: $? = 0
configure:7889: result: yes
configure:8015: checking whether compiler supports -Werror
configure:8046: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c >&5
configure:8046: $? = 0
configure:8048: result: yes
configure:8066: checking whether compiler supports -herror_on_warning
configure:8097: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror -herror_on_warning conftest.c >&5
clang: error: unknown argument: '-herror_on_warning'
configure:8097: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| #define HAVE_CXX14 1
| #define STDC_HEADERS 1
| #define HAVE_SYS_TYPES_H 1
| #define HAVE_SYS_STAT_H 1
| #define HAVE_STDLIB_H 1
| #define HAVE_STRING_H 1
| #define HAVE_MEMORY_H 1
| #define HAVE_STRINGS_H 1
| #define HAVE_INTTYPES_H 1
| #define HAVE_STDINT_H 1
| #define HAVE_UNISTD_H 1
| #define SIZEOF_VOID_P 8
| #define LG_SIZEOF_PTR 3
| #define SIZEOF_INT 4
| #define LG_SIZEOF_INT 2
| #define SIZEOF_LONG 8
| #define LG_SIZEOF_LONG 3
| #define SIZEOF_LONG_LONG 8
| #define LG_SIZEOF_LONG_LONG 3
| #define SIZEOF_INTMAX_T 8
| #define LG_SIZEOF_INTMAX_T 3
| #define HAVE_CPU_SPINWAIT 1
| #define CPU_SPINWAIT __asm__ volatile("pause")
| #define LG_VADDR 48
| #define LG_VADDR 48
| #define JEMALLOC_USABLE_SIZE_CONST const
| #define JEMALLOC_HAVE_ATTR
| /* end confdefs.h. */
|
|
| int
| main ()
| {
|
| return 0;
|
| ;
| return 0;
| }
configure:8103: result: no
configure:8117: checking whether tls_model attribute is compilable
configure:8135: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c -lstdc++ >&5
configure:8135: $? = 0
configure:8143: result: yes
configure:8158: checking whether compiler supports -Werror
configure:8189: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c >&5
configure:8189: $? = 0
configure:8191: result: yes
configure:8209: checking whether compiler supports -herror_on_warning
configure:8240: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror -herror_on_warning conftest.c >&5
clang: error: unknown argument: '-herror_on_warning'
configure:8240: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
|
|
| int
| main ()
| {
|
| return 0;
|
| ;
| return 0;
| }
configure:8246: result: no
configure:8260: checking whether alloc_size attribute is compilable
configure:8276: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c -lstdc++ >&5
configure:8276: $? = 0
configure:8284: result: yes
configure:8302: checking whether compiler supports -Werror
configure:8333: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c >&5
configure:8333: $? = 0
configure:8335: result: yes
configure:8353: checking whether compiler supports -herror_on_warning
configure:8384: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror -herror_on_warning conftest.c >&5
clang: error: unknown argument: '-herror_on_warning'
configure:8384: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
| /* end confdefs.h. */
|
|
| int
| main ()
| {
|
| return 0;
|
| ;
| return 0;
| }
configure:8390: result: no
configure:8404: checking whether format(gnu_printf, ...) attribute is compilable
configure:8420: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c -lstdc++ >&5
conftest.c:42:51: error: 'format' attribute argument not supported: gnu_printf [-Werror,-Wignored-attributes]
void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));
                                                  ^
1 error generated.
configure:8420: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
| #include <stdio.h>
| int
| main ()
| {
| void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));
| ;
| return 0;
| }
configure:8428: result: no
configure:8446: checking whether compiler supports -Werror
configure:8477: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c >&5
configure:8477: $? = 0
configure:8479: result: yes
configure:8497: checking whether compiler supports -herror_on_warning
configure:8528: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror -herror_on_warning conftest.c >&5
clang: error: unknown argument: '-herror_on_warning'
configure:8528: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
|
|
| int
| main ()
| {
|
| return 0;
|
| ;
| return 0;
| }
configure:8534: result: no
configure:8548: checking whether format(printf, ...) attribute is compilable
configure:8564: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -Werror conftest.c -lstdc++ >&5
configure:8564: $? = 0
configure:8572: result: yes
configure:8632: checking for a BSD-compatible install
configure:8700: result: /usr/local/bin/ginstall -c
configure:8754: checking for ranlib
configure:8770: found /usr/local/bin/ranlib
configure:8781: result: ranlib
configure:8805: checking for ld
configure:8823: found /usr/bin/ld
configure:8836: result: /usr/bin/ld
configure:8846: checking for autoconf
configure:8864: found /usr/local/bin/autoconf
configure:8877: result: /usr/local/bin/autoconf
configure:8971: checking for memalign
configure:8971: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
Undefined symbols for architecture x86_64:
  "_memalign", referenced from:
      _main in conftest-56ab61.o
ld: symbol(s) not found for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
configure:8971: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| #define HAVE_CXX14 1
| #define STDC_HEADERS 1
| #define HAVE_SYS_TYPES_H 1
| #define HAVE_SYS_STAT_H 1
| #define HAVE_STDLIB_H 1
| #define HAVE_STRING_H 1
| #define HAVE_MEMORY_H 1
| #define HAVE_STRINGS_H 1
| #define HAVE_INTTYPES_H 1
| #define HAVE_STDINT_H 1
| #define HAVE_UNISTD_H 1
| #define SIZEOF_VOID_P 8
| #define LG_SIZEOF_PTR 3
| #define SIZEOF_INT 4
| #define LG_SIZEOF_INT 2
| #define SIZEOF_LONG 8
| #define LG_SIZEOF_LONG 3
| #define SIZEOF_LONG_LONG 8
| #define LG_SIZEOF_LONG_LONG 3
| #define SIZEOF_INTMAX_T 8
| #define LG_SIZEOF_INTMAX_T 3
| #define HAVE_CPU_SPINWAIT 1
| #define CPU_SPINWAIT __asm__ volatile("pause")
| #define LG_VADDR 48
| #define LG_VADDR 48
| #define JEMALLOC_USABLE_SIZE_CONST const
| #define JEMALLOC_HAVE_ATTR
| #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
| #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
| #define JEMALLOC_PREFIX "je_"
| #define JEMALLOC_CPREFIX "JE_"
| /* end confdefs.h. */
| /* Define memalign to an innocuous variant, in case <limits.h> declares memalign.
|    For example, HP-UX 11i <limits.h> declares gettimeofday.  */
| #define memalign innocuous_memalign
|
| /* System header to define __stub macros and hopefully few prototypes,
|     which can conflict with char memalign (); below.
|     Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
|     <limits.h> exists even on freestanding compilers.  */
|
| #ifdef __STDC__
| # include <limits.h>
| #else
| # include <assert.h>
| #endif
|
| #undef memalign
|
| /* Override any GCC internal prototype to avoid an error.
|    Use char because int might match the return type of a GCC
|    builtin and then its argument prototype would still apply.  */
| #ifdef __cplusplus
| extern "C"
| #endif
| char memalign ();
| /* The GNU C library defines this for functions which it implements
|     to always fail with ENOSYS.  Some functions are actually named
|     something starting with __ and the normal name is an alias.  */
| #if defined __stub_memalign || defined __stub___memalign
| choke me
| #endif
|
| int
| main ()
| {
| return memalign ();
| ;
| return 0;
| }
configure:8971: result: no
configure:8978: checking for valloc
configure:8978: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 conftest.c -lstdc++ >&5
configure:8978: $? = 0
configure:8978: result: yes
configure:9185: checking whether compiler supports -O3
configure:9216: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 conftest.c >&5
configure:9216: $? = 0
configure:9218: result: yes
configure:9236: checking whether compiler supports -O3
configure:9273: /usr/bin/clang++ -std=c++14 -c -Wall -Wextra -g3 -O3 conftest.cpp >&5
configure:9273: $? = 0
configure:9275: result: yes
configure:9299: checking whether compiler supports -funroll-loops
configure:9330: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c >&5
configure:9330: $? = 0
configure:9332: result: yes
configure:9916: checking configured backtracing method
configure:9918: result: N/A
configure:9946: checking for sbrk
configure:9946: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ >&5
configure:9946: $? = 0
configure:9946: result: yes
configure:9955: result: Disabling dss allocation because sbrk is deprecated
configure:10001: checking whether utrace(2) is compilable
configure:10025: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ >&5
conftest.c:52:10: fatal error: 'sys/ktrace.h' file not found
#include <sys/ktrace.h>
         ^~~~~~~~~~~~~~
1 error generated.
configure:10025: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| #define JEMALLOC_OVERRIDE_VALLOC
| #define JEMALLOC_PRIVATE_NAMESPACE je_
| #define JEMALLOC_CONFIG_MALLOC_CONF ""
| #define JEMALLOC_STATS
| #define JEMALLOC_MAPS_COALESCE
| #define JEMALLOC_FILL
| /* end confdefs.h. */
|
| #include <sys/types.h>
| #include <sys/param.h>
| #include <sys/time.h>
| #include <sys/uio.h>
| #include <sys/ktrace.h>
|
| int
| main ()
| {
|
| utrace((void *)0, 0);
|
| ;
| return 0;
| }
configure:10033: result: no
configure:10141: checking whether a program using __builtin_unreachable is compilable
configure:10165: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ >&5
configure:10165: $? = 0
configure:10173: result: yes
configure:10185: checking whether a program using __builtin_ffsl is compilable
configure:10210: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ >&5
configure:10210: $? = 0
configure:10218: result: yes
configure:10279: checking whether a program using __builtin_popcountl is compilable
configure:10304: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ >&5
configure:10304: $? = 0
configure:10312: result: yes
configure:10346: checking LG_PAGE
configure:10397: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ >&5
conftest.c:75:14: warning: implicit conversion loses integer precision: 'long' to 'int' [-Wshorten-64-to-32]
result = sysconf(_SC_PAGESIZE);
       ~ ^~~~~~~~~~~~~~~~~~~~~
1 warning generated.
configure:10397: $? = 0
configure:10397: ./conftest
configure:10397: $? = 0
configure:10407: result: 12
configure:10478: checking pthread.h usability
configure:10478: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c >&5
configure:10478: $? = 0
configure:10478: result: yes
configure:10478: checking pthread.h presence
configure:10478: /usr/bin/clang -E conftest.c
configure:10478: $? = 0
configure:10478: result: yes
configure:10478: checking for pthread.h
configure:10478: result: yes
configure:10490: checking for pthread_create in -lpthread
configure:10515: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lpthread -lstdc++ >&5
configure:10515: $? = 0
configure:10524: result: yes
configure:10603: checking dlfcn.h usability
configure:10603: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c >&5
configure:10603: $? = 0
configure:10603: result: yes
configure:10603: checking dlfcn.h presence
configure:10603: /usr/bin/clang -E conftest.c
configure:10603: $? = 0
configure:10603: result: yes
configure:10603: checking for dlfcn.h
configure:10603: result: yes
configure:10608: checking for dlsym
configure:10608: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ -pthread >&5
configure:10608: $? = 0
configure:10608: result: yes
configure:10671: checking whether pthread_atfork(3) is compilable
configure:10691: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ -pthread >&5
configure:10691: $? = 0
configure:10699: result: yes
configure:10707: checking whether pthread_setname_np(3) is compilable
configure:10727: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops conftest.c -lstdc++ -pthread >&5
conftest.c:68:38: error: too many arguments to function call, expected 1, have 2
pthread_setname_np(pthread_self(), "setname_test");
~~~~~~~~~~~~~~~~~~                 ^~~~~~~~~~~~~~
/usr/include/pthread.h:499:1: note: 'pthread_setname_np' declared here
__API_AVAILABLE(macos(10.6), ios(3.2))
^
/usr/include/Availability.h:415:126: note: expanded from macro '__API_AVAILABLE'
#define __API_AVAILABLE(...) __API_AVAILABLE_GET_MACRO(__VA_ARGS__,__API_AVAILABLE5, __API_AVAILABLE4, __API_AVAILABLE3, __API_AVAILABLE2, __API_AVAILABLE1)(__VA_ARGS__)
^
1 error generated.
configure:10727: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| #define HAVE_CXX14 1
| #define STDC_HEADERS 1
| #define HAVE_SYS_TYPES_H 1
| #define HAVE_SYS_STAT_H 1
| #define HAVE_STDLIB_H 1
| #define HAVE_STRING_H 1
| #define HAVE_MEMORY_H 1
| #define HAVE_STRINGS_H 1
| #define HAVE_INTTYPES_H 1
| #define HAVE_STDINT_H 1
| #define HAVE_UNISTD_H 1
| #define SIZEOF_VOID_P 8
| #define LG_SIZEOF_PTR 3
| #define SIZEOF_INT 4
| #define LG_SIZEOF_INT 2
| #define SIZEOF_LONG 8
| #define LG_SIZEOF_LONG 3
| #define SIZEOF_LONG_LONG 8
| #define LG_SIZEOF_LONG_LONG 3
| #define SIZEOF_INTMAX_T 8
| #define LG_SIZEOF_INTMAX_T 3
| #define HAVE_CPU_SPINWAIT 1
| #define CPU_SPINWAIT __asm__ volatile("pause")
| #define LG_VADDR 48
| #define LG_VADDR 48
| #define JEMALLOC_USABLE_SIZE_CONST const
| #define JEMALLOC_HAVE_ATTR
| #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
| #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
| #define JEMALLOC_PREFIX "je_"
| #define JEMALLOC_CPREFIX "JE_"
| #define JEMALLOC_OVERRIDE_VALLOC
| #define JEMALLOC_PRIVATE_NAMESPACE je_
| #define JEMALLOC_CONFIG_MALLOC_CONF ""
| #define JEMALLOC_STATS
| #define JEMALLOC_MAPS_COALESCE
| #define JEMALLOC_FILL
| #define JEMALLOC_CACHE_OBLIVIOUS
| #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
| #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
| #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
| #define JEMALLOC_INTERNAL_FFS __builtin_ffs
| #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
| #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
| #define LG_PAGE 12
| #define LG_HUGEPAGE 21
| #define JEMALLOC_HAVE_PTHREAD
| #define HAVE_PTHREAD_H 1
| #define HAVE_DLFCN_H 1
| #define JEMALLOC_HAVE_DLSYM
| #define JEMALLOC_HAVE_PTHREAD_ATFORK
| /* end confdefs.h. */
|
| #include <pthread.h>
|
| int
| main ()
| {
|
| pthread_setname_np(pthread_self(), "setname_test");
|
| ;
| return 0;
| }
configure:10735: result: no
configure:10753: checking for library containing clock_gettime
configure:10784: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
configure:10784: $? = 0
configure:10801: result: none required
configure:10936: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable
configure:10958: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
conftest.c:70:16: error: use of undeclared identifier 'CLOCK_MONOTONIC_COARSE'; did you mean '_CLOCK_MONOTONIC_RAW'?
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
              ^~~~~~~~~~~~~~~~~~~~~~
              _CLOCK_MONOTONIC_RAW
/usr/include/time.h:158:1: note: '_CLOCK_MONOTONIC_RAW' declared here
_CLOCK_MONOTONIC_RAW __CLOCK_AVAILABILITY = 4,
^
1 error generated.
configure:10958: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
|
| #include <time.h>
|
| int
| main ()
| {
|
| struct timespec ts;
|
| clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
|
| ;
| return 0;
| }
configure:10966: result: no
configure:10975: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable
configure:11001: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
conftest.c:73:4: error: _POSIX_MONOTONIC_CLOCK missing/invalid
# error _POSIX_MONOTONIC_CLOCK missing/invalid
  ^
1 error generated.
configure:11001: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
|
| #include <unistd.h>
| #include <time.h>
|
| int
| main ()
| {
|
| struct timespec ts;
|
| clock_gettime(CLOCK_MONOTONIC, &ts);
| #if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
| # error _POSIX_MONOTONIC_CLOCK missing/invalid
| #endif
|
| ;
| return 0;
| }
configure:11009: result: no
configure:11018: checking whether mach_absolute_time() is compilable
configure:11038: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
configure:11038: $? = 0
configure:11046: result: yes
configure:11071: checking whether compiler supports -Werror
configure:11102: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -Werror -D_REENTRANT conftest.c >&5
configure:11102: $? = 0
configure:11104: result: yes
configure:11122: checking whether syscall(2) is compilable
configure:11143: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -Werror -D_REENTRANT conftest.c -lstdc++ -pthread >&5
conftest.c:70:2: error: 'syscall' is deprecated: first deprecated in macOS 10.12 - syscall(2) is unsupported; please switch to a supported interface. For SYS_kdebug_trace use kdebug_signpost(). [-Werror,-Wdeprecated-declarations]
syscall(SYS_write, 2, "hello", 5);
^
/usr/include/unistd.h:745:6: note: 'syscall' has been explicitly marked deprecated here
int syscall(int, ...);
    ^
1 error generated.
configure:11143: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1
| /* end confdefs.h. */
|
| #include <sys/syscall.h>
| #include <unistd.h>
|
| int
| main ()
| {
|
| syscall(SYS_write, 2, "hello", 5);
|
| ;
| return 0;
| }
configure:11151: result: no
configure:11168: checking for secure_getenv
configure:11168: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
Undefined symbols for architecture x86_64:
  "_secure_getenv", referenced from:
      _main in conftest-9fdacd.o
ld: symbol(s) not found for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
configure:11168: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
| /* Define secure_getenv to an innocuous variant, in case <limits.h> declares secure_getenv.
|    For example, HP-UX 11i <limits.h> declares gettimeofday.  */
| #define secure_getenv innocuous_secure_getenv
|
| /* System header to define __stub macros and hopefully few prototypes,
|     which can conflict with char secure_getenv (); below.
|     Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
|     <limits.h> exists even on freestanding compilers.  */
|
| #ifdef __STDC__
| # include <limits.h>
| #else
| # include <assert.h>
| #endif
|
| #undef secure_getenv
|
| /* Override any GCC internal prototype to avoid an error.
|    Use char because int might match the return type of a GCC
|    builtin and then its argument prototype would still apply.  */
| #ifdef __cplusplus
| extern "C"
| #endif
| char secure_getenv ();
| /* The GNU C library defines this for functions which it implements
|     to always fail with ENOSYS.  Some functions are actually named
|     something starting with __ and the normal name is an alias.  */
| #if defined __stub_secure_getenv || defined __stub___secure_getenv
| choke me
| #endif
|
| int
| main ()
| {
| return secure_getenv ();
| ;
| return 0;
| }
configure:11168: result: no
configure:11181: checking for sched_getcpu
configure:11181: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
Undefined symbols for architecture x86_64:
  "_sched_getcpu", referenced from:
      _main in conftest-36fcca.o
ld: symbol(s) not found for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
configure:11181: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
| /* Define sched_getcpu to an innocuous variant, in case <limits.h> declares sched_getcpu.
|    For example, HP-UX 11i <limits.h> declares gettimeofday.  */
| #define sched_getcpu innocuous_sched_getcpu
|
| /* System header to define __stub macros and hopefully few prototypes,
|     which can conflict with char sched_getcpu (); below.
|     Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
|     <limits.h> exists even on freestanding compilers.  */
|
| #ifdef __STDC__
| # include <limits.h>
| #else
| # include <assert.h>
| #endif
|
| #undef sched_getcpu
|
| /* Override any GCC internal prototype to avoid an error.
|    Use char because int might match the return type of a GCC
|    builtin and then its argument prototype would still apply.  */
| #ifdef __cplusplus
| extern "C"
| #endif
| char sched_getcpu ();
| /* The GNU C library defines this for functions which it implements
|     to always fail with ENOSYS.  Some functions are actually named
|     something starting with __ and the normal name is an alias.  */
| #if defined __stub_sched_getcpu || defined __stub___sched_getcpu
| choke me
| #endif
|
| int
| main ()
| {
| return sched_getcpu ();
| ;
| return 0;
| }
configure:11181: result: no
configure:11194: checking for sched_setaffinity
configure:11194: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
Undefined symbols for architecture x86_64:
  "_sched_setaffinity", referenced from:
      _main in conftest-fa7a48.o
ld: symbol(s) not found for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
configure:11194: $? = 1
configure: failed program was:
| /* confdefs.h */
| ...
| /* end confdefs.h. */
| /* Define sched_setaffinity to an innocuous variant, in case <limits.h> declares sched_setaffinity.
|    For example, HP-UX 11i <limits.h> declares gettimeofday.  */
| #define sched_setaffinity innocuous_sched_setaffinity
|
| /* System header to define __stub macros and hopefully few prototypes,
|     which can conflict with char sched_setaffinity (); below.
|     Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
|     <limits.h> exists even on freestanding compilers.  */
|
| #ifdef __STDC__
| # include <limits.h>
| #else
| # include <assert.h>
| #endif
|
| #undef sched_setaffinity
|
| /* Override any GCC internal prototype to avoid an error.
|    Use char because int might match the return type of a GCC
|    builtin and then its argument prototype would still apply.  */
| #ifdef __cplusplus
| extern "C"
| #endif
| char sched_setaffinity ();
| /* The GNU C library defines this for functions which it implements
|     to always fail with ENOSYS.  Some functions are actually named
|     something starting with __ and the normal name is an alias.  */
| #if defined __stub_sched_setaffinity || defined __stub___sched_setaffinity
| choke me
| #endif
|
| int
| main ()
| {
| return sched_setaffinity ();
| ;
| return 0;
| }
configure:11194: result: no
configure:11207: checking for issetugid
configure:11207: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
configure:11207: $? = 0
configure:11207: result: yes
configure:11220: checking for _malloc_thread_cleanup
configure:11220: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5
Undefined symbols for architecture x86_64:
  "__malloc_thread_cleanup", referenced from:
      _main in conftest-290a18.o
ld: symbol(s) not found for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
configure:11220: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME ""
| #define PACKAGE_TARNAME ""
| #define PACKAGE_VERSION ""
| #define PACKAGE_STRING ""
| #define PACKAGE_BUGREPORT ""
| #define PACKAGE_URL ""
| #define JEMALLOC_HAS_RESTRICT 1
| #define HAVE_CXX14 1
| #define STDC_HEADERS 1
| #define HAVE_SYS_TYPES_H 1
| #define HAVE_SYS_STAT_H 1
| #define HAVE_STDLIB_H 1
| #define HAVE_STRING_H 1
| #define HAVE_MEMORY_H 1
| #define HAVE_STRINGS_H 1
| #define HAVE_INTTYPES_H 1
| #define HAVE_STDINT_H 1
| #define HAVE_UNISTD_H 1
| #define SIZEOF_VOID_P 8
| #define LG_SIZEOF_PTR 3
| #define SIZEOF_INT 4
| #define LG_SIZEOF_INT 2
| #define SIZEOF_LONG 8
| #define LG_SIZEOF_LONG 3
| #define SIZEOF_LONG_LONG 8
| #define LG_SIZEOF_LONG_LONG 3
| #define SIZEOF_INTMAX_T 8
| #define LG_SIZEOF_INTMAX_T 3
| #define HAVE_CPU_SPINWAIT 1
| #define CPU_SPINWAIT __asm__ volatile("pause")
| #define LG_VADDR 48
| #define LG_VADDR 48
| #define JEMALLOC_USABLE_SIZE_CONST const
| #define JEMALLOC_HAVE_ATTR
| #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
| #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
| #define JEMALLOC_PREFIX "je_"
| #define JEMALLOC_CPREFIX "JE_"
| #define JEMALLOC_OVERRIDE_VALLOC
| #define JEMALLOC_PRIVATE_NAMESPACE je_
| #define JEMALLOC_CONFIG_MALLOC_CONF ""
| #define JEMALLOC_STATS
| #define JEMALLOC_MAPS_COALESCE
| #define JEMALLOC_FILL
| #define JEMALLOC_CACHE_OBLIVIOUS
| #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
| #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
| #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
| #define JEMALLOC_INTERNAL_FFS __builtin_ffs
| #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
| #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
| #define LG_PAGE 12
| #define LG_HUGEPAGE 21
| #define JEMALLOC_HAVE_PTHREAD
| #define HAVE_PTHREAD_H 1
| #define HAVE_DLFCN_H 1
| #define JEMALLOC_HAVE_DLSYM
| #define JEMALLOC_HAVE_PTHREAD_ATFORK
| #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1
| #define JEMALLOC_HAVE_ISSETUGID
| /* end confdefs.h. */
| /* Define _malloc_thread_cleanup to an innocuous variant, in case <limits.h> declares _malloc_thread_cleanup.
|    For example, HP-UX 11i <limits.h> declares gettimeofday.
*/ | #define _malloc_thread_cleanup innocuous__malloc_thread_cleanup | | /* System header to define __stub macros and hopefully few prototypes, | which can conflict with char _malloc_thread_cleanup (); below. | Prefer to if __STDC__ is defined, since | exists even on freestanding compilers. */ | | #ifdef __STDC__ | # include | #else | # include | #endif | | #undef _malloc_thread_cleanup | | /* Override any GCC internal prototype to avoid an error. | Use char because int might match the return type of a GCC | builtin and then its argument prototype would still apply. */ | #ifdef __cplusplus | extern "C" | #endif | char _malloc_thread_cleanup (); | /* The GNU C library defines this for functions which it implements | to always fail with ENOSYS. Some functions are actually named | something starting with __ and the normal name is an alias. */ | #if defined __stub__malloc_thread_cleanup || defined __stub____malloc_thread_cleanup | choke me | #endif | | int | main () | { | return _malloc_thread_cleanup (); | ; | return 0; | } configure:11220: result: no configure:11235: checking for _pthread_mutex_init_calloc_cb configure:11235: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 Undefined symbols for architecture x86_64: "__pthread_mutex_init_calloc_cb", referenced from: _main in conftest-cb26cd.o ld: symbol(s) not found for architecture x86_64 clang: error: linker command failed with exit code 1 (use -v to see invocation) configure:11235: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define 
JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | /* end confdefs.h. */ | /* Define _pthread_mutex_init_calloc_cb to an innocuous variant, in case declares _pthread_mutex_init_calloc_cb. | For example, HP-UX 11i declares gettimeofday. */ | #define _pthread_mutex_init_calloc_cb innocuous__pthread_mutex_init_calloc_cb | | /* System header to define __stub macros and hopefully few prototypes, | which can conflict with char _pthread_mutex_init_calloc_cb (); below. | Prefer to if __STDC__ is defined, since | exists even on freestanding compilers. */ | | #ifdef __STDC__ | # include | #else | # include | #endif | | #undef _pthread_mutex_init_calloc_cb | | /* Override any GCC internal prototype to avoid an error. | Use char because int might match the return type of a GCC | builtin and then its argument prototype would still apply. */ | #ifdef __cplusplus | extern "C" | #endif | char _pthread_mutex_init_calloc_cb (); | /* The GNU C library defines this for functions which it implements | to always fail with ENOSYS. Some functions are actually named | something starting with __ and the normal name is an alias. */ | #if defined __stub__pthread_mutex_init_calloc_cb || defined __stub____pthread_mutex_init_calloc_cb | choke me | #endif | | int | main () | { | return _pthread_mutex_init_calloc_cb (); | ; | return 0; | } configure:11235: result: no configure:11335: checking whether C11 atomics is compilable configure:11364: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 configure:11364: $? = 0 configure:11372: result: yes configure:11382: checking whether GCC __atomic atomics is compilable configure:11405: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:72:9: warning: unused variable 'y' [-Wunused-variable] int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); ^ 1 warning generated. configure:11405: $? = 0 configure:11413: result: yes configure:11421: checking whether GCC 8-bit __atomic atomics is compilable configure:11444: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:73:11: warning: unused variable 'y' [-Wunused-variable] int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); ^ 1 warning generated. configure:11444: $? = 0 configure:11452: result: yes configure:11463: checking whether GCC __sync atomics is compilable configure:11485: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 configure:11485: $? = 0 configure:11493: result: yes configure:11501: checking whether GCC 8-bit __sync atomics is compilable configure:11523: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 configure:11523: $? 
= 0 configure:11531: result: yes configure:11542: checking whether Darwin OSAtomic*() is compilable configure:11572: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:79:3: warning: 'OSAtomicAdd32' is deprecated: first deprecated in macOS 10.12 - Use atomic_fetch_add_explicit(memory_order_relaxed) from <stdatomic.h> instead [-Wdeprecated-declarations] OSAtomicAdd32(1, x32p); ^ /usr/include/libkern/OSAtomicDeprecated.h:146:9: note: 'OSAtomicAdd32' has been explicitly marked deprecated here int32_t OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue ); ^ conftest.c:84:3: warning: 'OSAtomicAdd64' is deprecated: first deprecated in macOS 10.12 - Use atomic_fetch_add_explicit(memory_order_relaxed) from <stdatomic.h> instead [-Wdeprecated-declarations] OSAtomicAdd64(1, x64p); ^ /usr/include/libkern/OSAtomicDeprecated.h:231:9: note: 'OSAtomicAdd64' has been explicitly marked deprecated here int64_t OSAtomicAdd64( int64_t __theAmount, ^ 2 warnings generated. configure:11572: $? = 0 configure:11580: result: yes configure:11590: checking whether madvise(2) is compilable configure:11610: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 configure:11610: $? = 0 configure:11618: result: yes configure:11626: checking whether madvise(..., MADV_FREE) is compilable configure:11646: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 configure:11646: $? = 0 configure:11654: result: yes configure:11674: checking whether madvise(..., MADV_DONTNEED) is compilable configure:11694: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 configure:11694: $? = 0 configure:11702: result: yes configure:11711: checking whether madvise(..., MADV_DO[NT]DUMP) is compilable configure:11732: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:79:24: error: use of undeclared identifier 'MADV_DONTDUMP' madvise((void *)0, 0, MADV_DONTDUMP); ^ conftest.c:80:24: error: use of undeclared identifier 'MADV_DODUMP' madvise((void *)0, 0, MADV_DODUMP); ^ 2 errors generated. configure:11732: $?
= 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define JEMALLOC_PURGE_MADVISE_DONTNEED | /* end confdefs.h. */ | | #include <sys/mman.h> | | int | main () | { | | madvise((void *)0, 0, MADV_DONTDUMP); | madvise((void *)0, 0, MADV_DODUMP); | | ; | return 0; | } configure:11740: result: no configure:11749: checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable configure:11770: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:79:24: error: use of undeclared identifier 'MADV_HUGEPAGE' madvise((void *)0, 0, MADV_HUGEPAGE); ^ conftest.c:80:24: error: use of undeclared identifier 'MADV_NOHUGEPAGE' madvise((void *)0, 0, MADV_NOHUGEPAGE); ^ 2 errors generated. configure:11770: $?
= 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define JEMALLOC_PURGE_MADVISE_DONTNEED | /* end confdefs.h. */ | | #include <sys/mman.h> | | int | main () | { | | madvise((void *)0, 0, MADV_HUGEPAGE); | madvise((void *)0, 0, MADV_NOHUGEPAGE); | | ; | return 0; | } configure:11778: result: no configure:11794: checking for __builtin_clz configure:11819: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:79:61: warning: unused variable 'y' [-Wunused-variable] int y = __builtin_clz(x); ^ conftest.c:83:61: warning: unused variable 'y' [-Wunused-variable] int y = __builtin_clzl(x); ^ 2 warnings generated. configure:11819: $? = 0 configure:11827: result: yes configure:11837: checking whether Darwin os_unfair_lock_*() is compilable configure:11864: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 configure:11864: $?
= 0 configure:11872: result: yes configure:11939: checking whether glibc malloc hook is compilable configure:11966: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 Undefined symbols for architecture x86_64: "___free_hook", referenced from: _main in conftest-6b32c8.o "___malloc_hook", referenced from: _main in conftest-6b32c8.o "___realloc_hook", referenced from: _main in conftest-6b32c8.o ld: symbol(s) not found for architecture x86_64 clang: error: linker command failed with exit code 1 (use -v to see invocation) configure:11966: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define JEMALLOC_PURGE_MADVISE_DONTNEED | #define JEMALLOC_HAVE_BUILTIN_CLZ | #define JEMALLOC_OS_UNFAIR_LOCK | #define JEMALLOC_ZONE | #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) | /* end confdefs.h. 
*/ | | #include | | extern void (* __free_hook)(void *ptr); | extern void *(* __malloc_hook)(size_t size); | extern void *(* __realloc_hook)(void *ptr, size_t size); | | int | main () | { | | void *ptr = 0L; | if (__malloc_hook) ptr = __malloc_hook(1); | if (__realloc_hook) ptr = __realloc_hook(ptr, 2); | if (__free_hook && ptr) __free_hook(ptr); | | ; | return 0; | } configure:11974: result: no configure:11986: checking whether glibc memalign hook is compilable configure:12009: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 Undefined symbols for architecture x86_64: "___memalign_hook", referenced from: _main in conftest-186e6a.o ld: symbol(s) not found for architecture x86_64 clang: error: linker command failed with exit code 1 (use -v to see invocation) configure:12009: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define JEMALLOC_PURGE_MADVISE_DONTNEED | #define JEMALLOC_HAVE_BUILTIN_CLZ | #define JEMALLOC_OS_UNFAIR_LOCK | #define JEMALLOC_ZONE | #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) | /* end confdefs.h. 
*/ | | #include | | extern void *(* __memalign_hook)(size_t alignment, size_t size); | | int | main () | { | | void *ptr = 0L; | if (__memalign_hook) ptr = __memalign_hook(16, 7); | | ; | return 0; | } configure:12017: result: no configure:12029: checking whether pthreads adaptive mutexes is compilable configure:12052: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:85:36: error: use of undeclared identifier 'PTHREAD_MUTEX_ADAPTIVE_NP' pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); ^ 1 error generated. configure:12052: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define JEMALLOC_PURGE_MADVISE_DONTNEED | #define JEMALLOC_HAVE_BUILTIN_CLZ | #define JEMALLOC_OS_UNFAIR_LOCK | #define JEMALLOC_ZONE | #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) | /* end confdefs.h. 
*/ | | #include | | int | main () | { | | pthread_mutexattr_t attr; | pthread_mutexattr_init(&attr); | pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); | pthread_mutexattr_destroy(&attr); | | ; | return 0; | } configure:12060: result: no configure:12071: checking whether compiler supports -D_GNU_SOURCE configure:12102: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_GNU_SOURCE -D_REENTRANT conftest.c >&5 configure:12102: $? = 0 configure:12104: result: yes configure:12122: checking whether compiler supports -Werror configure:12153: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_GNU_SOURCE -Werror -D_REENTRANT conftest.c >&5 configure:12153: $? = 0 configure:12155: result: yes configure:12173: checking whether compiler supports -herror_on_warning configure:12204: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_GNU_SOURCE -Werror -herror_on_warning -D_REENTRANT conftest.c >&5 clang: error: unknown argument: '-herror_on_warning' configure:12204: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define 
JEMALLOC_PURGE_MADVISE_DONTNEED | #define JEMALLOC_HAVE_BUILTIN_CLZ | #define JEMALLOC_OS_UNFAIR_LOCK | #define JEMALLOC_ZONE | #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) | /* end confdefs.h. */ | | | int | main () | { | | return 0; | | ; | return 0; | } configure:12210: result: no configure:12224: checking whether strerror_r returns char with gnu source is compilable configure:12249: /usr/bin/clang -o conftest -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_GNU_SOURCE -Werror -D_REENTRANT conftest.c -lstdc++ -pthread >&5 conftest.c:87:9: error: incompatible integer to pointer conversion initializing 'char *' with an expression of type 'int' [-Werror,-Wint-conversion] char *error = strerror_r(EINVAL, buffer, 100); ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1 error generated. configure:12249: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define JEMALLOC_PURGE_MADVISE_DONTNEED | #define JEMALLOC_HAVE_BUILTIN_CLZ | #define JEMALLOC_OS_UNFAIR_LOCK | #define JEMALLOC_ZONE | #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) | /* end confdefs.h. 
*/ | | #include <errno.h> | #include <stdio.h> | #include <string.h> | #include <stdlib.h> | | int | main () | { | | char *buffer = (char *) malloc(100); | char *error = strerror_r(EINVAL, buffer, 100); | printf("%s\n", error); | | ; | return 0; | } configure:12257: result: no configure:12273: checking for stdbool.h that conforms to C99 configure:12340: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c >&5 conftest.c:125:24: warning: address of 's' will always evaluate to 'true' [-Wpointer-bool-conversion] bool e = &s; ~ ^ conftest.c:129:23: warning: address of array 'a' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:129:28: warning: address of array 'b' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:129:33: warning: address of array 'c' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:129:38: warning: address of array 'd' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:129:48: warning: address of array 'f' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:129:53: warning: address of array 'g' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:129:58: warning: address of array 'h' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:129:63: warning: address of array 'i' will always evaluate to 'true' [-Wpointer-bool-conversion] return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l ~^ conftest.c:130:30: warning: address of array 'n' will always evaluate to 'true' [-Wpointer-bool-conversion] ~^ conftest.c:130:35: warning: address of array 'o' will always evaluate to 'true' [-Wpointer-bool-conversion] ~^ conftest.c:130:40: warning: address of array 'p' will always evaluate to 'true' [-Wpointer-bool-conversion] ~^ 12 warnings generated. configure:12340: $? = 0 configure:12347: result: yes configure:12349: checking for _Bool configure:12349: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c >&5 configure:12349: $? = 0 configure:12349: /usr/bin/clang -c -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops -D_REENTRANT conftest.c >&5 conftest.c:112:20: error: expected expression if (sizeof ((_Bool))) ^ 1 error generated. configure:12349: $?
= 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "" | #define PACKAGE_TARNAME "" | #define PACKAGE_VERSION "" | #define PACKAGE_STRING "" | #define PACKAGE_BUGREPORT "" | #define PACKAGE_URL "" | #define JEMALLOC_HAS_RESTRICT 1 | #define HAVE_CXX14 1 | #define STDC_HEADERS 1 | #define HAVE_SYS_TYPES_H 1 | #define HAVE_SYS_STAT_H 1 | #define HAVE_STDLIB_H 1 | #define HAVE_STRING_H 1 | #define HAVE_MEMORY_H 1 | #define HAVE_STRINGS_H 1 | #define HAVE_INTTYPES_H 1 | #define HAVE_STDINT_H 1 | #define HAVE_UNISTD_H 1 | #define SIZEOF_VOID_P 8 | #define LG_SIZEOF_PTR 3 | #define SIZEOF_INT 4 | #define LG_SIZEOF_INT 2 | #define SIZEOF_LONG 8 | #define LG_SIZEOF_LONG 3 | #define SIZEOF_LONG_LONG 8 | #define LG_SIZEOF_LONG_LONG 3 | #define SIZEOF_INTMAX_T 8 | #define LG_SIZEOF_INTMAX_T 3 | #define HAVE_CPU_SPINWAIT 1 | #define CPU_SPINWAIT __asm__ volatile("pause") | #define LG_VADDR 48 | #define LG_VADDR 48 | #define JEMALLOC_USABLE_SIZE_CONST const | #define JEMALLOC_HAVE_ATTR | #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE | #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF | #define JEMALLOC_PREFIX "je_" | #define JEMALLOC_CPREFIX "JE_" | #define JEMALLOC_OVERRIDE_VALLOC | #define JEMALLOC_PRIVATE_NAMESPACE je_ | #define JEMALLOC_CONFIG_MALLOC_CONF "" | #define JEMALLOC_STATS | #define JEMALLOC_MAPS_COALESCE | #define JEMALLOC_FILL | #define JEMALLOC_CACHE_OBLIVIOUS | #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable | #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll | #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl | #define JEMALLOC_INTERNAL_FFS __builtin_ffs | #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount | #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl | #define LG_PAGE 12 | #define LG_HUGEPAGE 21 | #define JEMALLOC_HAVE_PTHREAD | #define HAVE_PTHREAD_H 1 | #define HAVE_DLFCN_H 1 | #define JEMALLOC_HAVE_DLSYM | #define JEMALLOC_HAVE_PTHREAD_ATFORK | #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 | #define JEMALLOC_HAVE_ISSETUGID | #define JEMALLOC_C11_ATOMICS 1 | #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 | #define JEMALLOC_GCC_SYNC_ATOMICS 1 | #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 | #define JEMALLOC_OSATOMIC | #define JEMALLOC_HAVE_MADVISE | #define JEMALLOC_PURGE_MADVISE_FREE | #define JEMALLOC_PURGE_MADVISE_DONTNEED | #define JEMALLOC_HAVE_BUILTIN_CLZ | #define JEMALLOC_OS_UNFAIR_LOCK | #define JEMALLOC_ZONE | #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) | /* end confdefs.h. */ | #include <stdio.h> | #ifdef HAVE_SYS_TYPES_H | # include <sys/types.h> | #endif | #ifdef HAVE_SYS_STAT_H | # include <sys/stat.h> | #endif | #ifdef STDC_HEADERS | # include <stdlib.h> | # include <stddef.h> | #else | # ifdef HAVE_STDLIB_H | # include <stdlib.h> | # endif | #endif | #ifdef HAVE_STRING_H | # if !defined STDC_HEADERS && defined HAVE_MEMORY_H | # include <memory.h> | # endif | # include <string.h> | #endif | #ifdef HAVE_STRINGS_H | # include <strings.h> | #endif | #ifdef HAVE_INTTYPES_H | # include <inttypes.h> | #endif | #ifdef HAVE_STDINT_H | # include <stdint.h> | #endif | #ifdef HAVE_UNISTD_H | # include <unistd.h> | #endif | int | main () | { | if (sizeof ((_Bool))) | return 0; | ; | return 0; | } configure:12349: result: yes configure:12514: creating ./config.status ## ---------------------- ## ## Running config.status. ## ## ---------------------- ## This file was extended by config.status, which was generated by GNU Autoconf 2.69.
Invocation command line was CONFIG_FILES = CONFIG_HEADERS = CONFIG_LINKS = CONFIG_COMMANDS = $ ./config.status on air config.status:1040: creating Makefile config.status:1040: creating jemalloc.pc config.status:1040: creating doc/html.xsl config.status:1040: creating doc/manpages.xsl config.status:1040: creating doc/jemalloc.xml config.status:1040: creating include/jemalloc/jemalloc_macros.h config.status:1040: creating include/jemalloc/jemalloc_protos.h config.status:1040: creating include/jemalloc/jemalloc_typedefs.h config.status:1040: creating include/jemalloc/internal/jemalloc_preamble.h config.status:1040: creating test/test.sh config.status:1040: creating test/include/test/jemalloc_test.h config.status:1040: creating config.stamp config.status:1040: creating bin/jemalloc-config config.status:1040: creating bin/jemalloc.sh config.status:1040: creating bin/jeprof config.status:1040: creating include/jemalloc/jemalloc_defs.h config.status:1040: creating include/jemalloc/internal/jemalloc_internal_defs.h config.status:1040: creating test/include/test/jemalloc_test_defs.h config.status:1229: executing include/jemalloc/internal/public_symbols.txt commands config.status:1229: executing include/jemalloc/internal/private_symbols.awk commands config.status:1229: executing include/jemalloc/internal/private_symbols_jet.awk commands config.status:1229: executing include/jemalloc/internal/public_namespace.h commands config.status:1229: executing include/jemalloc/internal/public_unnamespace.h commands config.status:1229: executing include/jemalloc/jemalloc_protos_jet.h commands config.status:1229: executing include/jemalloc/jemalloc_rename.h commands config.status:1229: executing include/jemalloc/jemalloc_mangle.h commands config.status:1229: executing include/jemalloc/jemalloc_mangle_jet.h commands config.status:1229: executing include/jemalloc/jemalloc.h commands configure:13816: result: =============================================================================== configure:13818: result: jemalloc version : 0.0.0-0-g0000000000000000000000000000000000000000 configure:13820: result: library revision : 2 configure:13822: result: configure:13824: result: CONFIG : CC=/usr/bin/clang CXX=/usr/bin/clang++ configure:13826: result: CC : /usr/bin/clang configure:13828: result: CONFIGURE_CFLAGS : -std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops configure:13830: result: SPECIFIED_CFLAGS : configure:13832: result: EXTRA_CFLAGS : configure:13834: result: CPPFLAGS : -D_REENTRANT configure:13836: result: CXX : /usr/bin/clang++ -std=c++14 configure:13838: result: CONFIGURE_CXXFLAGS : -Wall -Wextra -g3 -O3 configure:13840: result: SPECIFIED_CXXFLAGS : configure:13842: result: EXTRA_CXXFLAGS : configure:13844: result: LDFLAGS : configure:13846: result: EXTRA_LDFLAGS : configure:13848: result: DSO_LDFLAGS : -shared -Wl,-install_name,$(LIBDIR)/$(@F) configure:13850: result: LIBS : -lstdc++ -pthread configure:13852: result: RPATH_EXTRA : configure:13854: result: configure:13856: result: XSLTPROC : /usr/bin/xsltproc configure:13858: result: XSLROOT : configure:13860: result: configure:13862: result: PREFIX : /usr/local configure:13864: result: BINDIR : /usr/local/bin configure:13866: result: DATADIR : /usr/local/share configure:13868: result: INCLUDEDIR : /usr/local/include configure:13870: result: LIBDIR : /usr/local/lib configure:13872: result: MANDIR : /usr/local/share/man configure:13874: result: configure:13876: result: srcroot : 
configure:13878: result: abs_srcroot : /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ configure:13880: result: objroot : configure:13882: result: abs_objroot : /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ configure:13884: result: configure:13886: result: JEMALLOC_PREFIX : je_ configure:13888: result: JEMALLOC_PRIVATE_NAMESPACE configure:13890: result: : je_ configure:13892: result: install_suffix : configure:13894: result: malloc_conf : configure:13896: result: shared libs : 1 configure:13898: result: static libs : 1 configure:13900: result: autogen : 0 configure:13902: result: debug : 0 configure:13904: result: stats : 1 configure:13906: result: experimetal_smallocx : 0 configure:13908: result: prof : 0 configure:13910: result: prof-libunwind : 0 configure:13912: result: prof-libgcc : 0 configure:13914: result: prof-gcc : 0 configure:13916: result: fill : 1 configure:13918: result: utrace : 0 configure:13920: result: xmalloc : 0 configure:13922: result: log : 0 configure:13924: result: lazy_lock : 0 configure:13926: result: cache-oblivious : 1 configure:13928: result: cxx : 1 configure:13930: result: =============================================================================== ## ---------------- ## ## Cache variables. ## ## ---------------- ## ac_cv_big_endian=0 ac_cv_build=x86_64-apple-darwin18.2.0 ac_cv_c_bigendian=no ac_cv_c_compiler_gnu=yes ac_cv_cxx_compiler_gnu=yes ac_cv_env_CCC_set= ac_cv_env_CCC_value= ac_cv_env_CC_set=set ac_cv_env_CC_value=/usr/bin/clang ac_cv_env_CFLAGS_set= ac_cv_env_CFLAGS_value= ac_cv_env_CPPFLAGS_set= ac_cv_env_CPPFLAGS_value= ac_cv_env_CPP_set= ac_cv_env_CPP_value= ac_cv_env_CXXFLAGS_set= ac_cv_env_CXXFLAGS_value= ac_cv_env_CXX_set=set ac_cv_env_CXX_value=/usr/bin/clang++ ac_cv_env_LDFLAGS_set= ac_cv_env_LDFLAGS_value= ac_cv_env_LIBS_set= ac_cv_env_LIBS_value= ac_cv_env_build_alias_set= ac_cv_env_build_alias_value= ac_cv_env_host_alias_set= ac_cv_env_host_alias_value= ac_cv_env_target_alias_set= ac_cv_env_target_alias_value= ac_cv_func__malloc_thread_cleanup=no ac_cv_func__pthread_mutex_init_calloc_cb=no ac_cv_func_dlsym=yes ac_cv_func_issetugid=yes ac_cv_func_memalign=no ac_cv_func_sbrk=yes ac_cv_func_sched_getcpu=no ac_cv_func_sched_setaffinity=no ac_cv_func_secure_getenv=no ac_cv_func_valloc=yes ac_cv_header_dlfcn_h=yes ac_cv_header_inttypes_h=yes ac_cv_header_malloc_h=no ac_cv_header_memory_h=yes ac_cv_header_pthread_h=yes ac_cv_header_stdbool_h=yes ac_cv_header_stdc=yes ac_cv_header_stdint_h=yes ac_cv_header_stdlib_h=yes ac_cv_header_string_h=yes ac_cv_header_strings_h=yes ac_cv_header_sys_stat_h=yes ac_cv_header_sys_types_h=yes ac_cv_header_unistd_h=yes ac_cv_host=x86_64-apple-darwin18.2.0 ac_cv_lib_pthread_pthread_create=yes ac_cv_objext=o ac_cv_path_AUTOCONF=/usr/local/bin/autoconf ac_cv_path_EGREP='/usr/local/bin/ggrep -E' ac_cv_path_GREP=/usr/local/bin/ggrep ac_cv_path_LD=/usr/bin/ld ac_cv_path_XSLTPROC=/usr/bin/xsltproc ac_cv_path_install='/usr/local/bin/ginstall -c' ac_cv_prog_AWK=gawk ac_cv_prog_CPP='/usr/bin/clang -E' ac_cv_prog_ac_ct_AR=ar ac_cv_prog_ac_ct_CC=/usr/bin/clang ac_cv_prog_ac_ct_NM=nm ac_cv_prog_ac_ct_RANLIB=ranlib ac_cv_prog_cc_c89= ac_cv_prog_cc_g=yes ac_cv_prog_cxx_g=yes ac_cv_search_clock_gettime='none required' ac_cv_search_log='none required' ac_cv_sizeof_int=4 ac_cv_sizeof_intmax_t=8 ac_cv_sizeof_long=8 ac_cv_sizeof_long_long=8 ac_cv_sizeof_void_p=8 ac_cv_type__Bool=yes ax_cv_cxx_compile_cxx14=no ax_cv_cxx_compile_cxx14__std_cpp14=yes je_cv_alloc_size=yes je_cv_attribute=yes 
je_cv_builtin_clz=yes je_cv_c11_atomics=yes je_cv_cflags_added= je_cv_clock_monotonic=no je_cv_clock_monotonic_coarse=no je_cv_cray=no je_cv_cray_prgenv_wrapper= je_cv_cxxflags_added=-O3 je_cv_format_gnu_printf=no je_cv_format_printf=yes je_cv_gcc_atomic_atomics=yes je_cv_gcc_builtin_ffsl=yes je_cv_gcc_builtin_popcountl=yes je_cv_gcc_builtin_unreachable=yes je_cv_gcc_sync_atomics=yes je_cv_gcc_u8_atomic_atomics=yes je_cv_gcc_u8_sync_atomics=yes je_cv_glibc_malloc_hook=no je_cv_glibc_memalign_hook=no je_cv_lg_hugepage=21 je_cv_lg_page=12 je_cv_lg_vaddr=48 je_cv_libstdcxx=yes je_cv_mach_absolute_time=yes je_cv_madv_dontdump=no je_cv_madv_dontneed=yes je_cv_madv_free=yes je_cv_madvise=yes je_cv_os_unfair_lock=yes je_cv_osatomic=yes je_cv_pause=yes je_cv_pthread_atfork=yes je_cv_pthread_mutex_adaptive_np=no je_cv_pthread_setname_np=no je_cv_strerror_r_returns_char_with_gnu_source=no je_cv_syscall=no je_cv_thp=no je_cv_tls_model=yes je_cv_utrace=no ## ----------------- ## ## Output variables. ## ## ----------------- ## AR='ar' ARFLAGS='crus' AROUT=' $@' AUTOCONF='/usr/local/bin/autoconf' AWK='gawk' BINDIR='/usr/local/bin' CC='/usr/bin/clang' CC_MM='1' CFLAGS='-std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops' CONFIG='CC=/usr/bin/clang CXX=/usr/bin/clang++' CONFIGURE_CFLAGS='-std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops' CONFIGURE_CXXFLAGS='-Wall -Wextra -g3 -O3' CPP='/usr/bin/clang -E' CPPFLAGS='-D_REENTRANT' CTARGET='-o $@' CXX='/usr/bin/clang++ -std=c++14' CXXFLAGS='-Wall -Wextra -g3 -O3' DATADIR='/usr/local/share' DEFS='-DHAVE_CONFIG_H' DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' DUMP_SYMS='nm -a' ECHO_C='\c' ECHO_N='' ECHO_T='' EGREP='/usr/local/bin/ggrep -E' EXEEXT='' EXTRA_CFLAGS='' EXTRA_CXXFLAGS='' EXTRA_LDFLAGS='' GREP='/usr/local/bin/ggrep' HAVE_CXX14='1' INCLUDEDIR='/usr/local/include' INSTALL_DATA='${INSTALL} -m 644' INSTALL_PROGRAM='${INSTALL}' INSTALL_SCRIPT='${INSTALL}' JEMALLOC_CPREFIX='JE_' JEMALLOC_PREFIX='je_' LD='/usr/bin/ld' LDFLAGS='' LDTARGET='-o $@' LD_PRELOAD_VAR='DYLD_INSERT_LIBRARIES' LIBDIR='/usr/local/lib' LIBOBJS='' LIBS='-lstdc++ -pthread' LM='' LTLIBOBJS='' MANDIR='/usr/local/share/man' MKLIB='' NM='nm' OBJEXT='o' PACKAGE_BUGREPORT='' PACKAGE_NAME='' PACKAGE_STRING='' PACKAGE_TARNAME='' PACKAGE_URL='' PACKAGE_VERSION='' PATH_SEPARATOR=':' PIC_CFLAGS='-fPIC -DPIC' PREFIX='/usr/local' RANLIB='ranlib' RPATH='' RPATH_EXTRA='' SHELL='/bin/sh' SOREV='2.dylib' SPECIFIED_CFLAGS='' SPECIFIED_CXXFLAGS='' TEST_LD_MODE='' XSLROOT='' XSLTPROC='/usr/bin/xsltproc' a='a' abi='macho' abs_objroot='/Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/' abs_srcroot='/Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/' ac_ct_CC='/usr/bin/clang' ac_ct_CXX='' bindir='${exec_prefix}/bin' build='x86_64-apple-darwin18.2.0' build_alias='' build_cpu='x86_64' build_os='darwin18.2.0' build_vendor='apple' cfghdrs_in='include/jemalloc/jemalloc_defs.h.in include/jemalloc/internal/jemalloc_internal_defs.h.in include/jemalloc/internal/private_symbols.sh include/jemalloc/internal/private_namespace.sh include/jemalloc/internal/public_namespace.sh include/jemalloc/internal/public_unnamespace.sh include/jemalloc/jemalloc_rename.sh include/jemalloc/jemalloc_mangle.sh include/jemalloc/jemalloc.sh test/include/test/jemalloc_test_defs.h.in' cfghdrs_out='include/jemalloc/jemalloc_defs.h include/jemalloc/jemalloc.h 
include/jemalloc/internal/private_symbols.awk include/jemalloc/internal/private_symbols_jet.awk include/jemalloc/internal/public_symbols.txt include/jemalloc/internal/public_namespace.h include/jemalloc/internal/public_unnamespace.h include/jemalloc/jemalloc_protos_jet.h include/jemalloc/jemalloc_rename.h include/jemalloc/jemalloc_mangle.h include/jemalloc/jemalloc_mangle_jet.h include/jemalloc/internal/jemalloc_internal_defs.h test/include/test/jemalloc_test_defs.h' cfgoutputs_in='Makefile.in jemalloc.pc.in doc/html.xsl.in doc/manpages.xsl.in doc/jemalloc.xml.in include/jemalloc/jemalloc_macros.h.in include/jemalloc/jemalloc_protos.h.in include/jemalloc/jemalloc_typedefs.h.in include/jemalloc/internal/jemalloc_preamble.h.in test/test.sh.in test/include/test/jemalloc_test.h.in' cfgoutputs_out='Makefile jemalloc.pc doc/html.xsl doc/manpages.xsl doc/jemalloc.xml include/jemalloc/jemalloc_macros.h include/jemalloc/jemalloc_protos.h include/jemalloc/jemalloc_typedefs.h include/jemalloc/internal/jemalloc_preamble.h test/test.sh test/include/test/jemalloc_test.h' datadir='${datarootdir}' datarootdir='${prefix}/share' docdir='${datarootdir}/doc/${PACKAGE}' dvidir='${docdir}' enable_autogen='0' enable_cache_oblivious='1' enable_cxx='1' enable_debug='0' enable_experimental_smallocx='0' enable_extra_size_check='0' enable_fill='1' enable_initial_exec_tls='1' enable_lazy_lock='0' enable_log='0' enable_prof='0' enable_readlinkat='0' enable_shared='1' enable_static='1' enable_stats='1' enable_tls='0' enable_utrace='0' enable_xmalloc='0' enable_zone_allocator='1' exe='' exec_prefix='/usr/local' host='x86_64-apple-darwin18.2.0' host_alias='' host_cpu='x86_64' host_os='darwin18.2.0' host_vendor='apple' htmldir='${docdir}' importlib='dylib' includedir='${prefix}/include' infodir='${datarootdir}/info' install_suffix='' je_='je_' jemalloc_version='0.0.0-0-g0000000000000000000000000000000000000000' jemalloc_version_bugfix='0' jemalloc_version_gid='0000000000000000000000000000000000000000' jemalloc_version_major='0' jemalloc_version_minor='0' jemalloc_version_nrev='0' libdir='${exec_prefix}/lib' libdl='' libexecdir='${exec_prefix}/libexec' libprefix='lib' link_whole_archive='0' localedir='${datarootdir}/locale' localstatedir='${prefix}/var' mandir='${datarootdir}/man' o='o' objroot='' oldincludedir='/usr/include' pdfdir='${docdir}' prefix='/usr/local' private_namespace='je_' program_transform_name='s,x,x,' psdir='${docdir}' rev='2' sbindir='${exec_prefix}/sbin' sharedstatedir='${prefix}/com' so='dylib' srcroot='' sysconfdir='${prefix}/etc' target_alias='' ## ----------- ## ## confdefs.h. 
## ## ----------- ## /* confdefs.h */ #define PACKAGE_NAME "" #define PACKAGE_TARNAME "" #define PACKAGE_VERSION "" #define PACKAGE_STRING "" #define PACKAGE_BUGREPORT "" #define PACKAGE_URL "" #define JEMALLOC_HAS_RESTRICT 1 #define HAVE_CXX14 1 #define STDC_HEADERS 1 #define HAVE_SYS_TYPES_H 1 #define HAVE_SYS_STAT_H 1 #define HAVE_STDLIB_H 1 #define HAVE_STRING_H 1 #define HAVE_MEMORY_H 1 #define HAVE_STRINGS_H 1 #define HAVE_INTTYPES_H 1 #define HAVE_STDINT_H 1 #define HAVE_UNISTD_H 1 #define SIZEOF_VOID_P 8 #define LG_SIZEOF_PTR 3 #define SIZEOF_INT 4 #define LG_SIZEOF_INT 2 #define SIZEOF_LONG 8 #define LG_SIZEOF_LONG 3 #define SIZEOF_LONG_LONG 8 #define LG_SIZEOF_LONG_LONG 3 #define SIZEOF_INTMAX_T 8 #define LG_SIZEOF_INTMAX_T 3 #define HAVE_CPU_SPINWAIT 1 #define CPU_SPINWAIT __asm__ volatile("pause") #define LG_VADDR 48 #define LG_VADDR 48 #define JEMALLOC_USABLE_SIZE_CONST const #define JEMALLOC_HAVE_ATTR #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF #define JEMALLOC_PREFIX "je_" #define JEMALLOC_CPREFIX "JE_" #define JEMALLOC_OVERRIDE_VALLOC #define JEMALLOC_PRIVATE_NAMESPACE je_ #define JEMALLOC_CONFIG_MALLOC_CONF "" #define JEMALLOC_STATS #define JEMALLOC_MAPS_COALESCE #define JEMALLOC_FILL #define JEMALLOC_CACHE_OBLIVIOUS #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl #define JEMALLOC_INTERNAL_FFS __builtin_ffs #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl #define LG_PAGE 12 #define LG_HUGEPAGE 21 #define JEMALLOC_HAVE_PTHREAD #define HAVE_PTHREAD_H 1 #define HAVE_DLFCN_H 1 #define JEMALLOC_HAVE_DLSYM #define JEMALLOC_HAVE_PTHREAD_ATFORK #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 #define JEMALLOC_HAVE_ISSETUGID #define JEMALLOC_C11_ATOMICS 1 #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 #define JEMALLOC_GCC_SYNC_ATOMICS 1 #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 #define JEMALLOC_OSATOMIC #define JEMALLOC_HAVE_MADVISE #define JEMALLOC_PURGE_MADVISE_FREE #define JEMALLOC_PURGE_MADVISE_DONTNEED #define JEMALLOC_HAVE_BUILTIN_CLZ #define JEMALLOC_OS_UNFAIR_LOCK #define JEMALLOC_ZONE #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) #define HAVE__BOOL 1 #define HAVE_STDBOOL_H 1 configure: exit 0 
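The config.status script reproduced next consumes the cache and output variables logged above: it instantiates each `*.in` template by piping it through a generated awk program that swaps `@VAR@` tokens for the values configure recorded (the `S[...]` table further below). A minimal sketch of that token substitution, using a hypothetical template line and the `exec_prefix` value from this log — the real script builds `$ac_tmp/subs.awk` from the whole table rather than calling `gsub` per variable:

# hypothetical illustration only; not part of the packaged sources
printf 'libdir = @exec_prefix@/lib\n' |
    awk -v v='/usr/local' '{ gsub(/@exec_prefix@/, v); print }'
# prints: libdir = /usr/local/lib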
jemalloc-sys-0.3.2/rep/config.stamp010064400007650000024000000000001344617502700154350ustar0000000000000000
jemalloc-sys-0.3.2/rep/config.stamp.in010064400007650000024000000000001344617474000160430ustar0000000000000000
jemalloc-sys-0.3.2/rep/config.status010075500007650000024000001210111344617502700156450ustar0000000000000000#! /bin/sh # Generated by configure. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=${CONFIG_SHELL-/bin/sh} export SHELL ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh.
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. 
as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " # Files that config.status was made for. config_files=" Makefile jemalloc.pc:jemalloc.pc.in doc/html.xsl:doc/html.xsl.in doc/manpages.xsl:doc/manpages.xsl.in doc/jemalloc.xml:doc/jemalloc.xml.in include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in include/jemalloc/internal/jemalloc_preamble.h test/test.sh:test/test.sh.in test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof" config_headers=" include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" config_commands=" include/jemalloc/internal/public_symbols.txt include/jemalloc/internal/private_symbols.awk include/jemalloc/internal/private_symbols_jet.awk include/jemalloc/internal/public_namespace.h include/jemalloc/internal/public_unnamespace.h include/jemalloc/jemalloc_protos_jet.h include/jemalloc/jemalloc_rename.h include/jemalloc/jemalloc_mangle.h include/jemalloc/jemalloc_mangle_jet.h include/jemalloc/jemalloc.h" ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... 
-h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." ac_cs_config="'CC=/usr/bin/clang' 'CXX=/usr/bin/clang++'" ac_cs_version="\ config.status configured by ./configure, generated by GNU Autoconf 2.69, with options \"$ac_cs_config\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='/Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep' srcdir='.' INSTALL='/usr/local/bin/ginstall -c' AWK='gawk' test -n "$AWK" || AWK=awk # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi if $ac_cs_recheck; then set X /bin/sh './configure' 'CC=/usr/bin/clang' 'CXX=/usr/bin/clang++' $ac_configure_extra_args --no-create --no-recursion shift $as_echo "running CONFIG_SHELL=/bin/sh $*" >&6 CONFIG_SHELL='/bin/sh' export CONFIG_SHELL exec "$@" fi exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 # # INIT-COMMANDS # srcdir="." 
objroot="" mangling_map="" public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_0000000000000000000000000000000000000000 nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx valloc" JEMALLOC_PREFIX="je_" srcdir="." objroot="" public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_0000000000000000000000000000000000000000 nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx valloc" wrap_syms=" pthread_create" SYM_PREFIX="_" JEMALLOC_PREFIX="je_" srcdir="." objroot="" public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_0000000000000000000000000000000000000000 nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx valloc" wrap_syms=" pthread_create" SYM_PREFIX="_" srcdir="." objroot="" srcdir="." objroot="" srcdir="." objroot="" srcdir="." objroot="" srcdir="." objroot="" srcdir="." objroot="" srcdir="." objroot="" install_suffix="" # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;; "include/jemalloc/internal/private_symbols.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols.awk" ;; "include/jemalloc/internal/private_symbols_jet.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols_jet.awk" ;; "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;; "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;; "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;; "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;; "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;; "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;; "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. 
if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && cat >>"$ac_tmp/subs1.awk" <<\_ACAWK && S["LTLIBOBJS"]="" S["LIBOBJS"]="" S["cfgoutputs_out"]="Makefile jemalloc.pc doc/html.xsl doc/manpages.xsl doc/jemalloc.xml include/jemalloc/jemalloc_macros.h include/jemalloc/jemalloc_protos.h include/je"\ "malloc/jemalloc_typedefs.h include/jemalloc/internal/jemalloc_preamble.h test/test.sh test/include/test/jemalloc_test.h" S["cfgoutputs_in"]="Makefile.in jemalloc.pc.in doc/html.xsl.in doc/manpages.xsl.in doc/jemalloc.xml.in include/jemalloc/jemalloc_macros.h.in include/jemalloc/jemalloc_p"\ "rotos.h.in include/jemalloc/jemalloc_typedefs.h.in include/jemalloc/internal/jemalloc_preamble.h.in test/test.sh.in test/include/test/jemalloc_test."\ "h.in" S["cfghdrs_out"]="include/jemalloc/jemalloc_defs.h include/jemalloc/jemalloc.h include/jemalloc/internal/private_symbols.awk include/jemalloc/internal/private_symbols"\ "_jet.awk include/jemalloc/internal/public_symbols.txt include/jemalloc/internal/public_namespace.h include/jemalloc/internal/public_unnamespace.h in"\ "clude/jemalloc/jemalloc_protos_jet.h include/jemalloc/jemalloc_rename.h include/jemalloc/jemalloc_mangle.h include/jemalloc/jemalloc_mangle_jet.h in"\ "clude/jemalloc/internal/jemalloc_internal_defs.h test/include/test/jemalloc_test_defs.h" S["cfghdrs_in"]="include/jemalloc/jemalloc_defs.h.in include/jemalloc/internal/jemalloc_internal_defs.h.in include/jemalloc/internal/private_symbols.sh include/jemal"\ "loc/internal/private_namespace.sh include/jemalloc/internal/public_namespace.sh include/jemalloc/internal/public_unnamespace.sh include/jemalloc/jem"\ "alloc_rename.sh include/jemalloc/jemalloc_mangle.sh include/jemalloc/jemalloc.sh test/include/test/jemalloc_test_defs.h.in" S["enable_initial_exec_tls"]="1" S["enable_zone_allocator"]="1" S["enable_tls"]="0" S["enable_lazy_lock"]="0" 
S["libdl"]="" S["enable_extra_size_check"]="0" S["enable_readlinkat"]="0" S["enable_log"]="0" S["enable_cache_oblivious"]="1" S["enable_xmalloc"]="0" S["enable_utrace"]="0" S["enable_fill"]="1" S["enable_prof"]="0" S["enable_experimental_smallocx"]="0" S["enable_stats"]="1" S["enable_debug"]="0" S["je_"]="je_" S["install_suffix"]="" S["private_namespace"]="je_" S["JEMALLOC_CPREFIX"]="JE_" S["JEMALLOC_PREFIX"]="je_" S["enable_static"]="1" S["enable_shared"]="1" S["AUTOCONF"]="/usr/local/bin/autoconf" S["LD"]="/usr/bin/ld" S["RANLIB"]="ranlib" S["INSTALL_DATA"]="${INSTALL} -m 644" S["INSTALL_SCRIPT"]="${INSTALL}" S["INSTALL_PROGRAM"]="${INSTALL}" S["enable_autogen"]="0" S["RPATH_EXTRA"]="" S["LM"]="" S["CC_MM"]="1" S["DUMP_SYMS"]="nm -a" S["AROUT"]=" $@" S["ARFLAGS"]="crus" S["MKLIB"]="" S["TEST_LD_MODE"]="" S["LDTARGET"]="-o $@" S["CTARGET"]="-o $@" S["PIC_CFLAGS"]="-fPIC -DPIC" S["SOREV"]="2.dylib" S["EXTRA_LDFLAGS"]="" S["DSO_LDFLAGS"]="-shared -Wl,-install_name,$(LIBDIR)/$(@F)" S["link_whole_archive"]="0" S["libprefix"]="lib" S["exe"]="" S["a"]="a" S["o"]="o" S["importlib"]="dylib" S["so"]="dylib" S["LD_PRELOAD_VAR"]="DYLD_INSERT_LIBRARIES" S["RPATH"]="" S["abi"]="macho" S["jemalloc_version_gid"]="0000000000000000000000000000000000000000" S["jemalloc_version_nrev"]="0" S["jemalloc_version_bugfix"]="0" S["jemalloc_version_minor"]="0" S["jemalloc_version_major"]="0" S["jemalloc_version"]="0.0.0-0-g0000000000000000000000000000000000000000" S["AWK"]="gawk" S["NM"]="nm" S["AR"]="ar" S["host_os"]="darwin18.2.0" S["host_vendor"]="apple" S["host_cpu"]="x86_64" S["host"]="x86_64-apple-darwin18.2.0" S["build_os"]="darwin18.2.0" S["build_vendor"]="apple" S["build_cpu"]="x86_64" S["build"]="x86_64-apple-darwin18.2.0" S["EGREP"]="/usr/local/bin/ggrep -E" S["GREP"]="/usr/local/bin/ggrep" S["EXTRA_CXXFLAGS"]="" S["SPECIFIED_CXXFLAGS"]="" S["CONFIGURE_CXXFLAGS"]="-Wall -Wextra -g3 -O3" S["enable_cxx"]="1" S["HAVE_CXX14"]="1" S["ac_ct_CXX"]="" S["CXXFLAGS"]="-Wall -Wextra -g3 -O3" S["CXX"]="/usr/bin/clang++ -std=c++14" S["CPP"]="/usr/bin/clang -E" S["EXTRA_CFLAGS"]="" S["SPECIFIED_CFLAGS"]="" S["CONFIGURE_CFLAGS"]="-std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops" S["OBJEXT"]="o" S["EXEEXT"]="" S["ac_ct_CC"]="/usr/bin/clang" S["CPPFLAGS"]="-D_REENTRANT" S["LDFLAGS"]="" S["CFLAGS"]="-std=gnu11 -Wall -Wextra -Wshorten-64-to-32 -Wsign-compare -Wundef -Wno-format-zero-length -pipe -g3 -O3 -funroll-loops" S["CC"]="/usr/bin/clang" S["XSLROOT"]="" S["XSLTPROC"]="/usr/bin/xsltproc" S["MANDIR"]="/usr/local/share/man" S["DATADIR"]="/usr/local/share" S["LIBDIR"]="/usr/local/lib" S["INCLUDEDIR"]="/usr/local/include" S["BINDIR"]="/usr/local/bin" S["PREFIX"]="/usr/local" S["abs_objroot"]="/Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/" S["objroot"]="" S["abs_srcroot"]="/Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/" S["srcroot"]="" S["rev"]="2" S["CONFIG"]="CC=/usr/bin/clang CXX=/usr/bin/clang++" S["target_alias"]="" S["host_alias"]="" S["build_alias"]="" S["LIBS"]="-lstdc++ -pthread" S["ECHO_T"]="" S["ECHO_N"]="" S["ECHO_C"]="\\c" S["DEFS"]="-DHAVE_CONFIG_H" S["mandir"]="${datarootdir}/man" S["localedir"]="${datarootdir}/locale" S["libdir"]="${exec_prefix}/lib" S["psdir"]="${docdir}" S["pdfdir"]="${docdir}" S["dvidir"]="${docdir}" S["htmldir"]="${docdir}" S["infodir"]="${datarootdir}/info" S["docdir"]="${datarootdir}/doc/${PACKAGE}" S["oldincludedir"]="/usr/include" S["includedir"]="${prefix}/include" 
S["localstatedir"]="${prefix}/var" S["sharedstatedir"]="${prefix}/com" S["sysconfdir"]="${prefix}/etc" S["datadir"]="${datarootdir}" S["datarootdir"]="${prefix}/share" S["libexecdir"]="${exec_prefix}/libexec" S["sbindir"]="${exec_prefix}/sbin" S["bindir"]="${exec_prefix}/bin" S["program_transform_name"]="s,x,x," S["prefix"]="/usr/local" S["exec_prefix"]="/usr/local" S["PACKAGE_URL"]="" S["PACKAGE_BUGREPORT"]="" S["PACKAGE_STRING"]="" S["PACKAGE_VERSION"]="" S["PACKAGE_TARNAME"]="" S["PACKAGE_NAME"]="" S["PATH_SEPARATOR"]=":" S["SHELL"]="/bin/sh" _ACAWK cat >>"$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { D["PACKAGE_NAME"]=" \"\"" D["PACKAGE_TARNAME"]=" \"\"" D["PACKAGE_VERSION"]=" \"\"" D["PACKAGE_STRING"]=" \"\"" D["PACKAGE_BUGREPORT"]=" \"\"" D["PACKAGE_URL"]=" \"\"" D["JEMALLOC_HAS_RESTRICT"]=" 1" D["HAVE_CXX14"]=" 1" D["STDC_HEADERS"]=" 1" D["HAVE_SYS_TYPES_H"]=" 1" D["HAVE_SYS_STAT_H"]=" 1" D["HAVE_STDLIB_H"]=" 1" D["HAVE_STRING_H"]=" 1" D["HAVE_MEMORY_H"]=" 1" D["HAVE_STRINGS_H"]=" 1" D["HAVE_INTTYPES_H"]=" 1" D["HAVE_STDINT_H"]=" 1" D["HAVE_UNISTD_H"]=" 1" D["SIZEOF_VOID_P"]=" 8" D["LG_SIZEOF_PTR"]=" 3" D["SIZEOF_INT"]=" 4" D["LG_SIZEOF_INT"]=" 2" D["SIZEOF_LONG"]=" 8" D["LG_SIZEOF_LONG"]=" 3" D["SIZEOF_LONG_LONG"]=" 8" D["LG_SIZEOF_LONG_LONG"]=" 3" D["SIZEOF_INTMAX_T"]=" 8" D["LG_SIZEOF_INTMAX_T"]=" 3" D["HAVE_CPU_SPINWAIT"]=" 1" D["CPU_SPINWAIT"]=" __asm__ volatile(\"pause\")" D["LG_VADDR"]=" 48" D["LG_VADDR"]=" 48" D["JEMALLOC_USABLE_SIZE_CONST"]=" const" D["JEMALLOC_HAVE_ATTR"]=" " D["JEMALLOC_HAVE_ATTR_ALLOC_SIZE"]=" " D["JEMALLOC_HAVE_ATTR_FORMAT_PRINTF"]=" " D["JEMALLOC_PREFIX"]=" \"je_\"" D["JEMALLOC_CPREFIX"]=" \"JE_\"" D["JEMALLOC_OVERRIDE_VALLOC"]=" " D["JEMALLOC_PRIVATE_NAMESPACE"]=" je_" D["JEMALLOC_CONFIG_MALLOC_CONF"]=" \"\"" D["JEMALLOC_STATS"]=" " D["JEMALLOC_MAPS_COALESCE"]=" " D["JEMALLOC_FILL"]=" " D["JEMALLOC_CACHE_OBLIVIOUS"]=" " D["JEMALLOC_INTERNAL_UNREACHABLE"]=" __builtin_unreachable" D["JEMALLOC_INTERNAL_FFSLL"]=" __builtin_ffsll" D["JEMALLOC_INTERNAL_FFSL"]=" __builtin_ffsl" D["JEMALLOC_INTERNAL_FFS"]=" __builtin_ffs" D["JEMALLOC_INTERNAL_POPCOUNT"]=" __builtin_popcount" D["JEMALLOC_INTERNAL_POPCOUNTL"]=" __builtin_popcountl" D["LG_PAGE"]=" 12" D["LG_HUGEPAGE"]=" 21" D["JEMALLOC_HAVE_PTHREAD"]=" " D["HAVE_PTHREAD_H"]=" 1" D["HAVE_DLFCN_H"]=" 1" D["JEMALLOC_HAVE_DLSYM"]=" " D["JEMALLOC_HAVE_PTHREAD_ATFORK"]=" " D["JEMALLOC_HAVE_MACH_ABSOLUTE_TIME"]=" 1" D["JEMALLOC_HAVE_ISSETUGID"]=" " D["JEMALLOC_C11_ATOMICS"]=" 1" D["JEMALLOC_GCC_ATOMIC_ATOMICS"]=" 1" D["JEMALLOC_GCC_U8_ATOMIC_ATOMICS"]=" 1" D["JEMALLOC_GCC_SYNC_ATOMICS"]=" 1" D["JEMALLOC_GCC_U8_SYNC_ATOMICS"]=" 1" 
D["JEMALLOC_OSATOMIC"]=" " D["JEMALLOC_HAVE_MADVISE"]=" " D["JEMALLOC_PURGE_MADVISE_FREE"]=" " D["JEMALLOC_PURGE_MADVISE_DONTNEED"]=" " D["JEMALLOC_HAVE_BUILTIN_CLZ"]=" " D["JEMALLOC_OS_UNFAIR_LOCK"]=" " D["JEMALLOC_ZONE"]=" " D["JEMALLOC_TLS_MODEL"]=" __attribute__((tls_model(\"initial-exec\")))" D["HAVE__BOOL"]=" 1" D["HAVE_STDBOOL_H"]=" 1" for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ][_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789]*([\t (]|$)/ { line = $ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. 
case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} ac_datarootdir_hack=' s&@datadir@&${datarootdir}&g s&@docdir@&${datarootdir}/doc/${PACKAGE}&g s&@infodir@&${datarootdir}/info&g s&@localedir@&${datarootdir}/locale&g s&@mandir@&${datarootdir}/man&g s&\${datarootdir}&${prefix}/share&g' ;; esac ac_sed_extra="/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// } :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "include/jemalloc/internal/public_symbols.txt":C) f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" cp /dev/null "${f}" for nm in `echo ${mangling_map} |tr ',' ' '` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` echo "${n}:${m}" >> "${f}" public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` done for sym in ${public_syms} ; do n="${sym}" m="${JEMALLOC_PREFIX}${sym}" echo "${n}:${m}" >> "${f}" done ;; "include/jemalloc/internal/private_symbols.awk":C) f="${objroot}include/jemalloc/internal/private_symbols.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" ;; "include/jemalloc/internal/private_symbols_jet.awk":C) f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" ;; "include/jemalloc/internal/public_namespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/jemalloc_protos_jet.h":C) mkdir -p "${objroot}include/jemalloc" cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > 
"${objroot}include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" ;; esac done # for ac_tag as_fn_exit 0 �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������jemalloc-sys-0.3.2/rep/configure��������������������������������������������������������������������0100755�0000765�0000024�00001335221�13446174776�0015064�0����������������������������������������������������������������������������������������������������ustar�00����������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. 
{ _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." 
if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. 
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. 
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME= PACKAGE_TARNAME= PACKAGE_VERSION= PACKAGE_STRING= PACKAGE_BUGREPORT= PACKAGE_URL= ac_unique_file="Makefile.in" # Factoring default headers for most tests. ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_subst_vars='LTLIBOBJS LIBOBJS cfgoutputs_out cfgoutputs_in cfghdrs_out cfghdrs_in enable_initial_exec_tls enable_zone_allocator enable_tls enable_lazy_lock libdl enable_extra_size_check enable_readlinkat enable_log enable_cache_oblivious enable_xmalloc enable_utrace enable_fill enable_prof enable_experimental_smallocx enable_stats enable_debug je_ install_suffix private_namespace JEMALLOC_CPREFIX JEMALLOC_PREFIX enable_static enable_shared AUTOCONF LD RANLIB INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM enable_autogen RPATH_EXTRA LM CC_MM DUMP_SYMS AROUT ARFLAGS MKLIB TEST_LD_MODE LDTARGET CTARGET PIC_CFLAGS SOREV EXTRA_LDFLAGS DSO_LDFLAGS link_whole_archive libprefix exe a o importlib so LD_PRELOAD_VAR RPATH abi jemalloc_version_gid jemalloc_version_nrev jemalloc_version_bugfix jemalloc_version_minor jemalloc_version_major jemalloc_version AWK NM AR host_os host_vendor host_cpu host build_os build_vendor build_cpu build EGREP GREP EXTRA_CXXFLAGS SPECIFIED_CXXFLAGS CONFIGURE_CXXFLAGS enable_cxx HAVE_CXX14 ac_ct_CXX CXXFLAGS CXX CPP EXTRA_CFLAGS SPECIFIED_CFLAGS CONFIGURE_CFLAGS OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC XSLROOT XSLTPROC MANDIR DATADIR LIBDIR INCLUDEDIR BINDIR PREFIX abs_objroot objroot abs_srcroot srcroot rev CONFIG target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking with_xslroot enable_cxx with_lg_vaddr with_version with_rpath enable_autogen enable_shared enable_static with_mangling with_jemalloc_prefix with_export with_private_namespace with_install_suffix with_malloc_conf enable_debug enable_stats enable_experimental_smallocx enable_prof enable_prof_libunwind with_static_libunwind enable_prof_libgcc enable_prof_gcc enable_fill enable_utrace enable_xmalloc enable_cache_oblivious enable_log enable_readlinkat enable_extra_size_check with_lg_quantum with_lg_page with_lg_hugepage enable_libdl enable_syscall enable_lazy_lock enable_zone_allocator enable_initial_exec_tls 
' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP CXX CXXFLAGS CCC' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) 
ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; 
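# (As the alternation patterns above and below illustrate, configure accepts
# any unambiguous prefix of a long option, e.g. --pre is treated as --prefix
# and --sb as --sbindir.)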
-srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? 
"unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures this package to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. 
See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --disable-cxx Disable C++ integration --enable-autogen Automatically regenerate configure output --enable-shared Build shared libraries --enable-static Build static libraries --enable-debug Build debugging code --disable-stats Disable statistics calculation/reporting --enable-experimental-smallocx Enable experimental smallocx API --enable-prof Enable allocation profiling --enable-prof-libunwind Use libunwind for backtracing --disable-prof-libgcc Do not use libgcc for backtracing --disable-prof-gcc Do not use gcc intrinsics for backtracing --disable-fill Disable support for junk/zero filling --enable-utrace Enable utrace(2)-based tracing --enable-xmalloc Support xmalloc option --disable-cache-oblivious Disable support for cache-oblivious allocation alignment --enable-log Support debug logging --enable-readlinkat Use readlinkat over readlink --enable-extra-size-check Perform additional size related sanity checks --disable-libdl Do not use libdl
--disable-syscall Disable use of syscall(2) --enable-lazy-lock Enable lazy locking (only lock when multi-threaded) --disable-zone-allocator Disable zone allocator for Darwin --disable-initial-exec-tls Disable the initial-exec tls model Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-xslroot=<path> XSL stylesheet root path --with-lg-vaddr=<lg-vaddr> Number of significant virtual address bits --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> Version string --with-rpath=<rpath> Colon-separated rpath (ELF systems only) --with-mangling=<map> Mangle symbols in <map> --with-jemalloc-prefix=<prefix> Prefix to prepend to all public APIs --without-export disable exporting jemalloc public APIs --with-private-namespace=<prefix> Prefix to prepend to all library-private APIs --with-install-suffix=<suffix> Suffix to append to all installed files --with-malloc-conf=<malloc_conf> config.malloc_conf options string --with-static-libunwind=<libunwind.a> Path to static libunwind library; use rather than dynamically linking --with-lg-quantum=<lg-quantum> Base 2 log of minimum allocation alignment --with-lg-page=<lg-page> Base 2 log of system page size --with-lg-hugepage=<lg-hugepage> Base 2 log of system huge page size Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l<library> CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if you have headers in a nonstandard directory CPP C preprocessor CXX C++ compiler command CXXFLAGS C++ compiler flags Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$?
cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF configure generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_compute_int LINENO EXPR VAR INCLUDES # -------------------------------------------- # Tries to find the compile-time value of EXPR in a program that includes # INCLUDES, setting VAR accordingly. Returns whether the value could be # computed ac_fn_c_compute_int () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=0 ac_mid=0 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid; break else as_fn_arith $ac_mid + 1 && ac_lo=$as_val if test $ac_lo -le $ac_mid; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) < 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=-1 ac_mid=-1 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=$ac_mid; break else as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val if test $ac_mid -le $ac_hi; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else ac_lo= ac_hi= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. while test "x$ac_lo" != "x$ac_hi"; do as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid else as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in #(( ?*) eval "$3=\$ac_lo"; ac_retval=0 ;; '') ac_retval=1 ;; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 static long int longval () { return $2; } static unsigned long int ulongval () { return $2; } #include #include int main () { FILE *f = fopen ("conftest.val", "w"); if (! 
f) return 1; if (($2) < 0) { long int i = longval (); if (i != ($2)) return 1; fprintf (f, "%ld", i); } else { unsigned long int i = ulongval (); if (i != ($2)) return 1; fprintf (f, "%lu", i); } /* Do not output a trailing newline, as this causes \r\n confusion on some platforms. */ return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : echo >>conftest.val; read $3 <conftest.val; ac_retval=0 else ac_retval=1 fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext conftest.val rm -f conftest.val fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_compute_int # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_aux_dir= for ac_dir in build-aux "$srcdir"/build-aux; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. CONFIGURE_CFLAGS= SPECIFIED_CFLAGS="${CFLAGS}" CONFIGURE_CXXFLAGS= SPECIFIED_CXXFLAGS="${CXXFLAGS}" CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` rev=2 srcroot=$srcdir if test "x${srcroot}" = "x." ; then srcroot="" else srcroot="${srcroot}/" fi abs_srcroot="`cd \"${srcdir}\"; pwd`/" objroot="" abs_objroot="`pwd`/" if test "x$prefix" = "xNONE" ; then prefix="/usr/local" fi if test "x$exec_prefix" = "xNONE" ; then exec_prefix=$prefix fi PREFIX=$prefix BINDIR=`eval echo $bindir` BINDIR=`eval echo $BINDIR` INCLUDEDIR=`eval echo $includedir` INCLUDEDIR=`eval echo $INCLUDEDIR` LIBDIR=`eval echo $libdir` LIBDIR=`eval echo $LIBDIR` DATADIR=`eval echo $datadir` DATADIR=`eval echo $DATADIR` MANDIR=`eval echo $mandir` MANDIR=`eval echo $MANDIR` # Extract the first word of "xsltproc", so it can be a program name with args. set dummy xsltproc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XSLTPROC+:} false; then : $as_echo_n "(cached) " >&6 else case $XSLTPROC in [\\/]* | ?:[\\/]*) ac_cv_path_XSLTPROC="$XSLTPROC" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false" ;; esac fi XSLTPROC=$ac_cv_path_XSLTPROC if test -n "$XSLTPROC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLTPROC" >&5 $as_echo "$XSLTPROC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" else DEFAULT_XSLROOT="" fi # Check whether --with-xslroot was given. 
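# (A typical invocation might be, for example:
#   ./configure --with-xslroot=/usr/share/xml/docbook/stylesheet/docbook-xsl
# which overrides the DEFAULT_XSLROOT value probed above.)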
if test "${with_xslroot+set}" = set; then : withval=$with_xslroot; if test "x$with_xslroot" = "xno" ; then XSLROOT="${DEFAULT_XSLROOT}" else XSLROOT="${with_xslroot}" fi else XSLROOT="${DEFAULT_XSLROOT}" fi if test "x$XSLTPROC" = "xfalse" ; then XSLROOT="" fi CFLAGS=$CFLAGS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. 
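# The loop below inspects whatever files the bare `$ac_link_default`
# produced and derives the executable suffix: known object/debug
# artifacts are skipped, `a.out'/`b.out' mean an empty suffix, and any
# other dotted name donates its extension to ac_cv_exeext. Sketch of
# just the suffix extraction step (je_eg_* is an illustrative name):
je_eg_exeext_of() {
  # $1 = file the linker produced; prints its executable suffix, if any
  case $1 in
    [ab].out) ;;                          # default names: empty suffix
    *.*) expr "$1" : '[^.]*\(\..*\)' ;;   # e.g. conftest.exe -> .exe
    *) ;;                                 # no dot: empty suffix
  esac
}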
for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdio.h> int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile.
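# The run test below is the canonical cross-compile detector: link a
# program that writes conftest.out, try to execute it, and only fall
# back to cross_compiling=yes when the user already hinted at it with
# --host (cross_compiling=maybe); otherwise a non-running binary is a
# hard error. A compressed sketch of that decision (je_eg_* is an
# illustrative name; "$1" is assumed to be the freshly linked binary):
je_eg_detect_cross() {
  # $1 = test binary, $2 = current cross_compiling value ("maybe"/"no")
  if ./"$1" >/dev/null 2>&1; then
    echo no                # binary runs: native build
  elif test "$2" = maybe; then
    echo yes               # --host was given: assume cross compilation
  else
    echo "error: cannot run C compiled programs" >&2
    return 1
  fi
}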
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdarg.h> #include <stdio.h> struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ?
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x$GCC" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5 $as_echo_n "checking whether compiler is MSVC... " >&6; } if ${je_cv_msvc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef _MSC_VER int fail-1; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_msvc=yes else je_cv_msvc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5 $as_echo "$je_cv_msvc" >&6; } fi je_cv_cray_prgenv_wrapper="" if test "x${PE_ENV}" != "x" ; then case "${CC}" in CC|cc) je_cv_cray_prgenv_wrapper="yes" ;; *) ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is cray" >&5 $as_echo_n "checking whether compiler is cray... " >&6; } if ${je_cv_cray+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef _CRAYC int fail-1; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cray=yes else je_cv_cray=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray" >&5 $as_echo "$je_cv_cray" >&6; } if test "x${je_cv_cray}" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cray compiler version is 8.4" >&5 $as_echo_n "checking whether cray compiler version is 8.4... " >&6; } if ${je_cv_cray_84+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) int fail-1; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cray_84=yes else je_cv_cray_84=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray_84" >&5 $as_echo "$je_cv_cray_84" >&6; } fi if test "x$GCC" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu11" >&5 $as_echo_n "checking whether compiler supports -std=gnu11... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-std=gnu11 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-std=gnu11 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_HAS_RESTRICT 1 _ACEOF else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5 $as_echo_n "checking whether compiler supports -std=gnu99... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-std=gnu99 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-std=gnu99 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_HAS_RESTRICT 1 _ACEOF fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 $as_echo_n "checking whether compiler supports -Wall... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wall if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wall { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wextra" >&5 $as_echo_n "checking whether compiler supports -Wextra... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wextra if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wextra { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wshorten-64-to-32" >&5 $as_echo_n "checking whether compiler supports -Wshorten-64-to-32... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wshorten-64-to-32 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wshorten-64-to-32 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wsign-compare" >&5 $as_echo_n "checking whether compiler supports -Wsign-compare... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wsign-compare if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wsign-compare { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wundef" >&5 $as_echo_n "checking whether compiler supports -Wundef... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wundef if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wundef { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wno-format-zero-length" >&5 $as_echo_n "checking whether compiler supports -Wno-format-zero-length... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Wno-format-zero-length if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Wno-format-zero-length { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5 $as_echo_n "checking whether compiler supports -pipe... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-pipe if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-pipe { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 $as_echo_n "checking whether compiler supports -g3... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-g3 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-g3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi elif test "x$je_cv_msvc" = "xyes" ; then CC="$CC -nologo" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Zi" >&5 $as_echo_n "checking whether compiler supports -Zi... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Zi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Zi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -MT" >&5 $as_echo_n "checking whether compiler supports -MT... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-MT if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-MT { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -W3" >&5 $as_echo_n "checking whether compiler supports -W3... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-W3 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-W3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -FS" >&5 $as_echo_n "checking whether compiler supports -FS... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-FS if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-FS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi T_APPEND_V=-I${srcdir}/include/msvc_compat if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi fi if test "x$je_cv_cray" = "xyes" ; then if test "x$je_cv_cray_84" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hipa2" >&5 $as_echo_n "checking whether compiler supports -hipa2... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hipa2 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hipa2 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnognu" >&5 $as_echo_n "checking whether compiler supports -hnognu... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hnognu if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hnognu { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=128" >&5 $as_echo_n "checking whether compiler supports -hnomessage=128... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hnomessage=128 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hnomessage=128 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=1357" >&5 $as_echo_n "checking whether compiler supports -hnomessage=1357... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-hnomessage=1357 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-hnomessage=1357 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests.
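# Aside: every "checking whether compiler supports -X" block earlier in
# this script (from -std=gnu11 through the Cray -hnomessage probes) is
# one expansion of the same jemalloc macro: tentatively append the flag
# to CONFIGURE_CFLAGS, compile an empty program, and roll the append
# back if the compiler rejects it; CFLAGS is then rebuilt from
# CONFIGURE_CFLAGS plus the user's SPECIFIED_CFLAGS. A hedged sketch of
# that probe (je_eg_* names and the conftest file name are illustrative
# only; the function is never invoked here):
je_eg_try_cflag() {
  # $1 = candidate flag; appends it to CONFIGURE_CFLAGS only on success
  saved=$CONFIGURE_CFLAGS
  if test "x$CONFIGURE_CFLAGS" = "x"; then
    CONFIGURE_CFLAGS=$1
  else
    CONFIGURE_CFLAGS="$CONFIGURE_CFLAGS $1"
  fi
  echo 'int main(void) { return 0; }' > je_eg_conftest.c
  if ${CC-cc} $CONFIGURE_CFLAGS -c je_eg_conftest.c \
      -o je_eg_conftest.o 2>/dev/null; then
    :                                  # flag accepted: keep it
  else
    CONFIGURE_CFLAGS=$saved            # flag rejected: roll back
  fi
  rm -f je_eg_conftest.c je_eg_conftest.o
}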
ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Check whether --enable-cxx was given. if test "${enable_cxx+set}" = set; then : enableval=$enable_cxx; if test "x$enable_cxx" = "xno" ; then enable_cxx="0" else enable_cxx="1" fi else enable_cxx="1" fi if test "x$enable_cxx" = "x1" ; then # =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html # =========================================================================== # # SYNOPSIS # # AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) # # DESCRIPTION # # Check for baseline language coverage in the compiler for the specified # version of the C++ standard. If necessary, add switches to CXX and # CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) # or '14' (for the C++14 standard). # # The second argument, if specified, indicates whether you insist on an # extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. # -std=c++11). If neither is specified, you get whatever works, with # preference for an extended mode. # # The third argument, if specified 'mandatory' or if left unspecified, # indicates that baseline support for the specified C++ standard is # required and that the macro should error out if no mode with that # support is found. If specified 'optional', then configuration proceeds # regardless, after defining HAVE_CXX${VERSION} if and only if a # supporting mode is found. # # LICENSE # # Copyright (c) 2008 Benjamin Kosnik # Copyright (c) 2012 Zack Weinberg # Copyright (c) 2013 Roy Stogner # Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov # Copyright (c) 2015 Paul Norman # Copyright (c) 2015 Moritz Klammler # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 4 ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ax_cxx_compile_cxx14_required=false ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ac_success=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++14 features by default" >&5 $as_echo_n "checking whether $CXX supports C++14 features by default... " >&6; } if ${ax_cv_cxx_compile_cxx14+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ // If the compiler admits that it is not ready for C++11, why torture it? // Hopefully, this will speed up the test. 
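// The battery below compiles one translation unit per standard: each
// cxx11/cxx14 namespace isolates a single language feature, so any one
// missing feature fails the whole probe. A hedged illustration of that
// pattern follows; the je_eg_sketch name is not part of the original
// test, and the guard keeps the addition inert on older compilers:
#if defined(__cplusplus) && __cplusplus >= 201103L
namespace je_eg_sketch {
  // static_assert is itself one of the probed C++11 features; a false
  // condition anywhere here would fail the entire compile, which is
  // exactly how the probe detects a missing feature.
  static_assert(sizeof(int) <= sizeof(long), "illustrative only");
}
#endif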
#ifndef __cplusplus

#error "This is not a C++ compiler"

#elif __cplusplus < 201103L

#error "This is not a C++11 compiler"

#else

namespace cxx11
{

  namespace test_static_assert
  {
    template <typename T>
    struct check
    {
      static_assert(sizeof(int) <= sizeof(T), "not big enough");
    };
  }

  namespace test_final_override
  {
    struct Base
    {
      virtual void f() {}
    };

    struct Derived : public Base
    {
      virtual void f() override {}
    };
  }

  namespace test_double_right_angle_brackets
  {
    template < typename T >
    struct check {};

    typedef check<void> single_type;
    typedef check<check<void>> double_type;
    typedef check<check<check<void>>> triple_type;
    typedef check<check<check<check<void>>>> quadruple_type;
  }

  namespace test_decltype
  {
    int
    f()
    {
      int a = 1;
      decltype(a) b = 2;
      return a + b;
    }
  }

  namespace test_type_deduction
  {
    template < typename T1, typename T2 >
    struct is_same
    {
      static const bool value = false;
    };

    template < typename T >
    struct is_same<T, T>
    {
      static const bool value = true;
    };

    template < typename T1, typename T2 >
    auto
    add(T1 a1, T2 a2) -> decltype(a1 + a2)
    {
      return a1 + a2;
    }

    int
    test(const int c, volatile int v)
    {
      static_assert(is_same<int, decltype(0)>::value == true, "");
      static_assert(is_same<int, decltype(c)>::value == false, "");
      static_assert(is_same<int, decltype(v)>::value == false, "");
      auto ac = c;
      auto av = v;
      auto sumi = ac + av + 'x';
      auto sumf = ac + av + 1.0;
      static_assert(is_same<int, decltype(ac)>::value == true, "");
      static_assert(is_same<int, decltype(av)>::value == true, "");
      static_assert(is_same<int, decltype(sumi)>::value == true, "");
      static_assert(is_same<int, decltype(sumf)>::value == false, "");
      static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
      return (sumf > 0.0) ? sumi : add(c, v);
    }
  }

  namespace test_noexcept
  {
    int f() { return 0; }
    int g() noexcept { return 0; }

    static_assert(noexcept(f()) == false, "");
    static_assert(noexcept(g()) == true, "");
  }

  namespace test_constexpr
  {
    template < typename CharT >
    unsigned long constexpr
    strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
    {
      return *s ? strlen_c_r(s + 1, acc + 1) : acc;
    }

    template < typename CharT >
    unsigned long constexpr
    strlen_c(const CharT *const s) noexcept
    {
      return strlen_c_r(s, 0UL);
    }

    static_assert(strlen_c("") == 0UL, "");
    static_assert(strlen_c("1") == 1UL, "");
    static_assert(strlen_c("example") == 7UL, "");
    static_assert(strlen_c("another\0example") == 7UL, "");
  }

  namespace test_rvalue_references
  {
    template < int N >
    struct answer
    {
      static constexpr int value = N;
    };

    answer<1> f(int&)       { return answer<1>(); }
    answer<2> f(const int&) { return answer<2>(); }
    answer<3> f(int&&)      { return answer<3>(); }

    void
    test()
    {
      int i = 0;
      const int c = 0;
      static_assert(decltype(f(i))::value == 1, "");
      static_assert(decltype(f(c))::value == 2, "");
      static_assert(decltype(f(0))::value == 3, "");
    }
  }

  namespace test_uniform_initialization
  {
    struct test
    {
      static const int zero {};
      static const int one {1};
    };

    static_assert(test::zero == 0, "");
    static_assert(test::one == 1, "");
  }

  namespace test_lambdas
  {
    void
    test1()
    {
      auto lambda1 = [](){};
      auto lambda2 = lambda1;
      lambda1();
      lambda2();
    }

    int
    test2()
    {
      auto a = [](int i, int j){ return i + j; }(1, 2);
      auto b = []() -> int { return '0'; }();
      auto c = [=](){ return a + b; }();
      auto d = [&](){ return c; }();
      auto e = [a, &b](int x) mutable {
        const auto identity = [](int y){ return y; };
        for (auto i = 0; i < a; ++i)
          a += b--;
        return x + identity(a + b);
      }(0);
      return a + b + c + d + e;
    }

    int
    test3()
    {
      const auto nullary = [](){ return 0; };
      const auto unary = [](int x){ return x; };
      using nullary_t = decltype(nullary);
      using unary_t = decltype(unary);
      const auto higher1st = [](nullary_t f){ return f(); };
      const auto higher2nd = [unary](nullary_t f1){
        return [unary, f1](unary_t f2){ return f2(unary(f1())); };
      };
      return higher1st(nullary) + higher2nd(nullary)(unary);
    }
  }

  namespace test_variadic_templates
  {
    template <int...>
    struct sum;

    template <int N0, int... N1toN>
    struct sum<N0, N1toN...>
    {
      static constexpr auto value = N0 + sum<N1toN...>::value;
    };

    template <>
    struct sum<>
    {
      static constexpr auto value = 0;
    };

    static_assert(sum<>::value == 0, "");
    static_assert(sum<1>::value == 1, "");
    static_assert(sum<23>::value == 23, "");
    static_assert(sum<1, 2>::value == 3, "");
    static_assert(sum<5, 5, 11>::value == 21, "");
    static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
  }

  // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
  // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
  // because of this.
  namespace test_template_alias_sfinae
  {
    struct foo {};

    template<typename T>
    using member = typename T::member_type;

    template<typename T>
    void func(...) {}

    template<typename T>
    void func(member<T>*) {}

    void test();

    void test() { func<foo>(0); }
  }

}  // namespace cxx11

#endif  // __cplusplus >= 201103L

// If the compiler admits that it is not ready for C++14, why torture it?
// Hopefully, this will speed up the test.

#ifndef __cplusplus

#error "This is not a C++ compiler"

#elif __cplusplus < 201402L

#error "This is not a C++14 compiler"

#else

namespace cxx14
{

  namespace test_polymorphic_lambdas
  {
    int
    test()
    {
      const auto lambda = [](auto&&... args){
        const auto istiny = [](auto x){
          return (sizeof(x) == 1UL) ? 1 : 0;
        };
        const int aretiny[] = { istiny(args)... };
        return aretiny[0];
      };
      return lambda(1, 1L, 1.0f, '1');
    }
  }

  namespace test_binary_literals
  {
    constexpr auto ivii = 0b0000000000101010;
    static_assert(ivii == 42, "wrong value");
  }

  namespace test_generalized_constexpr
  {
    template < typename CharT >
    constexpr unsigned long
    strlen_c(const CharT *const s) noexcept
    {
      auto length = 0UL;
      for (auto p = s; *p; ++p)
        ++length;
      return length;
    }

    static_assert(strlen_c("") == 0UL, "");
    static_assert(strlen_c("x") == 1UL, "");
    static_assert(strlen_c("test") == 4UL, "");
    static_assert(strlen_c("another\0test") == 7UL, "");
  }

  namespace test_lambda_init_capture
  {
    int
    test()
    {
      auto x = 0;
      const auto lambda1 = [a = x](int b){ return a + b; };
      const auto lambda2 = [a = lambda1(x)](){ return a; };
      return lambda2();
    }
  }

  namespace test_digit_seperators
  {
    constexpr auto ten_million = 100'000'000;
    static_assert(ten_million == 100000000, "");
  }

  namespace test_return_type_deduction
  {
    auto f(int& x) { return x; }
    decltype(auto) g(int& x) { return x; }

    template < typename T1, typename T2 >
    struct is_same
    {
      static constexpr auto value = false;
    };

    template < typename T >
    struct is_same<T, T>
    {
      static constexpr auto value = true;
    };

    int
    test()
    {
      auto x = 0;
      static_assert(is_same<int, decltype(f(x))>::value, "");
      static_assert(is_same<int&, decltype(g(x))>::value, "");
      return x;
    }
  }

}  // namespace cxx14

#endif  // __cplusplus >= 201402L

_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :
  ax_cv_cxx_compile_cxx14=yes
else
  ax_cv_cxx_compile_cxx14=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_cxx_compile_cxx14" >&5
$as_echo "$ax_cv_cxx_compile_cxx14" >&6; }
if test x$ax_cv_cxx_compile_cxx14 = xyes; then
  ac_success=yes
fi

if test x$ac_success = xno; then
  for switch in -std=c++14 -std=c++0x +std=c++14 "-h std=c++14"; do
    cachevar=`$as_echo "ax_cv_cxx_compile_cxx14_$switch" | $as_tr_sh`
    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++14 features with $switch" >&5
$as_echo_n "checking whether $CXX supports C++14 features with $switch... " >&6; }
if eval \${$cachevar+:} false; then :
  $as_echo_n "(cached) " >&6
else
  ac_save_CXX="$CXX"
  CXX="$CXX $switch"
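  # If the default language mode fails, configure retries the same two test
  # programs with each candidate dialect flag in the list above:
  # -std=c++14/-std=c++0x for GCC/Clang-style drivers, +std=c++14 for HP aCC,
  # and "-h std=c++14" for the Cray compiler. The first flag that compiles is
  # appended to $CXX and cached per flag. A minimal sketch of the pattern
  # (hypothetical stand-alone example, not part of configure):
  #
  #   for switch in -std=c++14 -std=c++0x; do
  #     if $CXX $switch -c conftest.cpp >/dev/null 2>&1; then
  #       CXX="$CXX $switch"
  #       break
  #     fi
  #   done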
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */


// If the compiler admits that it is not ready for C++11, why torture it?
// Hopefully, this will speed up the test.

#ifndef __cplusplus

#error "This is not a C++ compiler"

#elif __cplusplus < 201103L

#error "This is not a C++11 compiler"

#else

namespace cxx11
{

  namespace test_static_assert
  {
    template <typename T>
    struct check
    {
      static_assert(sizeof(int) <= sizeof(T), "not big enough");
    };
  }

  namespace test_final_override
  {
    struct Base
    {
      virtual void f() {}
    };

    struct Derived : public Base
    {
      virtual void f() override {}
    };
  }

  namespace test_double_right_angle_brackets
  {
    template < typename T >
    struct check {};

    typedef check<void> single_type;
    typedef check<check<void>> double_type;
    typedef check<check<check<void>>> triple_type;
    typedef check<check<check<check<void>>>> quadruple_type;
  }

  namespace test_decltype
  {
    int
    f()
    {
      int a = 1;
      decltype(a) b = 2;
      return a + b;
    }
  }

  namespace test_type_deduction
  {
    template < typename T1, typename T2 >
    struct is_same
    {
      static const bool value = false;
    };

    template < typename T >
    struct is_same<T, T>
    {
      static const bool value = true;
    };

    template < typename T1, typename T2 >
    auto
    add(T1 a1, T2 a2) -> decltype(a1 + a2)
    {
      return a1 + a2;
    }

    int
    test(const int c, volatile int v)
    {
      static_assert(is_same<int, decltype(0)>::value == true, "");
      static_assert(is_same<int, decltype(c)>::value == false, "");
      static_assert(is_same<int, decltype(v)>::value == false, "");
      auto ac = c;
      auto av = v;
      auto sumi = ac + av + 'x';
      auto sumf = ac + av + 1.0;
      static_assert(is_same<int, decltype(ac)>::value == true, "");
      static_assert(is_same<int, decltype(av)>::value == true, "");
      static_assert(is_same<int, decltype(sumi)>::value == true, "");
      static_assert(is_same<int, decltype(sumf)>::value == false, "");
      static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
      return (sumf > 0.0) ? sumi : add(c, v);
    }
  }

  namespace test_noexcept
  {
    int f() { return 0; }
    int g() noexcept { return 0; }

    static_assert(noexcept(f()) == false, "");
    static_assert(noexcept(g()) == true, "");
  }

  namespace test_constexpr
  {
    template < typename CharT >
    unsigned long constexpr
    strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
    {
      return *s ? strlen_c_r(s + 1, acc + 1) : acc;
    }

    template < typename CharT >
    unsigned long constexpr
    strlen_c(const CharT *const s) noexcept
    {
      return strlen_c_r(s, 0UL);
    }

    static_assert(strlen_c("") == 0UL, "");
    static_assert(strlen_c("1") == 1UL, "");
    static_assert(strlen_c("example") == 7UL, "");
    static_assert(strlen_c("another\0example") == 7UL, "");
  }

  namespace test_rvalue_references
  {
    template < int N >
    struct answer
    {
      static constexpr int value = N;
    };

    answer<1> f(int&)       { return answer<1>(); }
    answer<2> f(const int&) { return answer<2>(); }
    answer<3> f(int&&)      { return answer<3>(); }

    void
    test()
    {
      int i = 0;
      const int c = 0;
      static_assert(decltype(f(i))::value == 1, "");
      static_assert(decltype(f(c))::value == 2, "");
      static_assert(decltype(f(0))::value == 3, "");
    }
  }

  namespace test_uniform_initialization
  {
    struct test
    {
      static const int zero {};
      static const int one {1};
    };

    static_assert(test::zero == 0, "");
    static_assert(test::one == 1, "");
  }

  namespace test_lambdas
  {
    void
    test1()
    {
      auto lambda1 = [](){};
      auto lambda2 = lambda1;
      lambda1();
      lambda2();
    }

    int
    test2()
    {
      auto a = [](int i, int j){ return i + j; }(1, 2);
      auto b = []() -> int { return '0'; }();
      auto c = [=](){ return a + b; }();
      auto d = [&](){ return c; }();
      auto e = [a, &b](int x) mutable {
        const auto identity = [](int y){ return y; };
        for (auto i = 0; i < a; ++i)
          a += b--;
        return x + identity(a + b);
      }(0);
      return a + b + c + d + e;
    }

    int
    test3()
    {
      const auto nullary = [](){ return 0; };
      const auto unary = [](int x){ return x; };
      using nullary_t = decltype(nullary);
      using unary_t = decltype(unary);
      const auto higher1st = [](nullary_t f){ return f(); };
      const auto higher2nd = [unary](nullary_t f1){
        return [unary, f1](unary_t f2){ return f2(unary(f1())); };
      };
      return higher1st(nullary) + higher2nd(nullary)(unary);
    }
  }

  namespace test_variadic_templates
  {
    template <int...>
    struct sum;

    template <int N0, int... N1toN>
    struct sum<N0, N1toN...>
    {
      static constexpr auto value = N0 + sum<N1toN...>::value;
    };

    template <>
    struct sum<>
    {
      static constexpr auto value = 0;
    };

    static_assert(sum<>::value == 0, "");
    static_assert(sum<1>::value == 1, "");
    static_assert(sum<23>::value == 23, "");
    static_assert(sum<1, 2>::value == 3, "");
    static_assert(sum<5, 5, 11>::value == 21, "");
    static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
  }

  // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
  // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
  // because of this.
  namespace test_template_alias_sfinae
  {
    struct foo {};

    template<typename T>
    using member = typename T::member_type;

    template<typename T>
    void func(...) {}

    template<typename T>
    void func(member<T>*) {}

    void test();

    void test() { func<foo>(0); }
  }

}  // namespace cxx11

#endif  // __cplusplus >= 201103L

// If the compiler admits that it is not ready for C++14, why torture it?
// Hopefully, this will speed up the test.

#ifndef __cplusplus

#error "This is not a C++ compiler"

#elif __cplusplus < 201402L

#error "This is not a C++14 compiler"

#else

namespace cxx14
{

  namespace test_polymorphic_lambdas
  {
    int
    test()
    {
      const auto lambda = [](auto&&... args){
        const auto istiny = [](auto x){
          return (sizeof(x) == 1UL) ? 1 : 0;
        };
        const int aretiny[] = { istiny(args)... };
        return aretiny[0];
      };
      return lambda(1, 1L, 1.0f, '1');
    }
  }

  namespace test_binary_literals
  {
    constexpr auto ivii = 0b0000000000101010;
    static_assert(ivii == 42, "wrong value");
  }

  namespace test_generalized_constexpr
  {
    template < typename CharT >
    constexpr unsigned long
    strlen_c(const CharT *const s) noexcept
    {
      auto length = 0UL;
      for (auto p = s; *p; ++p)
        ++length;
      return length;
    }

    static_assert(strlen_c("") == 0UL, "");
    static_assert(strlen_c("x") == 1UL, "");
    static_assert(strlen_c("test") == 4UL, "");
    static_assert(strlen_c("another\0test") == 7UL, "");
  }

  namespace test_lambda_init_capture
  {
    int
    test()
    {
      auto x = 0;
      const auto lambda1 = [a = x](int b){ return a + b; };
      const auto lambda2 = [a = lambda1(x)](){ return a; };
      return lambda2();
    }
  }

  namespace test_digit_seperators
  {
    constexpr auto ten_million = 100'000'000;
    static_assert(ten_million == 100000000, "");
  }

  namespace test_return_type_deduction
  {
    auto f(int& x) { return x; }
    decltype(auto) g(int& x) { return x; }

    template < typename T1, typename T2 >
    struct is_same
    {
      static constexpr auto value = false;
    };

    template < typename T >
    struct is_same<T, T>
    {
      static constexpr auto value = true;
    };

    int
    test()
    {
      auto x = 0;
      static_assert(is_same<int, decltype(f(x))>::value, "");
      static_assert(is_same<int&, decltype(g(x))>::value, "");
      return x;
    }
  }

}  // namespace cxx14

#endif  // __cplusplus >= 201402L

_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :
  eval $cachevar=yes
else
  eval $cachevar=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
  CXX="$ac_save_CXX"
fi
eval ac_res=\$$cachevar
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
    if eval test x\$$cachevar = xyes; then
      CXX="$CXX $switch"
      if test -n "$CXXCPP" ; then
        CXXCPP="$CXXCPP $switch"
      fi
      ac_success=yes
      break
    fi
  done
fi
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

if test x$ax_cxx_compile_cxx14_required = xtrue; then
  if test x$ac_success = xno; then
    as_fn_error $? "*** A compiler with support for C++14 language features is required." "$LINENO" 5
  fi
fi
if test x$ac_success = xno; then
  HAVE_CXX14=0
  { $as_echo "$as_me:${as_lineno-$LINENO}: No compiler with C++14 support was found" >&5
$as_echo "$as_me: No compiler with C++14 support was found" >&6;}
else
  HAVE_CXX14=1

$as_echo "#define HAVE_CXX14 1" >>confdefs.h

fi

  if test "x${HAVE_CXX14}" = "x1" ; then
    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5
$as_echo_n "checking whether compiler supports -Wall... " >&6; }
T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
T_APPEND_V=-Wall
  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
else
  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
fi

if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
else
  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
fi

ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-Wall { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wextra" >&5 $as_echo_n "checking whether compiler supports -Wextra... " >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-Wextra if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-Wextra { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 $as_echo_n "checking whether compiler supports -g3... " >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-g3 if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/

int
main ()
{
return 0;
  ;
  return 0;
}
_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :
  je_cv_cxxflags_added=-g3
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
  je_cv_cxxflags_added=
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
else
  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
fi

    SAVED_LIBS="${LIBS}"
    T_APPEND_V=-lstdc++
  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
  LIBS="${LIBS}${T_APPEND_V}"
else
  LIBS="${LIBS} ${T_APPEND_V}"
fi

    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether libstdc++ linkage is compilable" >&5
$as_echo_n "checking whether libstdc++ linkage is compilable... " >&6; }
if ${je_cv_libstdcxx+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <stdlib.h>

int
main ()
{
    int *arr = (int *)malloc(sizeof(int) * 42);
    if (arr == NULL) return 1;
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_libstdcxx=yes
else
  je_cv_libstdcxx=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_libstdcxx" >&5
$as_echo "$je_cv_libstdcxx" >&6; }
    if test "x${je_cv_libstdcxx}" = "xno" ; then
      LIBS="${SAVED_LIBS}"
    fi
  else
    enable_cxx="0"
  fi
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
if ${ac_cv_path_GREP+:} false; then :
  $as_echo_n "(cached) " >&6
else
  if test -z "$GREP"; then
  ac_path_GREP_found=false
  # Loop through the user's path and test for each of PROGNAME-LIST
  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
do
  IFS=$as_save_IFS
  test -z "$as_dir" && as_dir=.
    for ac_prog in grep ggrep; do
    for ac_exec_ext in '' $ac_executable_extensions; do
      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
      as_fn_executable_p "$ac_path_GREP" || continue
# Check for GNU ac_path_GREP and select it if it is found.
  # Check for GNU $ac_path_GREP
case `"$ac_path_GREP" --version 2>&1` in
*GNU*)
  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
*)
  ac_count=0
  $as_echo_n 0123456789 >"conftest.in"
  while :
  do
    cat "conftest.in" "conftest.in" >"conftest.tmp"
    mv "conftest.tmp" "conftest.in"
    cp "conftest.in" "conftest.nl"
    $as_echo 'GREP' >> "conftest.nl"
    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
    as_fn_arith $ac_count + 1 && ac_count=$as_val
    if test $ac_count -gt ${ac_path_GREP_max-0}; then
      # Best one so far, save it but keep looking for a better one
      ac_cv_path_GREP="$ac_path_GREP"
      ac_path_GREP_max=$ac_count
    fi
    # 10*(2^10) chars as input seems more than enough
    test $ac_count -gt 10 && break
  done
  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
esac

      $ac_path_GREP_found && break 3
    done
  done
  done
IFS=$as_save_IFS
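  # The probe above takes a GNU grep as soon as one identifies itself, and
  # otherwise scores each candidate by how long a line it can still match,
  # doubling a scratch file until the candidate fails. A minimal sketch of
  # the scoring idea (hypothetical stand-alone example, not part of
  # configure):
  #
  #   $as_echo_n 0123456789 > in
  #   count=0
  #   while :; do
  #     cat in in > tmp && mv tmp in      # double the line length
  #     cp in nl && echo 'GREP' >> nl     # append the needle on its own line
  #     grep -e 'GREP$' < nl > out 2>/dev/null || break
  #     count=$((count + 1))              # larger count == longer lines OK
  #   done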
"no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. 
if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 $as_echo_n "checking whether byte ordering is bigendian... " >&6; } if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. ac_arch= ac_prev= for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do if test -n "$ac_prev"; then case $ac_word in i?86 | x86_64 | ppc | ppc64) if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then ac_arch=$ac_word else ac_cv_c_bigendian=universal break fi ;; esac ac_prev= elif test "x$ac_word" = "x-arch"; then ac_prev=arch fi done fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ && LITTLE_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if BYTE_ORDER != BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). 
      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <limits.h>

int
main ()
{
#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
	      bogus endian macros
	     #endif

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  # It does; now see whether it defined to _BIG_ENDIAN or not.
	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <limits.h>

int
main ()
{
#ifndef _BIG_ENDIAN
		 not big endian
		#endif

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  ac_cv_c_bigendian=yes
else
  ac_cv_c_bigendian=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
    fi
    if test $ac_cv_c_bigendian = unknown; then
      # Compile a test program.
      if test "$cross_compiling" = yes; then :
  # Try to guess by grepping values from an object file.
	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
short int ascii_mm[] =
		  { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
		short int ascii_ii[] =
		  { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
		int use_ascii (int i) {
		  return ascii_mm[i] + ascii_ii[i];
		}
		short int ebcdic_ii[] =
		  { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
		short int ebcdic_mm[] =
		  { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
		int use_ebcdic (int i) {
		  return ebcdic_mm[i] + ebcdic_ii[i];
		}
		extern int foo;

int
main ()
{
return use_ascii (foo) == use_ebcdic (foo);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
	      ac_cv_c_bigendian=yes
	    fi
	    if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
	      if test "$ac_cv_c_bigendian" = unknown; then
		ac_cv_c_bigendian=no
	      else
		# finding both strings is unlikely to happen, but who knows?
		ac_cv_c_bigendian=unknown
	      fi
	    fi
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
$ac_includes_default
int
main ()
{

	     /* Are we little or big endian?  From Harbison&Steele.  */
	     union
	     {
	       long int l;
	       char c[sizeof (long int)];
	     } u;
	     u.l = 1;
	     return u.c[sizeof (long int) - 1] == 1;

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_run "$LINENO"; then :
  ac_cv_c_bigendian=no
else
  ac_cv_c_bigendian=yes
fi
rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
  conftest.$ac_objext conftest.beam conftest.$ac_ext
fi

    fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5
$as_echo "$ac_cv_c_bigendian" >&6; }
 case $ac_cv_c_bigendian in #(
   yes)
     ac_cv_big_endian=1;; #(
   no)
     ac_cv_big_endian=0 ;; #(
   universal)

$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h

     ;; #(
   *)
     as_fn_error $?
"unknown endianness presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; esac if test "x${ac_cv_big_endian}" = "x1" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_BIG_ENDIAN _ACEOF fi if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then T_APPEND_V=-I${srcdir}/include/msvc_compat/C99 if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi fi if test "x${je_cv_msvc}" = "xyes" ; then LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN { $as_echo "$as_me:${as_lineno-$LINENO}: result: Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&5 $as_echo "Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&6; } else # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5 $as_echo_n "checking size of void *... " >&6; } if ${ac_cv_sizeof_void_p+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then : else if test "$ac_cv_type_void_p" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (void *) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_void_p=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5 $as_echo "$ac_cv_sizeof_void_p" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_VOID_P $ac_cv_sizeof_void_p _ACEOF if test "x${ac_cv_sizeof_void_p}" = "x8" ; then LG_SIZEOF_PTR=3 elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then LG_SIZEOF_PTR=2 else as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5 fi fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_PTR $LG_SIZEOF_PTR _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 $as_echo_n "checking size of int... " >&6; } if ${ac_cv_sizeof_int+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : else if test "$ac_cv_type_int" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (int) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_int=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 $as_echo "$ac_cv_sizeof_int" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INT $ac_cv_sizeof_int _ACEOF if test "x${ac_cv_sizeof_int}" = "x8" ; then LG_SIZEOF_INT=3 elif test "x${ac_cv_sizeof_int}" = "x4" ; then LG_SIZEOF_INT=2 else as_fn_error $? "Unsupported int size: ${ac_cv_sizeof_int}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_INT $LG_SIZEOF_INT _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. 
# This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5 $as_echo_n "checking size of long... " >&6; } if ${ac_cv_sizeof_long+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then : else if test "$ac_cv_type_long" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (long) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_long=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5 $as_echo "$ac_cv_sizeof_long" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_LONG $ac_cv_sizeof_long _ACEOF if test "x${ac_cv_sizeof_long}" = "x8" ; then LG_SIZEOF_LONG=3 elif test "x${ac_cv_sizeof_long}" = "x4" ; then LG_SIZEOF_LONG=2 else as_fn_error $? "Unsupported long size: ${ac_cv_sizeof_long}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_LONG $LG_SIZEOF_LONG _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5 $as_echo_n "checking size of long long... " >&6; } if ${ac_cv_sizeof_long_long+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long" "$ac_includes_default"; then : else if test "$ac_cv_type_long_long" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (long long) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_long_long=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5 $as_echo "$ac_cv_sizeof_long_long" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long _ACEOF if test "x${ac_cv_sizeof_long_long}" = "x8" ; then LG_SIZEOF_LONG_LONG=3 elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then LG_SIZEOF_LONG_LONG=2 else as_fn_error $? "Unsupported long long size: ${ac_cv_sizeof_long_long}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_LONG_LONG $LG_SIZEOF_LONG_LONG _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5 $as_echo_n "checking size of intmax_t... 
" >&6; } if ${ac_cv_sizeof_intmax_t+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (intmax_t))" "ac_cv_sizeof_intmax_t" "$ac_includes_default"; then : else if test "$ac_cv_type_intmax_t" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (intmax_t) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_intmax_t=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_intmax_t" >&5 $as_echo "$ac_cv_sizeof_intmax_t" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INTMAX_T $ac_cv_sizeof_intmax_t _ACEOF if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then LG_SIZEOF_INTMAX_T=4 elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then LG_SIZEOF_INTMAX_T=3 elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then LG_SIZEOF_INTMAX_T=2 else as_fn_error $? "Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_INTMAX_T $LG_SIZEOF_INTMAX_T _ACEOF # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac CPU_SPINWAIT="" case "${host_cpu}" in i686|x86_64) HAVE_CPU_SPINWAIT=1 if test "x${je_cv_msvc}" = "xyes" ; then if ${je_cv_pause_msvc+:} false; then : $as_echo_n "(cached) " >&6 else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction MSVC is compilable" >&5 $as_echo_n "checking whether pause instruction MSVC is compilable... 
" >&6; } if ${je_cv_pause_msvc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { _mm_pause(); return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pause_msvc=yes else je_cv_pause_msvc=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause_msvc" >&5 $as_echo "$je_cv_pause_msvc" >&6; } fi if test "x${je_cv_pause_msvc}" = "xyes" ; then CPU_SPINWAIT='_mm_pause()' fi else if ${je_cv_pause+:} false; then : $as_echo_n "(cached) " >&6 else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5 $as_echo_n "checking whether pause instruction is compilable... " >&6; } if ${je_cv_pause+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { __asm__ volatile("pause"); return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pause=yes else je_cv_pause=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5 $as_echo "$je_cv_pause" >&6; } fi if test "x${je_cv_pause}" = "xyes" ; then CPU_SPINWAIT='__asm__ volatile("pause")' fi fi ;; *) HAVE_CPU_SPINWAIT=0 ;; esac cat >>confdefs.h <<_ACEOF #define HAVE_CPU_SPINWAIT $HAVE_CPU_SPINWAIT _ACEOF cat >>confdefs.h <<_ACEOF #define CPU_SPINWAIT $CPU_SPINWAIT _ACEOF # Check whether --with-lg_vaddr was given. if test "${with_lg_vaddr+set}" = set; then : withval=$with_lg_vaddr; LG_VADDR="$with_lg_vaddr" else LG_VADDR="detect" fi case "${host_cpu}" in aarch64) if test "x$LG_VADDR" = "xdetect"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 $as_echo_n "checking number of significant virtual address bits... " >&6; } if test "x${LG_SIZEOF_PTR}" = "x2" ; then #aarch64 ILP32 LG_VADDR=32 else #aarch64 LP64 LG_VADDR=48 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5 $as_echo "$LG_VADDR" >&6; } fi ;; x86_64) if test "x$LG_VADDR" = "xdetect"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 $as_echo_n "checking number of significant virtual address bits... " >&6; } if ${je_cv_lg_vaddr+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : je_cv_lg_vaddr=57 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/

#include <stdio.h>
#ifdef _WIN32
#include <limits.h>
#include <intrin.h>
typedef unsigned __int32 uint32_t;
#else
#include <stdint.h>
#endif

int
main ()
{

	uint32_t r[4];
	uint32_t eax_in = 0x80000008U;
#ifdef _WIN32
	__cpuid((int *)r, (int)eax_in);
#else
	asm volatile ("cpuid"
	    : "=a" (r[0]), "=b" (r[1]), "=c" (r[2]), "=d" (r[3])
	    : "a" (eax_in), "c" (0)
	);
#endif
	uint32_t eax_out = r[0];
	uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8);
	FILE *f = fopen("conftest.out", "w");
	if (f == NULL) {
		return 1;
	}
	if (vaddr > (sizeof(void *) << 3)) {
		vaddr = sizeof(void *) << 3;
	}
	fprintf(f, "%u", vaddr);
	fclose(f);
	return 0;

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_run "$LINENO"; then :
  je_cv_lg_vaddr=`cat conftest.out`
else
  je_cv_lg_vaddr=error
fi
rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
  conftest.$ac_objext conftest.beam conftest.$ac_ext
fi

fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_vaddr" >&5
$as_echo "$je_cv_lg_vaddr" >&6; }
       if test "x${je_cv_lg_vaddr}" != "x" ; then
         LG_VADDR="${je_cv_lg_vaddr}"
       fi
       if test "x${LG_VADDR}" != "xerror" ; then
         cat >>confdefs.h <<_ACEOF
#define LG_VADDR $LG_VADDR
_ACEOF

       else
         as_fn_error $? "cannot determine number of significant virtual address bits" "$LINENO" 5
       fi
     fi
     ;;
  *)
     if test "x$LG_VADDR" = "xdetect"; then
       { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5
$as_echo_n "checking number of significant virtual address bits... " >&6; }
       if test "x${LG_SIZEOF_PTR}" = "x3" ; then
         LG_VADDR=64
       elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
         LG_VADDR=32
       elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
         LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
       else
         as_fn_error $? "Unsupported lg(pointer size): ${LG_SIZEOF_PTR}" "$LINENO" 5
       fi
       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5
$as_echo "$LG_VADDR" >&6; }
     fi
     ;;
esac

cat >>confdefs.h <<_ACEOF
#define LG_VADDR $LG_VADDR
_ACEOF


LD_PRELOAD_VAR="LD_PRELOAD"
so="so"
importlib="${so}"
o="$ac_objext"
a="a"
exe="$ac_exeext"
libprefix="lib"
link_whole_archive="0"
DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
RPATH='-Wl,-rpath,$(1)'
SOREV="${so}.${rev}"
PIC_CFLAGS='-fPIC -DPIC'
CTARGET='-o $@'
LDTARGET='-o $@'
TEST_LD_MODE=
EXTRA_LDFLAGS=
ARFLAGS='crus'
AROUT=' $@'
CC_MM=1

if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
  TEST_LD_MODE='-dynamic'
fi

if test "x${je_cv_cray}" = "xyes" ; then
  CC_MM=
fi

if test -n "$ac_tool_prefix"; then
  # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
set dummy ${ac_tool_prefix}ar; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_prog_AR+:} false; then :
  $as_echo_n "(cached) " >&6
else
  if test -n "$AR"; then
  ac_cv_prog_AR="$AR" # Let the user override the test.
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
  IFS=$as_save_IFS
  test -z "$as_dir" && as_dir=.
    for ac_exec_ext in '' $ac_executable_extensions; do
  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
    ac_cv_prog_AR="${ac_tool_prefix}ar"
    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
    break 2
  fi
done
  done
IFS=$as_save_IFS

fi
fi
AR=$ac_cv_prog_AR
if test -n "$AR"; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
$as_echo "$AR" >&6; }
else
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi


fi
if test -z "$ac_cv_prog_AR"; then
  ac_ct_AR=$AR
  # Extract the first word of "ar", so it can be a program name with args.
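  # When cross compiling, configure prefers a host-prefixed archiver
  # (e.g. aarch64-linux-gnu-ar) and only falls back to a bare "ar" with a
  # warning, since an unprefixed tool may target the build machine rather
  # than the host. A minimal sketch of the fallback (hypothetical
  # stand-alone example, not part of configure):
  #
  #   AR="${ac_tool_prefix}ar"
  #   command -v "$AR" >/dev/null 2>&1 || {
  #     AR=ar
  #     echo "warning: using unprefixed ar for a cross build" >&2
  #   }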
set dummy ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AR" = x; then AR=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nm", so it can be a program name with args. set dummy ${ac_tool_prefix}nm; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then ac_cv_prog_NM="$NM" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_NM="${ac_tool_prefix}nm" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NM=$ac_cv_prog_NM if test -n "$NM"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NM" >&5 $as_echo "$NM" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NM"; then ac_ct_NM=$NM # Extract the first word of "nm", so it can be a program name with args. set dummy nm; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NM"; then ac_cv_prog_ac_ct_NM="$ac_ct_NM" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
    for ac_exec_ext in '' $ac_executable_extensions; do
  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
    ac_cv_prog_ac_ct_NM="nm"
    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
    break 2
  fi
done
  done
IFS=$as_save_IFS

fi
fi
ac_ct_NM=$ac_cv_prog_ac_ct_NM
if test -n "$ac_ct_NM"; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NM" >&5
$as_echo "$ac_ct_NM" >&6; }
else
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi

  if test "x$ac_ct_NM" = x; then
    NM=":"
  else
    case $cross_compiling:$ac_tool_warned in
yes:)
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
ac_tool_warned=yes ;;
esac
    NM=$ac_ct_NM
  fi
else
  NM="$ac_cv_prog_NM"
fi

for ac_prog in gawk mawk nawk awk
do
  # Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_prog_AWK+:} false; then :
  $as_echo_n "(cached) " >&6
else
  if test -n "$AWK"; then
  ac_cv_prog_AWK="$AWK" # Let the user override the test.
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
  IFS=$as_save_IFS
  test -z "$as_dir" && as_dir=.
    for ac_exec_ext in '' $ac_executable_extensions; do
  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
    ac_cv_prog_AWK="$ac_prog"
    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
    break 2
  fi
done
  done
IFS=$as_save_IFS

fi
fi
AWK=$ac_cv_prog_AWK
if test -n "$AWK"; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
$as_echo "$AWK" >&6; }
else
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi


  test -n "$AWK" && break
done


# Check whether --with-version was given.
if test "${with_version+set}" = set; then :
  withval=$with_version;
    echo "${with_version}" | grep '^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$' 2>&1 1>/dev/null
    if test $? -eq 0 ; then
      echo "$with_version" > "${objroot}VERSION"
    else
      echo "${with_version}" | grep '^VERSION$' 2>&1 1>/dev/null
      if test $? -ne 0 ; then
        as_fn_error $? "${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION" "$LINENO" 5
      fi
    fi

else

    if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
      for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
                     '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
                     '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
                     '[0-9][0-9].[0-9][0-9].[0-9]' \
                     '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do
        (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
        if test $? -eq 0 ; then
          mv "${objroot}VERSION.tmp" "${objroot}VERSION"
          break
        fi
      done
    fi
    rm -f "${objroot}VERSION.tmp"

fi


if test ! -e "${objroot}VERSION" ; then
  if test !
-e "${srcroot}VERSION" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5 $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; } echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" else cp ${srcroot}VERSION ${objroot}VERSION fi fi jemalloc_version=`cat "${objroot}VERSION"` jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'` jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'` jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'` jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'` jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'` default_retain="0" maps_coalesce="1" DUMP_SYMS="${NM} -a" SYM_PREFIX="" case "${host}" in *-*-darwin* | *-*-ios*) abi="macho" RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" importlib="${so}" force_tls="0" DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" SYM_PREFIX="_" ;; *-*-freebsd*) abi="elf" $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h force_lazy_lock="1" ;; *-*-dragonfly*) abi="elf" ;; *-*-openbsd*) abi="elf" force_tls="0" ;; *-*-bitrig*) abi="elf" ;; *-*-linux-android) T_APPEND_V=-D_GNU_SOURCE if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS " >>confdefs.h $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_C11_ATOMICS 1" >>confdefs.h force_tls="0" if test "${LG_SIZEOF_PTR}" = "3"; then default_retain="1" fi ;; *-*-linux*) T_APPEND_V=-D_GNU_SOURCE if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS " >>confdefs.h $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h if test "${LG_SIZEOF_PTR}" = "3"; then default_retain="1" fi ;; *-*-kfreebsd*) T_APPEND_V=-D_GNU_SOURCE if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi abi="elf" $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h ;; *-*-netbsd*) { $as_echo "$as_me:${as_lineno-$LINENO}: checking ABI" >&5 $as_echo_n "checking ABI... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifdef __ELF__ /* ELF */ #else #error aout #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : abi="elf" else abi="aout" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5 $as_echo "$abi" >&6; } ;; *-*-solaris2*) abi="elf" RPATH='-Wl,-R,$(1)' T_APPEND_V=-D_POSIX_PTHREAD_SEMANTICS if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi T_APPEND_V=-lposix4 -lsocket -lnsl if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi ;; *-ibm-aix*) if test "${LG_SIZEOF_PTR}" = "3"; then LD_PRELOAD_VAR="LDR_PRELOAD64" else LD_PRELOAD_VAR="LDR_PRELOAD" fi abi="xcoff" ;; *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" maps_coalesce="0" RPATH="" so="dll" if test "x$je_cv_msvc" = "xyes" ; then importlib="lib" DSO_LDFLAGS="-LD" EXTRA_LDFLAGS="-link -DEBUG" CTARGET='-Fo$@' LDTARGET='-Fe$@' AR='lib' ARFLAGS='-nologo -out:' AROUT='$@' CC_MM= else importlib="${so}" DSO_LDFLAGS="-shared" link_whole_archive="1" fi case "${host}" in *-*-cygwin*) DUMP_SYMS="dumpbin /SYMBOLS" ;; *) ;; esac a="lib" libprefix="" SOREV="${so}" PIC_CFLAGS="" ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5 $as_echo "Unsupported operating system: ${host}" >&6; } abi="elf" ;; esac JEMALLOC_USABLE_SIZE_CONST=const for ac_header in malloc.h do : ac_fn_c_check_header_mongrel "$LINENO" "malloc.h" "ac_cv_header_malloc_h" "$ac_includes_default" if test "x$ac_cv_header_malloc_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_MALLOC_H 1 _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether malloc_usable_size definition can use const argument" >&5 $as_echo_n "checking whether malloc_usable_size definition can use const argument... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc.h> #include <stddef.h> size_t malloc_usable_size(const void *ptr); int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else JEMALLOC_USABLE_SIZE_CONST= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi done cat >>confdefs.h <<_ACEOF #define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5 $as_echo_n "checking for library containing log... " >&6; } if ${ac_cv_search_log+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply.
*/ #ifdef __cplusplus extern "C" #endif char log (); int main () { return log (); ; return 0; } _ACEOF for ac_lib in '' m; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_log=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_log+:} false; then : break fi done if ${ac_cv_search_log+:} false; then : else ac_cv_search_log=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5 $as_echo "$ac_cv_search_log" >&6; } ac_res=$ac_cv_search_log if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" else as_fn_error $? "Missing math functions" "$LINENO" 5 fi if test "x$ac_cv_search_log" != "xnone required" ; then LM="$ac_cv_search_log" else LM= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5 $as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; } if ${je_cv_attribute+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ static __attribute__((unused)) void foo(void){} int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_attribute=yes else je_cv_attribute=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5 $as_echo "$je_cv_attribute" >&6; } if test "x${je_cv_attribute}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR " >>confdefs.h if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 $as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-fvisibility=hidden if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-fvisibility=hidden { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 $as_echo_n "checking whether compiler supports -fvisibility=hidden... 
" >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-fvisibility=hidden if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-fvisibility=hidden { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi fi fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5 $as_echo_n "checking whether tls_model attribute is compilable... " >&6; } if ${je_cv_tls_model+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { static __thread int __attribute__((tls_model("initial-exec"), unused)) foo; foo = 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_tls_model=yes else je_cv_tls_model=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5 $as_echo "$je_cv_tls_model" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5 $as_echo_n "checking whether alloc_size attribute is compilable... " >&6; } if ${je_cv_alloc_size+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { void *foo(size_t size) __attribute__((alloc_size(1))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_alloc_size=yes else je_cv_alloc_size=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5 $as_echo "$je_cv_alloc_size" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x${je_cv_alloc_size}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE " >>confdefs.h fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_gnu_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> int main () { void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_format_gnu_printf=yes else je_cv_format_gnu_printf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5 $as_echo "$je_cv_format_gnu_printf" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x${je_cv_format_gnu_printf}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF " >>confdefs.h fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { void *foo(const char *format, ...) __attribute__((format(printf, 1, 2))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_format_printf=yes else je_cv_format_printf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5 $as_echo "$je_cv_format_printf" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x${je_cv_format_printf}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF " >>confdefs.h fi # Check whether --with-rpath was given. 
if test "${with_rpath+set}" = set; then : withval=$with_rpath; if test "x$with_rpath" = "xno" ; then RPATH_EXTRA= else RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" fi else RPATH_EXTRA= fi # Check whether --enable-autogen was given. if test "${enable_autogen+set}" = set; then : enableval=$enable_autogen; if test "x$enable_autogen" = "xno" ; then enable_autogen="0" else enable_autogen="1" fi else enable_autogen="0" fi # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. 
test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi # Extract the first word of "ld", so it can be a program name with args. set dummy ld; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else case $LD in [\\/]* | ?:[\\/]*) ac_cv_path_LD="$LD" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_LD" && ac_cv_path_LD="false" ;; esac fi LD=$ac_cv_path_LD if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "autoconf", so it can be a program name with args. set dummy autoconf; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_AUTOCONF+:} false; then : $as_echo_n "(cached) " >&6 else case $AUTOCONF in [\\/]* | ?:[\\/]*) ac_cv_path_AUTOCONF="$AUTOCONF" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false" ;; esac fi AUTOCONF=$ac_cv_path_AUTOCONF if test -n "$AUTOCONF"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AUTOCONF" >&5 $as_echo "$AUTOCONF" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; if test "x$enable_shared" = "xno" ; then enable_shared="0" else enable_shared="1" fi else enable_shared="1" fi # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : enableval=$enable_static; if test "x$enable_static" = "xno" ; then enable_static="0" else enable_static="1" fi else enable_static="1" fi if test "$enable_shared$enable_static" = "00" ; then as_fn_error $? "Please enable one of shared or static builds" "$LINENO" 5 fi # Check whether --with-mangling was given. if test "${with_mangling+set}" = set; then : withval=$with_mangling; mangling_map="$with_mangling" else mangling_map="" fi # Check whether --with-jemalloc_prefix was given. if test "${with_jemalloc_prefix+set}" = set; then : withval=$with_jemalloc_prefix; JEMALLOC_PREFIX="$with_jemalloc_prefix" else if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then JEMALLOC_PREFIX="" else JEMALLOC_PREFIX="je_" fi fi if test "x$JEMALLOC_PREFIX" = "x" ; then $as_echo "#define JEMALLOC_IS_MALLOC 1" >>confdefs.h else JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` cat >>confdefs.h <<_ACEOF #define JEMALLOC_PREFIX "$JEMALLOC_PREFIX" _ACEOF cat >>confdefs.h <<_ACEOF #define JEMALLOC_CPREFIX "$JEMALLOC_CPREFIX" _ACEOF fi # Check whether --with-export was given. 
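# (--without-export makes the block below define JEMALLOC_EXPORT as empty,
# which suppresses the per-ABI visibility/dllexport attributes that the
# jemalloc headers would otherwise attach to the public API symbols.)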
if test "${with_export+set}" = set; then : withval=$with_export; if test "x$with_export" = "xno"; then $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h fi fi public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx" ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign" if test "x$ac_cv_func_memalign" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE_MEMALIGN " >>confdefs.h public_syms="${public_syms} memalign" fi ac_fn_c_check_func "$LINENO" "valloc" "ac_cv_func_valloc" if test "x$ac_cv_func_valloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE_VALLOC " >>confdefs.h public_syms="${public_syms} valloc" fi wrap_syms= if test "x${JEMALLOC_PREFIX}" = "x" ; then ac_fn_c_check_func "$LINENO" "__libc_calloc" "ac_cv_func___libc_calloc" if test "x$ac_cv_func___libc_calloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_CALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_calloc" fi ac_fn_c_check_func "$LINENO" "__libc_free" "ac_cv_func___libc_free" if test "x$ac_cv_func___libc_free" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_FREE " >>confdefs.h wrap_syms="${wrap_syms} __libc_free" fi ac_fn_c_check_func "$LINENO" "__libc_malloc" "ac_cv_func___libc_malloc" if test "x$ac_cv_func___libc_malloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_MALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_malloc" fi ac_fn_c_check_func "$LINENO" "__libc_memalign" "ac_cv_func___libc_memalign" if test "x$ac_cv_func___libc_memalign" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN " >>confdefs.h wrap_syms="${wrap_syms} __libc_memalign" fi ac_fn_c_check_func "$LINENO" "__libc_realloc" "ac_cv_func___libc_realloc" if test "x$ac_cv_func___libc_realloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_REALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_realloc" fi ac_fn_c_check_func "$LINENO" "__libc_valloc" "ac_cv_func___libc_valloc" if test "x$ac_cv_func___libc_valloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___LIBC_VALLOC " >>confdefs.h wrap_syms="${wrap_syms} __libc_valloc" fi ac_fn_c_check_func "$LINENO" "__posix_memalign" "ac_cv_func___posix_memalign" if test "x$ac_cv_func___posix_memalign" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN " >>confdefs.h wrap_syms="${wrap_syms} __posix_memalign" fi fi case "${host}" in *-*-mingw* | *-*-cygwin*) wrap_syms="${wrap_syms} tls_callback" ;; *) ;; esac # Check whether --with-private_namespace was given. if test "${with_private_namespace+set}" = set; then : withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_" else JEMALLOC_PRIVATE_NAMESPACE="je_" fi cat >>confdefs.h <<_ACEOF #define JEMALLOC_PRIVATE_NAMESPACE $JEMALLOC_PRIVATE_NAMESPACE _ACEOF private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" # Check whether --with-install_suffix was given. if test "${with_install_suffix+set}" = set; then : withval=$with_install_suffix; INSTALL_SUFFIX="$with_install_suffix" else INSTALL_SUFFIX= fi install_suffix="$INSTALL_SUFFIX" # Check whether --with-malloc_conf was given. 
if test "${with_malloc_conf+set}" = set; then : withval=$with_malloc_conf; JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf" else JEMALLOC_CONFIG_MALLOC_CONF="" fi config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF" cat >>confdefs.h <<_ACEOF #define JEMALLOC_CONFIG_MALLOC_CONF "$config_malloc_conf" _ACEOF je_="je_" cfgoutputs_in="Makefile.in" cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in" cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" cfgoutputs_out="Makefile" cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_out="${cfgoutputs_out} test/test.sh" cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" cfgoutputs_tup="Makefile" cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" cfghdrs_out="include/jemalloc/jemalloc_defs.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" cfghdrs_out="${cfghdrs_out} 
include/jemalloc/internal/public_namespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" # Check whether --enable-debug was given. if test "${enable_debug+set}" = set; then : enableval=$enable_debug; if test "x$enable_debug" = "xno" ; then enable_debug="0" else enable_debug="1" fi else enable_debug="0" fi if test "x$enable_debug" = "x1" ; then $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h fi if test "x$enable_debug" = "x1" ; then $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h fi if test "x$enable_debug" = "x0" ; then if test "x$GCC" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5 $as_echo_n "checking whether compiler supports -O3... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-O3 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-O3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5 $as_echo_n "checking whether compiler supports -O3... " >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-O3 if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-O3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -funroll-loops" >&5 $as_echo_n "checking whether compiler supports -funroll-loops... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-funroll-loops if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-funroll-loops { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi elif test "x$je_cv_msvc" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5 $as_echo_n "checking whether compiler supports -O2... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-O2 if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-O2 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5 $as_echo_n "checking whether compiler supports -O2... 
" >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-O2 if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-O2 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5 $as_echo_n "checking whether compiler supports -O... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-O if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-O { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5 $as_echo_n "checking whether compiler supports -O... 
" >&6; } T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" T_APPEND_V=-O if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" else CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : je_cv_cxxflags_added=-O { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cxxflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" else CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" fi fi fi # Check whether --enable-stats was given. if test "${enable_stats+set}" = set; then : enableval=$enable_stats; if test "x$enable_stats" = "xno" ; then enable_stats="0" else enable_stats="1" fi else enable_stats="1" fi if test "x$enable_stats" = "x1" ; then $as_echo "#define JEMALLOC_STATS " >>confdefs.h fi # Check whether --enable-experimental_smallocx was given. if test "${enable_experimental_smallocx+set}" = set; then : enableval=$enable_experimental_smallocx; if test "x$enable_experimental_smallocx" = "xno" ; then enable_experimental_smallocx="0" else enable_experimental_smallocx="1" fi else enable_experimental_smallocx="0" fi if test "x$enable_experimental_smallocx" = "x1" ; then $as_echo "#define JEMALLOC_EXPERIMENTAL_SMALLOCX_API 1" >>confdefs.h fi # Check whether --enable-prof was given. if test "${enable_prof+set}" = set; then : enableval=$enable_prof; if test "x$enable_prof" = "xno" ; then enable_prof="0" else enable_prof="1" fi else enable_prof="0" fi if test "x$enable_prof" = "x1" ; then backtrace_method="" else backtrace_method="N/A" fi # Check whether --enable-prof-libunwind was given. if test "${enable_prof_libunwind+set}" = set; then : enableval=$enable_prof_libunwind; if test "x$enable_prof_libunwind" = "xno" ; then enable_prof_libunwind="0" else enable_prof_libunwind="1" fi else enable_prof_libunwind="0" fi # Check whether --with-static_libunwind was given. if test "${with_static_libunwind+set}" = set; then : withval=$with_static_libunwind; if test "x$with_static_libunwind" = "xno" ; then LUNWIND="-lunwind" else if test ! -f "$with_static_libunwind" ; then as_fn_error $? 
"Static libunwind not found: $with_static_libunwind" "$LINENO" 5 fi LUNWIND="$with_static_libunwind" fi else LUNWIND="-lunwind" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then for ac_header in libunwind.h do : ac_fn_c_check_header_mongrel "$LINENO" "libunwind.h" "ac_cv_header_libunwind_h" "$ac_includes_default" if test "x$ac_cv_header_libunwind_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBUNWIND_H 1 _ACEOF else enable_prof_libunwind="0" fi done if test "x$LUNWIND" = "x-lunwind" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unw_backtrace in -lunwind" >&5 $as_echo_n "checking for unw_backtrace in -lunwind... " >&6; } if ${ac_cv_lib_unwind_unw_backtrace+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lunwind $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char unw_backtrace (); int main () { return unw_backtrace (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_unwind_unw_backtrace=yes else ac_cv_lib_unwind_unw_backtrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_unw_backtrace" >&5 $as_echo "$ac_cv_lib_unwind_unw_backtrace" >&6; } if test "x$ac_cv_lib_unwind_unw_backtrace" = xyes; then : T_APPEND_V=$LUNWIND if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi else enable_prof_libunwind="0" fi else T_APPEND_V=$LUNWIND if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi fi if test "x${enable_prof_libunwind}" = "x1" ; then backtrace_method="libunwind" $as_echo "#define JEMALLOC_PROF_LIBUNWIND " >>confdefs.h fi fi # Check whether --enable-prof-libgcc was given. if test "${enable_prof_libgcc+set}" = set; then : enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then enable_prof_libgcc="0" else enable_prof_libgcc="1" fi else enable_prof_libgcc="1" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ -a "x$GCC" = "xyes" ; then for ac_header in unwind.h do : ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default" if test "x$ac_cv_header_unwind_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UNWIND_H 1 _ACEOF else enable_prof_libgcc="0" fi done if test "x${enable_prof_libgcc}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5 $as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; } if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgcc $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char _Unwind_Backtrace (); int main () { return _Unwind_Backtrace (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gcc__Unwind_Backtrace=yes else ac_cv_lib_gcc__Unwind_Backtrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gcc__Unwind_Backtrace" >&5 $as_echo "$ac_cv_lib_gcc__Unwind_Backtrace" >&6; } if test "x$ac_cv_lib_gcc__Unwind_Backtrace" = xyes; then : T_APPEND_V=-lgcc if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi else enable_prof_libgcc="0" fi fi if test "x${enable_prof_libgcc}" = "x1" ; then backtrace_method="libgcc" $as_echo "#define JEMALLOC_PROF_LIBGCC " >>confdefs.h fi else enable_prof_libgcc="0" fi # Check whether --enable-prof-gcc was given. if test "${enable_prof_gcc+set}" = set; then : enableval=$enable_prof_gcc; if test "x$enable_prof_gcc" = "xno" ; then enable_prof_gcc="0" else enable_prof_gcc="1" fi else enable_prof_gcc="1" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ -a "x$GCC" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fno-omit-frame-pointer" >&5 $as_echo_n "checking whether compiler supports -fno-omit-frame-pointer... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-fno-omit-frame-pointer if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-fno-omit-frame-pointer { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi backtrace_method="gcc intrinsics" $as_echo "#define JEMALLOC_PROF_GCC " >>confdefs.h else enable_prof_gcc="0" fi if test "x$backtrace_method" = "x" ; then backtrace_method="none (disabling profiling)" enable_prof="0" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking configured backtracing method" >&5 $as_echo_n "checking configured backtracing method... 
" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5 $as_echo "$backtrace_method" >&6; } if test "x$enable_prof" = "x1" ; then T_APPEND_V=$LM if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi $as_echo "#define JEMALLOC_PROF " >>confdefs.h fi if test "x${maps_coalesce}" = "x1" ; then $as_echo "#define JEMALLOC_MAPS_COALESCE " >>confdefs.h fi if test "x$default_retain" = "x1" ; then $as_echo "#define JEMALLOC_RETAIN " >>confdefs.h fi have_dss="1" ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk" if test "x$ac_cv_func_sbrk" = xyes; then : have_sbrk="1" else have_sbrk="0" fi if test "x$have_sbrk" = "x1" ; then if test "x$sbrk_deprecated" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5 $as_echo "Disabling dss allocation because sbrk is deprecated" >&6; } have_dss="0" fi else have_dss="0" fi if test "x$have_dss" = "x1" ; then $as_echo "#define JEMALLOC_DSS " >>confdefs.h fi # Check whether --enable-fill was given. if test "${enable_fill+set}" = set; then : enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then enable_fill="0" else enable_fill="1" fi else enable_fill="1" fi if test "x$enable_fill" = "x1" ; then $as_echo "#define JEMALLOC_FILL " >>confdefs.h fi # Check whether --enable-utrace was given. if test "${enable_utrace+set}" = set; then : enableval=$enable_utrace; if test "x$enable_utrace" = "xno" ; then enable_utrace="0" else enable_utrace="1" fi else enable_utrace="0" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) is compilable" >&5 $as_echo_n "checking whether utrace(2) is compilable... " >&6; } if ${je_cv_utrace+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include #include int main () { utrace((void *)0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_utrace=yes else je_cv_utrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace" >&5 $as_echo "$je_cv_utrace" >&6; } if test "x${je_cv_utrace}" = "xno" ; then enable_utrace="0" fi if test "x$enable_utrace" = "x1" ; then $as_echo "#define JEMALLOC_UTRACE " >>confdefs.h fi # Check whether --enable-xmalloc was given. if test "${enable_xmalloc+set}" = set; then : enableval=$enable_xmalloc; if test "x$enable_xmalloc" = "xno" ; then enable_xmalloc="0" else enable_xmalloc="1" fi else enable_xmalloc="0" fi if test "x$enable_xmalloc" = "x1" ; then $as_echo "#define JEMALLOC_XMALLOC " >>confdefs.h fi # Check whether --enable-cache-oblivious was given. if test "${enable_cache_oblivious+set}" = set; then : enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then enable_cache_oblivious="0" else enable_cache_oblivious="1" fi else enable_cache_oblivious="1" fi if test "x$enable_cache_oblivious" = "x1" ; then $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS " >>confdefs.h fi # Check whether --enable-log was given. if test "${enable_log+set}" = set; then : enableval=$enable_log; if test "x$enable_log" = "xno" ; then enable_log="0" else enable_log="1" fi else enable_log="0" fi if test "x$enable_log" = "x1" ; then $as_echo "#define JEMALLOC_LOG " >>confdefs.h fi # Check whether --enable-readlinkat was given. 
if test "${enable_readlinkat+set}" = set; then : enableval=$enable_readlinkat; if test "x$enable_readlinkat" = "xno" ; then enable_readlinkat="0" else enable_readlinkat="1" fi else enable_readlinkat="0" fi if test "x$enable_readlinkat" = "x1" ; then $as_echo "#define JEMALLOC_READLINKAT " >>confdefs.h fi # Check whether --enable-extra-size-check was given. if test "${enable_extra_size_check+set}" = set; then : enableval=$enable_extra_size_check; if test "x$enable_extra_size_check" = "xno" ; then enable_extra_size_check="0" else enable_extra_size_check="1" fi else enable_extra_size_check="0" fi if test "x$enable_extra_size_check" = "x1" ; then $as_echo "#define JEMALLOC_EXTRA_SIZE_CHECK " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_unreachable is compilable" >&5 $as_echo_n "checking whether a program using __builtin_unreachable is compilable... " >&6; } if ${je_cv_gcc_builtin_unreachable+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ void foo (void) { __builtin_unreachable(); } int main () { { foo(); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_builtin_unreachable=yes else je_cv_gcc_builtin_unreachable=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_unreachable" >&5 $as_echo "$je_cv_gcc_builtin_unreachable" >&6; } if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable" >>confdefs.h else $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE abort" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5 $as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; } if ${je_cv_gcc_builtin_ffsl+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { { int rv = __builtin_ffsl(0x08); printf("%d\n", rv); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_builtin_ffsl=yes else je_cv_gcc_builtin_ffsl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_ffsl" >&5 $as_echo "$je_cv_gcc_builtin_ffsl" >&6; } if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then $as_echo "#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5 $as_echo_n "checking whether a program using ffsl is compilable... " >&6; } if ${je_cv_function_ffsl+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
 */
#include <stdio.h>
#include <strings.h>
#include <string.h>
int
main ()
{

	{
		int rv = ffsl(0x08);
		printf("%d\n", rv);
	}

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_function_ffsl=yes
else
  je_cv_function_ffsl=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5
$as_echo "$je_cv_function_ffsl" >&6; }

  if test "x${je_cv_function_ffsl}" = "xyes" ; then
    $as_echo "#define JEMALLOC_INTERNAL_FFSLL ffsll" >>confdefs.h

    $as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h

    $as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h

  else
    as_fn_error $? "Cannot build without ffsl(3) or __builtin_ffsl()" "$LINENO" 5
  fi
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_popcountl is compilable" >&5
$as_echo_n "checking whether a program using __builtin_popcountl is compilable... " >&6; }
if ${je_cv_gcc_builtin_popcountl+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <stdio.h>
#include <strings.h>
#include <string.h>
int
main ()
{

	{
		int rv = __builtin_popcountl(0x08);
		printf("%d\n", rv);
	}

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_gcc_builtin_popcountl=yes
else
  je_cv_gcc_builtin_popcountl=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_popcountl" >&5
$as_echo "$je_cv_gcc_builtin_popcountl" >&6; }
if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then
  $as_echo "#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount" >>confdefs.h

  $as_echo "#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl" >>confdefs.h

fi


# Check whether --with-lg_quantum was given.
if test "${with_lg_quantum+set}" = set; then :
  withval=$with_lg_quantum; LG_QUANTA="$with_lg_quantum"
else
  LG_QUANTA="3 4"
fi

if test "x$with_lg_quantum" != "x" ; then
  cat >>confdefs.h <<_ACEOF
#define LG_QUANTUM $with_lg_quantum
_ACEOF

fi


# Check whether --with-lg_page was given.
if test "${with_lg_page+set}" = set; then :
  withval=$with_lg_page; LG_PAGE="$with_lg_page"
else
  LG_PAGE="detect"
fi

if test "x$LG_PAGE" = "xdetect"; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5
$as_echo_n "checking LG_PAGE... " >&6; }
if ${je_cv_lg_page+:} false; then :
  $as_echo_n "(cached) " >&6
else
  if test "$cross_compiling" = yes; then :
  je_cv_lg_page=12
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <strings.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <stdio.h>
int
main ()
{

    int result;
    FILE *f;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwPageSize;
#else
    result = sysconf(_SC_PAGESIZE);
#endif
    if (result == -1) {
	return 1;
    }
    result = JEMALLOC_INTERNAL_FFSL(result) - 1;

    f = fopen("conftest.out", "w");
    if (f == NULL) {
	return 1;
    }
    fprintf(f, "%d", result);
    fclose(f);

    return 0;

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_run "$LINENO"; then :
  je_cv_lg_page=`cat conftest.out`
else
  je_cv_lg_page=undefined
fi
rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
  conftest.$ac_objext conftest.beam conftest.$ac_ext
fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5
$as_echo "$je_cv_lg_page" >&6; }
fi
if test "x${je_cv_lg_page}" != "x" ; then
  LG_PAGE="${je_cv_lg_page}"
fi
if test "x${LG_PAGE}" != "xundefined" ; then
  cat >>confdefs.h <<_ACEOF
#define LG_PAGE $LG_PAGE
_ACEOF

else
  as_fn_error $?
"cannot determine value for LG_PAGE" "$LINENO" 5 fi # Check whether --with-lg_hugepage was given. if test "${with_lg_hugepage+set}" = set; then : withval=$with_lg_hugepage; je_cv_lg_hugepage="${with_lg_hugepage}" else je_cv_lg_hugepage="" fi if test "x${je_cv_lg_hugepage}" = "x" ; then if test -e "/proc/meminfo" ; then hpsk=`cat /proc/meminfo 2>/dev/null | \ grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \ awk '{print $2}'` if test "x${hpsk}" != "x" ; then je_cv_lg_hugepage=10 while test "${hpsk}" -gt 1 ; do hpsk="$((hpsk / 2))" je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))" done fi fi if test "x${je_cv_lg_hugepage}" = "x" ; then je_cv_lg_hugepage=21 fi fi if test "x${LG_PAGE}" != "xundefined" -a \ "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then as_fn_error $? "Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_HUGEPAGE ${je_cv_lg_hugepage} _ACEOF # Check whether --enable-libdl was given. if test "${enable_libdl+set}" = set; then : enableval=$enable_libdl; if test "x$enable_libdl" = "xno" ; then enable_libdl="0" else enable_libdl="1" fi else enable_libdl="1" fi if test "x$abi" != "xpecoff" ; then $as_echo "#define JEMALLOC_HAVE_PTHREAD " >>confdefs.h for ac_header in pthread.h do : ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" if test "x$ac_cv_header_pthread_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_PTHREAD_H 1 _ACEOF else as_fn_error $? "pthread.h is missing" "$LINENO" 5 fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 $as_echo_n "checking for pthread_create in -lpthread... " >&6; } if ${ac_cv_lib_pthread_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpthread $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pthread_pthread_create=yes else ac_cv_lib_pthread_pthread_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 $as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : T_APPEND_V=-pthread if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then LIBS="${LIBS}${T_APPEND_V}" else LIBS="${LIBS} ${T_APPEND_V}" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 $as_echo_n "checking for library containing pthread_create... " >&6; } if ${ac_cv_search_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF for ac_lib in '' ; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_pthread_create=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_pthread_create+:} false; then : break fi done if ${ac_cv_search_pthread_create+:} false; then : else ac_cv_search_pthread_create=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5 $as_echo "$ac_cv_search_pthread_create" >&6; } ac_res=$ac_cv_search_pthread_create if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" else as_fn_error $? "libpthread is missing" "$LINENO" 5 fi fi wrap_syms="${wrap_syms} pthread_create" have_pthread="1" if test "x$enable_libdl" = "x1" ; then have_dlsym="1" for ac_header in dlfcn.h do : ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default" if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF ac_fn_c_check_func "$LINENO" "dlsym" "ac_cv_func_dlsym" if test "x$ac_cv_func_dlsym" = xyes; then : else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlsym in -ldl" >&5 $as_echo_n "checking for dlsym in -ldl... " >&6; } if ${ac_cv_lib_dl_dlsym+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlsym (); int main () { return dlsym (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlsym=yes else ac_cv_lib_dl_dlsym=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlsym" >&5 $as_echo "$ac_cv_lib_dl_dlsym" >&6; } if test "x$ac_cv_lib_dl_dlsym" = xyes; then : LIBS="$LIBS -ldl" else have_dlsym="0" fi fi else have_dlsym="0" fi done if test "x$have_dlsym" = "x1" ; then $as_echo "#define JEMALLOC_HAVE_DLSYM " >>confdefs.h fi else have_dlsym="0" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_atfork(3) is compilable" >&5 $as_echo_n "checking whether pthread_atfork(3) is compilable... " >&6; } if ${je_cv_pthread_atfork+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { pthread_atfork((void *)0, (void *)0, (void *)0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pthread_atfork=yes else je_cv_pthread_atfork=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_atfork" >&5 $as_echo "$je_cv_pthread_atfork" >&6; } if test "x${je_cv_pthread_atfork}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_PTHREAD_ATFORK " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_setname_np(3) is compilable" >&5 $as_echo_n "checking whether pthread_setname_np(3) is compilable... 
" >&6; } if ${je_cv_pthread_setname_np+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { pthread_setname_np(pthread_self(), "setname_test"); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pthread_setname_np=yes else je_cv_pthread_setname_np=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_setname_np" >&5 $as_echo "$je_cv_pthread_setname_np" >&6; } if test "x${je_cv_pthread_setname_np}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP " >>confdefs.h fi fi T_APPEND_V=-D_REENTRANT if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" else CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 $as_echo_n "checking for library containing clock_gettime... " >&6; } if ${ac_cv_search_clock_gettime+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char clock_gettime (); int main () { return clock_gettime (); ; return 0; } _ACEOF for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_clock_gettime=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_clock_gettime+:} false; then : break fi done if ${ac_cv_search_clock_gettime+:} false; then : else ac_cv_search_clock_gettime=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 $as_echo "$ac_cv_search_clock_gettime" >&6; } ac_res=$ac_cv_search_clock_gettime if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then if test "$ac_cv_search_clock_gettime" != "-lrt"; then SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" unset ac_cv_search_clock_gettime { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -dynamic" >&5 $as_echo_n "checking whether compiler supports -dynamic... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-dynamic if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-dynamic { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 $as_echo_n "checking for library containing clock_gettime... " >&6; } if ${ac_cv_search_clock_gettime+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char clock_gettime (); int main () { return clock_gettime (); ; return 0; } _ACEOF for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_clock_gettime=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_clock_gettime+:} false; then : break fi done if ${ac_cv_search_clock_gettime+:} false; then : else ac_cv_search_clock_gettime=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 $as_echo "$ac_cv_search_clock_gettime" >&6; } ac_res=$ac_cv_search_clock_gettime if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable" >&5 $as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable... " >&6; } if ${je_cv_clock_monotonic_coarse+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { struct timespec ts; clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_clock_monotonic_coarse=yes else je_cv_clock_monotonic_coarse=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic_coarse" >&5 $as_echo "$je_cv_clock_monotonic_coarse" >&6; } if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable" >&5 $as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable... " >&6; } if ${je_cv_clock_monotonic+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include int main () { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); #if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0 # error _POSIX_MONOTONIC_CLOCK missing/invalid #endif ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_clock_monotonic=yes else je_cv_clock_monotonic=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic" >&5 $as_echo "$je_cv_clock_monotonic" >&6; } if test "x${je_cv_clock_monotonic}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mach_absolute_time() is compilable" >&5 $as_echo_n "checking whether mach_absolute_time() is compilable... " >&6; } if ${je_cv_mach_absolute_time+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { mach_absolute_time(); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_mach_absolute_time=yes else je_cv_mach_absolute_time=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mach_absolute_time" >&5 $as_echo "$je_cv_mach_absolute_time" >&6; } if test "x${je_cv_mach_absolute_time}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1" >>confdefs.h fi # Check whether --enable-syscall was given. if test "${enable_syscall+set}" = set; then : enableval=$enable_syscall; if test "x$enable_syscall" = "xno" ; then enable_syscall="0" else enable_syscall="1" fi else enable_syscall="1" fi if test "x$enable_syscall" = "x1" ; then SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether syscall(2) is compilable" >&5 $as_echo_n "checking whether syscall(2) is compilable... " >&6; } if ${je_cv_syscall+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
 */
#include <sys/syscall.h>
#include <unistd.h>
int
main ()
{

	syscall(SYS_write, 2, "hello", 5);

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  je_cv_syscall=yes
else
  je_cv_syscall=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5
$as_echo "$je_cv_syscall" >&6; }
  CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
else
  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
fi

  if test "x$je_cv_syscall" = "xyes" ; then
    $as_echo "#define JEMALLOC_USE_SYSCALL " >>confdefs.h

  fi
fi

ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv"
if test "x$ac_cv_func_secure_getenv" = xyes; then :
  have_secure_getenv="1"
else
  have_secure_getenv="0"
fi

if test "x$have_secure_getenv" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_SECURE_GETENV " >>confdefs.h

fi
ac_fn_c_check_func "$LINENO" "sched_getcpu" "ac_cv_func_sched_getcpu"
if test "x$ac_cv_func_sched_getcpu" = xyes; then :
  have_sched_getcpu="1"
else
  have_sched_getcpu="0"
fi

if test "x$have_sched_getcpu" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_SCHED_GETCPU " >>confdefs.h

fi
ac_fn_c_check_func "$LINENO" "sched_setaffinity" "ac_cv_func_sched_setaffinity"
if test "x$ac_cv_func_sched_setaffinity" = xyes; then :
  have_sched_setaffinity="1"
else
  have_sched_setaffinity="0"
fi

if test "x$have_sched_setaffinity" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_SCHED_SETAFFINITY " >>confdefs.h

fi
ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid"
if test "x$ac_cv_func_issetugid" = xyes; then :
  have_issetugid="1"
else
  have_issetugid="0"
fi

if test "x$have_issetugid" = "x1" ; then
  $as_echo "#define JEMALLOC_HAVE_ISSETUGID " >>confdefs.h

fi
ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup"
if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then :
  have__malloc_thread_cleanup="1"
else
  have__malloc_thread_cleanup="0"
fi

if test "x$have__malloc_thread_cleanup" = "x1" ; then
  $as_echo "#define JEMALLOC_MALLOC_THREAD_CLEANUP " >>confdefs.h

  wrap_syms="${wrap_syms} _malloc_thread_cleanup"
  force_tls="1"
fi
ac_fn_c_check_func "$LINENO" "_pthread_mutex_init_calloc_cb" "ac_cv_func__pthread_mutex_init_calloc_cb"
if test "x$ac_cv_func__pthread_mutex_init_calloc_cb" = xyes; then :
  have__pthread_mutex_init_calloc_cb="1"
else
  have__pthread_mutex_init_calloc_cb="0"
fi

if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
  $as_echo "#define JEMALLOC_MUTEX_INIT_CB 1" >>confdefs.h

  wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork"
fi

# Check whether --enable-lazy_lock was given.
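# ac_fn_c_check_func above only proves that a symbol of the given name
# resolves at link time; it deliberately declares the function with a dummy
# prototype instead of including the real header. A hand-rolled equivalent
# for secure_getenv is roughly this sketch (same idea as the generated
# conftest programs):
#
#   /* Dummy signature: only symbol resolution is tested, not the prototype. */
#   char secure_getenv ();
#   int main () { return secure_getenv (); }
#
# If the link succeeds, configure caches ac_cv_func_secure_getenv=yes and
# appends the corresponding JEMALLOC_HAVE_* define to confdefs.h.
#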
if test "${enable_lazy_lock+set}" = set; then : enableval=$enable_lazy_lock; if test "x$enable_lazy_lock" = "xno" ; then enable_lazy_lock="0" else enable_lazy_lock="1" fi else enable_lazy_lock="" fi if test "x${enable_lazy_lock}" = "x" ; then if test "x${force_lazy_lock}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; } enable_lazy_lock="1" else enable_lazy_lock="0" fi fi if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no lazy-lock because thread creation monitoring is unimplemented" >&5 $as_echo "Forcing no lazy-lock because thread creation monitoring is unimplemented" >&6; } enable_lazy_lock="0" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$have_dlsym" = "x1" ; then $as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h else as_fn_error $? "Missing dlsym support: lazy-lock cannot be enabled." "$LINENO" 5 fi fi if test "x${force_tls}" = "x1" ; then enable_tls="1" elif test "x${force_tls}" = "x0" ; then enable_tls="0" else enable_tls="1" fi if test "x${enable_tls}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5 $as_echo_n "checking for TLS... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ __thread int x; int main () { x = 42; return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } enable_tls="0" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else enable_tls="0" fi if test "x${enable_tls}" = "x1" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_TLS _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5 $as_echo_n "checking whether C11 atomics is compilable... " >&6; } if ${je_cv_c11_atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #include #else #error Atomics not available #endif int main () { uint64_t *p = (uint64_t *)0; uint64_t x = 1; volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; uint64_t r = atomic_fetch_add(a, x) + x; return r == 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_c11_atomics=yes else je_cv_c11_atomics=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11_atomics" >&5 $as_echo "$je_cv_c11_atomics" >&6; } if test "x${je_cv_c11_atomics}" = "xyes" ; then $as_echo "#define JEMALLOC_C11_ATOMICS 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __atomic atomics is compilable" >&5 $as_echo_n "checking whether GCC __atomic atomics is compilable... " >&6; } if ${je_cv_gcc_atomic_atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { int x = 0; int val = 1; int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); int after_add = x; return after_add == 1; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_atomic_atomics=yes else je_cv_gcc_atomic_atomics=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_atomic_atomics" >&5 $as_echo "$je_cv_gcc_atomic_atomics" >&6; } if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then $as_echo "#define JEMALLOC_GCC_ATOMIC_ATOMICS 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC 8-bit __atomic atomics is compilable" >&5 $as_echo_n "checking whether GCC 8-bit __atomic atomics is compilable... " >&6; } if ${je_cv_gcc_u8_atomic_atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { unsigned char x = 0; int val = 1; int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); int after_add = (int)x; return after_add == 1; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_u8_atomic_atomics=yes else je_cv_gcc_u8_atomic_atomics=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_u8_atomic_atomics" >&5 $as_echo "$je_cv_gcc_u8_atomic_atomics" >&6; } if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then $as_echo "#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1" >>confdefs.h fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __sync atomics is compilable" >&5 $as_echo_n "checking whether GCC __sync atomics is compilable... " >&6; } if ${je_cv_gcc_sync_atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { int x = 0; int before_add = __sync_fetch_and_add(&x, 1); int after_add = x; return (before_add == 0) && (after_add == 1); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_sync_atomics=yes else je_cv_gcc_sync_atomics=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_sync_atomics" >&5 $as_echo "$je_cv_gcc_sync_atomics" >&6; } if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then $as_echo "#define JEMALLOC_GCC_SYNC_ATOMICS 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC 8-bit __sync atomics is compilable" >&5 $as_echo_n "checking whether GCC 8-bit __sync atomics is compilable... " >&6; } if ${je_cv_gcc_u8_sync_atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { unsigned char x = 0; int before_add = __sync_fetch_and_add(&x, 1); int after_add = (int)x; return (before_add == 0) && (after_add == 1); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_u8_sync_atomics=yes else je_cv_gcc_u8_sync_atomics=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_u8_sync_atomics" >&5 $as_echo "$je_cv_gcc_u8_sync_atomics" >&6; } if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then $as_echo "#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1" >>confdefs.h fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSAtomic*() is compilable" >&5 $as_echo_n "checking whether Darwin OSAtomic*() is compilable... 
" >&6; } if ${je_cv_osatomic+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { { int32_t x32 = 0; volatile int32_t *x32p = &x32; OSAtomicAdd32(1, x32p); } { int64_t x64 = 0; volatile int64_t *x64p = &x64; OSAtomicAdd64(1, x64p); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_osatomic=yes else je_cv_osatomic=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osatomic" >&5 $as_echo "$je_cv_osatomic" >&6; } if test "x${je_cv_osatomic}" = "xyes" ; then $as_echo "#define JEMALLOC_OSATOMIC " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(2) is compilable" >&5 $as_echo_n "checking whether madvise(2) is compilable... " >&6; } if ${je_cv_madvise+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { madvise((void *)0, 0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_madvise=yes else je_cv_madvise=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madvise" >&5 $as_echo "$je_cv_madvise" >&6; } if test "x${je_cv_madvise}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_FREE) is compilable" >&5 $as_echo_n "checking whether madvise(..., MADV_FREE) is compilable... " >&6; } if ${je_cv_madv_free+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { madvise((void *)0, 0, MADV_FREE); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_madv_free=yes else je_cv_madv_free=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_free" >&5 $as_echo "$je_cv_madv_free" >&6; } if test "x${je_cv_madv_free}" = "xyes" ; then $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h elif test "x${je_cv_madvise}" = "xyes" ; then case "${host_cpu}" in i686|x86_64) case "${host}" in *-*-linux*) $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h $as_echo "#define JEMALLOC_DEFINE_MADVISE_FREE " >>confdefs.h ;; esac ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DONTNEED) is compilable" >&5 $as_echo_n "checking whether madvise(..., MADV_DONTNEED) is compilable... " >&6; } if ${je_cv_madv_dontneed+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { madvise((void *)0, 0, MADV_DONTNEED); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_madv_dontneed=yes else je_cv_madv_dontneed=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontneed" >&5 $as_echo "$je_cv_madv_dontneed" >&6; } if test "x${je_cv_madv_dontneed}" = "xyes" ; then $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DO[NT]DUMP) is compilable" >&5 $as_echo_n "checking whether madvise(..., MADV_DO[NT]DUMP) is compilable... 
" >&6; } if ${je_cv_madv_dontdump+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { madvise((void *)0, 0, MADV_DONTDUMP); madvise((void *)0, 0, MADV_DODUMP); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_madv_dontdump=yes else je_cv_madv_dontdump=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontdump" >&5 $as_echo "$je_cv_madv_dontdump" >&6; } if test "x${je_cv_madv_dontdump}" = "xyes" ; then $as_echo "#define JEMALLOC_MADVISE_DONTDUMP " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable" >&5 $as_echo_n "checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable... " >&6; } if ${je_cv_thp+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { madvise((void *)0, 0, MADV_HUGEPAGE); madvise((void *)0, 0, MADV_NOHUGEPAGE); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_thp=yes else je_cv_thp=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_thp" >&5 $as_echo "$je_cv_thp" >&6; } case "${host_cpu}" in arm*) ;; *) if test "x${je_cv_thp}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_MADVISE_HUGE " >>confdefs.h fi ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5 $as_echo_n "checking for __builtin_clz... " >&6; } if ${je_cv_builtin_clz+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { { unsigned x = 0; int y = __builtin_clz(x); } { unsigned long x = 0; int y = __builtin_clzl(x); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_builtin_clz=yes else je_cv_builtin_clz=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_builtin_clz" >&5 $as_echo "$je_cv_builtin_clz" >&6; } if test "x${je_cv_builtin_clz}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin os_unfair_lock_*() is compilable" >&5 $as_echo_n "checking whether Darwin os_unfair_lock_*() is compilable... " >&6; } if ${je_cv_os_unfair_lock+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 #error "os_unfair_lock is not supported" #else os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; os_unfair_lock_lock(&lock); os_unfair_lock_unlock(&lock); #endif ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_os_unfair_lock=yes else je_cv_os_unfair_lock=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_os_unfair_lock" >&5 $as_echo "$je_cv_os_unfair_lock" >&6; } if test "x${je_cv_os_unfair_lock}" = "xyes" ; then $as_echo "#define JEMALLOC_OS_UNFAIR_LOCK " >>confdefs.h fi # Check whether --enable-zone-allocator was given. 
if test "${enable_zone_allocator+set}" = set; then : enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then enable_zone_allocator="0" else enable_zone_allocator="1" fi else if test "x${abi}" = "xmacho"; then enable_zone_allocator="1" fi fi if test "x${enable_zone_allocator}" = "x1" ; then if test "x${abi}" != "xmacho"; then as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5 fi $as_echo "#define JEMALLOC_ZONE " >>confdefs.h fi # Check whether --enable-initial-exec-tls was given. if test "${enable_initial_exec_tls+set}" = set; then : enableval=$enable_initial_exec_tls; if test "x$enable_initial_exec_tls" = "xno" ; then enable_initial_exec_tls="0" else enable_initial_exec_tls="1" fi else enable_initial_exec_tls="1" fi if test "x${je_cv_tls_model}" = "xyes" -a \ "x${enable_initial_exec_tls}" = "x1" ; then $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h else $as_echo "#define JEMALLOC_TLS_MODEL " >>confdefs.h fi if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" ; then $as_echo "#define JEMALLOC_BACKGROUND_THREAD 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc malloc hook is compilable" >&5 $as_echo_n "checking whether glibc malloc hook is compilable... " >&6; } if ${je_cv_glibc_malloc_hook+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern void (* __free_hook)(void *ptr); extern void *(* __malloc_hook)(size_t size); extern void *(* __realloc_hook)(void *ptr, size_t size); int main () { void *ptr = 0L; if (__malloc_hook) ptr = __malloc_hook(1); if (__realloc_hook) ptr = __realloc_hook(ptr, 2); if (__free_hook && ptr) __free_hook(ptr); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_glibc_malloc_hook=yes else je_cv_glibc_malloc_hook=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_malloc_hook" >&5 $as_echo "$je_cv_glibc_malloc_hook" >&6; } if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then if test "x${JEMALLOC_PREFIX}" = "x" ; then $as_echo "#define JEMALLOC_GLIBC_MALLOC_HOOK " >>confdefs.h wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc memalign hook is compilable" >&5 $as_echo_n "checking whether glibc memalign hook is compilable... " >&6; } if ${je_cv_glibc_memalign_hook+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern void *(* __memalign_hook)(size_t alignment, size_t size); int main () { void *ptr = 0L; if (__memalign_hook) ptr = __memalign_hook(16, 7); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_glibc_memalign_hook=yes else je_cv_glibc_memalign_hook=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_memalign_hook" >&5 $as_echo "$je_cv_glibc_memalign_hook" >&6; } if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then if test "x${JEMALLOC_PREFIX}" = "x" ; then $as_echo "#define JEMALLOC_GLIBC_MEMALIGN_HOOK " >>confdefs.h wrap_syms="${wrap_syms} __memalign_hook" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads adaptive mutexes is compilable" >&5 $as_echo_n "checking whether pthreads adaptive mutexes is compilable... 
" >&6; } if ${je_cv_pthread_mutex_adaptive_np+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); pthread_mutexattr_destroy(&attr); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pthread_mutex_adaptive_np=yes else je_cv_pthread_mutex_adaptive_np=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_mutex_adaptive_np" >&5 $as_echo "$je_cv_pthread_mutex_adaptive_np" >&6; } if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP " >>confdefs.h fi SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -D_GNU_SOURCE" >&5 $as_echo_n "checking whether compiler supports -D_GNU_SOURCE... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-D_GNU_SOURCE if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-D_GNU_SOURCE { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-Werror if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 $as_echo_n "checking whether compiler supports -herror_on_warning... 
" >&6; } T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" T_APPEND_V=-herror_on_warning if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" else CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" fi if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_added=-herror_on_warning { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_added= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns char with gnu source is compilable" >&5 $as_echo_n "checking whether strerror_r returns char with gnu source is compilable... " >&6; } if ${je_cv_strerror_r_returns_char_with_gnu_source+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { char *buffer = (char *) malloc(100); char *error = strerror_r(EINVAL, buffer, 100); printf("%s\n", error); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_strerror_r_returns_char_with_gnu_source=yes else je_cv_strerror_r_returns_char_with_gnu_source=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_strerror_r_returns_char_with_gnu_source" >&5 $as_echo "$je_cv_strerror_r_returns_char_with_gnu_source" >&6; } CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" else CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" fi if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then $as_echo "#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } if ${ac_cv_header_stdbool_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifndef bool "error: bool is not defined" #endif #ifndef false "error: false is not defined" #endif #if false "error: false is not 0" #endif #ifndef true "error: true is not defined" #endif #if true != 1 "error: true is not 1" #endif #ifndef __bool_true_false_are_defined "error: __bool_true_false_are_defined is not defined" #endif struct s { _Bool s: 1; _Bool t; } s; char a[true == 1 ? 1 : -1]; char b[false == 0 ? 1 : -1]; char c[__bool_true_false_are_defined == 1 ? 1 : -1]; char d[(bool) 0.5 == true ? 1 : -1]; /* See body of main program for 'e'. */ char f[(_Bool) 0.0 == false ? 1 : -1]; char g[true]; char h[sizeof (_Bool)]; char i[sizeof s.t]; enum { j = false, k = true, l = false * true, m = true * 256 }; /* The following fails for HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. 
*/ _Bool n[m]; char o[sizeof n == m * sizeof n[0] ? 1 : -1]; char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; /* Catch a bug in an HP-UX C compiler. See http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html */ _Bool q = true; _Bool *pq = &q; int main () { bool e = &s; *pq |= q; *pq |= ! q; /* Refer to every declared value, to avoid compiler optimizations. */ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdbool_h=yes else ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h fi ac_config_commands="$ac_config_commands include/jemalloc/internal/public_symbols.txt" ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols.awk" ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols_jet.awk" ac_config_commands="$ac_config_commands include/jemalloc/internal/public_namespace.h" ac_config_commands="$ac_config_commands include/jemalloc/internal/public_unnamespace.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_protos_jet.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_rename.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle_jet.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc.h" ac_config_headers="$ac_config_headers $cfghdrs_tup" ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. 
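# The block below serializes every ac_cv_*/je_cv_* probe result into
# $cache_file. Each entry is rewritten into the self-defaulting form
#
#   je_cv_madvise=${je_cv_madvise=yes}
#
# (see the sed program that follows), so re-sourcing the cache only assigns
# variables that are still unset. Running "./configure --config-cache" (or
# config.status --recheck) on the same machine then reuses these answers and
# skips the compile/link probes above.
#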
( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. 
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... 
-h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ config.status configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. 
## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # srcdir="${srcdir}" objroot="${objroot}" mangling_map="${mangling_map}" public_syms="${public_syms}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" install_suffix="${install_suffix}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;; "include/jemalloc/internal/private_symbols.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols.awk" ;; "include/jemalloc/internal/private_symbols_jet.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols_jet.awk" ;; "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;; "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;; "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;; "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;; "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;; "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;; "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. 
# Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). 
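# Editor's sketch (assumed Makefile.in input, not part of this script): with
# srcdir="." the ac_vpsub sed program defined below rewrites
#   VPATH = $(srcdir):extra
# to `VPATH = extra`, and reduces `VPATH = $(srcdir)` to an empty line so that
# line numbers in the generated Makefile are preserved.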
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). 
The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
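# Editor's sketch (hypothetical template line, not part of this script): for a
# pre-2.60 template that uses @mandir@ but never mentions datarootdir,
# configure warns that the input "seems to ignore the --datarootdir setting"
# and the ac_datarootdir_hack below substitutes the directory variables
# directly, e.g.
#   man_dir = @mandir@   ->   man_dir = /usr/local/share/man
# Templates that do mention datarootdir take the normal substitution path.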
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? 
"could not create -" "$LINENO" 5 fi ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "include/jemalloc/internal/public_symbols.txt":C) f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" cp /dev/null "${f}" for nm in `echo ${mangling_map} |tr ',' ' '` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` echo "${n}:${m}" >> "${f}" public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` done for sym in ${public_syms} ; do n="${sym}" m="${JEMALLOC_PREFIX}${sym}" echo "${n}:${m}" >> "${f}" done ;; "include/jemalloc/internal/private_symbols.awk":C) f="${objroot}include/jemalloc/internal/private_symbols.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" ;; "include/jemalloc/internal/private_symbols_jet.awk":C) f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" ;; "include/jemalloc/internal/public_namespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/jemalloc_protos_jet.h":C) mkdir -p "${objroot}include/jemalloc" cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. 
# config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 $as_echo "===============================================================================" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc version : ${jemalloc_version}" >&5 $as_echo "jemalloc version : ${jemalloc_version}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: library revision : ${rev}" >&5 $as_echo "library revision : ${rev}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG : ${CONFIG}" >&5 $as_echo "CONFIG : ${CONFIG}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5 $as_echo "CC : ${CC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}" >&5 $as_echo "CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}" >&5 $as_echo "SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&5 $as_echo "EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5 $as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CXX : ${CXX}" >&5 $as_echo "CXX : ${CXX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&5 $as_echo "CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&5 $as_echo "SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}" >&5 $as_echo "EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5 $as_echo "LDFLAGS : ${LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5 $as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: DSO_LDFLAGS : ${DSO_LDFLAGS}" >&5 $as_echo "DSO_LDFLAGS : ${DSO_LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5 $as_echo "LIBS : ${LIBS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: 
RPATH_EXTRA : ${RPATH_EXTRA}" >&5 $as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC : ${XSLTPROC}" >&5 $as_echo "XSLTPROC : ${XSLTPROC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT : ${XSLROOT}" >&5 $as_echo "XSLROOT : ${XSLROOT}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: PREFIX : ${PREFIX}" >&5 $as_echo "PREFIX : ${PREFIX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR : ${BINDIR}" >&5 $as_echo "BINDIR : ${BINDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5 $as_echo "DATADIR : ${DATADIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR : ${INCLUDEDIR}" >&5 $as_echo "INCLUDEDIR : ${INCLUDEDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR : ${LIBDIR}" >&5 $as_echo "LIBDIR : ${LIBDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR : ${MANDIR}" >&5 $as_echo "MANDIR : ${MANDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: srcroot : ${srcroot}" >&5 $as_echo "srcroot : ${srcroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_srcroot : ${abs_srcroot}" >&5 $as_echo "abs_srcroot : ${abs_srcroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: objroot : ${objroot}" >&5 $as_echo "objroot : ${objroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_objroot : ${abs_objroot}" >&5 $as_echo "abs_objroot : ${abs_objroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&5 $as_echo "JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PRIVATE_NAMESPACE" >&5 $as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: : ${JEMALLOC_PRIVATE_NAMESPACE}" >&5 $as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix : ${install_suffix}" >&5 $as_echo "install_suffix : ${install_suffix}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: malloc_conf : ${config_malloc_conf}" >&5 $as_echo "malloc_conf : ${config_malloc_conf}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: shared libs : ${enable_shared}" >&5 $as_echo "shared libs : ${enable_shared}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: static libs : ${enable_static}" >&5 $as_echo "static libs : ${enable_static}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5 $as_echo "autogen : ${enable_autogen}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5 $as_echo "debug : ${enable_debug}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: stats : ${enable_stats}" >&5 $as_echo "stats : ${enable_stats}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: experimental_smallocx : ${enable_experimental_smallocx}" >&5 $as_echo "experimental_smallocx : ${enable_experimental_smallocx}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof : ${enable_prof}" >&5 $as_echo "prof : ${enable_prof}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libunwind : ${enable_prof_libunwind}" >&5 $as_echo "prof-libunwind : ${enable_prof_libunwind}"
>&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libgcc : ${enable_prof_libgcc}" >&5 $as_echo "prof-libgcc : ${enable_prof_libgcc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-gcc : ${enable_prof_gcc}" >&5 $as_echo "prof-gcc : ${enable_prof_gcc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: fill : ${enable_fill}" >&5 $as_echo "fill : ${enable_fill}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: utrace : ${enable_utrace}" >&5 $as_echo "utrace : ${enable_utrace}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc : ${enable_xmalloc}" >&5 $as_echo "xmalloc : ${enable_xmalloc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: log : ${enable_log}" >&5 $as_echo "log : ${enable_log}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock : ${enable_lazy_lock}" >&5 $as_echo "lazy_lock : ${enable_lazy_lock}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious : ${enable_cache_oblivious}" >&5 $as_echo "cache-oblivious : ${enable_cache_oblivious}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: cxx : ${enable_cxx}" >&5 $as_echo "cxx : ${enable_cxx}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 $as_echo "===============================================================================" >&6; }

jemalloc-sys-0.3.2/rep/configure.ac

dnl Process this file with autoconf to produce a configure script. AC_PREREQ(2.68) AC_INIT([Makefile.in]) AC_CONFIG_AUX_DIR([build-aux]) dnl ============================================================================ dnl Custom macro definitions. dnl JE_CONCAT_VVV(r, a, b) dnl dnl Set $r to the concatenation of $a and $b, with a space separating them iff dnl both $a and $b are non-empty. AC_DEFUN([JE_CONCAT_VVV], if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then $1="[$]{$2}[$]{$3}" else $1="[$]{$2} [$]{$3}" fi ) dnl JE_APPEND_VS(a, b) dnl dnl Set $a to the concatenation of $a and b, with a space separating them iff dnl both $a and b are non-empty. AC_DEFUN([JE_APPEND_VS], T_APPEND_V=$2 JE_CONCAT_VVV($1, $1, T_APPEND_V) ) CONFIGURE_CFLAGS= SPECIFIED_CFLAGS="${CFLAGS}" dnl JE_CFLAGS_ADD(cflag) dnl dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests. This macro dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS.
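dnl Editor's sketch of intended usage (real call sites appear further below,
dnl e.g. JE_CFLAGS_ADD([-Wall])): a call such as
dnl     JE_CFLAGS_ADD([-std=gnu11])
dnl tentatively appends the flag to CONFIGURE_CFLAGS, regenerates CFLAGS, and
dnl compiles an empty program; on failure CONFIGURE_CFLAGS is restored, so an
dnl unsupported flag never leaks into the final CFLAGS.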
AC_DEFUN([JE_CFLAGS_ADD], [ AC_MSG_CHECKING([whether compiler supports $1]) T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" JE_APPEND_VS(CONFIGURE_CFLAGS, $1) JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[ ]], [[ return 0; ]])], [je_cv_cflags_added=$1] AC_MSG_RESULT([yes]), [je_cv_cflags_added=] AC_MSG_RESULT([no]) [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"] ) JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) ]) dnl JE_CFLAGS_SAVE() dnl JE_CFLAGS_RESTORE() dnl dnl Save/restore CFLAGS. Nesting is not supported. AC_DEFUN([JE_CFLAGS_SAVE], SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" ) AC_DEFUN([JE_CFLAGS_RESTORE], CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) ) CONFIGURE_CXXFLAGS= SPECIFIED_CXXFLAGS="${CXXFLAGS}" dnl JE_CXXFLAGS_ADD(cxxflag) AC_DEFUN([JE_CXXFLAGS_ADD], [ AC_MSG_CHECKING([whether compiler supports $1]) T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1) JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS) AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[ ]], [[ return 0; ]])], [je_cv_cxxflags_added=$1] AC_MSG_RESULT([yes]), [je_cv_cxxflags_added=] AC_MSG_RESULT([no]) [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"] ) AC_LANG_POP([C++]) JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS) ]) dnl JE_COMPILABLE(label, hcode, mcode, rvar) dnl dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors dnl cause failure. AC_DEFUN([JE_COMPILABLE], [ AC_CACHE_CHECK([whether $1 is compilable], [$4], [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2], [$3])], [$4=yes], [$4=no])]) ]) dnl ============================================================================ CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` AC_SUBST([CONFIG]) dnl Library revision. rev=2 AC_SUBST([rev]) srcroot=$srcdir if test "x${srcroot}" = "x." ; then srcroot="" else srcroot="${srcroot}/" fi AC_SUBST([srcroot]) abs_srcroot="`cd \"${srcdir}\"; pwd`/" AC_SUBST([abs_srcroot]) objroot="" AC_SUBST([objroot]) abs_objroot="`pwd`/" AC_SUBST([abs_objroot]) dnl Munge install path variables. if test "x$prefix" = "xNONE" ; then prefix="/usr/local" fi if test "x$exec_prefix" = "xNONE" ; then exec_prefix=$prefix fi PREFIX=$prefix AC_SUBST([PREFIX]) BINDIR=`eval echo $bindir` BINDIR=`eval echo $BINDIR` AC_SUBST([BINDIR]) INCLUDEDIR=`eval echo $includedir` INCLUDEDIR=`eval echo $INCLUDEDIR` AC_SUBST([INCLUDEDIR]) LIBDIR=`eval echo $libdir` LIBDIR=`eval echo $LIBDIR` AC_SUBST([LIBDIR]) DATADIR=`eval echo $datadir` DATADIR=`eval echo $DATADIR` AC_SUBST([DATADIR]) MANDIR=`eval echo $mandir` MANDIR=`eval echo $MANDIR` AC_SUBST([MANDIR]) dnl Support for building documentation. AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH]) if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" else dnl Documentation building will fail if this default gets used. DEFAULT_XSLROOT="" fi AC_ARG_WITH([xslroot], [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [ if test "x$with_xslroot" = "xno" ; then XSLROOT="${DEFAULT_XSLROOT}" else XSLROOT="${with_xslroot}" fi ], XSLROOT="${DEFAULT_XSLROOT}" ) if test "x$XSLTPROC" = "xfalse" ; then XSLROOT="" fi AC_SUBST([XSLROOT]) dnl If CFLAGS isn't defined, set CFLAGS to something reasonable.
Otherwise, dnl just prevent autoconf from molesting CFLAGS. CFLAGS=$CFLAGS AC_PROG_CC if test "x$GCC" != "xyes" ; then AC_CACHE_CHECK([whether compiler is MSVC], [je_cv_msvc], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #ifndef _MSC_VER int fail[-1]; #endif ])], [je_cv_msvc=yes], [je_cv_msvc=no])]) fi dnl check if a cray prgenv wrapper compiler is being used je_cv_cray_prgenv_wrapper="" if test "x${PE_ENV}" != "x" ; then case "${CC}" in CC|cc) je_cv_cray_prgenv_wrapper="yes" ;; *) ;; esac fi AC_CACHE_CHECK([whether compiler is cray], [je_cv_cray], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #ifndef _CRAYC int fail[-1]; #endif ])], [je_cv_cray=yes], [je_cv_cray=no])]) if test "x${je_cv_cray}" = "xyes" ; then AC_CACHE_CHECK([whether cray compiler version is 8.4], [je_cv_cray_84], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) int fail[-1]; #endif ])], [je_cv_cray_84=yes], [je_cv_cray_84=no])]) fi if test "x$GCC" = "xyes" ; then JE_CFLAGS_ADD([-std=gnu11]) if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) else JE_CFLAGS_ADD([-std=gnu99]) if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) fi fi JE_CFLAGS_ADD([-Wall]) JE_CFLAGS_ADD([-Wextra]) JE_CFLAGS_ADD([-Wshorten-64-to-32]) JE_CFLAGS_ADD([-Wsign-compare]) JE_CFLAGS_ADD([-Wundef]) JE_CFLAGS_ADD([-Wno-format-zero-length]) JE_CFLAGS_ADD([-pipe]) JE_CFLAGS_ADD([-g3]) elif test "x$je_cv_msvc" = "xyes" ; then CC="$CC -nologo" JE_CFLAGS_ADD([-Zi]) JE_CFLAGS_ADD([-MT]) JE_CFLAGS_ADD([-W3]) JE_CFLAGS_ADD([-FS]) JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat) fi if test "x$je_cv_cray" = "xyes" ; then dnl cray compiler 8.4 has an inlining bug if test "x$je_cv_cray_84" = "xyes" ; then JE_CFLAGS_ADD([-hipa2]) JE_CFLAGS_ADD([-hnognu]) fi dnl ignore unreachable code warning JE_CFLAGS_ADD([-hnomessage=128]) dnl ignore redefinition of "malloc", "free", etc warning JE_CFLAGS_ADD([-hnomessage=1357]) fi AC_SUBST([CONFIGURE_CFLAGS]) AC_SUBST([SPECIFIED_CFLAGS]) AC_SUBST([EXTRA_CFLAGS]) AC_PROG_CPP AC_ARG_ENABLE([cxx], [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])], if test "x$enable_cxx" = "xno" ; then enable_cxx="0" else enable_cxx="1" fi , enable_cxx="1" ) if test "x$enable_cxx" = "x1" ; then dnl Require at least c++14, which is the first version to support sized dnl deallocation. C++ support is not compiled otherwise. 
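dnl Editor's note (illustrative, not upstream text): "sized deallocation"
dnl refers to the C++14 global overload
dnl     void operator delete(void* ptr, std::size_t size) noexcept;
dnl which lets the allocator free a block without looking its size up again;
dnl the check below only verifies that the compiler accepts C++14 or newer.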
m4_include([m4/ax_cxx_compile_stdcxx.m4]) AX_CXX_COMPILE_STDCXX([14], [noext], [optional]) if test "x${HAVE_CXX14}" = "x1" ; then JE_CXXFLAGS_ADD([-Wall]) JE_CXXFLAGS_ADD([-Wextra]) JE_CXXFLAGS_ADD([-g3]) SAVED_LIBS="${LIBS}" JE_APPEND_VS(LIBS, -lstdc++) JE_COMPILABLE([libstdc++ linkage], [ #include <stdlib.h> ], [[ int *arr = (int *)malloc(sizeof(int) * 42); if (arr == NULL) return 1; ]], [je_cv_libstdcxx]) if test "x${je_cv_libstdcxx}" = "xno" ; then LIBS="${SAVED_LIBS}" fi else enable_cxx="0" fi fi AC_SUBST([enable_cxx]) AC_SUBST([CONFIGURE_CXXFLAGS]) AC_SUBST([SPECIFIED_CXXFLAGS]) AC_SUBST([EXTRA_CXXFLAGS]) AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0]) if test "x${ac_cv_big_endian}" = "x1" ; then AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ]) fi if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99) fi if test "x${je_cv_msvc}" = "xyes" ; then LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit]) else AC_CHECK_SIZEOF([void *]) if test "x${ac_cv_sizeof_void_p}" = "x8" ; then LG_SIZEOF_PTR=3 elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then LG_SIZEOF_PTR=2 else AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) fi fi AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR]) AC_CHECK_SIZEOF([int]) if test "x${ac_cv_sizeof_int}" = "x8" ; then LG_SIZEOF_INT=3 elif test "x${ac_cv_sizeof_int}" = "x4" ; then LG_SIZEOF_INT=2 else AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT]) AC_CHECK_SIZEOF([long]) if test "x${ac_cv_sizeof_long}" = "x8" ; then LG_SIZEOF_LONG=3 elif test "x${ac_cv_sizeof_long}" = "x4" ; then LG_SIZEOF_LONG=2 else AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG]) AC_CHECK_SIZEOF([long long]) if test "x${ac_cv_sizeof_long_long}" = "x8" ; then LG_SIZEOF_LONG_LONG=3 elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then LG_SIZEOF_LONG_LONG=2 else AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG]) AC_CHECK_SIZEOF([intmax_t]) if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then LG_SIZEOF_INTMAX_T=4 elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then LG_SIZEOF_INTMAX_T=3 elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then LG_SIZEOF_INTMAX_T=2 else AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T]) AC_CANONICAL_HOST dnl CPU-specific settings.
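dnl Editor's sketch (hypothetical C consumer, not part of configure.ac): the
dnl CPU_SPINWAIT value detected below is meant for busy-wait loops, roughly
dnl     while (!flag) { CPU_SPINWAIT; }  /* e.g. __asm__ volatile("pause") */
dnl where the pause hint reduces power use and pipeline flushes while
dnl spinning on x86; on CPUs without such a hint it expands to nothing.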
CPU_SPINWAIT="" case "${host_cpu}" in i686|x86_64) HAVE_CPU_SPINWAIT=1 if test "x${je_cv_msvc}" = "xyes" ; then AC_CACHE_VAL([je_cv_pause_msvc], [JE_COMPILABLE([pause instruction MSVC], [], [[_mm_pause(); return 0;]], [je_cv_pause_msvc])]) if test "x${je_cv_pause_msvc}" = "xyes" ; then CPU_SPINWAIT='_mm_pause()' fi else AC_CACHE_VAL([je_cv_pause], [JE_COMPILABLE([pause instruction], [], [[__asm__ volatile("pause"); return 0;]], [je_cv_pause])]) if test "x${je_cv_pause}" = "xyes" ; then CPU_SPINWAIT='__asm__ volatile("pause")' fi fi ;; *) HAVE_CPU_SPINWAIT=0 ;; esac AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT]) AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) AC_ARG_WITH([lg_vaddr], [AS_HELP_STRING([--with-lg-vaddr=], [Number of significant virtual address bits])], [LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"]) case "${host_cpu}" in aarch64) if test "x$LG_VADDR" = "xdetect"; then AC_MSG_CHECKING([number of significant virtual address bits]) if test "x${LG_SIZEOF_PTR}" = "x2" ; then #aarch64 ILP32 LG_VADDR=32 else #aarch64 LP64 LG_VADDR=48 fi AC_MSG_RESULT([$LG_VADDR]) fi ;; x86_64) if test "x$LG_VADDR" = "xdetect"; then AC_CACHE_CHECK([number of significant virtual address bits], [je_cv_lg_vaddr], AC_RUN_IFELSE([AC_LANG_PROGRAM( [[ #include #ifdef _WIN32 #include #include typedef unsigned __int32 uint32_t; #else #include #endif ]], [[ uint32_t r[[4]]; uint32_t eax_in = 0x80000008U; #ifdef _WIN32 __cpuid((int *)r, (int)eax_in); #else asm volatile ("cpuid" : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]]) : "a" (eax_in), "c" (0) ); #endif uint32_t eax_out = r[[0]]; uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8); FILE *f = fopen("conftest.out", "w"); if (f == NULL) { return 1; } if (vaddr > (sizeof(void *) << 3)) { vaddr = sizeof(void *) << 3; } fprintf(f, "%u", vaddr); fclose(f); return 0; ]])], [je_cv_lg_vaddr=`cat conftest.out`], [je_cv_lg_vaddr=error], [je_cv_lg_vaddr=57])) if test "x${je_cv_lg_vaddr}" != "x" ; then LG_VADDR="${je_cv_lg_vaddr}" fi if test "x${LG_VADDR}" != "xerror" ; then AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) else AC_MSG_ERROR([cannot determine number of significant virtual address bits]) fi fi ;; *) if test "x$LG_VADDR" = "xdetect"; then AC_MSG_CHECKING([number of significant virtual address bits]) if test "x${LG_SIZEOF_PTR}" = "x3" ; then LG_VADDR=64 elif test "x${LG_SIZEOF_PTR}" = "x2" ; then LG_VADDR=32 elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))" else AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}]) fi AC_MSG_RESULT([$LG_VADDR]) fi ;; esac AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" o="$ac_objext" a="a" exe="$ac_exeext" libprefix="lib" link_whole_archive="0" DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' RPATH='-Wl,-rpath,$(1)' SOREV="${so}.${rev}" PIC_CFLAGS='-fPIC -DPIC' CTARGET='-o $@' LDTARGET='-o $@' TEST_LD_MODE= EXTRA_LDFLAGS= ARFLAGS='crus' AROUT=' $@' CC_MM=1 if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then TEST_LD_MODE='-dynamic' fi if test "x${je_cv_cray}" = "xyes" ; then CC_MM= fi AN_MAKEVAR([AR], [AC_PROG_AR]) AN_PROGRAM([ar], [AC_PROG_AR]) AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)]) AC_PROG_AR AN_MAKEVAR([NM], [AC_PROG_NM]) AN_PROGRAM([nm], [AC_PROG_NM]) AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)]) AC_PROG_NM AC_PROG_AWK dnl ============================================================================ dnl jemalloc version. 
dnl AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>], [Version string])], [ echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null if test $? -eq 0 ; then echo "$with_version" > "${objroot}VERSION" else echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null if test $? -ne 0 ; then AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION]) fi fi ], [ dnl Set VERSION if source directory is inside a git repository. if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then dnl Pattern globs aren't powerful enough to match both single- and dnl double-digit version numbers, so iterate over patterns to support up dnl to version 99.99.99 without any accidental matches. for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ '[0-9][0-9].[0-9][0-9].[0-9]' \ '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null if test $? -eq 0 ; then mv "${objroot}VERSION.tmp" "${objroot}VERSION" break fi done fi rm -f "${objroot}VERSION.tmp" ]) if test ! -e "${objroot}VERSION" ; then if test ! -e "${srcroot}VERSION" ; then AC_MSG_RESULT( [Missing VERSION file, and unable to generate it; creating bogus VERSION]) echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" else cp ${srcroot}VERSION ${objroot}VERSION fi fi jemalloc_version=`cat "${objroot}VERSION"` jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'` jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'` jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'` jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'` jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'` AC_SUBST([jemalloc_version]) AC_SUBST([jemalloc_version_major]) AC_SUBST([jemalloc_version_minor]) AC_SUBST([jemalloc_version_bugfix]) AC_SUBST([jemalloc_version_nrev]) AC_SUBST([jemalloc_version_gid]) dnl Platform-specific settings. abi and RPATH can probably be determined dnl programmatically, but doing so is error-prone, which makes it generally dnl not worth the trouble. dnl dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the dnl definitions need to be seen before any headers are included, which is a pain dnl to make happen otherwise. default_retain="0" maps_coalesce="1" DUMP_SYMS="${NM} -a" SYM_PREFIX="" case "${host}" in *-*-darwin* | *-*-ios*) abi="macho" RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" importlib="${so}" force_tls="0" DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" SYM_PREFIX="_" ;; *-*-freebsd*) abi="elf" AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) force_lazy_lock="1" ;; *-*-dragonfly*) abi="elf" ;; *-*-openbsd*) abi="elf" force_tls="0" ;; *-*-bitrig*) abi="elf" ;; *-*-linux-android) dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ]) AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) AC_DEFINE([JEMALLOC_C11_ATOMICS]) force_tls="0" if test "${LG_SIZEOF_PTR}" = "3"; then default_retain="1" fi ;; *-*-linux*) dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ]) AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) if test "${LG_SIZEOF_PTR}" = "3"; then default_retain="1" fi ;; *-*-kfreebsd*) dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) abi="elf" AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) ;; *-*-netbsd*) AC_MSG_CHECKING([ABI]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[#ifdef __ELF__ /* ELF */ #else #error aout #endif ]])], [abi="elf"], [abi="aout"]) AC_MSG_RESULT([$abi]) ;; *-*-solaris2*) abi="elf" RPATH='-Wl,-R,$(1)' dnl Solaris needs this for sigwait(). JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS) JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl) ;; *-ibm-aix*) if test "${LG_SIZEOF_PTR}" = "3"; then dnl 64bit AIX LD_PRELOAD_VAR="LDR_PRELOAD64" else dnl 32bit AIX LD_PRELOAD_VAR="LDR_PRELOAD" fi abi="xcoff" ;; *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" maps_coalesce="0" RPATH="" so="dll" if test "x$je_cv_msvc" = "xyes" ; then importlib="lib" DSO_LDFLAGS="-LD" EXTRA_LDFLAGS="-link -DEBUG" CTARGET='-Fo$@' LDTARGET='-Fe$@' AR='lib' ARFLAGS='-nologo -out:' AROUT='$@' CC_MM= else importlib="${so}" DSO_LDFLAGS="-shared" link_whole_archive="1" fi case "${host}" in *-*-cygwin*) DUMP_SYMS="dumpbin /SYMBOLS" ;; *) ;; esac a="lib" libprefix="" SOREV="${so}" PIC_CFLAGS="" ;; *) AC_MSG_RESULT([Unsupported operating system: ${host}]) abi="elf" ;; esac JEMALLOC_USABLE_SIZE_CONST=const AC_CHECK_HEADERS([malloc.h], [ AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [#include #include size_t malloc_usable_size(const void *ptr); ], [])],[ AC_MSG_RESULT([yes]) ],[ JEMALLOC_USABLE_SIZE_CONST= AC_MSG_RESULT([no]) ]) ]) AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST]) AC_SUBST([abi]) AC_SUBST([RPATH]) AC_SUBST([LD_PRELOAD_VAR]) AC_SUBST([so]) AC_SUBST([importlib]) AC_SUBST([o]) AC_SUBST([a]) AC_SUBST([exe]) AC_SUBST([libprefix]) AC_SUBST([link_whole_archive]) AC_SUBST([DSO_LDFLAGS]) AC_SUBST([EXTRA_LDFLAGS]) AC_SUBST([SOREV]) AC_SUBST([PIC_CFLAGS]) AC_SUBST([CTARGET]) AC_SUBST([LDTARGET]) AC_SUBST([TEST_LD_MODE]) AC_SUBST([MKLIB]) AC_SUBST([ARFLAGS]) AC_SUBST([AROUT]) AC_SUBST([DUMP_SYMS]) AC_SUBST([CC_MM]) dnl Determine whether libm must be linked to use e.g. log(3). 
AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])]) if test "x$ac_cv_search_log" != "xnone required" ; then LM="$ac_cv_search_log" else LM= fi AC_SUBST(LM) JE_COMPILABLE([__attribute__ syntax], [static __attribute__((unused)) void foo(void){}], [], [je_cv_attribute]) if test "x${je_cv_attribute}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then JE_CFLAGS_ADD([-fvisibility=hidden]) JE_CXXFLAGS_ADD([-fvisibility=hidden]) fi fi dnl Check for tls_model attribute support (clang 3.0 still lacks support). JE_CFLAGS_SAVE() JE_CFLAGS_ADD([-Werror]) JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([tls_model attribute], [], [static __thread int __attribute__((tls_model("initial-exec"), unused)) foo; foo = 0;], [je_cv_tls_model]) JE_CFLAGS_RESTORE() dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for dnl --disable-initial-exec-tls) dnl Check for alloc_size attribute support. JE_CFLAGS_SAVE() JE_CFLAGS_ADD([-Werror]) JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>], [void *foo(size_t size) __attribute__((alloc_size(1)));], [je_cv_alloc_size]) JE_CFLAGS_RESTORE() if test "x${je_cv_alloc_size}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ]) fi dnl Check for format(gnu_printf, ...) attribute support. JE_CFLAGS_SAVE() JE_CFLAGS_ADD([-Werror]) JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdio.h>], [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));], [je_cv_format_gnu_printf]) JE_CFLAGS_RESTORE() if test "x${je_cv_format_gnu_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ]) fi dnl Check for format(printf, ...) attribute support. JE_CFLAGS_SAVE() JE_CFLAGS_ADD([-Werror]) JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([format(printf, ...) attribute], [#include <stdio.h>], [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));], [je_cv_format_printf]) JE_CFLAGS_RESTORE() if test "x${je_cv_format_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ]) fi dnl Support optional additions to rpath. AC_ARG_WITH([rpath], [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])], if test "x$with_rpath" = "xno" ; then RPATH_EXTRA= else RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" fi, RPATH_EXTRA= ) AC_SUBST([RPATH_EXTRA]) dnl Disable rules that do automatic regeneration of configure output by default. AC_ARG_ENABLE([autogen], [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])], if test "x$enable_autogen" = "xno" ; then enable_autogen="0" else enable_autogen="1" fi , enable_autogen="0" ) AC_SUBST([enable_autogen]) AC_PROG_INSTALL AC_PROG_RANLIB AC_PATH_PROG([LD], [ld], [false], [$PATH]) AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) dnl Enable shared libs AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared], [Build shared libraries])], if test "x$enable_shared" = "xno" ; then enable_shared="0" else enable_shared="1" fi , enable_shared="1" ) AC_SUBST([enable_shared]) dnl Enable static libs AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static], [Build static libraries])], if test "x$enable_static" = "xno" ; then enable_static="0" else enable_static="1" fi , enable_static="1" ) AC_SUBST([enable_static]) if test "$enable_shared$enable_static" = "00" ; then AC_MSG_ERROR([Please enable one of shared or static builds]) fi dnl Perform no name mangling by default.
AC_ARG_WITH([mangling], [AS_HELP_STRING([--with-mangling=], [Mangle symbols in ])], [mangling_map="$with_mangling"], [mangling_map=""]) dnl Do not prefix public APIs by default. AC_ARG_WITH([jemalloc_prefix], [AS_HELP_STRING([--with-jemalloc-prefix=], [Prefix to prepend to all public APIs])], [JEMALLOC_PREFIX="$with_jemalloc_prefix"], [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then JEMALLOC_PREFIX="" else JEMALLOC_PREFIX="je_" fi] ) if test "x$JEMALLOC_PREFIX" = "x" ; then AC_DEFINE([JEMALLOC_IS_MALLOC]) else JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) fi AC_SUBST([JEMALLOC_PREFIX]) AC_SUBST([JEMALLOC_CPREFIX]) AC_ARG_WITH([export], [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])], [if test "x$with_export" = "xno"; then AC_DEFINE([JEMALLOC_EXPORT],[]) fi] ) public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx" dnl Check for additional platform-specific public API functions. AC_CHECK_FUNC([memalign], [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) public_syms="${public_syms} memalign"]) AC_CHECK_FUNC([valloc], [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) public_syms="${public_syms} valloc"]) dnl Check for allocator-related functions that should be wrapped. wrap_syms= if test "x${JEMALLOC_PREFIX}" = "x" ; then AC_CHECK_FUNC([__libc_calloc], [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ]) wrap_syms="${wrap_syms} __libc_calloc"]) AC_CHECK_FUNC([__libc_free], [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ]) wrap_syms="${wrap_syms} __libc_free"]) AC_CHECK_FUNC([__libc_malloc], [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ]) wrap_syms="${wrap_syms} __libc_malloc"]) AC_CHECK_FUNC([__libc_memalign], [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ]) wrap_syms="${wrap_syms} __libc_memalign"]) AC_CHECK_FUNC([__libc_realloc], [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ]) wrap_syms="${wrap_syms} __libc_realloc"]) AC_CHECK_FUNC([__libc_valloc], [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ]) wrap_syms="${wrap_syms} __libc_valloc"]) AC_CHECK_FUNC([__posix_memalign], [AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ]) wrap_syms="${wrap_syms} __posix_memalign"]) fi case "${host}" in *-*-mingw* | *-*-cygwin*) wrap_syms="${wrap_syms} tls_callback" ;; *) ;; esac dnl Mangle library-private APIs. AC_ARG_WITH([private_namespace], [AS_HELP_STRING([--with-private-namespace=], [Prefix to prepend to all library-private APIs])], [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"], [JEMALLOC_PRIVATE_NAMESPACE="je_"] ) AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE]) private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" AC_SUBST([private_namespace]) dnl Do not add suffix to installed files by default. AC_ARG_WITH([install_suffix], [AS_HELP_STRING([--with-install-suffix=], [Suffix to append to all installed files])], [INSTALL_SUFFIX="$with_install_suffix"], [INSTALL_SUFFIX=] ) install_suffix="$INSTALL_SUFFIX" AC_SUBST([install_suffix]) dnl Specify default malloc_conf. 
AC_ARG_WITH([malloc_conf], [AS_HELP_STRING([--with-malloc-conf=], [config.malloc_conf options string])], [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"], [JEMALLOC_CONFIG_MALLOC_CONF=""] ) config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF" AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"]) dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of dnl jemalloc_protos_jet.h easy. je_="je_" AC_SUBST([je_]) cfgoutputs_in="Makefile.in" cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in" cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" cfgoutputs_out="Makefile" cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_out="${cfgoutputs_out} test/test.sh" cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" cfgoutputs_tup="Makefile" cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" cfghdrs_out="include/jemalloc/jemalloc_defs.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk" cfghdrs_out="${cfghdrs_out} 
include/jemalloc/internal/private_symbols_jet.awk" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" dnl ============================================================================ dnl jemalloc build options. dnl dnl Do not compile with debugging by default. AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug], [Build debugging code])], [if test "x$enable_debug" = "xno" ; then enable_debug="0" else enable_debug="1" fi ], [enable_debug="0"] ) if test "x$enable_debug" = "x1" ; then AC_DEFINE([JEMALLOC_DEBUG], [ ]) fi if test "x$enable_debug" = "x1" ; then AC_DEFINE([JEMALLOC_DEBUG], [ ]) fi AC_SUBST([enable_debug]) dnl Only optimize if not debugging. if test "x$enable_debug" = "x0" ; then if test "x$GCC" = "xyes" ; then JE_CFLAGS_ADD([-O3]) JE_CXXFLAGS_ADD([-O3]) JE_CFLAGS_ADD([-funroll-loops]) elif test "x$je_cv_msvc" = "xyes" ; then JE_CFLAGS_ADD([-O2]) JE_CXXFLAGS_ADD([-O2]) else JE_CFLAGS_ADD([-O]) JE_CXXFLAGS_ADD([-O]) fi fi dnl Enable statistics calculation by default. AC_ARG_ENABLE([stats], [AS_HELP_STRING([--disable-stats], [Disable statistics calculation/reporting])], [if test "x$enable_stats" = "xno" ; then enable_stats="0" else enable_stats="1" fi ], [enable_stats="1"] ) if test "x$enable_stats" = "x1" ; then AC_DEFINE([JEMALLOC_STATS], [ ]) fi AC_SUBST([enable_stats]) dnl Do not enable smallocx by default. AC_ARG_ENABLE([experimental_smallocx], [AS_HELP_STRING([--enable-experimental-smallocx], [Enable experimental smallocx API])], [if test "x$enable_experimental_smallocx" = "xno" ; then enable_experimental_smallocx="0" else enable_experimental_smallocx="1" fi ], [enable_experimental_smallocx="0"] ) if test "x$enable_experimental_smallocx" = "x1" ; then AC_DEFINE([JEMALLOC_EXPERIMENTAL_SMALLOCX_API]) fi AC_SUBST([enable_experimental_smallocx]) dnl Do not enable profiling by default. AC_ARG_ENABLE([prof], [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])], [if test "x$enable_prof" = "xno" ; then enable_prof="0" else enable_prof="1" fi ], [enable_prof="0"] ) if test "x$enable_prof" = "x1" ; then backtrace_method="" else backtrace_method="N/A" fi AC_ARG_ENABLE([prof-libunwind], [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])], [if test "x$enable_prof_libunwind" = "xno" ; then enable_prof_libunwind="0" else enable_prof_libunwind="1" fi ], [enable_prof_libunwind="0"] ) AC_ARG_WITH([static_libunwind], [AS_HELP_STRING([--with-static-libunwind=], [Path to static libunwind library; use rather than dynamically linking])], if test "x$with_static_libunwind" = "xno" ; then LUNWIND="-lunwind" else if test ! 
-f "$with_static_libunwind" ; then AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind]) fi LUNWIND="$with_static_libunwind" fi, LUNWIND="-lunwind" ) if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) if test "x$LUNWIND" = "x-lunwind" ; then AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)], [enable_prof_libunwind="0"]) else JE_APPEND_VS(LIBS, $LUNWIND) fi if test "x${enable_prof_libunwind}" = "x1" ; then backtrace_method="libunwind" AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ]) fi fi AC_ARG_ENABLE([prof-libgcc], [AS_HELP_STRING([--disable-prof-libgcc], [Do not use libgcc for backtracing])], [if test "x$enable_prof_libgcc" = "xno" ; then enable_prof_libgcc="0" else enable_prof_libgcc="1" fi ], [enable_prof_libgcc="1"] ) if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ -a "x$GCC" = "xyes" ; then AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) if test "x${enable_prof_libgcc}" = "x1" ; then AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"]) fi if test "x${enable_prof_libgcc}" = "x1" ; then backtrace_method="libgcc" AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) fi else enable_prof_libgcc="0" fi AC_ARG_ENABLE([prof-gcc], [AS_HELP_STRING([--disable-prof-gcc], [Do not use gcc intrinsics for backtracing])], [if test "x$enable_prof_gcc" = "xno" ; then enable_prof_gcc="0" else enable_prof_gcc="1" fi ], [enable_prof_gcc="1"] ) if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ -a "x$GCC" = "xyes" ; then JE_CFLAGS_ADD([-fno-omit-frame-pointer]) backtrace_method="gcc intrinsics" AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) else enable_prof_gcc="0" fi if test "x$backtrace_method" = "x" ; then backtrace_method="none (disabling profiling)" enable_prof="0" fi AC_MSG_CHECKING([configured backtracing method]) AC_MSG_RESULT([$backtrace_method]) if test "x$enable_prof" = "x1" ; then dnl Heap profiling uses the log(3) function. JE_APPEND_VS(LIBS, $LM) AC_DEFINE([JEMALLOC_PROF], [ ]) fi AC_SUBST([enable_prof]) dnl Indicate whether adjacent virtual memory mappings automatically coalesce dnl (and fragment on demand). if test "x${maps_coalesce}" = "x1" ; then AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ]) fi dnl Indicate whether to retain memory (rather than using munmap()) by default. if test "x$default_retain" = "x1" ; then AC_DEFINE([JEMALLOC_RETAIN], [ ]) fi dnl Enable allocation from DSS if supported by the OS. have_dss="1" dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support. AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) if test "x$have_sbrk" = "x1" ; then if test "x$sbrk_deprecated" = "x1" ; then AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated]) have_dss="0" fi else have_dss="0" fi if test "x$have_dss" = "x1" ; then AC_DEFINE([JEMALLOC_DSS], [ ]) fi dnl Support the junk/zero filling option by default. AC_ARG_ENABLE([fill], [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])], [if test "x$enable_fill" = "xno" ; then enable_fill="0" else enable_fill="1" fi ], [enable_fill="1"] ) if test "x$enable_fill" = "x1" ; then AC_DEFINE([JEMALLOC_FILL], [ ]) fi AC_SUBST([enable_fill]) dnl Disable utrace(2)-based tracing by default. 
AC_ARG_ENABLE([utrace], [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])], [if test "x$enable_utrace" = "xno" ; then enable_utrace="0" else enable_utrace="1" fi ], [enable_utrace="0"] ) JE_COMPILABLE([utrace(2)], [ #include <sys/types.h> #include <sys/param.h> #include <sys/time.h> #include <sys/uio.h> #include <sys/ktrace.h> ], [ utrace((void *)0, 0); ], [je_cv_utrace]) if test "x${je_cv_utrace}" = "xno" ; then enable_utrace="0" fi if test "x$enable_utrace" = "x1" ; then AC_DEFINE([JEMALLOC_UTRACE], [ ]) fi AC_SUBST([enable_utrace]) dnl Do not support the xmalloc option by default. AC_ARG_ENABLE([xmalloc], [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])], [if test "x$enable_xmalloc" = "xno" ; then enable_xmalloc="0" else enable_xmalloc="1" fi ], [enable_xmalloc="0"] ) if test "x$enable_xmalloc" = "x1" ; then AC_DEFINE([JEMALLOC_XMALLOC], [ ]) fi AC_SUBST([enable_xmalloc]) dnl Support cache-oblivious allocation alignment by default. AC_ARG_ENABLE([cache-oblivious], [AS_HELP_STRING([--disable-cache-oblivious], [Disable support for cache-oblivious allocation alignment])], [if test "x$enable_cache_oblivious" = "xno" ; then enable_cache_oblivious="0" else enable_cache_oblivious="1" fi ], [enable_cache_oblivious="1"] ) if test "x$enable_cache_oblivious" = "x1" ; then AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ]) fi AC_SUBST([enable_cache_oblivious]) dnl Do not log by default. AC_ARG_ENABLE([log], [AS_HELP_STRING([--enable-log], [Support debug logging])], [if test "x$enable_log" = "xno" ; then enable_log="0" else enable_log="1" fi ], [enable_log="0"] ) if test "x$enable_log" = "x1" ; then AC_DEFINE([JEMALLOC_LOG], [ ]) fi AC_SUBST([enable_log]) dnl Do not use readlinkat by default AC_ARG_ENABLE([readlinkat], [AS_HELP_STRING([--enable-readlinkat], [Use readlinkat over readlink])], [if test "x$enable_readlinkat" = "xno" ; then enable_readlinkat="0" else enable_readlinkat="1" fi ], [enable_readlinkat="0"] ) if test "x$enable_readlinkat" = "x1" ; then AC_DEFINE([JEMALLOC_READLINKAT], [ ]) fi AC_SUBST([enable_readlinkat]) dnl Avoid the extra size checking by default AC_ARG_ENABLE([extra-size-check], [AS_HELP_STRING([--enable-extra-size-check], [Perform additional size related sanity checks])], [if test "x$enable_extra_size_check" = "xno" ; then enable_extra_size_check="0" else enable_extra_size_check="1" fi ], [enable_extra_size_check="0"] ) if test "x$enable_extra_size_check" = "x1" ; then AC_DEFINE([JEMALLOC_EXTRA_SIZE_CHECK], [ ]) fi AC_SUBST([enable_extra_size_check]) JE_COMPILABLE([a program using __builtin_unreachable], [ void foo (void) { __builtin_unreachable(); } ], [ { foo(); } ], [je_cv_gcc_builtin_unreachable]) if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable]) else AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort]) fi dnl ============================================================================ dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found. dnl One of those two functions should (theoretically) exist on all platforms dnl that jemalloc currently has a chance of functioning on without modification. dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if dnl ffsl() or __builtin_ffsl() are defined, respectively.
JE_COMPILABLE([a program using __builtin_ffsl], [ #include #include #include ], [ { int rv = __builtin_ffsl(0x08); printf("%d\n", rv); } ], [je_cv_gcc_builtin_ffsl]) if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll]) AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl]) AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs]) else JE_COMPILABLE([a program using ffsl], [ #include #include #include ], [ { int rv = ffsl(0x08); printf("%d\n", rv); } ], [je_cv_function_ffsl]) if test "x${je_cv_function_ffsl}" = "xyes" ; then AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll]) AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl]) AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs]) else AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()]) fi fi JE_COMPILABLE([a program using __builtin_popcountl], [ #include #include #include ], [ { int rv = __builtin_popcountl(0x08); printf("%d\n", rv); } ], [je_cv_gcc_builtin_popcountl]) if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNT], [__builtin_popcount]) AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNTL], [__builtin_popcountl]) fi AC_ARG_WITH([lg_quantum], [AS_HELP_STRING([--with-lg-quantum=], [Base 2 log of minimum allocation alignment])], [LG_QUANTA="$with_lg_quantum"], [LG_QUANTA="3 4"]) if test "x$with_lg_quantum" != "x" ; then AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum]) fi AC_ARG_WITH([lg_page], [AS_HELP_STRING([--with-lg-page=], [Base 2 log of system page size])], [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"]) if test "x$LG_PAGE" = "xdetect"; then AC_CACHE_CHECK([LG_PAGE], [je_cv_lg_page], AC_RUN_IFELSE([AC_LANG_PROGRAM( [[ #include #ifdef _WIN32 #include #else #include #endif #include ]], [[ int result; FILE *f; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwPageSize; #else result = sysconf(_SC_PAGESIZE); #endif if (result == -1) { return 1; } result = JEMALLOC_INTERNAL_FFSL(result) - 1; f = fopen("conftest.out", "w"); if (f == NULL) { return 1; } fprintf(f, "%d", result); fclose(f); return 0; ]])], [je_cv_lg_page=`cat conftest.out`], [je_cv_lg_page=undefined], [je_cv_lg_page=12])) fi if test "x${je_cv_lg_page}" != "x" ; then LG_PAGE="${je_cv_lg_page}" fi if test "x${LG_PAGE}" != "xundefined" ; then AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE]) else AC_MSG_ERROR([cannot determine value for LG_PAGE]) fi AC_ARG_WITH([lg_hugepage], [AS_HELP_STRING([--with-lg-hugepage=], [Base 2 log of system huge page size])], [je_cv_lg_hugepage="${with_lg_hugepage}"], [je_cv_lg_hugepage=""]) if test "x${je_cv_lg_hugepage}" = "x" ; then dnl Look in /proc/meminfo (Linux-specific) for information on the default huge dnl page size, if any. The relevant line looks like: dnl dnl Hugepagesize: 2048 kB if test -e "/proc/meminfo" ; then hpsk=[`cat /proc/meminfo 2>/dev/null | \ grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \ awk '{print $2}'`] if test "x${hpsk}" != "x" ; then je_cv_lg_hugepage=10 while test "${hpsk}" -gt 1 ; do hpsk="$((hpsk / 2))" je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))" done fi fi dnl Set default if unable to automatically configure. 
if test "x${je_cv_lg_hugepage}" = "x" ; then je_cv_lg_hugepage=21 fi fi if test "x${LG_PAGE}" != "xundefined" -a \ "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})]) fi AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}]) dnl ============================================================================ dnl Enable libdl by default. AC_ARG_ENABLE([libdl], [AS_HELP_STRING([--disable-libdl], [Do not use libdl])], [if test "x$enable_libdl" = "xno" ; then enable_libdl="0" else enable_libdl="1" fi ], [enable_libdl="1"] ) AC_SUBST([libdl]) dnl ============================================================================ dnl Configure pthreads. if test "x$abi" != "xpecoff" ; then AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ]) AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])]) dnl Some systems may embed pthreads functionality in libc; check for libpthread dnl first, but try libc too before failing. AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -pthread)], [AC_SEARCH_LIBS([pthread_create], , , AC_MSG_ERROR([libpthread is missing]))]) wrap_syms="${wrap_syms} pthread_create" have_pthread="1" dnl Check if we have dlsym support. if test "x$enable_libdl" = "x1" ; then have_dlsym="1" AC_CHECK_HEADERS([dlfcn.h], AC_CHECK_FUNC([dlsym], [], [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]), [have_dlsym="0"]) if test "x$have_dlsym" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ]) fi else have_dlsym="0" fi JE_COMPILABLE([pthread_atfork(3)], [ #include ], [ pthread_atfork((void *)0, (void *)0, (void *)0); ], [je_cv_pthread_atfork]) if test "x${je_cv_pthread_atfork}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ]) fi dnl Check if pthread_setname_np is available with the expected API. JE_COMPILABLE([pthread_setname_np(3)], [ #include ], [ pthread_setname_np(pthread_self(), "setname_test"); ], [je_cv_pthread_setname_np]) if test "x${je_cv_pthread_setname_np}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ]) fi fi JE_APPEND_VS(CPPFLAGS, -D_REENTRANT) dnl Check whether clock_gettime(2) is in libc or librt. AC_SEARCH_LIBS([clock_gettime], [rt]) dnl Cray wrapper compiler often adds `-lrt` when using `-static`. Check with dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then if test "$ac_cv_search_clock_gettime" != "-lrt"; then JE_CFLAGS_SAVE() unset ac_cv_search_clock_gettime JE_CFLAGS_ADD([-dynamic]) AC_SEARCH_LIBS([clock_gettime], [rt]) JE_CFLAGS_RESTORE() fi fi dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific). JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [ #include ], [ struct timespec ts; clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); ], [je_cv_clock_monotonic_coarse]) if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE]) fi dnl check for CLOCK_MONOTONIC. JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [ #include #include ], [ struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); #if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0 # error _POSIX_MONOTONIC_CLOCK missing/invalid #endif ], [je_cv_clock_monotonic]) if test "x${je_cv_clock_monotonic}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC]) fi dnl Check for mach_absolute_time(). 
JE_COMPILABLE([mach_absolute_time()], [ #include ], [ mach_absolute_time(); ], [je_cv_mach_absolute_time]) if test "x${je_cv_mach_absolute_time}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME]) fi dnl Use syscall(2) (if available) by default. AC_ARG_ENABLE([syscall], [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])], [if test "x$enable_syscall" = "xno" ; then enable_syscall="0" else enable_syscall="1" fi ], [enable_syscall="1"] ) if test "x$enable_syscall" = "x1" ; then dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS dnl X 10.12's deprecation warning prevents use. JE_CFLAGS_SAVE() JE_CFLAGS_ADD([-Werror]) JE_COMPILABLE([syscall(2)], [ #include #include ], [ syscall(SYS_write, 2, "hello", 5); ], [je_cv_syscall]) JE_CFLAGS_RESTORE() if test "x$je_cv_syscall" = "xyes" ; then AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ]) fi fi dnl Check if the GNU-specific secure_getenv function exists. AC_CHECK_FUNC([secure_getenv], [have_secure_getenv="1"], [have_secure_getenv="0"] ) if test "x$have_secure_getenv" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ]) fi dnl Check if the GNU-specific sched_getcpu function exists. AC_CHECK_FUNC([sched_getcpu], [have_sched_getcpu="1"], [have_sched_getcpu="0"] ) if test "x$have_sched_getcpu" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ]) fi dnl Check if the GNU-specific sched_setaffinity function exists. AC_CHECK_FUNC([sched_setaffinity], [have_sched_setaffinity="1"], [have_sched_setaffinity="0"] ) if test "x$have_sched_setaffinity" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ]) fi dnl Check if the Solaris/BSD issetugid function exists. AC_CHECK_FUNC([issetugid], [have_issetugid="1"], [have_issetugid="0"] ) if test "x$have_issetugid" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ]) fi dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use dnl it rather than pthreads TSD cleanup functions to support cleanup during dnl thread exit, in order to avoid pthreads library recursion during dnl bootstrapping. AC_CHECK_FUNC([_malloc_thread_cleanup], [have__malloc_thread_cleanup="1"], [have__malloc_thread_cleanup="0"] ) if test "x$have__malloc_thread_cleanup" = "x1" ; then AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) wrap_syms="${wrap_syms} _malloc_thread_cleanup" force_tls="1" fi dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If dnl so, mutex initialization causes allocation, and we need to implement this dnl callback function in order to prevent recursive allocation. AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], [have__pthread_mutex_init_calloc_cb="1"], [have__pthread_mutex_init_calloc_cb="0"] ) if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork" fi dnl Disable lazy locking by default. 
AC_ARG_ENABLE([lazy_lock], [AS_HELP_STRING([--enable-lazy-lock], [Enable lazy locking (only lock when multi-threaded)])], [if test "x$enable_lazy_lock" = "xno" ; then enable_lazy_lock="0" else enable_lazy_lock="1" fi ], [enable_lazy_lock=""] ) if test "x${enable_lazy_lock}" = "x" ; then if test "x${force_lazy_lock}" = "x1" ; then AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) enable_lazy_lock="1" else enable_lazy_lock="0" fi fi if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented]) enable_lazy_lock="0" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$have_dlsym" = "x1" ; then AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) else AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.]) fi fi AC_SUBST([enable_lazy_lock]) dnl Automatically configure TLS. if test "x${force_tls}" = "x1" ; then enable_tls="1" elif test "x${force_tls}" = "x0" ; then enable_tls="0" else enable_tls="1" fi if test "x${enable_tls}" = "x1" ; then AC_MSG_CHECKING([for TLS]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[ __thread int x; ]], [[ x = 42; return 0; ]])], AC_MSG_RESULT([yes]), AC_MSG_RESULT([no]) enable_tls="0") else enable_tls="0" fi AC_SUBST([enable_tls]) if test "x${enable_tls}" = "x1" ; then AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) fi dnl ============================================================================ dnl Check for C11 atomics. JE_COMPILABLE([C11 atomics], [ #include #if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #include #else #error Atomics not available #endif ], [ uint64_t *p = (uint64_t *)0; uint64_t x = 1; volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; uint64_t r = atomic_fetch_add(a, x) + x; return r == 0; ], [je_cv_c11_atomics]) if test "x${je_cv_c11_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_C11_ATOMICS]) fi dnl ============================================================================ dnl Check for GCC-style __atomic atomics. JE_COMPILABLE([GCC __atomic atomics], [ ], [ int x = 0; int val = 1; int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); int after_add = x; return after_add == 1; ], [je_cv_gcc_atomic_atomics]) if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS]) dnl check for 8-bit atomic support JE_COMPILABLE([GCC 8-bit __atomic atomics], [ ], [ unsigned char x = 0; int val = 1; int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); int after_add = (int)x; return after_add == 1; ], [je_cv_gcc_u8_atomic_atomics]) if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_GCC_U8_ATOMIC_ATOMICS]) fi fi dnl ============================================================================ dnl Check for GCC-style __sync atomics. 
JE_COMPILABLE([GCC __sync atomics], [ ], [ int x = 0; int before_add = __sync_fetch_and_add(&x, 1); int after_add = x; return (before_add == 0) && (after_add == 1); ], [je_cv_gcc_sync_atomics]) if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS]) dnl check for 8-bit atomic support JE_COMPILABLE([GCC 8-bit __sync atomics], [ ], [ unsigned char x = 0; int before_add = __sync_fetch_and_add(&x, 1); int after_add = (int)x; return (before_add == 0) && (after_add == 1); ], [je_cv_gcc_u8_sync_atomics]) if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_GCC_U8_SYNC_ATOMICS]) fi fi dnl ============================================================================ dnl Check for atomic(3) operations as provided on Darwin. dnl We need this not for the atomic operations (which are provided above), but dnl rather for the OS_unfair_lock type it exposes. JE_COMPILABLE([Darwin OSAtomic*()], [ #include #include ], [ { int32_t x32 = 0; volatile int32_t *x32p = &x32; OSAtomicAdd32(1, x32p); } { int64_t x64 = 0; volatile int64_t *x64p = &x64; OSAtomicAdd64(1, x64p); } ], [je_cv_osatomic]) if test "x${je_cv_osatomic}" = "xyes" ; then AC_DEFINE([JEMALLOC_OSATOMIC], [ ]) fi dnl ============================================================================ dnl Check for madvise(2). JE_COMPILABLE([madvise(2)], [ #include ], [ madvise((void *)0, 0, 0); ], [je_cv_madvise]) if test "x${je_cv_madvise}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ]) dnl Check for madvise(..., MADV_FREE). JE_COMPILABLE([madvise(..., MADV_FREE)], [ #include ], [ madvise((void *)0, 0, MADV_FREE); ], [je_cv_madv_free]) if test "x${je_cv_madv_free}" = "xyes" ; then AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) elif test "x${je_cv_madvise}" = "xyes" ; then case "${host_cpu}" in i686|x86_64) case "${host}" in *-*-linux*) AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ]) ;; esac ;; esac fi dnl Check for madvise(..., MADV_DONTNEED). JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [ #include ], [ madvise((void *)0, 0, MADV_DONTNEED); ], [je_cv_madv_dontneed]) if test "x${je_cv_madv_dontneed}" = "xyes" ; then AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) fi dnl Check for madvise(..., MADV_DO[NT]DUMP). JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [ #include ], [ madvise((void *)0, 0, MADV_DONTDUMP); madvise((void *)0, 0, MADV_DODUMP); ], [je_cv_madv_dontdump]) if test "x${je_cv_madv_dontdump}" = "xyes" ; then AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ]) fi dnl Check for madvise(..., MADV_[NO]HUGEPAGE). JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [ #include ], [ madvise((void *)0, 0, MADV_HUGEPAGE); madvise((void *)0, 0, MADV_NOHUGEPAGE); ], [je_cv_thp]) case "${host_cpu}" in arm*) ;; *) if test "x${je_cv_thp}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ]) fi ;; esac fi dnl ============================================================================ dnl Check for __builtin_clz() and __builtin_clzl(). AC_CACHE_CHECK([for __builtin_clz], [je_cv_builtin_clz], [AC_LINK_IFELSE([AC_LANG_PROGRAM([], [ { unsigned x = 0; int y = __builtin_clz(x); } { unsigned long x = 0; int y = __builtin_clzl(x); } ])], [je_cv_builtin_clz=yes], [je_cv_builtin_clz=no])]) if test "x${je_cv_builtin_clz}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ]) fi dnl ============================================================================ dnl Check for os_unfair_lock operations as provided on Darwin. 
JE_COMPILABLE([Darwin os_unfair_lock_*()], [ #include #include ], [ #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 #error "os_unfair_lock is not supported" #else os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; os_unfair_lock_lock(&lock); os_unfair_lock_unlock(&lock); #endif ], [je_cv_os_unfair_lock]) if test "x${je_cv_os_unfair_lock}" = "xyes" ; then AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ]) fi dnl ============================================================================ dnl Darwin-related configuration. AC_ARG_ENABLE([zone-allocator], [AS_HELP_STRING([--disable-zone-allocator], [Disable zone allocator for Darwin])], [if test "x$enable_zone_allocator" = "xno" ; then enable_zone_allocator="0" else enable_zone_allocator="1" fi ], [if test "x${abi}" = "xmacho"; then enable_zone_allocator="1" fi ] ) AC_SUBST([enable_zone_allocator]) if test "x${enable_zone_allocator}" = "x1" ; then if test "x${abi}" != "xmacho"; then AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) fi AC_DEFINE([JEMALLOC_ZONE], [ ]) fi dnl ============================================================================ dnl Use initial-exec TLS by default. AC_ARG_ENABLE([initial-exec-tls], [AS_HELP_STRING([--disable-initial-exec-tls], [Disable the initial-exec tls model])], [if test "x$enable_initial_exec_tls" = "xno" ; then enable_initial_exec_tls="0" else enable_initial_exec_tls="1" fi ], [enable_initial_exec_tls="1"] ) AC_SUBST([enable_initial_exec_tls]) if test "x${je_cv_tls_model}" = "xyes" -a \ "x${enable_initial_exec_tls}" = "x1" ; then AC_DEFINE([JEMALLOC_TLS_MODEL], [__attribute__((tls_model("initial-exec")))]) else AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) fi dnl ============================================================================ dnl Enable background threads if possible. 
if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" ; then AC_DEFINE([JEMALLOC_BACKGROUND_THREAD]) fi dnl ============================================================================ dnl Check for glibc malloc hooks JE_COMPILABLE([glibc malloc hook], [ #include extern void (* __free_hook)(void *ptr); extern void *(* __malloc_hook)(size_t size); extern void *(* __realloc_hook)(void *ptr, size_t size); ], [ void *ptr = 0L; if (__malloc_hook) ptr = __malloc_hook(1); if (__realloc_hook) ptr = __realloc_hook(ptr, 2); if (__free_hook && ptr) __free_hook(ptr); ], [je_cv_glibc_malloc_hook]) if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then if test "x${JEMALLOC_PREFIX}" = "x" ; then AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ]) wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook" fi fi JE_COMPILABLE([glibc memalign hook], [ #include extern void *(* __memalign_hook)(size_t alignment, size_t size); ], [ void *ptr = 0L; if (__memalign_hook) ptr = __memalign_hook(16, 7); ], [je_cv_glibc_memalign_hook]) if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then if test "x${JEMALLOC_PREFIX}" = "x" ; then AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ]) wrap_syms="${wrap_syms} __memalign_hook" fi fi JE_COMPILABLE([pthreads adaptive mutexes], [ #include ], [ pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); pthread_mutexattr_destroy(&attr); ], [je_cv_pthread_mutex_adaptive_np]) if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ]) fi JE_CFLAGS_SAVE() JE_CFLAGS_ADD([-D_GNU_SOURCE]) JE_CFLAGS_ADD([-Werror]) JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([strerror_r returns char with gnu source], [ #include #include #include #include ], [ char *buffer = (char *) malloc(100); char *error = strerror_r(EINVAL, buffer, 100); printf("%s\n", error); ], [je_cv_strerror_r_returns_char_with_gnu_source]) JE_CFLAGS_RESTORE() if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ]) fi dnl ============================================================================ dnl Check for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL dnl ============================================================================ dnl Define commands that generate output files. AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" cp /dev/null "${f}" for nm in `echo ${mangling_map} |tr ',' ' '` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'` m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'` echo "${n}:${m}" >> "${f}" dnl Remove name from public_syms so that it isn't redefined later. 
public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` done for sym in ${public_syms} ; do n="${sym}" m="${JEMALLOC_PREFIX}${sym}" echo "${n}:${m}" >> "${f}" done ], [ srcdir="${srcdir}" objroot="${objroot}" mangling_map="${mangling_map}" public_syms="${public_syms}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [ f="${objroot}include/jemalloc/internal/private_symbols.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" ], [ srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [ f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" mkdir -p "${objroot}include/jemalloc/internal" export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" ], [ srcdir="${srcdir}" objroot="${objroot}" public_syms="${public_syms}" wrap_syms="${wrap_syms}" SYM_PREFIX="${SYM_PREFIX}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ mkdir -p "${objroot}include/jemalloc" cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [ mkdir -p "${objroot}include/jemalloc" 
"${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" ], [ srcdir="${srcdir}" objroot="${objroot}" install_suffix="${install_suffix}" ]) dnl Process .in files. AC_SUBST([cfghdrs_in]) AC_SUBST([cfghdrs_out]) AC_CONFIG_HEADERS([$cfghdrs_tup]) dnl ============================================================================ dnl Generate outputs. AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof]) AC_SUBST([cfgoutputs_in]) AC_SUBST([cfgoutputs_out]) AC_OUTPUT dnl ============================================================================ dnl Print out the results of configuration. AC_MSG_RESULT([===============================================================================]) AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) AC_MSG_RESULT([library revision : ${rev}]) AC_MSG_RESULT([]) AC_MSG_RESULT([CONFIG : ${CONFIG}]) AC_MSG_RESULT([CC : ${CC}]) AC_MSG_RESULT([CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}]) AC_MSG_RESULT([SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}]) AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}]) AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) AC_MSG_RESULT([CXX : ${CXX}]) AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}]) AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}]) AC_MSG_RESULT([EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}]) AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) AC_MSG_RESULT([DSO_LDFLAGS : ${DSO_LDFLAGS}]) AC_MSG_RESULT([LIBS : ${LIBS}]) AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) AC_MSG_RESULT([]) AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) AC_MSG_RESULT([]) AC_MSG_RESULT([PREFIX : ${PREFIX}]) AC_MSG_RESULT([BINDIR : ${BINDIR}]) AC_MSG_RESULT([DATADIR : ${DATADIR}]) AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}]) AC_MSG_RESULT([LIBDIR : ${LIBDIR}]) AC_MSG_RESULT([MANDIR : ${MANDIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([srcroot : ${srcroot}]) AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}]) AC_MSG_RESULT([objroot : ${objroot}]) AC_MSG_RESULT([abs_objroot : ${abs_objroot}]) AC_MSG_RESULT([]) AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) AC_MSG_RESULT([install_suffix : ${install_suffix}]) AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}]) AC_MSG_RESULT([shared libs : ${enable_shared}]) AC_MSG_RESULT([static libs : ${enable_static}]) AC_MSG_RESULT([autogen : ${enable_autogen}]) AC_MSG_RESULT([debug : ${enable_debug}]) AC_MSG_RESULT([stats : ${enable_stats}]) AC_MSG_RESULT([experimetal_smallocx : ${enable_experimental_smallocx}]) AC_MSG_RESULT([prof : ${enable_prof}]) AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) AC_MSG_RESULT([fill : ${enable_fill}]) AC_MSG_RESULT([utrace : ${enable_utrace}]) AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) AC_MSG_RESULT([log : ${enable_log}]) AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}]) AC_MSG_RESULT([cxx : ${enable_cxx}]) AC_MSG_RESULT([===============================================================================]) 
jemalloc-sys-0.3.2/rep/doc/html.xsl (XSL stylesheet; no recoverable text content)
jemalloc-sys-0.3.2/rep/doc/html.xsl.in (XSL stylesheet template; no recoverable text content)
jemalloc-sys-0.3.2/rep/doc/jemalloc.xml
User Manual jemalloc 0.0.0-0-g0000000000000000000000000000000000000000 Jason Evans Author JEMALLOC 3 jemalloc jemalloc general purpose memory allocation functions LIBRARY This manual describes jemalloc 0.0.0-0-g0000000000000000000000000000000000000000. More information can be found at the jemalloc website.
SYNOPSIS #include <jemalloc/jemalloc.h> Standard API void *malloc size_t size void *calloc size_t number size_t size int posix_memalign void **ptr size_t alignment size_t size void *aligned_alloc size_t alignment size_t size void *realloc void *ptr size_t size void free void *ptr Non-standard API void *mallocx size_t size int flags void *rallocx void *ptr size_t size int flags size_t xallocx void *ptr size_t size size_t extra int flags size_t sallocx void *ptr int flags void dallocx void *ptr int flags void sdallocx void *ptr size_t size int flags size_t nallocx size_t size int flags int mallctl const char *name void *oldp size_t *oldlenp void *newp size_t newlen int mallctlnametomib const char *name size_t *mibp size_t *miblenp int mallctlbymib const size_t *mib size_t miblen void *oldp size_t *oldlenp void *newp size_t newlen void malloc_stats_print void (*write_cb) void *, const char * void *cbopaque const char *opts size_t malloc_usable_size const void *ptr void (*malloc_message) void *cbopaque const char *s const char *malloc_conf; DESCRIPTION Standard API The malloc() function allocates size bytes of uninitialized memory. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object. The calloc() function allocates space for number objects, each size bytes in length. The result is identical to calling malloc() with an argument of number * size, with the exception that the allocated memory is explicitly initialized to zero bytes. The posix_memalign() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment, and returns the allocation in the value pointed to by ptr. The requested alignment must be a power of 2 at least as large as sizeof(void *). The aligned_alloc() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment. The requested alignment must be a power of 2. Behavior is undefined if size is not an integral multiple of alignment. The realloc() function changes the size of the previously allocated memory referenced by ptr to size bytes. The contents of the memory are unchanged up to the lesser of the new and old sizes. If the new size is larger, the contents of the newly allocated portion of the memory are undefined. Upon success, the memory referenced by ptr is freed and a pointer to the newly allocated memory is returned. Note that realloc() may move the memory allocation, resulting in a different return value than ptr. If ptr is NULL, the realloc() function behaves identically to malloc() for the specified size. The free() function causes the allocated memory referenced by ptr to be made available for future allocations. If ptr is NULL, no action occurs. Non-standard API The mallocx(), rallocx(), xallocx(), sallocx(), dallocx(), sdallocx(), and nallocx() functions all have a flags argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (|) operations to specify one or more of the following: MALLOCX_LG_ALIGN(la) Align the memory allocation to start at an address that is a multiple of (1 << la). This macro does not validate that la is within the valid range. MALLOCX_ALIGN(a) Align the memory allocation to start at an address that is a multiple of a, where a is a power of two. This macro does not validate that a is a power of 2. MALLOCX_ZERO Initialize newly allocated memory to contain zero bytes. 
In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes. If this macro is absent, newly allocated memory is uninitialized. MALLOCX_TCACHE(tc) Use the thread-specific cache (tcache) specified by the identifier tc, which must have been acquired via the tcache.create mallctl. This macro does not validate that tc specifies a valid identifier. MALLOCX_TCACHE_NONE Do not use a thread-specific cache (tcache). Unless MALLOCX_TCACHE(tc) or MALLOCX_TCACHE_NONE is specified, an automatically managed tcache will be used under many circumstances. This macro cannot be used in the same flags argument as MALLOCX_TCACHE(tc). MALLOCX_ARENA(a) Use the arena specified by the index a. This macro has no effect for regions that were allocated via an arena other than the one specified. This macro does not validate that a specifies an arena index in the valid range. The mallocx() function allocates at least size bytes of memory, and returns a pointer to the base address of the allocation. Behavior is undefined if size is 0. The rallocx() function resizes the allocation at ptr to be at least size bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location. Behavior is undefined if size is 0. The xallocx() function resizes the allocation at ptr in place to be at least size bytes, and returns the real size of the allocation. If extra is non-zero, an attempt is made to resize the allocation to be at least (size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize. Behavior is undefined if size is 0, or if (size + extra > SIZE_T_MAX). The sallocx() function returns the real size of the allocation at ptr. The dallocx() function causes the memory referenced by ptr to be made available for future allocations. The sdallocx() function is an extension of dallocx() with a size parameter to allow the caller to pass in the allocation size as an optimization. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by nallocx() or sallocx(). The nallocx() function allocates no memory, but it performs the same size computation as the mallocx() function, and returns the real size of the allocation that would result from the equivalent mallocx() function call, or 0 if the inputs exceed the maximum supported size class and/or alignment. Behavior is undefined if size is 0. The mallctl() function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions. The period-separated name argument specifies a location in a tree-structured namespace; see the MALLCTL NAMESPACE section for documentation on the tree contents. To read a value, pass a pointer via oldp to adequate space to contain the value, and a pointer to its length via oldlenp; otherwise pass NULL and NULL. Similarly, to write a value, pass a pointer to the value via newp, and its length via newlen; otherwise pass NULL and 0. The mallctlnametomib() function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a Management Information Base (MIB) that can be passed repeatedly to mallctlbymib().
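To make this repeated-query pattern concrete, here is a minimal C sketch (error checking omitted; it assumes the arenas.nbins and arenas.bin.<i>.size mallctls documented in the MALLCTL NAMESPACE section):

#include <jemalloc/jemalloc.h>

void
print_bin_sizes(void)
{
	unsigned nbins, i;
	size_t mib[4];
	size_t len, miblen;

	len = sizeof(nbins);
	mallctl("arenas.nbins", &nbins, &len, NULL, 0);

	/* Translate the name once, then reuse the MIB for every bin. */
	miblen = 4;
	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
	for (i = 0; i < nbins; i++) {
		size_t bin_size;

		mib[2] = i;	/* Overwrite the integer name component. */
		len = sizeof(bin_size);
		mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
		/* Do something with bin_size... */
	}
}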
Upon successful return from mallctlnametomib(), mibp contains an array of *miblenp integers, where *miblenp is the lesser of the number of components in name and the input value of *miblenp. Thus it is possible to pass a *miblenp that is smaller than the number of period-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB. For name components that are integers (e.g. the 2 in arenas.bin.2.size), the corresponding MIB component will always be that integer. Therefore, it is legitimate to construct code like the example above.

The malloc_stats_print() function writes summary statistics via the write_cb callback function pointer and cbopaque data passed to write_cb, or malloc_message() if write_cb is NULL. The statistics are presented in human-readable form unless J is specified as a character within the opts string, in which case the statistics are presented in JSON format. This function can be called repeatedly. General information that never changes during execution can be omitted by specifying g as a character within the opts string. Note that malloc_stats_print() uses the mallctl*() functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously. If --enable-stats is specified during configuration, m, d, and a can be specified to omit merged arena, destroyed merged arena, and per arena statistics, respectively; b and l can be specified to omit per size class statistics for bins and large objects, respectively; x can be specified to omit all mutex statistics; e can be used to omit extent statistics. Unrecognized characters are silently ignored. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations.

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The malloc_usable_size() function is not a mechanism for in-place realloc(); rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported by malloc_usable_size() should not be depended on, since such behavior is entirely implementation-dependent.

TUNING

Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time. The string specified via --with-malloc-conf, the string pointed to by the global variable malloc_conf, the name of the file referenced by the symbolic link named /etc/malloc.conf, and the value of the environment variable MALLOC_CONF will be interpreted, in that order, from left to right as options. Note that malloc_conf may be read before main() is entered, so the declaration of malloc_conf should specify an initializer that contains the final value to be read by jemalloc. The --with-malloc-conf string and malloc_conf are compile-time mechanisms, whereas /etc/malloc.conf and MALLOC_CONF can be safely set any time prior to program invocation.

An options string is a comma-separated list of option:value pairs. There is one key corresponding to each opt.* mallctl (see the MALLCTL NAMESPACE section for options documentation). For example, abort:true,narenas:1 sets the opt.abort and opt.narenas options.
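As a sketch, that same options string can be baked into a program at compile time via the malloc_conf global declared in the SYNOPSIS (again assuming an unprefixed build):

    #include <jemalloc/jemalloc.h>

    /* Read before main() is entered, so the initializer must already
     * contain the final option string. */
    const char *malloc_conf = "abort:true,narenas:1";

The MALLOC_CONF environment variable provides the equivalent knob at run time, without recompiling.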
Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values.

IMPLEMENTATION NOTES

Traditionally, allocators have used sbrk(2) to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory. If sbrk(2) is supported by the operating system, this allocator uses both mmap(2) and sbrk(2), in that order of preference; otherwise only mmap(2) is used.

This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi-processor systems. This works well with regard to threading scalability, but incurs some costs. There is a small fixed per-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation. These overheads are not generally an issue, given the number of arenas normally used. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions.

In addition to multiple arenas, this allocator supports thread-specific caching, in order to make it possible to completely avoid synchronization for most allocation requests. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache.

Memory is conceptually broken into extents. Extents are always aligned to multiples of the page size. This alignment makes it possible to find metadata for user objects quickly. User objects are broken into two categories according to size: small and large. Contiguous small objects comprise a slab, which resides within a single extent, whereas large objects each have their own extents backing them.

Small objects are managed in groups by slabs. Each slab maintains a bitmap to track which regions are in use. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least sizeof(double). All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes. Small size classes are smaller than four times the page size, and large size classes extend from four times the page size up to the largest size class that does not exceed PTRDIFF_MAX.

Allocations are packed tightly together, which can be an issue for multi-threaded applications. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating (see the sketch at the end of this section).

The realloc(), rallocx(), and xallocx() functions may resize allocations without moving them under limited circumstances. Unlike the *allocx() API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call realloc() to grow e.g. a 9-byte allocation to 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage trivially succeed in place as long as the pre-size and post-size both round up to the same size class.
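To illustrate the in-place behavior discussed above, a hedged sketch using the non-standard API (the sizes are arbitrary; an unprefixed build is assumed):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        void *p = mallocx(4096, 0); /* One 4 KiB page. */
        if (p == NULL) {
            return 1;
        }
        /* Ask to grow to two pages; xallocx() never moves p, and
         * returns the resulting real size. */
        size_t usable = xallocx(p, 8192, 0, 0);
        if (usable >= 8192) {
            printf("grown in place to %zu bytes\n", usable);
        } else {
            printf("still %zu bytes; rallocx() would be needed, and "
                "might move the allocation\n", usable);
        }
        dallocx(p, 0);
        return 0;
    }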
No other API guarantees are made regarding in-place resizing, but the current implementation also tries to resize large allocations in place, as long as the pre-size and post-size are both large. For shrinkage to succeed, the extent allocator must support splitting (see arena.<i>.extent_hooks). Growth only succeeds if the trailing memory is currently available, and the extent allocator supports merging.

Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the size classes in each category are as shown in the following table.

Size classes:

Category  Spacing  Size
Small     lg       [8]
          16       [16, 32, 48, 64, 80, 96, 112, 128]
          32       [160, 192, 224, 256]
          64       [320, 384, 448, 512]
          128      [640, 768, 896, 1024]
          256      [1280, 1536, 1792, 2048]
          512      [2560, 3072, 3584, 4096]
          1 KiB    [5 KiB, 6 KiB, 7 KiB, 8 KiB]
          2 KiB    [10 KiB, 12 KiB, 14 KiB]
Large     2 KiB    [16 KiB]
          4 KiB    [20 KiB, 24 KiB, 28 KiB, 32 KiB]
          8 KiB    [40 KiB, 48 KiB, 56 KiB, 64 KiB]
          16 KiB   [80 KiB, 96 KiB, 112 KiB, 128 KiB]
          32 KiB   [160 KiB, 192 KiB, 224 KiB, 256 KiB]
          64 KiB   [320 KiB, 384 KiB, 448 KiB, 512 KiB]
          128 KiB  [640 KiB, 768 KiB, 896 KiB, 1 MiB]
          256 KiB  [1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]
          512 KiB  [2560 KiB, 3 MiB, 3584 KiB, 4 MiB]
          1 MiB    [5 MiB, 6 MiB, 7 MiB, 8 MiB]
          2 MiB    [10 MiB, 12 MiB, 14 MiB, 16 MiB]
          4 MiB    [20 MiB, 24 MiB, 28 MiB, 32 MiB]
          8 MiB    [40 MiB, 48 MiB, 56 MiB, 64 MiB]
          ...      ...
          512 PiB  [2560 PiB, 3 EiB, 3584 PiB, 4 EiB]
          1 EiB    [5 EiB, 6 EiB, 7 EiB]
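As the cacheline-sharing advice above suggests, alignment can be requested directly at allocation time. A minimal sketch, assuming a 64-byte cacheline (the actual size is platform-dependent and should be queried):

    #include <jemalloc/jemalloc.h>

    #define CACHELINE 64 /* Assumed; not portable across platforms. */

    /* Give each thread's counter its own cacheline-aligned allocation
     * so two threads' counters cannot share a cacheline. */
    long *
    make_counter(void) {
        return (long *)mallocx(sizeof(long), MALLOCX_ALIGN(CACHELINE));
    }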
MALLCTL NAMESPACE

The following names are defined in the namespace accessible via the mallctl*() functions. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r-, -w, or --, and required build configuration flags follow, if any. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection. In the case of stats.arenas.<i>.* and arena.<i>.{initialized,purge,decay,dss}, <i> equal to MALLCTL_ARENAS_ALL can be used to operate on all arenas or access the summation of statistics from all arenas; similarly <i> equal to MALLCTL_ARENAS_DESTROYED can be used to access the summation of statistics from all destroyed arenas. These constants can be utilized either via mallctlnametomib() followed by mallctlbymib(), or via code that stringifies the constant directly into the name argument (e.g. with a preprocessor macro). Take special note of the epoch mallctl, which controls refreshing of cached dynamic statistics.

version (const char *) r-
Return the jemalloc version string.

epoch (uint64_t) rw
If a value is passed in, refresh the data from which the mallctl*() functions report values, and increment the epoch. Return the current epoch. This is useful for detecting whether another thread caused a refresh.

background_thread (bool) rw
Enable/disable internal background worker threads. When set to true, background threads are created on demand (the number of background threads will be no more than the number of CPUs or active arenas). Threads run periodically, and handle purging asynchronously. When switching off, background threads are terminated synchronously. Note that after a fork(2), the state in the child process will be disabled regardless of the state in the parent process. See stats.background_thread for related stats. opt.background_thread can be used to set the default option. This option is only available on selected pthread-based platforms.

max_background_threads (size_t) rw
Maximum number of background worker threads that will be created. This value is capped at opt.max_background_threads at startup.

config.cache_oblivious (bool) r-
--enable-cache-oblivious was specified during build configuration.

config.debug (bool) r-
--enable-debug was specified during build configuration.

config.fill (bool) r-
--enable-fill was specified during build configuration.

config.lazy_lock (bool) r-
--enable-lazy-lock was specified during build configuration.

config.malloc_conf (const char *) r-
Embedded configure-time-specified run-time options string, empty unless --with-malloc-conf was specified during build configuration.

config.prof (bool) r-
--enable-prof was specified during build configuration.

config.prof_libgcc (bool) r-
--disable-prof-libgcc was not specified during build configuration.

config.prof_libunwind (bool) r-
--enable-prof-libunwind was specified during build configuration.

config.stats (bool) r-
--enable-stats was specified during build configuration.

config.utrace (bool) r-
--enable-utrace was specified during build configuration.

config.xmalloc (bool) r-
--enable-xmalloc was specified during build configuration.

opt.abort (bool) r-
Abort-on-warning enabled/disabled. If true, most warnings are fatal. Note that runtime option warnings are not included (see opt.abort_conf for that). The process will call abort(3) in these cases. This option is disabled by default unless --enable-debug is specified during configuration, in which case it is enabled by default.

opt.abort_conf (bool) r-
Abort-on-invalid-configuration enabled/disabled. If true, invalid runtime options are fatal. The process will call abort(3) in these cases. This option is disabled by default unless --enable-debug is specified during configuration, in which case it is enabled by default.
opt.metadata_thp (const char *) r-
Controls whether to allow jemalloc to use transparent huge pages (THP) for internal metadata (see stats.metadata). always allows such usage. auto uses no THP initially, but may begin to do so when metadata usage reaches a certain level. The default is disabled.

opt.retain (bool) r-
If true, retain unused virtual memory for later reuse rather than discarding it by calling munmap(2) or equivalent (see stats.retained for related details). It also makes jemalloc use mmap(2) in a more greedy way, mapping larger chunks in one go. This option is disabled by default unless discarding virtual memory is known to trigger platform-specific performance problems, e.g. for [64-bit] Linux, which has a quirk in its virtual memory allocation algorithm that causes semi-permanent VM map holes under normal jemalloc operation. Although munmap(2) causes issues on 32-bit Linux as well, retaining virtual memory for 32-bit Linux is disabled by default due to the practical possibility of address space exhaustion.

opt.dss (const char *) r-
dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. The following settings are supported if sbrk(2) is supported by the operating system: disabled, primary, and secondary; otherwise only disabled is supported. The default is secondary if sbrk(2) is supported by the operating system; disabled otherwise.

opt.narenas (unsigned) r-
Maximum number of arenas to use for automatic multiplexing of threads and arenas. The default is four times the number of CPUs, or one if there is a single CPU.

opt.percpu_arena (const char *) r-
Per CPU arena mode. Use the percpu setting to enable this feature, which uses the number of CPUs to determine the number of arenas, and binds threads to arenas dynamically based on the CPU the thread currently runs on. The phycpu setting uses one arena per physical CPU, which means the two hyper threads on the same CPU share one arena. Note that no runtime checking regarding the availability of hyper threading is done at the moment. When set to disabled, narenas and thread to arena association will not be impacted by this option. The default is disabled.

opt.background_thread (const bool) r-
Internal background worker threads enabled/disabled. Because of potential circular dependencies, enabling background threads using this option may cause a crash or deadlock during initialization. For a reliable way to use this feature, see background_thread for dynamic control options and details. This option is disabled by default.

opt.max_background_threads (const size_t) r-
Maximum number of background threads that will be created if background_thread is set. Defaults to the number of CPUs.

opt.dirty_decay_ms (ssize_t) r-
Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged (i.e. converted to muzzy via e.g. madvise(...MADV_FREE) if supported by the operating system, or converted to clean otherwise) and/or reused. Dirty pages are defined as previously having been potentially written to by the application, and therefore consuming physical memory, yet having no current use. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate. A decay time of 0 causes all unused dirty pages to be purged immediately upon creation. A decay time of -1 disables purging. The default decay time is 10 seconds. See arenas.dirty_decay_ms and arena.<i>.dirty_decay_ms for related dynamic control options.
See opt.muzzy_decay_ms for a description of muzzy pages.

opt.muzzy_decay_ms (ssize_t) r-
Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged (i.e. converted to clean) and/or reused. Muzzy pages are defined as previously having been unused dirty pages that were subsequently purged in a manner that left them subject to the reclamation whims of the operating system (e.g. madvise(...MADV_FREE)), and therefore in an indeterminate state. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate. A decay time of 0 causes all unused muzzy pages to be purged immediately upon creation. A decay time of -1 disables purging. The default decay time is 10 seconds. See arenas.muzzy_decay_ms and arena.<i>.muzzy_decay_ms for related dynamic control options.

opt.lg_extent_max_active_fit (size_t) r-
When reusing dirty extents, this determines the (log base 2 of the) maximum ratio between the size of the active extent selected (to split off from) and the size of the requested allocation. This prevents the splitting of large active extents for smaller allocations, which can reduce fragmentation over the long run (especially for non-active extents). A lower value may reduce fragmentation, at the cost of extra active extents. The default value is 6, which gives a maximum ratio of 64 (2^6).

opt.stats_print (bool) r-
Enable/disable statistics printing at exit. If enabled, the malloc_stats_print() function is called at program exit via an atexit(3) function. opt.stats_print_opts can be combined to specify output options. If --enable-stats is specified during configuration, this has the potential to cause deadlock for a multi-threaded process that exits while one or more threads are executing in the memory allocation functions. Furthermore, atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development. This option is disabled by default.

opt.stats_print_opts (const char *) r-
Options (the opts string) to pass to malloc_stats_print() at exit (enabled through opt.stats_print). See the available options in malloc_stats_print(). Has no effect unless opt.stats_print is enabled. The default is the empty string.

opt.junk (const char *) r- [--enable-fill]
Junk filling. If set to alloc, each byte of uninitialized allocated memory will be initialized to 0xa5. If set to free, all deallocated memory will be initialized to 0x5a. If set to true, both allocated and deallocated memory will be initialized, and if set to false, junk filling will be disabled entirely. This is intended for debugging and will impact performance negatively. This option is false by default unless --enable-debug is specified during configuration, in which case it is true by default.

opt.zero (bool) r- [--enable-fill]
Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so realloc() and rallocx() calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default.

opt.utrace (bool) r- [--enable-utrace]
Allocation tracing based on utrace(2) enabled/disabled.
This option is disabled by default.

opt.xmalloc (bool) r- [--enable-xmalloc]
Abort-on-out-of-memory enabled/disabled. If enabled, rather than returning failure for any allocation function, display a diagnostic message on STDERR_FILENO and cause the program to drop core (using abort(3)). If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code:

malloc_conf = "xmalloc:true";

This option is disabled by default.

opt.tcache (bool) r-
Thread-specific caching (tcache) enabled/disabled. When there are multiple threads, each thread uses a tcache for objects up to a certain size. Thread-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use. See the opt.lg_tcache_max option for related tuning information. This option is enabled by default.

opt.lg_tcache_max (size_t) r-
Maximum size class (log base 2) to cache in the thread-specific cache (tcache). At a minimum, all small size classes are cached, and at a maximum all large size classes are cached. The default maximum is 32 KiB (2^15).

opt.thp (const char *) r-
Transparent hugepage (THP) mode. Settings "always", "never" and "default" are available if THP is supported by the operating system. The "always" setting enables transparent hugepage for all user memory mappings with MADV_HUGEPAGE; "never" ensures no transparent hugepage with MADV_NOHUGEPAGE; the default setting "default" makes no changes. Note that this option does not affect THP for jemalloc internal metadata (see opt.metadata_thp); in addition, for arenas with customized extent_hooks, this option is bypassed as it is implemented as part of the default extent hooks.

opt.prof (bool) r- [--enable-prof]
Memory profiling enabled/disabled. If enabled, profile memory allocation activity. See the opt.prof_active option for on-the-fly activation/deactivation. See the opt.lg_prof_sample option for probabilistic sampling control. See the opt.prof_accum option for control of cumulative sample reporting. See the opt.lg_prof_interval option for information on interval-triggered profile dumping, the opt.prof_gdump option for information on high-water-triggered profile dumping, and the opt.prof_final option for final profile dumping. Profile output is compatible with the jeprof command, which is based on the pprof that is developed as part of the gperftools package. See HEAP PROFILE FORMAT for heap profile format documentation.

opt.prof_prefix (const char *) r- [--enable-prof]
Filename prefix for profile dumps. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled). The default prefix is jeprof.

opt.prof_active (bool) r- [--enable-prof]
Profiling activated/deactivated. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the opt.prof option) but inactive, then toggle profiling at any time during program execution with the prof.active mallctl. This option is enabled by default.

opt.prof_thread_active_init (bool) r- [--enable-prof]
Initial setting for thread.prof.active in newly created threads. The initial setting for newly created threads can also be changed during execution via the prof.thread_active_init mallctl. This option is enabled by default.

opt.lg_prof_sample (size_t) r- [--enable-prof]
Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity.
Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead. The default sample interval is 512 KiB (2^19 B).

opt.prof_accum (bool) r- [--enable-prof]
Reporting of cumulative object/byte counts in profile dumps enabled/disabled. If this option is enabled, every unique backtrace must be stored for the duration of execution. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest. This option is disabled by default.

opt.lg_prof_interval (ssize_t) r- [--enable-prof]
Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.i<iseq>.heap, where <prefix> is controlled by the opt.prof_prefix option. By default, interval-triggered profile dumping is disabled (encoded as -1).

opt.prof_gdump (bool) r- [--enable-prof]
Set the initial state of prof.gdump, which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum. This option is disabled by default.

opt.prof_final (bool) r- [--enable-prof]
Use an atexit(3) function to dump final memory usage to a file named according to the pattern <prefix>.<pid>.<seq>.f.heap, where <prefix> is controlled by the opt.prof_prefix option. Note that atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). This option is disabled by default.

opt.prof_leak (bool) r- [--enable-prof]
Leak reporting enabled/disabled. If enabled, use an atexit(3) function to report memory leaks detected by allocation sampling. See the opt.prof option for information on analyzing heap profile output. This option is disabled by default.

thread.arena (unsigned) rw
Get or set the arena associated with the calling thread. If the specified arena was not initialized beforehand (see the arena.<i>.initialized mallctl), it will be automatically initialized as a side effect of calling this interface.

thread.allocated (uint64_t) r- [--enable-stats]
Get the total number of bytes ever allocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.

thread.allocatedp (uint64_t *) r- [--enable-stats]
Get a pointer to the value that is returned by the thread.allocated mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls.

thread.deallocated (uint64_t) r- [--enable-stats]
Get the total number of bytes ever deallocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.

thread.deallocatedp (uint64_t *) r- [--enable-stats]
Get a pointer to the value that is returned by the thread.deallocated mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls.

thread.tcache.enabled (bool) rw
Enable/disable calling thread's tcache. The tcache is implicitly flushed as a side effect of becoming disabled (see thread.tcache.flush).

thread.tcache.flush (void) --
Flush calling thread's thread-specific cache (tcache). This interface releases all cached objects and internal data structures associated with the calling thread's tcache.
Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful.

thread.prof.name (const char *) r- or -w [--enable-prof]
Get/set the descriptive name associated with the calling thread in memory profile dumps. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution. The output string of this interface should be copied for non-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations. The name string must be nil-terminated and comprised only of characters in the sets recognized by isgraph(3) and isblank(3).

thread.prof.active (bool) rw [--enable-prof]
Control whether sampling is currently active for the calling thread. This is an activation mechanism in addition to prof.active; both must be active for the calling thread to sample. This flag is enabled by default.

tcache.create (unsigned) r-
Create an explicit thread-specific cache (tcache) and return an identifier that can be passed to the MALLOCX_TCACHE(tc) macro to explicitly use the specified cache rather than the automatically managed one that is used by default. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds.

tcache.flush (unsigned) -w
Flush the specified thread-specific cache (tcache). The same considerations apply to this interface as to thread.tcache.flush, except that the tcache will never be automatically discarded.

tcache.destroy (unsigned) -w
Flush the specified thread-specific cache (tcache) and make the identifier available for use during a future tcache creation.

arena.<i>.initialized (bool) r-
Get whether the specified arena's statistics are initialized (i.e. the arena was initialized prior to the current epoch). This interface can also be nominally used to query whether the merged statistics corresponding to MALLCTL_ARENAS_ALL are initialized (always true).

arena.<i>.decay (void) --
Trigger decay-based purging of unused dirty/muzzy pages for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL. The proportion of unused dirty/muzzy pages to be purged depends on the current time; see opt.dirty_decay_ms and opt.muzzy_decay_ms for details.

arena.<i>.purge (void) --
Purge all unused dirty pages for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL.

arena.<i>.reset (void) --
Discard all of the arena's extant allocations. This interface can only be used with arenas explicitly created via arenas.create. None of the arena's discarded/cached allocations may be accessed afterward. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand.

arena.<i>.destroy (void) --
Destroy the arena. Discard all of the arena's extant allocations using the same mechanism as for arena.<i>.reset (with all the same constraints and side effects), merge the arena stats into those accessible at arena index MALLCTL_ARENAS_DESTROYED, and then completely discard all metadata associated with the arena.
Future calls to arenas.create may recycle the arena index. Destruction will fail if any threads are currently associated with the arena as a result of calls to thread.arena.

arena.<i>.dss (const char *) rw
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL. See opt.dss for supported settings.

arena.<i>.dirty_decay_ms (ssize_t) rw
Current per-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused. Each time this interface is set, all currently unused dirty pages are considered to have fully decayed, which causes immediate purging of all unused dirty pages unless the decay time is set to -1 (i.e. purging disabled). See opt.dirty_decay_ms for additional information.

arena.<i>.muzzy_decay_ms (ssize_t) rw
Current per-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused. Each time this interface is set, all currently unused muzzy pages are considered to have fully decayed, which causes immediate purging of all unused muzzy pages unless the decay time is set to -1 (i.e. purging disabled). See opt.muzzy_decay_ms for additional information.

arena.<i>.retain_grow_limit (size_t) rw
Maximum size to grow the retained region (only relevant when opt.retain is enabled). This controls the maximum increment to expand virtual memory, or allocation through arena.<i>.extent_hooks. In particular, if customized extent hooks reserve physical memory (e.g. 1G huge pages), this is useful to control the allocation hook's input size. The default is no limit.

arena.<i>.extent_hooks (extent_hooks_t *) rw
Get or set the extent management hook functions for arena <i>. The functions must be capable of operating on all extant extents associated with arena <i>, usually by passing unknown extents to the replaced functions. In practice, it is feasible to control allocation for arenas explicitly created via arenas.create such that all extents originate from an application-supplied extent allocator (by specifying the custom extent hook functions during arena creation). However, the API guarantees for the automatically created arenas may be relaxed -- hooks set there may be called in a "best effort" fashion; in addition there may be extents created prior to the application having an opportunity to take over extent allocation.

The extent_hooks_t structure comprises function pointers which are described individually below. jemalloc uses these functions to manage extent lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation. However, there are performance and platform reasons to retain extents for later reuse. Cleanup attempts cascade from deallocation to decommit to forced purging to lazy purging, which gives the extent management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations. All operations except allocation can be universally opted out of by setting the hook pointers to NULL, or selectively opted out of by returning failure. Note that once the extent hook is set, the structure is accessed directly by the associated arenas, so it must remain valid for the entire lifetime of the arenas.
typedef void *(extent_alloc_t)(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind);

An extent allocation function conforms to the extent_alloc_t type and upon success returns a pointer to size bytes of mapped memory on behalf of arena arena_ind such that the extent's base address is a multiple of alignment, as well as setting *zero to indicate whether the extent is zeroed and *commit to indicate whether the extent is committed. Upon error the function returns NULL and leaves *zero and *commit unmodified. The size parameter is always a multiple of the page size. The alignment parameter is always a power of two at least as large as the page size. Zeroing is mandatory if *zero is true upon function entry. Committing is mandatory if *commit is true upon function entry. If new_addr is not NULL, the returned pointer must be new_addr on success or NULL on error. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. Note that replacing the default extent allocation function makes the arena's arena.<i>.dss setting irrelevant.

typedef bool (extent_dalloc_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind);

An extent deallocation function conforms to the extent_dalloc_t type and deallocates an extent at given addr and size with committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates opt-out from deallocation; the virtual memory mapping associated with the extent remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse.

typedef void (extent_destroy_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind);

An extent destruction function conforms to the extent_destroy_t type and unconditionally destroys an extent at given addr and size with committed/decommitted memory as indicated, on behalf of arena arena_ind. This function may be called to destroy retained extents during arena destruction (see arena.<i>.destroy).

typedef bool (extent_commit_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent commit function conforms to the extent_commit_t type and commits zeroed physical memory to back pages within an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. If the function returns true, this indicates insufficient physical memory to satisfy the request.

typedef bool (extent_decommit_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent decommit function conforms to the extent_decommit_t type and decommits any physical memory that is backing pages within an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success, in which case the pages will be committed via the extent commit function before being reused.
If the function returns true, this indicates opt-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse.

typedef bool (extent_purge_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent purge function conforms to the extent_purge_t type and discards physical pages within the virtual memory mapping associated with an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind. A lazy extent purge function (e.g. implemented via madvise(...MADV_FREE)) can delay purging indefinitely and leave the pages within the purged virtual memory range in an indeterminate state, whereas a forced extent purge function immediately purges, and the pages within the virtual memory range will be zero-filled the next time they are accessed. If the function returns true, this indicates failure to purge.

typedef bool (extent_split_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind);

An extent split function conforms to the extent_split_t type and optionally splits an extent at given addr and size into two adjacent extents, the first of size_a bytes, and the second of size_b bytes, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the extent remains unsplit and therefore should continue to be operated on as a whole.

typedef bool (extent_merge_t)(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind);

An extent merge function conforms to the extent_merge_t type and optionally merges adjacent extents, at given addr_a and size_a with given addr_b and size_b into one contiguous extent, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the extents remain distinct mappings and therefore should continue to be operated on independently.

arenas.narenas (unsigned) r-
Current limit on number of arenas.

arenas.dirty_decay_ms (ssize_t) rw
Current default per-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused, used to initialize arena.<i>.dirty_decay_ms during arena creation. See opt.dirty_decay_ms for additional information.

arenas.muzzy_decay_ms (ssize_t) rw
Current default per-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused, used to initialize arena.<i>.muzzy_decay_ms during arena creation. See opt.muzzy_decay_ms for additional information.

arenas.quantum (size_t) r-
Quantum size.

arenas.page (size_t) r-
Page size.

arenas.tcache_max (size_t) r-
Maximum thread-cached size class.

arenas.nbins (unsigned) r-
Number of bin size classes.

arenas.nhbins (unsigned) r-
Total number of thread cache bin size classes.

arenas.bin.<i>.size (size_t) r-
Maximum size supported by size class.

arenas.bin.<i>.nregs (uint32_t) r-
Number of regions per slab.

arenas.bin.<i>.slab_size (size_t) r-
Number of bytes per slab.

arenas.nlextents (unsigned) r-
Total number of large size classes.

arenas.lextent.<i>.size (size_t) r-
Maximum size supported by this large size class.
arenas.create (unsigned, extent_hooks_t *) rw
Explicitly create a new arena outside the range of automatically managed arenas, with optionally specified extent hooks, and return the new arena index.

arenas.lookup (unsigned, void*) rw
Index of the arena to which an allocation belongs.

prof.thread_active_init (bool) rw [--enable-prof]
Control the initial setting for thread.prof.active in newly created threads. See the opt.prof_thread_active_init option for additional information.

prof.active (bool) rw [--enable-prof]
Control whether sampling is currently active. See the opt.prof_active option for additional information, as well as the interrelated thread.prof.active mallctl.

prof.dump (const char *) -w [--enable-prof]
Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern <prefix>.<pid>.<seq>.m<mseq>.heap, where <prefix> is controlled by the opt.prof_prefix option.

prof.gdump (bool) rw [--enable-prof]
When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.u<useq>.heap, where <prefix> is controlled by the opt.prof_prefix option.

prof.reset (size_t) -w [--enable-prof]
Reset all memory profile statistics, and optionally update the sample rate (see opt.lg_prof_sample and prof.lg_sample).

prof.lg_sample (size_t) r- [--enable-prof]
Get the current sample rate (see opt.lg_prof_sample).

prof.interval (uint64_t) r- [--enable-prof]
Average number of bytes allocated between interval-based profile dumps. See the opt.lg_prof_interval option for additional information.

stats.allocated (size_t) r- [--enable-stats]
Total number of bytes allocated by the application.

stats.active (size_t) r- [--enable-stats]
Total number of bytes in active pages allocated by the application. This is a multiple of the page size, and greater than or equal to stats.allocated. This does not include stats.arenas.<i>.pdirty, stats.arenas.<i>.pmuzzy, nor pages entirely devoted to allocator metadata.

stats.metadata (size_t) r- [--enable-stats]
Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap-sensitive allocator metadata structures (see stats.arenas.<i>.base) and internal allocations (see stats.arenas.<i>.internal). Transparent huge page (enabled with opt.metadata_thp) usage is not considered.

stats.metadata_thp (size_t) r- [--enable-stats]
Number of transparent huge pages (THP) used for metadata. See stats.metadata and opt.metadata_thp for details.

stats.resident (size_t) r- [--enable-stats]
Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size, and is larger than stats.active.

stats.mapped (size_t) r- [--enable-stats]
Total number of bytes in active extents mapped by the allocator. This is larger than stats.active. This does not include inactive extents, even those that contain unused dirty pages, which means that there is no strict ordering between this and stats.resident.

stats.retained (size_t) r- [--enable-stats]
Total number of bytes in virtual memory mappings that were retained rather than being returned to the operating system via e.g. munmap(2) or similar. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see extent hooks for details).
Retained memory is excluded from mapped memory statistics, e.g. stats.mapped.

stats.background_thread.num_threads (size_t) r- [--enable-stats]
Number of background threads running currently.

stats.background_thread.num_runs (uint64_t) r- [--enable-stats]
Total number of runs from all background threads.

stats.background_thread.run_interval (uint64_t) r- [--enable-stats]
Average run interval in nanoseconds of background threads.

stats.mutexes.ctl.{counter} (counter specific type) r- [--enable-stats]
Statistics on ctl mutex (global scope; mallctl related). {counter} is one of the counters below:

num_ops (uint64_t): Total number of lock acquisition operations on this mutex.

num_spin_acq (uint64_t): Number of times the mutex was spin-acquired. When the mutex is currently locked and cannot be acquired immediately, a short period of spin-retry within jemalloc will be performed. Acquisition through spin generally means the contention was lightweight and did not cause context switches.

num_wait (uint64_t): Number of times the mutex was wait-acquired, which means the mutex contention was not solved by spin-retry, and a blocking operation was likely involved in order to acquire the mutex. This event generally implies higher cost / longer delay, and should be investigated if it happens often.

max_wait_time (uint64_t): Maximum length of time in nanoseconds spent on a single wait-acquired lock operation. Note that to avoid profiling overhead on the common path, this does not consider spin-acquired cases.

total_wait_time (uint64_t): Cumulative time in nanoseconds spent on wait-acquired lock operations. Similarly, spin-acquired cases are not considered.

max_num_thds (uint32_t): Maximum number of threads waiting on this mutex simultaneously. Similarly, spin-acquired cases are not considered.

num_owner_switch (uint64_t): Number of times the current mutex owner is different from the previous one. This event does not generally imply an issue; rather it is an indicator of how often the protected data are accessed by different threads.

stats.mutexes.background_thread.{counter} (counter specific type) r- [--enable-stats]
Statistics on background_thread mutex (global scope; background_thread related). {counter} is one of the counters in mutex profiling counters.

stats.mutexes.prof.{counter} (counter specific type) r- [--enable-stats]
Statistics on prof mutex (global scope; profiling related). {counter} is one of the counters in mutex profiling counters.

stats.mutexes.reset (void) -- [--enable-stats]
Reset all mutex profile statistics, including global mutexes, arena mutexes and bin mutexes.

stats.arenas.<i>.dss (const char *) r-
dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. See opt.dss for details.

stats.arenas.<i>.dirty_decay_ms (ssize_t) r-
Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused. See opt.dirty_decay_ms for details.

stats.arenas.<i>.muzzy_decay_ms (ssize_t) r-
Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused. See opt.muzzy_decay_ms for details.

stats.arenas.<i>.nthreads (unsigned) r-
Number of threads currently assigned to arena.

stats.arenas.<i>.uptime (uint64_t) r-
Time elapsed (in nanoseconds) since the arena was created. If <i> equals 0 or MALLCTL_ARENAS_ALL, this is the uptime since malloc initialization.

stats.arenas.<i>.pactive (size_t) r-
Number of pages in active extents.
stats.arenas.<i>.pdirty (size_t) r-
Number of pages within unused extents that are potentially dirty, and for which madvise() or similar has not been called. See opt.dirty_decay_ms for a description of dirty pages.

stats.arenas.<i>.pmuzzy (size_t) r-
Number of pages within unused extents that are muzzy. See opt.muzzy_decay_ms for a description of muzzy pages.

stats.arenas.<i>.mapped (size_t) r- [--enable-stats]
Number of mapped bytes.

stats.arenas.<i>.retained (size_t) r- [--enable-stats]
Number of retained bytes. See stats.retained for details.

stats.arenas.<i>.extent_avail (size_t) r- [--enable-stats]
Number of allocated (but unused) extent structs in this arena.

stats.arenas.<i>.base (size_t) r- [--enable-stats]
Number of bytes dedicated to bootstrap-sensitive allocator metadata structures.

stats.arenas.<i>.internal (size_t) r- [--enable-stats]
Number of bytes dedicated to internal allocations. Internal allocations differ from application-originated allocations in that they are for internal use, and that they are omitted from heap profiles.

stats.arenas.<i>.metadata_thp (size_t) r- [--enable-stats]
Number of transparent huge pages (THP) used for metadata. See opt.metadata_thp for details.

stats.arenas.<i>.resident (size_t) r- [--enable-stats]
Maximum number of bytes in physically resident data pages mapped by the arena, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size.

stats.arenas.<i>.dirty_npurge (uint64_t) r- [--enable-stats]
Number of dirty page purge sweeps performed.

stats.arenas.<i>.dirty_nmadvise (uint64_t) r- [--enable-stats]
Number of madvise() or similar calls made to purge dirty pages.

stats.arenas.<i>.dirty_purged (uint64_t) r- [--enable-stats]
Number of dirty pages purged.

stats.arenas.<i>.muzzy_npurge (uint64_t) r- [--enable-stats]
Number of muzzy page purge sweeps performed.

stats.arenas.<i>.muzzy_nmadvise (uint64_t) r- [--enable-stats]
Number of madvise() or similar calls made to purge muzzy pages.

stats.arenas.<i>.muzzy_purged (uint64_t) r- [--enable-stats]
Number of muzzy pages purged.

stats.arenas.<i>.small.allocated (size_t) r- [--enable-stats]
Number of bytes currently allocated by small objects.

stats.arenas.<i>.small.nmalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a small allocation was requested from the arena's bins, whether to fill the relevant tcache if opt.tcache is enabled, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.small.ndalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a small allocation was returned to the arena's bins, whether to flush the relevant tcache if opt.tcache is enabled, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.small.nrequests (uint64_t) r- [--enable-stats]
Cumulative number of allocation requests satisfied by all bin size classes.

stats.arenas.<i>.large.allocated (size_t) r- [--enable-stats]
Number of bytes currently allocated by large objects.

stats.arenas.<i>.large.nmalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a large extent was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.large.ndalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a large extent was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise.
stats.arenas.<i>.large.nrequests (uint64_t) r- [--enable-stats]
Cumulative number of allocation requests satisfied by all large size classes.

stats.arenas.<i>.bins.<j>.nmalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a bin region of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.bins.<j>.ndalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a bin region of the corresponding size class was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.bins.<j>.nrequests (uint64_t) r- [--enable-stats]
Cumulative number of allocation requests satisfied by bin regions of the corresponding size class.

stats.arenas.<i>.bins.<j>.curregs (size_t) r- [--enable-stats]
Current number of regions for this size class.

stats.arenas.<i>.bins.<j>.nfills (uint64_t) r-
Cumulative number of tcache fills.

stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r-
Cumulative number of tcache flushes.

stats.arenas.<i>.bins.<j>.nslabs (uint64_t) r- [--enable-stats]
Cumulative number of slabs created.

stats.arenas.<i>.bins.<j>.nreslabs (uint64_t) r- [--enable-stats]
Cumulative number of times the current slab from which to allocate changed.

stats.arenas.<i>.bins.<j>.curslabs (size_t) r- [--enable-stats]
Current number of slabs.

stats.arenas.<i>.bins.<j>.mutex.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.bins.<j> mutex (arena bin scope; bin operation related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.extents.<j>.n{extent_type} (size_t) r- [--enable-stats]
Number of extents of the given type in this arena in the bucket corresponding to page size index <j>. The extent type is one of dirty, muzzy, or retained.

stats.arenas.<i>.extents.<j>.{extent_type}_bytes (size_t) r- [--enable-stats]
Sum of the bytes managed by extents of the given type in this arena in the bucket corresponding to page size index <j>. The extent type is one of dirty, muzzy, or retained.

stats.arenas.<i>.lextents.<j>.nmalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a large extent of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.lextents.<j>.ndalloc (uint64_t) r- [--enable-stats]
Cumulative number of times a large extent of the corresponding size class was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.lextents.<j>.nrequests (uint64_t) r- [--enable-stats]
Cumulative number of allocation requests satisfied by large extents of the corresponding size class.

stats.arenas.<i>.lextents.<j>.curlextents (size_t) r- [--enable-stats]
Current number of large allocations for this size class.

stats.arenas.<i>.mutexes.large.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.large mutex (arena scope; large allocation related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extent_avail.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.extent_avail mutex (arena scope; extent avail related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_dirty.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.extents_dirty mutex (arena scope; dirty extents related).
{counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_muzzy.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.extents_muzzy mutex (arena scope; muzzy extents related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_retained.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.extents_retained mutex (arena scope; retained extents related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.decay_dirty.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.decay_dirty mutex (arena scope; decay for dirty pages related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.decay_muzzy.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.decay_muzzy mutex (arena scope; decay for muzzy pages related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.base.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.base mutex (arena scope; base allocator related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.tcache_list.{counter} (counter specific type) r- [--enable-stats]
Statistics on arena.<i>.tcache_list mutex (arena scope; tcache to arena association related). This mutex is expected to be accessed less often. {counter} is one of the counters in mutex profiling counters.

HEAP PROFILE FORMAT

Although the heap profiling functionality was originally designed to be compatible with the pprof command that is developed as part of the gperftools package, the addition of per thread heap profiling functionality required a different heap profile format. The jeprof command is derived from pprof, with enhancements to support the heap profile format described here.

[The hypothetical heap profile listing and its annotated <description> counterpart do not survive in this rendering; the annotated listing ends with a MAPPED_LIBRARIES: section containing the contents of /proc/<pid>/maps.]

DEBUGGING MALLOC PROBLEMS

When debugging, it is a good idea to configure/build jemalloc with the --enable-debug and --enable-fill options, and recompile the program with suitable options and symbols for debugger support. When so configured, jemalloc incorporates a wide variety of run-time assertions that catch application errors such as double-free, write-after-free, etc.

Programs often accidentally depend on uninitialized memory actually being filled with zero bytes. Junk filling (see the opt.junk option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps. Conversely, zero filling (see the opt.zero option) eliminates the symptoms of such bugs. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs. This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive.

DIAGNOSTIC MESSAGES

If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor STDERR_FILENO. Errors will result in the process dumping core. If the opt.abort option is set, most warnings are treated as errors.
The malloc_message variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the STDERR_FILENO file descriptor is not suitable for this. malloc_message() takes the cbopaque pointer argument that is NULL unless overridden by the arguments in a call to malloc_stats_print(), followed by a string pointer. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock.

All messages are prefixed by <jemalloc>: .

RETURN VALUES

Standard API

The malloc() and calloc() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set to ENOMEM.

The posix_memalign() function returns the value 0 if successful; otherwise it returns an error value. The posix_memalign() function will fail if:

EINVAL The alignment parameter is not a power of 2 at least as large as sizeof(void *).

ENOMEM Memory allocation error.

The aligned_alloc() function returns a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set. The aligned_alloc() function will fail if:

EINVAL The alignment parameter is not a power of 2.

ENOMEM Memory allocation error.

The realloc() function returns a pointer, possibly identical to ptr, to the allocated memory if successful; otherwise a NULL pointer is returned, and errno is set to ENOMEM if the error was the result of an allocation failure. The realloc() function always leaves the original buffer intact when an error occurs.

The free() function returns no value.

Non-standard API

The mallocx() and rallocx() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned to indicate insufficient contiguous memory was available to service the allocation request.

The xallocx() function returns the real size of the resulting resized allocation pointed to by ptr, which is a value less than size if the allocation could not be adequately grown in place.

The sallocx() function returns the real size of the allocation pointed to by ptr.

The nallocx() function returns the real size that would result from a successful equivalent mallocx() function call, or zero if insufficient memory is available to perform the size computation.

The mallctl(), mallctlnametomib(), and mallctlbymib() functions return 0 on success; otherwise they return an error value. The functions will fail if:

EINVAL newp is not NULL, and newlen is too large or too small. Alternatively, *oldlenp is too large or too small; in this case as much data as possible are read despite the error.

ENOENT name or mib specifies an unknown/invalid value.

EPERM Attempt to read or write void value, or attempt to write read-only value.

EAGAIN A memory allocation failure occurred.

EFAULT An interface with side effects failed in some way not directly related to mallctl*() read/write processing.

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr.

ENVIRONMENT

The following environment variable affects the execution of the allocation functions:

MALLOC_CONF If the environment variable MALLOC_CONF is set, the characters it contains will be interpreted as options.
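For illustration, the following minimal C program exercises the error conventions described above: posix_memalign() reports failure via its return value, malloc() via a NULL return with errno, and the mallctl*() functions via their return value. It is a sketch rather than a canonical recipe; the stats.allocated probe assumes a build with --enable-stats, and the error handling shown is one reasonable pattern among several.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* posix_memalign() reports failure via its return value, not errno. */
	void *buf;
	int err = posix_memalign(&buf, 64, 1024);
	if (err != 0) {
		fprintf(stderr, "posix_memalign: %s\n", strerror(err));
		return 1;
	}

	/* malloc() reports failure via a NULL return and errno == ENOMEM. */
	void *p = malloc(4096);
	if (p == NULL && errno == ENOMEM) {
		fprintf(stderr, "malloc: out of memory\n");
		free(buf);
		return 1;
	}

	/* mallctl() returns 0 on success, or an error value such as ENOENT. */
	size_t allocated, len = sizeof(allocated);
	err = mallctl("stats.allocated", &allocated, &len, NULL, 0);
	if (err != 0) {
		/* ENOENT here typically means jemalloc was built without --enable-stats. */
		fprintf(stderr, "mallctl: %s\n", strerror(err));
	} else {
		printf("stats.allocated: %zu\n", allocated);
	}

	free(p);
	free(buf);
	return 0;
}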
EXAMPLES

To dump core whenever a problem occurs:

ln -s 'abort:true' /etc/malloc.conf

To specify in the source that only one arena should be automatically created:

malloc_conf = "narenas:1";

SEE ALSO

madvise(2), mmap(2), sbrk(2), utrace(2), alloca(3), atexit(3), getpagesize(3)

STANDARDS

The malloc(), calloc(), realloc(), and free() functions conform to ISO/IEC 9899:1990 (ISO C90).

The posix_memalign() function conforms to IEEE Std 1003.1-2001 (POSIX.1).

jemalloc-sys-0.3.2/rep/doc/jemalloc.xml.in

User Manual
jemalloc @jemalloc_version@
Jason Evans, Author

JEMALLOC(3)
jemalloc - general purpose memory allocation functions

LIBRARY

This manual describes jemalloc @jemalloc_version@. More information can be found at the jemalloc website.

SYNOPSIS

#include <jemalloc/jemalloc.h>

Standard API

void *malloc(size_t size);
void *calloc(size_t number, size_t size);
int posix_memalign(void **ptr, size_t alignment, size_t size);
void *aligned_alloc(size_t alignment, size_t size);
void *realloc(void *ptr, size_t size);
void free(void *ptr);

Non-standard API

void *mallocx(size_t size, int flags);
void *rallocx(void *ptr, size_t size, int flags);
size_t xallocx(void *ptr, size_t size, size_t extra, int flags);
size_t sallocx(void *ptr, int flags);
void dallocx(void *ptr, int flags);
void sdallocx(void *ptr, size_t size, int flags);
size_t nallocx(size_t size, int flags);
int mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
int mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp);
int mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
void malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts);
size_t malloc_usable_size(const void *ptr);
void (*malloc_message)(void *cbopaque, const char *s);
const char *malloc_conf;

DESCRIPTION

Standard API

The malloc() function allocates size bytes of uninitialized memory. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object.

The calloc() function allocates space for number objects, each size bytes in length. The result is identical to calling malloc() with an argument of number * size, with the exception that the allocated memory is explicitly initialized to zero bytes.

The posix_memalign() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment, and returns the allocation in the value pointed to by ptr. The requested alignment must be a power of 2 at least as large as sizeof(void *).

The aligned_alloc() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment. The requested alignment must be a power of 2. Behavior is undefined if size is not an integral multiple of alignment.

The realloc() function changes the size of the previously allocated memory referenced by ptr to size bytes. The contents of the memory are unchanged up to the lesser of the new and old sizes. If the new size is larger, the contents of the newly allocated portion of the memory are undefined. Upon success, the memory referenced by ptr is freed and a pointer to the newly allocated memory is returned. Note that realloc() may move the memory allocation, resulting in a different return value than ptr. If ptr is NULL, the realloc() function behaves identically to malloc() for the specified size.

The free() function causes the allocated memory referenced by ptr to be made available for future allocations. If ptr is NULL, no action occurs.
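Because realloc() leaves the original buffer intact when it fails, portable code should assign the result to a temporary before overwriting the old pointer, so the original allocation is not leaked. A short sketch of that pattern follows; the helper name grow() is arbitrary.

#include <stdio.h>
#include <stdlib.h>

/* Grow a buffer without leaking it on failure: because realloc() leaves
 * the original allocation intact when it returns NULL, the result must be
 * assigned to a temporary before overwriting the old pointer. */
int grow(double **items, size_t new_count) {
	double *tmp = realloc(*items, new_count * sizeof **items);
	if (tmp == NULL) {
		return -1; /* *items is still valid and still owned by the caller. */
	}
	*items = tmp;
	return 0;
}

int main(void) {
	double *v = malloc(4 * sizeof *v);
	if (v == NULL) return 1;
	if (grow(&v, 1024) != 0) {
		fprintf(stderr, "out of memory; original 4-element buffer intact\n");
	}
	free(v);
	return 0;
}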
Non-standard API

The mallocx(), rallocx(), xallocx(), sallocx(), dallocx(), sdallocx(), and nallocx() functions all have a flags argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (|) operations to specify one or more of the following:

MALLOCX_LG_ALIGN(la) Align the memory allocation to start at an address that is a multiple of (1 << la). This macro does not validate that la is within the valid range.

MALLOCX_ALIGN(a) Align the memory allocation to start at an address that is a multiple of a, where a is a power of two. This macro does not validate that a is a power of 2.

MALLOCX_ZERO Initialize newly allocated memory to contain zero bytes. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes. If this macro is absent, newly allocated memory is uninitialized.

MALLOCX_TCACHE(tc) Use the thread-specific cache (tcache) specified by the identifier tc, which must have been acquired via the tcache.create mallctl. This macro does not validate that tc specifies a valid identifier.

MALLOCX_TCACHE_NONE Do not use a thread-specific cache (tcache). Unless MALLOCX_TCACHE(tc) or MALLOCX_TCACHE_NONE is specified, an automatically managed tcache will be used under many circumstances. This macro cannot be used in the same flags argument as MALLOCX_TCACHE(tc).

MALLOCX_ARENA(a) Use the arena specified by the index a. This macro has no effect for regions that were allocated via an arena other than the one specified. This macro does not validate that a specifies an arena index in the valid range.

The mallocx() function allocates at least size bytes of memory, and returns a pointer to the base address of the allocation. Behavior is undefined if size is 0.

The rallocx() function resizes the allocation at ptr to be at least size bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location. Behavior is undefined if size is 0.

The xallocx() function resizes the allocation at ptr in place to be at least size bytes, and returns the real size of the allocation. If extra is non-zero, an attempt is made to resize the allocation to be at least (size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize. Behavior is undefined if size is 0, or if (size + extra > SIZE_T_MAX).

The sallocx() function returns the real size of the allocation at ptr.

The dallocx() function causes the memory referenced by ptr to be made available for future allocations.

The sdallocx() function is an extension of dallocx() with a size parameter to allow the caller to pass in the allocation size as an optimization. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by nallocx() or sallocx().

The nallocx() function allocates no memory, but it performs the same size computation as the mallocx() function, and returns the real size of the allocation that would result from the equivalent mallocx() function call, or 0 if the inputs exceed the maximum supported size class and/or alignment. Behavior is undefined if size is 0.
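For illustration, a minimal sketch combining the flags above with the sized deallocation interface; the choice of a 64-byte alignment and a 100-byte request is arbitrary.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Allocate at least 100 zeroed bytes, aligned to a 64-byte boundary.
	 * MALLOCX_ALIGN(64) is equivalent to MALLOCX_LG_ALIGN(6). */
	void *p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	if (p == NULL) {
		return 1;
	}

	/* sallocx() reports the real (usable) size backing the allocation. */
	size_t real = sallocx(p, 0);
	printf("requested 100 bytes, got a %zu-byte size class\n", real);

	/* Pass the original request size (or a value from nallocx()/sallocx())
	 * and the same alignment flag back to sdallocx(), so the deallocation
	 * path computes the same size class without a lookup. */
	sdallocx(p, 100, MALLOCX_ALIGN(64));
	return 0;
}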
The mallctl() function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions. The period-separated name argument specifies a location in a tree-structured namespace; see the MALLCTL NAMESPACE section for documentation on the tree contents. To read a value, pass a pointer via oldp to adequate space to contain the value, and a pointer to its length via oldlenp; otherwise pass NULL and NULL. Similarly, to write a value, pass a pointer to the value via newp, and its length via newlen; otherwise pass NULL and 0.

The mallctlnametomib() function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a Management Information Base (MIB) that can be passed repeatedly to mallctlbymib(). Upon successful return from mallctlnametomib(), mibp contains an array of *miblenp integers, where *miblenp is the lesser of the number of components in name and the input value of *miblenp. Thus it is possible to pass a *miblenp that is smaller than the number of period-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB. For name components that are integers (e.g. the 2 in arenas.bin.2.size), the corresponding MIB component will always be that integer. Therefore, it is legitimate to construct code like the following:

unsigned nbins, i;
size_t mib[4];
size_t len, miblen;

len = sizeof(nbins);
mallctl("arenas.nbins", &nbins, &len, NULL, 0);

miblen = 4;
mallctlnametomib("arenas.bin.0.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
	size_t bin_size;

	mib[2] = i;
	len = sizeof(bin_size);
	mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
	/* Do something with bin_size... */
}

The malloc_stats_print() function writes summary statistics via the write_cb callback function pointer and cbopaque data passed to write_cb, or malloc_message() if write_cb is NULL. The statistics are presented in human-readable form unless J is specified as a character within the opts string, in which case the statistics are presented in JSON format. This function can be called repeatedly. General information that never changes during execution can be omitted by specifying g as a character within the opts string. Note that malloc_stats_print() uses the mallctl*() functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously. If --enable-stats is specified during configuration, m, d, and a can be specified to omit merged arena, destroyed merged arena, and per arena statistics, respectively; b and l can be specified to omit per size class statistics for bins and large objects, respectively; x can be specified to omit all mutex statistics; e can be used to omit extent statistics. Unrecognized characters are silently ignored. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations.

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The malloc_usable_size() function is not a mechanism for in-place realloc(); rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported by malloc_usable_size() should not be depended on, since such behavior is entirely implementation-dependent.

TUNING

Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time. The string specified via --with-malloc-conf, the string pointed to by the global variable malloc_conf, the name of the file referenced by the symbolic link named /etc/malloc.conf, and the value of the environment variable MALLOC_CONF, will be interpreted, in that order, from left to right as options.
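For example, options can be baked in at compile time by initializing the malloc_conf global variable declared in the SYNOPSIS above; the particular option values used here are arbitrary examples.

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

/* The initializer must already contain the final option string, because
 * jemalloc may read malloc_conf before main() is entered (see the note
 * that follows). The specific options chosen here are arbitrary examples. */
const char *malloc_conf = "narenas:4,dirty_decay_ms:5000";

int main(void) {
	void *p = malloc(64);
	free(p);
	return 0;
}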
Note that malloc_conf may be read before main() is entered, so the declaration of malloc_conf should specify an initializer that contains the final value to be read by jemalloc. --with-malloc-conf and malloc_conf are compile-time mechanisms, whereas /etc/malloc.conf and MALLOC_CONF can be safely set any time prior to program invocation.

An options string is a comma-separated list of option:value pairs. There is one key corresponding to each opt.* mallctl (see the MALLCTL NAMESPACE section for options documentation). For example, abort:true,narenas:1 sets the opt.abort and opt.narenas options. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values.

IMPLEMENTATION NOTES

Traditionally, allocators have used sbrk(2) to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory. If sbrk(2) is supported by the operating system, this allocator uses both mmap(2) and sbrk(2), in that order of preference; otherwise only mmap(2) is used.

This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi-processor systems. This works well with regard to threading scalability, but incurs some costs. There is a small fixed per-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation. These overheads are not generally an issue, given the number of arenas normally used. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions.

In addition to multiple arenas, this allocator supports thread-specific caching, in order to make it possible to completely avoid synchronization for most allocation requests. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache.

Memory is conceptually broken into extents. Extents are always aligned to multiples of the page size. This alignment makes it possible to find metadata for user objects quickly. User objects are broken into two categories according to size: small and large. Contiguous small objects comprise a slab, which resides within a single extent, whereas large objects each have their own extents backing them.

Small objects are managed in groups by slabs. Each slab maintains a bitmap to track which regions are in use. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least sizeof(double). All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes. Small size classes are smaller than four times the page size, and large size classes extend from four times the page size up to the largest size class that does not exceed PTRDIFF_MAX.

Allocations are packed tightly together, which can be an issue for multi-threaded applications.
If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating.

The realloc(), rallocx(), and xallocx() functions may resize allocations without moving them under limited circumstances. Unlike the *allocx() API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call realloc() to grow e.g. a 9-byte allocation to 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage trivially succeed in place as long as the pre-size and post-size both round up to the same size class. No other API guarantees are made regarding in-place resizing, but the current implementation also tries to resize large allocations in place, as long as the pre-size and post-size are both large. For shrinkage to succeed, the extent allocator must support splitting (see arena.<i>.extent_hooks). Growth only succeeds if the trailing memory is currently available, and the extent allocator supports merging.

Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the size classes in each category are as shown in the following table.

Size classes

Category  Spacing  Size
Small     lg       [8]
          16       [16, 32, 48, 64, 80, 96, 112, 128]
          32       [160, 192, 224, 256]
          64       [320, 384, 448, 512]
          128      [640, 768, 896, 1024]
          256      [1280, 1536, 1792, 2048]
          512      [2560, 3072, 3584, 4096]
          1 KiB    [5 KiB, 6 KiB, 7 KiB, 8 KiB]
          2 KiB    [10 KiB, 12 KiB, 14 KiB]
Large     2 KiB    [16 KiB]
          4 KiB    [20 KiB, 24 KiB, 28 KiB, 32 KiB]
          8 KiB    [40 KiB, 48 KiB, 56 KiB, 64 KiB]
          16 KiB   [80 KiB, 96 KiB, 112 KiB, 128 KiB]
          32 KiB   [160 KiB, 192 KiB, 224 KiB, 256 KiB]
          64 KiB   [320 KiB, 384 KiB, 448 KiB, 512 KiB]
          128 KiB  [640 KiB, 768 KiB, 896 KiB, 1 MiB]
          256 KiB  [1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]
          512 KiB  [2560 KiB, 3 MiB, 3584 KiB, 4 MiB]
          1 MiB    [5 MiB, 6 MiB, 7 MiB, 8 MiB]
          2 MiB    [10 MiB, 12 MiB, 14 MiB, 16 MiB]
          4 MiB    [20 MiB, 24 MiB, 28 MiB, 32 MiB]
          8 MiB    [40 MiB, 48 MiB, 56 MiB, 64 MiB]
          ...      ...
          512 PiB  [2560 PiB, 3 EiB, 3584 PiB, 4 EiB]
          1 EiB    [5 EiB, 6 EiB, 7 EiB]
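The rounding implied by this table can be observed directly with nallocx(), which performs the size-class computation without allocating. A small sketch follows; the request sizes are arbitrary.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* nallocx() performs the size-class computation without allocating,
	 * so it can be used to observe the rounding shown in the table above
	 * (e.g. 9 bytes -> 16, 100 bytes -> 112, 5000 bytes -> 5 KiB). */
	size_t requests[] = {9, 100, 4097, 5000};
	for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		printf("request %zu -> size class %zu\n",
		    requests[i], nallocx(requests[i], 0));
	}
	return 0;
}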
MALLCTL NAMESPACE

The following names are defined in the namespace accessible via the mallctl*() functions. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r-, -w, or --, and required build configuration flags follow, if any. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection. In the case of stats.arenas.<i>.* and arena.<i>.{initialized,purge,decay,dss}, <i> equal to MALLCTL_ARENAS_ALL can be used to operate on all arenas or access the summation of statistics from all arenas; similarly <i> equal to MALLCTL_ARENAS_DESTROYED can be used to access the summation of statistics from all destroyed arenas. These constants can be utilized either via mallctlnametomib() followed by mallctlbymib(), or via code such as the following:

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
    NULL, NULL, NULL, 0);

Take special note of the epoch mallctl, which controls refreshing of cached dynamic statistics.

version (const char *) r- Return the jemalloc version string.

epoch (uint64_t) rw If a value is passed in, refresh the data from which the mallctl*() functions report values, and increment the epoch. Return the current epoch. This is useful for detecting whether another thread caused a refresh.

background_thread (bool) rw Enable/disable internal background worker threads. When set to true, background threads are created on demand (the number of background threads will be no more than the number of CPUs or active arenas). Threads run periodically, and handle purging asynchronously. When switching off, background threads are terminated synchronously. Note that after fork(2), the state in the child process will be disabled regardless of the state in the parent process. See stats.background_thread for related stats. opt.background_thread can be used to set the default option. This option is only available on selected pthread-based platforms.

max_background_threads (size_t) rw Maximum number of background worker threads that will be created. This value is capped at opt.max_background_threads at startup.

config.cache_oblivious (bool) r- --enable-cache-oblivious was specified during build configuration.

config.debug (bool) r- --enable-debug was specified during build configuration.

config.fill (bool) r- --enable-fill was specified during build configuration.

config.lazy_lock (bool) r- --enable-lazy-lock was specified during build configuration.

config.malloc_conf (const char *) r- Embedded configure-time-specified run-time options string, empty unless --with-malloc-conf was specified during build configuration.

config.prof (bool) r- --enable-prof was specified during build configuration.

config.prof_libgcc (bool) r- --disable-prof-libgcc was not specified during build configuration.

config.prof_libunwind (bool) r- --enable-prof-libunwind was specified during build configuration.

config.stats (bool) r- --enable-stats was specified during build configuration.

config.utrace (bool) r- --enable-utrace was specified during build configuration.

config.xmalloc (bool) r- --enable-xmalloc was specified during build configuration.

opt.abort (bool) r- Abort-on-warning enabled/disabled. If true, most warnings are fatal. Note that runtime option warnings are not included (see opt.abort_conf for that). The process will call abort(3) in these cases. This option is disabled by default unless --enable-debug is specified during configuration, in which case it is enabled by default.

opt.abort_conf (bool) r- Abort-on-invalid-configuration enabled/disabled. If true, invalid runtime options are fatal. The process will call abort(3) in these cases. This option is disabled by default unless --enable-debug is specified during configuration, in which case it is enabled by default.
opt.metadata_thp (const char *) r- Controls whether to allow jemalloc to use transparent huge pages (THP) for internal metadata (see stats.metadata). always allows such usage. auto uses no THP initially, but may begin to do so when metadata usage reaches a certain level. The default is disabled.

opt.retain (bool) r- If true, retain unused virtual memory for later reuse rather than discarding it by calling munmap(2) or equivalent (see stats.retained for related details). It also makes jemalloc use mmap(2) in a more greedy way, mapping larger chunks in one go. This option is disabled by default unless discarding virtual memory is known to trigger platform-specific performance problems, e.g. for [64-bit] Linux, which has a quirk in its virtual memory allocation algorithm that causes semi-permanent VM map holes under normal jemalloc operation. Although munmap(2) causes issues on 32-bit Linux as well, retaining virtual memory for 32-bit Linux is disabled by default due to the practical possibility of address space exhaustion.

opt.dss (const char *) r- dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. The following settings are supported if sbrk(2) is supported by the operating system: disabled, primary, and secondary; otherwise only disabled is supported. The default is secondary if sbrk(2) is supported by the operating system; disabled otherwise.

opt.narenas (unsigned) r- Maximum number of arenas to use for automatic multiplexing of threads and arenas. The default is four times the number of CPUs, or one if there is a single CPU.

opt.percpu_arena (const char *) r- Per CPU arena mode. Use the percpu setting to enable this feature, which uses the number of CPUs to determine the number of arenas, and binds threads to arenas dynamically based on the CPU the thread runs on currently. The phycpu setting uses one arena per physical CPU, which means the two hyper threads on the same CPU share one arena. Note that no runtime checking regarding the availability of hyper threading is done at the moment. When set to disabled, narenas and thread to arena association will not be impacted by this option. The default is disabled.

opt.background_thread (const bool) r- Internal background worker threads enabled/disabled. Because of potential circular dependencies, enabling background thread using this option may cause crash or deadlock during initialization. For a reliable way to use this feature, see background_thread for dynamic control options and details. This option is disabled by default.

opt.max_background_threads (const size_t) r- Maximum number of background threads that will be created if background_thread is set. Defaults to the number of CPUs.

opt.dirty_decay_ms (ssize_t) r- Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged (i.e. converted to muzzy via e.g. madvise(...MADV_FREE) if supported by the operating system, or converted to clean otherwise) and/or reused. Dirty pages are defined as previously having been potentially written to by the application, and therefore consuming physical memory, yet having no current use. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate. A decay time of 0 causes all unused dirty pages to be purged immediately upon creation. A decay time of -1 disables purging. The default decay time is 10 seconds. See arenas.dirty_decay_ms and arena.<i>.dirty_decay_ms for related dynamic control options.
See opt.muzzy_decay_ms for a description of muzzy pages.

opt.muzzy_decay_ms (ssize_t) r- Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged (i.e. converted to clean) and/or reused. Muzzy pages are defined as previously having been unused dirty pages that were subsequently purged in a manner that left them subject to the reclamation whims of the operating system (e.g. madvise(...MADV_FREE)), and therefore in an indeterminate state. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate. A decay time of 0 causes all unused muzzy pages to be purged immediately upon creation. A decay time of -1 disables purging. The default decay time is 10 seconds. See arenas.muzzy_decay_ms and arena.<i>.muzzy_decay_ms for related dynamic control options.

opt.lg_extent_max_active_fit (size_t) r- When reusing dirty extents, this determines the (log base 2 of the) maximum ratio between the size of the active extent selected (to split off from) and the size of the requested allocation. This prevents the splitting of large active extents for smaller allocations, which can reduce fragmentation over the long run (especially for non-active extents). A lower value may reduce fragmentation, at the cost of extra active extents. The default value is 6, which gives a maximum ratio of 64 (2^6).

opt.stats_print (bool) r- Enable/disable statistics printing at exit. If enabled, the malloc_stats_print() function is called at program exit via an atexit(3) function. opt.stats_print_opts can be combined to specify output options. If --enable-stats is specified during configuration, this has the potential to cause deadlock for a multi-threaded process that exits while one or more threads are executing in the memory allocation functions. Furthermore, atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development. This option is disabled by default.

opt.stats_print_opts (const char *) r- Options (the opts string) to pass to the malloc_stats_print() at exit (enabled through opt.stats_print). See available options in malloc_stats_print(). Has no effect unless opt.stats_print is enabled. The default is "" (the empty string).

opt.junk (const char *) r- [] Junk filling. If set to alloc, each byte of uninitialized allocated memory will be initialized to 0xa5. If set to free, all deallocated memory will be initialized to 0x5a. If set to true, both allocated and deallocated memory will be initialized, and if set to false, junk filling will be disabled entirely. This is intended for debugging and will impact performance negatively. This option is false by default unless --enable-debug is specified during configuration, in which case it is true by default.

opt.zero (bool) r- [] Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so realloc() and rallocx() calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default.

opt.utrace (bool) r- [] Allocation tracing based on utrace(2) enabled/disabled.
This option is disabled by default.

opt.xmalloc (bool) r- [] Abort-on-out-of-memory enabled/disabled. If enabled, rather than returning failure for any allocation function, display a diagnostic message on STDERR_FILENO and cause the program to drop core (using abort(3)). If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code:

malloc_conf = "xmalloc:true";

This option is disabled by default.

opt.tcache (bool) r- Thread-specific caching (tcache) enabled/disabled. When there are multiple threads, each thread uses a tcache for objects up to a certain size. Thread-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use. See the opt.lg_tcache_max option for related tuning information. This option is enabled by default.

opt.lg_tcache_max (size_t) r- Maximum size class (log base 2) to cache in the thread-specific cache (tcache). At a minimum, all small size classes are cached, and at a maximum all large size classes are cached. The default maximum is 32 KiB (2^15).

opt.thp (const char *) r- Transparent hugepage (THP) mode. Settings "always", "never" and "default" are available if THP is supported by the operating system. The "always" setting enables transparent hugepage for all user memory mappings with MADV_HUGEPAGE; "never" ensures no transparent hugepage with MADV_NOHUGEPAGE; the default setting "default" makes no changes. Note that: this option does not affect THP for jemalloc internal metadata (see opt.metadata_thp); in addition, for arenas with customized extent_hooks, this option is bypassed as it is implemented as part of the default extent hooks.

opt.prof (bool) r- [] Memory profiling enabled/disabled. If enabled, profile memory allocation activity. See the opt.prof_active option for on-the-fly activation/deactivation. See the opt.lg_prof_sample option for probabilistic sampling control. See the opt.prof_accum option for control of cumulative sample reporting. See the opt.lg_prof_interval option for information on interval-triggered profile dumping, the opt.prof_gdump option for information on high-water-triggered profile dumping, and the opt.prof_final option for final profile dumping. Profile output is compatible with the jeprof command, which is based on the pprof that is developed as part of the gperftools package. See HEAP PROFILE FORMAT for heap profile format documentation.

opt.prof_prefix (const char *) r- [] Filename prefix for profile dumps. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled). The default prefix is jeprof.

opt.prof_active (bool) r- [] Profiling activated/deactivated. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the opt.prof option) but inactive, then toggle profiling at any time during program execution with the prof.active mallctl. This option is enabled by default.

opt.prof_thread_active_init (bool) r- [] Initial setting for thread.prof.active in newly created threads. The initial setting for newly created threads can also be changed during execution via the prof.thread_active_init mallctl. This option is enabled by default.

opt.lg_prof_sample (size_t) r- [] Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity.
Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead. The default sample interval is 512 KiB (2^19 B).

opt.prof_accum (bool) r- [] Reporting of cumulative object/byte counts in profile dumps enabled/disabled. If this option is enabled, every unique backtrace must be stored for the duration of execution. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest. This option is disabled by default.

opt.lg_prof_interval (ssize_t) r- [] Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.i<iseq>.heap, where <prefix> is controlled by the opt.prof_prefix option. By default, interval-triggered profile dumping is disabled (encoded as -1).

opt.prof_gdump (bool) r- [] Set the initial state of prof.gdump, which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum. This option is disabled by default.

opt.prof_final (bool) r- [] Use an atexit(3) function to dump final memory usage to a file named according to the pattern <prefix>.<pid>.<seq>.f.heap, where <prefix> is controlled by the opt.prof_prefix option. Note that atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). This option is disabled by default.

opt.prof_leak (bool) r- [] Leak reporting enabled/disabled. If enabled, use an atexit(3) function to report memory leaks detected by allocation sampling. See the opt.prof option for information on analyzing heap profile output. This option is disabled by default.

thread.arena (unsigned) rw Get or set the arena associated with the calling thread. If the specified arena was not initialized beforehand (see the arena.<i>.initialized mallctl), it will be automatically initialized as a side effect of calling this interface.

thread.allocated (uint64_t) r- [] Get the total number of bytes ever allocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.

thread.allocatedp (uint64_t *) r- [] Get a pointer to the value that is returned by the thread.allocated mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls.

thread.deallocated (uint64_t) r- [] Get the total number of bytes ever deallocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.

thread.deallocatedp (uint64_t *) r- [] Get a pointer to the value that is returned by the thread.deallocated mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls.

thread.tcache.enabled (bool) rw Enable/disable calling thread's tcache. The tcache is implicitly flushed as a side effect of becoming disabled (see thread.tcache.flush).

thread.tcache.flush (void) -- Flush calling thread's thread-specific cache (tcache). This interface releases all cached objects and internal data structures associated with the calling thread's tcache.
Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful.

thread.prof.name (const char *) r- or -w [] Get/set the descriptive name associated with the calling thread in memory profile dumps. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution. The output string of this interface should be copied for non-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations. The name string must be nil-terminated and comprised only of characters in the sets recognized by isgraph(3) and isblank(3).

thread.prof.active (bool) rw [] Control whether sampling is currently active for the calling thread. This is an activation mechanism in addition to prof.active; both must be active for the calling thread to sample. This flag is enabled by default.

tcache.create (unsigned) r- Create an explicit thread-specific cache (tcache) and return an identifier that can be passed to the MALLOCX_TCACHE(tc) macro to explicitly use the specified cache rather than the automatically managed one that is used by default. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds.

tcache.flush (unsigned) -w Flush the specified thread-specific cache (tcache). The same considerations apply to this interface as to thread.tcache.flush, except that the tcache will never be automatically discarded.

tcache.destroy (unsigned) -w Flush the specified thread-specific cache (tcache) and make the identifier available for use during a future tcache creation.

arena.<i>.initialized (bool) r- Get whether the specified arena's statistics are initialized (i.e. the arena was initialized prior to the current epoch). This interface can also be nominally used to query whether the merged statistics corresponding to MALLCTL_ARENAS_ALL are initialized (always true).

arena.<i>.decay (void) -- Trigger decay-based purging of unused dirty/muzzy pages for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL. The proportion of unused dirty/muzzy pages to be purged depends on the current time; see opt.dirty_decay_ms and opt.muzzy_decay_ms for details.

arena.<i>.purge (void) -- Purge all unused dirty pages for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL.

arena.<i>.reset (void) -- Discard all of the arena's extant allocations. This interface can only be used with arenas explicitly created via arenas.create. None of the arena's discarded/cached allocations may be accessed afterward. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand.

arena.<i>.destroy (void) -- Destroy the arena. Discard all of the arena's extant allocations using the same mechanism as for arena.<i>.reset (with all the same constraints and side effects), merge the arena stats into those accessible at arena index MALLCTL_ARENAS_DESTROYED, and then completely discard all metadata associated with the arena.
Future calls to arenas.create may recycle the arena index. Destruction will fail if any threads are currently associated with the arena as a result of calls to thread.arena.

arena.<i>.dss (const char *) rw Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals MALLCTL_ARENAS_ALL. See opt.dss for supported settings.

arena.<i>.dirty_decay_ms (ssize_t) rw Current per-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused. Each time this interface is set, all currently unused dirty pages are considered to have fully decayed, which causes immediate purging of all unused dirty pages unless the decay time is set to -1 (i.e. purging disabled). See opt.dirty_decay_ms for additional information.

arena.<i>.muzzy_decay_ms (ssize_t) rw Current per-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused. Each time this interface is set, all currently unused muzzy pages are considered to have fully decayed, which causes immediate purging of all unused muzzy pages unless the decay time is set to -1 (i.e. purging disabled). See opt.muzzy_decay_ms for additional information.

arena.<i>.retain_grow_limit (size_t) rw Maximum size to grow retained region (only relevant when opt.retain is enabled). This controls the maximum increment to expand virtual memory, or allocation through arena.<i>.extent_hooks. In particular, if customized extent hooks reserve physical memory (e.g. 1G huge pages), this is useful to control the allocation hook's input size. The default is no limit.

arena.<i>.extent_hooks (extent_hooks_t *) rw Get or set the extent management hook functions for arena <i>. The functions must be capable of operating on all extant extents associated with arena <i>, usually by passing unknown extents to the replaced functions. In practice, it is feasible to control allocation for arenas explicitly created via arenas.create such that all extents originate from an application-supplied extent allocator (by specifying the custom extent hook functions during arena creation). However, the API guarantees for the automatically created arenas may be relaxed -- hooks set there may be called in a "best effort" fashion; in addition there may be extents created prior to the application having an opportunity to take over extent allocation.

The extent_hooks_t structure comprises function pointers which are described individually below. jemalloc uses these functions to manage extent lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation. However, there are performance and platform reasons to retain extents for later reuse. Cleanup attempts cascade from deallocation to decommit to forced purging to lazy purging, which gives the extent management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations. All operations except allocation can be universally opted out of by setting the hook pointers to NULL, or selectively opted out of by returning failure. Note that once the extent hook is set, the structure is accessed directly by the associated arenas, so it must remain valid for the entire lifetime of the arenas.
typedef void *(extent_alloc_t)(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind);

An extent allocation function conforms to the extent_alloc_t type and upon success returns a pointer to size bytes of mapped memory on behalf of arena arena_ind such that the extent's base address is a multiple of alignment, as well as setting *zero to indicate whether the extent is zeroed and *commit to indicate whether the extent is committed. Upon error the function returns NULL and leaves *zero and *commit unmodified. The size parameter is always a multiple of the page size. The alignment parameter is always a power of two at least as large as the page size. Zeroing is mandatory if *zero is true upon function entry. Committing is mandatory if *commit is true upon function entry. If new_addr is not NULL, the returned pointer must be new_addr on success or NULL on error. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. Note that replacing the default extent allocation function makes the arena's arena.<i>.dss setting irrelevant.

typedef bool (extent_dalloc_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind);

An extent deallocation function conforms to the extent_dalloc_t type and deallocates an extent at given addr and size with committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates opt-out from deallocation; the virtual memory mapping associated with the extent remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse.

typedef void (extent_destroy_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind);

An extent destruction function conforms to the extent_destroy_t type and unconditionally destroys an extent at given addr and size with committed/decommitted memory as indicated, on behalf of arena arena_ind. This function may be called to destroy retained extents during arena destruction (see arena.<i>.destroy).

typedef bool (extent_commit_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent commit function conforms to the extent_commit_t type and commits zeroed physical memory to back pages within an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. If the function returns true, this indicates insufficient physical memory to satisfy the request.

typedef bool (extent_decommit_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent decommit function conforms to the extent_decommit_t type and decommits any physical memory that is backing pages within an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success, in which case the pages will be committed via the extent commit function before being reused.
If the function returns true, this indicates opt-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse.

typedef bool (extent_purge_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);

An extent purge function conforms to the extent_purge_t type and discards physical pages within the virtual memory mapping associated with an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind. A lazy extent purge function (e.g. implemented via madvise(...MADV_FREE)) can delay purging indefinitely and leave the pages within the purged virtual memory range in an indeterminate state, whereas a forced extent purge function immediately purges, and the pages within the virtual memory range will be zero-filled the next time they are accessed. If the function returns true, this indicates failure to purge.

typedef bool (extent_split_t)(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind);

An extent split function conforms to the extent_split_t type and optionally splits an extent at given addr and size into two adjacent extents, the first of size_a bytes, and the second of size_b bytes, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the extent remains unsplit and therefore should continue to be operated on as a whole.

typedef bool (extent_merge_t)(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind);

An extent merge function conforms to the extent_merge_t type and optionally merges adjacent extents, at given addr_a and size_a with given addr_b and size_b into one contiguous extent, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the extents remain distinct mappings and therefore should continue to be operated on independently.

arenas.narenas (unsigned) r- Current limit on number of arenas.

arenas.dirty_decay_ms (ssize_t) rw Current default per-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused, used to initialize arena.<i>.dirty_decay_ms during arena creation. See opt.dirty_decay_ms for additional information.

arenas.muzzy_decay_ms (ssize_t) rw Current default per-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused, used to initialize arena.<i>.muzzy_decay_ms during arena creation. See opt.muzzy_decay_ms for additional information.

arenas.quantum (size_t) r- Quantum size.

arenas.page (size_t) r- Page size.

arenas.tcache_max (size_t) r- Maximum thread-cached size class.

arenas.nbins (unsigned) r- Number of bin size classes.

arenas.nhbins (unsigned) r- Total number of thread cache bin size classes.

arenas.bin.<i>.size (size_t) r- Maximum size supported by size class.

arenas.bin.<i>.nregs (uint32_t) r- Number of regions per slab.

arenas.bin.<i>.slab_size (size_t) r- Number of bytes per slab.

arenas.nlextents (unsigned) r- Total number of large size classes.

arenas.lextent.<i>.size (size_t) r- Maximum size supported by this large size class.
arenas.create (unsigned, extent_hooks_t *) rw Explicitly create a new arena outside the range of automatically managed arenas, with optionally specified extent hooks, and return the new arena index.

arenas.lookup (unsigned, void*) rw Index of the arena to which an allocation belongs.

prof.thread_active_init (bool) rw [] Control the initial setting for thread.prof.active in newly created threads. See the opt.prof_thread_active_init option for additional information.

prof.active (bool) rw [] Control whether sampling is currently active. See the opt.prof_active option for additional information, as well as the interrelated thread.prof.active mallctl.

prof.dump (const char *) -w [] Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern <prefix>.<pid>.<seq>.m<mseq>.heap, where <prefix> is controlled by the opt.prof_prefix option.

prof.gdump (bool) rw [] When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.u<useq>.heap, where <prefix> is controlled by the opt.prof_prefix option.

prof.reset (size_t) -w [] Reset all memory profile statistics, and optionally update the sample rate (see opt.lg_prof_sample and prof.lg_sample).

prof.lg_sample (size_t) r- [] Get the current sample rate (see opt.lg_prof_sample).

prof.interval (uint64_t) r- [] Average number of bytes allocated between interval-based profile dumps. See the opt.lg_prof_interval option for additional information.

stats.allocated (size_t) r- [] Total number of bytes allocated by the application.

stats.active (size_t) r- [] Total number of bytes in active pages allocated by the application. This is a multiple of the page size, and greater than or equal to stats.allocated. This does not include stats.arenas.<i>.pdirty, stats.arenas.<i>.pmuzzy, nor pages entirely devoted to allocator metadata.

stats.metadata (size_t) r- [] Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap-sensitive allocator metadata structures (see stats.arenas.<i>.base) and internal allocations (see stats.arenas.<i>.internal). Transparent huge page (enabled with opt.metadata_thp) usage is not considered.

stats.metadata_thp (size_t) r- [] Number of transparent huge pages (THP) used for metadata. See stats.metadata and opt.metadata_thp for details.

stats.resident (size_t) r- [] Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size, and is larger than stats.active.

stats.mapped (size_t) r- [] Total number of bytes in active extents mapped by the allocator. This is larger than stats.active. This does not include inactive extents, even those that contain unused dirty pages, which means that there is no strict ordering between this and stats.resident.

stats.retained (size_t) r- [] Total number of bytes in virtual memory mappings that were retained rather than being returned to the operating system via e.g. munmap(2) or similar. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see extent hooks for details).
Retained memory is excluded from mapped memory statistics, e.g. stats.mapped.

stats.background_thread.num_threads (size_t) r- [] Number of background threads running currently.

stats.background_thread.num_runs (uint64_t) r- [] Total number of runs from all background threads.

stats.background_thread.run_interval (uint64_t) r- [] Average run interval in nanoseconds of background threads.

stats.mutexes.ctl.{counter} (counter specific type) r- [] Statistics on ctl mutex (global scope; mallctl related). {counter} is one of the counters below:

num_ops (uint64_t): Total number of lock acquisition operations on this mutex.

num_spin_acq (uint64_t): Number of times the mutex was spin-acquired. When the mutex is currently locked and cannot be acquired immediately, a short period of spin-retry within jemalloc will be performed. Acquisition through spin generally means the contention was lightweight and not causing context switches.

num_wait (uint64_t): Number of times the mutex was wait-acquired, which means the mutex contention was not solved by spin-retry, and blocking operation was likely involved in order to acquire the mutex. This event generally implies higher cost / longer delay, and should be investigated if it happens often.

max_wait_time (uint64_t): Maximum length of time in nanoseconds spent on a single wait-acquired lock operation. Note that to avoid profiling overhead on the common path, this does not consider spin-acquired cases.

total_wait_time (uint64_t): Cumulative time in nanoseconds spent on wait-acquired lock operations. Similarly, spin-acquired cases are not considered.

max_num_thds (uint32_t): Maximum number of threads waiting on this mutex simultaneously. Similarly, spin-acquired cases are not considered.

num_owner_switch (uint64_t): Number of times the current mutex owner is different from the previous one. This event does not generally imply an issue; rather it is an indicator of how often the protected data are accessed by different threads.

stats.mutexes.background_thread.{counter} (counter specific type) r- [] Statistics on background_thread mutex (global scope; background_thread related). {counter} is one of the counters in mutex profiling counters.

stats.mutexes.prof.{counter} (counter specific type) r- [] Statistics on prof mutex (global scope; profiling related). {counter} is one of the counters in mutex profiling counters.

stats.mutexes.reset (void) -- [] Reset all mutex profile statistics, including global mutexes, arena mutexes and bin mutexes.

stats.arenas.<i>.dss (const char *) r- dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. See opt.dss for details.

stats.arenas.<i>.dirty_decay_ms (ssize_t) r- Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused. See opt.dirty_decay_ms for details.

stats.arenas.<i>.muzzy_decay_ms (ssize_t) r- Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused. See opt.muzzy_decay_ms for details.

stats.arenas.<i>.nthreads (unsigned) r- Number of threads currently assigned to arena.

stats.arenas.<i>.uptime (uint64_t) r- Time elapsed (in nanoseconds) since the arena was created. If <i> equals 0 or MALLCTL_ARENAS_ALL, this is the uptime since malloc initialization.

stats.arenas.<i>.pactive (size_t) r- Number of pages in active extents.
stats.arenas.<i>.pdirty (size_t) r-
  Number of pages within unused extents that are potentially dirty, and for which madvise() or similar has not been called. See opt.dirty_decay_ms for a description of dirty pages.

stats.arenas.<i>.pmuzzy (size_t) r-
  Number of pages within unused extents that are muzzy. See opt.muzzy_decay_ms for a description of muzzy pages.

stats.arenas.<i>.mapped (size_t) r- [--enable-stats]
  Number of mapped bytes.

stats.arenas.<i>.retained (size_t) r- [--enable-stats]
  Number of retained bytes. See stats.retained for details.

stats.arenas.<i>.extent_avail (size_t) r- [--enable-stats]
  Number of allocated (but unused) extent structs in this arena.

stats.arenas.<i>.base (size_t) r- [--enable-stats]
  Number of bytes dedicated to bootstrap-sensitive allocator metadata structures.

stats.arenas.<i>.internal (size_t) r- [--enable-stats]
  Number of bytes dedicated to internal allocations. Internal allocations differ from application-originated allocations in that they are for internal use, and that they are omitted from heap profiles.

stats.arenas.<i>.metadata_thp (size_t) r- [--enable-stats]
  Number of transparent huge pages (THP) used for metadata. See opt.metadata_thp for details.

stats.arenas.<i>.resident (size_t) r- [--enable-stats]
  Maximum number of bytes in physically resident data pages mapped by the arena, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size.

stats.arenas.<i>.dirty_npurge (uint64_t) r- [--enable-stats]
  Number of dirty page purge sweeps performed.

stats.arenas.<i>.dirty_nmadvise (uint64_t) r- [--enable-stats]
  Number of madvise() or similar calls made to purge dirty pages.

stats.arenas.<i>.dirty_purged (uint64_t) r- [--enable-stats]
  Number of dirty pages purged.

stats.arenas.<i>.muzzy_npurge (uint64_t) r- [--enable-stats]
  Number of muzzy page purge sweeps performed.

stats.arenas.<i>.muzzy_nmadvise (uint64_t) r- [--enable-stats]
  Number of madvise() or similar calls made to purge muzzy pages.

stats.arenas.<i>.muzzy_purged (uint64_t) r- [--enable-stats]
  Number of muzzy pages purged.

stats.arenas.<i>.small.allocated (size_t) r- [--enable-stats]
  Number of bytes currently allocated by small objects.

stats.arenas.<i>.small.nmalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a small allocation was requested from the arena's bins, whether to fill the relevant tcache if opt.tcache is enabled, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.small.ndalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a small allocation was returned to the arena's bins, whether to flush the relevant tcache if opt.tcache is enabled, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.small.nrequests (uint64_t) r- [--enable-stats]
  Cumulative number of allocation requests satisfied by all bin size classes.

stats.arenas.<i>.large.allocated (size_t) r- [--enable-stats]
  Number of bytes currently allocated by large objects.

stats.arenas.<i>.large.nmalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a large extent was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.large.ndalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a large extent was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise.
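The small and large byte counts above can be read per arena, or merged across arenas by patching in the MALLCTL_ARENAS_ALL constant mentioned under stats.arenas.<i>.uptime; a sketch, assuming --enable-stats and that the caller refreshes the epoch as shown earlier:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    /* Print merged small/large allocated bytes over all arenas. */
    static void
    print_small_large(void) {
            size_t mib[5];          /* stats . arenas . <i> . small . allocated */
            size_t miblen = sizeof(mib) / sizeof(mib[0]);
            size_t small = 0, large = 0, sz = sizeof(size_t);

            mallctlnametomib("stats.arenas.0.small.allocated", mib, &miblen);
            mib[2] = MALLCTL_ARENAS_ALL;    /* merged view over all arenas */
            mallctlbymib(mib, miblen, &small, &sz, NULL, 0);

            mallctlnametomib("stats.arenas.0.large.allocated", mib, &miblen);
            mib[2] = MALLCTL_ARENAS_ALL;
            mallctlbymib(mib, miblen, &large, &sz, NULL, 0);

            printf("small: %zu bytes, large: %zu bytes\n", small, large);
    }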
stats.arenas.<i>.large.nrequests (uint64_t) r- [--enable-stats]
  Cumulative number of allocation requests satisfied by all large size classes.

stats.arenas.<i>.bins.<j>.nmalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a bin region of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.bins.<j>.ndalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a bin region of the corresponding size class was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.bins.<j>.nrequests (uint64_t) r- [--enable-stats]
  Cumulative number of allocation requests satisfied by bin regions of the corresponding size class.

stats.arenas.<i>.bins.<j>.curregs (size_t) r- [--enable-stats]
  Current number of regions for this size class.

stats.arenas.<i>.bins.<j>.nfills (uint64_t) r-
  Cumulative number of tcache fills.

stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r-
  Cumulative number of tcache flushes.

stats.arenas.<i>.bins.<j>.nslabs (uint64_t) r- [--enable-stats]
  Cumulative number of slabs created.

stats.arenas.<i>.bins.<j>.nreslabs (uint64_t) r- [--enable-stats]
  Cumulative number of times the current slab from which to allocate changed.

stats.arenas.<i>.bins.<j>.curslabs (size_t) r- [--enable-stats]
  Current number of slabs.

stats.arenas.<i>.bins.<j>.mutex.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.bins.<j> mutex (arena bin scope; bin operation related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.extents.<j>.n{extent_type} (size_t) r- [--enable-stats]
  Number of extents of the given type in this arena in the bucket corresponding to page size index <j>. The extent type is one of dirty, muzzy, or retained.

stats.arenas.<i>.extents.<j>.{extent_type}_bytes (size_t) r- [--enable-stats]
  Sum of the bytes managed by extents of the given type in this arena in the bucket corresponding to page size index <j>. The extent type is one of dirty, muzzy, or retained.

stats.arenas.<i>.lextents.<j>.nmalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a large extent of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise.

stats.arenas.<i>.lextents.<j>.ndalloc (uint64_t) r- [--enable-stats]
  Cumulative number of times a large extent of the corresponding size class was returned to the arena, whether to flush the relevant tcache if opt.tcache is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise.

stats.arenas.<i>.lextents.<j>.nrequests (uint64_t) r- [--enable-stats]
  Cumulative number of allocation requests satisfied by large extents of the corresponding size class.

stats.arenas.<i>.lextents.<j>.curlextents (size_t) r- [--enable-stats]
  Current number of large allocations for this size class.

stats.arenas.<i>.mutexes.large.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.large mutex (arena scope; large allocation related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extent_avail.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.extent_avail mutex (arena scope; extent avail related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_dirty.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.extents_dirty mutex (arena scope; dirty extents related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_muzzy.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.extents_muzzy mutex (arena scope; muzzy extents related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.extents_retained.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.extents_retained mutex (arena scope; retained extents related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.decay_dirty.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.decay_dirty mutex (arena scope; decay for dirty pages related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.decay_muzzy.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.decay_muzzy mutex (arena scope; decay for muzzy pages related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.base.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.base mutex (arena scope; base allocator related). {counter} is one of the counters in mutex profiling counters.

stats.arenas.<i>.mutexes.tcache_list.{counter} (counter specific type) r- [--enable-stats]
  Statistics on arena.<i>.tcache_list mutex (arena scope; tcache to arena association related). This mutex is expected to be accessed less often. {counter} is one of the counters in mutex profiling counters.
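The mutex profiling counters above follow the same read pattern as any other statistic; a sketch that samples contention on the global ctl mutex and then zeroes all mutex statistics via stats.mutexes.reset (assuming --enable-stats):

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    /* Report how often the ctl mutex was acquired and how often it blocked. */
    static void
    report_ctl_contention(void) {
            uint64_t num_ops = 0, num_wait = 0;
            size_t sz = sizeof(uint64_t);

            mallctl("stats.mutexes.ctl.num_ops", &num_ops, &sz, NULL, 0);
            mallctl("stats.mutexes.ctl.num_wait", &num_wait, &sz, NULL, 0);
            printf("ctl mutex: %llu acquisitions, %llu wait-acquired\n",
                (unsigned long long)num_ops, (unsigned long long)num_wait);

            /* Start the next measurement interval from zero. */
            mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
    }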
HEAP PROFILE FORMAT

Although the heap profiling functionality was originally designed to be compatible with the pprof command that is developed as part of the gperftools package, the addition of per thread heap profiling functionality required a different heap profile format. The jeprof command is derived from pprof, with enhancements to support the heap profile format described here.

In the following hypothetical heap profile, [...] indicates elision for the sake of compactness.

  heap_v2/524288
    t*: 28106: 56637512 [0: 0]
    [...]
    t3: 352: 16777216 [0: 0]
    [...]
    t99: 17754: 29341640 [0: 0]
    [...]
  @ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
    t*: 13: 6688 [0: 0]
    t3: 12: 6496 [0: 0]
    t99: 1: 192 [0: 0]
  [...]

  MAPPED_LIBRARIES:
  [...]

The following matches the above heap profile, but most tokens are replaced with <description> to indicate descriptions of the corresponding fields.

  <heap_profile_format_version>/<mean_sample_interval>
    <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
    [...]
    <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
    [...]
    <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
    [...]
  @ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
    <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
    <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
    <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
  [...]

  MAPPED_LIBRARIES:
  </proc/<pid>/maps>
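Profiles in this format are produced by the prof.dump mallctl described above; a minimal sketch, assuming jemalloc was built with --enable-prof and the process started with profiling active (e.g. MALLOC_CONF=prof:true). The output file name here is illustrative:

    #include <jemalloc/jemalloc.h>

    static void
    dump_heap_profile(void) {
            /* Dump to an explicitly named file. */
            const char *filename = "app.heap";
            mallctl("prof.dump", NULL, NULL, (void *)&filename, sizeof(filename));

            /* Or pass NULL to use the <prefix>.<pid>.<seq>.m<mseq>.heap pattern. */
            mallctl("prof.dump", NULL, NULL, NULL, 0);
    }

The resulting files can then be examined with the jeprof command mentioned above.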
DEBUGGING MALLOC PROBLEMS

When debugging, it is a good idea to configure/build jemalloc with the --enable-debug and --enable-fill options, and recompile the program with suitable options and symbols for debugger support. When so configured, jemalloc incorporates a wide variety of run-time assertions that catch application errors such as double-free, write-after-free, etc.

Programs often accidentally depend on uninitialized memory actually being filled with zero bytes. Junk filling (see the opt.junk option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps. Conversely, zero filling (see the opt.zero option) eliminates the symptoms of such bugs. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs. This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive.

DIAGNOSTIC MESSAGES

If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor STDERR_FILENO. Errors will result in the process dumping core. If the opt.abort option is set, most warnings are treated as errors.

The malloc_message variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the STDERR_FILENO file descriptor is not suitable for this. malloc_message() takes the cbopaque pointer argument that is NULL unless overridden by the arguments in a call to malloc_stats_print(), followed by a string pointer. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock. All messages are prefixed by <jemalloc>: .
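A sketch of overriding malloc_message as described above, routing diagnostics to standard output; write 2 is used because the replacement must not allocate (note that builds using a symbol prefix expose the variable under that prefix, e.g. je_malloc_message):

    #include <string.h>
    #include <unistd.h>
    #include <jemalloc/jemalloc.h>

    /* Replacement writer; must not allocate memory. */
    static void
    write_message(void *cbopaque, const char *s) {
            (void)cbopaque;   /* NULL unless supplied via malloc_stats_print() */
            write(STDOUT_FILENO, s, strlen(s));
    }

    static void
    install_message_writer(void) {
            malloc_message = write_message;
    }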
RETURN VALUES

Standard API

The malloc() and calloc() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set to ENOMEM.

The posix_memalign() function returns the value 0 if successful; otherwise it returns an error value. The posix_memalign() function will fail if:

  EINVAL  The alignment parameter is not a power of 2 at least as large as sizeof(void *).
  ENOMEM  Memory allocation error.

The aligned_alloc() function returns a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set. The aligned_alloc() function will fail if:

  EINVAL  The alignment parameter is not a power of 2.
  ENOMEM  Memory allocation error.

The realloc() function returns a pointer, possibly identical to ptr, to the allocated memory if successful; otherwise a NULL pointer is returned, and errno is set to ENOMEM if the error was the result of an allocation failure. The realloc() function always leaves the original buffer intact when an error occurs.

The free() function returns no value.

Non-standard API

The mallocx() and rallocx() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned to indicate insufficient contiguous memory was available to service the allocation request.

The xallocx() function returns the real size of the resulting resized allocation pointed to by ptr, which is a value less than size if the allocation could not be adequately grown in place.

The sallocx() function returns the real size of the allocation pointed to by ptr.

The nallocx() function returns the real size that would result from a successful equivalent mallocx() function call, or zero if insufficient memory is available to perform the size computation.

The mallctl(), mallctlnametomib(), and mallctlbymib() functions return 0 on success; otherwise they return an error value. The functions will fail if:

  EINVAL  newp is not NULL, and newlen is too large or too small. Alternatively, *oldlenp is too large or too small; in this case as much data as possible are read despite the error.
  ENOENT  name or mib specifies an unknown/invalid value.
  EPERM   Attempt to read or write void value, or attempt to write read-only value.
  EAGAIN  A memory allocation failure occurred.
  EFAULT  An interface with side effects failed in some way not directly related to mallctl*() read/write processing.

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr.

ENVIRONMENT

The following environment variable affects the execution of the allocation functions:

  MALLOC_CONF  If the environment variable MALLOC_CONF is set, the characters it contains will be interpreted as options.

EXAMPLES

To dump core whenever a problem occurs:

  ln -s 'abort:true' /etc/malloc.conf

To specify in the source that only one arena should be automatically created:

  malloc_conf = "narenas:1";

SEE ALSO

madvise 2, mmap 2, sbrk 2, utrace 2, alloca 3, atexit 3, getpagesize 3

STANDARDS

The malloc(), calloc(), realloc(), and free() functions conform to ISO/IEC 9899:1990 (ISO C90).

The posix_memalign() function conforms to IEEE Std 1003.1-2001 (POSIX.1).
jemalloc-sys-0.3.2/rep/doc/manpages.xsl010064400007650000024000000003731344617502700162270ustar0000000000000000 jemalloc-sys-0.3.2/rep/doc/manpages.xsl.in010064400007650000024000000003171344617474000166330ustar0000000000000000 jemalloc-sys-0.3.2/rep/doc/stylesheet.xsl010064400007650000024000000006371344617474000166310ustar0000000000000000 ansi jemalloc-sys-0.3.2/rep/include/jemalloc/internal/arena_externs.h010064400007650000024000000115251344617474000231750ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H #define JEMALLOC_INTERNAL_ARENA_EXTERNS_H #include "jemalloc/internal/bin.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/hook.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/stats.h" extern ssize_t opt_dirty_decay_ms; extern ssize_t opt_muzzy_decay_ms; extern percpu_arena_mode_t opt_percpu_arena; extern const char *percpu_arena_mode_names[]; extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS]; extern malloc_mutex_t arenas_lock; extern size_t opt_oversize_threshold; extern size_t oversize_threshold; void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, bin_stats_t *bstats, arena_stats_large_t *lstats, arena_stats_extents_t *estats); void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent); #ifdef JEMALLOC_JET size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr); #endif extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero); void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent); void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldsize); void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldsize); ssize_t arena_dirty_decay_ms_get(arena_t *arena); bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); ssize_t arena_muzzy_decay_ms_get(arena_t *arena); bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); void arena_reset(tsd_t *tsd, arena_t *arena); void arena_destroy(tsd_t *tsd, arena_t *arena); void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero); typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *); extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small; void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero); void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache); void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize); void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, extent_t *extent, void *ptr); void arena_dalloc_small(tsdn_t *tsdn, void *ptr); bool arena_ralloc_no_move(tsdn_t *tsdn, void 
*ptr, size_t oldsize, size_t size, size_t extra, bool zero, size_t *newsize); void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args); dss_prec_t arena_dss_prec_get(arena_t *arena); bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); ssize_t arena_dirty_decay_ms_default_get(void); bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); ssize_t arena_muzzy_decay_ms_default_get(void); bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms); bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit); unsigned arena_nthreads_get(arena_t *arena, bool internal); void arena_nthreads_inc(arena_t *arena, bool internal); void arena_nthreads_dec(arena_t *arena, bool internal); size_t arena_extent_sn_next(arena_t *arena); arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); bool arena_init_huge(void); bool arena_is_huge(unsigned arena_ind); arena_t *arena_choose_huge(tsd_t *tsd); bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard); void arena_boot(sc_data_t *sc_data); void arena_prefork0(tsdn_t *tsdn, arena_t *arena); void arena_prefork1(tsdn_t *tsdn, arena_t *arena); void arena_prefork2(tsdn_t *tsdn, arena_t *arena); void arena_prefork3(tsdn_t *tsdn, arena_t *arena); void arena_prefork4(tsdn_t *tsdn, arena_t *arena); void arena_prefork5(tsdn_t *tsdn, arena_t *arena); void arena_prefork6(tsdn_t *tsdn, arena_t *arena); void arena_prefork7(tsdn_t *tsdn, arena_t *arena); void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); #endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/arena_inlines_a.h010064400007650000024000000027021344617474000234430ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H #define JEMALLOC_INTERNAL_ARENA_INLINES_A_H static inline unsigned arena_ind_get(const arena_t *arena) { return base_ind_get(arena->base); } static inline void arena_internal_add(arena_t *arena, size_t size) { atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED); } static inline void arena_internal_sub(arena_t *arena, size_t size) { atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED); } static inline size_t arena_internal_get(arena_t *arena) { return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); } static inline bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (likely(prof_interval == 0 || !prof_active_get_unlocked())) { return false; } return prof_accum_add(tsdn, &arena->prof_accum, accumbytes); } static inline void percpu_arena_update(tsd_t *tsd, unsigned cpu) { assert(have_percpu_arena); arena_t *oldarena = tsd_arena_get(tsd); assert(oldarena != NULL); unsigned oldind = arena_ind_get(oldarena); if (oldind != cpu) { unsigned newind = cpu; arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); assert(newarena != NULL); /* Set new arena/tcache associations. 
*/ arena_migrate(tsd, oldind, newind); tcache_t *tcache = tcache_get(tsd); if (tcache != NULL) { tcache_arena_reassociate(tsd_tsdn(tsd), tcache, newarena); } } } #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/arena_inlines_b.h010064400007650000024000000266451344617474000234600ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H #define JEMALLOC_INTERNAL_ARENA_INLINES_B_H #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" JEMALLOC_ALWAYS_INLINE bool arena_has_default_hooks(arena_t *arena) { return (extent_hooks_get(arena) == &extent_hooks_default); } JEMALLOC_ALWAYS_INLINE arena_t * arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) { if (arena != NULL) { return arena; } /* * For huge allocations, use the dedicated huge arena if both are true: * 1) is using auto arena selection (i.e. arena == NULL), and 2) the * thread is not assigned to a manual arena. */ if (unlikely(size >= oversize_threshold)) { arena_t *tsd_arena = tsd_arena_get(tsd); if (tsd_arena == NULL || arena_is_auto(tsd_arena)) { return arena_choose_huge(tsd); } } return arena_choose(tsd, NULL); } JEMALLOC_ALWAYS_INLINE prof_tctx_t * arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); /* Static check. */ if (alloc_ctx == NULL) { const extent_t *extent = iealloc(tsdn, ptr); if (unlikely(!extent_slab_get(extent))) { return large_prof_tctx_get(tsdn, extent); } } else { if (unlikely(!alloc_ctx->slab)) { return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr)); } } return (prof_tctx_t *)(uintptr_t)1U; } JEMALLOC_ALWAYS_INLINE void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); /* Static check. */ if (alloc_ctx == NULL) { extent_t *extent = iealloc(tsdn, ptr); if (unlikely(!extent_slab_get(extent))) { large_prof_tctx_set(tsdn, extent, tctx); } } else { if (unlikely(!alloc_ctx->slab)) { large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx); } } } static inline void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); extent_t *extent = iealloc(tsdn, ptr); assert(!extent_slab_get(extent)); large_prof_tctx_reset(tsdn, extent); } JEMALLOC_ALWAYS_INLINE nstime_t arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); extent_t *extent = iealloc(tsdn, ptr); /* * Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're * sure we have a sampled allocation. 
*/ assert(!extent_slab_get(extent)); return large_prof_alloc_time_get(extent); } JEMALLOC_ALWAYS_INLINE void arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx, nstime_t t) { cassert(config_prof); assert(ptr != NULL); extent_t *extent = iealloc(tsdn, ptr); assert(!extent_slab_get(extent)); large_prof_alloc_time_set(extent, t); } JEMALLOC_ALWAYS_INLINE void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { tsd_t *tsd; ticker_t *decay_ticker; if (unlikely(tsdn_null(tsdn))) { return; } tsd = tsdn_tsd(tsdn); decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena)); if (unlikely(decay_ticker == NULL)) { return; } if (unlikely(ticker_ticks(decay_ticker, nticks))) { arena_decay(tsdn, arena, false, false); } } JEMALLOC_ALWAYS_INLINE void arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx); malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx); arena_decay_ticks(tsdn, arena, 1); } /* Purge a single extent to retained / unmapped directly. */ JEMALLOC_ALWAYS_INLINE void arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { size_t extent_size = extent_size_get(extent); extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent); if (config_stats) { /* Update stats accordingly. */ arena_stats_lock(tsdn, &arena->stats); arena_stats_add_u64(tsdn, &arena->stats, &arena->decay_dirty.stats->nmadvise, 1); arena_stats_add_u64(tsdn, &arena->stats, &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE); arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, extent_size); arena_stats_unlock(tsdn, &arena->stats); } } JEMALLOC_ALWAYS_INLINE void * arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); if (likely(tcache != NULL)) { if (likely(size <= SC_SMALL_MAXCLASS)) { return tcache_alloc_small(tsdn_tsd(tsdn), arena, tcache, size, ind, zero, slow_path); } if (likely(size <= tcache_maxclass)) { return tcache_alloc_large(tsdn_tsd(tsdn), arena, tcache, size, ind, zero, slow_path); } /* (size > tcache_maxclass) case falls through. */ assert(size > tcache_maxclass); } return arena_malloc_hard(tsdn, arena, size, ind, zero); } JEMALLOC_ALWAYS_INLINE arena_t * arena_aalloc(tsdn_t *tsdn, const void *ptr) { return extent_arena_get(iealloc(tsdn, ptr)); } JEMALLOC_ALWAYS_INLINE size_t arena_salloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind != SC_NSIZES); return sz_index2size(szind); } JEMALLOC_ALWAYS_INLINE size_t arena_vsalloc(tsdn_t *tsdn, const void *ptr) { /* * Return 0 if ptr is not within an extent managed by jemalloc. This * function has two extra costs relative to isalloc(): * - The rtree calls cannot claim to be dependent lookups, which induces * rtree lookup load dependencies. * - The lookup may fail, so there is an extra branch to check for * failure. 
*/ rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent; szind_t szind; if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, false, &extent, &szind)) { return 0; } if (extent == NULL) { return 0; } assert(extent_state_get(extent) == extent_state_active); /* Only slab members should be looked up via interior pointers. */ assert(extent_addr_get(extent) == ptr || extent_slab_get(extent)); assert(szind != SC_NSIZES); return sz_index2size(szind); } static inline void arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { assert(ptr != NULL); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); szind_t szind; bool slab; rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); if (config_debug) { extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(szind < SC_NSIZES); assert(slab == extent_slab_get(extent)); } if (likely(slab)) { /* Small allocation. */ arena_dalloc_small(tsdn, ptr); } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } JEMALLOC_ALWAYS_INLINE void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); if (unlikely(tcache == NULL)) { arena_dalloc_no_tcache(tsdn, ptr); return; } szind_t szind; bool slab; rtree_ctx_t *rtree_ctx; if (alloc_ctx != NULL) { szind = alloc_ctx->szind; slab = alloc_ctx->slab; assert(szind != SC_NSIZES); } else { rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); } if (config_debug) { rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(szind < SC_NSIZES); assert(slab == extent_slab_get(extent)); } if (likely(slab)) { /* Small allocation. */ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } else { if (szind < nhbins) { if (config_prof && unlikely(szind < SC_NBINS)) { arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); } else { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } } static inline void arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { assert(ptr != NULL); assert(size <= SC_LARGE_MAXCLASS); szind_t szind; bool slab; if (!config_prof || !opt_prof) { /* * There is no risk of being confused by a promoted sampled * object, so base szind and slab on the given size. */ szind = sz_size2index(size); slab = (szind < SC_NBINS); } if ((config_prof && opt_prof) || config_debug) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); assert(szind == sz_size2index(size)); assert((config_prof && opt_prof) || slab == (szind < SC_NBINS)); if (config_debug) { extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(slab == extent_slab_get(extent)); } } if (likely(slab)) { /* Small allocation. 
*/ arena_dalloc_small(tsdn, ptr); } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } JEMALLOC_ALWAYS_INLINE void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); assert(size <= SC_LARGE_MAXCLASS); if (unlikely(tcache == NULL)) { arena_sdalloc_no_tcache(tsdn, ptr, size); return; } szind_t szind; bool slab; alloc_ctx_t local_ctx; if (config_prof && opt_prof) { if (alloc_ctx == NULL) { /* Uncommon case and should be a static check. */ rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &local_ctx.szind, &local_ctx.slab); assert(local_ctx.szind == sz_size2index(size)); alloc_ctx = &local_ctx; } slab = alloc_ctx->slab; szind = alloc_ctx->szind; } else { /* * There is no risk of being confused by a promoted sampled * object, so base szind and slab on the given size. */ szind = sz_size2index(size); slab = (szind < SC_NBINS); } if (config_debug) { rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &slab); extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); assert(szind == extent_szind_get(extent)); assert(slab == extent_slab_get(extent)); } if (likely(slab)) { /* Small allocation. */ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } else { if (szind < nhbins) { if (config_prof && unlikely(szind < SC_NBINS)) { arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); } else { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } } else { extent_t *extent = iealloc(tsdn, ptr); large_dalloc(tsdn, extent); } } } #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/arena_stats.h010064400007650000024000000165061344617474000226470ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H #define JEMALLOC_INTERNAL_ARENA_STATS_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/sc.h" JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS /* * In those architectures that support 64-bit atomics, we use atomic updates for * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize * externally. */ #ifdef JEMALLOC_ATOMIC_U64 typedef atomic_u64_t arena_stats_u64_t; #else /* Must hold the arena stats mutex while reading atomically. */ typedef uint64_t arena_stats_u64_t; #endif typedef struct arena_stats_large_s arena_stats_large_t; struct arena_stats_large_s { /* * Total number of allocation/deallocation requests served directly by * the arena. */ arena_stats_u64_t nmalloc; arena_stats_u64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ arena_stats_u64_t nrequests; /* Partially derived. */ /* Current number of allocations of this size class. */ size_t curlextents; /* Derived. */ }; typedef struct arena_stats_decay_s arena_stats_decay_t; struct arena_stats_decay_s { /* Total number of purge sweeps. */ arena_stats_u64_t npurge; /* Total number of madvise calls made. */ arena_stats_u64_t nmadvise; /* Total number of pages purged. 
*/ arena_stats_u64_t purged; }; typedef struct arena_stats_extents_s arena_stats_extents_t; struct arena_stats_extents_s { /* * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t. * We track both bytes and # of extents: two extents in the same bucket * may have different sizes if adjacent size classes differ by more than * a page, so bytes cannot always be derived from # of extents. */ atomic_zu_t ndirty; atomic_zu_t dirty_bytes; atomic_zu_t nmuzzy; atomic_zu_t muzzy_bytes; atomic_zu_t nretained; atomic_zu_t retained_bytes; }; /* * Arena stats. Note that fields marked "derived" are not directly maintained * within the arena code; rather their values are derived during stats merge * requests. */ typedef struct arena_stats_s arena_stats_t; struct arena_stats_s { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_t mtx; #endif /* Number of bytes currently mapped, excluding retained memory. */ atomic_zu_t mapped; /* Partially derived. */ /* * Number of unused virtual memory bytes currently retained. Retained * bytes are technically mapped (though always decommitted or purged), * but they are excluded from the mapped statistic (above). */ atomic_zu_t retained; /* Derived. */ /* Number of extent_t structs allocated by base, but not being used. */ atomic_zu_t extent_avail; arena_stats_decay_t decay_dirty; arena_stats_decay_t decay_muzzy; atomic_zu_t base; /* Derived. */ atomic_zu_t internal; atomic_zu_t resident; /* Derived. */ atomic_zu_t metadata_thp; atomic_zu_t allocated_large; /* Derived. */ arena_stats_u64_t nmalloc_large; /* Derived. */ arena_stats_u64_t ndalloc_large; /* Derived. */ arena_stats_u64_t nrequests_large; /* Derived. */ /* Number of bytes cached in tcache associated with this arena. */ atomic_zu_t tcache_bytes; /* Derived. */ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; /* One element for each large size class. */ arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; /* Arena uptime. */ nstime_t uptime; }; static inline bool arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { if (config_debug) { for (size_t i = 0; i < sizeof(arena_stats_t); i++) { assert(((char *)arena_stats)[i] == 0); } } #ifndef JEMALLOC_ATOMIC_U64 if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { return true; } #endif /* Memory is zeroed, so there is no need to clear stats. 
*/ return false; } static inline void arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_lock(tsdn, &arena_stats->mtx); #endif } static inline void arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_unlock(tsdn, &arena_stats->mtx); #endif } static inline uint64_t arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_u64_t *p) { #ifdef JEMALLOC_ATOMIC_U64 return atomic_load_u64(p, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); return *p; #endif } static inline void arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_u64_t *p, uint64_t x) { #ifdef JEMALLOC_ATOMIC_U64 atomic_fetch_add_u64(p, x, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); *p += x; #endif } static inline void arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_u64_t *p, uint64_t x) { #ifdef JEMALLOC_ATOMIC_U64 uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); assert(r - x <= r); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); *p -= x; assert(*p + x >= *p); #endif } /* * Non-atomically sets *dst += src. *dst needs external synchronization. * This lets us avoid the cost of a fetch_add when it's unnecessary (note that * the types here are atomic). */ static inline void arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) { #ifdef JEMALLOC_ATOMIC_U64 uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED); #else *dst += src; #endif } static inline size_t arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) { #ifdef JEMALLOC_ATOMIC_U64 return atomic_load_zu(p, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); return atomic_load_zu(p, ATOMIC_RELAXED); #endif } static inline void arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, size_t x) { #ifdef JEMALLOC_ATOMIC_U64 atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); atomic_store_zu(p, cur + x, ATOMIC_RELAXED); #endif } static inline void arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, size_t x) { #ifdef JEMALLOC_ATOMIC_U64 size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); assert(r - x <= r); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); atomic_store_zu(p, cur - x, ATOMIC_RELAXED); #endif } /* Like the _u64 variant, needs an externally synchronized *dst. 
*/ static inline void arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); } static inline void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, szind_t szind, uint64_t nrequests) { arena_stats_lock(tsdn, arena_stats); arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind - SC_NBINS].nrequests, nrequests); arena_stats_unlock(tsdn, arena_stats); } static inline void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { arena_stats_lock(tsdn, arena_stats); arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); arena_stats_unlock(tsdn, arena_stats); } #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/arena_structs_a.h010064400007650000024000000004451344617474000235130ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H #define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H #include "jemalloc/internal/bitmap.h" struct arena_slab_data_s { /* Per region allocated/deallocated bitmap. */ bitmap_t bitmap[BITMAP_GROUPS_MAX]; }; #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/arena_structs_b.h010064400007650000024000000152441344617474000235170ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H #define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H #include "jemalloc/internal/arena_stats.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bin.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/smoothstep.h" #include "jemalloc/internal/ticker.h" struct arena_decay_s { /* Synchronizes all non-atomic fields. */ malloc_mutex_t mtx; /* * True if a thread is currently purging the extents associated with * this decay structure. */ bool purging; /* * Approximate time in milliseconds from the creation of a set of unused * dirty pages until an equivalent set of unused dirty pages is purged * and/or reused. */ atomic_zd_t time_ms; /* time / SMOOTHSTEP_NSTEPS. */ nstime_t interval; /* * Time at which the current decay interval logically started. We do * not actually advance to a new epoch until sometime after it starts * because of scheduling and computation delays, and it is even possible * to completely skip epochs. In all cases, during epoch advancement we * merge all relevant activity into the most recently recorded epoch. */ nstime_t epoch; /* Deadline randomness generator. */ uint64_t jitter_state; /* * Deadline for current epoch. This is the sum of interval and per * epoch jitter which is a uniform random variable in [0..interval). * Epochs always advance by precise multiples of interval, but we * randomize the deadline to reduce the likelihood of arenas purging in * lockstep. */ nstime_t deadline; /* * Number of unpurged pages at beginning of current epoch. During epoch * advancement we use the delta between arena->decay_*.nunpurged and * extents_npages_get(&arena->extents_*) to determine how many dirty * pages, if any, were generated. */ size_t nunpurged; /* * Trailing log of how many unused dirty pages were generated during * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last * element is the most recent epoch. 
Corresponding epoch times are * relative to epoch. */ size_t backlog[SMOOTHSTEP_NSTEPS]; /* * Pointer to associated stats. These stats are embedded directly in * the arena's stats due to how stats structures are shared between the * arena and ctl code. * * Synchronization: Same as associated arena's stats field. */ arena_stats_decay_t *stats; /* Peak number of pages in associated extents. Used for debug only. */ uint64_t ceil_npages; }; struct arena_s { /* * Number of threads currently assigned to this arena. Each thread has * two distinct assignments, one for application-serving allocation, and * the other for internal metadata allocation. Internal metadata must * not be allocated from arenas explicitly created via the arenas.create * mallctl, because the arena.<i>.reset mallctl indiscriminately * discards all allocations for the affected arena. * * 0: Application allocation. * 1: Internal metadata allocation. * * Synchronization: atomic. */ atomic_u_t nthreads[2]; /* Next bin shard for binding new threads. Synchronization: atomic. */ atomic_u_t binshard_next; /* * When percpu_arena is enabled, to amortize the cost of reading / * updating the current CPU id, track the most recent thread accessing * this arena, and only read CPU if there is a mismatch. */ tsdn_t *last_thd; /* Synchronization: internal. */ arena_stats_t stats; /* * Lists of tcaches and cache_bin_array_descriptors for extant threads * associated with this arena. Stats from these are merged * incrementally, and at exit if opt_stats_print is enabled. * * Synchronization: tcache_ql_mtx. */ ql_head(tcache_t) tcache_ql; ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; malloc_mutex_t tcache_ql_mtx; /* Synchronization: internal. */ prof_accum_t prof_accum; uint64_t prof_accumbytes; /* * PRNG state for cache index randomization of large allocation base * pointers. * * Synchronization: atomic. */ atomic_zu_t offset_state; /* * Extent serial number generator state. * * Synchronization: atomic. */ atomic_zu_t extent_sn_next; /* * Represents a dss_prec_t, but atomically. * * Synchronization: atomic. */ atomic_u_t dss_prec; /* * Number of pages in active extents. * * Synchronization: atomic. */ atomic_zu_t nactive; /* * Extant large allocations. * * Synchronization: large_mtx. */ extent_list_t large; /* Synchronizes all large allocation/update/deallocation. */ malloc_mutex_t large_mtx; /* * Collections of extents that were previously allocated. These are * used when allocating extents, in an attempt to re-use address space. * * Synchronization: internal. */ extents_t extents_dirty; extents_t extents_muzzy; extents_t extents_retained; /* * Decay-based purging state, responsible for scheduling extent state * transitions. * * Synchronization: internal. */ arena_decay_t decay_dirty; /* dirty --> muzzy */ arena_decay_t decay_muzzy; /* muzzy --> retained */ /* * Next extent size class in a growing series to use when satisfying a * request via the extent hooks (only if opt_retain). This limits the * number of disjoint virtual memory ranges so that extent merging can * be effective even if multiple arenas' extent allocation requests are * highly interleaved. * * retain_grow_limit is the max allowed size ind to expand (unless the * required size is greater). Default is no limit, and controlled * through mallctl only. * * Synchronization: extent_grow_mtx */ pszind_t extent_grow_next; pszind_t retain_grow_limit; malloc_mutex_t extent_grow_mtx; /* * Available extent structures that were allocated via * base_alloc_extent(). 
* * Synchronization: extent_avail_mtx. */ extent_tree_t extent_avail; atomic_zu_t extent_avail_cnt; malloc_mutex_t extent_avail_mtx; /* * bins is used to store heaps of free regions. * * Synchronization: internal. */ bins_t bins[SC_NBINS]; /* * Base allocator, from which arena metadata are allocated. * * Synchronization: internal. */ base_t *base; /* Used to determine uptime. Read-only after initialization. */ nstime_t create_time; }; /* Used in conjunction with tsd for fast arena-related context lookup. */ struct arena_tdata_s { ticker_t decay_ticker; }; /* Used to pass rtree lookup context down the path. */ struct alloc_ctx_s { szind_t szind; bool slab; }; #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/arena_types.h010064400007650000024000000032561344617474000226530ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H #define JEMALLOC_INTERNAL_ARENA_TYPES_H #include "jemalloc/internal/sc.h" /* Maximum number of regions in one slab. */ #define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) #define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS) /* Default decay times in milliseconds. */ #define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) #define MUZZY_DECAY_MS_DEFAULT (0) /* Number of event ticks between time checks. */ #define DECAY_NTICKS_PER_UPDATE 1000 typedef struct arena_slab_data_s arena_slab_data_t; typedef struct arena_decay_s arena_decay_t; typedef struct arena_s arena_t; typedef struct arena_tdata_s arena_tdata_t; typedef struct alloc_ctx_s alloc_ctx_t; typedef enum { percpu_arena_mode_names_base = 0, /* Used for options processing. */ /* * *_uninit are used only during bootstrapping, and must correspond * to initialized variant plus percpu_arena_mode_enabled_base. */ percpu_arena_uninit = 0, per_phycpu_arena_uninit = 1, /* All non-disabled modes must come after percpu_arena_disabled. */ percpu_arena_disabled = 2, percpu_arena_mode_names_limit = 3, /* Used for options processing. */ percpu_arena_mode_enabled_base = 3, percpu_arena = 3, per_phycpu_arena = 4 /* Hyper threads share arena. */ } percpu_arena_mode_t; #define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) #define PERCPU_ARENA_DEFAULT percpu_arena_disabled /* * When allocation_size >= oversize_threshold, use the dedicated huge arena * (unless an arena index has been explicitly specified). 0 disables the feature. */ #define OVERSIZE_THRESHOLD_DEFAULT (8 << 20) #endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/assert.h010064400007650000024000000024621344617474000216400ustar0000000000000000#include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/util.h" /* * Define a custom assert() in order to reduce the chances of deadlock during * assertion failure. 
*/ #ifndef assert #define assert(e) do { \ if (unlikely(config_debug && !(e))) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #endif #ifndef not_reached #define not_reached() do { \ if (config_debug) { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } \ unreachable(); \ } while (0) #endif #ifndef not_implemented #define not_implemented() do { \ if (config_debug) { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #ifndef assert_not_implemented #define assert_not_implemented(e) do { \ if (unlikely(config_debug && !(e))) { \ not_implemented(); \ } \ } while (0) #endif /* Use to assert a particular configuration, e.g., cassert(config_debug). */ #ifndef cassert #define cassert(c) do { \ if (unlikely(!(c))) { \ not_reached(); \ } \ } while (0) #endif jemalloc-sys-0.3.2/rep/include/jemalloc/internal/atomic.h010064400007650000024000000052421344617474000216120ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_H #define JEMALLOC_INTERNAL_ATOMIC_H #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE #define JEMALLOC_U8_ATOMICS #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) # include "jemalloc/internal/atomic_gcc_atomic.h" # if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS) # undef JEMALLOC_U8_ATOMICS # endif #elif defined(JEMALLOC_GCC_SYNC_ATOMICS) # include "jemalloc/internal/atomic_gcc_sync.h" # if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS) # undef JEMALLOC_U8_ATOMICS # endif #elif defined(_MSC_VER) # include "jemalloc/internal/atomic_msvc.h" #elif defined(JEMALLOC_C11_ATOMICS) # include "jemalloc/internal/atomic_c11.h" #else # error "Don't have atomics implemented on this platform." #endif /* * This header gives more or less a backport of C11 atomics. The user can write * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate * counterparts of the C11 atomic functions for type, as so: * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3); * and then write things like: * int *some_ptr; * atomic_pi_t atomic_ptr_to_int; * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED); * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL); * assert(some_ptr == prev_value); * and expect things to work in the obvious way. * * Also included (with naming differences to avoid conflicts with the standard * library): * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). */ /* * Pure convenience, so that we don't have to type "atomic_memory_order_" * quite so often. */ #define ATOMIC_RELAXED atomic_memory_order_relaxed #define ATOMIC_ACQUIRE atomic_memory_order_acquire #define ATOMIC_RELEASE atomic_memory_order_release #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst /* * Not all platforms have 64-bit atomics. If we do, this #define exposes that * fact. */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # define JEMALLOC_ATOMIC_U64 #endif JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) /* * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only * platform that actually needs to know the size, MSVC. 
*/ JEMALLOC_GENERATE_ATOMICS(bool, b, 0) JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0) JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) #ifdef JEMALLOC_ATOMIC_U64 JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) #endif #undef ATOMIC_INLINE #endif /* JEMALLOC_INTERNAL_ATOMIC_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/atomic_c11.h010064400007650000024000000067321344617474000222630ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H #define JEMALLOC_INTERNAL_ATOMIC_C11_H #include <stdatomic.h> #define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__) #define atomic_memory_order_t memory_order #define atomic_memory_order_relaxed memory_order_relaxed #define atomic_memory_order_acquire memory_order_acquire #define atomic_memory_order_release memory_order_release #define atomic_memory_order_acq_rel memory_order_acq_rel #define atomic_memory_order_seq_cst memory_order_seq_cst #define atomic_fence atomic_thread_fence #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ /* unused */ lg_size) \ typedef _Atomic(type) atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ /* \ * A strict interpretation of the C standard prevents \ * atomic_load from taking a const argument, but it's \ * convenient for our purposes. This cast is a workaround. \ */ \ atomic_##short_type##_t* a_nonconst = \ (atomic_##short_type##_t*)a; \ return atomic_load_explicit(a_nonconst, mo); \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ atomic_store_explicit(a, val, mo); \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return atomic_exchange_explicit(a, val, mo); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return atomic_compare_exchange_weak_explicit(a, expected, \ desired, success_mo, failure_mo); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return atomic_compare_exchange_strong_explicit(a, expected, \ desired, success_mo, failure_mo); \ } /* * Integral types have some special operations available that non-integral ones * lack. 
*/ #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ /* unused */ lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_add_explicit(a, val, mo); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_sub_explicit(a, val, mo); \ } \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_and_explicit(a, val, mo); \ } \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_or_explicit(a, val, mo); \ } \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return atomic_fetch_xor_explicit(a, val, mo); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/atomic_gcc_atomic.h010064400007650000024000000100211344617474000237510ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H #define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H #include "jemalloc/internal/assert.h" #define ATOMIC_INIT(...) {__VA_ARGS__} typedef enum { atomic_memory_order_relaxed, atomic_memory_order_acquire, atomic_memory_order_release, atomic_memory_order_acq_rel, atomic_memory_order_seq_cst } atomic_memory_order_t; ATOMIC_INLINE int atomic_enum_to_builtin(atomic_memory_order_t mo) { switch (mo) { case atomic_memory_order_relaxed: return __ATOMIC_RELAXED; case atomic_memory_order_acquire: return __ATOMIC_ACQUIRE; case atomic_memory_order_release: return __ATOMIC_RELEASE; case atomic_memory_order_acq_rel: return __ATOMIC_ACQ_REL; case atomic_memory_order_seq_cst: return __ATOMIC_SEQ_CST; } /* Can't happen; the switch is exhaustive. 
*/ not_reached(); } ATOMIC_INLINE void atomic_fence(atomic_memory_order_t mo) { __atomic_thread_fence(atomic_enum_to_builtin(mo)); } #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ /* unused */ lg_size) \ typedef struct { \ type repr; \ } atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ type result; \ __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ return result; \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ type result; \ __atomic_exchange(&a->repr, &val, &result, \ atomic_enum_to_builtin(mo)); \ return result; \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return __atomic_compare_exchange(&a->repr, expected, &desired, \ true, atomic_enum_to_builtin(success_mo), \ atomic_enum_to_builtin(failure_mo)); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return __atomic_compare_exchange(&a->repr, expected, &desired, \ false, \ atomic_enum_to_builtin(success_mo), \ atomic_enum_to_builtin(failure_mo)); \ } #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ /* unused */ lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_add(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_sub(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_and(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_or(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __atomic_fetch_xor(&a->repr, val, \ atomic_enum_to_builtin(mo)); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/atomic_gcc_sync.h010064400007650000024000000143331344617474000234630ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H #define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H #define ATOMIC_INIT(...) {__VA_ARGS__} typedef enum { atomic_memory_order_relaxed, atomic_memory_order_acquire, atomic_memory_order_release, atomic_memory_order_acq_rel, atomic_memory_order_seq_cst } atomic_memory_order_t; ATOMIC_INLINE void atomic_fence(atomic_memory_order_t mo) { /* Easy cases first: no barrier, and full barrier. 
*/ if (mo == atomic_memory_order_relaxed) { asm volatile("" ::: "memory"); return; } if (mo == atomic_memory_order_seq_cst) { asm volatile("" ::: "memory"); __sync_synchronize(); asm volatile("" ::: "memory"); return; } asm volatile("" ::: "memory"); # if defined(__i386__) || defined(__x86_64__) /* This is implicit on x86. */ # elif defined(__ppc64__) asm volatile("lwsync"); # elif defined(__ppc__) asm volatile("sync"); # elif defined(__sparc__) && defined(__arch64__) if (mo == atomic_memory_order_acquire) { asm volatile("membar #LoadLoad | #LoadStore"); } else if (mo == atomic_memory_order_release) { asm volatile("membar #LoadStore | #StoreStore"); } else { asm volatile("membar #LoadLoad | #LoadStore | #StoreStore"); } # else __sync_synchronize(); # endif asm volatile("" ::: "memory"); } /* * A correct implementation of seq_cst loads and stores on weakly ordered * architectures could do either of the following: * 1. store() is weak-fence -> store -> strong fence, load() is load -> * strong-fence. * 2. store() is strong-fence -> store, load() is strong-fence -> load -> * weak-fence. * The tricky thing is, load() and store() above can be the load or store * portions of a gcc __sync builtin, so we have to follow GCC's lead, which * means going with strategy 2. * On strongly ordered architectures, the natural strategy is to stick a strong * fence after seq_cst stores, and have naked loads. So we want the strong * fences in different places on different architectures. * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to * accomplish this. */ ATOMIC_INLINE void atomic_pre_sc_load_fence() { # if defined(__i386__) || defined(__x86_64__) || \ (defined(__sparc__) && defined(__arch64__)) atomic_fence(atomic_memory_order_relaxed); # else atomic_fence(atomic_memory_order_seq_cst); # endif } ATOMIC_INLINE void atomic_post_sc_store_fence() { # if defined(__i386__) || defined(__x86_64__) || \ (defined(__sparc__) && defined(__arch64__)) atomic_fence(atomic_memory_order_seq_cst); # else atomic_fence(atomic_memory_order_relaxed); # endif } #define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ /* unused */ lg_size) \ typedef struct { \ type volatile repr; \ } atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ if (mo == atomic_memory_order_seq_cst) { \ atomic_pre_sc_load_fence(); \ } \ type result = a->repr; \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_acquire); \ } \ return result; \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_release); \ } \ a->repr = val; \ if (mo == atomic_memory_order_seq_cst) { \ atomic_post_sc_store_fence(); \ } \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ /* \ * Because of FreeBSD, we care about gcc 4.2, which doesn't have\ * an atomic exchange builtin. We fake it with a CAS loop. 
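	 * Illustrative trace (added): with a->repr == 5, an exchange	\
	 * to 7 reads old == 5 and attempts				\
	 * __sync_bool_compare_and_swap(&a->repr, 5, 7); if another	\
	 * thread raced in between, the CAS fails and the loop retries	\
	 * from a fresh read, and on success the previous value 5 is	\
	 * returned.							\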
\ */ \ while (true) { \ type old = a->repr; \ if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \ return old; \ } \ } \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, \ atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ desired); \ if (prev == *expected) { \ return true; \ } else { \ *expected = prev; \ return false; \ } \ } \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, \ atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ desired); \ if (prev == *expected) { \ return true; \ } else { \ *expected = prev; \ return false; \ } \ } #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ /* unused */ lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_add(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_sub(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_and(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_or(&a->repr, val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return __sync_fetch_and_xor(&a->repr, val); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/atomic_msvc.h010064400007650000024000000126441344617474000226460ustar0000000000000000#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H #define JEMALLOC_INTERNAL_ATOMIC_MSVC_H #define ATOMIC_INIT(...) {__VA_ARGS__} typedef enum { atomic_memory_order_relaxed, atomic_memory_order_acquire, atomic_memory_order_release, atomic_memory_order_acq_rel, atomic_memory_order_seq_cst } atomic_memory_order_t; typedef char atomic_repr_0_t; typedef short atomic_repr_1_t; typedef long atomic_repr_2_t; typedef __int64 atomic_repr_3_t; ATOMIC_INLINE void atomic_fence(atomic_memory_order_t mo) { _ReadWriteBarrier(); # if defined(_M_ARM) || defined(_M_ARM64) /* ARM needs a barrier for everything but relaxed. */ if (mo != atomic_memory_order_relaxed) { MemoryBarrier(); } # elif defined(_M_IX86) || defined (_M_X64) /* x86 needs a barrier only for seq_cst. */ if (mo == atomic_memory_order_seq_cst) { MemoryBarrier(); } # else # error "Don't know how to create atomics for this platform for MSVC." 
# endif _ReadWriteBarrier(); } #define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t #define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b) #define ATOMIC_RAW_CONCAT(a, b) a ## b #define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \ base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) #define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) #define ATOMIC_INTERLOCKED_SUFFIX_0 8 #define ATOMIC_INTERLOCKED_SUFFIX_1 16 #define ATOMIC_INTERLOCKED_SUFFIX_2 #define ATOMIC_INTERLOCKED_SUFFIX_3 64 #define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ typedef struct { \ ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ } atomic_##short_type##_t; \ \ ATOMIC_INLINE type \ atomic_load_##short_type(const atomic_##short_type##_t *a, \ atomic_memory_order_t mo) { \ ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_acquire); \ } \ return (type) ret; \ } \ \ ATOMIC_INLINE void \ atomic_store_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ if (mo != atomic_memory_order_relaxed) { \ atomic_fence(atomic_memory_order_release); \ } \ a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \ if (mo == atomic_memory_order_seq_cst) { \ atomic_fence(atomic_memory_order_seq_cst); \ } \ } \ \ ATOMIC_INLINE type \ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ ATOMIC_INTERLOCKED_REPR(lg_size) e = \ (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \ ATOMIC_INTERLOCKED_REPR(lg_size) d = \ (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ ATOMIC_INTERLOCKED_REPR(lg_size) old = \ ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \ lg_size)(&a->repr, d, e); \ if (old == e) { \ return true; \ } else { \ *expected = (type)old; \ return false; \ } \ } \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ type *expected, type desired, atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ /* We implement the weak version with strong semantics. */ \ return atomic_compare_exchange_weak_##short_type(a, expected, \ desired, success_mo, failure_mo); \ } #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ \ ATOMIC_INLINE type \ atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ \ ATOMIC_INLINE type \ atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ /* \ * MSVC warns on negation of unsigned operands, but for us it \ * gives exactly the right semantics (MAX_TYPE + 1 - operand). 
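	 * Worked example (added): for a 32-bit unsigned type, -5	\
	 * wraps to 2^32 - 5 == 4294967291, so adding it via		\
	 * _InterlockedExchangeAdd subtracts 5 modulo 2^32, which is	\
	 * exactly the fetch_sub semantics we want.			\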
\ */ \ __pragma(warning(push)) \ __pragma(warning(disable: 4146)) \ return atomic_fetch_add_##short_type(a, -val, mo); \ __pragma(warning(pop)) \ } \ ATOMIC_INLINE type \ atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ ATOMIC_INLINE type \ atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } \ ATOMIC_INLINE type \ atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ type val, atomic_memory_order_t mo) { \ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ } #endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/background_thread_externs.h010064400007650000024000000024561344617474000255600ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H extern bool opt_background_thread; extern size_t opt_max_background_threads; extern malloc_mutex_t background_thread_lock; extern atomic_b_t background_thread_enabled_state; extern size_t n_background_threads; extern size_t max_background_threads; extern background_thread_info_t *background_thread_info; bool background_thread_create(tsd_t *tsd, unsigned arena_ind); bool background_threads_enable(tsd_t *tsd); bool background_threads_disable(tsd_t *tsd); void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, size_t npages_new); void background_thread_prefork0(tsdn_t *tsdn); void background_thread_prefork1(tsdn_t *tsdn); void background_thread_postfork_parent(tsdn_t *tsdn); void background_thread_postfork_child(tsdn_t *tsdn); bool background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats); void background_thread_ctl_init(tsdn_t *tsdn); #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); #endif bool background_thread_boot0(void); bool background_thread_boot1(tsdn_t *tsdn); #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/background_thread_inlines.h010064400007650000024000000041301344617474000255200ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H JEMALLOC_ALWAYS_INLINE bool background_thread_enabled(void) { return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED); } JEMALLOC_ALWAYS_INLINE void background_thread_enabled_set(tsdn_t *tsdn, bool state) { malloc_mutex_assert_owner(tsdn, &background_thread_lock); atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED); } JEMALLOC_ALWAYS_INLINE background_thread_info_t * arena_background_thread_info_get(arena_t *arena) { unsigned arena_ind = arena_ind_get(arena); return &background_thread_info[arena_ind % max_background_threads]; } JEMALLOC_ALWAYS_INLINE background_thread_info_t * background_thread_info_get(size_t ind) { return &background_thread_info[ind % max_background_threads]; } JEMALLOC_ALWAYS_INLINE uint64_t background_thread_wakeup_time_get(background_thread_info_t *info) { uint64_t next_wakeup = nstime_ns(&info->next_wakeup); 
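	/*
	 * Illustrative note (added): the ATOMIC_ACQUIRE load in the assert
	 * below pairs with the ATOMIC_RELEASE store performed by
	 * background_thread_wakeup_time_set(), so a reader observes
	 * indefinite_sleep and next_wakeup in a consistent state.
	 */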
assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); return next_wakeup; } JEMALLOC_ALWAYS_INLINE void background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, uint64_t wakeup_time) { malloc_mutex_assert_owner(tsdn, &info->mtx); atomic_store_b(&info->indefinite_sleep, wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); nstime_init(&info->next_wakeup, wakeup_time); } JEMALLOC_ALWAYS_INLINE bool background_thread_indefinite_sleep(background_thread_info_t *info) { return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); } JEMALLOC_ALWAYS_INLINE void arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, bool is_background_thread) { if (!background_thread_enabled() || is_background_thread) { return; } background_thread_info_t *info = arena_background_thread_info_get(arena); if (background_thread_indefinite_sleep(info)) { background_thread_interval_check(tsdn, arena, &arena->decay_dirty, 0); } } #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/background_thread_structs.h010064400007650000024000000033701344617474000255730ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H /* This file really combines "structs" and "types", but only transitionally. */ #if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK) # define JEMALLOC_PTHREAD_CREATE_WRAPPER #endif #define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX #define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT #define DEFAULT_NUM_BACKGROUND_THREAD 4 typedef enum { background_thread_stopped, background_thread_started, /* Thread waits on the global lock when paused (for arena_reset). */ background_thread_paused, } background_thread_state_t; struct background_thread_info_s { #ifdef JEMALLOC_BACKGROUND_THREAD /* Background thread is pthread specific. */ pthread_t thread; pthread_cond_t cond; #endif malloc_mutex_t mtx; background_thread_state_t state; /* When true, it means no wakeup scheduled. */ atomic_b_t indefinite_sleep; /* Next scheduled wakeup time (absolute time in ns). */ nstime_t next_wakeup; /* * Since the last background thread run, newly added number of pages * that need to be purged by the next wakeup. This is adjusted on * epoch advance, and is used to determine whether we should signal the * background thread to wake up earlier. */ size_t npages_to_purge_new; /* Stats: total number of runs since started. */ uint64_t tot_n_runs; /* Stats: total sleep time since started. 
*/ nstime_t tot_sleep_time; }; typedef struct background_thread_info_s background_thread_info_t; struct background_thread_stats_s { size_t num_threads; uint64_t num_runs; nstime_t run_interval; }; typedef struct background_thread_stats_s background_thread_stats_t; #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/base_externs.h010064400007650000024000000016621344617474000230220ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H #define JEMALLOC_INTERNAL_BASE_EXTERNS_H extern metadata_thp_mode_t opt_metadata_thp; extern const char *metadata_thp_mode_names[]; base_t *b0get(void); base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); void base_delete(tsdn_t *tsdn, base_t *base); extent_hooks_t *base_extent_hooks_get(base_t *base); extent_hooks_t *base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks); void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base); void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, size_t *mapped, size_t *n_thp); void base_prefork(tsdn_t *tsdn, base_t *base); void base_postfork_parent(tsdn_t *tsdn, base_t *base); void base_postfork_child(tsdn_t *tsdn, base_t *base); bool base_boot(tsdn_t *tsdn); #endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/base_inlines.h010064400007650000024000000004701344617474000227670ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H #define JEMALLOC_INTERNAL_BASE_INLINES_H static inline unsigned base_ind_get(const base_t *base) { return base->ind; } static inline bool metadata_thp_enabled(void) { return (opt_metadata_thp != metadata_thp_disabled); } #endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/base_structs.h010064400007650000024000000030241344617474000230330ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H #define JEMALLOC_INTERNAL_BASE_STRUCTS_H #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sc.h" /* Embedded at the beginning of every block of base-managed virtual memory. */ struct base_block_s { /* Total size of block's virtual memory mapping. */ size_t size; /* Next block in list of base's blocks. */ base_block_t *next; /* Tracks unused trailing space. */ extent_t extent; }; struct base_s { /* Associated arena's index within the arenas array. */ unsigned ind; /* * User-configurable extent hook functions. Points to an * extent_hooks_t. */ atomic_p_t extent_hooks; /* Protects base_alloc() and base_stats_get() operations. */ malloc_mutex_t mtx; /* Using THP when true (metadata_thp auto mode). */ bool auto_thp_switched; /* * Most recent size class in the series of increasingly large base * extents. Logarithmic spacing between subsequent allocations ensures * that the total number of distinct mappings remains small. */ pszind_t pind_last; /* Serial number generation state. */ size_t extent_sn_next; /* Chain of all blocks associated with base. */ base_block_t *blocks; /* Heap of extents that track unused trailing space within blocks. */ extent_heap_t avail[SC_NSIZES]; /* Stats, only maintained if config_stats. */ size_t allocated; size_t resident; size_t mapped; /* Number of THP regions touched. 
*/ size_t n_thp; }; #endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/base_types.h010064400007650000024000000021211344617474000224650ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H #define JEMALLOC_INTERNAL_BASE_TYPES_H typedef struct base_block_s base_block_t; typedef struct base_s base_t; #define METADATA_THP_DEFAULT metadata_thp_disabled /* * In auto mode, arenas switch to huge pages for the base allocator on the * second base block. a0 switches to thp on the 5th block (after 20 megabytes * of metadata), since more metadata (e.g. rtree nodes) come from a0's base. */ #define BASE_AUTO_THP_THRESHOLD 2 #define BASE_AUTO_THP_THRESHOLD_A0 5 typedef enum { metadata_thp_disabled = 0, /* * Lazily enable hugepage for metadata. To avoid high RSS caused by THP * + low usage arena (i.e. THP becomes a significant percentage), the * "auto" option only starts using THP after a base allocator used up * the first THP region. Starting from the second hugepage (in a single * arena), "auto" behaves the same as "always", i.e. madvise hugepage * right away. */ metadata_thp_auto = 1, metadata_thp_always = 2, metadata_thp_mode_limit = 3 } metadata_thp_mode_t; #endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/bin.h010064400007650000024000000070571344617474000211140ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BIN_H #define JEMALLOC_INTERNAL_BIN_H #include "jemalloc/internal/bin_stats.h" #include "jemalloc/internal/bin_types.h" #include "jemalloc/internal/extent_types.h" #include "jemalloc/internal/extent_structs.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sc.h" /* * A bin contains a set of extents that are currently being used for slab * allocations. */ /* * Read-only information associated with each element of arena_t's bins array * is stored separately, partly to reduce memory usage (only one copy, rather * than one per arena), but mainly to avoid false cacheline sharing. * * Each slab has the following layout: * * /--------------------\ * | region 0 | * |--------------------| * | region 1 | * |--------------------| * | ... | * | ... | * | ... | * |--------------------| * | region nregs-1 | * \--------------------/ */ typedef struct bin_info_s bin_info_t; struct bin_info_s { /* Size of regions in a slab for this bin's size class. */ size_t reg_size; /* Total size of a slab for this bin's size class. */ size_t slab_size; /* Total number of regions in a slab for this bin's size class. */ uint32_t nregs; /* Number of sharded bins in each arena for this size class. */ uint32_t n_shards; /* * Metadata used to manipulate bitmaps for slabs associated with this * bin. */ bitmap_info_t bitmap_info; }; extern bin_info_t bin_infos[SC_NBINS]; typedef struct bin_s bin_t; struct bin_s { /* All operations on bin_t fields require lock ownership. */ malloc_mutex_t lock; /* * Current slab being used to service allocations of this bin's size * class. slabcur is independent of slabs_{nonfull,full}; whenever * slabcur is reassigned, the previous slab must be deallocated or * inserted into slabs_{nonfull,full}. */ extent_t *slabcur; /* * Heap of non-full slabs. This heap is used to assure that new * allocations come from the non-full slab that is oldest/lowest in * memory. */ extent_heap_t slabs_nonfull; /* List used to track full slabs. */ extent_list_t slabs_full; /* Bin statistics. */ bin_stats_t stats; }; /* A set of sharded bins of the same size class. 
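 *
 * Illustrative note (added): if bin_infos[i].n_shards == 4, an arena keeps
 * four independent bin_t shards for size class i, each with its own lock,
 * so concurrent threads bound to the same arena contend less on that size
 * class.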
*/ typedef struct bins_s bins_t; struct bins_s { /* Sharded bins. Dynamically sized. */ bin_t *bin_shards; }; void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]); bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size, size_t end_size, size_t nshards); void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]); /* Initializes a bin to empty. Returns true on error. */ bool bin_init(bin_t *bin); /* Forking. */ void bin_prefork(tsdn_t *tsdn, bin_t *bin); void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin); void bin_postfork_child(tsdn_t *tsdn, bin_t *bin); /* Stats. */ static inline void bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) { malloc_mutex_lock(tsdn, &bin->lock); malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock); dst_bin_stats->nmalloc += bin->stats.nmalloc; dst_bin_stats->ndalloc += bin->stats.ndalloc; dst_bin_stats->nrequests += bin->stats.nrequests; dst_bin_stats->curregs += bin->stats.curregs; dst_bin_stats->nfills += bin->stats.nfills; dst_bin_stats->nflushes += bin->stats.nflushes; dst_bin_stats->nslabs += bin->stats.nslabs; dst_bin_stats->reslabs += bin->stats.reslabs; dst_bin_stats->curslabs += bin->stats.curslabs; malloc_mutex_unlock(tsdn, &bin->lock); } #endif /* JEMALLOC_INTERNAL_BIN_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/bin_stats.h010064400007650000024000000024271344617474000223260ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BIN_STATS_H #define JEMALLOC_INTERNAL_BIN_STATS_H #include "jemalloc/internal/mutex_prof.h" typedef struct bin_stats_s bin_stats_t; struct bin_stats_s { /* * Total number of allocation/deallocation requests served directly by * the bin. Note that tcache may allocate an object, then recycle it * many times, resulting many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to the size of this * bin. This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* * Current number of regions of this size class, including regions * currently cached by tcache. */ size_t curregs; /* Number of tcache fills from this bin. */ uint64_t nfills; /* Number of tcache flushes to this bin. */ uint64_t nflushes; /* Total number of slabs created for this bin's size class. */ uint64_t nslabs; /* * Total number of slabs reused by extracting them from the slabs heap * for this bin's size class. */ uint64_t reslabs; /* Current number of slabs in this bin. */ size_t curslabs; mutex_prof_data_t mutex_data; }; #endif /* JEMALLOC_INTERNAL_BIN_STATS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/bin_types.h010064400007650000024000000007321344617474000223310ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H #define JEMALLOC_INTERNAL_BIN_TYPES_H #include "jemalloc/internal/sc.h" #define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH) #define N_BIN_SHARDS_DEFAULT 1 /* Used in TSD static initializer only. Real init in arena_bind(). 
*/ #define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}} typedef struct tsd_binshards_s tsd_binshards_t; struct tsd_binshards_s { uint8_t binshard[SC_NBINS]; }; #endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/bit_util.h010064400007650000024000000123441344617474000221520ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H #define JEMALLOC_INTERNAL_BIT_UTIL_H #include "jemalloc/internal/assert.h" #define BIT_UTIL_INLINE static inline /* Sanity check. */ #if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ || !defined(JEMALLOC_INTERNAL_FFS) # error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure #endif BIT_UTIL_INLINE unsigned ffs_llu(unsigned long long bitmap) { return JEMALLOC_INTERNAL_FFSLL(bitmap); } BIT_UTIL_INLINE unsigned ffs_lu(unsigned long bitmap) { return JEMALLOC_INTERNAL_FFSL(bitmap); } BIT_UTIL_INLINE unsigned ffs_u(unsigned bitmap) { return JEMALLOC_INTERNAL_FFS(bitmap); } #ifdef JEMALLOC_INTERNAL_POPCOUNTL BIT_UTIL_INLINE unsigned popcount_lu(unsigned long bitmap) { return JEMALLOC_INTERNAL_POPCOUNTL(bitmap); } #endif /* * Clears first unset bit in bitmap, and returns * place of bit. bitmap *must not* be 0. */ BIT_UTIL_INLINE size_t cfs_lu(unsigned long* bitmap) { size_t bit = ffs_lu(*bitmap) - 1; *bitmap ^= ZU(1) << bit; return bit; } BIT_UTIL_INLINE unsigned ffs_zu(size_t bitmap) { #if LG_SIZEOF_PTR == LG_SIZEOF_INT return ffs_u(bitmap); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG return ffs_lu(bitmap); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG return ffs_llu(bitmap); #else #error No implementation for size_t ffs() #endif } BIT_UTIL_INLINE unsigned ffs_u64(uint64_t bitmap) { #if LG_SIZEOF_LONG == 3 return ffs_lu(bitmap); #elif LG_SIZEOF_LONG_LONG == 3 return ffs_llu(bitmap); #else #error No implementation for 64-bit ffs() #endif } BIT_UTIL_INLINE unsigned ffs_u32(uint32_t bitmap) { #if LG_SIZEOF_INT == 2 return ffs_u(bitmap); #else #error No implementation for 32-bit ffs() #endif return ffs_u(bitmap); } BIT_UTIL_INLINE uint64_t pow2_ceil_u64(uint64_t x) { #if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) if(unlikely(x <= 1)) { return x; } size_t msb_on_index; #if (defined(__amd64__) || defined(__x86_64__)) asm ("bsrq %1, %0" : "=r"(msb_on_index) // Outputs. : "r"(x-1) // Inputs. ); #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) msb_on_index = (63 ^ __builtin_clzll(x - 1)); #endif assert(msb_on_index < 63); return 1ULL << (msb_on_index + 1); #else x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x |= x >> 32; x++; return x; #endif } BIT_UTIL_INLINE uint32_t pow2_ceil_u32(uint32_t x) { #if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__))) if(unlikely(x <= 1)) { return x; } size_t msb_on_index; #if (defined(__i386__)) asm ("bsr %1, %0" : "=r"(msb_on_index) // Outputs. : "r"(x-1) // Inputs. ); #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) msb_on_index = (31 ^ __builtin_clz(x - 1)); #endif assert(msb_on_index < 31); return 1U << (msb_on_index + 1); #else x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x++; return x; #endif } /* Compute the smallest power of 2 that is >= x. 
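 *
 * Illustrative values (added): pow2_ceil_zu(1) == 1, pow2_ceil_zu(5) == 8,
 * pow2_ceil_zu(8) == 8, pow2_ceil_zu(9) == 16.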
*/ BIT_UTIL_INLINE size_t pow2_ceil_zu(size_t x) { #if (LG_SIZEOF_PTR == 3) return pow2_ceil_u64(x); #else return pow2_ceil_u32(x); #endif } #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) BIT_UTIL_INLINE unsigned lg_floor(size_t x) { size_t ret; assert(x != 0); asm ("bsr %1, %0" : "=r"(ret) // Outputs. : "r"(x) // Inputs. ); assert(ret < UINT_MAX); return (unsigned)ret; } #elif (defined(_MSC_VER)) BIT_UTIL_INLINE unsigned lg_floor(size_t x) { unsigned long ret; assert(x != 0); #if (LG_SIZEOF_PTR == 3) _BitScanReverse64(&ret, x); #elif (LG_SIZEOF_PTR == 2) _BitScanReverse(&ret, x); #else # error "Unsupported type size for lg_floor()" #endif assert(ret < UINT_MAX); return (unsigned)ret; } #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) BIT_UTIL_INLINE unsigned lg_floor(size_t x) { assert(x != 0); #if (LG_SIZEOF_PTR == LG_SIZEOF_INT) return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x); #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x); #else # error "Unsupported type size for lg_floor()" #endif } #else BIT_UTIL_INLINE unsigned lg_floor(size_t x) { assert(x != 0); x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); #if (LG_SIZEOF_PTR == 3) x |= (x >> 32); #endif if (x == SIZE_T_MAX) { return (8 << LG_SIZEOF_PTR) - 1; } x++; return ffs_zu(x) - 2; } #endif BIT_UTIL_INLINE unsigned lg_ceil(size_t x) { return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1); } #undef BIT_UTIL_INLINE /* A compile-time version of lg_floor and lg_ceil. */ #define LG_FLOOR_1(x) 0 #define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1)) #define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2)) #define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4)) #define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8)) #define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16)) #define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32)) #if LG_SIZEOF_PTR == 2 # define LG_FLOOR(x) LG_FLOOR_32((x)) #else # define LG_FLOOR(x) LG_FLOOR_64((x)) #endif #define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1)) #endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/bitmap.h010064400007650000024000000257361344617474000216240ustar0000000000000000#ifndef JEMALLOC_INTERNAL_BITMAP_H #define JEMALLOC_INTERNAL_BITMAP_H #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/sc.h" typedef unsigned long bitmap_t; #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ #if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES) /* Maximum bitmap bit count is determined by maximum regions per slab. */ # define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS #else /* Maximum bitmap bit count is determined by number of extent size classes. */ # define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES) #endif #define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) /* Number of bits per group. */ #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) #define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) /* * Do some analysis on how big the bitmap is before we use a tree. For a brute * force linear search, if we would have to call ffs_lu() more than 2^3 times, * use a tree instead. 
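 *
 * Worked example (added, assuming 64-bit bitmap_t, i.e.
 * LG_BITMAP_GROUP_NBITS == 6): a 512-bit bitmap occupies 8 groups, so a
 * linear scan needs at most 2^3 ffs_lu() calls and the flat layout is
 * kept; at LG_BITMAP_MAXBITS == 10 (1024 bits, 16 groups), 10 - 6 > 3
 * holds and BITMAP_USE_TREE is defined.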
*/ #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 # define BITMAP_USE_TREE #endif /* Number of groups required to store a given number of bits. */ #define BITMAP_BITS2GROUPS(nbits) \ (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) /* * Number of groups required at a particular level for a given number of bits. */ #define BITMAP_GROUPS_L0(nbits) \ BITMAP_BITS2GROUPS(nbits) #define BITMAP_GROUPS_L1(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) #define BITMAP_GROUPS_L2(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) #define BITMAP_GROUPS_L3(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ BITMAP_BITS2GROUPS((nbits))))) #define BITMAP_GROUPS_L4(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) /* * Assuming the number of levels, number of groups required for a given number * of bits. */ #define BITMAP_GROUPS_1_LEVEL(nbits) \ BITMAP_GROUPS_L0(nbits) #define BITMAP_GROUPS_2_LEVEL(nbits) \ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) #define BITMAP_GROUPS_3_LEVEL(nbits) \ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) #define BITMAP_GROUPS_4_LEVEL(nbits) \ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) #define BITMAP_GROUPS_5_LEVEL(nbits) \ (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) /* * Maximum number of groups required to support LG_BITMAP_MAXBITS. */ #ifdef BITMAP_USE_TREE #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 # define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) # define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) #else # error "Unsupported bitmap size" #endif /* * Maximum number of levels possible. This could be statically computed based * on LG_BITMAP_MAXBITS: * * #define BITMAP_MAX_LEVELS \ * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) * * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the * various cascading macros. The only additional cost this incurs is some * unused trailing entries in bitmap_info_t structures; the bitmaps themselves * are not impacted. */ #define BITMAP_MAX_LEVELS 5 #define BITMAP_INFO_INITIALIZER(nbits) { \ /* nbits. */ \ nbits, \ /* nlevels. */ \ (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ /* levels. 
*/ \ { \ {0}, \ {BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ + BITMAP_GROUPS_L0(nbits)} \ } \ } #else /* BITMAP_USE_TREE */ #define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) #define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) #define BITMAP_INFO_INITIALIZER(nbits) { \ /* nbits. */ \ nbits, \ /* ngroups. */ \ BITMAP_BITS2GROUPS(nbits) \ } #endif /* BITMAP_USE_TREE */ typedef struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; } bitmap_level_t; typedef struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; #ifdef BITMAP_USE_TREE /* Number of levels necessary for nbits. */ unsigned nlevels; /* * Only the first (nlevels+1) elements are used, and levels are ordered * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; #else /* BITMAP_USE_TREE */ /* Number of groups necessary for nbits. */ size_t ngroups; #endif /* BITMAP_USE_TREE */ } bitmap_info_t; void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); size_t bitmap_size(const bitmap_info_t *binfo); static inline bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { #ifdef BITMAP_USE_TREE size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. */ return (rg == 0); #else size_t i; for (i = 0; i < binfo->ngroups; i++) { if (bitmap[i] != 0) { return false; } } return true; #endif } static inline bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); } static inline void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); assert(!bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); #ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. */ if (g == 0) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (g != 0) { break; } } } #endif } /* ffu: find first unset >= bit. */ static inline size_t bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { assert(min_bit < binfo->nbits); #ifdef BITMAP_USE_TREE size_t bit = 0; for (unsigned level = binfo->nlevels; level--;) { size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + 1)); bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit >> lg_bits_per_group)]; unsigned group_nmask = (unsigned)(((min_bit > bit) ? 
(min_bit - bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); assert(group_nmask <= BITMAP_GROUP_NBITS); bitmap_t group_mask = ~((1LU << group_nmask) - 1); bitmap_t group_masked = group & group_mask; if (group_masked == 0LU) { if (group == 0LU) { return binfo->nbits; } /* * min_bit was preceded by one or more unset bits in * this group, but there are no other unset bits in this * group. Try again starting at the first bit of the * next sibling. This will recurse at most once per * non-root level. */ size_t sib_base = bit + (ZU(1) << lg_bits_per_group); assert(sib_base > min_bit); assert(sib_base > bit); if (sib_base >= binfo->nbits) { return binfo->nbits; } return bitmap_ffu(bitmap, binfo, sib_base); } bit += ((size_t)(ffs_lu(group_masked) - 1)) << (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); } assert(bit >= min_bit); assert(bit < binfo->nbits); return bit; #else size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) - 1); size_t bit; do { bit = ffs_lu(g); if (bit != 0) { return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); } i++; g = bitmap[i]; } while (i < binfo->ngroups); return binfo->nbits; #endif } /* sfu: set first unset. */ static inline size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; assert(!bitmap_full(bitmap, binfo)); #ifdef BITMAP_USE_TREE i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; bit = ffs_lu(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); } #else i = 0; g = bitmap[0]; while ((bit = ffs_lu(g)) == 0) { i++; g = bitmap[i]; } bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); #endif bitmap_set(bitmap, binfo, bit); return bit; } static inline void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; UNUSED bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(!bitmap_get(bitmap, binfo, bit)); #ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. */ if (propagate) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (!propagate) { break; } } } #endif /* BITMAP_USE_TREE */ } #endif /* JEMALLOC_INTERNAL_BITMAP_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/cache_bin.h010064400007650000024000000073031344617474000222310ustar0000000000000000#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H #define JEMALLOC_INTERNAL_CACHE_BIN_H #include "jemalloc/internal/ql.h" /* * The cache_bins are the mechanism that the tcache and the arena use to * communicate. The tcache fills from and flushes to the arena by passing a * cache_bin_t to fill/flush. When the arena needs to pull stats from the * tcaches associated with it, it does so by iterating over its * cache_bin_array_descriptor_t objects and reading out per-bin stats it * contains. This makes it so that the arena need not know about the existence * of the tcache at all. */ /* * The count of the number of cached allocations in a bin. 
We make this signed * so that negative numbers can encode "invalid" states (e.g. a low water mark * of -1 for a cache that has been depleted). */ typedef int32_t cache_bin_sz_t; typedef struct cache_bin_stats_s cache_bin_stats_t; struct cache_bin_stats_s { /* * Number of allocation requests that corresponded to the size of this * bin. */ uint64_t nrequests; }; /* * Read-only information associated with each element of tcache_t's tbins array * is stored separately, mainly to reduce memory usage. */ typedef struct cache_bin_info_s cache_bin_info_t; struct cache_bin_info_s { /* Upper limit on ncached. */ cache_bin_sz_t ncached_max; }; typedef struct cache_bin_s cache_bin_t; struct cache_bin_s { /* Min # cached since last GC. */ cache_bin_sz_t low_water; /* # of cached objects. */ cache_bin_sz_t ncached; /* * ncached and stats are both modified frequently. Let's keep them * close so that they have a higher chance of being on the same * cacheline, thus less write-backs. */ cache_bin_stats_t tstats; /* * Stack of available objects. * * To make use of adjacent cacheline prefetch, the items in the avail * stack goes to higher address for newer allocations. avail points * just above the available space, which means that * avail[-ncached, ... -1] are available items and the lowest item will * be allocated first. */ void **avail; }; typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t; struct cache_bin_array_descriptor_s { /* * The arena keeps a list of the cache bins associated with it, for * stats collection. */ ql_elm(cache_bin_array_descriptor_t) link; /* Pointers to the tcache bins. */ cache_bin_t *bins_small; cache_bin_t *bins_large; }; static inline void cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor, cache_bin_t *bins_small, cache_bin_t *bins_large) { ql_elm_new(descriptor, link); descriptor->bins_small = bins_small; descriptor->bins_large = bins_large; } JEMALLOC_ALWAYS_INLINE void * cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { void *ret; bin->ncached--; /* * Check for both bin->ncached == 0 and ncached < low_water * in a single branch. */ if (unlikely(bin->ncached <= bin->low_water)) { bin->low_water = bin->ncached; if (bin->ncached == -1) { bin->ncached = 0; *success = false; return NULL; } } /* * success (instead of ret) should be checked upon the return of this * function. We avoid checking (ret == NULL) because there is never a * null stored on the avail stack (which is unknown to the compiler), * and eagerly checking ret would cause pipeline stall (waiting for the * cacheline). */ *success = true; ret = *(bin->avail - (bin->ncached + 1)); return ret; } JEMALLOC_ALWAYS_INLINE bool cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) { if (unlikely(bin->ncached == bin_info->ncached_max)) { return false; } assert(bin->ncached < bin_info->ncached_max); bin->ncached++; *(bin->avail - bin->ncached) = ptr; return true; } #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/ckh.h010064400007650000024000000063011344617474000211000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_CKH_H #define JEMALLOC_INTERNAL_CKH_H #include "jemalloc/internal/tsd.h" /* Cuckoo hashing implementation. Skip to the end for the interface. */ /******************************************************************************/ /* INTERNAL DEFINITIONS -- IGNORE */ /******************************************************************************/ /* Maintain counters used to get an idea of performance. 
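 *
 * Illustrative usage of the public interface declared at the bottom of
 * this header (added sketch; assumes the usual jemalloc convention that
 * these functions return true on failure):
 *
 *   ckh_t ckh;
 *   if (!ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
 *           ckh_insert(tsd, &ckh, "key", "datum");
 *           void *datum;
 *           if (!ckh_search(&ckh, "key", NULL, &datum)) {
 *                   // datum == "datum"
 *           }
 *           ckh_delete(tsd, &ckh);
 *   }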
*/ /* #define CKH_COUNT */ /* Print counter values in ckh_delete() (requires CKH_COUNT). */ /* #define CKH_VERBOSE */ /* * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. */ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) /* Typedefs to allow easy function pointer passing. */ typedef void ckh_hash_t (const void *, size_t[2]); typedef bool ckh_keycomp_t (const void *, const void *); /* Hash table cell. */ typedef struct { const void *key; const void *data; } ckhc_t; /* The hash table itself. */ typedef struct { #ifdef CKH_COUNT /* Counters used to get an idea of performance. */ uint64_t ngrows; uint64_t nshrinks; uint64_t nshrinkfails; uint64_t ninserts; uint64_t nrelocs; #endif /* Used for pseudo-random number generation. */ uint64_t prng_state; /* Total number of items. */ size_t count; /* * Minimum and current number of hash table buckets. There are * 2^LG_CKH_BUCKET_CELLS cells per bucket. */ unsigned lg_minbuckets; unsigned lg_curbuckets; /* Hash and comparison functions. */ ckh_hash_t *hash; ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. */ ckhc_t *tab; } ckh_t; /******************************************************************************/ /* BEGIN PUBLIC API */ /******************************************************************************/ /* Lifetime management. Minitems is the initial capacity. */ bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); void ckh_delete(tsd_t *tsd, ckh_t *ckh); /* Get the number of elements in the set. */ size_t ckh_count(ckh_t *ckh); /* * To iterate over the elements in the table, initialize *tabind to 0 and call * this function until it returns true. Each call that returns false will * update *key and *data to the next element in the table, assuming the pointers * are non-NULL. */ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); /* * Basic hash table operations -- insert, removal, lookup. For ckh_remove and * ckh_search, key or data can be NULL. The hash-table only stores pointers to * the key and value, and doesn't do any lifetime management. */ bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data); bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); /* Some useful hash and comparison functions for strings and pointers. */ void ckh_string_hash(const void *key, size_t r_hash[2]); bool ckh_string_keycomp(const void *k1, const void *k2); void ckh_pointer_hash(const void *key, size_t r_hash[2]); bool ckh_pointer_keycomp(const void *k1, const void *k2); #endif /* JEMALLOC_INTERNAL_CKH_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/ctl.h010064400007650000024000000071071344617474000211220ustar0000000000000000#ifndef JEMALLOC_INTERNAL_CTL_H #define JEMALLOC_INTERNAL_CTL_H #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/stats.h" /* Maximum ctl tree depth. */ #define CTL_MAX_DEPTH 7 typedef struct ctl_node_s { bool named; } ctl_node_t; typedef struct ctl_named_node_s { ctl_node_t node; const char *name; /* If (nchildren == 0), this is a terminal node. 
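	 * (Illustrative, added: resolving a name such as "opt.abort" walks
	 * the named children one path component at a time; the walk ends at
	 * a node with nchildren == 0, whose ctl callback services the
	 * request.)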
*/ size_t nchildren; const ctl_node_t *children; int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, size_t); } ctl_named_node_t; typedef struct ctl_indexed_node_s { struct ctl_node_s node; const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, size_t); } ctl_indexed_node_t; typedef struct ctl_arena_stats_s { arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ size_t allocated_small; uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; bin_stats_t bstats[SC_NBINS]; arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; arena_stats_extents_t estats[SC_NPSIZES]; } ctl_arena_stats_t; typedef struct ctl_stats_s { size_t allocated; size_t active; size_t metadata; size_t metadata_thp; size_t resident; size_t mapped; size_t retained; background_thread_stats_t background_thread; mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes]; } ctl_stats_t; typedef struct ctl_arena_s ctl_arena_t; struct ctl_arena_s { unsigned arena_ind; bool initialized; ql_elm(ctl_arena_t) destroyed_link; /* Basic stats, supported even if !config_stats. */ unsigned nthreads; const char *dss; ssize_t dirty_decay_ms; ssize_t muzzy_decay_ms; size_t pactive; size_t pdirty; size_t pmuzzy; /* NULL if !config_stats. */ ctl_arena_stats_t *astats; }; typedef struct ctl_arenas_s { uint64_t epoch; unsigned narenas; ql_head(ctl_arena_t) destroyed; /* * Element 0 corresponds to merged stats for extant arenas (accessed via * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas. */ ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; } ctl_arenas_t; int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); void ctl_prefork(tsdn_t *tsdn); void ctl_postfork_parent(tsdn_t *tsdn); void ctl_postfork_child(tsdn_t *tsdn); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ ": Failure in xmallctl(\"%s\", ...)\n", \ name); \ abort(); \ } \ } while (0) #define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf(": Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ abort(); \ } \ } while (0) #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ ": Failure in xmallctlbymib()\n"); \ abort(); \ } \ } while (0) #endif /* JEMALLOC_INTERNAL_CTL_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/div.h010064400007650000024000000022241344617474000211150ustar0000000000000000#ifndef JEMALLOC_INTERNAL_DIV_H #define JEMALLOC_INTERNAL_DIV_H #include "jemalloc/internal/assert.h" /* * This module does the division that computes the index of a region in a slab, * given its offset relative to the base. * That is, given a divisor d, an n = i * d (all integers), we'll return i. * We do some pre-computation to do this more quickly than a CPU division * instruction. * We bound n < 2^32, and don't support dividing by one. 
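 *
 * Worked sketch (added; assumes the usual round-up construction
 * magic == ceil(2^32 / d)): for d == 3, magic == 0x55555556, and for
 * n == 9 the computation (9 * 0x55555556) >> 32 yields 3, recovering
 * i == n / d exactly for every n that is a multiple of d below 2^32.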
*/ typedef struct div_info_s div_info_t; struct div_info_s { uint32_t magic; #ifdef JEMALLOC_DEBUG size_t d; #endif }; void div_init(div_info_t *div_info, size_t divisor); static inline size_t div_compute(div_info_t *div_info, size_t n) { assert(n <= (uint32_t)-1); /* * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine, * the compilers I tried were all smart enough to turn this into the * appropriate "get the high 32 bits of the result of a multiply" (e.g. * mul; mov edx eax; on x86, umull on arm, etc.). */ size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32; #ifdef JEMALLOC_DEBUG assert(i * div_info->d == n); #endif return i; } #endif /* JEMALLOC_INTERNAL_DIV_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/emitter.h010064400007650000024000000317451344617474000220160ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EMITTER_H #define JEMALLOC_INTERNAL_EMITTER_H #include "jemalloc/internal/ql.h" typedef enum emitter_output_e emitter_output_t; enum emitter_output_e { emitter_output_json, emitter_output_table }; typedef enum emitter_justify_e emitter_justify_t; enum emitter_justify_e { emitter_justify_left, emitter_justify_right, /* Not for users; just to pass to internal functions. */ emitter_justify_none }; typedef enum emitter_type_e emitter_type_t; enum emitter_type_e { emitter_type_bool, emitter_type_int, emitter_type_unsigned, emitter_type_uint32, emitter_type_uint64, emitter_type_size, emitter_type_ssize, emitter_type_string, /* * A title is a column title in a table; it's just a string, but it's * not quoted. */ emitter_type_title, }; typedef struct emitter_col_s emitter_col_t; struct emitter_col_s { /* Filled in by the user. */ emitter_justify_t justify; int width; emitter_type_t type; union { bool bool_val; int int_val; unsigned unsigned_val; uint32_t uint32_val; uint32_t uint32_t_val; uint64_t uint64_val; uint64_t uint64_t_val; size_t size_val; ssize_t ssize_val; const char *str_val; }; /* Filled in by initialization. */ ql_elm(emitter_col_t) link; }; typedef struct emitter_row_s emitter_row_t; struct emitter_row_s { ql_head(emitter_col_t) cols; }; typedef struct emitter_s emitter_t; struct emitter_s { emitter_output_t output; /* The output information. */ void (*write_cb)(void *, const char *); void *cbopaque; int nesting_depth; /* True if we've already emitted a value at the given depth. */ bool item_at_depth; /* True if we emitted a key and will emit corresponding value next. */ bool emitted_key; }; /* Internal convenience function. Write to the emitter the given string. */ JEMALLOC_FORMAT_PRINTF(2, 3) static inline void emitter_printf(emitter_t *emitter, const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); va_end(ap); } static inline void emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, emitter_justify_t justify, int width) { size_t written; if (justify == emitter_justify_none) { written = malloc_snprintf(out_fmt, out_size, "%%%s", fmt_specifier); } else if (justify == emitter_justify_left) { written = malloc_snprintf(out_fmt, out_size, "%%-%d%s", width, fmt_specifier); } else { written = malloc_snprintf(out_fmt, out_size, "%%%d%s", width, fmt_specifier); } /* Only happens in case of bad format string, which *we* choose. */ assert(written < out_size); } /* * Internal. Emit the given value type in the relevant encoding (so that the * bool true gets mapped to json "true", but the string "true" gets mapped to * json "\"true\"", for instance. 
* * Width is ignored if justify is emitter_justify_none. */ static inline void emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, emitter_type_t value_type, const void *value) { size_t str_written; #define BUF_SIZE 256 #define FMT_SIZE 10 /* * We dynamically generate a format string to emit, to let us use the * snprintf machinery. This is kinda hacky, but gets the job done * quickly without having to think about the various snprintf edge * cases. */ char fmt[FMT_SIZE]; char buf[BUF_SIZE]; #define EMIT_SIMPLE(type, format) \ emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width); \ emitter_printf(emitter, fmt, *(const type *)value); \ switch (value_type) { case emitter_type_bool: emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); emitter_printf(emitter, fmt, *(const bool *)value ? "true" : "false"); break; case emitter_type_int: EMIT_SIMPLE(int, "d") break; case emitter_type_unsigned: EMIT_SIMPLE(unsigned, "u") break; case emitter_type_ssize: EMIT_SIMPLE(ssize_t, "zd") break; case emitter_type_size: EMIT_SIMPLE(size_t, "zu") break; case emitter_type_string: str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", *(const char *const *)value); /* * We control the strings we output; we shouldn't get anything * anywhere near the fmt size. */ assert(str_written < BUF_SIZE); emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); emitter_printf(emitter, fmt, buf); break; case emitter_type_uint32: EMIT_SIMPLE(uint32_t, FMTu32) break; case emitter_type_uint64: EMIT_SIMPLE(uint64_t, FMTu64) break; case emitter_type_title: EMIT_SIMPLE(char *const, "s"); break; default: unreachable(); } #undef BUF_SIZE #undef FMT_SIZE } /* Internal functions. In json mode, tracks nesting state. */ static inline void emitter_nest_inc(emitter_t *emitter) { emitter->nesting_depth++; emitter->item_at_depth = false; } static inline void emitter_nest_dec(emitter_t *emitter) { emitter->nesting_depth--; emitter->item_at_depth = true; } static inline void emitter_indent(emitter_t *emitter) { int amount = emitter->nesting_depth; const char *indent_str; if (emitter->output == emitter_output_json) { indent_str = "\t"; } else { amount *= 2; indent_str = " "; } for (int i = 0; i < amount; i++) { emitter_printf(emitter, "%s", indent_str); } } static inline void emitter_json_key_prefix(emitter_t *emitter) { if (emitter->emitted_key) { emitter->emitted_key = false; return; } emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : ""); emitter_indent(emitter); } /******************************************************************************/ /* Public functions for emitter_t. */ static inline void emitter_init(emitter_t *emitter, emitter_output_t emitter_output, void (*write_cb)(void *, const char *), void *cbopaque) { emitter->output = emitter_output; emitter->write_cb = write_cb; emitter->cbopaque = cbopaque; emitter->item_at_depth = false; emitter->emitted_key = false; emitter->nesting_depth = 0; } /******************************************************************************/ /* JSON public API. */ /* * Emits a key (e.g. as appears in an object). The next json entity emitted will * be the corresponding value. 
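 *
 * Illustrative sketch (added; sz is a hypothetical size_t): in json mode,
 *
 *   emitter_json_key(emitter, "allocated");
 *   emitter_json_value(emitter, emitter_type_size, &sz);
 *
 * emits `"allocated": <sz>` at the current nesting depth.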
*/ static inline void emitter_json_key(emitter_t *emitter, const char *json_key) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "\"%s\": ", json_key); emitter->emitted_key = true; } } static inline void emitter_json_value(emitter_t *emitter, emitter_type_t value_type, const void *value) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_print_value(emitter, emitter_justify_none, -1, value_type, value); emitter->item_at_depth = true; } } /* Shorthand for calling emitter_json_key and then emitter_json_value. */ static inline void emitter_json_kv(emitter_t *emitter, const char *json_key, emitter_type_t value_type, const void *value) { emitter_json_key(emitter, json_key); emitter_json_value(emitter, value_type, value); } static inline void emitter_json_array_begin(emitter_t *emitter) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "["); emitter_nest_inc(emitter); } } /* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */ static inline void emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) { emitter_json_key(emitter, json_key); emitter_json_array_begin(emitter); } static inline void emitter_json_array_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth > 0); emitter_nest_dec(emitter); emitter_printf(emitter, "\n"); emitter_indent(emitter); emitter_printf(emitter, "]"); } } static inline void emitter_json_object_begin(emitter_t *emitter) { if (emitter->output == emitter_output_json) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "{"); emitter_nest_inc(emitter); } } /* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */ static inline void emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) { emitter_json_key(emitter, json_key); emitter_json_object_begin(emitter); } static inline void emitter_json_object_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth > 0); emitter_nest_dec(emitter); emitter_printf(emitter, "\n"); emitter_indent(emitter); emitter_printf(emitter, "}"); } } /******************************************************************************/ /* Table public API. 
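 *
 * In table mode, output is an indented key/value listing; a sketch (names
 * and values illustrative):
 *
 *   emitter_table_dict_begin(e, "arenas");
 *   unsigned n = 8;
 *   emitter_table_kv(e, "narenas", emitter_type_unsigned, &n);
 *   emitter_table_dict_end(e);
 *
 * prints:
 *
 *   arenas
 *     narenas: 8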
*/ static inline void emitter_table_dict_begin(emitter_t *emitter, const char *table_key) { if (emitter->output == emitter_output_table) { emitter_indent(emitter); emitter_printf(emitter, "%s\n", table_key); emitter_nest_inc(emitter); } } static inline void emitter_table_dict_end(emitter_t *emitter) { if (emitter->output == emitter_output_table) { emitter_nest_dec(emitter); } } static inline void emitter_table_kv_note(emitter_t *emitter, const char *table_key, emitter_type_t value_type, const void *value, const char *table_note_key, emitter_type_t table_note_value_type, const void *table_note_value) { if (emitter->output == emitter_output_table) { emitter_indent(emitter); emitter_printf(emitter, "%s: ", table_key); emitter_print_value(emitter, emitter_justify_none, -1, value_type, value); if (table_note_key != NULL) { emitter_printf(emitter, " (%s: ", table_note_key); emitter_print_value(emitter, emitter_justify_none, -1, table_note_value_type, table_note_value); emitter_printf(emitter, ")"); } emitter_printf(emitter, "\n"); } emitter->item_at_depth = true; } static inline void emitter_table_kv(emitter_t *emitter, const char *table_key, emitter_type_t value_type, const void *value) { emitter_table_kv_note(emitter, table_key, value_type, value, NULL, emitter_type_bool, NULL); } /* Write to the emitter the given string, but only in table mode. */ JEMALLOC_FORMAT_PRINTF(2, 3) static inline void emitter_table_printf(emitter_t *emitter, const char *format, ...) { if (emitter->output == emitter_output_table) { va_list ap; va_start(ap, format); malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); va_end(ap); } } static inline void emitter_table_row(emitter_t *emitter, emitter_row_t *row) { if (emitter->output != emitter_output_table) { return; } emitter_col_t *col; ql_foreach(col, &row->cols, link) { emitter_print_value(emitter, col->justify, col->width, col->type, (const void *)&col->bool_val); } emitter_table_printf(emitter, "\n"); } static inline void emitter_row_init(emitter_row_t *row) { ql_new(&row->cols); } static inline void emitter_col_init(emitter_col_t *col, emitter_row_t *row) { ql_elm_new(col, link); ql_tail_insert(&row->cols, col, link); } /******************************************************************************/ /* * Generalized public API. Emits using either JSON or table, according to * settings in the emitter_t. */ /* * Note emits a different kv pair as well, but only in table mode. Omits the * note if table_note_key is NULL. 
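 *
 * Sketch: a single call serves both output modes, e.g. (names
 * illustrative)
 *
 *   size_t allocated = 4096;
 *   emitter_kv(e, "allocated", "Allocated", emitter_type_size,
 *       &allocated);
 *
 * emits "allocated": 4096 in JSON mode and "Allocated: 4096" in table
 * mode.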
*/ static inline void emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key, emitter_type_t value_type, const void *value, const char *table_note_key, emitter_type_t table_note_value_type, const void *table_note_value) { if (emitter->output == emitter_output_json) { emitter_json_key(emitter, json_key); emitter_json_value(emitter, value_type, value); } else { emitter_table_kv_note(emitter, table_key, value_type, value, table_note_key, table_note_value_type, table_note_value); } emitter->item_at_depth = true; } static inline void emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key, emitter_type_t value_type, const void *value) { emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL, emitter_type_bool, NULL); } static inline void emitter_dict_begin(emitter_t *emitter, const char *json_key, const char *table_header) { if (emitter->output == emitter_output_json) { emitter_json_key(emitter, json_key); emitter_json_object_begin(emitter); } else { emitter_table_dict_begin(emitter, table_header); } } static inline void emitter_dict_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { emitter_json_object_end(emitter); } else { emitter_table_dict_end(emitter); } } static inline void emitter_begin(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth == 0); emitter_printf(emitter, "{"); emitter_nest_inc(emitter); } else { /* * This guarantees that we always call write_cb at least once. * This is useful if some invariant is established by each call * to write_cb, but doesn't hold initially: e.g., some buffer * holds a null-terminated string. */ emitter_printf(emitter, "%s", ""); } } static inline void emitter_end(emitter_t *emitter) { if (emitter->output == emitter_output_json) { assert(emitter->nesting_depth == 1); emitter_nest_dec(emitter); emitter_printf(emitter, "\n}\n"); } } #endif /* JEMALLOC_INTERNAL_EMITTER_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/extent_dss.h010064400007650000024000000013301344617474000225100ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H #define JEMALLOC_INTERNAL_EXTENT_DSS_H typedef enum { dss_prec_disabled = 0, dss_prec_primary = 1, dss_prec_secondary = 2, dss_prec_limit = 3 } dss_prec_t; #define DSS_PREC_DEFAULT dss_prec_secondary #define DSS_DEFAULT "secondary" extern const char *dss_prec_names[]; extern const char *opt_dss; dss_prec_t extent_dss_prec_get(void); bool extent_dss_prec_set(dss_prec_t dss_prec); void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool extent_in_dss(void *addr); bool extent_dss_mergeable(void *addr_a, void *addr_b); void extent_dss_boot(void); #endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/extent_externs.h010064400007650000024000000067431344617474000234240ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H #define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" #include "jemalloc/internal/ph.h" #include "jemalloc/internal/rtree.h" extern size_t opt_lg_extent_max_active_fit; extern rtree_t extents_rtree; extern const extent_hooks_t extent_hooks_default; extern mutex_pool_t extent_mutex_pool; extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena); void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent); extent_hooks_t *extent_hooks_get(arena_t *arena); extent_hooks_t *extent_hooks_set(tsd_t 
*tsd, arena_t *arena, extent_hooks_t *extent_hooks); #ifdef JEMALLOC_JET size_t extent_size_quantize_floor(size_t size); size_t extent_size_quantize_ceil(size_t size); #endif rb_proto(, extent_avail_, extent_tree_t, extent_t) ph_proto(, extent_heap_, extent_heap_t, extent_t) bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, bool delay_coalesce); extent_state_t extents_state_get(const extents_t *extents); size_t extents_npages_get(extents_t *extents); /* Get the number of extents in the given page size index. */ size_t extents_nextents_get(extents_t *extents, pszind_t ind); /* Get the sum total bytes of the extents in the given page size index. */ size_t extents_nbytes_get(extents_t *extents, pszind_t ind); extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); void extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent); extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min); void extents_prefork(tsdn_t *tsdn, extents_t *extents); void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents); void extents_postfork_child(tsdn_t *tsdn, extents_t *extents); extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent); void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent); void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent); bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length); extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b); bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b); bool extent_boot(void); #endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/extent_inlines.h010064400007650000024000000315341344617474000233710ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H #define JEMALLOC_INTERNAL_EXTENT_INLINES_H #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/sz.h" static inline void extent_lock(tsdn_t *tsdn, extent_t *extent) { assert(extent != NULL); mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent); } static inline void extent_unlock(tsdn_t *tsdn, extent_t *extent) { 
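	/*
	 * A note on the locking scheme (sketch): extents do not embed their
	 * own mutexes; extent_lock()/extent_unlock() key the extent address
	 * into the shared extent_mutex_pool, so lock and unlock must use the
	 * same (uintptr_t)extent value.
	 */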
assert(extent != NULL); mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent); } static inline void extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { assert(extent1 != NULL && extent2 != NULL); mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, (uintptr_t)extent2); } static inline void extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { assert(extent1 != NULL && extent2 != NULL); mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, (uintptr_t)extent2); } static inline unsigned extent_arena_ind_get(const extent_t *extent) { unsigned arena_ind = (unsigned)((extent->e_bits & EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT); assert(arena_ind < MALLOCX_ARENA_LIMIT); return arena_ind; } static inline arena_t * extent_arena_get(const extent_t *extent) { unsigned arena_ind = extent_arena_ind_get(extent); return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); } static inline szind_t extent_szind_get_maybe_invalid(const extent_t *extent) { szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >> EXTENT_BITS_SZIND_SHIFT); assert(szind <= SC_NSIZES); return szind; } static inline szind_t extent_szind_get(const extent_t *extent) { szind_t szind = extent_szind_get_maybe_invalid(extent); assert(szind < SC_NSIZES); /* Never call when "invalid". */ return szind; } static inline size_t extent_usize_get(const extent_t *extent) { return sz_index2size(extent_szind_get(extent)); } static inline unsigned extent_binshard_get(const extent_t *extent) { unsigned binshard = (unsigned)((extent->e_bits & EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT); assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); return binshard; } static inline size_t extent_sn_get(const extent_t *extent) { return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >> EXTENT_BITS_SN_SHIFT); } static inline extent_state_t extent_state_get(const extent_t *extent) { return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >> EXTENT_BITS_STATE_SHIFT); } static inline bool extent_zeroed_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >> EXTENT_BITS_ZEROED_SHIFT); } static inline bool extent_committed_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >> EXTENT_BITS_COMMITTED_SHIFT); } static inline bool extent_dumpable_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >> EXTENT_BITS_DUMPABLE_SHIFT); } static inline bool extent_slab_get(const extent_t *extent) { return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >> EXTENT_BITS_SLAB_SHIFT); } static inline unsigned extent_nfree_get(const extent_t *extent) { assert(extent_slab_get(extent)); return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >> EXTENT_BITS_NFREE_SHIFT); } static inline void * extent_base_get(const extent_t *extent) { assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || !extent_slab_get(extent)); return PAGE_ADDR2BASE(extent->e_addr); } static inline void * extent_addr_get(const extent_t *extent) { assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || !extent_slab_get(extent)); return extent->e_addr; } static inline size_t extent_size_get(const extent_t *extent) { return (extent->e_size_esn & EXTENT_SIZE_MASK); } static inline size_t extent_esn_get(const extent_t *extent) { return (extent->e_size_esn & EXTENT_ESN_MASK); } static inline size_t extent_bsize_get(const extent_t *extent) { return extent->e_bsize; } static inline 
void * extent_before_get(const extent_t *extent) { return (void *)((uintptr_t)extent_base_get(extent) - PAGE); } static inline void * extent_last_get(const extent_t *extent) { return (void *)((uintptr_t)extent_base_get(extent) + extent_size_get(extent) - PAGE); } static inline void * extent_past_get(const extent_t *extent) { return (void *)((uintptr_t)extent_base_get(extent) + extent_size_get(extent)); } static inline arena_slab_data_t * extent_slab_data_get(extent_t *extent) { assert(extent_slab_get(extent)); return &extent->e_slab_data; } static inline const arena_slab_data_t * extent_slab_data_get_const(const extent_t *extent) { assert(extent_slab_get(extent)); return &extent->e_slab_data; } static inline prof_tctx_t * extent_prof_tctx_get(const extent_t *extent) { return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx, ATOMIC_ACQUIRE); } static inline nstime_t extent_prof_alloc_time_get(const extent_t *extent) { return extent->e_alloc_time; } static inline void extent_arena_set(extent_t *extent, arena_t *arena) { unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U << MALLOCX_ARENA_BITS) - 1); extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) | ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT); } static inline void extent_binshard_set(extent_t *extent, unsigned binshard) { /* The assertion assumes szind is set already. */ assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) | ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT); } static inline void extent_addr_set(extent_t *extent, void *addr) { extent->e_addr = addr; } static inline void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) { assert(extent_base_get(extent) == extent_addr_get(extent)); if (alignment < PAGE) { unsigned lg_range = LG_PAGE - lg_floor(CACHELINE_CEILING(alignment)); size_t r; if (!tsdn_null(tsdn)) { tsd_t *tsd = tsdn_tsd(tsdn); r = (size_t)prng_lg_range_u64( tsd_offset_statep_get(tsd), lg_range); } else { r = prng_lg_range_zu( &extent_arena_get(extent)->offset_state, lg_range, true); } uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - lg_range); extent->e_addr = (void *)((uintptr_t)extent->e_addr + random_offset); assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) == extent->e_addr); } } static inline void extent_size_set(extent_t *extent, size_t size) { assert((size & ~EXTENT_SIZE_MASK) == 0); extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK); } static inline void extent_esn_set(extent_t *extent, size_t esn) { extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn & EXTENT_ESN_MASK); } static inline void extent_bsize_set(extent_t *extent, size_t bsize) { extent->e_bsize = bsize; } static inline void extent_szind_set(extent_t *extent, szind_t szind) { assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) | ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT); } static inline void extent_nfree_set(extent_t *extent, unsigned nfree) { assert(extent_slab_get(extent)); extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) | ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) { /* The assertion assumes szind is set already. 
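 * Both nfree and binshard live in e_bits, so the update below is a single
 * read-modify-write: clear both field ranges at once, then OR in the new
 * values.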
*/ assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); extent->e_bits = (extent->e_bits & (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) | ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) | ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_nfree_inc(extent_t *extent) { assert(extent_slab_get(extent)); extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_nfree_dec(extent_t *extent) { assert(extent_slab_get(extent)); extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_nfree_sub(extent_t *extent, uint64_t n) { assert(extent_slab_get(extent)); extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT); } static inline void extent_sn_set(extent_t *extent, size_t sn) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) | ((uint64_t)sn << EXTENT_BITS_SN_SHIFT); } static inline void extent_state_set(extent_t *extent, extent_state_t state) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) | ((uint64_t)state << EXTENT_BITS_STATE_SHIFT); } static inline void extent_zeroed_set(extent_t *extent, bool zeroed) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) | ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT); } static inline void extent_committed_set(extent_t *extent, bool committed) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) | ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT); } static inline void extent_dumpable_set(extent_t *extent, bool dumpable) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) | ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT); } static inline void extent_slab_set(extent_t *extent, bool slab) { extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) | ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT); } static inline void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) { atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE); } static inline void extent_prof_alloc_time_set(extent_t *extent, nstime_t t) { nstime_copy(&extent->e_alloc_time, &t); } static inline void extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size, bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, bool committed, bool dumpable) { assert(addr == PAGE_ADDR2BASE(addr) || !slab); extent_arena_set(extent, arena); extent_addr_set(extent, addr); extent_size_set(extent, size); extent_slab_set(extent, slab); extent_szind_set(extent, szind); extent_sn_set(extent, sn); extent_state_set(extent, state); extent_zeroed_set(extent, zeroed); extent_committed_set(extent, committed); extent_dumpable_set(extent, dumpable); ql_elm_new(extent, ql_link); if (config_prof) { extent_prof_tctx_set(extent, NULL); } } static inline void extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) { extent_arena_set(extent, NULL); extent_addr_set(extent, addr); extent_bsize_set(extent, bsize); extent_slab_set(extent, false); extent_szind_set(extent, SC_NSIZES); extent_sn_set(extent, sn); extent_state_set(extent, extent_state_active); extent_zeroed_set(extent, true); extent_committed_set(extent, true); extent_dumpable_set(extent, true); } static inline void extent_list_init(extent_list_t *list) { ql_new(list); } static inline extent_t * extent_list_first(const extent_list_t *list) { return ql_first(list); } static inline extent_t * extent_list_last(const extent_list_t *list) { return ql_last(list, ql_link); } static inline void extent_list_append(extent_list_t *list, extent_t *extent) { ql_tail_insert(list, 
extent, ql_link); } static inline void extent_list_prepend(extent_list_t *list, extent_t *extent) { ql_head_insert(list, extent, ql_link); } static inline void extent_list_replace(extent_list_t *list, extent_t *to_remove, extent_t *to_insert) { ql_after_insert(to_remove, to_insert, ql_link); ql_remove(list, to_remove, ql_link); } static inline void extent_list_remove(extent_list_t *list, extent_t *extent) { ql_remove(list, extent, ql_link); } static inline int extent_sn_comp(const extent_t *a, const extent_t *b) { size_t a_sn = extent_sn_get(a); size_t b_sn = extent_sn_get(b); return (a_sn > b_sn) - (a_sn < b_sn); } static inline int extent_esn_comp(const extent_t *a, const extent_t *b) { size_t a_esn = extent_esn_get(a); size_t b_esn = extent_esn_get(b); return (a_esn > b_esn) - (a_esn < b_esn); } static inline int extent_ad_comp(const extent_t *a, const extent_t *b) { uintptr_t a_addr = (uintptr_t)extent_addr_get(a); uintptr_t b_addr = (uintptr_t)extent_addr_get(b); return (a_addr > b_addr) - (a_addr < b_addr); } static inline int extent_ead_comp(const extent_t *a, const extent_t *b) { uintptr_t a_eaddr = (uintptr_t)a; uintptr_t b_eaddr = (uintptr_t)b; return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); } static inline int extent_snad_comp(const extent_t *a, const extent_t *b) { int ret; ret = extent_sn_comp(a, b); if (ret != 0) { return ret; } ret = extent_ad_comp(a, b); return ret; } static inline int extent_esnead_comp(const extent_t *a, const extent_t *b) { int ret; ret = extent_esn_comp(a, b); if (ret != 0) { return ret; } ret = extent_ead_comp(a, b); return ret; } #endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/extent_mmap.h010064400007650000024000000005101344617474000226500ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H #define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H extern bool opt_retain; void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool extent_dalloc_mmap(void *addr, size_t size); #endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/extent_structs.h010064400007650000024000000175611344617474000234430ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H #define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/ph.h" #include "jemalloc/internal/sc.h" typedef enum { extent_state_active = 0, extent_state_dirty = 1, extent_state_muzzy = 2, extent_state_retained = 3 } extent_state_t; /* Extent (span of pages). Use accessor functions for e_* fields. */ struct extent_s { /* * Bitfield containing several fields: * * a: arena_ind * b: slab * c: committed * d: dumpable * z: zeroed * t: state * i: szind * f: nfree * s: bin_shard * n: sn * * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa * * arena_ind: Arena from which this extent came, or all 1 bits if * unassociated. * * slab: The slab flag indicates whether the extent is used for a slab * of small regions. This helps differentiate small size classes, * and it indicates whether interior pointers can be looked up via * iealloc(). 
* * committed: The committed flag indicates whether physical memory is * committed to the extent, whether explicitly or implicitly * as on a system that overcommits and satisfies physical * memory needs on demand via soft page faults. * * dumpable: The dumpable flag indicates whether or not we've set the * memory in question to be dumpable. Note that this * interacts somewhat subtly with user-specified extent hooks, * since we don't know if *they* are fiddling with * dumpability (in which case, we don't want to undo whatever * they're doing). To deal with this scenario, we: * - Make dumpable false only for memory allocated with the * default hooks. * - Only allow memory to go from non-dumpable to dumpable, * and only once. * - Never make the OS call to allow dumping when the * dumpable bit is already set. * These three constraints mean that we will never * accidentally dump user memory that the user meant to set * nondumpable with their extent hooks. * * * zeroed: The zeroed flag is used by extent recycling code to track * whether memory is zero-filled. * * state: The state flag is an extent_state_t. * * szind: The szind flag indicates usable size class index for * allocations residing in this extent, regardless of whether the * extent is a slab. Extent size and usable size often differ * even for non-slabs, either due to sz_large_pad or promotion of * sampled small regions. * * nfree: Number of free regions in slab. * * bin_shard: the shard of the bin from which this extent came. * * sn: Serial number (potentially non-unique). * * Serial numbers may wrap around if !opt_retain, but as long as * comparison functions fall back on address comparison for equal * serial numbers, stable (if imperfect) ordering is maintained. * * Serial numbers may not be unique even in the absence of * wrap-around, e.g. when splitting an extent and assigning the same * serial number to both resulting adjacent extents. 
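 *
 * Fields are accessed by masking and shifting e_bits; e.g. (sketch)
 * extent_state_get() above reduces to:
 *
 *   (extent_state_t)((e_bits & EXTENT_BITS_STATE_MASK) >>
 *       EXTENT_BITS_STATE_SHIFT)
 *
 * with each mask built from its width/shift pair by the MASK() macro
 * defined below.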
*/ uint64_t e_bits; #define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) #define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS #define EXTENT_BITS_ARENA_SHIFT 0 #define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT) #define EXTENT_BITS_SLAB_WIDTH 1 #define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT) #define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT) #define EXTENT_BITS_COMMITTED_WIDTH 1 #define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT) #define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT) #define EXTENT_BITS_DUMPABLE_WIDTH 1 #define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT) #define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT) #define EXTENT_BITS_ZEROED_WIDTH 1 #define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT) #define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT) #define EXTENT_BITS_STATE_WIDTH 2 #define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT) #define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT) #define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES) #define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT) #define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT) #define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1) #define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT) #define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT) #define EXTENT_BITS_BINSHARD_WIDTH 6 #define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT) #define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT) #define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT) #define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT) /* Pointer to the extent that this structure is responsible for. */ void *e_addr; union { /* * Extent size and serial number associated with the extent * structure (different than the serial number for the extent at * e_addr). * * ssssssss [...] ssssssss ssssnnnn nnnnnnnn */ size_t e_size_esn; #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1)) #define EXTENT_ESN_MASK ((size_t)PAGE-1) /* Base extent size, which may not be a multiple of PAGE. */ size_t e_bsize; }; /* * List linkage, used by a variety of lists: * - bin_t's slabs_full * - extents_t's LRU * - stashed dirty extents * - arena's large allocations */ ql_elm(extent_t) ql_link; /* * Linkage for per size class sn/address-ordered heaps, and * for extent_avail */ phn(extent_t) ph_link; union { /* Small region slab metadata. */ arena_slab_data_t e_slab_data; /* Profiling data, used for large objects. */ struct { /* Time when this was allocated. */ nstime_t e_alloc_time; /* Points to a prof_tctx_t. */ atomic_p_t e_prof_tctx; }; }; }; typedef ql_head(extent_t) extent_list_t; typedef ph(extent_t) extent_tree_t; typedef ph(extent_t) extent_heap_t; /* Quantized collection of extents, with built-in LRU queue. */ struct extents_s { malloc_mutex_t mtx; /* * Quantized per size class heaps of extents. * * Synchronization: mtx. 
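 *
 * Heaps (and the parallel nextents/nbytes counters below) are indexed by
 * page size class index (pszind_t); extents_nextents_get() and
 * extents_nbytes_get() are the per-index readers.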
*/ extent_heap_t heaps[SC_NPSIZES + 1]; atomic_zu_t nextents[SC_NPSIZES + 1]; atomic_zu_t nbytes[SC_NPSIZES + 1]; /* * Bitmap for which set bits correspond to non-empty heaps. * * Synchronization: mtx. */ bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)]; /* * LRU of all extents in heaps. * * Synchronization: mtx. */ extent_list_t lru; /* * Page sum for all extents in heaps. * * The synchronization here is a little tricky. Modifications to npages * must hold mtx, but reads need not (though, a reader who sees npages * without holding the mutex can't assume anything about the rest of the * state of the extents_t). */ atomic_zu_t npages; /* All stored extents must be in the same state. */ extent_state_t state; /* * If true, delay coalescing until eviction; otherwise coalesce during * deallocation. */ bool delay_coalesce; }; #endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/extent_types.h010064400007650000024000000007011344617474000230640ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H #define JEMALLOC_INTERNAL_EXTENT_TYPES_H typedef struct extent_s extent_t; typedef struct extents_s extents_t; #define EXTENT_HOOKS_INITIALIZER NULL /* * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit) * is the max ratio between the size of the active extent and the new extent. */ #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 #endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/hash.h010064400007650000024000000176231344617474000212660ustar0000000000000000#ifndef JEMALLOC_INTERNAL_HASH_H #define JEMALLOC_INTERNAL_HASH_H #include "jemalloc/internal/assert.h" /* * The following hash function is based on MurmurHash3, placed into the public * domain by Austin Appleby. See https://github.com/aappleby/smhasher for * details. */ /******************************************************************************/ /* Internal implementation. */ static inline uint32_t hash_rotl_32(uint32_t x, int8_t r) { return ((x << r) | (x >> (32 - r))); } static inline uint64_t hash_rotl_64(uint64_t x, int8_t r) { return ((x << r) | (x >> (64 - r))); } static inline uint32_t hash_get_block_32(const uint32_t *p, int i) { /* Handle unaligned read. */ if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { uint32_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); return ret; } return p[i]; } static inline uint64_t hash_get_block_64(const uint64_t *p, int i) { /* Handle unaligned read.
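 * The memcpy() below is the portable idiom for an unaligned load: it
 * avoids dereferencing a misaligned uint64_t pointer (which is undefined
 * behavior), and compilers typically lower it to a single load on targets
 * that allow unaligned access.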
*/ if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { uint64_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); return ret; } return p[i]; } static inline uint32_t hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } static inline uint64_t hash_fmix_64(uint64_t k) { k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; return k; } static inline uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; uint32_t h1 = seed; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 13); h1 = h1*5 + 0xe6546b64; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*4); uint32_t k1 = 0; switch (len & 3) { case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h1 = hash_fmix_32(h1); return h1; } static inline void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 = 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 19); h1 += h2; h1 = h1*5 + 0x561ccd1b; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; h2 = hash_rotl_32(h2, 17); h2 += h3; h2 = h2*5 + 0x0bcaa747; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; h3 = hash_rotl_32(h3, 15); h3 += h4; h3 = h3*5 + 0x96cd1c35; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; h4 = hash_rotl_32(h4, 13); h4 += h1; h4 = h4*5 + 0x32ac3b17; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*16); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch (len & 15) { case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; JEMALLOC_FALLTHROUGH case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; JEMALLOC_FALLTHROUGH case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; JEMALLOC_FALLTHROUGH case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH case 1: k1 ^= 
tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; JEMALLOC_FALLTHROUGH } } /* finalization */ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = hash_fmix_32(h1); h2 = hash_fmix_32(h2); h3 = hash_fmix_32(h3); h4 = hash_fmix_32(h4); h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; r_out[0] = (((uint64_t) h2) << 32) | h1; r_out[1] = (((uint64_t) h4) << 32) | h3; } static inline void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; const uint64_t c1 = KQU(0x87c37b91114253d5); const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { const uint64_t *blocks = (const uint64_t *) (data); int i; for (i = 0; i < nblocks; i++) { uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; h1 = hash_rotl_64(h1, 27); h1 += h2; h1 = h1*5 + 0x52dce729; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; h2 = hash_rotl_64(h2, 31); h2 += h1; h2 = h2*5 + 0x38495ab5; } } /* tail */ { const uint8_t *tail = (const uint8_t*)(data + nblocks*16); uint64_t k1 = 0; uint64_t k2 = 0; switch (len & 15) { case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; JEMALLOC_FALLTHROUGH case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = hash_fmix_64(h1); h2 = hash_fmix_64(h2); h1 += h2; h2 += h1; r_out[0] = h1; r_out[1] = h2; } /******************************************************************************/ /* API. */ static inline void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { assert(len <= INT_MAX); /* Unfortunate implementation limitation. */ #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else { uint64_t hashes[2]; hash_x86_128(key, (int)len, seed, hashes); r_hash[0] = (size_t)hashes[0]; r_hash[1] = (size_t)hashes[1]; } #endif } #endif /* JEMALLOC_INTERNAL_HASH_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/hook.h010064400007650000024000000126471344617474000213050ustar0000000000000000#ifndef JEMALLOC_INTERNAL_HOOK_H #define JEMALLOC_INTERNAL_HOOK_H #include "jemalloc/internal/tsd.h" /* * This API is *extremely* experimental, and may get ripped out, changed in API- * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc. 
* * It allows hooking the stateful parts of the API to see changes as they * happen. * * Allocation hooks are called after the allocation is done, free hooks are * called before the free is done, and expand hooks are called after the * allocation is expanded. * * For realloc and rallocx, if the expansion happens in place, the expansion * hook is called. If it is moved, then the alloc hook is called on the new * location, and then the free hook is called on the old location (i.e. both * hooks are invoked in between the alloc and the dalloc). * * If we return NULL from OOM, then usize might not be trustworthy. Calling * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0) * only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0), * and only calls the alloc hook). * * Reentrancy: * Reentrancy is guarded against from within the hook implementation. If you * call allocator functions from within a hook, the hooks will not be invoked * again. * Threading: * The installation of a hook synchronizes with all its uses. If you can * prove the installation of a hook happens-before a jemalloc entry point, * then the hook will get invoked (unless there's a racing removal). * * Hook insertion appears to be atomic at a per-thread level (i.e. if a thread * allocates and has the alloc hook invoked, then a subsequent free on the * same thread will also have the free hook invoked). * * The *removal* of a hook does *not* block until all threads are done with * the hook. Hook authors have to be resilient to this, and need some * out-of-band mechanism for cleaning up any dynamically allocated memory * associated with their hook. * Ordering: * Order of hook execution is unspecified, and may be different than insertion * order. */ #define HOOK_MAX 4 enum hook_alloc_e { hook_alloc_malloc, hook_alloc_posix_memalign, hook_alloc_aligned_alloc, hook_alloc_calloc, hook_alloc_memalign, hook_alloc_valloc, hook_alloc_mallocx, /* The reallocating functions have both alloc and dalloc variants */ hook_alloc_realloc, hook_alloc_rallocx, }; /* * We put the enum typedef after the enum, since this file may get included by * jemalloc_cpp.cpp, and C++ disallows enum forward declarations. */ typedef enum hook_alloc_e hook_alloc_t; enum hook_dalloc_e { hook_dalloc_free, hook_dalloc_dallocx, hook_dalloc_sdallocx, /* * The dalloc halves of reallocation (not called if in-place expansion * happens). */ hook_dalloc_realloc, hook_dalloc_rallocx, }; typedef enum hook_dalloc_e hook_dalloc_t; enum hook_expand_e { hook_expand_realloc, hook_expand_rallocx, hook_expand_xallocx, }; typedef enum hook_expand_e hook_expand_t; typedef void (*hook_alloc)( void *extra, hook_alloc_t type, void *result, uintptr_t result_raw, uintptr_t args_raw[3]); typedef void (*hook_dalloc)( void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]); typedef void (*hook_expand)( void *extra, hook_expand_t type, void *address, size_t old_usize, size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); typedef struct hooks_s hooks_t; struct hooks_s { hook_alloc alloc_hook; hook_dalloc dalloc_hook; hook_expand expand_hook; void *extra; }; /* * Begin implementation details; everything above this point might one day live * in a public API. Everything below this point never will. */ /* * The realloc pathways haven't gotten any refactoring love in a while, and it's * fairly difficult to pass information from the entry point to the hooks. 
We * put the information the hooks will need into a struct to encapsulate * everything. * * Many of these pathways are force-inlined, so that the compiler can avoid * materializing this struct until we hit an extern arena function. For fairly * goofy reasons, *many* of the realloc paths hit an extern arena function. * These paths are cold enough that it doesn't matter; eventually, we should * rewrite the realloc code to make the expand-in-place and the * free-then-realloc paths more orthogonal, at which point we don't need to * spread the hook logic all over the place. */ typedef struct hook_ralloc_args_s hook_ralloc_args_t; struct hook_ralloc_args_s { /* I.e. as opposed to rallocx. */ bool is_realloc; /* * The expand hook takes 4 arguments, even if only 3 are actually used; * we add an extra one in case the user decides to memcpy without * looking too closely at the hooked function. */ uintptr_t args[4]; }; /* * Returns an opaque handle to be used when removing the hook. NULL means that * we couldn't install the hook. */ bool hook_boot(); void *hook_install(tsdn_t *tsdn, hooks_t *hooks); /* Uninstalls the hook with the handle previously returned from hook_install. */ void hook_remove(tsdn_t *tsdn, void *opaque); /* Hooks */ void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, uintptr_t args_raw[3]); void hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]); void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize, size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); #endif /* JEMALLOC_INTERNAL_HOOK_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_decls.h010064400007650000024000000042141344617474000251700ustar0000000000000000#ifndef JEMALLOC_INTERNAL_DECLS_H #define JEMALLOC_INTERNAL_DECLS_H #include #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" # ifdef _WIN64 # if LG_VADDR <= 32 # error Generate the headers using x64 vcargs # endif # else # if LG_VADDR > 32 # undef LG_VADDR # define LG_VADDR 32 # endif # endif #else # include # include # if !defined(__pnacl__) && !defined(__native_client__) # include # if !defined(SYS_write) && defined(__NR_write) # define SYS_write __NR_write # endif # if defined(SYS_open) && defined(__aarch64__) /* Android headers may define SYS_open to __NR_open even though * __NR_open may not exist on AArch64 (superseded by __NR_openat). */ # undef SYS_open # endif # include # endif # include # ifdef __FreeBSD__ # include # endif # include # ifdef JEMALLOC_OS_UNFAIR_LOCK # include # endif # ifdef JEMALLOC_GLIBC_MALLOC_HOOK # include # endif # include # include # include # ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME # include # endif #endif #include #include #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #ifndef SSIZE_MAX # define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) #endif #include #include #include #include #include #include #ifndef offsetof # define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) #endif #include #include #include #ifdef _MSC_VER # include typedef intptr_t ssize_t; # define PATH_MAX 1024 # define STDERR_FILENO 2 # define __func__ __FUNCTION__ # ifdef JEMALLOC_HAS_RESTRICT # define restrict __restrict # endif /* Disable warnings about deprecated system functions.
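 * (MSVC warning C4996 is the "deprecated declaration" warning; it flags
 * POSIX-named and "unsafe" CRT functions.)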
*/ # pragma warning(disable: 4996) #if _MSC_VER < 1800 static int isblank(int c) { return (c == '\t' || c == ' '); } #endif #else # include #endif #include #endif /* JEMALLOC_INTERNAL_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_defs.h010064400007650000024000000257431344617503000250220ustar0000000000000000/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ #ifndef JEMALLOC_INTERNAL_DEFS_H_ #define JEMALLOC_INTERNAL_DEFS_H_ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use * multiple allocators simultaneously. */ #define JEMALLOC_PREFIX "je_" #define JEMALLOC_CPREFIX "JE_" /* * Define overrides for non-standard allocator-related functions if they are * present on the system. */ /* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */ /* #undef JEMALLOC_OVERRIDE___LIBC_FREE */ /* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */ /* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */ /* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */ /* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */ /* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */ /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. * For shared libraries, symbol visibility mechanisms prevent these symbols * from being exported, but for static libraries, naming collisions are a real * possibility. */ #define JEMALLOC_PRIVATE_NAMESPACE je_ /* * Hyper-threaded CPUs may need a special instruction inside spin loops in * order to yield to another virtual CPU. */ #define CPU_SPINWAIT __asm__ volatile("pause") /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ #define HAVE_CPU_SPINWAIT 1 /* * Number of significant bits in virtual addresses. This may be less than the * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 * bits are the same as bit 47. */ #define LG_VADDR 48 /* Defined if C11 atomics are available. */ #define JEMALLOC_C11_ATOMICS 1 /* Defined if GCC __atomic atomics are available. */ #define JEMALLOC_GCC_ATOMIC_ATOMICS 1 /* and the 8-bit variant support. */ #define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 /* Defined if GCC __sync atomics are available. */ #define JEMALLOC_GCC_SYNC_ATOMICS 1 /* and the 8-bit variant support. */ #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 /* * Defined if __builtin_clz() and __builtin_clzl() are available. */ #define JEMALLOC_HAVE_BUILTIN_CLZ /* * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. */ #define JEMALLOC_OS_UNFAIR_LOCK /* Defined if syscall(2) is usable. */ /* #undef JEMALLOC_USE_SYSCALL */ /* * Defined if secure_getenv(3) is available. */ /* #undef JEMALLOC_HAVE_SECURE_GETENV */ /* * Defined if issetugid(2) is available. */ #define JEMALLOC_HAVE_ISSETUGID /* Defined if pthread_atfork(3) is available. */ #define JEMALLOC_HAVE_PTHREAD_ATFORK /* Defined if pthread_setname_np(3) is available. */ /* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ /* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. */ /* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC */ /* * Defined if mach_absolute_time() is available. */ #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc * bootstrapping will cause recursion into the pthreads library. 
Therefore, if * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in * malloc_tsd. */ /* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ /* * Defined if threaded initialization is known to be safe on this platform. * Among other things, it must be possible to initialize a mutex without * triggering allocation in order for threaded allocation to be safe. */ /* #undef JEMALLOC_THREADED_INIT */ /* * Defined if the pthreads implementation defines * _pthread_mutex_init_calloc_cb(), in which case the function is used in order * to avoid recursive allocation during mutex initialization. */ /* #undef JEMALLOC_MUTEX_INIT_CB */ /* Non-empty if the tls_model attribute is supported. */ #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. */ /* #undef JEMALLOC_DEBUG */ /* JEMALLOC_STATS enables statistics calculation. */ #define JEMALLOC_STATS /* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */ /* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */ /* JEMALLOC_PROF enables allocation profiling. */ /* #undef JEMALLOC_PROF */ /* Use libunwind for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_LIBUNWIND */ /* Use libgcc for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_LIBGCC */ /* Use gcc intrinsics for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_GCC */ /* * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage * segment (DSS). */ /* #undef JEMALLOC_DSS */ /* Support memory filling (junk/zero). */ #define JEMALLOC_FILL /* Support utrace(2)-based tracing. */ /* #undef JEMALLOC_UTRACE */ /* Support optional abort() on OOM. */ /* #undef JEMALLOC_XMALLOC */ /* Support lazy locking (avoid locking unless a second thread is launched). */ /* #undef JEMALLOC_LAZY_LOCK */ /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). */ /* #undef LG_QUANTUM */ /* One page is 2^LG_PAGE bytes. */ #define LG_PAGE 12 /* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require * explicit huge page support are separately configured. */ #define LG_HUGEPAGE 21 /* * If defined, adjacent virtual memory mappings with identical attributes * automatically coalesce, and they fragment when changes are made to subranges. * This is the normal order of things for mmap()/munmap(), but on Windows * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. * mappings do *not* coalesce/fragment. */ #define JEMALLOC_MAPS_COALESCE /* * If defined, retain memory for later reuse by default rather than using e.g. * munmap() to unmap freed extents. This is enabled on 64-bit Linux because * common sequences of mmap()/munmap() calls will cause virtual memory map * holes. */ /* #undef JEMALLOC_RETAIN */ /* TLS is used to map arenas and magazine caches to threads. */ /* #undef JEMALLOC_TLS */ /* * Used to mark unreachable code to quiet "end of non-void" compiler warnings. * Don't use this directly; instead use unreachable() from util.h */ #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable /* * ffs*() functions to use for bitmapping. Don't use these directly; instead, * use ffs_*() from util.h. */ #define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll #define JEMALLOC_INTERNAL_FFSL __builtin_ffsl #define JEMALLOC_INTERNAL_FFS __builtin_ffs /* * popcount*() functions to use for bitmapping. 
*/ #define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl #define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount /* * If defined, explicitly attempt to more uniformly distribute large allocation * pointer alignments across all cache indices. */ #define JEMALLOC_CACHE_OBLIVIOUS /* * If defined, enable logging facilities. We make this a configure option to * avoid taking extra branches everywhere. */ /* #undef JEMALLOC_LOG */ /* * If defined, use readlinkat() (instead of readlink()) to follow * /etc/malloc_conf. */ /* #undef JEMALLOC_READLINKAT */ /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ #define JEMALLOC_ZONE /* * Methods for determining whether the OS overcommits. * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's * /proc/sys/vm.overcommit_memory file. * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. */ /* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ /* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */ /* Defined if madvise(2) is available. */ #define JEMALLOC_HAVE_MADVISE /* * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE * arguments to madvise(2). */ /* #undef JEMALLOC_HAVE_MADVISE_HUGE */ /* * Methods for purging unused pages differ between operating systems. * * madvise(..., MADV_FREE) : This marks pages as being unused, such that they * will be discarded rather than swapped out. * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is * defined, this immediately discards pages, * such that new pages will be demand-zeroed if * the address region is later touched; * otherwise this behaves similarly to * MADV_FREE, though typically with higher * system overhead. */ #define JEMALLOC_PURGE_MADVISE_FREE #define JEMALLOC_PURGE_MADVISE_DONTNEED /* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */ /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ /* #undef JEMALLOC_DEFINE_MADVISE_FREE */ /* * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. */ /* #undef JEMALLOC_MADVISE_DONTDUMP */ /* * Defined if transparent huge pages (THPs) are supported via the * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. */ /* #undef JEMALLOC_THP */ /* Define if operating system has alloca.h header. */ /* #undef JEMALLOC_HAS_ALLOCA_H */ /* C99 restrict keyword supported. */ #define JEMALLOC_HAS_RESTRICT 1 /* For use by hash code. */ /* #undef JEMALLOC_BIG_ENDIAN */ /* sizeof(int) == 2^LG_SIZEOF_INT. */ #define LG_SIZEOF_INT 2 /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #define LG_SIZEOF_LONG 3 /* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ #define LG_SIZEOF_LONG_LONG 3 /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ #define LG_SIZEOF_INTMAX_T 3 /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ /* #undef JEMALLOC_GLIBC_MALLOC_HOOK */ /* glibc memalign hook. */ /* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */ /* pthread support */ #define JEMALLOC_HAVE_PTHREAD /* dlsym() support */ #define JEMALLOC_HAVE_DLSYM /* Adaptive mutex support in pthreads. */ /* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */ /* GNU specific sched_getcpu support */ /* #undef JEMALLOC_HAVE_SCHED_GETCPU */ /* GNU specific sched_setaffinity support */ /* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */ /* * If defined, all the features necessary for background threads are present. */ /* #undef JEMALLOC_BACKGROUND_THREAD */ /* * If defined, jemalloc symbols are not exported (doesn't work when * JEMALLOC_PREFIX is not defined). 
*/ /* #undef JEMALLOC_EXPORT */ /* config.malloc_conf options string. */ #define JEMALLOC_CONFIG_MALLOC_CONF "" /* If defined, jemalloc takes the malloc/free/etc. symbol names. */ /* #undef JEMALLOC_IS_MALLOC */ /* * Defined if strerror_r returns char * if _GNU_SOURCE is defined. */ /* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */ /* Performs additional size-matching sanity checks when defined. */ /* #undef JEMALLOC_EXTRA_SIZE_CHECK */ #endif /* JEMALLOC_INTERNAL_DEFS_H_ */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_defs.h.in010064400007650000024000000244561344617474000254360ustar0000000000000000#ifndef JEMALLOC_INTERNAL_DEFS_H_ #define JEMALLOC_INTERNAL_DEFS_H_ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use * multiple allocators simultaneously. */ #undef JEMALLOC_PREFIX #undef JEMALLOC_CPREFIX /* * Define overrides for non-standard allocator-related functions if they are * present on the system. */ #undef JEMALLOC_OVERRIDE___LIBC_CALLOC #undef JEMALLOC_OVERRIDE___LIBC_FREE #undef JEMALLOC_OVERRIDE___LIBC_MALLOC #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN #undef JEMALLOC_OVERRIDE___LIBC_REALLOC #undef JEMALLOC_OVERRIDE___LIBC_VALLOC #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. * For shared libraries, symbol visibility mechanisms prevent these symbols * from being exported, but for static libraries, naming collisions are a real * possibility. */ #undef JEMALLOC_PRIVATE_NAMESPACE /* * Hyper-threaded CPUs may need a special instruction inside spin loops in * order to yield to another virtual CPU. */ #undef CPU_SPINWAIT /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ #undef HAVE_CPU_SPINWAIT /* * Number of significant bits in virtual addresses. This may be less than the * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 * bits are the same as bit 47. */ #undef LG_VADDR /* Defined if C11 atomics are available. */ #undef JEMALLOC_C11_ATOMICS /* Defined if GCC __atomic atomics are available. */ #undef JEMALLOC_GCC_ATOMIC_ATOMICS /* and the 8-bit variant support. */ #undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS /* Defined if GCC __sync atomics are available. */ #undef JEMALLOC_GCC_SYNC_ATOMICS /* and the 8-bit variant support. */ #undef JEMALLOC_GCC_U8_SYNC_ATOMICS /* * Defined if __builtin_clz() and __builtin_clzl() are available. */ #undef JEMALLOC_HAVE_BUILTIN_CLZ /* * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. */ #undef JEMALLOC_OS_UNFAIR_LOCK /* Defined if syscall(2) is usable. */ #undef JEMALLOC_USE_SYSCALL /* * Defined if secure_getenv(3) is available. */ #undef JEMALLOC_HAVE_SECURE_GETENV /* * Defined if issetugid(2) is available. */ #undef JEMALLOC_HAVE_ISSETUGID /* Defined if pthread_atfork(3) is available. */ #undef JEMALLOC_HAVE_PTHREAD_ATFORK /* Defined if pthread_setname_np(3) is available. */ #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. */ #undef JEMALLOC_HAVE_CLOCK_MONOTONIC /* * Defined if mach_absolute_time() is available. */ #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME /* * Defined if _malloc_thread_cleanup() exists. 
At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc * bootstrapping will cause recursion into the pthreads library. Therefore, if * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in * malloc_tsd. */ #undef JEMALLOC_MALLOC_THREAD_CLEANUP /* * Defined if threaded initialization is known to be safe on this platform. * Among other things, it must be possible to initialize a mutex without * triggering allocation in order for threaded allocation to be safe. */ #undef JEMALLOC_THREADED_INIT /* * Defined if the pthreads implementation defines * _pthread_mutex_init_calloc_cb(), in which case the function is used in order * to avoid recursive allocation during mutex initialization. */ #undef JEMALLOC_MUTEX_INIT_CB /* Non-empty if the tls_model attribute is supported. */ #undef JEMALLOC_TLS_MODEL /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. */ #undef JEMALLOC_DEBUG /* JEMALLOC_STATS enables statistics calculation. */ #undef JEMALLOC_STATS /* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */ #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API /* JEMALLOC_PROF enables allocation profiling. */ #undef JEMALLOC_PROF /* Use libunwind for profile backtracing if defined. */ #undef JEMALLOC_PROF_LIBUNWIND /* Use libgcc for profile backtracing if defined. */ #undef JEMALLOC_PROF_LIBGCC /* Use gcc intrinsics for profile backtracing if defined. */ #undef JEMALLOC_PROF_GCC /* * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage * segment (DSS). */ #undef JEMALLOC_DSS /* Support memory filling (junk/zero). */ #undef JEMALLOC_FILL /* Support utrace(2)-based tracing. */ #undef JEMALLOC_UTRACE /* Support optional abort() on OOM. */ #undef JEMALLOC_XMALLOC /* Support lazy locking (avoid locking unless a second thread is launched). */ #undef JEMALLOC_LAZY_LOCK /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). */ #undef LG_QUANTUM /* One page is 2^LG_PAGE bytes. */ #undef LG_PAGE /* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require * explicit huge page support are separately configured. */ #undef LG_HUGEPAGE /* * If defined, adjacent virtual memory mappings with identical attributes * automatically coalesce, and they fragment when changes are made to subranges. * This is the normal order of things for mmap()/munmap(), but on Windows * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. * mappings do *not* coalesce/fragment. */ #undef JEMALLOC_MAPS_COALESCE /* * If defined, retain memory for later reuse by default rather than using e.g. * munmap() to unmap freed extents. This is enabled on 64-bit Linux because * common sequences of mmap()/munmap() calls will cause virtual memory map * holes. */ #undef JEMALLOC_RETAIN /* TLS is used to map arenas and magazine caches to threads. */ #undef JEMALLOC_TLS /* * Used to mark unreachable code to quiet "end of non-void" compiler warnings. * Don't use this directly; instead use unreachable() from util.h */ #undef JEMALLOC_INTERNAL_UNREACHABLE /* * ffs*() functions to use for bitmapping. Don't use these directly; instead, * use ffs_*() from util.h. */ #undef JEMALLOC_INTERNAL_FFSLL #undef JEMALLOC_INTERNAL_FFSL #undef JEMALLOC_INTERNAL_FFS /* * popcount*() functions to use for bitmapping. 
*/ #undef JEMALLOC_INTERNAL_POPCOUNTL #undef JEMALLOC_INTERNAL_POPCOUNT /* * If defined, explicitly attempt to more uniformly distribute large allocation * pointer alignments across all cache indices. */ #undef JEMALLOC_CACHE_OBLIVIOUS /* * If defined, enable logging facilities. We make this a configure option to * avoid taking extra branches everywhere. */ #undef JEMALLOC_LOG /* * If defined, use readlinkat() (instead of readlink()) to follow * /etc/malloc_conf. */ #undef JEMALLOC_READLINKAT /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ #undef JEMALLOC_ZONE /* * Methods for determining whether the OS overcommits. * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's * /proc/sys/vm.overcommit_memory file. * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. */ #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY /* Defined if madvise(2) is available. */ #undef JEMALLOC_HAVE_MADVISE /* * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE * arguments to madvise(2). */ #undef JEMALLOC_HAVE_MADVISE_HUGE /* * Methods for purging unused pages differ between operating systems. * * madvise(..., MADV_FREE) : This marks pages as being unused, such that they * will be discarded rather than swapped out. * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is * defined, this immediately discards pages, * such that new pages will be demand-zeroed if * the address region is later touched; * otherwise this behaves similarly to * MADV_FREE, though typically with higher * system overhead. */ #undef JEMALLOC_PURGE_MADVISE_FREE #undef JEMALLOC_PURGE_MADVISE_DONTNEED #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ #undef JEMALLOC_DEFINE_MADVISE_FREE /* * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. */ #undef JEMALLOC_MADVISE_DONTDUMP /* * Defined if transparent huge pages (THPs) are supported via the * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. */ #undef JEMALLOC_THP /* Define if operating system has alloca.h header. */ #undef JEMALLOC_HAS_ALLOCA_H /* C99 restrict keyword supported. */ #undef JEMALLOC_HAS_RESTRICT /* For use by hash code. */ #undef JEMALLOC_BIG_ENDIAN /* sizeof(int) == 2^LG_SIZEOF_INT. */ #undef LG_SIZEOF_INT /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #undef LG_SIZEOF_LONG /* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ #undef LG_SIZEOF_LONG_LONG /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ #undef LG_SIZEOF_INTMAX_T /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ #undef JEMALLOC_GLIBC_MALLOC_HOOK /* glibc memalign hook. */ #undef JEMALLOC_GLIBC_MEMALIGN_HOOK /* pthread support */ #undef JEMALLOC_HAVE_PTHREAD /* dlsym() support */ #undef JEMALLOC_HAVE_DLSYM /* Adaptive mutex support in pthreads. */ #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP /* GNU specific sched_getcpu support */ #undef JEMALLOC_HAVE_SCHED_GETCPU /* GNU specific sched_setaffinity support */ #undef JEMALLOC_HAVE_SCHED_SETAFFINITY /* * If defined, all the features necessary for background threads are present. */ #undef JEMALLOC_BACKGROUND_THREAD /* * If defined, jemalloc symbols are not exported (doesn't work when * JEMALLOC_PREFIX is not defined). */ #undef JEMALLOC_EXPORT /* config.malloc_conf options string. */ #undef JEMALLOC_CONFIG_MALLOC_CONF /* If defined, jemalloc takes the malloc/free/etc. symbol names. 
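 * (That is the unprefixed build: without --with-jemalloc-prefix, the public
 * entry points are exported as plain malloc(), free(), realloc(), etc., and
 * override the system allocator; a prefixed build exports e.g. je_malloc()
 * instead.)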
*/
#undef JEMALLOC_IS_MALLOC

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

/* Performs additional size-matching sanity checks when defined. */
#undef JEMALLOC_EXTRA_SIZE_CHECK

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_externs.h010064400007650000024000000032651344617474000255730ustar0000000000000000#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/tsd_types.h"

/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;

/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;

/* Base index for manual arenas. */
extern unsigned manual_arena_base;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern atomic_p_t arenas[];

void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
bool malloc_initialized(void);

#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_includes.h010064400007650000024000000103031344617474000257000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INCLUDES_H
#define JEMALLOC_INTERNAL_INCLUDES_H

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.
 *
 * Historically, we dealt with this by splitting each header into four
 * sections (types, structs, externs, and inlines), and included each header
 * file multiple times in this file, picking out the portion we want on each
 * pass using the following #defines:
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 *
 * We're moving toward a world in which the dependencies are explicit; each
 * file will #include the headers it depends on (rather than relying on them
 * being implicitly available via this file including every header file in the
 * project).
 *
 * We're now in an intermediate state: we've broken up the header files to
 * avoid having to include each one multiple times, but have not yet moved the
 * dependency information into the header files (i.e.
we still rely on the * ordering in this file to ensure all a header's dependencies are available in * its translation unit). Each component is now broken up into multiple header * files, corresponding to the sections above (e.g. instead of "foo.h", we now * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h"). * * Those files which have been converted to explicitly include their * inter-component dependencies are now in the initial HERMETIC HEADERS * section. All headers may still rely on jemalloc_preamble.h (which, by fiat, * must be included first in every translation unit) for system headers and * global jemalloc definitions, however. */ /******************************************************************************/ /* TYPES */ /******************************************************************************/ #include "jemalloc/internal/extent_types.h" #include "jemalloc/internal/base_types.h" #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/tcache_types.h" #include "jemalloc/internal/prof_types.h" /******************************************************************************/ /* STRUCTS */ /******************************************************************************/ #include "jemalloc/internal/arena_structs_a.h" #include "jemalloc/internal/extent_structs.h" #include "jemalloc/internal/base_structs.h" #include "jemalloc/internal/prof_structs.h" #include "jemalloc/internal/arena_structs_b.h" #include "jemalloc/internal/tcache_structs.h" #include "jemalloc/internal/background_thread_structs.h" /******************************************************************************/ /* EXTERNS */ /******************************************************************************/ #include "jemalloc/internal/jemalloc_internal_externs.h" #include "jemalloc/internal/extent_externs.h" #include "jemalloc/internal/base_externs.h" #include "jemalloc/internal/arena_externs.h" #include "jemalloc/internal/large_externs.h" #include "jemalloc/internal/tcache_externs.h" #include "jemalloc/internal/prof_externs.h" #include "jemalloc/internal/background_thread_externs.h" /******************************************************************************/ /* INLINES */ /******************************************************************************/ #include "jemalloc/internal/jemalloc_internal_inlines_a.h" #include "jemalloc/internal/base_inlines.h" /* * Include portions of arena code interleaved with tcache code in order to * resolve circular dependencies. 
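 *
 * (For contrast, under the historical multi-pass scheme described at the top
 * of this file, a component was pulled in one section at a time; an
 * illustrative sketch, not code that exists here today:
 *
 *   #define JEMALLOC_H_INLINES
 *   #include "jemalloc/internal/arena.h"
 *   #undef JEMALLOC_H_INLINES
 *
 * The per-section headers included in this file replace that pattern.)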
*/ #include "jemalloc/internal/prof_inlines_a.h" #include "jemalloc/internal/arena_inlines_a.h" #include "jemalloc/internal/extent_inlines.h" #include "jemalloc/internal/jemalloc_internal_inlines_b.h" #include "jemalloc/internal/tcache_inlines.h" #include "jemalloc/internal/arena_inlines_b.h" #include "jemalloc/internal/jemalloc_internal_inlines_c.h" #include "jemalloc/internal/prof_inlines_b.h" #include "jemalloc/internal/background_thread_inlines.h" #endif /* JEMALLOC_INTERNAL_INCLUDES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_inlines_a.h010064400007650000024000000106231344617474000260400ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INLINES_A_H #define JEMALLOC_INTERNAL_INLINES_A_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/ticker.h" JEMALLOC_ALWAYS_INLINE malloc_cpuid_t malloc_getcpu(void) { assert(have_percpu_arena); #if defined(_WIN32) return GetCurrentProcessorNumber(); #elif defined(JEMALLOC_HAVE_SCHED_GETCPU) return (malloc_cpuid_t)sched_getcpu(); #else not_reached(); return -1; #endif } /* Return the chosen arena index based on current cpu. */ JEMALLOC_ALWAYS_INLINE unsigned percpu_arena_choose(void) { assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); malloc_cpuid_t cpuid = malloc_getcpu(); assert(cpuid >= 0); unsigned arena_ind; if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / 2)) { arena_ind = cpuid; } else { assert(opt_percpu_arena == per_phycpu_arena); /* Hyper threads on the same physical CPU share arena. */ arena_ind = cpuid - ncpus / 2; } return arena_ind; } /* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */ JEMALLOC_ALWAYS_INLINE unsigned percpu_arena_ind_limit(percpu_arena_mode_t mode) { assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode)); if (mode == per_phycpu_arena && ncpus > 1) { if (ncpus % 2) { /* This likely means a misconfig. */ return ncpus / 2 + 1; } return ncpus / 2; } else { return ncpus; } } static inline arena_tdata_t * arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) { arena_tdata_t *tdata; arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); if (unlikely(arenas_tdata == NULL)) { /* arenas_tdata hasn't been initialized yet. */ return arena_tdata_get_hard(tsd, ind); } if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { /* * ind is invalid, cache is old (too small), or tdata to be * initialized. */ return (refresh_if_missing ? 
        arena_tdata_get_hard(tsd, ind) : NULL);
    }

    tdata = &arenas_tdata[ind];
    if (likely(tdata != NULL) || !refresh_if_missing) {
        return tdata;
    }
    return arena_tdata_get_hard(tsd, ind);
}

static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
    arena_t *ret;

    assert(ind < MALLOCX_ARENA_LIMIT);

    ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
    if (unlikely(ret == NULL)) {
        if (init_if_missing) {
            ret = arena_init(tsdn, ind,
                (extent_hooks_t *)&extent_hooks_default);
        }
    }
    return ret;
}

static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
    arena_tdata_t *tdata;

    tdata = arena_tdata_get(tsd, ind, true);
    if (unlikely(tdata == NULL)) {
        return NULL;
    }
    return &tdata->decay_ticker;
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
    assert(binind < SC_NBINS);
    return &tcache->bins_small[binind];
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
    assert(binind >= SC_NBINS && binind < nhbins);
    return &tcache->bins_large[binind - SC_NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
    /*
     * The thread-specific auto tcache might be unavailable: 1) during tcache
     * initialization, or 2) if it was disabled through the
     * thread.tcache.enabled mallctl or config options.  This check covers all
     * cases.
     */
    if (likely(tsd_tcache_enabled_get(tsd))) {
        /* Associated arena == NULL implies tcache init in progress. */
        assert(tsd_tcachep_get(tsd)->arena == NULL ||
            tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != NULL);
        return true;
    }
    return false;
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
    if (!tcache_available(tsd)) {
        return NULL;
    }
    return tsd_tcachep_get(tsd);
}

static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
    /* arena is the current context.  Reentry from a0 is not allowed. */
    assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

    bool fast = tsd_fast(tsd);
    assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
    ++*tsd_reentrancy_levelp_get(tsd);
    if (fast) {
        /* Prepare slow path for reentrancy. */
        tsd_slow_update(tsd);
        assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
    }
}

static inline void
post_reentrancy(tsd_t *tsd) {
    int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
    assert(*reentrancy_level > 0);
    if (--*reentrancy_level == 0) {
        tsd_slow_update(tsd);
    }
}

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_inlines_b.h010064400007650000024000000043671344617474000260500ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H

#include "jemalloc/internal/rtree.h"

/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
    arena_t *ret;

    if (arena != NULL) {
        return arena;
    }

    /* During reentrancy, arena 0 is the safest bet. */
    if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
        return arena_get(tsd_tsdn(tsd), 0, true);
    }

    ret = internal ?
tsd_iarena_get(tsd) : tsd_arena_get(tsd); if (unlikely(ret == NULL)) { ret = arena_choose_hard(tsd, internal); assert(ret); if (tcache_available(tsd)) { tcache_t *tcache = tcache_get(tsd); if (tcache->arena != NULL) { /* See comments in tcache_data_init().*/ assert(tcache->arena == arena_get(tsd_tsdn(tsd), 0, false)); if (tcache->arena != ret) { tcache_arena_reassociate(tsd_tsdn(tsd), tcache, ret); } } else { tcache_arena_associate(tsd_tsdn(tsd), tcache, ret); } } } /* * Note that for percpu arena, if the current arena is outside of the * auto percpu arena range, (i.e. thread is assigned to a manually * managed arena), then percpu arena is skipped. */ if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) && !internal && (arena_ind_get(ret) < percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd != tsd_tsdn(tsd))) { unsigned ind = percpu_arena_choose(); if (arena_ind_get(ret) != ind) { percpu_arena_update(tsd, ind); ret = tsd_arena_get(tsd); } ret->last_thd = tsd_tsdn(tsd); } return ret; } static inline arena_t * arena_choose(tsd_t *tsd, arena_t *arena) { return arena_choose_impl(tsd, arena, false); } static inline arena_t * arena_ichoose(tsd_t *tsd, arena_t *arena) { return arena_choose_impl(tsd, arena, true); } static inline bool arena_is_auto(arena_t *arena) { assert(narenas_auto > 0); return (arena_ind_get(arena) < manual_arena_base); } JEMALLOC_ALWAYS_INLINE extent_t * iealloc(tsdn_t *tsdn, const void *ptr) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); } #endif /* JEMALLOC_INTERNAL_INLINES_B_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_inlines_c.h010064400007650000024000000161211344617474000260410ustar0000000000000000#ifndef JEMALLOC_INTERNAL_INLINES_C_H #define JEMALLOC_INTERNAL_INLINES_C_H #include "jemalloc/internal/hook.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/witness.h" /* * Translating the names of the 'i' functions: * Abbreviations used in the first part of the function name (before * alloc/dalloc) describe what that function accomplishes: * a: arena (query) * s: size (query, or sized deallocation) * e: extent (query) * p: aligned (allocates) * vs: size (query, without knowing that the pointer is into the heap) * r: rallocx implementation * x: xallocx implementation * Abbreviations used in the second part of the function name (after * alloc/dalloc) describe the arguments it takes * z: whether to return zeroed memory * t: accepts a tcache_t * parameter * m: accepts an arena_t * parameter */ JEMALLOC_ALWAYS_INLINE arena_t * iaalloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); return arena_aalloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE size_t isalloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); return arena_salloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE void * iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool is_internal, arena_t *arena, bool slow_path) { void *ret; assert(!is_internal || tcache == NULL); assert(!is_internal || arena == NULL || arena_is_auto(arena)); if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); } ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); if (config_stats && is_internal && likely(ret != NULL)) { arena_internal_add(iaalloc(tsdn, ret), 
isalloc(tsdn, ret)); } return ret; } JEMALLOC_ALWAYS_INLINE void * ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) { return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false, NULL, slow_path); } JEMALLOC_ALWAYS_INLINE void * ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_internal, arena_t *arena) { void *ret; assert(usize != 0); assert(usize == sz_sa2u(usize, alignment)); assert(!is_internal || tcache == NULL); assert(!is_internal || arena == NULL || arena_is_auto(arena)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); if (config_stats && is_internal && likely(ret != NULL)) { arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); } return ret; } JEMALLOC_ALWAYS_INLINE void * ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena); } JEMALLOC_ALWAYS_INLINE void * ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, tcache_get(tsd), false, NULL); } JEMALLOC_ALWAYS_INLINE size_t ivsalloc(tsdn_t *tsdn, const void *ptr) { return arena_vsalloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) { assert(ptr != NULL); assert(!is_internal || tcache == NULL); assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (config_stats && is_internal) { arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr)); } if (!is_internal && !tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { assert(tcache == NULL); } arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path); } JEMALLOC_ALWAYS_INLINE void idalloc(tsd_t *tsd, void *ptr) { idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true); } JEMALLOC_ALWAYS_INLINE void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); } JEMALLOC_ALWAYS_INLINE void * iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); void *p; size_t usize, copysize; usize = sz_sa2u(size, alignment); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return NULL; } p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); if (p == NULL) { return NULL; } /* * Copy at most size bytes (not size+extra), since the caller has no * expectation that the extra bytes will be reliably preserved. */ copysize = (size < oldsize) ? size : oldsize; memcpy(p, ptr, copysize); hook_invoke_alloc(hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p, hook_args->args); hook_invoke_dalloc(hook_args->is_realloc ? 
hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); return p; } /* * is_realloc threads through the knowledge of whether or not this call comes * from je_realloc (as opposed to je_rallocx); this ensures that we pass the * correct entry point into any hooks. * Note that these functions are all force-inlined, so no actual bool gets * passed-around anywhere. */ JEMALLOC_ALWAYS_INLINE void * iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args) { assert(ptr != NULL); assert(size != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* * Existing object alignment is inadequate; allocate new space * and copy. */ return iralloct_realign(tsdn, ptr, oldsize, size, alignment, zero, tcache, arena, hook_args); } return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero, tcache, hook_args); } JEMALLOC_ALWAYS_INLINE void * iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, hook_ralloc_args_t *hook_args) { return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero, tcache_get(tsd), NULL, hook_args); } JEMALLOC_ALWAYS_INLINE bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, size_t *newsize) { assert(ptr != NULL); assert(size != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* Existing object alignment is inadequate. */ *newsize = oldsize; return true; } return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero, newsize); } #endif /* JEMALLOC_INTERNAL_INLINES_C_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_macros.h010064400007650000024000000077701344617474000253740ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MACROS_H #define JEMALLOC_INTERNAL_MACROS_H #ifdef JEMALLOC_DEBUG # define JEMALLOC_ALWAYS_INLINE static inline #else # define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline #endif #ifdef _MSC_VER # define inline _inline #endif #define UNUSED JEMALLOC_ATTR(unused) #define ZU(z) ((size_t)z) #define ZD(z) ((ssize_t)z) #define QU(q) ((uint64_t)q) #define QD(q) ((int64_t)q) #define KZU(z) ZU(z##ULL) #define KZD(z) ZD(z##LL) #define KQU(q) QU(q##ULL) #define KQD(q) QI(q##LL) #ifndef __DECONST # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif #if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus) # define restrict #endif /* Various function pointers are static and immutable except during testing. */ #ifdef JEMALLOC_JET # define JET_MUTABLE #else # define JET_MUTABLE const #endif #define JEMALLOC_VA_ARGS_HEAD(head, ...) head #define JEMALLOC_VA_ARGS_TAIL(head, ...) 
__VA_ARGS__ #if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \ && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7) #define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough); #else #define JEMALLOC_FALLTHROUGH /* falls through */ #endif /* Diagnostic suppression macros */ #if defined(_MSC_VER) && !defined(__clang__) # define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push)) # define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop)) # define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W)) # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS # define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN # define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS /* #pragma GCC diagnostic first appeared in gcc 4.6. */ #elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \ (__GNUC_MINOR__ > 5)))) || defined(__clang__) /* * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang * diagnostic suppression macros and should not be used anywhere else. */ # define JEMALLOC_PRAGMA__(X) _Pragma(#X) # define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push) # define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop) # define JEMALLOC_DIAGNOSTIC_IGNORE(W) \ JEMALLOC_PRAGMA__(GCC diagnostic ignored W) /* * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and * all clang versions up to version 7 (currently trunk, unreleased). This macro * suppresses the warning for the affected compiler versions only. */ # if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \ defined(__clang__) # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \ JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers") # else # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS # endif # define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \ JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits") # define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \ JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter") # if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7) # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \ JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=") # else # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN # endif # define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \ JEMALLOC_DIAGNOSTIC_PUSH \ JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER #else # define JEMALLOC_DIAGNOSTIC_PUSH # define JEMALLOC_DIAGNOSTIC_POP # define JEMALLOC_DIAGNOSTIC_IGNORE(W) # define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS # define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS # define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN # define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS #endif /* * Disables spurious diagnostics for all headers. Since these headers are not * included by users directly, it does not affect their diagnostic settings. */ JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS #endif /* JEMALLOC_INTERNAL_MACROS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_internal_types.h010064400007650000024000000066611344617474000252520ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TYPES_H #define JEMALLOC_INTERNAL_TYPES_H #include "jemalloc/internal/quantum.h" /* Page size index type. */ typedef unsigned pszind_t; /* Size class index type. */ typedef unsigned szind_t; /* Processor / core id type. 
*/ typedef int malloc_cpuid_t; /* * Flags bits: * * a: arena * t: tcache * 0: unused * z: zero * n: alignment * * aaaaaaaa aaaatttt tttttttt 0znnnnnn */ #define MALLOCX_ARENA_BITS 12 #define MALLOCX_TCACHE_BITS 12 #define MALLOCX_LG_ALIGN_BITS 6 #define MALLOCX_ARENA_SHIFT 20 #define MALLOCX_TCACHE_SHIFT 8 #define MALLOCX_ARENA_MASK \ (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT) /* NB: Arena index bias decreases the maximum number of arenas by 1. */ #define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1) #define MALLOCX_TCACHE_MASK \ (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT) #define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3) #define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ #define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) #define MALLOCX_ALIGN_GET(flags) \ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) #define MALLOCX_ZERO_GET(flags) \ ((bool)(flags & MALLOCX_ZERO)) #define MALLOCX_TCACHE_GET(flags) \ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2) #define MALLOCX_ARENA_GET(flags) \ (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) /* Smallest size class to support. */ #define TINY_MIN (1U << LG_TINY_MIN) #define LONG ((size_t)(1U << LG_SIZEOF_LONG)) #define LONG_MASK (LONG - 1) /* Return the smallest long multiple that is >= a. */ #define LONG_CEILING(a) \ (((a) + LONG_MASK) & ~LONG_MASK) #define SIZEOF_PTR (1U << LG_SIZEOF_PTR) #define PTR_MASK (SIZEOF_PTR - 1) /* Return the smallest (void *) multiple that is >= a. */ #define PTR_CEILING(a) \ (((a) + PTR_MASK) & ~PTR_MASK) /* * Maximum size of L1 cache line. This is used to avoid cache line aliasing. * In addition, this controls the spacing of cacheline-spaced size classes. * * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can * only handle raw constants. */ #define LG_CACHELINE 6 #define CACHELINE 64 #define CACHELINE_MASK (CACHELINE - 1) /* Return the smallest cacheline multiple that is >= s. */ #define CACHELINE_CEILING(s) \ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) /* Return the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2BASE(a, alignment) \ ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) /* Return the offset between a and the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2OFFSET(a, alignment) \ ((size_t)((uintptr_t)(a) & (alignment - 1))) /* Return the smallest alignment multiple that is >= s. */ #define ALIGNMENT_CEILING(s, alignment) \ (((s) + (alignment - 1)) & ((~(alignment)) + 1)) /* Declare a variable-length array. 
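 *
 * Illustrative sketch (not from jemalloc): the hypothetical use
 *
 *   VARIABLE_ARRAY(bool, touched, nregs);
 *
 * expands to a C99 VLA (bool touched[(nregs)]) when the compiler supports
 * one, and otherwise to a pointer backed by alloca(), so the storage is
 * stack-allocated and needs no explicit free in either case.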
*/
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
       type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif

#endif /* JEMALLOC_INTERNAL_TYPES_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_preamble.h010064400007650000024000000067131344617502700237760ustar0000000000000000#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  undef JEMALLOC_IS_MALLOC
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) je_##n
#  include "../jemalloc.h"
#endif

#if defined(JEMALLOC_OSATOMIC)
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif

#include "jemalloc/internal/jemalloc_internal_macros.h"

/*
 * Note that the ordering matters here; the hook itself is name-mangled.  We
 * want the inclusion of hooks to happen early, so that we hook as much as
 * possible.
 */
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
#  ifndef JEMALLOC_JET
#    include "jemalloc/internal/private_namespace.h"
#  else
#    include "jemalloc/internal/private_namespace_jet.h"
#  endif
#endif
#include "jemalloc/internal/test_hooks.h"

#ifdef JEMALLOC_DEFINE_MADVISE_FREE
#  define JEMALLOC_MADV_FREE 8
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool have_madvise_huge =
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;
/*
 * Undocumented, for jemalloc development use only at the moment.  See the
 * note in jemalloc/internal/log.h.
 */
static const bool config_log =
#ifdef JEMALLOC_LOG
    true
#else
    false
#endif
    ;
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
    true
#else
    false
#endif
    ;
/*
 * Undocumented, and not recommended; the application should take full
 * responsibility for tracking provenance.
 */
static const bool force_ivsalloc =
#ifdef JEMALLOC_FORCE_IVSALLOC
    true
#else
    false
#endif
    ;
static const bool have_background_thread =
#ifdef JEMALLOC_BACKGROUND_THREAD
    true
#else
    false
#endif
    ;

#endif /* JEMALLOC_PREAMBLE_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/jemalloc_preamble.h.in010064400007650000024000000067731344617474000244060ustar0000000000000000#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  undef JEMALLOC_IS_MALLOC
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif

#if defined(JEMALLOC_OSATOMIC)
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif

#include "jemalloc/internal/jemalloc_internal_macros.h"

/*
 * Note that the ordering matters here; the hook itself is name-mangled.  We
 * want the inclusion of hooks to happen early, so that we hook as much as
 * possible.
 */
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
#  ifndef JEMALLOC_JET
#    include "jemalloc/internal/private_namespace.h"
#  else
#    include "jemalloc/internal/private_namespace_jet.h"
#  endif
#endif
#include "jemalloc/internal/test_hooks.h"

#ifdef JEMALLOC_DEFINE_MADVISE_FREE
#  define JEMALLOC_MADV_FREE 8
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool have_madvise_huge =
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;
/*
 * Undocumented, for jemalloc development use only at the moment.  See the
 * note in jemalloc/internal/log.h.
 */
static const bool config_log =
#ifdef JEMALLOC_LOG
    true
#else
    false
#endif
    ;
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
    true
#else
    false
#endif
    ;
/*
 * Undocumented, and not recommended; the application should take full
 * responsibility for tracking provenance.
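 *
 * (Roughly: with JEMALLOC_FORCE_IVSALLOC defined, size queries on the
 * deallocation paths go through ivsalloc(), which looks the pointer up in
 * jemalloc's extent metadata instead of trusting that it came from jemalloc.)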
*/ static const bool force_ivsalloc = #ifdef JEMALLOC_FORCE_IVSALLOC true #else false #endif ; static const bool have_background_thread = #ifdef JEMALLOC_BACKGROUND_THREAD true #else false #endif ; #endif /* JEMALLOC_PREAMBLE_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/large_externs.h010064400007650000024000000026251344617474000232020ustar0000000000000000#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H #define JEMALLOC_INTERNAL_LARGE_EXTERNS_H #include "jemalloc/internal/hook.h" void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero); bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, size_t usize_max, bool zero); void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args); typedef void (large_dalloc_junk_t)(void *, size_t); extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk; typedef void (large_dalloc_maybe_junk_t)(void *, size_t); extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk; void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent); void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent); void large_dalloc(tsdn_t *tsdn, extent_t *extent); size_t large_salloc(tsdn_t *tsdn, const extent_t *extent); prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); nstime_t large_prof_alloc_time_get(const extent_t *extent); void large_prof_alloc_time_set(extent_t *extent, nstime_t time); #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/log.h010064400007650000024000000072521344617474000211220ustar0000000000000000#ifndef JEMALLOC_INTERNAL_LOG_H #define JEMALLOC_INTERNAL_LOG_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" #ifdef JEMALLOC_LOG # define JEMALLOC_LOG_VAR_BUFSIZE 1000 #else # define JEMALLOC_LOG_VAR_BUFSIZE 1 #endif #define JEMALLOC_LOG_BUFSIZE 4096 /* * The log malloc_conf option is a '|'-delimited list of log_var name segments * which should be logged. The names are themselves hierarchical, with '.' as * the delimiter (a "segment" is just a prefix in the log namespace). So, if * you have: * * log("arena", "log msg for arena"); // 1 * log("arena.a", "log msg for arena.a"); // 2 * log("arena.b", "log msg for arena.b"); // 3 * log("arena.a.a", "log msg for arena.a.a"); // 4 * log("extent.a", "log msg for extent.a"); // 5 * log("extent.b", "log msg for extent.b"); // 6 * * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and * 6 will print at runtime. You can enable logging from all log vars by * writing "log=.". * * None of this should be regarded as a stable API for right now. It's intended * as a debugging interface, to let us keep around some of our printf-debugging * statements. */ extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; extern atomic_b_t log_init_done; typedef struct log_var_s log_var_t; struct log_var_s { /* * Lowest bit is "inited", second lowest is "enabled". Putting them in * a single word lets us avoid any fences on weak architectures. 
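 *
 * (The encoded states are enumerated just below: LOG_NOT_INITIALIZED,
 * LOG_INITIALIZED_NOT_ENABLED, and LOG_ENABLED.)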
*/
    atomic_u_t state;
    const char *name;
};

#define LOG_NOT_INITIALIZED 0U
#define LOG_INITIALIZED_NOT_ENABLED 1U
#define LOG_ENABLED 2U

#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}

/*
 * Returns the value we should assume for state (which is not necessarily
 * accurate; if logging is done before logging has finished initializing, then
 * we default to doing the safe thing by logging everything).
 */
unsigned log_var_update_state(log_var_t *log_var);

/* We factor out the metadata management to allow us to test more easily. */
#define log_do_begin(log_var) \
    if (config_log) { \
        unsigned log_state = atomic_load_u(&(log_var).state, \
            ATOMIC_RELAXED); \
        if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
            log_state = log_var_update_state(&(log_var)); \
            assert(log_state != LOG_NOT_INITIALIZED); \
        } \
        if (log_state == LOG_ENABLED) { \
            {
/* User code executes here. */
#define log_do_end(log_var) \
            } \
        } \
    }

/*
 * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
 * preprocessing.  To work around this, we take all potential extra arguments
 * in a var-args function.  Since a varargs macro needs at least one argument
 * in the "...", we accept the format string there, and require that the first
 * argument in this "..." is a const char *.
 */
static inline void
log_impl_varargs(const char *name, ...) {
    char buf[JEMALLOC_LOG_BUFSIZE];
    va_list ap;

    va_start(ap, name);
    const char *format = va_arg(ap, const char *);
    size_t dst_offset = 0;
    dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
    dst_offset += malloc_vsnprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
    dst_offset += malloc_snprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
    va_end(ap);

    malloc_write(buf);
}

/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
#define LOG(log_var_str, ...) \
    do { \
        static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
        log_do_begin(log_var) \
        log_impl_varargs((log_var).name, __VA_ARGS__); \
        log_do_end(log_var) \
    } while (0)

#endif /* JEMALLOC_INTERNAL_LOG_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/malloc_io.h010064400007650000024000000056511344617474000223000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H

#ifdef _WIN32
#  ifdef _WIN64
#    define FMT64_PREFIX "ll"
#    define FMTPTR_PREFIX "ll"
#  else
#    define FMT64_PREFIX "ll"
#    define FMTPTR_PREFIX ""
#  endif
#  define FMTd32 "d"
#  define FMTu32 "u"
#  define FMTx32 "x"
#  define FMTd64 FMT64_PREFIX "d"
#  define FMTu64 FMT64_PREFIX "u"
#  define FMTx64 FMT64_PREFIX "x"
#  define FMTdPTR FMTPTR_PREFIX "d"
#  define FMTuPTR FMTPTR_PREFIX "u"
#  define FMTxPTR FMTPTR_PREFIX "x"
#else
#  include <inttypes.h>
#  define FMTd32 PRId32
#  define FMTu32 PRIu32
#  define FMTx32 PRIx32
#  define FMTd64 PRId64
#  define FMTu64 PRIu64
#  define FMTx64 PRIx64
#  define FMTdPTR PRIdPTR
#  define FMTuPTR PRIuPTR
#  define FMTxPTR PRIxPTR
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

/*
 * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must
 * be large enough for all possible uses within jemalloc.
 */
#define MALLOC_PRINTF_BUFSIZE 4096

int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
    int base);
void malloc_write(const char *s);

/*
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
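 *
 * E.g. (illustrative, not from jemalloc itself), the integer, size, and
 * pointer conversions work as usual:
 *
 *   char buf[BUFERROR_BUF];
 *   malloc_snprintf(buf, sizeof(buf), "ptr %p, size %zu", ptr, sz);
 *
 * while "%f" and other floating point conversions must be avoided.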
*/ size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); size_t malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); /* * The caller can set write_cb and cbopaque to null to choose to print with the * je_malloc_message hook. */ void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); static inline ssize_t malloc_write_fd(int fd, const void *buf, size_t count) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary * on FreeBSD; most operating systems do not have this problem though. * * syscall() returns long or int, depending on platform, so capture the * result in the widest plausible type to avoid compiler warnings. */ long result = syscall(SYS_write, fd, buf, count); #else ssize_t result = (ssize_t)write(fd, buf, #ifdef _WIN32 (unsigned int) #endif count); #endif return (ssize_t)result; } static inline ssize_t malloc_read_fd(int fd, void *buf, size_t count) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) long result = syscall(SYS_read, fd, buf, count); #else ssize_t result = read(fd, buf, #ifdef _WIN32 (unsigned int) #endif count); #endif return (ssize_t)result; } #endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/mutex.h010064400007650000024000000226221344617474000215010ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MUTEX_H #define JEMALLOC_INTERNAL_MUTEX_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/witness.h" typedef enum { /* Can only acquire one mutex of a given witness rank at a time. */ malloc_mutex_rank_exclusive, /* * Can acquire multiple mutexes of the same witness rank, but in * address-ascending order only. */ malloc_mutex_address_ordered } malloc_mutex_lock_order_t; typedef struct malloc_mutex_s malloc_mutex_t; struct malloc_mutex_s { union { struct { /* * prof_data is defined first to reduce cacheline * bouncing: the data is not touched by the mutex holder * during unlocking, while might be modified by * contenders. Having it before the mutex itself could * avoid prefetching a modified cacheline (for the * unlocking thread). */ mutex_prof_data_t prof_data; #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 SRWLOCK lock; # else CRITICAL_SECTION lock; # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; #else pthread_mutex_t lock; #endif /* * Hint flag to avoid exclusive cache line contention * during spin waiting */ atomic_b_t locked; }; /* * We only touch witness when configured w/ debug. However we * keep the field in a union when !debug so that we don't have * to pollute the code base with #ifdefs, while avoid paying the * memory cost. */ #if !defined(JEMALLOC_DEBUG) witness_t witness; malloc_mutex_lock_order_t lock_order; #endif }; #if defined(JEMALLOC_DEBUG) witness_t witness; malloc_mutex_lock_order_t lock_order; #endif }; /* * Based on benchmark results, a fixed spin with this amount of retries works * well for our critical sections. 
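 *
 * (The slow lock path, malloc_mutex_lock_slow(), retries the trylock roughly
 * this many times before blocking on the underlying OS primitive.)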
*/ #define MALLOC_MUTEX_MAX_SPIN 250 #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 # define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) # else # define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) # define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) #else # define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) #endif #define LOCK_PROF_DATA_INITIALIZER \ {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ ATOMIC_INIT(0), 0, NULL, 0} #ifdef _WIN32 # define MALLOC_MUTEX_INITIALIZER #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) # if defined(JEMALLOC_DEBUG) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} # else # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} # endif #elif (defined(JEMALLOC_MUTEX_INIT_CB)) # if (defined(JEMALLOC_DEBUG)) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} # else # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} # endif #else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # if defined(JEMALLOC_DEBUG) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} # else # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} # endif #endif #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #else # undef isthreaded /* Undo private_namespace.h definition. 
*/ # define isthreaded true #endif bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, malloc_mutex_lock_order_t lock_order); void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); bool malloc_mutex_boot(void); void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_lock_slow(malloc_mutex_t *mutex); static inline void malloc_mutex_lock_final(malloc_mutex_t *mutex) { MALLOC_MUTEX_LOCK(mutex); atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); } static inline bool malloc_mutex_trylock_final(malloc_mutex_t *mutex) { return MALLOC_MUTEX_TRYLOCK(mutex); } static inline void mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (config_stats) { mutex_prof_data_t *data = &mutex->prof_data; data->n_lock_ops++; if (data->prev_owner != tsdn) { data->prev_owner = tsdn; data->n_owner_switches++; } } } /* Trylock: return false if the lock is successfully acquired. */ static inline bool malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { if (malloc_mutex_trylock_final(mutex)) { atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); return true; } mutex_owner_stats_update(tsdn, mutex); } witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); return false; } /* Aggregate lock prof data. */ static inline void malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { nstime_add(&sum->tot_wait_time, &data->tot_wait_time); if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { nstime_copy(&sum->max_wait_time, &data->max_wait_time); } sum->n_wait_times += data->n_wait_times; sum->n_spin_acquired += data->n_spin_acquired; if (sum->max_n_thds < data->max_n_thds) { sum->max_n_thds = data->max_n_thds; } uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, ATOMIC_RELAXED); uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( &data->n_waiting_thds, ATOMIC_RELAXED); atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, ATOMIC_RELAXED); sum->n_owner_switches += data->n_owner_switches; sum->n_lock_ops += data->n_lock_ops; } static inline void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { if (malloc_mutex_trylock_final(mutex)) { malloc_mutex_lock_slow(mutex); atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); } mutex_owner_stats_update(tsdn, mutex); } witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); } static inline void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED); witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { MALLOC_MUTEX_UNLOCK(mutex); } } static inline void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); } static inline void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); } /* Copy the prof data from mutex for processing. */ static inline void malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) { mutex_prof_data_t *source = &mutex->prof_data; /* Can only read holding the mutex. 
 */
	malloc_mutex_assert_owner(tsdn, mutex);
	/*
	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
	 * atomic data), but the mutex protection makes this safe, and writing
	 * a member-for-member copy is tedious for this situation.
	 */
	*data = *source;
	/* n_waiting_thds is not reported (modified w/o locking). */
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}

static inline void
malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	mutex_prof_data_t *source = &mutex->prof_data;
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);

	nstime_add(&data->tot_wait_time, &source->tot_wait_time);
	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
		nstime_copy(&data->max_wait_time, &source->max_wait_time);
	}
	data->n_wait_times += source->n_wait_times;
	data->n_spin_acquired += source->n_spin_acquired;
	if (data->max_n_thds < source->max_n_thds) {
		data->max_n_thds = source->max_n_thds;
	}
	/* n_waiting_thds is not reported. */
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
	data->n_owner_switches += source->n_owner_switches;
	data->n_lock_ops += source->n_lock_ops;
}

#endif /* JEMALLOC_INTERNAL_MUTEX_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/mutex_pool.h010064400007650000024000000054731344617474000225370ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H

#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"

/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256

typedef struct mutex_pool_s mutex_pool_t;
struct mutex_pool_s {
	malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);

/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
	size_t hash_result[2];
	hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
	return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}

static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
	for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
		malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
	}
}

/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
 * You're not allowed to acquire mutexes in the pool one at a time.  You have to
 * acquire all the mutexes you'll need in a single function call, and then
 * release them all in a single function call.
 */
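/*
 * Illustrative usage sketch (hypothetical, compiled out): operating on two
 * objects keyed by their addresses.  mutex_pool_lock2(), defined below,
 * acquires the two underlying mutexes in address order, so two threads
 * locking the same pair of keys in opposite order cannot deadlock.  The
 * example_* name is an assumption.
 */
#if 0
static void
example_pair_op(tsdn_t *tsdn, mutex_pool_t *pool, void *a, void *b) {
	/* Acquire everything needed in one call, per the rule above. */
	mutex_pool_lock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
	/* ... mutate both objects ... */
	mutex_pool_unlock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
}
#endif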
static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
	mutex_pool_assert_not_held(tsdn, pool);

	malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
	malloc_mutex_lock(tsdn, mutex);
}

static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
	malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
	malloc_mutex_unlock(tsdn, mutex);

	mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
	mutex_pool_assert_not_held(tsdn, pool);

	malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
	malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
	if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
		malloc_mutex_lock(tsdn, mutex1);
		malloc_mutex_lock(tsdn, mutex2);
	} else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
		malloc_mutex_lock(tsdn, mutex1);
	} else {
		malloc_mutex_lock(tsdn, mutex2);
		malloc_mutex_lock(tsdn, mutex1);
	}
}

static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
	malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
	malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
	if (mutex1 == mutex2) {
		malloc_mutex_unlock(tsdn, mutex1);
	} else {
		malloc_mutex_unlock(tsdn, mutex1);
		malloc_mutex_unlock(tsdn, mutex2);
	}

	mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
	malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/mutex_prof.h010064400007650000024000000070651344617474000225310ustar0000000000000000#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"

#define MUTEX_PROF_GLOBAL_MUTEXES \
    OP(background_thread) \
    OP(ctl) \
    OP(prof)

typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;

#define MUTEX_PROF_ARENA_MUTEXES \
    OP(large) \
    OP(extent_avail) \
    OP(extents_dirty) \
    OP(extents_muzzy) \
    OP(extents_retained) \
    OP(decay_dirty) \
    OP(decay_muzzy) \
    OP(base) \
    OP(tcache_list)

typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
	mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;

/*
 * The fourth parameter is a boolean value that is true for derived rate
 * counters and false for real ones.
 */
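/*
 * Illustrative sketch (hypothetical, compiled out): the counter lists below
 * are X-macros.  Defining OP() and then expanding a list visits every entry;
 * e.g., printing the human-readable column of each uint64_t counter.
 * Assumes <stdio.h>; the example_* name is an assumption.
 */
#if 0
static void
example_print_counter_names(void) {
#define OP(counter, type, human, derived, base_counter) \
	printf("%-16s%s\n", human, derived ? " (derived)" : "");
	MUTEX_PROF_UINT64_COUNTERS
#undef OP
}
#endif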
#define MUTEX_PROF_UINT64_COUNTERS \
    OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
    OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
    OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
    OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
    OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
    OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
    OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
    OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
    OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
    OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
    OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)

#define MUTEX_PROF_UINT32_COUNTERS \
    OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)

#define MUTEX_PROF_COUNTERS \
    MUTEX_PROF_UINT64_COUNTERS \
    MUTEX_PROF_UINT32_COUNTERS

#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,

#define COUNTER_ENUM(counter_list, t) \
    typedef enum { \
	counter_list \
	mutex_prof_num_##t##_counters \
    } mutex_prof_##t##_counter_ind_t;
COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
#undef COUNTER_ENUM
#undef OP

typedef struct {
	/*
	 * Counters touched on the slow path, i.e. when there is lock
	 * contention.  We update them once we have the lock.
	 */
	/* Total time (in nanoseconds) spent waiting on this mutex. */
	nstime_t tot_wait_time;
	/* Max time (in nanoseconds) spent on a single lock operation. */
	nstime_t max_wait_time;
	/* # of times a thread had to wait for this mutex (after spinning). */
	uint64_t n_wait_times;
	/* # of times acquired the mutex through local spinning. */
	uint64_t n_spin_acquired;
	/* Max # of threads waiting for the mutex at the same time. */
	uint32_t max_n_thds;
	/* Current # of threads waiting on the lock; atomically synced. */
	atomic_u32_t n_waiting_thds;

	/*
	 * Data touched on the fast path.  These are modified right after we
	 * grab the lock, so they're placed closest to the end (i.e. right
	 * before the lock) so that we have a higher chance of them being on
	 * the same cacheline.
	 */
	/* # of times the mutex holder is different from the previous one. */
	uint64_t n_owner_switches;
	/* Previous mutex holder, to facilitate n_owner_switches. */
	tsdn_t *prev_owner;
	/* # of lock() operations in total. */
	uint64_t n_lock_ops;
} mutex_prof_data_t;

#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/nstime.h010064400007650000024000000024301344617474000216310ustar0000000000000000#ifndef JEMALLOC_INTERNAL_NSTIME_H
#define JEMALLOC_INTERNAL_NSTIME_H

/* Maximum supported number of seconds (~584 years). */
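/*
 * Illustrative usage sketch (hypothetical, compiled out): composing and
 * comparing nstime_t values with the API declared below.  The example_* name
 * is an assumption.
 */
#if 0
static void
example_nstime(void) {
	nstime_t a, b;
	nstime_init2(&a, 1, 500000000);     /* 1.5 s (sec, nsec) */
	nstime_init(&b, 750000000);         /* 0.75 s, from raw ns */
	nstime_add(&a, &b);                 /* a is now 2.25 s */
	assert(nstime_compare(&a, &b) > 0); /* a sorts after b */
}
#endif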
#define NSTIME_SEC_MAX KQU(18446744072)
#define NSTIME_ZERO_INITIALIZER {0}

typedef struct {
	uint64_t ns;
} nstime_t;

void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_msec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);

typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;

typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;

#endif /* JEMALLOC_INTERNAL_NSTIME_H */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/pages.h010064400007650000024000000060671344617474000214430ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H

/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
	((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Huge page size.  LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
	((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest huge page size multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
	(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)

/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
#endif
/*
 * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
 *
 * The only supported way to hard-purge on Windows is to decommit and then
 * re-commit, but doing so is racy, and if re-commit fails it's a pain to
 * propagate the "poisoned" memory state.  Since we typically decommit as the
 * next step after purging on Windows anyway, there's no point in adding such
 * complexity.
 */
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
    defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif

static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
    true
#else
    false
#endif
    ;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
    true
#else
    false
#endif
    ;

typedef enum {
	thp_mode_default = 0,       /* Do not change hugepage settings. */
	thp_mode_always = 1,        /* Always set MADV_HUGEPAGE. */
	thp_mode_never = 2,         /* Always set MADV_NOHUGEPAGE. */

	thp_mode_names_limit = 3,   /* Used for option processing. */
	thp_mode_not_supported = 3  /* No THP support detected.
*/ } thp_mode_t; #define THP_MODE_DEFAULT thp_mode_default extern thp_mode_t opt_thp; extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */ extern const char *thp_mode_names[]; void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); void pages_unmap(void *addr, size_t size); bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge_lazy(void *addr, size_t size); bool pages_purge_forced(void *addr, size_t size); bool pages_huge(void *addr, size_t size); bool pages_nohuge(void *addr, size_t size); bool pages_dontdump(void *addr, size_t size); bool pages_dodump(void *addr, size_t size); bool pages_boot(void); void pages_set_thp_state (void *ptr, size_t size); #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/ph.h010064400007650000024000000304151344617474000207450ustar0000000000000000/* * A Pairing Heap implementation. * * "The Pairing Heap: A New Form of Self-Adjusting Heap" * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf * * With auxiliary twopass list, described in a follow on paper. * * "Pairing Heaps: Experiments and Analysis" * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf * ******************************************************************************* */ #ifndef PH_H_ #define PH_H_ /* Node structure. */ #define phn(a_type) \ struct { \ a_type *phn_prev; \ a_type *phn_next; \ a_type *phn_lchild; \ } /* Root structure. */ #define ph(a_type) \ struct { \ a_type *ph_root; \ } /* Internal utility macros. */ #define phn_lchild_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_lchild) #define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ a_phn->a_field.phn_lchild = a_lchild; \ } while (0) #define phn_next_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_next) #define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ a_phn->a_field.phn_prev = a_prev; \ } while (0) #define phn_prev_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_prev) #define phn_next_set(a_type, a_field, a_phn, a_next) do { \ a_phn->a_field.phn_next = a_next; \ } while (0) #define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ a_type *phn0child; \ \ assert(a_phn0 != NULL); \ assert(a_phn1 != NULL); \ assert(a_cmp(a_phn0, a_phn1) <= 0); \ \ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ phn_next_set(a_type, a_field, a_phn1, phn0child); \ if (phn0child != NULL) { \ phn_prev_set(a_type, a_field, phn0child, a_phn1); \ } \ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ } while (0) #define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ if (a_phn0 == NULL) { \ r_phn = a_phn1; \ } else if (a_phn1 == NULL) { \ r_phn = a_phn0; \ } else if (a_cmp(a_phn0, a_phn1) < 0) { \ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ a_cmp); \ r_phn = a_phn0; \ } else { \ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ a_cmp); \ r_phn = a_phn1; \ } \ } while (0) #define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *head = NULL; \ a_type *tail = NULL; \ a_type *phn0 = a_phn; \ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ \ /* \ * Multipass merge, wherein the first two elements of a FIFO \ * are repeatedly merged, and each result is appended to the \ * singly linked FIFO, until the FIFO contains only a single \ * element. 
We start with a sibling list but no reference to \ * its tail, so we do a single pass over the sibling list to \ * populate the FIFO. \ */ \ if (phn1 != NULL) { \ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ if (phnrest != NULL) { \ phn_prev_set(a_type, a_field, phnrest, NULL); \ } \ phn_prev_set(a_type, a_field, phn0, NULL); \ phn_next_set(a_type, a_field, phn0, NULL); \ phn_prev_set(a_type, a_field, phn1, NULL); \ phn_next_set(a_type, a_field, phn1, NULL); \ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ head = tail = phn0; \ phn0 = phnrest; \ while (phn0 != NULL) { \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ phnrest = phn_next_get(a_type, a_field, \ phn1); \ if (phnrest != NULL) { \ phn_prev_set(a_type, a_field, \ phnrest, NULL); \ } \ phn_prev_set(a_type, a_field, phn0, \ NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ phn_prev_set(a_type, a_field, phn1, \ NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = phnrest; \ } else { \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = NULL; \ } \ } \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ while (true) { \ head = phn_next_get(a_type, a_field, \ phn1); \ assert(phn_prev_get(a_type, a_field, \ phn0) == NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ assert(phn_prev_get(a_type, a_field, \ phn1) == NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ if (head == NULL) { \ break; \ } \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, \ phn0); \ } \ } \ } \ r_phn = phn0; \ } while (0) #define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ if (phn != NULL) { \ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_prev_set(a_type, a_field, phn, NULL); \ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ assert(phn_next_get(a_type, a_field, phn) == NULL); \ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ a_ph->ph_root); \ } \ } while (0) #define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ if (lchild == NULL) { \ r_phn = NULL; \ } else { \ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ r_phn); \ } \ } while (0) /* * The ph_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to ph_gen(). */ #define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ a_attr void a_prefix##new(a_ph_type *ph); \ a_attr bool a_prefix##empty(a_ph_type *ph); \ a_attr a_type *a_prefix##first(a_ph_type *ph); \ a_attr a_type *a_prefix##any(a_ph_type *ph); \ a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); /* * The ph_gen() macro generates a type-specific pairing heap implementation, * based on the above cpp macros. 
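 */

/*
 * Illustrative instantiation sketch (hypothetical, compiled out): a min-heap
 * of example node_t elements keyed by an int, using the macros above.  The
 * node_* names are assumptions.
 */
#if 0
typedef struct node_s node_t;
struct node_s {
	int key;
	phn(node_t) link; /* intrusive pairing-heap linkage */
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

/* Generates static node_heap_new(), node_heap_insert(), etc. */
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)
#endif

/*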
*/ #define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_ph_type *ph) { \ memset(ph, 0, sizeof(ph(a_type))); \ } \ a_attr bool \ a_prefix##empty(a_ph_type *ph) { \ return (ph->ph_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_ph_type *ph) { \ if (ph->ph_root == NULL) { \ return NULL; \ } \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ return ph->ph_root; \ } \ a_attr a_type * \ a_prefix##any(a_ph_type *ph) { \ if (ph->ph_root == NULL) { \ return NULL; \ } \ a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \ if (aux != NULL) { \ return aux; \ } \ return ph->ph_root; \ } \ a_attr void \ a_prefix##insert(a_ph_type *ph, a_type *phn) { \ memset(&phn->a_field, 0, sizeof(phn(a_type))); \ \ /* \ * Treat the root as an aux list during insertion, and lazily \ * merge during a_prefix##remove_first(). For elements that \ * are inserted, then removed via a_prefix##remove() before the \ * aux list is ever processed, this makes insert/remove \ * constant-time, whereas eager merging would make insert \ * O(log n). \ */ \ if (ph->ph_root == NULL) { \ ph->ph_root = phn; \ } else { \ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ a_field, ph->ph_root)); \ if (phn_next_get(a_type, a_field, ph->ph_root) != \ NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, ph->ph_root), \ phn); \ } \ phn_prev_set(a_type, a_field, phn, ph->ph_root); \ phn_next_set(a_type, a_field, ph->ph_root, phn); \ } \ } \ a_attr a_type * \ a_prefix##remove_first(a_ph_type *ph) { \ a_type *ret; \ \ if (ph->ph_root == NULL) { \ return NULL; \ } \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ \ ret = ph->ph_root; \ \ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ ph->ph_root); \ \ return ret; \ } \ a_attr a_type * \ a_prefix##remove_any(a_ph_type *ph) { \ /* \ * Remove the most recently inserted aux list element, or the \ * root if the aux list is empty. This has the effect of \ * behaving as a LIFO (and insertion/removal is therefore \ * constant-time) if a_prefix##[remove_]first() are never \ * called. \ */ \ if (ph->ph_root == NULL) { \ return NULL; \ } \ a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \ if (ret != NULL) { \ a_type *aux = phn_next_get(a_type, a_field, ret); \ phn_next_set(a_type, a_field, ph->ph_root, aux); \ if (aux != NULL) { \ phn_prev_set(a_type, a_field, aux, \ ph->ph_root); \ } \ return ret; \ } \ ret = ph->ph_root; \ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ ph->ph_root); \ return ret; \ } \ a_attr void \ a_prefix##remove(a_ph_type *ph, a_type *phn) { \ a_type *replace, *parent; \ \ if (ph->ph_root == phn) { \ /* \ * We can delete from aux list without merging it, but \ * we need to merge if we are dealing with the root \ * node and it has children. \ */ \ if (phn_lchild_get(a_type, a_field, phn) == NULL) { \ ph->ph_root = phn_next_get(a_type, a_field, \ phn); \ if (ph->ph_root != NULL) { \ phn_prev_set(a_type, a_field, \ ph->ph_root, NULL); \ } \ return; \ } \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ if (ph->ph_root == phn) { \ ph_merge_children(a_type, a_field, ph->ph_root, \ a_cmp, ph->ph_root); \ return; \ } \ } \ \ /* Get parent (if phn is leftmost child) before mutating. */ \ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ if (phn_lchild_get(a_type, a_field, parent) != phn) { \ parent = NULL; \ } \ } \ /* Find a possible replacement node, and link to parent. */ \ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ /* Set next/prev for sibling linked list. 
 */ \
	if (replace != NULL) { \
		if (parent != NULL) { \
			phn_prev_set(a_type, a_field, replace, parent); \
			phn_lchild_set(a_type, a_field, parent, \
			    replace); \
		} else { \
			phn_prev_set(a_type, a_field, replace, \
			    phn_prev_get(a_type, a_field, phn)); \
			if (phn_prev_get(a_type, a_field, phn) != \
			    NULL) { \
				phn_next_set(a_type, a_field, \
				    phn_prev_get(a_type, a_field, phn), \
				    replace); \
			} \
		} \
		phn_next_set(a_type, a_field, replace, \
		    phn_next_get(a_type, a_field, phn)); \
		if (phn_next_get(a_type, a_field, phn) != NULL) { \
			phn_prev_set(a_type, a_field, \
			    phn_next_get(a_type, a_field, phn), \
			    replace); \
		} \
	} else { \
		if (parent != NULL) { \
			a_type *next = phn_next_get(a_type, a_field, \
			    phn); \
			phn_lchild_set(a_type, a_field, parent, next); \
			if (next != NULL) { \
				phn_prev_set(a_type, a_field, next, \
				    parent); \
			} \
		} else { \
			assert(phn_prev_get(a_type, a_field, phn) != \
			    NULL); \
			phn_next_set(a_type, a_field, \
			    phn_prev_get(a_type, a_field, phn), \
			    phn_next_get(a_type, a_field, phn)); \
		} \
		if (phn_next_get(a_type, a_field, phn) != NULL) { \
			phn_prev_set(a_type, a_field, \
			    phn_next_get(a_type, a_field, phn), \
			    phn_prev_get(a_type, a_field, phn)); \
		} \
	} \
}

#endif /* PH_H_ */
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/private_namespace.sh010075500007650000024000000001371344617474000242100ustar0000000000000000#!/bin/sh

for symbol in `cat "$@"` ; do
  echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/private_symbols.awk010064400007650000024000000026641344617503000241100ustar0000000000000000#!/usr/bin/env awk -f

BEGIN {
  sym_prefix = "_"
  split("\
       _je_aligned_alloc \
       _je_calloc \
       _je_dallocx \
       _je_free \
       _je_mallctl \
       _je_mallctlbymib \
       _je_mallctlnametomib \
       _je_malloc \
       _je_malloc_conf \
       _je_malloc_message \
       _je_malloc_stats_print \
       _je_malloc_usable_size \
       _je_mallocx \
       _je_smallocx_0000000000000000000000000000000000000000 \
       _je_nallocx \
       _je_posix_memalign \
       _je_rallocx \
       _je_realloc \
       _je_sallocx \
       _je_sdallocx \
       _je_xallocx \
       _je_valloc \
       _pthread_create \
       ", exported_symbol_names)
  # Store exported symbol names as keys in exported_symbols.
  for (i in exported_symbol_names) {
    exported_symbols[exported_symbol_names[i]] = 1
  }
}

# Process 'nm -a <c_symbols.o>' output.
#
# Handle lines like:
#   0000000000000008 D opt_junk
#   0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) &&
    $3 ~ /^[A-Za-z0-9_]+$/) {
  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}

# Process 'dumpbin /SYMBOLS <c_symbols.obj>' output.
#
# Handle lines like:
#   353 00008098 SECT4  notype       External     | opt_junk
#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
  print $NF
}
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/private_symbols.sh010075500007650000024000000021741344617474000237470ustar0000000000000000#!/bin/sh
#
# Generate private_symbols[_jet].awk.
#
# Usage: private_symbols.sh <sym_prefix> <sym>*
#
#   <sym_prefix> is typically "" or "_".

sym_prefix=$1
shift

cat <<EOF
#!/usr/bin/env awk -f

BEGIN {
  sym_prefix = "${sym_prefix}"
  split("\\
EOF

for public_sym in "$@" ; do
  cat <<EOF
       ${sym_prefix}${public_sym} \\
EOF
done

cat <<"EOF"
       ", exported_symbol_names)
  # Store exported symbol names as keys in exported_symbols.
  for (i in exported_symbol_names) {
    exported_symbols[exported_symbol_names[i]] = 1
  }
}

# Process 'nm -a <c_symbols.o>' output.
#
# Handle lines like:
#   0000000000000008 D opt_junk
#   0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) &&
    $3 ~ /^[A-Za-z0-9_]+$/) {
  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}

# Process 'dumpbin /SYMBOLS <c_symbols.obj>' output.
#
# Handle lines like:
#   353 00008098 SECT4  notype       External     | opt_junk
#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
  print $NF
}
EOF
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/private_symbols_jet.awk010064400007650000024000000027121344617503000247450ustar0000000000000000#!/usr/bin/env awk -f

BEGIN {
  sym_prefix = "_"
  split("\
       _jet_aligned_alloc \
       _jet_calloc \
       _jet_dallocx \
       _jet_free \
       _jet_mallctl \
       _jet_mallctlbymib \
       _jet_mallctlnametomib \
       _jet_malloc \
       _jet_malloc_conf \
       _jet_malloc_message \
       _jet_malloc_stats_print \
       _jet_malloc_usable_size \
       _jet_mallocx \
       _jet_smallocx_0000000000000000000000000000000000000000 \
       _jet_nallocx \
       _jet_posix_memalign \
       _jet_rallocx \
       _jet_realloc \
       _jet_sallocx \
       _jet_sdallocx \
       _jet_xallocx \
       _jet_valloc \
       _pthread_create \
       ", exported_symbol_names)
  # Store exported symbol names as keys in exported_symbols.
  for (i in exported_symbol_names) {
    exported_symbols[exported_symbol_names[i]] = 1
  }
}

# Process 'nm -a <c_symbols.o>' output.
#
# Handle lines like:
#   0000000000000008 D opt_junk
#   0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) &&
    $3 ~ /^[A-Za-z0-9_]+$/) {
  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}

# Process 'dumpbin /SYMBOLS <c_symbols.obj>' output.
#
# Handle lines like:
#   353 00008098 SECT4  notype       External     | opt_junk
#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
  print $NF
}
jemalloc-sys-0.3.2/rep/include/jemalloc/internal/prng.h010064400007650000024000000113041344617474000213000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"

/*
 * Simple linear congruential pseudo-random number generator:
 *
 *   prng(x) = (a*x + c) % m
 *
 * where the following constants ensure maximal period:
 *
 *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
 *   c == Odd number (relatively prime to 2^n).
 *   m == 2^32 (2^64 for the 64-bit variants).
 *
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position.  For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
 * bits.
 */
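/*
 * Illustrative sketch (hypothetical, compiled out): one 64-bit step of the
 * recurrence, mirroring prng_lg_range_u64() below, including the shift that
 * keeps only the higher-quality upper bits.  The example_* name is an
 * assumption.
 */
#if 0
static uint64_t
example_prng_step(uint64_t *state, unsigned lg_range) {
	*state = (*state * PRNG_A_64) + PRNG_C_64; /* x' = (a*x + c) mod 2^64 */
	return *state >> (64 - lg_range);          /* take the upper lg_range bits */
}
#endif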
/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)

#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)

JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
	return (state * PRNG_A_32) + PRNG_C_32;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
	return (state * PRNG_A_64) + PRNG_C_64;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
	return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
	return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}

/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/

/*
 * The prng_lg_range functions give a uniform int in the half-open range [0,
 * 2**lg_range).  If atomic is true, they do so safely from multiple threads.
 * Multithreaded 64-bit prngs aren't supported.
 */

JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
	uint32_t ret, state0, state1;

	assert(lg_range > 0);
	assert(lg_range <= 32);

	state0 = atomic_load_u32(state, ATOMIC_RELAXED);

	if (atomic) {
		do {
			state1 = prng_state_next_u32(state0);
		} while (!atomic_compare_exchange_weak_u32(state, &state0,
		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
	} else {
		state1 = prng_state_next_u32(state0);
		atomic_store_u32(state, state1, ATOMIC_RELAXED);
	}
	ret = state1 >> (32 - lg_range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
	uint64_t ret, state1;

	assert(lg_range > 0);
	assert(lg_range <= 64);

	state1 = prng_state_next_u64(*state);
	*state = state1;
	ret = state1 >> (64 - lg_range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
	size_t ret, state0, state1;

	assert(lg_range > 0);
	assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

	state0 = atomic_load_zu(state, ATOMIC_RELAXED);

	if (atomic) {
		do {
			state1 = prng_state_next_zu(state0);
		} while (!atomic_compare_exchange_weak_zu(state, &state0,
		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
	} else {
		state1 = prng_state_next_zu(state0);
		atomic_store_zu(state, state1, ATOMIC_RELAXED);
	}
	ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

	return ret;
}

/*
 * The prng_range functions behave like the prng_lg_range, but return a result
 * in [0, range) instead of [0, 2**lg_range).
 */

JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
	uint32_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_u32(state, lg_range, atomic);
	} while (ret >= range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
	uint64_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

	/* Generate a result in [0..range) via repeated trial.
*/ do { ret = prng_lg_range_u64(state, lg_range); } while (ret >= range); return ret; } JEMALLOC_ALWAYS_INLINE size_t prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) { size_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_zu(state, lg_range, atomic); } while (ret >= range); return ret; } #endif /* JEMALLOC_INTERNAL_PRNG_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/prof_externs.h010064400007650000024000000074521344617474000230610ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H #define JEMALLOC_INTERNAL_PROF_EXTERNS_H #include "jemalloc/internal/mutex.h" extern malloc_mutex_t bt2gctx_mtx; extern bool opt_prof; extern bool opt_prof_active; extern bool opt_prof_thread_active_init; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern bool opt_prof_log; /* Turn logging on at boot. */ extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* Accessed via prof_active_[gs]et{_unlocked,}(). */ extern bool prof_active; /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ extern bool prof_gdump_val; /* * Profile dump interval, measured in bytes allocated. Each arena triggers a * profile dump when it reaches this threshold. The effect is that the * interval between profile dumps averages prof_interval, though the actual * interval between dumps will tend to be sporadic, and the interval will be a * maximum of approximately (prof_interval * narenas). */ extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. 
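 */

/*
 * Illustrative sketch (hypothetical, compiled out): lg_prof_sample is a
 * base-2 exponent, so the mean byte interval between samples is
 * 1 << lg_prof_sample; the default of 19 gives 512 KiB.  The example_* name
 * is an assumption.
 */
#if 0
static uint64_t
example_mean_sample_interval(size_t lg_sample) {
	return UINT64_C(1) << lg_sample; /* lg_sample == 19 -> 524288 bytes */
}
#endif

/*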
*/ extern size_t lg_prof_sample; void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx); void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); #ifdef JEMALLOC_JET size_t prof_tdata_count(void); size_t prof_bt_count(void); #endif typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *JET_MUTABLE prof_dump_open; typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); extern prof_dump_header_t *JET_MUTABLE prof_dump_header; #ifdef JEMALLOC_JET void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, uint64_t *accumbytes); #endif bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); void prof_tdata_cleanup(tsd_t *tsd); bool prof_active_get(tsdn_t *tsdn); bool prof_active_set(tsdn_t *tsdn, bool active); const char *prof_thread_name_get(tsd_t *tsd); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); bool prof_thread_active_get(tsd_t *tsd); bool prof_thread_active_set(tsd_t *tsd, bool active); bool prof_thread_active_init_get(tsdn_t *tsdn); bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); bool prof_gdump_get(tsdn_t *tsdn); bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(tsd_t *tsd); void prof_prefork0(tsdn_t *tsdn); void prof_prefork1(tsdn_t *tsdn); void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); bool prof_log_start(tsdn_t *tsdn, const char *filename); bool prof_log_stop(tsdn_t *tsdn); #ifdef JEMALLOC_JET size_t prof_log_bt_count(void); size_t prof_log_alloc_count(void); size_t prof_log_thr_count(void); bool prof_log_is_logging(void); bool prof_log_rep_check(void); void prof_log_dummy_set(bool new_value); #endif #endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/prof_inlines_a.h010064400007650000024000000047021344617474000233250ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H #define JEMALLOC_INTERNAL_PROF_INLINES_A_H #include "jemalloc/internal/mutex.h" static inline bool prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) { cassert(config_prof); bool overflow; uint64_t a0, a1; /* * If the application allocates fast enough (and/or if idump is slow * enough), extreme overflow here (a1 >= prof_interval * 2) can cause * idump trigger coalescing. This is an intentional mechanism that * avoids rate-limiting allocation. 
*/ #ifdef JEMALLOC_ATOMIC_U64 a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); do { a1 = a0 + accumbytes; assert(a1 >= a0); overflow = (a1 >= prof_interval); if (overflow) { a1 %= prof_interval; } } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); #else malloc_mutex_lock(tsdn, &prof_accum->mtx); a0 = prof_accum->accumbytes; a1 = a0 + accumbytes; overflow = (a1 >= prof_interval); if (overflow) { a1 %= prof_interval; } prof_accum->accumbytes = a1; malloc_mutex_unlock(tsdn, &prof_accum->mtx); #endif return overflow; } static inline void prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) { cassert(config_prof); /* * Cancel out as much of the excessive prof_accumbytes increase as * possible without underflowing. Interval-triggered dumps occur * slightly more often than intended as a result of incomplete * canceling. */ uint64_t a0, a1; #ifdef JEMALLOC_ATOMIC_U64 a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); do { a1 = (a0 >= SC_LARGE_MINCLASS - usize) ? a0 - (SC_LARGE_MINCLASS - usize) : 0; } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); #else malloc_mutex_lock(tsdn, &prof_accum->mtx); a0 = prof_accum->accumbytes; a1 = (a0 >= SC_LARGE_MINCLASS - usize) ? a0 - (SC_LARGE_MINCLASS - usize) : 0; prof_accum->accumbytes = a1; malloc_mutex_unlock(tsdn, &prof_accum->mtx); #endif } JEMALLOC_ALWAYS_INLINE bool prof_active_get_unlocked(void) { /* * Even if opt_prof is true, sampling can be temporarily disabled by * setting prof_active to false. No locking is used when reading * prof_active in the fast path, so there are no guarantees regarding * how long it will take for all threads to notice state changes. */ return prof_active; } #endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/prof_inlines_b.h010064400007650000024000000146101344617474000233250ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H #define JEMALLOC_INTERNAL_PROF_INLINES_B_H #include "jemalloc/internal/sz.h" JEMALLOC_ALWAYS_INLINE bool prof_gdump_get_unlocked(void) { /* * No locking is used when reading prof_gdump_val in the fast path, so * there are no guarantees regarding how long it will take for all * threads to notice state changes. 
*/ return prof_gdump_val; } JEMALLOC_ALWAYS_INLINE prof_tdata_t * prof_tdata_get(tsd_t *tsd, bool create) { prof_tdata_t *tdata; cassert(config_prof); tdata = tsd_prof_tdata_get(tsd); if (create) { if (unlikely(tdata == NULL)) { if (tsd_nominal(tsd)) { tdata = prof_tdata_init(tsd); tsd_prof_tdata_set(tsd, tdata); } } else if (unlikely(tdata->expired)) { tdata = prof_tdata_reinit(tsd, tdata); tsd_prof_tdata_set(tsd, tdata); } assert(tdata == NULL || tdata->attached); } return tdata; } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); return arena_prof_tctx_get(tsdn, ptr, alloc_ctx); } JEMALLOC_ALWAYS_INLINE void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx); } JEMALLOC_ALWAYS_INLINE void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_reset(tsdn, ptr, tctx); } JEMALLOC_ALWAYS_INLINE nstime_t prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx); } JEMALLOC_ALWAYS_INLINE void prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx, nstime_t t) { cassert(config_prof); assert(ptr != NULL); arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t); } JEMALLOC_ALWAYS_INLINE bool prof_sample_check(tsd_t *tsd, size_t usize, bool update) { ssize_t check = update ? 0 : usize; int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); if (update) { bytes_until_sample -= usize; if (tsd_nominal(tsd)) { tsd_bytes_until_sample_set(tsd, bytes_until_sample); } } if (likely(bytes_until_sample >= check)) { return true; } return false; } JEMALLOC_ALWAYS_INLINE bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, prof_tdata_t **tdata_out) { prof_tdata_t *tdata; cassert(config_prof); /* Fastpath: no need to load tdata */ if (likely(prof_sample_check(tsd, usize, update))) { return true; } bool booted = tsd_prof_tdata_get(tsd); tdata = prof_tdata_get(tsd, true); if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) { tdata = NULL; } if (tdata_out != NULL) { *tdata_out = tdata; } if (unlikely(tdata == NULL)) { return true; } /* * If this was the first creation of tdata, then * prof_tdata_get() reset bytes_until_sample, so decrement and * check it again */ if (!booted && prof_sample_check(tsd, usize, update)) { return true; } if (tsd_reentrancy_level_get(tsd) > 0) { return true; } /* Compute new sample threshold. 
*/ if (update) { prof_sample_threshold_update(tdata); } return !tdata->active; } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { prof_tctx_t *ret; prof_tdata_t *tdata; prof_bt_t bt; assert(usize == sz_s2u(usize)); if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, &tdata))) { ret = (prof_tctx_t *)(uintptr_t)1U; } else { bt_init(&bt, tdata->vec); prof_backtrace(&bt); ret = prof_lookup(tsd, &bt); } return ret; } JEMALLOC_ALWAYS_INLINE void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); assert(usize == isalloc(tsdn, ptr)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { prof_malloc_sample_object(tsdn, ptr, usize, tctx); } else { prof_tctx_set(tsdn, ptr, usize, alloc_ctx, (prof_tctx_t *)(uintptr_t)1U); } } JEMALLOC_ALWAYS_INLINE void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) { bool sampled, old_sampled, moved; cassert(config_prof); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); if (prof_active && !updated && ptr != NULL) { assert(usize == isalloc(tsd_tsdn(tsd), ptr)); if (prof_sample_accum_update(tsd, usize, true, NULL)) { /* * Don't sample. The usize passed to prof_alloc_prep() * was larger than what actually got allocated, so a * backtrace was captured for this allocation, even * though its actual usize was insufficient to cross the * sample threshold. */ prof_alloc_rollback(tsd, tctx, true); tctx = (prof_tctx_t *)(uintptr_t)1U; } } sampled = ((uintptr_t)tctx > (uintptr_t)1U); old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); moved = (ptr != old_ptr); if (unlikely(sampled)) { prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); } else if (moved) { prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL, (prof_tctx_t *)(uintptr_t)1U); } else if (unlikely(old_sampled)) { /* * prof_tctx_set() would work for the !moved case as well, but * prof_tctx_reset() is slightly cheaper, and the proper thing * to do here in the presence of explicit knowledge re: moved * state. */ prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx); } else { assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) == (uintptr_t)1U); } /* * The prof_free_sampled_object() call must come after the * prof_malloc_sample_object() call, because tctx and old_tctx may be * the same, in which case reversing the call order could cause the tctx * to be prematurely destroyed as a side effect of momentarily zeroed * counters. */ if (unlikely(old_sampled)) { prof_free_sampled_object(tsd, ptr, old_usize, old_tctx); } } JEMALLOC_ALWAYS_INLINE void prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) { prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); cassert(config_prof); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { prof_free_sampled_object(tsd, ptr, usize, tctx); } } #endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/prof_structs.h010064400007650000024000000120231344617474000230660ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H #define JEMALLOC_INTERNAL_PROF_STRUCTS_H #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/rb.h" struct prof_bt_s { /* Backtrace, stored as len program counters. 
*/ void **vec; unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { prof_bt_t *bt; unsigned max; } prof_unwind_data_t; #endif struct prof_accum_s { #ifndef JEMALLOC_ATOMIC_U64 malloc_mutex_t mtx; uint64_t accumbytes; #else atomic_u64_t accumbytes; #endif }; struct prof_cnt_s { /* Profiling counters. */ uint64_t curobjs; uint64_t curbytes; uint64_t accumobjs; uint64_t accumbytes; }; typedef enum { prof_tctx_state_initializing, prof_tctx_state_nominal, prof_tctx_state_dumping, prof_tctx_state_purgatory /* Dumper must finish destroying. */ } prof_tctx_state_t; struct prof_tctx_s { /* Thread data for thread that performed the allocation. */ prof_tdata_t *tdata; /* * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be * defunct during teardown. */ uint64_t thr_uid; uint64_t thr_discrim; /* Profiling counters, protected by tdata->lock. */ prof_cnt_t cnts; /* Associated global context. */ prof_gctx_t *gctx; /* * UID that distinguishes multiple tctx's created by the same thread, * but coexisting in gctx->tctxs. There are two ways that such * coexistence can occur: * - A dumper thread can cause a tctx to be retained in the purgatory * state. * - Although a single "producer" thread must create all tctx's which * share the same thr_uid, multiple "consumers" can each concurrently * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only * gets called once each time cnts.cur{objs,bytes} drop to 0, but this * threshold can be hit again before the first consumer finishes * executing prof_tctx_destroy(). */ uint64_t tctx_uid; /* Linkage into gctx's tctxs. */ rb_node(prof_tctx_t) tctx_link; /* * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents * sample vs destroy race. */ bool prepared; /* Current dump-related state, protected by gctx->lock. */ prof_tctx_state_t state; /* * Copy of cnts snapshotted during early dump phase, protected by * dump_mtx. */ prof_cnt_t dump_cnts; }; typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; struct prof_gctx_s { /* Protects nlimbo, cnt_summed, and tctxs. */ malloc_mutex_t *lock; /* * Number of threads that currently cause this gctx to be in a state of * limbo due to one of: * - Initializing this gctx. * - Initializing per thread counters associated with this gctx. * - Preparing to destroy this gctx. * - Dumping a heap profile that includes this gctx. * nlimbo must be 1 (single destroyer) in order to safely destroy the * gctx. */ unsigned nlimbo; /* * Tree of profile counters, one for each thread that has allocated in * this context. */ prof_tctx_tree_t tctxs; /* Linkage for tree of contexts to be dumped. */ rb_node(prof_gctx_t) dump_link; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Associated backtrace. */ prof_bt_t bt; /* Backtrace vector, variable size, referred to by bt. */ void *vec[1]; }; typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; struct prof_tdata_s { malloc_mutex_t *lock; /* Monotonically increasing unique thread identifier. */ uint64_t thr_uid; /* * Monotonically increasing discriminator among tdata structures * associated with the same thr_uid. */ uint64_t thr_discrim; /* Included in heap profile dumps if non-NULL. */ char *thread_name; bool attached; bool expired; rb_node(prof_tdata_t) tdata_link; /* * Counter used to initialize prof_tctx_t's tctx_uid. No locking is * necessary when incrementing this field, because only one thread ever * does so. 
*/ uint64_t tctx_uid_next; /* * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks * backtraces for which it has non-zero allocation/deallocation counters * associated with thread-specific prof_tctx_t objects. Other threads * may write to prof_tctx_t contents when freeing associated objects. */ ckh_t bt2tctx; /* Sampling state. */ uint64_t prng_state; /* State used to avoid dumping while operating on prof internals. */ bool enq; bool enq_idump; bool enq_gdump; /* * Set to true during an early dump phase for tdata's which are * currently being dumped. New threads' tdata's have this initialized * to false so that they aren't accidentally included in later dump * phases. */ bool dumping; /* * True if profiling is active for this tdata's thread * (thread.prof.active mallctl). */ bool active; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Backtrace vector, used for calls to prof_backtrace(). */ void *vec[PROF_BT_MAX]; }; typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; #endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/prof_types.h010064400007650000024000000033621344617474000225310ustar0000000000000000#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H #define JEMALLOC_INTERNAL_PROF_TYPES_H typedef struct prof_bt_s prof_bt_t; typedef struct prof_accum_s prof_accum_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_tctx_s prof_tctx_t; typedef struct prof_gctx_s prof_gctx_t; typedef struct prof_tdata_s prof_tdata_t; /* Option defaults. */ #ifdef JEMALLOC_PROF # define PROF_PREFIX_DEFAULT "jeprof" #else # define PROF_PREFIX_DEFAULT "" #endif #define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that * is based on __builtin_return_address() necessarily has a hard-coded number * of backtrace frame handlers, and should be kept in sync with this setting. */ #define PROF_BT_MAX 128 /* Initial hash table size. */ #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #define PROF_DUMP_BUFSIZE 65536 /* Size of stack-allocated buffer used by prof_printf(). */ #define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all gctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NCTX_LOCKS 1024 /* * Number of mutexes shared among all tdata's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NTDATA_LOCKS 256 /* * prof_tdata pointers close to NULL are used to encode state information that * is used for cleaning up during thread shutdown. 
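 */

/*
 * Illustrative sketch (hypothetical, compiled out): distinguishing a real
 * tdata pointer from the sentinel states defined just below, matching the
 * "(uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX" test used in
 * prof_sample_accum_update().  The example_* name is an assumption.
 */
#if 0
static bool
example_tdata_is_real(prof_tdata_t *tdata) {
	return tdata != NULL &&
	    (uintptr_t)tdata > (uintptr_t)PROF_TDATA_STATE_MAX;
}
#endif

/*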
*/ #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) #define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/public_namespace.h010064400007650000024000000020341344617503100236160ustar0000000000000000#define je_aligned_alloc JEMALLOC_N(aligned_alloc) #define je_calloc JEMALLOC_N(calloc) #define je_dallocx JEMALLOC_N(dallocx) #define je_free JEMALLOC_N(free) #define je_mallctl JEMALLOC_N(mallctl) #define je_mallctlbymib JEMALLOC_N(mallctlbymib) #define je_mallctlnametomib JEMALLOC_N(mallctlnametomib) #define je_malloc JEMALLOC_N(malloc) #define je_malloc_conf JEMALLOC_N(malloc_conf) #define je_malloc_message JEMALLOC_N(malloc_message) #define je_malloc_stats_print JEMALLOC_N(malloc_stats_print) #define je_malloc_usable_size JEMALLOC_N(malloc_usable_size) #define je_mallocx JEMALLOC_N(mallocx) #define je_smallocx_0000000000000000000000000000000000000000 JEMALLOC_N(smallocx_0000000000000000000000000000000000000000) #define je_nallocx JEMALLOC_N(nallocx) #define je_posix_memalign JEMALLOC_N(posix_memalign) #define je_rallocx JEMALLOC_N(rallocx) #define je_realloc JEMALLOC_N(realloc) #define je_sallocx JEMALLOC_N(sallocx) #define je_sdallocx JEMALLOC_N(sdallocx) #define je_xallocx JEMALLOC_N(xallocx) #define je_valloc JEMALLOC_N(valloc) jemalloc-sys-0.3.2/rep/include/jemalloc/internal/public_namespace.sh010075500007650000024000000002011344617474000240040ustar0000000000000000#!/bin/sh for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` echo "#define je_${n} JEMALLOC_N(${n})" done jemalloc-sys-0.3.2/rep/include/jemalloc/internal/public_symbols.txt010064400007650000024000000011441344617503000237420ustar0000000000000000aligned_alloc:je_aligned_alloc calloc:je_calloc dallocx:je_dallocx free:je_free mallctl:je_mallctl mallctlbymib:je_mallctlbymib mallctlnametomib:je_mallctlnametomib malloc:je_malloc malloc_conf:je_malloc_conf malloc_message:je_malloc_message malloc_stats_print:je_malloc_stats_print malloc_usable_size:je_malloc_usable_size mallocx:je_mallocx smallocx_0000000000000000000000000000000000000000:je_smallocx_0000000000000000000000000000000000000000 nallocx:je_nallocx posix_memalign:je_posix_memalign rallocx:je_rallocx realloc:je_realloc sallocx:je_sallocx sdallocx:je_sdallocx xallocx:je_xallocx valloc:je_valloc jemalloc-sys-0.3.2/rep/include/jemalloc/internal/public_unnamespace.h010064400007650000024000000007551344617503100241710ustar0000000000000000#undef je_aligned_alloc #undef je_calloc #undef je_dallocx #undef je_free #undef je_mallctl #undef je_mallctlbymib #undef je_mallctlnametomib #undef je_malloc #undef je_malloc_conf #undef je_malloc_message #undef je_malloc_stats_print #undef je_malloc_usable_size #undef je_mallocx #undef je_smallocx_0000000000000000000000000000000000000000 #undef je_nallocx #undef je_posix_memalign #undef je_rallocx #undef je_realloc #undef je_sallocx #undef je_sdallocx #undef je_xallocx #undef je_valloc jemalloc-sys-0.3.2/rep/include/jemalloc/internal/public_unnamespace.sh010075500007650000024000000001571344617474000243610ustar0000000000000000#!/bin/sh for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` echo "#undef je_${n}" done jemalloc-sys-0.3.2/rep/include/jemalloc/internal/ql.h010064400007650000024000000047101344617474000207510ustar0000000000000000#ifndef JEMALLOC_INTERNAL_QL_H #define JEMALLOC_INTERNAL_QL_H #include "jemalloc/internal/qr.h" /* List 
definitions. */ #define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } #define ql_head_initializer(a_head) {NULL} #define ql_elm(a_type) qr(a_type) /* List functions. */ #define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) #define ql_first(a_head) ((a_head)->qlh_first) #define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) #define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) #define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) #define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) #define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) #define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) #define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ if (ql_first(a_head) != (a_elm)) { \ qr_remove((a_elm), a_field); \ } else { \ ql_first(a_head) = NULL; \ } \ } while (0) #define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) #define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) #endif /* JEMALLOC_INTERNAL_QL_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/qr.h010064400007650000024000000044561344617474000207660ustar0000000000000000#ifndef JEMALLOC_INTERNAL_QR_H #define JEMALLOC_INTERNAL_QR_H /* Ring definitions. */ #define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. 
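 *
 * The ql_* macros in ql.h above are a thin head-pointer wrapper around
 * these rings. An illustrative usage sketch (assumed example types, not
 * part of the vendored sources):
 *
 *   typedef struct widget_s widget_t;
 *   struct widget_s {
 *       int id;
 *       ql_elm(widget_t) link;
 *   };
 *
 *   ql_head(widget_t) widgets = ql_head_initializer(widgets);
 *
 *   static void
 *   widget_register(widget_t *w) {
 *       ql_elm_new(w, link);
 *       ql_tail_insert(&widgets, w, link);
 *   }
 *
 *   widget_t *w;
 *   ql_foreach(w, &widgets, link) {
 *       // Visit each widget in insertion order.
 *   }
 *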
*/ #define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_after_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ } while (0) #define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ a_type *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ (a_qr_b)->a_field.qre_prev = t; \ } while (0) /* * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ #define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ qr_meld((a_qr_a), (a_qr_b), a_type, a_field) #define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ = (a_qr)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) #define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? (var)->a_field.qre_prev : NULL)) #endif /* JEMALLOC_INTERNAL_QR_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/quantum.h010064400007650000024000000033751344617474000220350ustar0000000000000000#ifndef JEMALLOC_INTERNAL_QUANTUM_H #define JEMALLOC_INTERNAL_QUANTUM_H /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). 
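 *
 * For example, with LG_QUANTUM == 4 (the common 64-bit case below), QUANTUM
 * is 16 and QUANTUM_CEILING() rounds a size up to the next multiple of 16:
 * QUANTUM_CEILING(1) == 16, QUANTUM_CEILING(16) == 16, and
 * QUANTUM_CEILING(17) == 32.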
*/ #ifndef LG_QUANTUM # if (defined(__i386__) || defined(_M_IX86)) # define LG_QUANTUM 4 # endif # ifdef __ia64__ # define LG_QUANTUM 4 # endif # ifdef __alpha__ # define LG_QUANTUM 4 # endif # if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) # define LG_QUANTUM 4 # endif # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) # define LG_QUANTUM 4 # endif # ifdef __arm__ # define LG_QUANTUM 3 # endif # ifdef __aarch64__ # define LG_QUANTUM 4 # endif # ifdef __hppa__ # define LG_QUANTUM 4 # endif # ifdef __m68k__ # define LG_QUANTUM 3 # endif # ifdef __mips__ # define LG_QUANTUM 3 # endif # ifdef __nios2__ # define LG_QUANTUM 3 # endif # ifdef __or1k__ # define LG_QUANTUM 3 # endif # ifdef __powerpc__ # define LG_QUANTUM 4 # endif # if defined(__riscv) || defined(__riscv__) # define LG_QUANTUM 4 # endif # ifdef __s390__ # define LG_QUANTUM 4 # endif # if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \ defined(__SH4_SINGLE_ONLY__)) # define LG_QUANTUM 4 # endif # ifdef __tile__ # define LG_QUANTUM 4 # endif # ifdef __le32__ # define LG_QUANTUM 4 # endif # ifndef LG_QUANTUM # error "Unknown minimum alignment for architecture; specify via " "--with-lg-quantum" # endif #endif #define QUANTUM ((size_t)(1U << LG_QUANTUM)) #define QUANTUM_MASK (QUANTUM - 1) /* Return the smallest quantum multiple that is >= a. */ #define QUANTUM_CEILING(a) \ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) #endif /* JEMALLOC_INTERNAL_QUANTUM_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/rb.h010064400007650000024000001126411344617474000207430ustar0000000000000000/*- ******************************************************************************* * * cpp macro implementation of left-leaning 2-3 red-black trees. Parent * pointers are not used, and color bits are stored in the least significant * bit of right-child pointers (if RB_COMPACT is defined), thus making node * linkage as compact as is possible for red-black trees. * * Usage: * * #include * #include * #define NDEBUG // (Optional, see assert(3).) * #include * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) * #include * ... * ******************************************************************************* */ #ifndef RB_H_ #define RB_H_ #ifndef __PGI #define RB_COMPACT #endif #ifdef RB_COMPACT /* Node structure. */ #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ bool rbn_red; \ } #endif /* Root structure. */ #define rb_tree(a_type) \ struct { \ a_type *rbt_root; \ } /* Left accessors. */ #define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) #define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. 
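 *
 * With RB_COMPACT, the color bit is packed into bit 0 of rbn_right_red;
 * this is sound because nodes are at least 2-byte aligned (asserted in
 * rbt_node_new() below). For example, a red node whose right child lives
 * at 0x7f00 stores 0x7f01: rbtn_right_get() masks with (ssize_t)-2 (~1) to
 * recover 0x7f00, and rbtn_red_get() extracts bit 0.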
*/ #define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ /* Bookkeeping bit cannot be used by node pointer. */ \ assert(((uintptr_t)(a_node) & 0x1) == 0); \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) #else /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) #endif /* Tree initializer. */ #define rb_new(a_type, a_field, a_rbt) do { \ (a_rbt)->rbt_root = NULL; \ } while (0) /* Internal utility macros. */ #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; \ rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ } while (0) /* * The rb_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to rb_gen(). 
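 *
 * For example, rb_proto(static, ex_, ex_t, ex_node_t) declares the
 * ex_new(), ex_empty(), ex_search(), ex_insert(), ex_remove(), etc.
 * prototypes matching the generated API documented under rb_gen() below.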
*/ #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ a_attr void \ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, * based on the above cpp macros. * * Arguments: * * a_attr : Function attribute for generated functions (ex: static). * a_prefix : Prefix for generated functions (ex: ex_). * a_rb_type : Type for red-black tree data structure (ex: ex_t). * a_type : Type for red-black tree node data structure (ex: ex_node_t). * a_field : Name of red-black tree node linkage (ex: ex_link). * a_cmp : Node comparison function name, with the following prototype: * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key * Interpretation of comparison function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other * In all cases, the a_node or a_key macro argument is the first * argument to the comparison function, which makes it possible * to write comparison functions that treat the first argument * specially. * * Assuming the following setup: * * typedef struct ex_node_s ex_node_t; * struct ex_node_s { * rb_node(ex_node_t) ex_link; * }; * typedef rb_tree(ex_node_t) ex_t; * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) * * The following API is generated: * * static void * ex_new(ex_t *tree); * Description: Initialize a red-black tree structure. * Args: * tree: Pointer to an uninitialized red-black tree object. * * static bool * ex_empty(ex_t *tree); * Description: Determine whether tree is empty. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: True if tree is empty, false otherwise. * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * * ex_last(ex_t *tree); * Description: Get the first/last node in tree. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: First/last node in tree, or NULL if tree is empty. * * static ex_node_t * * ex_next(ex_t *tree, ex_node_t *node); * static ex_node_t * * ex_prev(ex_t *tree, ex_node_t *node); * Description: Get node's successor/predecessor. * Args: * tree: Pointer to an initialized red-black tree object. * node: A node in tree. * Ret: node's successor/predecessor in tree, or NULL if node is * last/first. * * static ex_node_t * * ex_search(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. 
* Ret: Node in tree that matches key, or NULL if no match. * * static ex_node_t * * ex_nsearch(ex_t *tree, const ex_node_t *key); * static ex_node_t * * ex_psearch(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or if no match, hypothetical node's * successor/predecessor (NULL if no successor/predecessor). * * static void * ex_insert(ex_t *tree, ex_node_t *node); * Description: Insert node into tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node to be inserted into tree. * * static void * ex_remove(ex_t *tree, ex_node_t *node); * Description: Remove node from tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node in tree to be removed. * * static ex_node_t * * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * static ex_node_t * * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * Description: Iterate forward/backward over tree, starting at node. If * tree is modified, iteration must be immediately * terminated by the callback function that causes the * modification. * Args: * tree : Pointer to an initialized red-black tree object. * start: Node at which to start iteration, or NULL to start at * first/last node. * cb : Callback function, which is called for each node during * iteration. Under normal circumstances the callback function * should return NULL, which causes iteration to continue. If a * callback function returns non-NULL, iteration is immediately * terminated and the non-NULL return value is returned by the * iterator. This is useful for re-starting iteration after * modifying tree. * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. * * static void * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); * Description: Iterate over the tree with post-order traversal, remove * each node, and run the callback if non-null. This is * used for destroying a tree without paying the cost to * rebalance it. The tree must not be otherwise altered * during traversal. * Args: * tree: Pointer to an initialized red-black tree object. * cb : Callback function, which, if non-null, is called for each node * during iteration. There is no way to stop iteration once it * has begun. * arg : Opaque pointer passed to cb(). 
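 *
 * Example comparison function (an illustrative sketch; it assumes ex_node_s
 * additionally carries an "int key" field, which is not part of the setup
 * shown above):
 *
 *   static int
 *   ex_cmp(ex_node_t *a, ex_node_t *b) {
 *       return (a->key > b->key) - (a->key < b->key);
 *   }
 *
 * The comparison-of-comparisons idiom returns exactly -1, 0, or 1 without
 * risking signed overflow, satisfying the return-value contract above.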
*/ #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree) { \ return (rbtree->rbt_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return ret; \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return ret; \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_right_get(a_type, a_field, node) != NULL) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_left_get(a_type, a_field, node) != NULL) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ while (ret != NULL \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ } else { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ return ret; \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } path[sizeof(void *) << 4], *pathp; \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ } \ } \ pathp->node = node; \ /* Unwind. 
*/ \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ a_type *cnode = pathp->node; \ if (pathp->cmp < 0) { \ a_type *left = pathp[1].node; \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, cnode, tnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } else { \ a_type *right = pathp[1].node; \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ if (left != NULL && rbtn_red_get(a_type, a_field, \ left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ rbtn_red_set(a_type, a_field, cnode); \ } else { \ /* Lean left. */ \ a_type *tnode; \ bool tred = rbtn_red_get(a_type, a_field, cnode); \ rbtn_rotate_left(a_type, a_field, cnode, tnode); \ rbtn_color_set(a_type, a_field, tnode, tred); \ rbtn_red_set(a_type, a_field, cnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } \ pathp->node = cnode; \ } \ /* Set root, and make it black. */ \ rbtree->rbt_root = path->node; \ rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ } \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } *pathp, *nodep, path[sizeof(void *) << 4]; \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ if (cmp == 0) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ for (pathp++; pathp->node != NULL; pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } \ break; \ } \ } \ } \ assert(nodep->node == node); \ pathp--; \ if (pathp->node != node) { \ /* Swap node with its successor. */ \ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ rbtn_color_set(a_type, a_field, pathp->node, \ rbtn_red_get(a_type, a_field, node)); \ rbtn_left_set(a_type, a_field, pathp->node, \ rbtn_left_get(a_type, a_field, node)); \ /* If node's successor is its right child, the following code */\ /* will do the wrong thing for the right child pointer. */\ /* However, it doesn't matter, because the pointer will be */\ /* properly set when the successor is pruned. */\ rbtn_right_set(a_type, a_field, pathp->node, \ rbtn_right_get(a_type, a_field, node)); \ rbtn_color_set(a_type, a_field, node, tred); \ /* The pruned leaf node's child pointers are never accessed */\ /* again, so don't bother setting them to nil. */\ nodep->node = pathp->node; \ pathp->node = node; \ if (nodep == path) { \ rbtree->rbt_root = nodep->node; \ } else { \ if (nodep[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } else { \ rbtn_right_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ if (left != NULL) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. 
*/\ assert(!rbtn_red_get(a_type, a_field, node)); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ rbtree->rbt_root = left; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ left); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ left); \ } \ } \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ rbtree->rbt_root = NULL; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ pathp->node = NULL; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. */\ /* */\ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ /* */\ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } else { \ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ /* */\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, rightleft); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ a_type *tnode; \ rbtn_red_set(a_type, a_field, pathp->node); \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ pathp->node = tnode; \ } \ } \ } else { \ a_type *left; \ rbtn_right_set(a_type, a_field, pathp->node, \ pathp[1].node); \ left = rbtn_left_get(a_type, a_field, pathp->node); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *tnode; \ a_type *leftright = rbtn_right_get(a_type, a_field, \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ if (leftrightleft != NULL && rbtn_red_get(a_type, \ a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (r) */\ a_type *unode; \ rbtn_black_set(a_type, a_field, leftrightleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ unode); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_right_set(a_type, a_field, unode, tnode); \ rbtn_rotate_left(a_type, a_field, unode, tnode); \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (b) */\ assert(leftright != NULL); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_black_set(a_type, a_field, tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root, which may actually be the tree root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ } \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, pathp->node); \ /* Balance restored. */ \ return; \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ } \ } \ } \ } \ /* Set root. */ \ rbtree->rbt_root = path->node; \ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ arg)) != NULL) { \ return ret; \ } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ } else if (cmp > 0) { \ return a_prefix##iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ cb, arg); \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ return ret; \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } else if (cmp < 0) { \ return a_prefix##reverse_iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if 
(start != NULL) { \ ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtree->rbt_root, cb, arg); \ } else { \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ return ret; \ } \ a_attr void \ a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ a_type *, void *), void *arg) { \ if (node == NULL) { \ return; \ } \ a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ node), cb, arg); \ rbtn_left_set(a_type, a_field, (node), NULL); \ a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ node), cb, arg); \ rbtn_right_set(a_type, a_field, (node), NULL); \ if (cb) { \ cb(node, arg); \ } \ } \ a_attr void \ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg) { \ a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ rbtree->rbt_root = NULL; \ } #endif /* RB_H_ */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/rtree.h010064400007650000024000000420151344617474000214560ustar0000000000000000#ifndef JEMALLOC_INTERNAL_RTREE_H #define JEMALLOC_INTERNAL_RTREE_H #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree_tsd.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/tsd.h" /* * This radix tree implementation is tailored to the singular purpose of * associating metadata with extents that are currently owned by jemalloc. * ******************************************************************************* */ /* Number of high insignificant bits. */ #define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) /* Number of low insigificant bits. */ #define RTREE_NLIB LG_PAGE /* Number of significant bits. */ #define RTREE_NSB (LG_VADDR - RTREE_NLIB) /* Number of levels in radix tree. */ #if RTREE_NSB <= 10 # define RTREE_HEIGHT 1 #elif RTREE_NSB <= 36 # define RTREE_HEIGHT 2 #elif RTREE_NSB <= 52 # define RTREE_HEIGHT 3 #else # error Unsupported number of significant virtual address bits #endif /* Use compact leaf representation if virtual address encoding allows. */ #if RTREE_NHIB >= LG_CEIL(SC_NSIZES) # define RTREE_LEAF_COMPACT #endif /* Needed for initialization only. */ #define RTREE_LEAFKEY_INVALID ((uintptr_t)1) typedef struct rtree_node_elm_s rtree_node_elm_t; struct rtree_node_elm_s { atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ }; struct rtree_leaf_elm_s { #ifdef RTREE_LEAF_COMPACT /* * Single pointer-width field containing all three leaf element fields. * For example, on a 64-bit x64 system with 48 significant virtual * memory address bits, the index, extent, and slab fields are packed as * such: * * x: index * e: extent * b: slab * * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b */ atomic_p_t le_bits; #else atomic_p_t le_extent; /* (extent_t *) */ atomic_u_t le_szind; /* (szind_t) */ atomic_b_t le_slab; /* (bool) */ #endif }; typedef struct rtree_level_s rtree_level_t; struct rtree_level_s { /* Number of key bits distinguished by this level. */ unsigned bits; /* * Cumulative number of key bits distinguished by traversing to * corresponding tree level. */ unsigned cumbits; }; typedef struct rtree_s rtree_t; struct rtree_s { malloc_mutex_t init_lock; /* Number of elements based on rtree_levels[0].bits. */ #if RTREE_HEIGHT > 1 rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; #else rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; #endif }; /* * Split the bits into one to three partitions depending on number of * significant bits. 
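 * For example, on a typical x86_64 configuration with LG_VADDR == 48 and
 * LG_PAGE == 12, RTREE_NSB == 36 and RTREE_HEIGHT == 2, so each of the two
 * levels distinguishes 18 key bits (node arrays of 2^18 entries).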
It the number of bits does not divide evenly into the * number of levels, place one remainder bit per level starting at the leaf * level. */ static const rtree_level_t rtree_levels[] = { #if RTREE_HEIGHT == 1 {RTREE_NSB, RTREE_NHIB + RTREE_NSB} #elif RTREE_HEIGHT == 2 {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} #elif RTREE_HEIGHT == 3 {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, {RTREE_NSB/3 + RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} #else # error Unsupported rtree height #endif }; bool rtree_new(rtree_t *rtree, bool zeroed); typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc; typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t); extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc; typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *); extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc; typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *); extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc; #ifdef JEMALLOC_JET void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); #endif rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); JEMALLOC_ALWAYS_INLINE uintptr_t rtree_leafkey(uintptr_t key) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - rtree_levels[RTREE_HEIGHT-1].bits); unsigned maskbits = ptrbits - cumbits; uintptr_t mask = ~((ZU(1) << maskbits) - 1); return (key & mask); } JEMALLOC_ALWAYS_INLINE size_t rtree_cache_direct_map(uintptr_t key) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - rtree_levels[RTREE_HEIGHT-1].bits); unsigned maskbits = ptrbits - cumbits; return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1)); } JEMALLOC_ALWAYS_INLINE uintptr_t rtree_subkey(uintptr_t key, unsigned level) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); unsigned cumbits = rtree_levels[level].cumbits; unsigned shiftbits = ptrbits - cumbits; unsigned maskbits = rtree_levels[level].bits; uintptr_t mask = (ZU(1) << maskbits) - 1; return ((key >> shiftbits) & mask); } /* * Atomic getters. * * dependent: Reading a value on behalf of a pointer to a valid allocation * is guaranteed to be a clean read even without synchronization, * because the rtree update became visible in memory before the * pointer came into existence. * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be * dependent on a previous rtree write, which means a stale read * could result if synchronization were omitted here. */ # ifdef RTREE_LEAF_COMPACT JEMALLOC_ALWAYS_INLINE uintptr_t rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { return (uintptr_t)atomic_load_p(&elm->le_bits, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); } JEMALLOC_ALWAYS_INLINE extent_t * rtree_leaf_elm_bits_extent_get(uintptr_t bits) { # ifdef __aarch64__ /* * aarch64 doesn't sign extend the highest virtual address bit to set * the higher ones. Instead, the high bits gets zeroed. */ uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1; /* Mask off the slab bit. 
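 * Combined with high_bit_mask above, the final mask for LG_VADDR == 48 is
 * 0x0000fffffffffffe: the low 48 address bits with the slab bit cleared.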
*/ uintptr_t low_bit_mask = ~(uintptr_t)1; uintptr_t mask = high_bit_mask & low_bit_mask; return (extent_t *)(bits & mask); # else /* Restore sign-extended high bits, mask slab bit. */ return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >> RTREE_NHIB) & ~((uintptr_t)0x1)); # endif } JEMALLOC_ALWAYS_INLINE szind_t rtree_leaf_elm_bits_szind_get(uintptr_t bits) { return (szind_t)(bits >> LG_VADDR); } JEMALLOC_ALWAYS_INLINE bool rtree_leaf_elm_bits_slab_get(uintptr_t bits) { return (bool)(bits & (uintptr_t)0x1); } # endif JEMALLOC_ALWAYS_INLINE extent_t * rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); return rtree_leaf_elm_bits_extent_get(bits); #else extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); return extent; #endif } JEMALLOC_ALWAYS_INLINE szind_t rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); return rtree_leaf_elm_bits_szind_get(bits); #else return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); #endif } JEMALLOC_ALWAYS_INLINE bool rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); return rtree_leaf_elm_bits_slab_get(bits); #else return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); #endif } static inline void rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, extent_t *extent) { #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); #endif } static inline void rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, szind_t szind) { assert(szind <= SC_NSIZES); #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); #endif } static inline void rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool slab) { #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); #endif } static inline void rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | 
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); /* * Write extent last, since the element is atomically considered valid * as soon as the extent field is non-NULL. */ rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent); #endif } static inline void rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, szind_t szind, bool slab) { assert(!slab || szind < SC_NBINS); /* * The caller implicitly assures that it is the only writer to the szind * and slab fields, and that the extent field cannot currently change. */ rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); } JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing) { assert(key != 0); assert(!dependent || !init_missing); size_t slot = rtree_cache_direct_map(key); uintptr_t leafkey = rtree_leafkey(key); assert(leafkey != RTREE_LEAFKEY_INVALID); /* Fast path: L1 direct mapped cache. */ if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; assert(leaf != NULL); uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); return &leaf[subkey]; } /* * Search the L2 LRU cache. On hit, swap the matching element into the * slot in L1 cache, and move the position in L2 up by 1. */ #define RTREE_CACHE_CHECK_L2(i) do { \ if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ assert(leaf != NULL); \ if (i > 0) { \ /* Bubble up by one. */ \ rtree_ctx->l2_cache[i].leafkey = \ rtree_ctx->l2_cache[i - 1].leafkey; \ rtree_ctx->l2_cache[i].leaf = \ rtree_ctx->l2_cache[i - 1].leaf; \ rtree_ctx->l2_cache[i - 1].leafkey = \ rtree_ctx->cache[slot].leafkey; \ rtree_ctx->l2_cache[i - 1].leaf = \ rtree_ctx->cache[slot].leaf; \ } else { \ rtree_ctx->l2_cache[0].leafkey = \ rtree_ctx->cache[slot].leafkey; \ rtree_ctx->l2_cache[0].leaf = \ rtree_ctx->cache[slot].leaf; \ } \ rtree_ctx->cache[slot].leafkey = leafkey; \ rtree_ctx->cache[slot].leaf = leaf; \ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ return &leaf[subkey]; \ } \ } while (0) /* Check the first cache entry. */ RTREE_CACHE_CHECK_L2(0); /* Search the remaining cache elements. */ for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) { RTREE_CACHE_CHECK_L2(i); } #undef RTREE_CACHE_CHECK_L2 return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, dependent, init_missing); } static inline bool rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, extent_t *extent, szind_t szind, bool slab) { /* Use rtree_clear() to set the extent to NULL. 
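 * Returns true only when a missing leaf element could not be initialized
 * (allocation failure inside rtree_leaf_elm_lookup()); on success the
 * element is written and false is returned.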
*/ assert(extent != NULL); rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true); if (elm == NULL) { return true; } assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); return false; } JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent) { rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent, false); if (!dependent && elm == NULL) { return NULL; } assert(elm != NULL); return elm; } JEMALLOC_ALWAYS_INLINE extent_t * rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return NULL; } return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); } JEMALLOC_ALWAYS_INLINE szind_t rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return SC_NSIZES; } return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); } /* * rtree_slab_read() is intentionally omitted because slab is always read in * conjunction with szind, which makes rtree_szind_slab_read() a better choice. */ JEMALLOC_ALWAYS_INLINE bool rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return true; } *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); return false; } /* * Try to read szind_slab from the L1 cache. Returns true on a hit, * and fills in r_szind and r_slab. Otherwise returns false. * * Key is allowed to be NULL in order to save an extra branch on the * fastpath. returns false in this case. 
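 * A false return does not imply the key is absent from the tree; callers
 * are expected to fall back to the full lookup path, e.g.
 * rtree_szind_slab_read() below.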
*/ JEMALLOC_ALWAYS_INLINE bool rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, szind_t *r_szind, bool *r_slab) { rtree_leaf_elm_t *elm; size_t slot = rtree_cache_direct_map(key); uintptr_t leafkey = rtree_leafkey(key); assert(leafkey != RTREE_LEAFKEY_INVALID); if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; assert(leaf != NULL); uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); elm = &leaf[subkey]; #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); *r_szind = rtree_leaf_elm_bits_szind_get(bits); *r_slab = rtree_leaf_elm_bits_slab_get(bits); #else *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true); *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true); #endif return true; } else { return false; } } JEMALLOC_ALWAYS_INLINE bool rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent); if (!dependent && elm == NULL) { return true; } #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); *r_szind = rtree_leaf_elm_bits_szind_get(bits); *r_slab = rtree_leaf_elm_bits_slab_get(bits); #else *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent); #endif return false; } static inline void rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, szind_t szind, bool slab) { assert(!slab || szind < SC_NBINS); rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab); } static inline void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) { rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != NULL); rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false); } #endif /* JEMALLOC_INTERNAL_RTREE_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/rtree_tsd.h010064400007650000024000000035051344617474000223310ustar0000000000000000#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H #define JEMALLOC_INTERNAL_RTREE_CTX_H /* * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each * entry supports an entire leaf, so the cache hit rate is typically high even * with a small number of entries. In rare cases extent activity will straddle * the boundary between two leaf nodes. Furthermore, an arena may use a * combination of dss and mmap. Note that as memory usage grows past the amount * that this cache can directly cover, the cache will become less effective if * locality of reference is low, but the consequence is merely cache misses * while traversing the tree nodes. * * The L1 direct mapped cache offers consistent and low cost on cache hit. * However collision could affect hit rate negatively. This is resolved by * combining with a L2 LRU cache, which requires linear search and re-ordering * on access but suffers no collision. Note that, the cache will itself suffer * cache misses if made overly large, plus the cost of linear search in the LRU * cache. */ #define RTREE_CTX_LG_NCACHE 4 #define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE) #define RTREE_CTX_NCACHE_L2 8 /* * Zero initializer required for tsd initialization only. 
Proper initialization * done via rtree_ctx_data_init(). */ #define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}} typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; struct rtree_ctx_cache_elm_s { uintptr_t leafkey; rtree_leaf_elm_t *leaf; }; typedef struct rtree_ctx_s rtree_ctx_t; struct rtree_ctx_s { /* Direct mapped cache. */ rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; /* L2 LRU cache. */ rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; }; void rtree_ctx_data_init(rtree_ctx_t *ctx); #endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/sc.h010064400007650000024000000307111344617474000207420ustar0000000000000000#ifndef JEMALLOC_INTERNAL_SC_H #define JEMALLOC_INTERNAL_SC_H #include "jemalloc/internal/jemalloc_internal_types.h" /* * Size class computations: * * These are a little tricky; we'll first start by describing how things * generally work, and then describe some of the details. * * Ignore the first few size classes for a moment. We can then split all the * remaining size classes into groups. The size classes in a group are spaced * such that they cover allocation request sizes in a power-of-2 range. The * power of two is called the base of the group, and the size classes in it * satisfy allocations in the half-open range (base, base * 2]. There are * SC_NGROUP size classes in each group, equally spaced in the range, so that * each one covers allocations for base / SC_NGROUP possible allocation sizes. * We call that value (base / SC_NGROUP) the delta of the group. Each size class * is delta larger than the one before it (including the initial size class in a * group, which is delta large than 2**base, the largest size class in the * previous group). * To make the math all work out nicely, we require that SC_NGROUP is a power of * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of * lg_base and lg_delta. For each of these groups then, we have that * lg_delta == lg_base - SC_LG_NGROUP. * The size classes in a group with a given lg_base and lg_delta (which, recall, * can be computed from lg_base for these groups) are therefore: * base + 1 * delta * which covers allocations in (base, base + 1 * delta] * base + 2 * delta * which covers allocations in (base + 1 * delta, base + 2 * delta]. * base + 3 * delta * which covers allocations in (base + 2 * delta, base + 3 * delta]. * ... * base + SC_NGROUP * delta ( == 2 * base) * which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base]. * (Note that currently SC_NGROUP is always 4, so the "..." is empty in * practice.) * Note that the last size class in the group is the next power of two (after * base), so that we've set up the induction correctly for the next group's * selection of delta. * * Now, let's start considering the first few size classes. Two extra constants * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the * highest required alignment of a platform. For allocation sizes smaller than * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support * platforms with types with alignment larger than their size). 
To allow such * allocations (without wasting space unnecessarily), we introduce tiny size * classes; one per power of two, up until we hit the quantum size. There are * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes. * * Next, we have a size class of size LG_QUANTUM. This can't be the start of a * group in the sense we described above (covering a power of two range) since, * if we divided into it to pick a value of delta, we'd get a delta smaller than * (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which is against the rules. * * The first base we can divide by SC_NGROUP while still being at least * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size * classes are: * 1 * (1 << LG_QUANTUM) * 2 * (1 << LG_QUANTUM) * 3 * (1 << LG_QUANTUM) * ... (although, as above, this "..." is empty in practice) * SC_NGROUP * (1 << LG_QUANTUM). * * There are SC_NGROUP of these size classes, so we can regard it as a sort of * pseudo-group, even though it spans multiple powers of 2, is divided * differently, and both starts and ends on a power of 2 (as opposed to just * ending). SC_NGROUP is itself a power of two, so the first group after the * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP * sizes without violating our LG_QUANTUM requirements, so we can safely set * lg_delta = lg_base - SC_LG_GROUP (== LG_QUANTUM). * * So, in order, the size classes are: * * Tiny size classes: * - Count: LG_QUANTUM - SC_LG_TINY_MIN. * - Sizes: * 1 << SC_LG_TINY_MIN * 1 << (SC_LG_TINY_MIN + 1) * 1 << (SC_LG_TINY_MIN + 2) * ... * 1 << (LG_QUANTUM - 1) * * Initial pseudo-group: * - Count: SC_NGROUP * - Sizes: * 1 * (1 << LG_QUANTUM) * 2 * (1 << LG_QUANTUM) * 3 * (1 << LG_QUANTUM) * ... * SC_NGROUP * (1 << LG_QUANTUM) * * Regular group 0: * - Count: SC_NGROUP * - Sizes: * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of * lg_base - SC_LG_NGROUP) * (1 << lg_base) + 1 * (1 << lg_delta) * (1 << lg_base) + 2 * (1 << lg_delta) * (1 << lg_base) + 3 * (1 << lg_delta) * ... * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] * * Regular group 1: * - Count: SC_NGROUP * - Sizes: * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of * lg_base - SC_LG_NGROUP) * (1 << lg_base) + 1 * (1 << lg_delta) * (1 << lg_base) + 2 * (1 << lg_delta) * (1 << lg_base) + 3 * (1 << lg_delta) * ... * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] * * ... * * Regular group N: * - Count: SC_NGROUP * - Sizes: * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of * lg_base - SC_LG_NGROUP) * (1 << lg_base) + 1 * (1 << lg_delta) * (1 << lg_base) + 2 * (1 << lg_delta) * (1 << lg_base) + 3 * (1 << lg_delta) * ... * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] * * * Representation of metadata: * To make the math easy, we'll mostly work in lg quantities. We record lg_base, * lg_delta, and ndelta (i.e. number of deltas above the base) on a * per-size-class basis, and maintain the invariant that, across all size * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta). * * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP), * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP. * * For the initial tiny size classes (if any), lg_base is lg(size class size). 
* lg_delta is lg_base for the first size class, and lg_base - 1 for all * subsequent ones. ndelta is always 0. * * For the pseudo-group, if there are no tiny size classes, then we set * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0 * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do * indeed get a power of two that way). If there *are* tiny size classes, then * the first size class needs to have lg_delta relative to the largest tiny size * class. We therefore set lg_base == LG_QUANTUM - 1, * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the * pseudo-group the same. * * * Other terminology: * "Small" size classes mean those that are allocated out of bins, which is the * same as those that are slab allocated. * "Large" size classes are those that are not small. The cutoff for counting as * large is page size * group size. */ /* * Size class N + (1 << SC_LG_NGROUP) twice the size of size class N. */ #define SC_LG_NGROUP 2 #define SC_LG_TINY_MIN 3 #if SC_LG_TINY_MIN == 0 /* The div module doesn't support division by 1, which this would require. */ #error "Unsupported LG_TINY_MIN" #endif /* * The definitions below are all determined by the above settings and system * characteristics. */ #define SC_NGROUP (1ULL << SC_LG_NGROUP) #define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8) #define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN) #define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1) #define SC_NPSEUDO SC_NGROUP #define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP) /* * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1 * size class shorter than the others). * We could probably save some space in arenas by capping this at LG_VADDR size. */ #define SC_LG_BASE_MAX (SC_PTR_BITS - 2) #define SC_NREGULAR (SC_NGROUP * \ (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1) #define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR) /* The number of size classes that are a multiple of the page size. */ #define SC_NPSIZES ( \ /* Start with all the size classes. */ \ SC_NSIZES \ /* Subtract out those groups with too small a base. */ \ - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \ /* And the pseudo-group. */ \ - SC_NPSEUDO \ /* And the tiny group. */ \ - SC_NTINY \ /* Groups where ndelta*delta is not a multiple of the page size. */ \ - (2 * (SC_NGROUP))) /* * We declare a size class is binnable if size < page size * group. Or, in other * words, lg(size) < lg(page size) + lg(group size). */ #define SC_NBINS ( \ /* Sub-regular size classes. */ \ SC_NTINY + SC_NPSEUDO \ /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \ + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \ /* Last SC of the last group hits the bound exactly; exclude it. */ \ - 1) /* * The size2index_tab lookup table uses uint8_t to encode each bin index, so we * cannot support more than 256 small size classes. */ #if (SC_NBINS > 256) # error "Too many small size classes" #endif /* The largest size class in the lookup table. */ #define SC_LOOKUP_MAXCLASS ((size_t)1 << 12) /* Internal, only used for the definition of SC_SMALL_MAXCLASS. */ #define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1)) #define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1)) /* The largest size class allocated out of a slab. 
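 * For example, with 4 KiB pages (LG_PAGE == 12) and SC_LG_NGROUP == 2, this
 * is 8 KiB + 3 * 2 KiB == 14 KiB, one delta short of SC_LARGE_MINCLASS
 * (16 KiB).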
*/ #define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \ + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA) /* The smallest size class not allocated out of a slab. */ #define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP)) #define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP) /* Internal; only used for the definition of SC_LARGE_MAXCLASS. */ #define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2)) #define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP)) /* The largest size class supported. */ #define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA) typedef struct sc_s sc_t; struct sc_s { /* Size class index, or -1 if not a valid size class. */ int index; /* Lg group base size (no deltas added). */ int lg_base; /* Lg delta to previous size class. */ int lg_delta; /* Delta multiplier. size == 1<data) / sizeof(size_t)]; \ buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \ memcpy(buf, src, sizeof(type)); \ size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \ atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \ atomic_fence(ATOMIC_RELEASE); \ for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \ } \ atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \ } \ \ /* Returns whether or not the read was consistent. */ \ static inline bool \ seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \ size_t buf[sizeof(src->data) / sizeof(size_t)]; \ size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \ if (seq1 % 2 != 0) { \ return false; \ } \ for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \ } \ atomic_fence(ATOMIC_ACQUIRE); \ size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \ if (seq1 != seq2) { \ return false; \ } \ memcpy(dst, buf, sizeof(type)); \ return true; \ } #endif /* JEMALLOC_INTERNAL_SEQ_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/smoothstep.h010064400007650000024000000364121344617474000225460ustar0000000000000000#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H #define JEMALLOC_INTERNAL_SMOOTHSTEP_H /* * This file was generated by the following command: * sh smoothstep.sh smoother 200 24 3 15 */ /******************************************************************************/ /* * This header defines a precomputed table based on the smoothstep family of * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so * that floating point math can be avoided. 
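 * For reference, the polynomials drawn below are, in inline notation:
 *   smoothstep(x)    = -2*x^3 + 3*x^2
 *   smootherstep(x)  = 6*x^5 - 15*x^4 + 10*x^3
 *   smootheststep(x) = -20*x^7 + 70*x^6 - 84*x^5 + 35*x^4
 * Each tabulated y value is scaled by 2**SMOOTHSTEP_BFP.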
* * 3 2 * smoothstep(x) = -2x + 3x * * 5 4 3 * smootherstep(x) = 6x - 15x + 10x * * 7 6 5 4 * smootheststep(x) = -20x + 70x - 84x + 35x */ #define SMOOTHSTEP_VARIANT "smoother" #define SMOOTHSTEP_NSTEPS 200 #define SMOOTHSTEP_BFP 24 #define SMOOTHSTEP \ /* STEP(step, h, x, y) */ \ STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ STEP( 49, 
UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ STEP( 101, 
UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ STEP( 152, UINT64_C(0x0000000000e820b0), 
0.760, 0.906748825600000) \ STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ #endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/smoothstep.sh010075500007650000024000000056321344617474000227340ustar0000000000000000#!/bin/sh # # Generate a discrete 
lookup table for a sigmoid function in the smoothstep # family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table # entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode # the entries using a binary fixed point representation. # # Usage: smoothstep.sh # # is in {smooth, smoother, smoothest}. # must be greater than zero. # must be in [0..62]; reasonable values are roughly [10..30]. # is x decimal precision. # is y decimal precision. #set -x cmd="sh smoothstep.sh $*" variant=$1 nsteps=$2 bfp=$3 xprec=$4 yprec=$5 case "${variant}" in smooth) ;; smoother) ;; smoothest) ;; *) echo "Unsupported variant" exit 1 ;; esac smooth() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } smoother() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } smoothest() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } cat <iteration < 5) { for (i = 0; i < (1U << spin->iteration); i++) { spin_cpu_spinwait(); } spin->iteration++; } else { #ifdef _WIN32 SwitchToThread(); #else sched_yield(); #endif } } #undef SPIN_INLINE #endif /* JEMALLOC_INTERNAL_SPIN_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/stats.h010064400007650000024000000017461344617474000215010ustar0000000000000000#ifndef JEMALLOC_INTERNAL_STATS_H #define JEMALLOC_INTERNAL_STATS_H /* OPTION(opt, var_name, default, set_value_to) */ #define STATS_PRINT_OPTIONS \ OPTION('J', json, false, true) \ OPTION('g', general, true, false) \ OPTION('m', merged, config_stats, false) \ OPTION('d', destroyed, config_stats, false) \ OPTION('a', unmerged, config_stats, false) \ OPTION('b', bins, true, false) \ OPTION('l', large, true, false) \ OPTION('x', mutex, true, false) \ OPTION('e', extents, true, false) enum { #define OPTION(o, v, d, s) stats_print_option_num_##v, STATS_PRINT_OPTIONS #undef OPTION stats_print_tot_num_options }; /* Options for stats_print. */ extern bool opt_stats_print; extern char opt_stats_print_opts[stats_print_tot_num_options+1]; /* Implements je_malloc_stats_print. */ void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts); #endif /* JEMALLOC_INTERNAL_STATS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/sz.h010064400007650000024000000177221344617474000210000ustar0000000000000000#ifndef JEMALLOC_INTERNAL_SIZE_H #define JEMALLOC_INTERNAL_SIZE_H #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/util.h" /* * sz module: Size computations. * * Some abbreviations used here: * p: Page * ind: Index * s, sz: Size * u: Usable size * a: Aligned * * These are not always used completely consistently, but should be enough to * interpret function names. E.g. sz_psz2ind converts page size to page size * index; sz_sa2u converts a (size, alignment) allocation request to the usable * size that would result from such an allocation. 
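 * A worked example, assuming LG_QUANTUM == 4 (a 16-byte quantum, as on
 * x86-64): the size classes around 100 bytes are 80, 96, 112, 128, so
 * sz_s2u(100) == 112, the smallest usable size for a 100-byte request.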
*/ /* * sz_pind2sz_tab encodes the same information as could be computed by * sz_pind2sz_compute(). */ extern size_t sz_pind2sz_tab[SC_NPSIZES + 1]; /* * sz_index2size_tab encodes the same information as could be computed (at * unacceptable cost in some code paths) by sz_index2size_compute(). */ extern size_t sz_index2size_tab[SC_NSIZES]; /* * sz_size2index_tab is a compact lookup table that rounds request sizes up to * size classes. In order to reduce cache footprint, the table is compressed, * and all accesses are via sz_size2index(). */ extern uint8_t sz_size2index_tab[]; static const size_t sz_large_pad = #ifdef JEMALLOC_CACHE_OBLIVIOUS PAGE #else 0 #endif ; extern void sz_boot(const sc_data_t *sc_data); JEMALLOC_ALWAYS_INLINE pszind_t sz_psz2ind(size_t psz) { if (unlikely(psz > SC_LARGE_MAXCLASS)) { return SC_NPSIZES; } pszind_t x = lg_floor((psz<<1)-1); pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ? 0 : x - (SC_LG_NGROUP + LG_PAGE); pszind_t grp = shift << SC_LG_NGROUP; pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? LG_PAGE : x - SC_LG_NGROUP - 1; size_t delta_inverse_mask = ZU(-1) << lg_delta; pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & ((ZU(1) << SC_LG_NGROUP) - 1); pszind_t ind = grp + mod; return ind; } static inline size_t sz_pind2sz_compute(pszind_t pind) { if (unlikely(pind == SC_NPSIZES)) { return SC_LARGE_MAXCLASS + PAGE; } size_t grp = pind >> SC_LG_NGROUP; size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1); size_t grp_size_mask = ~((!!grp)-1); size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp) & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; size_t lg_delta = shift + (LG_PAGE-1); size_t mod_size = (mod+1) << lg_delta; size_t sz = grp_size + mod_size; return sz; } static inline size_t sz_pind2sz_lookup(pszind_t pind) { size_t ret = (size_t)sz_pind2sz_tab[pind]; assert(ret == sz_pind2sz_compute(pind)); return ret; } static inline size_t sz_pind2sz(pszind_t pind) { assert(pind < SC_NPSIZES + 1); return sz_pind2sz_lookup(pind); } static inline size_t sz_psz2u(size_t psz) { if (unlikely(psz > SC_LARGE_MAXCLASS)) { return SC_LARGE_MAXCLASS + PAGE; } size_t x = lg_floor((psz<<1)-1); size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? LG_PAGE : x - SC_LG_NGROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (psz + delta_mask) & ~delta_mask; return usize; } static inline szind_t sz_size2index_compute(size_t size) { if (unlikely(size > SC_LARGE_MAXCLASS)) { return SC_NSIZES; } if (size == 0) { return 0; } #if (SC_NTINY != 0) if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) { szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1; szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); } #endif { szind_t x = lg_floor((size<<1)-1); szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 : x - (SC_LG_NGROUP + LG_QUANTUM); szind_t grp = shift << SC_LG_NGROUP; szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) ? 
LG_QUANTUM : x - SC_LG_NGROUP - 1; size_t delta_inverse_mask = ZU(-1) << lg_delta; szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & ((ZU(1) << SC_LG_NGROUP) - 1); szind_t index = SC_NTINY + grp + mod; return index; } } JEMALLOC_ALWAYS_INLINE szind_t sz_size2index_lookup(size_t size) { assert(size <= SC_LOOKUP_MAXCLASS); szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1) >> SC_LG_TINY_MIN]); assert(ret == sz_size2index_compute(size)); return ret; } JEMALLOC_ALWAYS_INLINE szind_t sz_size2index(size_t size) { if (likely(size <= SC_LOOKUP_MAXCLASS)) { return sz_size2index_lookup(size); } return sz_size2index_compute(size); } static inline size_t sz_index2size_compute(szind_t index) { #if (SC_NTINY > 0) if (index < SC_NTINY) { return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index)); } #endif { size_t reduced_index = index - SC_NTINY; size_t grp = reduced_index >> SC_LG_NGROUP; size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - 1); size_t grp_size_mask = ~((!!grp)-1); size_t grp_size = ((ZU(1) << (LG_QUANTUM + (SC_LG_NGROUP-1))) << grp) & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; size_t lg_delta = shift + (LG_QUANTUM-1); size_t mod_size = (mod+1) << lg_delta; size_t usize = grp_size + mod_size; return usize; } } JEMALLOC_ALWAYS_INLINE size_t sz_index2size_lookup(szind_t index) { size_t ret = (size_t)sz_index2size_tab[index]; assert(ret == sz_index2size_compute(index)); return ret; } JEMALLOC_ALWAYS_INLINE size_t sz_index2size(szind_t index) { assert(index < SC_NSIZES); return sz_index2size_lookup(index); } JEMALLOC_ALWAYS_INLINE size_t sz_s2u_compute(size_t size) { if (unlikely(size > SC_LARGE_MAXCLASS)) { return 0; } if (size == 0) { size++; } #if (SC_NTINY > 0) if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) { size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1; size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : (ZU(1) << lg_ceil)); } #endif { size_t x = lg_floor((size<<1)-1); size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) ? LG_QUANTUM : x - SC_LG_NGROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (size + delta_mask) & ~delta_mask; return usize; } } JEMALLOC_ALWAYS_INLINE size_t sz_s2u_lookup(size_t size) { size_t ret = sz_index2size_lookup(sz_size2index_lookup(size)); assert(ret == sz_s2u_compute(size)); return ret; } /* * Compute usable size that would result from allocating an object with the * specified size. */ JEMALLOC_ALWAYS_INLINE size_t sz_s2u(size_t size) { if (likely(size <= SC_LOOKUP_MAXCLASS)) { return sz_s2u_lookup(size); } return sz_s2u_compute(size); } /* * Compute usable size that would result from allocating an object with the * specified size and alignment. */ JEMALLOC_ALWAYS_INLINE size_t sz_sa2u(size_t size, size_t alignment) { size_t usize; assert(alignment != 0 && ((alignment - 1) & alignment) == 0); /* Try for a small size class. */ if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) { /* * Round size up to the nearest multiple of alignment. * * This done, we can take advantage of the fact that for each * small size class, every object is aligned at the smallest * power of two that is non-zero in the base two representation * of the size. For example: * * Size | Base 2 | Minimum alignment * -----+----------+------------------ * 96 | 1100000 | 32 * 144 | 10100000 | 32 * 192 | 11000000 | 64 */ usize = sz_s2u(ALIGNMENT_CEILING(size, alignment)); if (usize < SC_LARGE_MINCLASS) { return usize; } } /* Large size class. 
Beware of overflow. */ if (unlikely(alignment > SC_LARGE_MAXCLASS)) { return 0; } /* Make sure result is a large size class. */ if (size <= SC_LARGE_MINCLASS) { usize = SC_LARGE_MINCLASS; } else { usize = sz_s2u(size); if (usize < size) { /* size_t overflow. */ return 0; } } /* * Calculate the multi-page mapping that large_palloc() would need in * order to guarantee the alignment. */ if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) { /* size_t overflow. */ return 0; } return usize; } #endif /* JEMALLOC_INTERNAL_SIZE_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tcache_externs.h010064400007650000024000000041351344617474000233350ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H #define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H extern bool opt_tcache; extern ssize_t opt_lg_tcache_max; extern cache_bin_info_t *tcache_bin_info; /* * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more * large-object bins. */ extern unsigned nhbins; /* Maximum cached size class. */ extern size_t tcache_maxclass; /* * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are * completely disjoint from this data structure. tcaches starts off as a sparse * array, so it has no physical memory footprint until individual pages are * touched. This allows the entire array to be allocated the first time an * explicit tcache is created without a disproportionate impact on memory usage. */ extern tcaches_t *tcaches; size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, bool *tcache_success); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, unsigned rem); void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); tcache_t *tcache_create_explicit(tsd_t *tsd); void tcache_cleanup(tsd_t *tsd); void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); bool tcaches_create(tsd_t *tsd, unsigned *r_ind); void tcaches_flush(tsd_t *tsd, unsigned ind); void tcaches_destroy(tsd_t *tsd, unsigned ind); bool tcache_boot(tsdn_t *tsdn); void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); void tcache_prefork(tsdn_t *tsdn); void tcache_postfork_parent(tsdn_t *tsdn); void tcache_postfork_child(tsdn_t *tsdn); void tcache_flush(tsd_t *tsd); bool tsd_tcache_data_init(tsd_t *tsd); bool tsd_tcache_enabled_data_init(tsd_t *tsd); #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tcache_inlines.h010064400007650000024000000133501344617474000233050ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H #define JEMALLOC_INTERNAL_TCACHE_INLINES_H #include "jemalloc/internal/bin.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/util.h" static inline bool tcache_enabled_get(tsd_t *tsd) { return tsd_tcache_enabled_get(tsd); } static inline void tcache_enabled_set(tsd_t *tsd, bool enabled) { bool was_enabled = tsd_tcache_enabled_get(tsd); if (!was_enabled && enabled) { tsd_tcache_data_init(tsd); } else if (was_enabled && !enabled) { 
tcache_cleanup(tsd); } /* Commit the state last. Above calls check current state. */ tsd_tcache_enabled_set(tsd, enabled); tsd_slow_update(tsd); } JEMALLOC_ALWAYS_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache) { if (TCACHE_GC_INCR == 0) { return; } if (unlikely(ticker_tick(&tcache->gc_ticker))) { tcache_event_hard(tsd, tcache); } } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; cache_bin_t *bin; bool tcache_success; size_t usize JEMALLOC_CC_SILENCE_INIT(0); assert(binind < SC_NBINS); bin = tcache_small_bin_get(tcache, binind); ret = cache_bin_alloc_easy(bin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { bool tcache_hard_success; arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) { return NULL; } ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, bin, binind, &tcache_hard_success); if (tcache_hard_success == false) { return NULL; } } assert(ret); /* * Only compute usize if required. The checks in the following if * statement are all static. */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = sz_index2size(binind); assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], false); } else if (unlikely(opt_zero)) { memset(ret, 0, usize); } } } else { if (slow_path && config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], true); } memset(ret, 0, usize); } if (config_stats) { bin->tstats.nrequests++; } if (config_prof) { tcache->prof_accumbytes += usize; } tcache_event(tsd, tcache); return ret; } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; cache_bin_t *bin; bool tcache_success; assert(binind >= SC_NBINS &&binind < nhbins); bin = tcache_large_bin_get(tcache, binind); ret = cache_bin_alloc_easy(bin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. 
*/ arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) { return NULL; } ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero); if (ret == NULL) { return NULL; } } else { size_t usize JEMALLOC_CC_SILENCE_INIT(0); /* Only compute usize on demand */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = sz_index2size(binind); assert(usize <= tcache_maxclass); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { memset(ret, JEMALLOC_ALLOC_JUNK, usize); } else if (unlikely(opt_zero)) { memset(ret, 0, usize); } } } else { memset(ret, 0, usize); } if (config_stats) { bin->tstats.nrequests++; } if (config_prof) { tcache->prof_accumbytes += usize; } } tcache_event(tsd, tcache); return ret; } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { cache_bin_t *bin; cache_bin_info_t *bin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS); if (slow_path && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, &bin_infos[binind]); } bin = tcache_small_bin_get(tcache, binind); bin_info = &tcache_bin_info[binind]; if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) { tcache_bin_flush_small(tsd, tcache, bin, binind, (bin_info->ncached_max >> 1)); bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr); assert(ret); } tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { cache_bin_t *bin; cache_bin_info_t *bin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS); assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); if (slow_path && config_fill && unlikely(opt_junk_free)) { large_dalloc_junk(ptr, sz_index2size(binind)); } bin = tcache_large_bin_get(tcache, binind); bin_info = &tcache_bin_info[binind]; if (unlikely(bin->ncached == bin_info->ncached_max)) { tcache_bin_flush_large(tsd, bin, binind, (bin_info->ncached_max >> 1), tcache); } assert(bin->ncached < bin_info->ncached_max); bin->ncached++; *(bin->avail - bin->ncached) = ptr; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE tcache_t * tcaches_get(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; if (unlikely(elm->tcache == NULL)) { malloc_printf(": invalid tcache id (%u).\n", ind); abort(); } else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) { elm->tcache = tcache_create_explicit(tsd); } return elm->tcache; } #endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tcache_structs.h010064400007650000024000000040701344617474000233520ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H #define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H #include "jemalloc/internal/cache_bin.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/tsd_types.h" /* Various uses of this struct need it to be a named type. */ typedef ql_elm(tsd_t) tsd_link_t; struct tcache_s { /* * To minimize our cache-footprint, we put the frequently accessed data * together at the start of this struct. */ /* Cleared after arena_prof_accum(). */ uint64_t prof_accumbytes; /* Drives incremental GC. */ ticker_t gc_ticker; /* * The pointer stacks associated with bins follow as a contiguous array. * During tcache initialization, the avail pointer in each element of * tbins is initialized to point to the proper offset within this array. 
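 * (The cache_bin_t structs themselves hold only bookkeeping; the avail
 * stacks live in one contiguous block carved up during tcache
 * initialization.)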
*/ cache_bin_t bins_small[SC_NBINS]; /* * This data is less hot; we can be a little less careful with our * footprint here. */ /* Lets us track all the tcaches in an arena. */ ql_elm(tcache_t) link; /* Logically scoped to tsd, but put here for cache layout reasons. */ ql_elm(tsd_t) tsd_link; bool in_hook; /* * The descriptor lets the arena find our cache bins without seeing the * tcache definition. This enables arenas to aggregate stats across * tcaches without having a tcache dependency. */ cache_bin_array_descriptor_t cache_bin_array_descriptor; /* The arena this tcache is associated with. */ arena_t *arena; /* Next bin to GC. */ szind_t next_gc_bin; /* For small bins, fill (ncached_max >> lg_fill_div). */ uint8_t lg_fill_div[SC_NBINS]; /* * We put the cache bins for large size classes at the end of the * struct, since some of them might not get used. This might end up * letting us avoid touching an extra page if we don't have to. */ cache_bin_t bins_large[SC_NSIZES-SC_NBINS]; }; /* Linkage for list of available (previously used) explicit tcache IDs. */ struct tcaches_s { union { tcache_t *tcache; tcaches_t *next; }; }; #endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tcache_types.h010064400007650000024000000040111344617474000230020ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H #define JEMALLOC_INTERNAL_TCACHE_TYPES_H #include "jemalloc/internal/sc.h" typedef struct tcache_s tcache_t; typedef struct tcaches_s tcaches_t; /* * tcache pointers close to NULL are used to encode state information that is * used for two purposes: preventing thread caching on a per thread basis and * cleaning up during thread shutdown. */ #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY /* * Absolute minimum number of cache slots for each small bin. */ #define TCACHE_NSLOTS_SMALL_MIN 20 /* * Absolute maximum number of cache slots for each small bin in the thread * cache. This is an additional constraint beyond that imposed as: twice the * number of regions per slab for this size class. * * This constant must be an even number. */ #define TCACHE_NSLOTS_SMALL_MAX 200 /* Number of cache slots for large size classes. */ #define TCACHE_NSLOTS_LARGE 20 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ #define LG_TCACHE_MAXCLASS_DEFAULT 15 /* * TCACHE_GC_SWEEP is the approximate number of allocation events between * full GC sweeps. Integer rounding may cause the actual number to be * slightly higher, since GC is performed incrementally. */ #define TCACHE_GC_SWEEP 8192 /* Number of tcache allocation/deallocation events between incremental GCs. */ #define TCACHE_GC_INCR \ ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1)) /* Used in TSD static initializer only. Real init in tcache_data_init(). */ #define TCACHE_ZERO_INITIALIZER {0} /* Used in TSD static initializer only. Will be initialized to opt_tcache. */ #define TCACHE_ENABLED_ZERO_INITIALIZER false /* Used for explicit tcache only. Means flushed but not destroyed. 
*/ #define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1) #endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/test_hooks.h010064400007650000024000000015541344617474000225220ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H #define JEMALLOC_INTERNAL_TEST_HOOKS_H extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)(); extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(); #define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) #define open JEMALLOC_HOOK(open, test_hooks_libc_hook) #define read JEMALLOC_HOOK(read, test_hooks_libc_hook) #define write JEMALLOC_HOOK(write, test_hooks_libc_hook) #define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook) #define close JEMALLOC_HOOK(close, test_hooks_libc_hook) #define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook) #define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook) /* Note that this is undef'd and re-define'd in src/prof.c. */ #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) #endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/ticker.h010064400007650000024000000043221344617474000216150ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TICKER_H #define JEMALLOC_INTERNAL_TICKER_H #include "jemalloc/internal/util.h" /** * A ticker makes it easy to count-down events until some limit. You * ticker_init the ticker to trigger every nticks events. You then notify it * that an event has occurred with calls to ticker_tick (or that nticks events * have occurred with a call to ticker_ticks), which will return true (and reset * the counter) if the countdown hit zero. */ typedef struct { int32_t tick; int32_t nticks; } ticker_t; static inline void ticker_init(ticker_t *ticker, int32_t nticks) { ticker->tick = nticks; ticker->nticks = nticks; } static inline void ticker_copy(ticker_t *ticker, const ticker_t *other) { *ticker = *other; } static inline int32_t ticker_read(const ticker_t *ticker) { return ticker->tick; } /* * Not intended to be a public API. Unfortunately, on x86, neither gcc nor * clang seems smart enough to turn * ticker->tick -= nticks; * if (unlikely(ticker->tick < 0)) { * fixup ticker * return true; * } * return false; * into * subq %nticks_reg, (%ticker_reg) * js fixup ticker * * unless we force "fixup ticker" out of line. In that case, gcc gets it right, * but clang now does worse than before. So, on x86 with gcc, we force it out * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be * worth the hassle, but this is on the fast path of both malloc and free (via * tcache_event). */ #if defined(__GNUC__) && !defined(__clang__) \ && (defined(__x86_64__) || defined(__i386__)) JEMALLOC_NOINLINE #endif static bool ticker_fixup(ticker_t *ticker) { ticker->tick = ticker->nticks; return true; } static inline bool ticker_ticks(ticker_t *ticker, int32_t nticks) { ticker->tick -= nticks; if (unlikely(ticker->tick < 0)) { return ticker_fixup(ticker); } return false; } static inline bool ticker_tick(ticker_t *ticker) { return ticker_ticks(ticker, 1); } /* * Try to tick. If ticker would fire, return true, but rely on * slowpath to reset ticker. 
*/ static inline bool ticker_trytick(ticker_t *ticker) { --ticker->tick; if (unlikely(ticker->tick < 0)) { return true; } return false; } #endif /* JEMALLOC_INTERNAL_TICKER_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tsd.h010064400007650000024000000306271344617474000211350ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TSD_H #define JEMALLOC_INTERNAL_TSD_H #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/bin_types.h" #include "jemalloc/internal/jemalloc_internal_externs.h" #include "jemalloc/internal/prof_types.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/rtree_tsd.h" #include "jemalloc/internal/tcache_types.h" #include "jemalloc/internal/tcache_structs.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/witness.h" /* * Thread-Specific-Data layout * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof --- * s: state * e: tcache_enabled * m: thread_allocated (config_stats) * f: thread_deallocated (config_stats) * p: prof_tdata (config_prof) * c: rtree_ctx (rtree cache accessed on deallocation) * t: tcache * --- data not accessed on tcache fast path: arena-related fields --- * d: arenas_tdata_bypass * r: reentrancy_level * x: narenas_tdata * i: iarena * a: arena * o: arenas_tdata * Loading TSD data is on the critical path of basically all malloc operations. * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective. * Use a compact layout to reduce cache footprint. * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+ * |---------------------------- 1st cacheline ----------------------------| * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] | * |---------------------------- 2nd cacheline ----------------------------| * | [c * 64 ........ ........ ........ ........ ........ ........ .......] | * |---------------------------- 3nd cacheline ----------------------------| * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... | * +-------------------------------------------------------------------------+ * Note: the entire tcache is embedded into TSD and spans multiple cachelines. * * The last 3 members (i, a and o) before tcache isn't really needed on tcache * fast path. However we have a number of unused tcache bins and witnesses * (never touched unless config_debug) at the end of tcache, so we place them * there to avoid breaking the cachelines and possibly paging in an extra page. 
*/ #ifdef JEMALLOC_JET typedef void (*test_callback_t)(int *); # define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 # define MALLOC_TEST_TSD \ O(test_data, int, int) \ O(test_callback, test_callback_t, int) # define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL #else # define MALLOC_TEST_TSD # define MALLOC_TEST_TSD_INITIALIZER #endif /* O(name, type, nullable type */ #define MALLOC_TSD \ O(tcache_enabled, bool, bool) \ O(arenas_tdata_bypass, bool, bool) \ O(reentrancy_level, int8_t, int8_t) \ O(narenas_tdata, uint32_t, uint32_t) \ O(offset_state, uint64_t, uint64_t) \ O(thread_allocated, uint64_t, uint64_t) \ O(thread_deallocated, uint64_t, uint64_t) \ O(bytes_until_sample, int64_t, int64_t) \ O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \ O(iarena, arena_t *, arena_t *) \ O(arena, arena_t *, arena_t *) \ O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\ O(binshards, tsd_binshards_t, tsd_binshards_t)\ O(tcache, tcache_t, tcache_t) \ O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ MALLOC_TEST_TSD #define TSD_INITIALIZER { \ ATOMIC_INIT(tsd_state_uninitialized), \ TCACHE_ENABLED_ZERO_INITIALIZER, \ false, \ 0, \ 0, \ 0, \ 0, \ 0, \ 0, \ NULL, \ RTREE_CTX_ZERO_INITIALIZER, \ NULL, \ NULL, \ NULL, \ TSD_BINSHARDS_ZERO_INITIALIZER, \ TCACHE_ZERO_INITIALIZER, \ WITNESS_TSD_INITIALIZER \ MALLOC_TEST_TSD_INITIALIZER \ } void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_cleanup_register(bool (*f)(void)); tsd_t *malloc_tsd_boot0(void); void malloc_tsd_boot1(void); void tsd_cleanup(void *arg); tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal); void tsd_state_set(tsd_t *tsd, uint8_t new_state); void tsd_slow_update(tsd_t *tsd); void tsd_prefork(tsd_t *tsd); void tsd_postfork_parent(tsd_t *tsd); void tsd_postfork_child(tsd_t *tsd); /* * Call ..._inc when your module wants to take all threads down the slow paths, * and ..._dec when it no longer needs to. */ void tsd_global_slow_inc(tsdn_t *tsdn); void tsd_global_slow_dec(tsdn_t *tsdn); bool tsd_global_slow(); enum { /* Common case --> jnz. */ tsd_state_nominal = 0, /* Initialized but on slow path. */ tsd_state_nominal_slow = 1, /* * Some thread has changed global state in such a way that all nominal * threads need to recompute their fast / slow status the next time they * get a chance. * * Any thread can change another thread's status *to* recompute, but * threads are the only ones who can change their status *from* * recompute. */ tsd_state_nominal_recompute = 2, /* * The above nominal states should be lower values. We use * tsd_nominal_max to separate nominal states from threads in the * process of being born / dying. */ tsd_state_nominal_max = 2, /* * A thread might free() during its death as its only allocator action; * in such scenarios, we need tsd, but set up in such a way that no * cleanup is necessary. */ tsd_state_minimal_initialized = 3, /* States during which we know we're in thread death. */ tsd_state_purgatory = 4, tsd_state_reincarnated = 5, /* * What it says on the tin; tsd that hasn't been initialized. Note * that even when the tsd struct lives in TLS, when need to keep track * of stuff like whether or not our pthread destructors have been * scheduled, so this really truly is different than the nominal state. */ tsd_state_uninitialized = 6 }; /* * Some TSD accesses can only be done in a nominal state. 
To enforce this, we * wrap TSD member access in a function that asserts on TSD state, and mangle * field names to prevent touching them accidentally. */ #define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n #ifdef JEMALLOC_U8_ATOMICS # define tsd_state_t atomic_u8_t # define tsd_atomic_load atomic_load_u8 # define tsd_atomic_store atomic_store_u8 # define tsd_atomic_exchange atomic_exchange_u8 #else # define tsd_state_t atomic_u32_t # define tsd_atomic_load atomic_load_u32 # define tsd_atomic_store atomic_store_u32 # define tsd_atomic_exchange atomic_exchange_u32 #endif /* The actual tsd. */ struct tsd_s { /* * The contents should be treated as totally opaque outside the tsd * module. Access any thread-local state through the getters and * setters below. */ /* * We manually limit the state to just a single byte. Unless the 8-bit * atomics are unavailable (which is rare). */ tsd_state_t state; #define O(n, t, nt) \ t TSD_MANGLE(n); MALLOC_TSD #undef O }; JEMALLOC_ALWAYS_INLINE uint8_t tsd_state_get(tsd_t *tsd) { /* * This should be atomic. Unfortunately, compilers right now can't tell * that this can be done as a memory comparison, and forces a load into * a register that hurts fast-path performance. */ /* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */ return *(uint8_t *)&tsd->state; } /* * Wrapper around tsd_t that makes it possible to avoid implicit conversion * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be * explicitly converted to tsd_t, which is non-nullable. */ struct tsdn_s { tsd_t tsd; }; #define TSDN_NULL ((tsdn_t *)0) JEMALLOC_ALWAYS_INLINE tsdn_t * tsd_tsdn(tsd_t *tsd) { return (tsdn_t *)tsd; } JEMALLOC_ALWAYS_INLINE bool tsdn_null(const tsdn_t *tsdn) { return tsdn == NULL; } JEMALLOC_ALWAYS_INLINE tsd_t * tsdn_tsd(tsdn_t *tsdn) { assert(!tsdn_null(tsdn)); return &tsdn->tsd; } /* * We put the platform-specific data declarations and inlines into their own * header files to avoid cluttering this file. They define tsd_boot0, * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #include "jemalloc/internal/tsd_malloc_thread_cleanup.h" #elif (defined(JEMALLOC_TLS)) #include "jemalloc/internal/tsd_tls.h" #elif (defined(_WIN32)) #include "jemalloc/internal/tsd_win.h" #else #include "jemalloc/internal/tsd_generic.h" #endif /* * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of * foo. This omits some safety checks, and so can be used during tsd * initialization and cleanup. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get_unsafe(tsd_t *tsd) { \ return &tsd->TSD_MANGLE(n); \ } MALLOC_TSD #undef O /* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get(tsd_t *tsd) { \ /* \ * Because the state might change asynchronously if it's \ * nominal, we need to make sure that we only read it once. \ */ \ uint8_t state = tsd_state_get(tsd); \ assert(state == tsd_state_nominal || \ state == tsd_state_nominal_slow || \ state == tsd_state_nominal_recompute || \ state == tsd_state_reincarnated || \ state == tsd_state_minimal_initialized); \ return tsd_##n##p_get_unsafe(tsd); \ } MALLOC_TSD #undef O /* * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. 
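 * E.g. tsdn_rtree_ctxp_get(NULL) returns NULL, while for a non-NULL tsdn it
 * returns tsd_rtree_ctxp_get(tsdn_tsd(tsdn)), cast to rtree_ctx_t *.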
*/ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE nt * \ tsdn_##n##p_get(tsdn_t *tsdn) { \ if (tsdn_null(tsdn)) { \ return NULL; \ } \ tsd_t *tsd = tsdn_tsd(tsdn); \ return (nt *)tsd_##n##p_get(tsd); \ } MALLOC_TSD #undef O /* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE t \ tsd_##n##_get(tsd_t *tsd) { \ return *tsd_##n##p_get(tsd); \ } MALLOC_TSD #undef O /* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE void \ tsd_##n##_set(tsd_t *tsd, t val) { \ assert(tsd_state_get(tsd) != tsd_state_reincarnated && \ tsd_state_get(tsd) != tsd_state_minimal_initialized); \ *tsd_##n##p_get(tsd) = val; \ } MALLOC_TSD #undef O JEMALLOC_ALWAYS_INLINE void tsd_assert_fast(tsd_t *tsd) { /* * Note that our fastness assertion does *not* include global slowness * counters; it's not in general possible to ensure that they won't * change asynchronously from underneath us. */ assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && tsd_reentrancy_level_get(tsd) == 0); } JEMALLOC_ALWAYS_INLINE bool tsd_fast(tsd_t *tsd) { bool fast = (tsd_state_get(tsd) == tsd_state_nominal); if (fast) { tsd_assert_fast(tsd); } return fast; } JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch_impl(bool init, bool minimal) { tsd_t *tsd = tsd_get(init); if (!init && tsd_get_allocates() && tsd == NULL) { return NULL; } assert(tsd != NULL); if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) { return tsd_fetch_slow(tsd, minimal); } assert(tsd_fast(tsd)); tsd_assert_fast(tsd); return tsd; } /* Get a minimal TSD that requires no cleanup. See comments in free(). */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch_min(void) { return tsd_fetch_impl(true, true); } /* For internal background threads use only. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_internal_fetch(void) { tsd_t *tsd = tsd_fetch_min(); /* Use reincarnated state to prevent full initialization. */ tsd_state_set(tsd, tsd_state_reincarnated); return tsd; } JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch(void) { return tsd_fetch_impl(true, false); } static inline bool tsd_nominal(tsd_t *tsd) { return (tsd_state_get(tsd) <= tsd_state_nominal_max); } JEMALLOC_ALWAYS_INLINE tsdn_t * tsdn_fetch(void) { if (!tsd_booted_get()) { return NULL; } return tsd_tsdn(tsd_fetch_impl(false, false)); } JEMALLOC_ALWAYS_INLINE rtree_ctx_t * tsd_rtree_ctx(tsd_t *tsd) { return tsd_rtree_ctxp_get(tsd); } JEMALLOC_ALWAYS_INLINE rtree_ctx_t * tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { /* * If tsd cannot be accessed, initialize the fallback rtree_ctx and * return a pointer to it. */ if (unlikely(tsdn_null(tsdn))) { rtree_ctx_data_init(fallback); return fallback; } return tsd_rtree_ctx(tsdn_tsd(tsdn)); } #endif /* JEMALLOC_INTERNAL_TSD_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tsd_generic.h010064400007650000024000000074101344617474000226230ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H #error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_GENERIC_H typedef struct tsd_init_block_s tsd_init_block_t; struct tsd_init_block_s { ql_elm(tsd_init_block_t) link; pthread_t thread; void *data; }; /* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. 
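 * tsd_init_check_recursion() and tsd_init_finish() use it to detect
 * reentrant TSD initialization on a thread.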
*/ typedef struct tsd_init_head_s tsd_init_head_t; typedef struct { bool initialized; tsd_t val; } tsd_wrapper_t; void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); extern pthread_key_t tsd_tsd; extern tsd_init_head_t tsd_init_head; extern tsd_wrapper_t tsd_boot_wrapper; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE void tsd_cleanup_wrapper(void *arg) { tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg; if (wrapper->initialized) { wrapper->initialized = false; tsd_cleanup(&wrapper->val); if (wrapper->initialized) { /* Trigger another cleanup round. */ if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { malloc_write(": Error setting TSD\n"); if (opt_abort) { abort(); } } return; } } malloc_tsd_dalloc(wrapper); } JEMALLOC_ALWAYS_INLINE void tsd_wrapper_set(tsd_wrapper_t *wrapper) { if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { malloc_write(": Error setting TSD\n"); abort(); } } JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * tsd_wrapper_get(bool init) { tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); if (init && unlikely(wrapper == NULL)) { tsd_init_block_t block; wrapper = (tsd_wrapper_t *) tsd_init_check_recursion(&tsd_init_head, &block); if (wrapper) { return wrapper; } wrapper = (tsd_wrapper_t *) malloc_tsd_malloc(sizeof(tsd_wrapper_t)); block.data = (void *)wrapper; if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } else { wrapper->initialized = false; JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS tsd_t initializer = TSD_INITIALIZER; JEMALLOC_DIAGNOSTIC_POP wrapper->val = initializer; } tsd_wrapper_set(wrapper); tsd_init_finish(&tsd_init_head, &block); } return wrapper; } JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) { return true; } tsd_wrapper_set(&tsd_boot_wrapper); tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { tsd_wrapper_t *wrapper; wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } tsd_boot_wrapper.initialized = false; tsd_cleanup(&tsd_boot_wrapper.val); wrapper->initialized = false; JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS tsd_t initializer = TSD_INITIALIZER; JEMALLOC_DIAGNOSTIC_POP wrapper->val = initializer; tsd_wrapper_set(wrapper); } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { if (tsd_boot0()) { return true; } tsd_boot1(); return false; } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return true; } /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(bool init) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(init); if (tsd_get_allocates() && !init && wrapper == NULL) { return NULL; } return &wrapper->val; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(true); if (likely(&wrapper->val != val)) { wrapper->val = *(val); } wrapper->initialized = true; } jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tsd_malloc_thread_cleanup.h010064400007650000024000000021361344617474000255140ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H #error This file should be included only once, by tsd.h. 
#endif #define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H extern __thread tsd_t tsd_tls; extern __thread bool tsd_initialized; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool tsd_cleanup_wrapper(void) { if (tsd_initialized) { tsd_initialized = false; tsd_cleanup(&tsd_tls); } return tsd_initialized; } JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { /* Do nothing. */ } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { return tsd_boot0(); } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return false; } /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(bool init) { return &tsd_tls; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { assert(tsd_booted); if (likely(&tsd_tls != val)) { tsd_tls = (*val); } tsd_initialized = true; } jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tsd_tls.h010064400007650000024000000020451344617474000220100ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_TLS_H #error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_TLS_H extern __thread tsd_t tsd_tls; extern pthread_key_t tsd_tsd; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) { return true; } tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { /* Do nothing. */ } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { return tsd_boot0(); } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return false; } /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(bool init) { return &tsd_tls; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { assert(tsd_booted); if (likely(&tsd_tls != val)) { tsd_tls = (*val); } if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) { malloc_write(": Error setting tsd.\n"); if (opt_abort) { abort(); } } } jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tsd_types.h010064400007650000024000000004021344617474000223450ustar0000000000000000#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H #define JEMALLOC_INTERNAL_TSD_TYPES_H #define MALLOC_TSD_CLEANUPS_MAX 2 typedef struct tsd_s tsd_t; typedef struct tsdn_s tsdn_t; typedef bool (*malloc_tsd_cleanup_t)(void); #endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/tsd_win.h010064400007650000024000000057121344617474000220070ustar0000000000000000#ifdef JEMALLOC_INTERNAL_TSD_WIN_H #error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_WIN_H typedef struct { bool initialized; tsd_t val; } tsd_wrapper_t; extern DWORD tsd_tsd; extern tsd_wrapper_t tsd_boot_wrapper; extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool tsd_cleanup_wrapper(void) { DWORD error = GetLastError(); tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); SetLastError(error); if (wrapper == NULL) { return false; } if (wrapper->initialized) { wrapper->initialized = false; tsd_cleanup(&wrapper->val); if (wrapper->initialized) { /* Trigger another cleanup round. 
*/ return true; } } malloc_tsd_dalloc(wrapper); return false; } JEMALLOC_ALWAYS_INLINE void tsd_wrapper_set(tsd_wrapper_t *wrapper) { if (!TlsSetValue(tsd_tsd, (void *)wrapper)) { malloc_write(": Error setting TSD\n"); abort(); } } JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * tsd_wrapper_get(bool init) { DWORD error = GetLastError(); tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd); SetLastError(error); if (init && unlikely(wrapper == NULL)) { wrapper = (tsd_wrapper_t *) malloc_tsd_malloc(sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } else { wrapper->initialized = false; /* MSVC is finicky about aggregate initialization. */ tsd_t tsd_initializer = TSD_INITIALIZER; wrapper->val = tsd_initializer; } tsd_wrapper_set(wrapper); } return wrapper; } JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { tsd_tsd = TlsAlloc(); if (tsd_tsd == TLS_OUT_OF_INDEXES) { return true; } malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); tsd_wrapper_set(&tsd_boot_wrapper); tsd_booted = true; return false; } JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { tsd_wrapper_t *wrapper; wrapper = (tsd_wrapper_t *) malloc_tsd_malloc(sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } tsd_boot_wrapper.initialized = false; tsd_cleanup(&tsd_boot_wrapper.val); wrapper->initialized = false; tsd_t initializer = TSD_INITIALIZER; wrapper->val = initializer; tsd_wrapper_set(wrapper); } JEMALLOC_ALWAYS_INLINE bool tsd_boot(void) { if (tsd_boot0()) { return true; } tsd_boot1(); return false; } JEMALLOC_ALWAYS_INLINE bool tsd_booted_get(void) { return tsd_booted; } JEMALLOC_ALWAYS_INLINE bool tsd_get_allocates(void) { return true; } /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_get(bool init) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(init); if (tsd_get_allocates() && !init && wrapper == NULL) { return NULL; } return &wrapper->val; } JEMALLOC_ALWAYS_INLINE void tsd_set(tsd_t *val) { tsd_wrapper_t *wrapper; assert(tsd_booted); wrapper = tsd_wrapper_get(true); if (likely(&wrapper->val != val)) { wrapper->val = *(val); } wrapper->initialized = true; } jemalloc-sys-0.3.2/rep/include/jemalloc/internal/util.h010064400007650000024000000026671344617474000213230ustar0000000000000000#ifndef JEMALLOC_INTERNAL_UTIL_H #define JEMALLOC_INTERNAL_UTIL_H #define UTIL_INLINE static inline /* Junk fill patterns. */ #ifndef JEMALLOC_ALLOC_JUNK # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) #endif #ifndef JEMALLOC_FREE_JUNK # define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) #endif /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* cpp macro definition stringification. */ #define STRINGIFY_HELPER(x) #x #define STRINGIFY(x) STRINGIFY_HELPER(x) /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ #define JEMALLOC_CC_SILENCE_INIT(v) = v #ifdef __GNUC__ # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) #else # define likely(x) !!(x) # define unlikely(x) !!(x) #endif #if !defined(JEMALLOC_INTERNAL_UNREACHABLE) # error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure #endif #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() /* Set error code. 
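 */

/*
 * Editor's note -- an illustrative sketch, not jemalloc source. The
 * likely()/unlikely() wrappers defined above degrade to plain
 * conditions on non-GNU compilers, so hot paths can annotate branches
 * unconditionally, e.g.:
 *
 *   void *p = some_alloc(size);      // some_alloc is a stand-in name
 *   if (unlikely(p == NULL)) {
 *       set_errno(ENOMEM);           // see set_errno() just below
 *       return NULL;
 *   }
 *
 * With GCC/Clang this feeds __builtin_expect(), steering block layout
 * so the error path stays off the fall-through path.
 */

/*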
*/ UTIL_INLINE void set_errno(int errnum) { #ifdef _WIN32 SetLastError(errnum); #else errno = errnum; #endif } /* Get last error code. */ UTIL_INLINE int get_errno(void) { #ifdef _WIN32 return GetLastError(); #else return errno; #endif } #undef UTIL_INLINE #endif /* JEMALLOC_INTERNAL_UTIL_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/internal/witness.h010064400007650000024000000225111344617474000220300ustar0000000000000000#ifndef JEMALLOC_INTERNAL_WITNESS_H #define JEMALLOC_INTERNAL_WITNESS_H #include "jemalloc/internal/ql.h" /******************************************************************************/ /* LOCK RANKS */ /******************************************************************************/ /* * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness * machinery. */ #define WITNESS_RANK_OMIT 0U #define WITNESS_RANK_MIN 1U #define WITNESS_RANK_INIT 1U #define WITNESS_RANK_CTL 1U #define WITNESS_RANK_TCACHES 2U #define WITNESS_RANK_ARENAS 3U #define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U #define WITNESS_RANK_PROF_DUMP 5U #define WITNESS_RANK_PROF_BT2GCTX 6U #define WITNESS_RANK_PROF_TDATAS 7U #define WITNESS_RANK_PROF_TDATA 8U #define WITNESS_RANK_PROF_LOG 9U #define WITNESS_RANK_PROF_GCTX 10U #define WITNESS_RANK_BACKGROUND_THREAD 11U /* * Used as an argument to witness_assert_depth_to_rank() in order to validate * depth excluding non-core locks with lower ranks. Since the rank argument to * witness_assert_depth_to_rank() is inclusive rather than exclusive, this * definition can have the same value as the minimally ranked core lock. */ #define WITNESS_RANK_CORE 12U #define WITNESS_RANK_DECAY 12U #define WITNESS_RANK_TCACHE_QL 13U #define WITNESS_RANK_EXTENT_GROW 14U #define WITNESS_RANK_EXTENTS 15U #define WITNESS_RANK_EXTENT_AVAIL 16U #define WITNESS_RANK_EXTENT_POOL 17U #define WITNESS_RANK_RTREE 18U #define WITNESS_RANK_BASE 19U #define WITNESS_RANK_ARENA_LARGE 20U #define WITNESS_RANK_HOOK 21U #define WITNESS_RANK_LEAF 0xffffffffU #define WITNESS_RANK_BIN WITNESS_RANK_LEAF #define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF #define WITNESS_RANK_DSS WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF /******************************************************************************/ /* PER-WITNESS DATA */ /******************************************************************************/ #if defined(JEMALLOC_DEBUG) # define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}} #else # define WITNESS_INITIALIZER(name, rank) #endif typedef struct witness_s witness_t; typedef unsigned witness_rank_t; typedef ql_head(witness_t) witness_list_t; typedef int witness_comp_t (const witness_t *, void *, const witness_t *, void *); struct witness_s { /* Name, used for printing lock order reversal messages. */ const char *name; /* * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses * must be acquired in order of increasing rank. */ witness_rank_t rank; /* * If two witnesses are of equal rank and they have the samp comp * function pointer, it is called as a last attempt to differentiate * between witnesses of equal rank. */ witness_comp_t *comp; /* Opaque data, passed to comp(). */ void *opaque; /* Linkage for thread's currently owned locks. 
*/ ql_elm(witness_t) link; }; /******************************************************************************/ /* PER-THREAD DATA */ /******************************************************************************/ typedef struct witness_tsd_s witness_tsd_t; struct witness_tsd_s { witness_list_t witnesses; bool forking; }; #define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false } #define WITNESS_TSDN_NULL ((witness_tsdn_t *)0) /******************************************************************************/ /* (PER-THREAD) NULLABILITY HELPERS */ /******************************************************************************/ typedef struct witness_tsdn_s witness_tsdn_t; struct witness_tsdn_s { witness_tsd_t witness_tsd; }; JEMALLOC_ALWAYS_INLINE witness_tsdn_t * witness_tsd_tsdn(witness_tsd_t *witness_tsd) { return (witness_tsdn_t *)witness_tsd; } JEMALLOC_ALWAYS_INLINE bool witness_tsdn_null(witness_tsdn_t *witness_tsdn) { return witness_tsdn == NULL; } JEMALLOC_ALWAYS_INLINE witness_tsd_t * witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) { assert(!witness_tsdn_null(witness_tsdn)); return &witness_tsdn->witness_tsd; } /******************************************************************************/ /* API */ /******************************************************************************/ void witness_init(witness_t *witness, const char *name, witness_rank_t rank, witness_comp_t *comp, void *opaque); typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); extern witness_lock_error_t *JET_MUTABLE witness_lock_error; typedef void (witness_owner_error_t)(const witness_t *); extern witness_owner_error_t *JET_MUTABLE witness_owner_error; typedef void (witness_not_owner_error_t)(const witness_t *); extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error; typedef void (witness_depth_error_t)(const witness_list_t *, witness_rank_t rank_inclusive, unsigned depth); extern witness_depth_error_t *JET_MUTABLE witness_depth_error; void witnesses_cleanup(witness_tsd_t *witness_tsd); void witness_prefork(witness_tsd_t *witness_tsd); void witness_postfork_parent(witness_tsd_t *witness_tsd); void witness_postfork_child(witness_tsd_t *witness_tsd); /* Helper, not intended for direct use. 
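 */

/*
 * Editor's note -- an illustrative sketch, not jemalloc source. In debug
 * builds each mutex embeds a witness_t, and the lock path brackets the
 * real acquire with the checks below. Schematically:
 *
 *   witness_t w;
 *   witness_init(&w, "demo_mutex", WITNESS_RANK_LEAF, NULL, NULL);
 *   ...
 *   witness_assert_not_owner(witness_tsdn, &w);
 *   witness_lock(witness_tsdn, &w);   // errors on rank-order reversal
 *   // ... critical section ...
 *   witness_unlock(witness_tsdn, &w);
 *
 * Acquiring a witness whose rank is below that of one already held
 * calls witness_lock_error(), surfacing potential lock-order deadlocks.
 */

/*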
*/ static inline bool witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) { witness_list_t *witnesses; witness_t *w; cassert(config_debug); witnesses = &witness_tsd->witnesses; ql_foreach(w, witnesses, link) { if (w == witness) { return true; } } return false; } static inline void witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { witness_tsd_t *witness_tsd; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } if (witness_owner(witness_tsd, witness)) { return; } witness_owner_error(witness); } static inline void witness_assert_not_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { witness_tsd_t *witness_tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } witnesses = &witness_tsd->witnesses; ql_foreach(w, witnesses, link) { if (w == witness) { witness_not_owner_error(witness); } } } static inline void witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, witness_rank_t rank_inclusive, unsigned depth) { witness_tsd_t *witness_tsd; unsigned d; witness_list_t *witnesses; witness_t *w; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); d = 0; witnesses = &witness_tsd->witnesses; w = ql_last(witnesses, link); if (w != NULL) { ql_reverse_foreach(w, witnesses, link) { if (w->rank < rank_inclusive) { break; } d++; } } if (d != depth) { witness_depth_error(witnesses, rank_inclusive, depth); } } static inline void witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) { witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth); } static inline void witness_assert_lockless(witness_tsdn_t *witness_tsdn) { witness_assert_depth(witness_tsdn, 0); } static inline void witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { witness_tsd_t *witness_tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } witness_assert_not_owner(witness_tsdn, witness); witnesses = &witness_tsd->witnesses; w = ql_last(witnesses, link); if (w == NULL) { /* No other locks; do nothing. */ } else if (witness_tsd->forking && w->rank <= witness->rank) { /* Forking, and relaxed ranking satisfied. */ } else if (w->rank > witness->rank) { /* Not forking, rank order reversal. */ witness_lock_error(witnesses, witness); } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != witness->comp || w->comp(w, w->opaque, witness, witness->opaque) > 0)) { /* * Missing/incompatible comparison function, or comparison * function indicates rank order reversal. 
*/ witness_lock_error(witnesses, witness); } ql_elm_new(witness, link); ql_tail_insert(witnesses, witness, link); } static inline void witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) { witness_tsd_t *witness_tsd; witness_list_t *witnesses; if (!config_debug) { return; } if (witness_tsdn_null(witness_tsdn)) { return; } witness_tsd = witness_tsdn_tsd(witness_tsdn); if (witness->rank == WITNESS_RANK_OMIT) { return; } /* * Check whether owner before removal, rather than relying on * witness_assert_owner() to abort, so that unit tests can test this * function's failure mode without causing undefined behavior. */ if (witness_owner(witness_tsd, witness)) { witnesses = &witness_tsd->witnesses; ql_remove(witnesses, witness, link); } else { witness_assert_owner(witness_tsdn, witness); } } #endif /* JEMALLOC_INTERNAL_WITNESS_H */ jemalloc-sys-0.3.2/rep/include/jemalloc/jemalloc.h010064400007650000024000000336551344617503300203150ustar0000000000000000#ifndef JEMALLOC_H_ #define JEMALLOC_H_ #ifdef __cplusplus extern "C" { #endif /* Defined if __attribute__((...)) syntax is supported. */ #define JEMALLOC_HAVE_ATTR /* Defined if alloc_size attribute is supported. */ #define JEMALLOC_HAVE_ATTR_ALLOC_SIZE /* Defined if format(gnu_printf, ...) attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */ /* Defined if format(printf, ...) attribute is supported. */ #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF /* * Define overrides for non-standard allocator-related functions if they are * present on the system. */ /* #undef JEMALLOC_OVERRIDE_MEMALIGN */ #define JEMALLOC_OVERRIDE_VALLOC /* * At least Linux omits the "const" in: * * size_t malloc_usable_size(const void *ptr); * * Match the operating system's prototype. */ #define JEMALLOC_USABLE_SIZE_CONST const /* * If defined, specify throw() for the public function prototypes when compiling * with C++. The only justification for this is to match the prototypes that * glibc defines. */ /* #undef JEMALLOC_USE_CXX_THROW */ #ifdef _MSC_VER # ifdef _WIN64 # define LG_SIZEOF_PTR_WIN 3 # else # define LG_SIZEOF_PTR_WIN 2 # endif #endif /* sizeof(void *) == 2^LG_SIZEOF_PTR. */ #define LG_SIZEOF_PTR 3 /* * Name mangling for public symbols is controlled by --with-mangling and * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by * these macro definitions. 
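 */

/*
 * Editor's note -- an observation about this generated copy, plus an
 * illustrative sketch. In the block below the mappings are identities
 * (je_malloc stays je_malloc), so this build exports je_-prefixed entry
 * points and applications call them directly:
 *
 *   void *p = je_malloc(64);
 *   je_free(p);
 *
 * A build configured with an empty --with-jemalloc-prefix would instead
 * map the je_ names to the plain malloc/free symbols, letting jemalloc
 * stand in for the system allocator.
 */

/*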
*/ #ifndef JEMALLOC_NO_RENAME # define je_aligned_alloc je_aligned_alloc # define je_calloc je_calloc # define je_dallocx je_dallocx # define je_free je_free # define je_mallctl je_mallctl # define je_mallctlbymib je_mallctlbymib # define je_mallctlnametomib je_mallctlnametomib # define je_malloc je_malloc # define je_malloc_conf je_malloc_conf # define je_malloc_message je_malloc_message # define je_malloc_stats_print je_malloc_stats_print # define je_malloc_usable_size je_malloc_usable_size # define je_mallocx je_mallocx # define je_smallocx_0000000000000000000000000000000000000000 je_smallocx_0000000000000000000000000000000000000000 # define je_nallocx je_nallocx # define je_posix_memalign je_posix_memalign # define je_rallocx je_rallocx # define je_realloc je_realloc # define je_sallocx je_sallocx # define je_sdallocx je_sdallocx # define je_xallocx je_xallocx # define je_valloc je_valloc #endif #include #include #include #include #include #define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000" #define JEMALLOC_VERSION_MAJOR 0 #define JEMALLOC_VERSION_MINOR 0 #define JEMALLOC_VERSION_BUGFIX 0 #define JEMALLOC_VERSION_NREV 0 #define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000" #define JEMALLOC_VERSION_GID_IDENT 0000000000000000000000000000000000000000 #define MALLOCX_LG_ALIGN(la) ((int)(la)) #if LG_SIZEOF_PTR == 2 # define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) #else # define MALLOCX_ALIGN(a) \ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ ffs((int)(((size_t)(a))>>32))+31)) #endif #define MALLOCX_ZERO ((int)0x40) /* * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 * encodes MALLOCX_TCACHE_NONE. */ #define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) #define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) /* * Bias arena index bits so that 0 encodes "use an automatically chosen arena". */ #define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) /* * Use as arena index in "arena..{purge,decay,dss}" and * "stats.arenas..*" mallctl interfaces to select all arenas. This * definition is intentionally specified in raw decimal format to support * cpp-based string concatenation, e.g. * * #define STRINGIFY_HELPER(x) #x * #define STRINGIFY(x) STRINGIFY_HELPER(x) * * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, * 0); */ #define MALLCTL_ARENAS_ALL 4096 /* * Use as arena index in "stats.arenas..*" mallctl interfaces to select * destroyed arenas. 
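 */

/*
 * Editor's note -- an illustrative sketch, not jemalloc source. The
 * MALLOCX_* encodings above are OR-ed together into the flags argument
 * of the *allocx() functions, e.g.:
 *
 *   // 4 KiB-aligned, pre-zeroed allocation that bypasses the tcache.
 *   void *p = je_mallocx(sz,
 *       MALLOCX_LG_ALIGN(12) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
 *
 * The selectors compose with plain bitwise OR because they occupy
 * disjoint bit ranges: lg-alignment in bits 0-5, zeroing in bit 6,
 * tcache id from bit 8, and arena index from bit 20.
 */

/*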
*/ #define MALLCTL_ARENAS_DESTROYED 4097 #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) # define JEMALLOC_CXX_THROW throw() #else # define JEMALLOC_CXX_THROW #endif #if defined(_MSC_VER) # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # ifndef JEMALLOC_EXPORT # ifdef DLLEXPORT # define JEMALLOC_EXPORT __declspec(dllexport) # else # define JEMALLOC_EXPORT __declspec(dllimport) # endif # endif # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE __declspec(noinline) # ifdef __cplusplus # define JEMALLOC_NOTHROW __declspec(nothrow) # else # define JEMALLOC_NOTHROW # endif # define JEMALLOC_SECTION(s) __declspec(allocate(s)) # define JEMALLOC_RESTRICT_RETURN __declspec(restrict) # if _MSC_VER >= 1900 && !defined(__EDG__) # define JEMALLOC_ALLOCATOR __declspec(allocator) # else # define JEMALLOC_ALLOCATOR # endif #elif defined(JEMALLOC_HAVE_ATTR) # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) # ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE # define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) # define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) # else # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # endif # ifndef JEMALLOC_EXPORT # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) # else # define JEMALLOC_FORMAT_PRINTF(s, i) # endif # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) # define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #else # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # define JEMALLOC_EXPORT # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE # define JEMALLOC_NOTHROW # define JEMALLOC_SECTION(s) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #endif /* * The je_ prefix on the following public symbol declarations is an artifact * of namespace management, and should be omitted in application code unless * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). 
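 */

/*
 * Editor's note -- an illustrative sketch, not jemalloc source. The
 * mallctl() declared below is a sysctl-style get/set interface over a
 * period-separated namespace. Reading a global statistic (using the
 * standard "epoch" and "stats.allocated" mallctl names; see the
 * jemalloc manual) looks like:
 *
 *   uint64_t epoch = 1;
 *   size_t sz = sizeof(epoch);
 *   je_mallctl("epoch", &epoch, &sz, &epoch, sz);  // refresh stats
 *
 *   size_t allocated;
 *   sz = sizeof(allocated);
 *   if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
 *       // allocated == bytes currently allocated by the application
 *   }
 */

/*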
*/ extern JEMALLOC_EXPORT const char *je_malloc_conf; extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, const char *s); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_malloc(size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) JEMALLOC_CXX_THROW; JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, int flags) JEMALLOC_ALLOC_SIZE(2); JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, size_t extra, int flags); JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, int flags) JEMALLOC_ATTR(pure); JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags); JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) JEMALLOC_ATTR(pure); JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp); JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( void (*write_cb)(void *, const char *), void *je_cbopaque, const char *opts); JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); #endif typedef struct extent_hooks_s extent_hooks_t; /* * void * * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, * size_t alignment, bool *zero, bool *commit, unsigned arena_ind); */ typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *, bool *, unsigned); /* * bool * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, * bool committed, unsigned arena_ind); */ typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool, unsigned); /* * void * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, * bool committed, unsigned arena_ind); */ typedef void (extent_destroy_t)(extent_hooks_t 
*, void *, size_t, bool, unsigned); /* * bool * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, * size_t offset, size_t length, unsigned arena_ind); */ typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, unsigned); /* * bool * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, * size_t offset, size_t length, unsigned arena_ind); */ typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, unsigned); /* * bool * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size, * size_t offset, size_t length, unsigned arena_ind); */ typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t, unsigned); /* * bool * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size, * size_t size_a, size_t size_b, bool committed, unsigned arena_ind); */ typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t, bool, unsigned); /* * bool * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, * void *addr_b, size_t size_b, bool committed, unsigned arena_ind); */ typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t, bool, unsigned); struct extent_hooks_s { extent_alloc_t *alloc; extent_dalloc_t *dalloc; extent_destroy_t *destroy; extent_commit_t *commit; extent_decommit_t *decommit; extent_purge_t *purge_lazy; extent_purge_t *purge_forced; extent_split_t *split; extent_merge_t *merge; }; /* * By default application code must explicitly refer to mangled symbol names, * so that it is possible to use jemalloc in conjunction with another allocator * in the same application. Define JEMALLOC_MANGLE in order to cause automatic * name mangling that matches the API prefixing that happened as a result of * --with-mangling and/or --with-jemalloc-prefix configuration settings. */ #ifdef JEMALLOC_MANGLE # ifndef JEMALLOC_NO_DEMANGLE # define JEMALLOC_NO_DEMANGLE # endif # define aligned_alloc je_aligned_alloc # define calloc je_calloc # define dallocx je_dallocx # define free je_free # define mallctl je_mallctl # define mallctlbymib je_mallctlbymib # define mallctlnametomib je_mallctlnametomib # define malloc je_malloc # define malloc_conf je_malloc_conf # define malloc_message je_malloc_message # define malloc_stats_print je_malloc_stats_print # define malloc_usable_size je_malloc_usable_size # define mallocx je_mallocx # define smallocx_0000000000000000000000000000000000000000 je_smallocx_0000000000000000000000000000000000000000 # define nallocx je_nallocx # define posix_memalign je_posix_memalign # define rallocx je_rallocx # define realloc je_realloc # define sallocx je_sallocx # define sdallocx je_sdallocx # define xallocx je_xallocx # define valloc je_valloc #endif /* * The je_* macros can be used as stable alternative names for the * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily * meant for use in jemalloc itself, but it can be used by application code to * provide isolation from the name mangling specified via --with-mangling * and/or --with-jemalloc-prefix. 
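 */

/*
 * Editor's note -- an illustrative sketch, not jemalloc source. The
 * extent_hooks_s table above lets an application take over extent
 * management for an arena. A skeleton overriding only allocation (all
 * demo_* names are hypothetical) might read:
 *
 *   static void *
 *   demo_extent_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
 *       size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
 *       // e.g. carve from a reserved mapping; NULL reports failure
 *       return NULL;
 *   }
 *
 *   static extent_hooks_t demo_hooks = {
 *       demo_extent_alloc,
 *       // remaining members: dalloc, destroy, commit, decommit,
 *       // purge_lazy, purge_forced, split, merge
 *   };
 *
 * installed by writing &demo_hooks through the "arena.<i>.extent_hooks"
 * mallctl. Consult the jemalloc manual for which hooks may be left
 * unimplemented before relying on a partial table like this one.
 */

/*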
*/ #ifndef JEMALLOC_NO_DEMANGLE # undef je_aligned_alloc # undef je_calloc # undef je_dallocx # undef je_free # undef je_mallctl # undef je_mallctlbymib # undef je_mallctlnametomib # undef je_malloc # undef je_malloc_conf # undef je_malloc_message # undef je_malloc_stats_print # undef je_malloc_usable_size # undef je_mallocx # undef je_smallocx_0000000000000000000000000000000000000000 # undef je_nallocx # undef je_posix_memalign # undef je_rallocx # undef je_realloc # undef je_sallocx # undef je_sdallocx # undef je_xallocx # undef je_valloc #endif #ifdef __cplusplus } #endif #endif /* JEMALLOC_H_ */ jemalloc-sys-0.3.2/rep/include/jemalloc/jemalloc.sh010075500007650000024000000007111344617474000204720ustar0000000000000000#!/bin/sh objroot=$1 cat < #include #include #include #include #define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000" #define JEMALLOC_VERSION_MAJOR 0 #define JEMALLOC_VERSION_MINOR 0 #define JEMALLOC_VERSION_BUGFIX 0 #define JEMALLOC_VERSION_NREV 0 #define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000" #define JEMALLOC_VERSION_GID_IDENT 0000000000000000000000000000000000000000 #define MALLOCX_LG_ALIGN(la) ((int)(la)) #if LG_SIZEOF_PTR == 2 # define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) #else # define MALLOCX_ALIGN(a) \ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ ffs((int)(((size_t)(a))>>32))+31)) #endif #define MALLOCX_ZERO ((int)0x40) /* * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 * encodes MALLOCX_TCACHE_NONE. */ #define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) #define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) /* * Bias arena index bits so that 0 encodes "use an automatically chosen arena". */ #define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) /* * Use as arena index in "arena..{purge,decay,dss}" and * "stats.arenas..*" mallctl interfaces to select all arenas. This * definition is intentionally specified in raw decimal format to support * cpp-based string concatenation, e.g. * * #define STRINGIFY_HELPER(x) #x * #define STRINGIFY(x) STRINGIFY_HELPER(x) * * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, * 0); */ #define MALLCTL_ARENAS_ALL 4096 /* * Use as arena index in "stats.arenas..*" mallctl interfaces to select * destroyed arenas. 
*/ #define MALLCTL_ARENAS_DESTROYED 4097 #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) # define JEMALLOC_CXX_THROW throw() #else # define JEMALLOC_CXX_THROW #endif #if defined(_MSC_VER) # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # ifndef JEMALLOC_EXPORT # ifdef DLLEXPORT # define JEMALLOC_EXPORT __declspec(dllexport) # else # define JEMALLOC_EXPORT __declspec(dllimport) # endif # endif # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE __declspec(noinline) # ifdef __cplusplus # define JEMALLOC_NOTHROW __declspec(nothrow) # else # define JEMALLOC_NOTHROW # endif # define JEMALLOC_SECTION(s) __declspec(allocate(s)) # define JEMALLOC_RESTRICT_RETURN __declspec(restrict) # if _MSC_VER >= 1900 && !defined(__EDG__) # define JEMALLOC_ALLOCATOR __declspec(allocator) # else # define JEMALLOC_ALLOCATOR # endif #elif defined(JEMALLOC_HAVE_ATTR) # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) # ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE # define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) # define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) # else # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # endif # ifndef JEMALLOC_EXPORT # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) # else # define JEMALLOC_FORMAT_PRINTF(s, i) # endif # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) # define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #else # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # define JEMALLOC_EXPORT # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE # define JEMALLOC_NOTHROW # define JEMALLOC_SECTION(s) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #endif jemalloc-sys-0.3.2/rep/include/jemalloc/jemalloc_macros.h.in010064400007650000024000000100001344617474000222450ustar0000000000000000#include #include #include #include #include #define JEMALLOC_VERSION "@jemalloc_version@" #define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ #define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ #define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" #define JEMALLOC_VERSION_GID_IDENT @jemalloc_version_gid@ #define MALLOCX_LG_ALIGN(la) ((int)(la)) #if LG_SIZEOF_PTR == 2 # define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) #else # define MALLOCX_ALIGN(a) \ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ ffs((int)(((size_t)(a))>>32))+31)) #endif #define MALLOCX_ZERO ((int)0x40) /* * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 * encodes MALLOCX_TCACHE_NONE. */ #define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) #define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) /* * Bias arena index bits so that 0 encodes "use an automatically chosen arena". 
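 */

/*
 * Editor's note -- an illustrative sketch, not jemalloc source. A common
 * pattern is to create a dedicated arena and steer allocations to it
 * through the flags argument ("arenas.create" is the standard jemalloc
 * 5.x mallctl for minting a new arena):
 *
 *   unsigned ind;
 *   size_t sz = sizeof(ind);
 *   if (je_mallctl("arenas.create", &ind, &sz, NULL, 0) == 0) {
 *       void *p = je_mallocx(4096, MALLOCX_ARENA(ind));
 *       // ... p lives in the new arena ...
 *       je_dallocx(p, 0);
 *   }
 *
 * The +1 bias in MALLOCX_ARENA() below is what lets a flags value of 0
 * keep meaning "pick an arena automatically".
 */

/*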
*/ #define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) /* * Use as arena index in "arena..{purge,decay,dss}" and * "stats.arenas..*" mallctl interfaces to select all arenas. This * definition is intentionally specified in raw decimal format to support * cpp-based string concatenation, e.g. * * #define STRINGIFY_HELPER(x) #x * #define STRINGIFY(x) STRINGIFY_HELPER(x) * * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, * 0); */ #define MALLCTL_ARENAS_ALL 4096 /* * Use as arena index in "stats.arenas..*" mallctl interfaces to select * destroyed arenas. */ #define MALLCTL_ARENAS_DESTROYED 4097 #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) # define JEMALLOC_CXX_THROW throw() #else # define JEMALLOC_CXX_THROW #endif #if defined(_MSC_VER) # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # ifndef JEMALLOC_EXPORT # ifdef DLLEXPORT # define JEMALLOC_EXPORT __declspec(dllexport) # else # define JEMALLOC_EXPORT __declspec(dllimport) # endif # endif # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE __declspec(noinline) # ifdef __cplusplus # define JEMALLOC_NOTHROW __declspec(nothrow) # else # define JEMALLOC_NOTHROW # endif # define JEMALLOC_SECTION(s) __declspec(allocate(s)) # define JEMALLOC_RESTRICT_RETURN __declspec(restrict) # if _MSC_VER >= 1900 && !defined(__EDG__) # define JEMALLOC_ALLOCATOR __declspec(allocator) # else # define JEMALLOC_ALLOCATOR # endif #elif defined(JEMALLOC_HAVE_ATTR) # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) # ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE # define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) # define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) # else # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # endif # ifndef JEMALLOC_EXPORT # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) # else # define JEMALLOC_FORMAT_PRINTF(s, i) # endif # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) # define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #else # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # define JEMALLOC_EXPORT # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE # define JEMALLOC_NOTHROW # define JEMALLOC_SECTION(s) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #endif jemalloc-sys-0.3.2/rep/include/jemalloc/jemalloc_mangle.h010064400007650000024000000043031344617503200216230ustar0000000000000000/* * By default application code must explicitly refer to mangled symbol names, * so that it is possible to use jemalloc in conjunction with another allocator * in the same application. Define JEMALLOC_MANGLE in order to cause automatic * name mangling that matches the API prefixing that happened as a result of * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
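 */

/*
 * Editor's note -- an illustrative sketch, not jemalloc source. Defining
 * JEMALLOC_MANGLE before including the public header routes the standard
 * allocator names to jemalloc without touching call sites:
 *
 *   #define JEMALLOC_MANGLE
 *   #include <jemalloc/jemalloc.h>
 *
 *   void f(void) {
 *       void *p = malloc(32);   // preprocesses to je_malloc(32)
 *       free(p);                // preprocesses to je_free(p)
 *   }
 *
 * Without JEMALLOC_MANGLE the same code must call je_malloc()/je_free()
 * explicitly, or rely on link-time symbol interposition.
 */

/*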
*/ #ifdef JEMALLOC_MANGLE # ifndef JEMALLOC_NO_DEMANGLE # define JEMALLOC_NO_DEMANGLE # endif # define aligned_alloc je_aligned_alloc # define calloc je_calloc # define dallocx je_dallocx # define free je_free # define mallctl je_mallctl # define mallctlbymib je_mallctlbymib # define mallctlnametomib je_mallctlnametomib # define malloc je_malloc # define malloc_conf je_malloc_conf # define malloc_message je_malloc_message # define malloc_stats_print je_malloc_stats_print # define malloc_usable_size je_malloc_usable_size # define mallocx je_mallocx # define smallocx_0000000000000000000000000000000000000000 je_smallocx_0000000000000000000000000000000000000000 # define nallocx je_nallocx # define posix_memalign je_posix_memalign # define rallocx je_rallocx # define realloc je_realloc # define sallocx je_sallocx # define sdallocx je_sdallocx # define xallocx je_xallocx # define valloc je_valloc #endif /* * The je_* macros can be used as stable alternative names for the * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily * meant for use in jemalloc itself, but it can be used by application code to * provide isolation from the name mangling specified via --with-mangling * and/or --with-jemalloc-prefix. */ #ifndef JEMALLOC_NO_DEMANGLE # undef je_aligned_alloc # undef je_calloc # undef je_dallocx # undef je_free # undef je_mallctl # undef je_mallctlbymib # undef je_mallctlnametomib # undef je_malloc # undef je_malloc_conf # undef je_malloc_message # undef je_malloc_stats_print # undef je_malloc_usable_size # undef je_mallocx # undef je_smallocx_0000000000000000000000000000000000000000 # undef je_nallocx # undef je_posix_memalign # undef je_rallocx # undef je_realloc # undef je_sallocx # undef je_sdallocx # undef je_xallocx # undef je_valloc #endif jemalloc-sys-0.3.2/rep/include/jemalloc/jemalloc_mangle.sh010075500007650000024000000023561344617474000220240ustar0000000000000000#!/bin/sh -eu public_symbols_txt=$1 symbol_prefix=$2 cat < /* MSVC doesn't define _Bool or bool in C, but does have BOOL */ /* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ /* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as * a built-in type. */ #ifndef __clang__ typedef BOOL _Bool; #endif #define bool _Bool #define true 1 #define false 0 #define __bool_true_false_are_defined 1 #endif /* stdbool_h */ jemalloc-sys-0.3.2/rep/include/msvc_compat/C99/stdint.h010064400007650000024000000170601344617474000211210ustar0000000000000000// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. 
#if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define 
PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ] jemalloc-sys-0.3.2/rep/include/msvc_compat/strings.h010064400007650000024000000020311344617474000207310ustar0000000000000000#ifndef strings_h #define strings_h /* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided * for both */ #ifdef _MSC_VER # include # pragma intrinsic(_BitScanForward) static __forceinline int ffsl(long x) { unsigned long i; if (_BitScanForward(&i, x)) { return i + 1; } return 0; } static __forceinline int ffs(int x) { return ffsl(x); } # ifdef _M_X64 # pragma intrinsic(_BitScanForward64) # endif static __forceinline int ffsll(unsigned __int64 x) { unsigned long i; #ifdef _M_X64 if (_BitScanForward64(&i, x)) { return i + 1; } return 0; #else // Fallback for 32-bit build where 64-bit version not available // assuming little endian union { unsigned __int64 ll; unsigned long l[2]; } s; s.ll = x; if (_BitScanForward(&i, s.l[0])) { return i + 1; } else if(_BitScanForward(&i, s.l[1])) { return i + 33; } return 0; #endif } #else # define ffsll(x) __builtin_ffsll(x) # define ffsl(x) __builtin_ffsl(x) # define ffs(x) __builtin_ffs(x) #endif #endif /* strings_h */ jemalloc-sys-0.3.2/rep/include/msvc_compat/windows_extra.h010064400007650000024000000002061344617474000221370ustar0000000000000000#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H #define MSVC_COMPAT_WINDOWS_EXTRA_H #include #endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ jemalloc-sys-0.3.2/rep/jemalloc.pc010064400007650000024000000005631344617502700152520ustar0000000000000000prefix=/usr/local exec_prefix=/usr/local libdir=${exec_prefix}/lib includedir=${prefix}/include install_suffix= Name: jemalloc Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. URL: http://jemalloc.net/ Version: 0.0.0_0 Cflags: -I${includedir} Libs: -L${libdir} -ljemalloc${install_suffix} jemalloc-sys-0.3.2/rep/jemalloc.pc.in010064400007650000024000000007211344617474000156540ustar0000000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ install_suffix=@install_suffix@ Name: jemalloc Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. 
URL: http://jemalloc.net/ Version: @jemalloc_version_major@.@jemalloc_version_minor@.@jemalloc_version_bugfix@_@jemalloc_version_nrev@ Cflags: -I${includedir} Libs: -L${libdir} -ljemalloc${install_suffix} jemalloc-sys-0.3.2/rep/m4/ax_cxx_compile_stdcxx.m4010064400007650000024000000330001344617474000203120ustar0000000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html # =========================================================================== # # SYNOPSIS # # AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) # # DESCRIPTION # # Check for baseline language coverage in the compiler for the specified # version of the C++ standard. If necessary, add switches to CXX and # CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) # or '14' (for the C++14 standard). # # The second argument, if specified, indicates whether you insist on an # extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. # -std=c++11). If neither is specified, you get whatever works, with # preference for an extended mode. # # The third argument, if specified 'mandatory' or if left unspecified, # indicates that baseline support for the specified C++ standard is # required and that the macro should error out if no mode with that # support is found. If specified 'optional', then configuration proceeds # regardless, after defining HAVE_CXX${VERSION} if and only if a # supporting mode is found. # # LICENSE # # Copyright (c) 2008 Benjamin Kosnik # Copyright (c) 2012 Zack Weinberg # Copyright (c) 2013 Roy Stogner # Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov # Copyright (c) 2015 Paul Norman # Copyright (c) 2015 Moritz Klammler # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 4 dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro dnl (serial version number 13). 
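dnl
dnl Editor's note -- illustrative usage, not part of this macro file. A
dnl configure.ac typically invokes the macro as, e.g.:
dnl
dnl     AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
dnl
dnl which probes strict -std=c++14 style switches, appends the first one
dnl that works to CXX (and CXXCPP), and defines HAVE_CXX14 if and only if
dnl a supporting mode is found, as described in the SYNOPSIS above.
dnl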
AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl m4_if([$1], [11], [], [$1], [14], [], [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])], [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl m4_if([$2], [], [], [$2], [ext], [], [$2], [noext], [], [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true], [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true], [$3], [optional], [ax_cxx_compile_cxx$1_required=false], [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])]) AC_LANG_PUSH([C++])dnl ac_success=no AC_CACHE_CHECK(whether $CXX supports C++$1 features by default, ax_cv_cxx_compile_cxx$1, [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], [ax_cv_cxx_compile_cxx$1=yes], [ax_cv_cxx_compile_cxx$1=no])]) if test x$ax_cv_cxx_compile_cxx$1 = xyes; then ac_success=yes fi m4_if([$2], [noext], [], [dnl if test x$ac_success = xno; then for switch in -std=gnu++$1 -std=gnu++0x; do cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, $cachevar, [ac_save_CXX="$CXX" CXX="$CXX $switch" AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], [eval $cachevar=yes], [eval $cachevar=no]) CXX="$ac_save_CXX"]) if eval test x\$$cachevar = xyes; then CXX="$CXX $switch" if test -n "$CXXCPP" ; then CXXCPP="$CXXCPP $switch" fi ac_success=yes break fi done fi]) m4_if([$2], [ext], [], [dnl if test x$ac_success = xno; then dnl HP's aCC needs +std=c++11 according to: dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf dnl Cray's crayCC needs "-h std=c++11" for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, $cachevar, [ac_save_CXX="$CXX" CXX="$CXX $switch" AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], [eval $cachevar=yes], [eval $cachevar=no]) CXX="$ac_save_CXX"]) if eval test x\$$cachevar = xyes; then CXX="$CXX $switch" if test -n "$CXXCPP" ; then CXXCPP="$CXXCPP $switch" fi ac_success=yes break fi done fi]) AC_LANG_POP([C++]) if test x$ax_cxx_compile_cxx$1_required = xtrue; then if test x$ac_success = xno; then AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.]) fi fi if test x$ac_success = xno; then HAVE_CXX$1=0 AC_MSG_NOTICE([No compiler with C++$1 support was found]) else HAVE_CXX$1=1 AC_DEFINE(HAVE_CXX$1,1, [define if the compiler supports basic C++$1 syntax]) fi AC_SUBST(HAVE_CXX$1) ]) dnl Test body for checking C++11 support m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11], _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 ) dnl Test body for checking C++14 support m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14], _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 ) dnl Tests for new features in C++11 m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[ // If the compiler admits that it is not ready for C++11, why torture it? // Hopefully, this will speed up the test. 
#ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201103L #error "This is not a C++11 compiler" #else namespace cxx11 { namespace test_static_assert { template <typename T> struct check { static_assert(sizeof(int) <= sizeof(T), "not big enough"); }; } namespace test_final_override { struct Base { virtual void f() {} }; struct Derived : public Base { virtual void f() override {} }; } namespace test_double_right_angle_brackets { template < typename T > struct check {}; typedef check<void> single_type; typedef check<check<void>> double_type; typedef check<check<check<void>>> triple_type; typedef check<check<check<check<void>>>> quadruple_type; } namespace test_decltype { int f() { int a = 1; decltype(a) b = 2; return a + b; } } namespace test_type_deduction { template < typename T1, typename T2 > struct is_same { static const bool value = false; }; template < typename T > struct is_same<T, T> { static const bool value = true; }; template < typename T1, typename T2 > auto add(T1 a1, T2 a2) -> decltype(a1 + a2) { return a1 + a2; } int test(const int c, volatile int v) { static_assert(is_same<int, int>::value == true, ""); static_assert(is_same<int, const int>::value == false, ""); static_assert(is_same<int, volatile int>::value == false, ""); auto ac = c; auto av = v; auto sumi = ac + av + 'x'; auto sumf = ac + av + 1.0; static_assert(is_same<decltype(ac), int>::value == true, ""); static_assert(is_same<decltype(av), int>::value == true, ""); static_assert(is_same<decltype(sumi), int>::value == true, ""); static_assert(is_same<decltype(sumi), decltype(sumf)>::value == false, ""); static_assert(is_same<decltype(sumf), double>::value == true, ""); return (sumf > 0.0) ? sumi : add(c, v); } } namespace test_noexcept { int f() { return 0; } int g() noexcept { return 0; } static_assert(noexcept(f()) == false, ""); static_assert(noexcept(g()) == true, ""); } namespace test_constexpr { template < typename CharT > unsigned long constexpr strlen_c_r(const CharT *const s, const unsigned long acc) noexcept { return *s ?
strlen_c_r(s + 1, acc + 1) : acc; } template < typename CharT > unsigned long constexpr strlen_c(const CharT *const s) noexcept { return strlen_c_r(s, 0UL); } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("1") == 1UL, ""); static_assert(strlen_c("example") == 7UL, ""); static_assert(strlen_c("another\0example") == 7UL, ""); } namespace test_rvalue_references { template < int N > struct answer { static constexpr int value = N; }; answer<1> f(int&) { return answer<1>(); } answer<2> f(const int&) { return answer<2>(); } answer<3> f(int&&) { return answer<3>(); } void test() { int i = 0; const int c = 0; static_assert(decltype(f(i))::value == 1, ""); static_assert(decltype(f(c))::value == 2, ""); static_assert(decltype(f(0))::value == 3, ""); } } namespace test_uniform_initialization { struct test { static const int zero {}; static const int one {1}; }; static_assert(test::zero == 0, ""); static_assert(test::one == 1, ""); } namespace test_lambdas { void test1() { auto lambda1 = [](){}; auto lambda2 = lambda1; lambda1(); lambda2(); } int test2() { auto a = [](int i, int j){ return i + j; }(1, 2); auto b = []() -> int { return '0'; }(); auto c = [=](){ return a + b; }(); auto d = [&](){ return c; }(); auto e = [a, &b](int x) mutable { const auto identity = [](int y){ return y; }; for (auto i = 0; i < a; ++i) a += b--; return x + identity(a + b); }(0); return a + b + c + d + e; } int test3() { const auto nullary = [](){ return 0; }; const auto unary = [](int x){ return x; }; using nullary_t = decltype(nullary); using unary_t = decltype(unary); const auto higher1st = [](nullary_t f){ return f(); }; const auto higher2nd = [unary](nullary_t f1){ return [unary, f1](unary_t f2){ return f2(unary(f1())); }; }; return higher1st(nullary) + higher2nd(nullary)(unary); } } namespace test_variadic_templates { template <int...> struct sum; template <int N0, int... N1toMax> struct sum<N0, N1toMax...> { static constexpr auto value = N0 + sum<N1toMax...>::value; }; template <> struct sum<> { static constexpr auto value = 0; }; static_assert(sum<>::value == 0, ""); static_assert(sum<1>::value == 1, ""); static_assert(sum<23>::value == 23, ""); static_assert(sum<1, 2>::value == 3, ""); static_assert(sum<5, 5, 11>::value == 21, ""); static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); } // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function // because of this. namespace test_template_alias_sfinae { struct foo {}; template <typename T> using member = typename T::member_type; template <typename T> void func(...) {} template <typename T> void func(member<T>*) {} void test(); void test() { func<foo>(0); } } } // namespace cxx11 #endif // __cplusplus >= 201103L ]]) dnl Tests for new features in C++14 m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[ // If the compiler admits that it is not ready for C++14, why torture it? // Hopefully, this will speed up the test. #ifndef __cplusplus #error "This is not a C++ compiler" #elif __cplusplus < 201402L #error "This is not a C++14 compiler" #else namespace cxx14 { namespace test_polymorphic_lambdas { int test() { const auto lambda = [](auto&&... args){ const auto istiny = [](auto x){ return (sizeof(x) == 1UL) ? 1 : 0; }; const int aretiny[] = { istiny(args)...
}; return aretiny[0]; }; return lambda(1, 1L, 1.0f, '1'); } } namespace test_binary_literals { constexpr auto ivii = 0b0000000000101010; static_assert(ivii == 42, "wrong value"); } namespace test_generalized_constexpr { template < typename CharT > constexpr unsigned long strlen_c(const CharT *const s) noexcept { auto length = 0UL; for (auto p = s; *p; ++p) ++length; return length; } static_assert(strlen_c("") == 0UL, ""); static_assert(strlen_c("x") == 1UL, ""); static_assert(strlen_c("test") == 4UL, ""); static_assert(strlen_c("another\0test") == 7UL, ""); } namespace test_lambda_init_capture { int test() { auto x = 0; const auto lambda1 = [a = x](int b){ return a + b; }; const auto lambda2 = [a = lambda1(x)](){ return a; }; return lambda2(); } } namespace test_digit_seperators { constexpr auto ten_million = 100'000'000; static_assert(ten_million == 100000000, ""); } namespace test_return_type_deduction { auto f(int& x) { return x; } decltype(auto) g(int& x) { return x; } template < typename T1, typename T2 > struct is_same { static constexpr auto value = false; }; template < typename T > struct is_same<T, T> { static constexpr auto value = true; }; int test() { auto x = 0; static_assert(is_same<int, decltype(f(x))>::value, ""); static_assert(is_same<int&, decltype(g(x))>::value, ""); return x; } } } // namespace cxx14 #endif // __cplusplus >= 201402L ]]) jemalloc-sys-0.3.2/rep/msvc/ReadMe.txt010064400007650000024000000010331344617474000160010ustar0000000000000000 How to build jemalloc for Windows ================================= 1. Install Cygwin with at least the following packages: * autoconf * autogen * gawk * grep * sed 2. Install Visual Studio 2015 or 2017 with Visual C++ 3. Add Cygwin\bin to the PATH environment variable 4. Open "x64 Native Tools Command Prompt for VS 2017" (note: x86/x64 doesn't matter at this point) 5. Generate header files: sh -c "CC=cl ./autogen.sh" 6.
Now the project can be opened and built in Visual Studio: msvc\jemalloc_vc2017.sln jemalloc-sys-0.3.2/rep/msvc/jemalloc_vc2015.sln010064400007650000024000000074501344617474000174170ustar0000000000000000 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 14 VisualStudioVersion = 14.0.24720.0 MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" ProjectSection(SolutionItems) = preProject ReadMe.txt = ReadMe.txt EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Debug-static|x64 = Debug-static|x64 Debug-static|x86 = Debug-static|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 Release-static|x64 = Release-static|x64 Release-static|x86 = Release-static|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal jemalloc-sys-0.3.2/rep/msvc/jemalloc_vc2017.sln010064400007650000024000000074501344617474000174210ustar0000000000000000 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 14 VisualStudioVersion = 14.0.24720.0 MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" ProjectSection(SolutionItems) = preProject ReadMe.txt = ReadMe.txt EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2017\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2017\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Debug-static|x64 = Debug-static|x64 Debug-static|x86 = Debug-static|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 Release-static|x64 = Release-static|x64 Release-static|x86 = Release-static|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal jemalloc-sys-0.3.2/rep/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj010064400007650000024000000454461344617474000236640ustar0000000000000000 Debug-static Win32 Debug-static x64 Debug Win32 Release-static Win32 Release-static x64 Release Win32 Debug x64 Release x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A} Win32Proj jemalloc 8.1 DynamicLibrary true v140 MultiByte StaticLibrary true v140 MultiByte DynamicLibrary false v140 true MultiByte StaticLibrary false v140 true MultiByte DynamicLibrary true v140 MultiByte StaticLibrary true v140 MultiByte DynamicLibrary false v140 true MultiByte StaticLibrary false v140 true MultiByte $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)d $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-$(PlatformToolset)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-$(PlatformToolset)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)d $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug 4090;4146;4267;4334 OldStyle false Windows true Level3 MaxSpeed true true 
JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded 4090;4146;4267;4334 OldStyle Windows true true true jemalloc-sys-0.3.2/rep/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters010064400007650000024000000070401344617474000253170ustar0000000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files jemalloc-sys-0.3.2/rep/msvc/projects/vc2015/test_threads/test_threads.vcxproj010064400007650000024000000455261344617474000254710ustar0000000000000000 Debug-static Win32 Debug-static x64 Debug Win32 Release-static Win32 Release-static x64 Release Win32 Debug x64 Release x64 {09028CFD-4EB7-491D-869C-0708DB97ED44} Win32Proj test_threads 8.1 Application true v140 MultiByte Application true v140 MultiByte Application false v140 true MultiByte Application false v140 true MultiByte Application true v140 MultiByte Application true v140 MultiByte Application false v140 true MultiByte Application false v140 true MultiByte $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ true $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ true true $(SolutionDir)$(Platform)\$(Configuration)\ true $(SolutionDir)$(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false Level3 Disabled WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true $(SolutionDir)$(Platform)\$(Configuration) jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 Disabled JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 
MultiThreadedDebug Console true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 Disabled _DEBUG;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) $(SolutionDir)$(Platform)\$(Configuration) Level3 Disabled JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug Console true jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) $(SolutionDir)$(Platform)\$(Configuration) Level3 MaxSpeed true true WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) {8d6bb292-9e1c-413d-9f98-4864bdc1514a} jemalloc-sys-0.3.2/rep/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters010064400007650000024000000017251344617474000271310ustar0000000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx {93995380-89BD-4b04-88EB-625FBE52EBFB} h;hh;hpp;hxx;hm;inl;inc;xsd Source Files Source Files Header Files 
jemalloc-sys-0.3.2/rep/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj010064400007650000024000000452421344617474000236600ustar0000000000000000 Debug-static Win32 Debug-static x64 Debug Win32 Release-static Win32 Release-static x64 Release Win32 Debug x64 Release x64 {8D6BB292-9E1C-413D-9F98-4864BDC1514A} Win32Proj jemalloc DynamicLibrary true v141 MultiByte StaticLibrary true v141 MultiByte DynamicLibrary false v141 true MultiByte StaticLibrary false v141 true MultiByte DynamicLibrary true v141 MultiByte StaticLibrary true v141 MultiByte DynamicLibrary false v141 true MultiByte StaticLibrary false v141 true MultiByte $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)d $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-$(PlatformToolset)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-$(PlatformToolset)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)d $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) Level3 Disabled _REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true Level3 Disabled JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug 4090;4146;4267;4334 OldStyle false Windows true Level3 MaxSpeed true true _REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true _REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) 4090;4146;4267;4334 $(OutputPath)$(TargetName).pdb Windows true true true Level3 MaxSpeed true true JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded 
4090;4146;4267;4334 OldStyle Windows true true true jemalloc-sys-0.3.2/rep/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters010064400007650000024000000072141344617474000253240ustar0000000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files Source Files jemalloc-sys-0.3.2/rep/msvc/projects/vc2017/test_threads/test_threads.vcxproj010064400007650000024000000454211344617474000254650ustar0000000000000000 Debug-static Win32 Debug-static x64 Debug Win32 Release-static Win32 Release-static x64 Release Win32 Debug x64 Release x64 {09028CFD-4EB7-491D-869C-0708DB97ED44} Win32Proj test_threads Application true v141 MultiByte Application true v141 MultiByte Application false v141 true MultiByte Application false v141 true MultiByte Application true v141 MultiByte Application true v141 MultiByte Application false v141 true MultiByte Application false v141 true MultiByte $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ true $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ true true $(SolutionDir)$(Platform)\$(Configuration)\ true $(SolutionDir)$(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false $(SolutionDir)$(Platform)\$(Configuration)\ $(Platform)\$(Configuration)\ false Level3 Disabled WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true $(SolutionDir)$(Platform)\$(Configuration) jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 Disabled JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug Console true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 Disabled _DEBUG;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) $(SolutionDir)$(Platform)\$(Configuration) Level3 Disabled JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreadedDebug Console true 
jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) $(SolutionDir)$(Platform)\$(Configuration) Level3 MaxSpeed true true WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) Level3 MaxSpeed true true JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) MultiThreaded Console true true true $(SolutionDir)$(Platform)\$(Configuration) jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) {8d6bb292-9e1c-413d-9f98-4864bdc1514a} jemalloc-sys-0.3.2/rep/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters010064400007650000024000000017251344617474000271330ustar0000000000000000 {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx {93995380-89BD-4b04-88EB-625FBE52EBFB} h;hh;hpp;hxx;hm;inl;inc;xsd Source Files Source Files Header Files jemalloc-sys-0.3.2/rep/msvc/test_threads/test_threads.cpp010064400007650000024000000061511344617474000217760ustar0000000000000000// jemalloc C++ threaded test // Author: Rustam Abdullaev // Public Domain #include <jemalloc/jemalloc.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <thread> #include <vector> #include <random> using std::vector; using std::thread; using std::uniform_int_distribution; using std::minstd_rand; int test_threads() { je_malloc_conf = "narenas:3"; int narenas = 0; size_t sz = sizeof(narenas); je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0); if (narenas != 3) { printf("Error: unexpected number of arenas: %d\n", narenas); return 1; } static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 }; static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0])); vector<thread> workers; static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50; je_malloc_stats_print(NULL, NULL, NULL); size_t allocated1;
size_t sz1 = sizeof(allocated1); je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0); printf("\nPress Enter to start threads...\n"); getchar(); printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2); for (int i = 0; i < numThreads; i++) { workers.emplace_back([tid=i]() { uniform_int_distribution<int> sizeDist(0, numSizes - 1); minstd_rand rnd(tid * 17); uint8_t* ptrs[numAllocsMax]; int ptrsz[numAllocsMax]; for (int i = 0; i < numIter1; ++i) { thread t([&]() { for (int i = 0; i < numIter2; ++i) { const int numAllocs = numAllocsMax - sizeDist(rnd); for (int j = 0; j < numAllocs; j += 64) { const int x = sizeDist(rnd); const int sz = sizes[x]; ptrsz[j] = sz; ptrs[j] = (uint8_t*)je_malloc(sz); if (!ptrs[j]) { printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x); exit(1); } for (int k = 0; k < sz; k++) ptrs[j][k] = tid + k; } for (int j = 0; j < numAllocs; j += 64) { for (int k = 0, sz = ptrsz[j]; k < sz; k++) if (ptrs[j][k] != (uint8_t)(tid + k)) { printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k)); exit(1); } je_free(ptrs[j]); } } }); t.join(); } }); } for (thread& t : workers) { t.join(); } je_malloc_stats_print(NULL, NULL, NULL); size_t allocated2; je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0); size_t leaked = allocated2 - allocated1; printf("\nDone. Leaked: %zd bytes\n", leaked); bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet) printf("\nTest %s!\n", (failed ? "FAILED" : "successful")); printf("\nPress Enter to continue...\n"); getchar(); return failed ? 1 : 0; } jemalloc-sys-0.3.2/rep/msvc/test_threads/test_threads.h010064400007650000024000000000421344617474000214360ustar0000000000000000#pragma once int test_threads(); jemalloc-sys-0.3.2/rep/msvc/test_threads/test_threads_main.cpp010064400007650000024000000003101344617474000227910ustar0000000000000000#include "test_threads.h" #include <chrono> using namespace std::chrono_literals; int main(int argc, char** argv) { int rc = test_threads(); return rc; } jemalloc-sys-0.3.2/rep/run_tests.sh010075500007650000024000000000601344617474000155170ustar0000000000000000$(dirname "$0")/scripts/gen_run_tests.py | bash jemalloc-sys-0.3.2/rep/scripts/gen_run_tests.py010075500007650000024000000075021344617474000200630ustar0000000000000000#!/usr/bin/env python import sys from itertools import combinations from os import uname from multiprocessing import cpu_count from subprocess import call # Later, we want to test extended vaddr support. Apparently, the "real" way of # checking this is flaky on OS X.
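The test_threads sources above exercise the je_mallctl read pattern twice (opt.narenas at startup, stats.active before and after the worker loops). A minimal standalone sketch of that pattern, with the error handling the test omits; the je_ prefix is an assumption matching the test's build of jemalloc, and an unprefixed build exposes the same calls as mallctl:

// Minimal sketch of the mallctl read pattern used by test_threads.cpp.
// Assumption: a je_-prefixed jemalloc, as linked by the projects above.
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main() {
    unsigned narenas = 0;
    size_t sz = sizeof(narenas);
    // Read-only query: oldp/oldlenp receive the value; newp/newlen stay null.
    if (je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0) != 0) {
        fprintf(stderr, "mallctl(\"opt.narenas\") failed\n");
        return 1;
    }
    printf("opt.narenas = %u\n", narenas);
    size_t active = 0;
    sz = sizeof(active);
    // stats.* values are snapshots; long-running callers refresh them by
    // writing to the "epoch" mallctl first. A one-shot read is fine here.
    if (je_mallctl("stats.active", (void *)&active, &sz, NULL, 0) == 0) {
        printf("stats.active = %zu bytes\n", active);
    }
    return 0;
}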
bits_64 = sys.maxsize > 2**32 nparallel = cpu_count() * 2 uname = uname()[0] if "BSD" in uname: make_cmd = 'gmake' else: make_cmd = 'make' def powerset(items): result = [] for i in xrange(len(items) + 1): result += combinations(items, i) return result possible_compilers = [] for cc, cxx in (['gcc', 'g++'], ['clang', 'clang++']): try: cmd_ret = call([cc, "-v"]) if cmd_ret == 0: possible_compilers.append((cc, cxx)) except: pass possible_compiler_opts = [ '-m32', ] possible_config_opts = [ '--enable-debug', '--enable-prof', '--disable-stats', ] if bits_64: possible_config_opts.append('--with-lg-vaddr=56') possible_malloc_conf_opts = [ 'tcache:false', 'dss:primary', 'percpu_arena:percpu', 'background_thread:true', ] print 'set -e' print 'if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd} print 'autoconf' print 'rm -rf run_tests.out' print 'mkdir run_tests.out' print 'cd run_tests.out' ind = 0 for cc, cxx in possible_compilers: for compiler_opts in powerset(possible_compiler_opts): for config_opts in powerset(possible_config_opts): for malloc_conf_opts in powerset(possible_malloc_conf_opts): if cc == 'clang' \ and '-m32' in possible_compiler_opts \ and '--enable-prof' in config_opts: continue config_line = ( 'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror ' + 'CC="{} {}" '.format(cc, " ".join(compiler_opts)) + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts)) + '../../configure ' + " ".join(config_opts) + (' --with-malloc-conf=' + ",".join(malloc_conf_opts) if len(malloc_conf_opts) > 0 else '') ) # We don't want to test large vaddr spaces in 32-bit mode. if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in config_opts): continue # Per CPU arenas are only supported on Linux. linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \ or 'background_thread:true' in malloc_conf_opts) # Heap profiling and dss are not supported on OS X. darwin_unsupported = ('--enable-prof' in config_opts or \ 'dss:primary' in malloc_conf_opts) if (uname == 'Linux' and linux_supported) \ or (not linux_supported and (uname != 'Darwin' or \ not darwin_unsupported)): print """cat <<EOF > run_test_%(ind)d.sh #!/bin/sh set -e abort() { echo "==> Error" >> run_test.log echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log" exit 255 # Special exit code tells xargs to terminate. } # Environment variables are not supported.
run_cmd() { echo "==> \$@" >> run_test.log \$@ >> run_test.log 2>&1 || abort } echo "=> run_test_%(ind)d: %(config_line)s" mkdir run_test_%(ind)d.out cd run_test_%(ind)d.out echo "==> %(config_line)s" >> run_test.log %(config_line)s >> run_test.log 2>&1 || abort run_cmd %(make_cmd)s all tests run_cmd %(make_cmd)s check run_cmd %(make_cmd)s distclean EOF chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd} ind += 1 print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel} jemalloc-sys-0.3.2/rep/scripts/gen_travis.py010075500007650000024000000110621344617474000173420ustar0000000000000000#!/usr/bin/env python from itertools import combinations travis_template = """\ language: generic dist: precise matrix: include: %s before_script: - autoconf - scripts/gen_travis.py > travis_script && diff .travis.yml travis_script - ./configure ${COMPILER_FLAGS:+ \ CC="$CC $COMPILER_FLAGS" \ CXX="$CXX $COMPILER_FLAGS" } \ $CONFIGURE_FLAGS - make -j3 - make -j3 tests script: - make check """ # The 'default' configuration is gcc, on linux, with no compiler or configure # flags. We also test with clang, osx, -m32, and the configure and # --with-malloc-conf settings listed below. To avoid abusing travis though, we # don't test all 2**11 = 2048 possible combinations of these 11 'unusual' # settings; instead, we only test combinations of up to MAX_UNUSUAL_OPTIONS of # them at once, under the hope that bugs involving interactions of such # settings are rare. With at most 2 unusual settings at once, that is # C(11, 0) + C(11, 1) + C(11, 2) = 67 candidate jobs, some of which format_job() # later filters out as unsupported. MAX_UNUSUAL_OPTIONS = 2 os_default = 'linux' os_unusual = 'osx' compilers_default = 'CC=gcc CXX=g++' compilers_unusual = 'CC=clang CXX=clang++' compiler_flag_unusuals = ['-m32'] configure_flag_unusuals = [ '--enable-debug', '--enable-prof', '--disable-stats', '--disable-libdl', ] malloc_conf_unusuals = [ 'tcache:false', 'dss:primary', 'percpu_arena:percpu', 'background_thread:true', ] all_unusuals = ( [os_unusual] + [compilers_unusual] + compiler_flag_unusuals + configure_flag_unusuals + malloc_conf_unusuals ) unusual_combinations_to_test = [] for i in xrange(MAX_UNUSUAL_OPTIONS + 1): unusual_combinations_to_test += combinations(all_unusuals, i) gcc_multilib_set = False # Formats a job from a combination of flags def format_job(combination): global gcc_multilib_set os = os_unusual if os_unusual in combination else os_default compilers = compilers_unusual if compilers_unusual in combination else compilers_default compiler_flags = [x for x in combination if x in compiler_flag_unusuals] configure_flags = [x for x in combination if x in configure_flag_unusuals] malloc_conf = [x for x in combination if x in malloc_conf_unusuals] # Filter out unsupported configurations on OS X. if os == 'osx' and ('dss:primary' in malloc_conf or \ 'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \ in malloc_conf): return "" if len(malloc_conf) > 0: configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf)) # Filter out an unsupported configuration - heap profiling on OS X. if os == 'osx' and '--enable-prof' in configure_flags: return "" # We get some spurious errors when -Warray-bounds is enabled.
env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" ' 'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format( compilers, " ".join(compiler_flags), " ".join(configure_flags)) job = "" job += ' - os: %s\n' % os job += ' env: %s\n' % env_string if '-m32' in combination and os == 'linux': job += ' addons:' if gcc_multilib_set: job += ' *gcc_multilib\n' else: job += ' &gcc_multilib\n' job += ' apt:\n' job += ' packages:\n' job += ' - gcc-multilib\n' gcc_multilib_set = True return job include_rows = "" for combination in unusual_combinations_to_test: include_rows += format_job(combination) # Development build include_rows += '''\ # Development build - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" ''' # Enable-experimental-smallocx include_rows += '''\ # --enable-experimental-smallocx: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" ''' # Valgrind build bots include_rows += ''' # Valgrind - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind" addons: apt: packages: - valgrind ''' # To enable valgrind on macosx add: # # - os: osx # env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind" # install: brew install valgrind # # It currently fails due to: https://github.com/jemalloc/jemalloc/issues/1274 print travis_template % include_rows jemalloc-sys-0.3.2/rep/src/arena.c010064400007650000024000002041511344617474000151610ustar0000000000000000#define JEMALLOC_ARENA_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/div.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/util.h" JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS /******************************************************************************/ /* Data. */ /* * Define names for both uninitialized and initialized phases, so that * options and mallctl processing are straightforward. */ const char *percpu_arena_mode_names[] = { "percpu", "phycpu", "disabled", "percpu", "phycpu" }; percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; static atomic_zd_t dirty_decay_ms_default; static atomic_zd_t muzzy_decay_ms_default; const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { #define STEP(step, h, x, y) \ h, SMOOTHSTEP #undef STEP }; static div_info_t arena_binind_div_info[SC_NBINS]; size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; static unsigned huge_arena_ind; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition.
*/ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, bool is_background_thread); static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin); static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin); /******************************************************************************/ void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy) { *nthreads += arena_nthreads_get(arena, false); *dss = dss_prec_names[arena_dss_prec_get(arena)]; *dirty_decay_ms = arena_dirty_decay_ms_get(arena); *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); *ndirty += extents_npages_get(&arena->extents_dirty); *nmuzzy += extents_npages_get(&arena->extents_muzzy); } void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, bin_stats_t *bstats, arena_stats_large_t *lstats, arena_stats_extents_t *estats) { cassert(config_stats); arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, muzzy_decay_ms, nactive, ndirty, nmuzzy); size_t base_allocated, base_resident, base_mapped, metadata_thp; base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, &base_mapped, &metadata_thp); arena_stats_lock(tsdn, &arena->stats); arena_stats_accum_zu(&astats->mapped, base_mapped + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); arena_stats_accum_zu(&astats->retained, extents_npages_get(&arena->extents_retained) << LG_PAGE); atomic_store_zu(&astats->extent_avail, atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED), ATOMIC_RELAXED); arena_stats_accum_u64(&astats->decay_dirty.npurge, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.npurge)); arena_stats_accum_u64(&astats->decay_dirty.nmadvise, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.nmadvise)); arena_stats_accum_u64(&astats->decay_dirty.purged, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.purged)); arena_stats_accum_u64(&astats->decay_muzzy.npurge, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.npurge)); arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.nmadvise)); arena_stats_accum_u64(&astats->decay_muzzy.purged, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.purged)); arena_stats_accum_zu(&astats->base, base_allocated); arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); arena_stats_accum_zu(&astats->metadata_thp, metadata_thp); arena_stats_accum_zu(&astats->resident, base_resident + (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + extents_npages_get(&arena->extents_dirty) + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) { uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].nmalloc); arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); uint64_t ndalloc = 
arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].ndalloc); arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].nrequests); arena_stats_accum_u64(&lstats[i].nrequests, nmalloc + nrequests); arena_stats_accum_u64(&astats->nrequests_large, nmalloc + nrequests); assert(nmalloc >= ndalloc); assert(nmalloc - ndalloc <= SIZE_T_MAX); size_t curlextents = (size_t)(nmalloc - ndalloc); lstats[i].curlextents += curlextents; arena_stats_accum_zu(&astats->allocated_large, curlextents * sz_index2size(SC_NBINS + i)); } for (pszind_t i = 0; i < SC_NPSIZES; i++) { size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes, retained_bytes; dirty = extents_nextents_get(&arena->extents_dirty, i); muzzy = extents_nextents_get(&arena->extents_muzzy, i); retained = extents_nextents_get(&arena->extents_retained, i); dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i); muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i); retained_bytes = extents_nbytes_get(&arena->extents_retained, i); atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED); atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED); atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED); atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes, ATOMIC_RELAXED); atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes, ATOMIC_RELAXED); atomic_store_zu(&estats[i].retained_bytes, retained_bytes, ATOMIC_RELAXED); } arena_stats_unlock(tsdn, &arena->stats); /* tcache_bytes counts currently cached bytes. */ atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); cache_bin_array_descriptor_t *descriptor; ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { szind_t i = 0; for (; i < SC_NBINS; i++) { cache_bin_t *tbin = &descriptor->bins_small[i]; arena_stats_accum_zu(&astats->tcache_bytes, tbin->ncached * sz_index2size(i)); } for (; i < nhbins; i++) { cache_bin_t *tbin = &descriptor->bins_large[i]; arena_stats_accum_zu(&astats->tcache_bytes, tbin->ncached * sz_index2size(i)); } } malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[arena_prof_mutex_tcache_list], &arena->tcache_ql_mtx); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ malloc_mutex_lock(tsdn, &arena->mtx); \ malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ &arena->mtx); \ malloc_mutex_unlock(tsdn, &arena->mtx); /* Gather per arena mutex profiling data. 
*/ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, arena_prof_mutex_extent_avail) READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, arena_prof_mutex_extents_dirty) READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, arena_prof_mutex_extents_muzzy) READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, arena_prof_mutex_extents_retained) READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, arena_prof_mutex_decay_dirty) READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, arena_prof_mutex_decay_muzzy) READ_ARENA_MUTEX_PROF_DATA(base->mtx, arena_prof_mutex_base) #undef READ_ARENA_MUTEX_PROF_DATA nstime_copy(&astats->uptime, &arena->create_time); nstime_update(&astats->uptime); nstime_subtract(&astats->uptime, &arena->create_time); for (szind_t i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_stats_merge(tsdn, &bstats[i], &arena->bins[i].bin_shards[j]); } } } void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, extent); if (arena_dirty_decay_ms_get(arena) == 0) { arena_decay_dirty(tsdn, arena, false, true); } else { arena_background_thread_inactivity_check(tsdn, arena, false); } } static void * arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { void *ret; arena_slab_data_t *slab_data = extent_slab_data_get(slab); size_t regind; assert(extent_nfree_get(slab) > 0); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); ret = (void *)((uintptr_t)extent_addr_get(slab) + (uintptr_t)(bin_info->reg_size * regind)); extent_nfree_dec(slab); return ret; } static void arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, unsigned cnt, void** ptrs) { arena_slab_data_t *slab_data = extent_slab_data_get(slab); assert(extent_nfree_get(slab) >= cnt); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); #if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) for (unsigned i = 0; i < cnt; i++) { size_t regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) + (uintptr_t)(bin_info->reg_size * regind)); } #else unsigned group = 0; bitmap_t g = slab_data->bitmap[group]; unsigned i = 0; while (i < cnt) { while (g == 0) { g = slab_data->bitmap[++group]; } size_t shift = group << LG_BITMAP_GROUP_NBITS; size_t pop = popcount_lu(g); if (pop > (cnt - i)) { pop = cnt - i; } /* * Load from memory locations only once, outside the * hot loop below. */ uintptr_t base = (uintptr_t)extent_addr_get(slab); uintptr_t regsize = (uintptr_t)bin_info->reg_size; while (pop--) { size_t bit = cfs_lu(&g); size_t regind = shift + bit; *(ptrs + i) = (void *)(base + regsize * regind); i++; } slab_data->bitmap[group] = g; } #endif extent_nfree_sub(slab, cnt); } #ifndef JEMALLOC_JET static #endif size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { size_t diff, regind; /* Freeing a pointer outside the slab can cause assertion failure. */ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); /* Freeing an interior pointer can cause assertion failure. 
*/ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % (uintptr_t)bin_infos[binind].reg_size == 0); diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); /* Avoid doing division with a variable divisor. */ regind = div_compute(&arena_binind_div_info[binind], diff); assert(regind < bin_infos[binind].nregs); return regind; } static void arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) { szind_t binind = extent_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; size_t regind = arena_slab_regind(slab, binind, ptr); assert(extent_nfree_get(slab) < bin_info->nregs); /* Freeing an unallocated pointer can cause assertion failure. */ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); extent_nfree_inc(slab); } static void arena_nactive_add(arena_t *arena, size_t add_pages) { atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); } static void arena_nactive_sub(arena_t *arena, size_t sub_pages) { assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); } static void arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t index, hindex; cassert(config_stats); if (usize < SC_LARGE_MINCLASS) { usize = SC_LARGE_MINCLASS; } index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.lstats[hindex].nmalloc, 1); } static void arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t index, hindex; cassert(config_stats); if (usize < SC_LARGE_MINCLASS) { usize = SC_LARGE_MINCLASS; } index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.lstats[hindex].ndalloc, 1); } static void arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, size_t usize) { arena_large_dalloc_stats_update(tsdn, arena, oldusize); arena_large_malloc_stats_update(tsdn, arena, usize); } static bool arena_may_have_muzzy(arena_t *arena) { return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0)); } extent_t * arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); szind_t szind = sz_size2index(usize); size_t mapped_add; bool commit = true; extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); if (extent == NULL && arena_may_have_muzzy(arena)) { extent = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); } size_t size = usize + sz_large_pad; if (extent == NULL) { extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); if (config_stats) { /* * extent may be NULL on OOM, but in that case * mapped_add isn't used below, so there's no need to * conditionally set it to 0 here.
*/ mapped_add = size; } } else if (config_stats) { mapped_add = 0; } if (extent != NULL) { if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_malloc_stats_update(tsdn, arena, usize); if (mapped_add != 0) { arena_stats_add_zu(tsdn, &arena->stats, &arena->stats.mapped, mapped_add); } arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_add(arena, size >> LG_PAGE); } return extent; } void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_dalloc_stats_update(tsdn, arena, extent_usize_get(extent)); arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); } void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = oldusize - usize; if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_sub(arena, udiff >> LG_PAGE); } void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = usize - oldusize; if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_add(arena, udiff >> LG_PAGE); } static ssize_t arena_decay_ms_read(arena_decay_t *decay) { return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); } static void arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); } static void arena_decay_deadline_init(arena_decay_t *decay) { /* * Generate a new deadline that is uniformly random within the next * epoch after the current one. */ nstime_copy(&decay->deadline, &decay->epoch); nstime_add(&decay->deadline, &decay->interval); if (arena_decay_ms_read(decay) > 0) { nstime_t jitter; nstime_init(&jitter, prng_range_u64(&decay->jitter_state, nstime_ns(&decay->interval))); nstime_add(&decay->deadline, &jitter); } } static bool arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { return (nstime_compare(&decay->deadline, time) <= 0); } static size_t arena_decay_backlog_npages_limit(const arena_decay_t *decay) { uint64_t sum; size_t npages_limit_backlog; unsigned i; /* * For each element of decay_backlog, multiply by the corresponding * fixed-point smoothstep decay factor. Sum the products, then divide * to round down to the nearest whole number of pages. */ sum = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { sum += decay->backlog[i] * h_steps[i]; } npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); return npages_limit_backlog; } static void arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { size_t npages_delta = (current_npages > decay->nunpurged) ? 
current_npages - decay->nunpurged : 0; decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; if (config_debug) { if (current_npages > decay->ceil_npages) { decay->ceil_npages = current_npages; } size_t npages_limit = arena_decay_backlog_npages_limit(decay); assert(decay->ceil_npages >= npages_limit); if (decay->ceil_npages > npages_limit) { decay->ceil_npages = npages_limit; } } } static void arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, size_t current_npages) { if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * sizeof(size_t)); } else { size_t nadvance_z = (size_t)nadvance_u64; assert((uint64_t)nadvance_z == nadvance_u64); memmove(decay->backlog, &decay->backlog[nadvance_z], (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); if (nadvance_z > 1) { memset(&decay->backlog[SMOOTHSTEP_NSTEPS - nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); } } arena_decay_backlog_update_last(decay, current_npages); } static void arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, size_t current_npages, size_t npages_limit, bool is_background_thread) { if (current_npages > npages_limit) { arena_decay_to_limit(tsdn, arena, decay, extents, false, npages_limit, current_npages - npages_limit, is_background_thread); } } static void arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, size_t current_npages) { assert(arena_decay_deadline_reached(decay, time)); nstime_t delta; nstime_copy(&delta, time); nstime_subtract(&delta, &decay->epoch); uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); assert(nadvance_u64 > 0); /* Add nadvance_u64 decay intervals to epoch. */ nstime_copy(&delta, &decay->interval); nstime_imultiply(&delta, nadvance_u64); nstime_add(&decay->epoch, &delta); /* Set a new deadline. */ arena_decay_deadline_init(decay); /* Update the backlog. */ arena_decay_backlog_update(decay, nadvance_u64, current_npages); } static void arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, const nstime_t *time, bool is_background_thread) { size_t current_npages = extents_npages_get(extents); arena_decay_epoch_advance_helper(decay, time, current_npages); size_t npages_limit = arena_decay_backlog_npages_limit(decay); /* We may unlock decay->mtx when try_purge(). Finish logging first. */ decay->nunpurged = (npages_limit > current_npages) ? 
npages_limit : current_npages; if (!background_thread_enabled() || is_background_thread) { arena_decay_try_purge(tsdn, arena, decay, extents, current_npages, npages_limit, is_background_thread); } } static void arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { arena_decay_ms_write(decay, decay_ms); if (decay_ms > 0) { nstime_init(&decay->interval, (uint64_t)decay_ms * KQU(1000000)); nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); } nstime_init(&decay->epoch, 0); nstime_update(&decay->epoch); decay->jitter_state = (uint64_t)(uintptr_t)decay; arena_decay_deadline_init(decay); decay->nunpurged = 0; memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); } static bool arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, arena_stats_decay_t *stats) { if (config_debug) { for (size_t i = 0; i < sizeof(arena_decay_t); i++) { assert(((char *)decay)[i] == 0); } decay->ceil_npages = 0; } if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, malloc_mutex_rank_exclusive)) { return true; } decay->purging = false; arena_decay_reinit(decay, decay_ms); /* Memory is zeroed, so there is no need to clear stats. */ if (config_stats) { decay->stats = stats; } return false; } static bool arena_decay_ms_valid(ssize_t decay_ms) { if (decay_ms < -1) { return false; } if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * KQU(1000)) { return true; } return false; } static bool arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool is_background_thread) { malloc_mutex_assert_owner(tsdn, &decay->mtx); /* Purge all or nothing if the option is disabled. */ ssize_t decay_ms = arena_decay_ms_read(decay); if (decay_ms <= 0) { if (decay_ms == 0) { arena_decay_to_limit(tsdn, arena, decay, extents, false, 0, extents_npages_get(extents), is_background_thread); } return false; } nstime_t time; nstime_init(&time, 0); nstime_update(&time); if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) > 0)) { /* * Time went backwards. Move the epoch back in time and * generate a new deadline, with the expectation that time * typically flows forward for long enough periods of time that * epochs complete. Unfortunately, this strategy is susceptible * to clock jitter triggering premature epoch advances, but * clock jitter estimation and compensation isn't feasible here * because calls into this code are event-driven. */ nstime_copy(&decay->epoch, &time); arena_decay_deadline_init(decay); } else { /* Verify that time does not go backwards. */ assert(nstime_compare(&decay->epoch, &time) <= 0); } /* * If the deadline has been reached, advance to the current epoch and * purge to the new limit if necessary. Note that dirty pages created * during the current epoch are not subject to purge until a future * epoch; as a result, purging only happens during epoch advances or when * triggered by background threads (as a scheduled event).
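 * As a worked example (typical values, assumed rather than quoted from
 * this build): with decay_ms == 10000 and SMOOTHSTEP_NSTEPS == 200 the
 * epoch interval is 10000 ms / 200 == 50 ms, so pages dirtied now join
 * the backlog at the next 50 ms boundary and are purged gradually over
 * the following 10 s, weighted by the smoothstep table h_steps.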
*/ bool advance_epoch = arena_decay_deadline_reached(decay, &time); if (advance_epoch) { arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, is_background_thread); } else if (is_background_thread) { arena_decay_try_purge(tsdn, arena, decay, extents, extents_npages_get(extents), arena_decay_backlog_npages_limit(decay), is_background_thread); } return advance_epoch; } static ssize_t arena_decay_ms_get(arena_decay_t *decay) { return arena_decay_ms_read(decay); } ssize_t arena_dirty_decay_ms_get(arena_t *arena) { return arena_decay_ms_get(&arena->decay_dirty); } ssize_t arena_muzzy_decay_ms_get(arena_t *arena) { return arena_decay_ms_get(&arena->decay_muzzy); } static bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; } malloc_mutex_lock(tsdn, &decay->mtx); /* * Restart decay backlog from scratch, which may cause many dirty pages * to be immediately purged. It would conceptually be possible to map * the old backlog onto the new backlog, but there is no justification * for such complexity since decay_ms changes are intended to be * infrequent, either between the {-1, 0, >0} states, or a one-time * arbitrary change during initial arena configuration. */ arena_decay_reinit(decay, decay_ms); arena_maybe_decay(tsdn, arena, decay, extents, false); malloc_mutex_unlock(tsdn, &decay->mtx); return false; } bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) { return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, &arena->extents_dirty, decay_ms); } bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) { return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, &arena->extents_muzzy, decay_ms); } static size_t arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, size_t npages_decay_max, extent_list_t *decay_extents) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Stash extents according to npages_limit. */ size_t nstashed = 0; extent_t *extent; while (nstashed < npages_decay_max && (extent = extents_evict(tsdn, arena, r_extent_hooks, extents, npages_limit)) != NULL) { extent_list_append(decay_extents, extent); nstashed += extent_size_get(extent) >> LG_PAGE; } return nstashed; } static size_t arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, bool all, extent_list_t *decay_extents, bool is_background_thread) { size_t nmadvise, nunmapped; size_t npurged; if (config_stats) { nmadvise = 0; nunmapped = 0; } npurged = 0; ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); for (extent_t *extent = extent_list_first(decay_extents); extent != NULL; extent = extent_list_first(decay_extents)) { if (config_stats) { nmadvise++; } size_t npages = extent_size_get(extent) >> LG_PAGE; npurged += npages; extent_list_remove(decay_extents, extent); switch (extents_state_get(extents)) { case extent_state_active: not_reached(); case extent_state_dirty: if (!all && muzzy_decay_ms != 0 && !extent_purge_lazy_wrapper(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent))) { extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_muzzy, extent); arena_background_thread_inactivity_check(tsdn, arena, is_background_thread); break; } /* Fall through. 
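 * (A dirty extent reaches the fall-through when "all" was requested,
 * muzzy decay is disabled, or the lazy purge hook failed; it is then
 * unmapped exactly like a muzzy extent.)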
*/ case extent_state_muzzy: extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent); if (config_stats) { nunmapped += npages; } break; case extent_state_retained: default: not_reached(); } } if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, 1); arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->nmadvise, nmadvise); arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, npurged); arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, nunmapped << LG_PAGE); arena_stats_unlock(tsdn, &arena->stats); } return npurged; } /* * npages_limit: Decay at most npages_decay_max pages without violating the * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper * bound on number of pages in order to prevent unbounded growth (namely in * stashed), otherwise unbounded new pages could be added to extents during the * current decay run, so that the purging thread never finishes. */ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, bool is_background_thread) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 1); malloc_mutex_assert_owner(tsdn, &decay->mtx); if (decay->purging) { return; } decay->purging = true; malloc_mutex_unlock(tsdn, &decay->mtx); extent_hooks_t *extent_hooks = extent_hooks_get(arena); extent_list_t decay_extents; extent_list_init(&decay_extents); size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, npages_limit, npages_decay_max, &decay_extents); if (npurge != 0) { size_t npurged = arena_decay_stashed(tsdn, arena, &extent_hooks, decay, extents, all, &decay_extents, is_background_thread); assert(npurged == npurge); } malloc_mutex_lock(tsdn, &decay->mtx); decay->purging = false; } static bool arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool is_background_thread, bool all) { if (all) { malloc_mutex_lock(tsdn, &decay->mtx); arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, extents_npages_get(extents), is_background_thread); malloc_mutex_unlock(tsdn, &decay->mtx); return false; } if (malloc_mutex_trylock(tsdn, &decay->mtx)) { /* No need to wait if another thread is in progress. */ return true; } bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, is_background_thread); size_t npages_new; if (epoch_advanced) { /* Backlog is updated on epoch advance. 
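 * (backlog[SMOOTHSTEP_NSTEPS-1] is the number of pages that became
 * dirty during the epoch that just closed, which is what gets handed
 * to the background thread as npages_new below.)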
*/ npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; } malloc_mutex_unlock(tsdn, &decay->mtx); if (have_background_thread && background_thread_enabled() && epoch_advanced && !is_background_thread) { background_thread_interval_check(tsdn, arena, decay, npages_new); } return false; } static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { return arena_decay_impl(tsdn, arena, &arena->decay_dirty, &arena->extents_dirty, is_background_thread, all); } static bool arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, &arena->extents_muzzy, is_background_thread, all); } void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { return; } arena_decay_muzzy(tsdn, arena, is_background_thread, all); } static void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); } static void arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { assert(extent_nfree_get(slab) > 0); extent_heap_insert(&bin->slabs_nonfull, slab); } static void arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { extent_heap_remove(&bin->slabs_nonfull, slab); } static extent_t * arena_bin_slabs_nonfull_tryget(bin_t *bin) { extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); if (slab == NULL) { return NULL; } if (config_stats) { bin->stats.reslabs++; } return slab; } static void arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { assert(extent_nfree_get(slab) == 0); /* * Tracking extents is required by arena_reset, which is not allowed * for auto arenas. Bypass this step to avoid touching the extent * linkage (often results in cache misses) for auto arenas. */ if (arena_is_auto(arena)) { return; } extent_list_append(&bin->slabs_full, slab); } static void arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { if (arena_is_auto(arena)) { return; } extent_list_remove(&bin->slabs_full, slab); } static void arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { extent_t *slab; malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (bin->slabcur != NULL) { slab = bin->slabcur; bin->slabcur = NULL; malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } for (slab = extent_list_first(&bin->slabs_full); slab != NULL; slab = extent_list_first(&bin->slabs_full)) { arena_bin_slabs_full_remove(arena, bin, slab); malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } if (config_stats) { bin->stats.curregs = 0; bin->stats.curslabs = 0; } malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } void arena_reset(tsd_t *tsd, arena_t *arena) { /* * Locking in this function is unintuitive. 
The caller guarantees that * no concurrent operations are happening in this arena, but there are * still reasons that some locking is necessary: * * - Some of the functions in the transitive closure of calls assume * appropriate locks are held, and in some cases these locks are * temporarily dropped to avoid lock order reversal or deadlock due to * reentry. * - mallctl("epoch", ...) may concurrently refresh stats. While * strictly speaking this is a "concurrent operation", disallowing * stats refreshes would impose an inconvenient burden. */ /* Large allocations. */ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); for (extent_t *extent = extent_list_first(&arena->large); extent != NULL; extent = extent_list_first(&arena->large)) { void *ptr = extent_base_get(extent); size_t usize; malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); if (config_stats || (config_prof && opt_prof)) { usize = sz_index2size(alloc_ctx.szind); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); } /* Remove large allocation from prof sample set. */ if (config_prof && opt_prof) { prof_free(tsd, ptr, usize, &alloc_ctx); } large_dalloc(tsd_tsdn(tsd), extent); malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); /* Bins. */ for (unsigned i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { arena_bin_reset(tsd, arena, &arena->bins[i].bin_shards[j]); } } atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); } static void arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { /* * Iterate over the retained extents and destroy them. This gives the * extent allocator underlying the extent hooks an opportunity to unmap * all retained memory without having to keep its own metadata * structures. In practice, virtual memory for dss-allocated extents is * leaked here, so best practice is to avoid dss for arenas to be * destroyed, or provide custom extent hooks that track retained * dss-based extents for later reuse. */ extent_hooks_t *extent_hooks = extent_hooks_get(arena); extent_t *extent; while ((extent = extents_evict(tsdn, arena, &extent_hooks, &arena->extents_retained, 0)) != NULL) { extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); } } void arena_destroy(tsd_t *tsd, arena_t *arena) { assert(base_ind_get(arena->base) >= narenas_auto); assert(arena_nthreads_get(arena, false) == 0); assert(arena_nthreads_get(arena, true) == 0); /* * No allocations have occurred since arena_reset() was called. * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached * extents, so only retained extents may remain. */ assert(extents_npages_get(&arena->extents_dirty) == 0); assert(extents_npages_get(&arena->extents_muzzy) == 0); /* Deallocate retained memory. */ arena_destroy_retained(tsd_tsdn(tsd), arena); /* * Remove the arena pointer from the arenas array. We rely on the fact * that there is no way for the application to get a dirty read from the * arenas array unless there is an inherent race in the application * involving access of an arena being concurrently destroyed. 
The * application must synchronize knowledge of the arena's validity, so as * long as we use an atomic write to update the arenas array, the * application will get a clean read any time after it synchronizes * knowledge that the arena is no longer valid. */ arena_set(base_ind_get(arena->base), NULL); /* * Destroy the base allocator, which manages all metadata ever mapped by * this arena. */ base_delete(tsd_tsdn(tsd), arena->base); } static extent_t * arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, szind_t szind) { extent_t *slab; bool zero, commit; witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); zero = false; commit = true; slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); if (config_stats && slab != NULL) { arena_stats_mapped_add(tsdn, &arena->stats, bin_info->slab_size); } return slab; } static extent_t * arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard, const bin_info_t *bin_info) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; szind_t szind = sz_size2index(bin_info->reg_size); bool zero = false; bool commit = true; extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit); if (slab == NULL && arena_may_have_muzzy(arena)) { slab = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit); } if (slab == NULL) { slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, bin_info, szind); if (slab == NULL) { return NULL; } } assert(extent_slab_get(slab)); /* Initialize slab internals. */ arena_slab_data_t *slab_data = extent_slab_data_get(slab); extent_nfree_binshard_set(slab, bin_info->nregs, binshard); bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); return slab; } static extent_t * arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, unsigned binshard) { extent_t *slab; const bin_info_t *bin_info; /* Look for a usable slab. */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { return slab; } /* No existing slabs have any space available. */ bin_info = &bin_infos[binind]; /* Allocate a new slab. */ malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info); /********************************/ malloc_mutex_lock(tsdn, &bin->lock); if (slab != NULL) { if (config_stats) { bin->stats.nslabs++; bin->stats.curslabs++; } return slab; } /* * arena_slab_alloc() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { return slab; } return NULL; } /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). 
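 * In outline (a simplified sketch of the control flow below, not extra
 * logic):
 *
 *   retire bin->slabcur to slabs_full (non-auto arenas)
 *   slab = a nonfull slab, or a freshly allocated one (bin lock dropped)
 *   if another thread installed slabcur meanwhile:
 *       allocate from it; lower or dalloc the now-redundant slab
 *   else:
 *       bin->slabcur = slab; allocate from it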
*/ static void * arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, unsigned binshard) { const bin_info_t *bin_info; extent_t *slab; bin_info = &bin_infos[binind]; if (!arena_is_auto(arena) && bin->slabcur != NULL) { arena_bin_slabs_full_insert(arena, bin, bin->slabcur); bin->slabcur = NULL; } slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard); if (bin->slabcur != NULL) { /* * Another thread updated slabcur while this one ran without the * bin lock in arena_bin_nonfull_slab_get(). */ if (extent_nfree_get(bin->slabcur) > 0) { void *ret = arena_slab_reg_alloc(bin->slabcur, bin_info); if (slab != NULL) { /* * arena_slab_alloc() may have allocated slab, * or it may have been pulled from * slabs_nonfull. Therefore it is unsafe to * make any assumptions about how slab has * previously been used, and * arena_bin_lower_slab() must be called, as if * a region were just deallocated from the slab. */ if (extent_nfree_get(slab) == bin_info->nregs) { arena_dalloc_bin_slab(tsdn, arena, slab, bin); } else { arena_bin_lower_slab(tsdn, arena, slab, bin); } } return ret; } arena_bin_slabs_full_insert(arena, bin, bin->slabcur); bin->slabcur = NULL; } if (slab == NULL) { return NULL; } bin->slabcur = slab; assert(extent_nfree_get(bin->slabcur) > 0); return arena_slab_reg_alloc(slab, bin_info); } /* Choose a bin shard and return the locked bin. */ bin_t * arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard) { bin_t *bin; if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) { *binshard = 0; } else { *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; } assert(*binshard < bin_infos[binind].n_shards); bin = &arena->bins[binind].bin_shards[*binshard]; malloc_mutex_lock(tsdn, &bin->lock); return bin; } void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill, cnt; assert(tbin->ncached == 0); if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { prof_idump(tsdn); } unsigned binshard; bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tcache->lg_fill_div[binind]); i < nfill; i += cnt) { extent_t *slab; if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { unsigned tofill = nfill - i; cnt = tofill < extent_nfree_get(slab) ? tofill : extent_nfree_get(slab); arena_slab_reg_alloc_batch( slab, &bin_infos[binind], cnt, tbin->avail - nfill + i); } else { cnt = 1; void *ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard); /* * OOM. tbin->avail isn't yet filled down to its first * element, so the successful allocations (if any) must * be moved just before tbin->avail before bailing out. */ if (ptr == NULL) { if (i > 0) { memmove(tbin->avail - i, tbin->avail - nfill, i * sizeof(void *)); } break; } /* Insert such that low regions get used first. 
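 * (The cache_bin keeps its stack at avail[-ncached .. -1] and pops the
 * most negative slot first, so the entry written here with the
 * smallest i, i.e. the lowest region, is handed out first.)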
*/ *(tbin->avail - nfill + i) = ptr; } if (config_fill && unlikely(opt_junk_alloc)) { for (unsigned j = 0; j < cnt; j++) { void* ptr = *(tbin->avail - nfill + i + j); arena_alloc_junk_small(ptr, &bin_infos[binind], true); } } } if (config_stats) { bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.curregs += i; bin->stats.nfills++; tbin->tstats.nrequests = 0; } malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; arena_decay_tick(tsdn, arena); } void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { if (!zero) { memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); } } static void arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); } arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = arena_dalloc_junk_small_impl; static void * arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; bin_t *bin; size_t usize; extent_t *slab; assert(binind < SC_NBINS); usize = sz_index2size(binind); unsigned binshard; bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); } else { ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard); } if (ret == NULL) { malloc_mutex_unlock(tsdn, &bin->lock); return NULL; } if (config_stats) { bin->stats.nmalloc++; bin->stats.nrequests++; bin->stats.curregs++; } malloc_mutex_unlock(tsdn, &bin->lock); if (config_prof && arena_prof_accum(tsdn, arena, usize)) { prof_idump(tsdn); } if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], false); } else if (unlikely(opt_zero)) { memset(ret, 0, usize); } } } else { if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], true); } memset(ret, 0, usize); } arena_decay_tick(tsdn, arena); return ret; } void * arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero) { assert(!tsdn_null(tsdn) || arena != NULL); if (likely(!tsdn_null(tsdn))) { arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size); } if (unlikely(arena == NULL)) { return NULL; } if (likely(size <= SC_SMALL_MAXCLASS)) { return arena_malloc_small(tsdn, arena, ind, zero); } return large_malloc(tsdn, arena, sz_index2size(ind), zero); } void * arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; if (usize <= SC_SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { /* Small; alignment doesn't require special slab placement. 
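 * For example (sizes assumed from the usual class spacing): usize ==
 * 4096 with alignment == PAGE passes the test above because slab
 * regions of page-multiple classes are page-aligned, so the plain
 * small path suffices; usize == 4096 with alignment == 8192 would
 * instead take the large path below.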
*/ ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), zero, tcache, true); } else { if (likely(alignment <= CACHELINE)) { ret = large_malloc(tsdn, arena, usize, zero); } else { ret = large_palloc(tsdn, arena, usize, alignment, zero); } } return ret; } void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { cassert(config_prof); assert(ptr != NULL); assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); assert(usize <= SC_SMALL_MAXCLASS); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); arena_t *arena = extent_arena_get(extent); szind_t szind = sz_size2index(usize); extent_szind_set(extent, szind); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, szind, false); prof_accum_cancel(tsdn, &arena->prof_accum, usize); assert(isalloc(tsdn, ptr) == usize); } static size_t arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { cassert(config_prof); assert(ptr != NULL); extent_szind_set(extent, SC_NBINS); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, SC_NBINS, false); assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); return SC_LARGE_MINCLASS; } void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) { cassert(config_prof); assert(opt_prof); extent_t *extent = iealloc(tsdn, ptr); size_t usize = arena_prof_demote(tsdn, extent, ptr); if (usize <= tcache_maxclass) { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, sz_size2index(usize), slow_path); } else { large_dalloc(tsdn, extent); } } static void arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { /* Dissociate slab from bin. */ if (slab == bin->slabcur) { bin->slabcur = NULL; } else { szind_t binind = extent_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; /* * The following block's conditional is necessary because if the * slab only contains one region, then it never gets inserted * into the non-full slabs heap. */ if (bin_info->nregs == 1) { arena_bin_slabs_full_remove(arena, bin, slab); } else { arena_bin_slabs_nonfull_remove(bin, slab); } } } static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) { assert(slab != bin->slabcur); malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ arena_slab_dalloc(tsdn, arena, slab); /****************************/ malloc_mutex_lock(tsdn, &bin->lock); if (config_stats) { bin->stats.curslabs--; } } static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) { assert(extent_nfree_get(slab) > 0); /* * Make sure that if bin->slabcur is non-NULL, it refers to the * oldest/lowest non-full slab. It is okay to NULL slabcur out rather * than proactively keeping it pointing at the oldest/lowest non-full * slab. */ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { /* Switch slabcur. 
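 * (extent_snad_comp() orders slabs by serial number, then address;
 * keeping the oldest/lowest slab as slabcur steers allocation toward
 * long-lived slabs so younger ones can drain and be freed.)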
*/ if (extent_nfree_get(bin->slabcur) > 0) { arena_bin_slabs_nonfull_insert(bin, bin->slabcur); } else { arena_bin_slabs_full_insert(arena, bin, bin->slabcur); } bin->slabcur = slab; if (config_stats) { bin->stats.reslabs++; } } else { arena_bin_slabs_nonfull_insert(bin, slab); } } static void arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, extent_t *slab, void *ptr, bool junked) { arena_slab_data_t *slab_data = extent_slab_data_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; if (!junked && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, bin_info); } arena_slab_reg_dalloc(slab, slab_data, ptr); unsigned nfree = extent_nfree_get(slab); if (nfree == bin_info->nregs) { arena_dissociate_bin_slab(arena, slab, bin); arena_dalloc_bin_slab(tsdn, arena, slab, bin); } else if (nfree == 1 && slab != bin->slabcur) { arena_bin_slabs_full_remove(arena, bin, slab); arena_bin_lower_slab(tsdn, arena, slab, bin); } if (config_stats) { bin->stats.ndalloc++; bin->stats.curregs--; } } void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, extent_t *extent, void *ptr) { arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, true); } static void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { szind_t binind = extent_szind_get(extent); unsigned binshard = extent_binshard_get(extent); bin_t *bin = &arena->bins[binind].bin_shards[binshard]; malloc_mutex_lock(tsdn, &bin->lock); arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, false); malloc_mutex_unlock(tsdn, &bin->lock); } void arena_dalloc_small(tsdn_t *tsdn, void *ptr) { extent_t *extent = iealloc(tsdn, ptr); arena_t *arena = extent_arena_get(extent); arena_dalloc_bin(tsdn, arena, extent, ptr); arena_decay_tick(tsdn, arena); } bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero, size_t *newsize) { bool ret; /* Calls with non-zero extra had to clamp extra. */ assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); extent_t *extent = iealloc(tsdn, ptr); if (unlikely(size > SC_LARGE_MAXCLASS)) { ret = true; goto done; } size_t usize_min = sz_s2u(size); size_t usize_max = sz_s2u(size + extra); if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min <= SC_SMALL_MAXCLASS)) { /* * Avoid moving the allocation if the size class can be left the * same. 
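 * For instance (class sizes assumed from the usual layout): an
 * allocation in a 48-byte bin can be resized in place for any request
 * in (32, 48], since sz_size2index() maps those to the same class,
 * whereas a 56-byte request maps to the 64-byte class and forces a
 * move.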
*/ assert(bin_infos[sz_size2index(oldsize)].reg_size == oldsize); if ((usize_max > SC_SMALL_MAXCLASS || sz_size2index(usize_max) != sz_size2index(oldsize)) && (size > oldsize || usize_max < oldsize)) { ret = true; goto done; } arena_decay_tick(tsdn, extent_arena_get(extent)); ret = false; } else if (oldsize >= SC_LARGE_MINCLASS && usize_max >= SC_LARGE_MINCLASS) { ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max, zero); } else { ret = true; } done: assert(extent == iealloc(tsdn, ptr)); *newsize = extent_usize_get(extent); return ret; } static void * arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment == 0) { return arena_malloc(tsdn, arena, usize, sz_size2index(usize), zero, tcache, true); } usize = sz_sa2u(usize, alignment); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return NULL; } return ipalloct(tsdn, usize, alignment, zero, tcache, arena); } void * arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args) { size_t usize = sz_s2u(size); if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { return NULL; } if (likely(usize <= SC_SMALL_MAXCLASS)) { /* Try to avoid moving the allocation. */ UNUSED size_t newsize; if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero, &newsize)) { hook_invoke_expand(hook_args->is_realloc ? hook_expand_realloc : hook_expand_rallocx, ptr, oldsize, usize, (uintptr_t)ptr, hook_args->args); return ptr; } } if (oldsize >= SC_LARGE_MINCLASS && usize >= SC_LARGE_MINCLASS) { return large_ralloc(tsdn, arena, ptr, usize, alignment, zero, tcache, hook_args); } /* * size and oldsize are different enough that we need to move the * object. In that case, fall back to allocating new space and copying. */ void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero, tcache); if (ret == NULL) { return NULL; } hook_invoke_alloc(hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, hook_args->args); hook_invoke_dalloc(hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); /* * Junk/zero-filling were already done by * ipalloc()/arena_malloc(). */ size_t copysize = (usize < oldsize) ? 
usize : oldsize; memcpy(ret, ptr, copysize); isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); return ret; } dss_prec_t arena_dss_prec_get(arena_t *arena) { return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); } bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { if (!have_dss) { return (dss_prec != dss_prec_disabled); } atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); return false; } ssize_t arena_dirty_decay_ms_default_get(void) { return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); } bool arena_dirty_decay_ms_default_set(ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; } atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); return false; } ssize_t arena_muzzy_decay_ms_default_get(void) { return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); } bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; } atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); return false; } bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit) { assert(opt_retain); pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); if (new_limit != NULL) { size_t limit = *new_limit; /* Grow no more than the new limit. */ if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) { return true; } } malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); if (old_limit != NULL) { *old_limit = sz_pind2sz(arena->retain_grow_limit); } if (new_limit != NULL) { arena->retain_grow_limit = new_ind; } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); return false; } unsigned arena_nthreads_get(arena_t *arena, bool internal) { return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); } void arena_nthreads_inc(arena_t *arena, bool internal) { atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); } void arena_nthreads_dec(arena_t *arena, bool internal) { atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); } size_t arena_extent_sn_next(arena_t *arena) { return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); } arena_t * arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; base_t *base; unsigned i; if (ind == 0) { base = b0get(); } else { base = base_new(tsdn, ind, extent_hooks); if (base == NULL) { return NULL; } } unsigned nbins_total = 0; for (i = 0; i < SC_NBINS; i++) { nbins_total += bin_infos[i].n_shards; } size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total; arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); if (arena == NULL) { goto label_error; } atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); arena->last_thd = NULL; if (config_stats) { if (arena_stats_init(tsdn, &arena->stats)) { goto label_error; } ql_new(&arena->tcache_ql); ql_new(&arena->cache_bin_array_descriptor_ql); if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { goto label_error; } } if (config_prof) { if (prof_accum_init(tsdn, &arena->prof_accum)) { goto label_error; } } if (config_cache_oblivious) { /* * A nondeterministic seed based on the address of arena reduces * the likelihood of lockstep non-uniform cache index * utilization among identical concurrent processes, but at the * cost of test repeatability. For debug builds, instead use a * deterministic seed. 
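 * (The offset_state stored below seeds the PRNG used when randomizing
 * large extent addresses, giving each large allocation a random
 * cacheline-aligned offset within its first page.)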
*/ atomic_store_zu(&arena->offset_state, config_debug ? ind : (size_t)(uintptr_t)arena, ATOMIC_RELAXED); } atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), ATOMIC_RELAXED); atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); extent_list_init(&arena->large); if (malloc_mutex_init(&arena->large_mtx, "arena_large", WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { goto label_error; } /* * Delay coalescing for dirty extents despite the disruptive effect on * memory layout for best-fit extent allocation, since cached extents * are likely to be reused soon after deallocation, and the cost of * merging/splitting extents is non-trivial. */ if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, true)) { goto label_error; } /* * Coalesce muzzy extents immediately, because operations on them are in * the critical path much less often than for dirty extents. */ if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, false)) { goto label_error; } /* * Coalesce retained extents immediately, in part because they will * never be evicted (and therefore there's no opportunity for delayed * coalescing), but also because operations on retained extents are not * in the critical path. */ if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, false)) { goto label_error; } if (arena_decay_init(&arena->decay_dirty, arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { goto label_error; } if (arena_decay_init(&arena->decay_muzzy, arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { goto label_error; } arena->extent_grow_next = sz_psz2ind(HUGEPAGE); arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS); if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { goto label_error; } extent_avail_new(&arena->extent_avail); if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { goto label_error; } /* Initialize bins. */ uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t); atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE); for (i = 0; i < SC_NBINS; i++) { unsigned nshards = bin_infos[i].n_shards; arena->bins[i].bin_shards = (bin_t *)bin_addr; bin_addr += nshards * sizeof(bin_t); for (unsigned j = 0; j < nshards; j++) { bool err = bin_init(&arena->bins[i].bin_shards[j]); if (err) { goto label_error; } } } assert(bin_addr == (uintptr_t)arena + arena_size); arena->base = base; /* Set arena before creating background threads. */ arena_set(ind, arena); nstime_init(&arena->create_time, 0); nstime_update(&arena->create_time); /* We don't support reentrancy for arena 0 bootstrapping. */ if (ind != 0) { /* * If we're here, then arena 0 already exists, so bootstrapping * is done enough that we should have tsd. */ assert(!tsdn_null(tsdn)); pre_reentrancy(tsdn_tsd(tsdn), arena); if (test_hooks_arena_new_hook) { test_hooks_arena_new_hook(); } post_reentrancy(tsdn_tsd(tsdn)); } return arena; label_error: if (ind != 0) { base_delete(tsdn, base); } return NULL; } arena_t * arena_choose_huge(tsd_t *tsd) { /* huge_arena_ind can be 0 during init (will use a0). */ if (huge_arena_ind == 0) { assert(!malloc_initialized()); } arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false); if (huge_arena == NULL) { /* Create the huge arena on demand. 
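 * (Reached the first time a thread allocates past oversize_threshold;
 * the arena is created via arena_get(..., init_if_missing=true) and
 * immediately switched to eager purging below.)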
*/ assert(huge_arena_ind != 0); huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true); if (huge_arena == NULL) { return NULL; } /* * Purge eagerly for huge allocations, because: 1) number of * huge allocations is usually small, which means ticker based * decay is not reliable; and 2) less immediate reuse is * expected for huge allocations. */ if (arena_dirty_decay_ms_default_get() > 0) { arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); } if (arena_muzzy_decay_ms_default_get() > 0) { arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); } } return huge_arena; } bool arena_init_huge(void) { bool huge_enabled; /* The threshold should be a large size class. */ if (opt_oversize_threshold > SC_LARGE_MAXCLASS || opt_oversize_threshold < SC_LARGE_MINCLASS) { opt_oversize_threshold = 0; oversize_threshold = SC_LARGE_MAXCLASS + PAGE; huge_enabled = false; } else { /* Reserve the index for the huge arena. */ huge_arena_ind = narenas_total_get(); oversize_threshold = opt_oversize_threshold; huge_enabled = true; } return huge_enabled; } bool arena_is_huge(unsigned arena_ind) { if (huge_arena_ind == 0) { return false; } return (arena_ind == huge_arena_ind); } void arena_boot(sc_data_t *sc_data) { arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); for (unsigned i = 0; i < SC_NBINS; i++) { sc_t *sc = &sc_data->sc[i]; div_init(&arena_binind_div_info[i], (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta)); } } void arena_prefork0(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); } void arena_prefork1(tsdn_t *tsdn, arena_t *arena) { if (config_stats) { malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); } } void arena_prefork2(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); } void arena_prefork3(tsdn_t *tsdn, arena_t *arena) { extents_prefork(tsdn, &arena->extents_dirty); extents_prefork(tsdn, &arena->extents_muzzy); extents_prefork(tsdn, &arena->extents_retained); } void arena_prefork4(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); } void arena_prefork5(tsdn_t *tsdn, arena_t *arena) { base_prefork(tsdn, arena->base); } void arena_prefork6(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->large_mtx); } void arena_prefork7(tsdn_t *tsdn, arena_t *arena) { for (unsigned i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_prefork(tsdn, &arena->bins[i].bin_shards[j]); } } } void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; for (i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_postfork_parent(tsdn, &arena->bins[i].bin_shards[j]); } } malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); base_postfork_parent(tsdn, arena->base); malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); extents_postfork_parent(tsdn, &arena->extents_dirty); extents_postfork_parent(tsdn, &arena->extents_muzzy); extents_postfork_parent(tsdn, &arena->extents_retained); malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); if (config_stats) { malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); } } void arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); atomic_store_u(&arena->nthreads[1], 0,
ATOMIC_RELAXED); if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { arena_nthreads_inc(arena, false); } if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { arena_nthreads_inc(arena, true); } if (config_stats) { ql_new(&arena->tcache_ql); ql_new(&arena->cache_bin_array_descriptor_ql); tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); if (tcache != NULL && tcache->arena == arena) { ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); cache_bin_array_descriptor_init( &tcache->cache_bin_array_descriptor, tcache->bins_small, tcache->bins_large); ql_tail_insert(&arena->cache_bin_array_descriptor_ql, &tcache->cache_bin_array_descriptor, link); } } for (i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]); } } malloc_mutex_postfork_child(tsdn, &arena->large_mtx); base_postfork_child(tsdn, arena->base); malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); extents_postfork_child(tsdn, &arena->extents_dirty); extents_postfork_child(tsdn, &arena->extents_muzzy); extents_postfork_child(tsdn, &arena->extents_retained); malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); if (config_stats) { malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); } } jemalloc-sys-0.3.2/rep/src/background_thread.c010064400007650000024000000657761344617474000175630ustar0000000000000000#define JEMALLOC_BACKGROUND_THREAD_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS /******************************************************************************/ /* Data. */ /* This option should be opt-in only. */ #define BACKGROUND_THREAD_DEFAULT false /* Read-only after initialization. */ bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1; /* Used for thread creation, termination and stats. */ malloc_mutex_t background_thread_lock; /* Indicates global state. Atomic because decay reads this w/o locking. */ atomic_b_t background_thread_enabled_state; size_t n_background_threads; size_t max_background_threads; /* Thread info per-index. 
*/ background_thread_info_t *background_thread_info; /******************************************************************************/ #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); static void pthread_create_wrapper_init(void) { #ifdef JEMALLOC_LAZY_LOCK if (!isthreaded) { isthreaded = true; } #endif } int pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *__restrict arg) { pthread_create_wrapper_init(); return pthread_create_fptr(thread, attr, start_routine, arg); } #endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */ #ifndef JEMALLOC_BACKGROUND_THREAD #define NOT_REACHED { not_reached(); } bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED bool background_threads_enable(tsd_t *tsd) NOT_REACHED bool background_threads_disable(tsd_t *tsd) NOT_REACHED void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, size_t npages_new) NOT_REACHED void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED bool background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) NOT_REACHED void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED #undef NOT_REACHED #else static bool background_thread_enabled_at_fork; static void background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { background_thread_wakeup_time_set(tsdn, info, 0); info->npages_to_purge_new = 0; if (config_stats) { info->tot_n_runs = 0; nstime_init(&info->tot_sleep_time, 0); } } static inline bool set_current_thread_affinity(int cpu) { #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); return (ret != 0); #else return false; #endif } /* Threshold for determining when to wake up the background thread. */ #define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024) #define BILLION UINT64_C(1000000000) /* Minimal sleep interval 100 ms. */ #define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) static inline size_t decay_npurge_after_interval(arena_decay_t *decay, size_t interval) { size_t i; uint64_t sum = 0; for (i = 0; i < interval; i++) { sum += decay->backlog[i] * h_steps[i]; } for (; i < SMOOTHSTEP_NSTEPS; i++) { sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); } return (size_t)(sum >> SMOOTHSTEP_BFP); } static uint64_t arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay, extents_t *extents) { if (malloc_mutex_trylock(tsdn, &decay->mtx)) { /* Use minimal interval if decay is contended. */ return BACKGROUND_THREAD_MIN_INTERVAL_NS; } uint64_t interval; ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); if (decay_time <= 0) { /* Purging is eagerly done or disabled currently. */ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; goto label_done; } uint64_t decay_interval_ns = nstime_ns(&decay->interval); assert(decay_interval_ns > 0); size_t npages = extents_npages_get(extents); if (npages == 0) { unsigned i; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { if (decay->backlog[i] > 0) { break; } } if (i == SMOOTHSTEP_NSTEPS) { /* No dirty pages recorded. Sleep indefinitely. 
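 * (When dirty pages do remain, the code below bisects [lb, ub] for the
 * smallest sleep interval whose expected purge count first exceeds
 * BACKGROUND_THREAD_NPAGES_THRESHOLD, so the thread wakes just often
 * enough to purge in batches of roughly that many pages, i.e. ~1024
 * pages with the threshold defined above; numbers are illustrative.)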
*/ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; goto label_done; } } if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) { /* Use max interval. */ interval = decay_interval_ns * SMOOTHSTEP_NSTEPS; goto label_done; } size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns; size_t ub = SMOOTHSTEP_NSTEPS; /* Minimal 2 intervals to ensure reaching next epoch deadline. */ lb = (lb < 2) ? 2 : lb; if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) || (lb + 2 > ub)) { interval = BACKGROUND_THREAD_MIN_INTERVAL_NS; goto label_done; } assert(lb + 2 <= ub); size_t npurge_lb, npurge_ub; npurge_lb = decay_npurge_after_interval(decay, lb); if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) { interval = decay_interval_ns * lb; goto label_done; } npurge_ub = decay_npurge_after_interval(decay, ub); if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) { interval = decay_interval_ns * ub; goto label_done; } unsigned n_search = 0; size_t target, npurge; while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) && (lb + 2 < ub)) { target = (lb + ub) / 2; npurge = decay_npurge_after_interval(decay, target); if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) { ub = target; npurge_ub = npurge; } else { lb = target; npurge_lb = npurge; } assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1); } interval = decay_interval_ns * (ub + lb) / 2; label_done: interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? BACKGROUND_THREAD_MIN_INTERVAL_NS : interval; malloc_mutex_unlock(tsdn, &decay->mtx); return interval; } /* Compute purge interval for background threads. */ static uint64_t arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) { uint64_t i1, i2; i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty, &arena->extents_dirty); if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) { return i1; } i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy, &arena->extents_muzzy); return i1 < i2 ? i1 : i2; } static void background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, uint64_t interval) { if (config_stats) { info->tot_n_runs++; } info->npages_to_purge_new = 0; struct timeval tv; /* Specific clock required by timedwait. */ gettimeofday(&tv, NULL); nstime_t before_sleep; nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000); int ret; if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { assert(background_thread_indefinite_sleep(info)); ret = pthread_cond_wait(&info->cond, &info->mtx.lock); assert(ret == 0); } else { assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS && interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); /* We need malloc clock (can be different from tv). 
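 * (pthread_cond_timedwait() below measures its absolute deadline
 * against the realtime clock sampled into tv, while the wakeup
 * bookkeeping in background_thread_wakeup_time_set() uses nstime,
 * which may be a different, monotonic clock; hence two timestamps.)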
*/ nstime_t next_wakeup; nstime_init(&next_wakeup, 0); nstime_update(&next_wakeup); nstime_iadd(&next_wakeup, interval); assert(nstime_ns(&next_wakeup) < BACKGROUND_THREAD_INDEFINITE_SLEEP); background_thread_wakeup_time_set(tsdn, info, nstime_ns(&next_wakeup)); nstime_t ts_wakeup; nstime_copy(&ts_wakeup, &before_sleep); nstime_iadd(&ts_wakeup, interval); struct timespec ts; ts.tv_sec = (size_t)nstime_sec(&ts_wakeup); ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup); assert(!background_thread_indefinite_sleep(info)); ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts); assert(ret == ETIMEDOUT || ret == 0); background_thread_wakeup_time_set(tsdn, info, BACKGROUND_THREAD_INDEFINITE_SLEEP); } if (config_stats) { gettimeofday(&tv, NULL); nstime_t after_sleep; nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000); if (nstime_compare(&after_sleep, &before_sleep) > 0) { nstime_subtract(&after_sleep, &before_sleep); nstime_add(&info->tot_sleep_time, &after_sleep); } } } static bool background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { if (unlikely(info->state == background_thread_paused)) { malloc_mutex_unlock(tsdn, &info->mtx); /* Wait on global lock to update status. */ malloc_mutex_lock(tsdn, &background_thread_lock); malloc_mutex_unlock(tsdn, &background_thread_lock); malloc_mutex_lock(tsdn, &info->mtx); return true; } return false; } static inline void background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; unsigned narenas = narenas_total_get(); for (unsigned i = ind; i < narenas; i += max_background_threads) { arena_t *arena = arena_get(tsdn, i, false); if (!arena) { continue; } arena_decay(tsdn, arena, true, false); if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) { /* Min interval will be used. */ continue; } uint64_t interval = arena_decay_compute_purge_interval(tsdn, arena); assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS); if (min_interval > interval) { min_interval = interval; } } background_thread_sleep(tsdn, info, min_interval); } static bool background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { if (info == &background_thread_info[0]) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); } else { malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &background_thread_lock); } pre_reentrancy(tsd, NULL); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); bool has_thread; assert(info->state != background_thread_paused); if (info->state == background_thread_started) { has_thread = true; info->state = background_thread_stopped; pthread_cond_signal(&info->cond); } else { has_thread = false; } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); if (!has_thread) { post_reentrancy(tsd); return false; } void *ret; if (pthread_join(info->thread, &ret)) { post_reentrancy(tsd); return true; } assert(ret == NULL); n_background_threads--; post_reentrancy(tsd); return false; } static void *background_thread_entry(void *ind_arg); static int background_thread_create_signals_masked(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) { /* * Mask signals during thread creation so that the thread inherits * an empty signal set. */ sigset_t set; sigfillset(&set); sigset_t oldset; int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); if (mask_err != 0) { return mask_err; } int create_err = pthread_create_wrapper(thread, attr, start_routine, arg); /* * Restore the signal mask. 
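 * (Creation ran with every signal blocked, so the new thread inherits
 * a fully blocked mask; the creating thread now needs its original
 * mask back.)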
Failure to restore the signal mask here * changes program behavior. */ int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL); if (restore_err != 0) { malloc_printf("<jemalloc>: background thread creation " "failed (%d), and signal mask restoration failed " "(%d)\n", create_err, restore_err); if (opt_abort) { abort(); } } return create_err; } static bool check_background_thread_creation(tsd_t *tsd, unsigned *n_created, bool *created_threads) { bool ret = false; if (likely(*n_created == n_background_threads)) { return ret; } tsdn_t *tsdn = tsd_tsdn(tsd); malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx); for (unsigned i = 1; i < max_background_threads; i++) { if (created_threads[i]) { continue; } background_thread_info_t *info = &background_thread_info[i]; malloc_mutex_lock(tsdn, &info->mtx); /* * In case of the background_thread_paused state because of * arena reset, delay the creation. */ bool create = (info->state == background_thread_started); malloc_mutex_unlock(tsdn, &info->mtx); if (!create) { continue; } pre_reentrancy(tsd, NULL); int err = background_thread_create_signals_masked(&info->thread, NULL, background_thread_entry, (void *)(uintptr_t)i); post_reentrancy(tsd); if (err == 0) { (*n_created)++; created_threads[i] = true; } else { malloc_printf("<jemalloc>: background thread " "creation failed (%d)\n", err); if (opt_abort) { abort(); } } /* Return to restart the loop since we unlocked. */ ret = true; break; } malloc_mutex_lock(tsdn, &background_thread_info[0].mtx); return ret; } static void background_thread0_work(tsd_t *tsd) { /* Thread0 is also responsible for launching / terminating threads. */ VARIABLE_ARRAY(bool, created_threads, max_background_threads); unsigned i; for (i = 1; i < max_background_threads; i++) { created_threads[i] = false; } /* Start working, and create more threads when asked. */ unsigned n_created = 1; while (background_thread_info[0].state != background_thread_stopped) { if (background_thread_pause_check(tsd_tsdn(tsd), &background_thread_info[0])) { continue; } if (check_background_thread_creation(tsd, &n_created, (bool *)&created_threads)) { continue; } background_work_sleep_once(tsd_tsdn(tsd), &background_thread_info[0], 0); } /* * Shut down other threads at exit. Note that the ctl thread is holding * the global background_thread mutex (and is waiting) for us. */ assert(!background_thread_enabled()); for (i = 1; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; assert(info->state != background_thread_paused); if (created_threads[i]) { background_threads_disable_single(tsd, info); } else { malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); if (info->state != background_thread_stopped) { /* The thread was not created.
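 * background_thread_init() already marked it started and counted it in
 * n_background_threads, but thread0 never got around to launching the
 * pthread before shutdown, so the bookkeeping is reversed here instead
 * of joining a thread that does not exist.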
*/ assert(info->state == background_thread_started); n_background_threads--; info->state = background_thread_stopped; } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } } background_thread_info[0].state = background_thread_stopped; assert(n_background_threads == 1); } static void background_work(tsd_t *tsd, unsigned ind) { background_thread_info_t *info = &background_thread_info[ind]; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); background_thread_wakeup_time_set(tsd_tsdn(tsd), info, BACKGROUND_THREAD_INDEFINITE_SLEEP); if (ind == 0) { background_thread0_work(tsd); } else { while (info->state != background_thread_stopped) { if (background_thread_pause_check(tsd_tsdn(tsd), info)) { continue; } background_work_sleep_once(tsd_tsdn(tsd), info, ind); } } assert(info->state == background_thread_stopped); background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0); malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } static void * background_thread_entry(void *ind_arg) { unsigned thread_ind = (unsigned)(uintptr_t)ind_arg; assert(thread_ind < max_background_threads); #ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); #elif defined(__FreeBSD__) pthread_set_name_np(pthread_self(), "jemalloc_bg_thd"); #endif if (opt_percpu_arena != percpu_arena_disabled) { set_current_thread_affinity((int)thread_ind); } /* * Start periodic background work. We use internal tsd which avoids * side effects, for example triggering new arena creation (which in * turn triggers another background thread creation). */ background_work(tsd_internal_fetch(), thread_ind); assert(pthread_equal(pthread_self(), background_thread_info[thread_ind].thread)); return NULL; } static void background_thread_init(tsd_t *tsd, background_thread_info_t *info) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); info->state = background_thread_started; background_thread_info_init(tsd_tsdn(tsd), info); n_background_threads++; } static bool background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) { assert(have_background_thread); malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); /* We create at most NCPUs threads. */ size_t thread_ind = arena_ind % max_background_threads; background_thread_info_t *info = &background_thread_info[thread_ind]; bool need_new_thread; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); need_new_thread = background_thread_enabled() && (info->state == background_thread_stopped); if (need_new_thread) { background_thread_init(tsd, info); } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); if (!need_new_thread) { return false; } if (arena_ind != 0) { /* Threads are created asynchronously by Thread 0. */ background_thread_info_t *t0 = &background_thread_info[0]; malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx); assert(t0->state == background_thread_started); pthread_cond_signal(&t0->cond); malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx); return false; } pre_reentrancy(tsd, NULL); /* * To avoid complications (besides reentrancy), create internal * background threads with the underlying pthread_create. */ int err = background_thread_create_signals_masked(&info->thread, NULL, background_thread_entry, (void *)thread_ind); post_reentrancy(tsd); if (err != 0) { malloc_printf("<jemalloc>: arena 0 background thread creation " "failed (%d)\n", err); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); info->state = background_thread_stopped; n_background_threads--; malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); return true; } return false; } /* Create a new background thread if needed.
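 * Returns true on failure. A minimal calling sketch, assuming an
 * initialized tsd (the caller shown is hypothetical, not jemalloc
 * code):
 *
 *   if (background_thread_create(tsd, arena_ind)) {
 *       ... no background thread is running for this arena's shard ...
 *   }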
*/ bool background_thread_create(tsd_t *tsd, unsigned arena_ind) { assert(have_background_thread); bool ret; malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); ret = background_thread_create_locked(tsd, arena_ind); malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); return ret; } bool background_threads_enable(tsd_t *tsd) { assert(n_background_threads == 0); assert(background_thread_enabled()); malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); VARIABLE_ARRAY(bool, marked, max_background_threads); unsigned i, nmarked; for (i = 0; i < max_background_threads; i++) { marked[i] = false; } nmarked = 0; /* Thread 0 is required and created at the end. */ marked[0] = true; /* Mark the threads we need to create for thread 0. */ unsigned n = narenas_total_get(); for (i = 1; i < n; i++) { if (marked[i % max_background_threads] || arena_get(tsd_tsdn(tsd), i, false) == NULL) { continue; } background_thread_info_t *info = &background_thread_info[ i % max_background_threads]; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); assert(info->state == background_thread_stopped); background_thread_init(tsd, info); malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); marked[i % max_background_threads] = true; if (++nmarked == max_background_threads) { break; } } return background_thread_create_locked(tsd, 0); } bool background_threads_disable(tsd_t *tsd) { assert(!background_thread_enabled()); malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); /* Thread 0 will be responsible for terminating other threads. */ if (background_threads_disable_single(tsd, &background_thread_info[0])) { return true; } assert(n_background_threads == 0); return false; } /* Check if we need to signal the background thread early. */ void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, size_t npages_new) { background_thread_info_t *info = arena_background_thread_info_get( arena); if (malloc_mutex_trylock(tsdn, &info->mtx)) { /* * Background thread may hold the mutex for a long period of * time. We'd like to avoid the variance on application * threads. So keep this non-blocking, and leave the work to a * future epoch. */ return; } if (info->state != background_thread_started) { goto label_done; } if (malloc_mutex_trylock(tsdn, &decay->mtx)) { goto label_done; } ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); if (decay_time <= 0) { /* Purging is eagerly done or disabled currently. */ goto label_done_unlock2; } uint64_t decay_interval_ns = nstime_ns(&decay->interval); assert(decay_interval_ns > 0); nstime_t diff; nstime_init(&diff, background_thread_wakeup_time_get(info)); if (nstime_compare(&diff, &decay->epoch) <= 0) { goto label_done_unlock2; } nstime_subtract(&diff, &decay->epoch); if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { goto label_done_unlock2; } if (npages_new > 0) { size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns); /* * Compute how many new pages we would need to purge by the next * wakeup, which is used to determine if we should signal the * background thread. 
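 * h_steps[] stores the smoothstep decay curve as fixed-point fractions
 * scaled by 2^SMOOTHSTEP_BFP, so the computation below amounts to
 *
 *   npurge_new = npages_new * (h(1) - h(1 - n_epoch/NSTEPS))
 *
 * i.e. the slice of the decay curve these pages will traverse before
 * the wakeup deadline. For example, if the h_steps difference works
 * out to a quarter of the full scale, the shift by SMOOTHSTEP_BFP
 * leaves npurge_new == npages_new / 4.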
*/ uint64_t npurge_new; if (n_epoch >= SMOOTHSTEP_NSTEPS) { npurge_new = npages_new; } else { uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; assert(h_steps_max >= h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); npurge_new = npages_new * (h_steps_max - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); npurge_new >>= SMOOTHSTEP_BFP; } info->npages_to_purge_new += npurge_new; } bool should_signal; if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) { should_signal = true; } else if (unlikely(background_thread_indefinite_sleep(info)) && (extents_npages_get(&arena->extents_dirty) > 0 || extents_npages_get(&arena->extents_muzzy) > 0 || info->npages_to_purge_new > 0)) { should_signal = true; } else { should_signal = false; } if (should_signal) { info->npages_to_purge_new = 0; pthread_cond_signal(&info->cond); } label_done_unlock2: malloc_mutex_unlock(tsdn, &decay->mtx); label_done: malloc_mutex_unlock(tsdn, &info->mtx); } void background_thread_prefork0(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &background_thread_lock); background_thread_enabled_at_fork = background_thread_enabled(); } void background_thread_prefork1(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx); } } void background_thread_postfork_parent(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { malloc_mutex_postfork_parent(tsdn, &background_thread_info[i].mtx); } malloc_mutex_postfork_parent(tsdn, &background_thread_lock); } void background_thread_postfork_child(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { malloc_mutex_postfork_child(tsdn, &background_thread_info[i].mtx); } malloc_mutex_postfork_child(tsdn, &background_thread_lock); if (!background_thread_enabled_at_fork) { return; } /* Clear background_thread state (reset to disabled for child). 
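 * Only the forking thread survives fork() in the child, so any
 * background threads recorded before the fork are simply gone; the
 * counter is zeroed and each condvar is re-initialized because a
 * condvar that had (now nonexistent) waiters is unusable in the child.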
*/ malloc_mutex_lock(tsdn, &background_thread_lock); n_background_threads = 0; background_thread_enabled_set(tsdn, false); for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; malloc_mutex_lock(tsdn, &info->mtx); info->state = background_thread_stopped; int ret = pthread_cond_init(&info->cond, NULL); assert(ret == 0); background_thread_info_init(tsdn, info); malloc_mutex_unlock(tsdn, &info->mtx); } malloc_mutex_unlock(tsdn, &background_thread_lock); } bool background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { assert(config_stats); malloc_mutex_lock(tsdn, &background_thread_lock); if (!background_thread_enabled()) { malloc_mutex_unlock(tsdn, &background_thread_lock); return true; } stats->num_threads = n_background_threads; uint64_t num_runs = 0; nstime_init(&stats->run_interval, 0); for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; malloc_mutex_lock(tsdn, &info->mtx); if (info->state != background_thread_stopped) { num_runs += info->tot_n_runs; nstime_add(&stats->run_interval, &info->tot_sleep_time); } malloc_mutex_unlock(tsdn, &info->mtx); } stats->num_runs = num_runs; if (num_runs > 0) { nstime_idivide(&stats->run_interval, num_runs); } malloc_mutex_unlock(tsdn, &background_thread_lock); return false; } #undef BACKGROUND_THREAD_NPAGES_THRESHOLD #undef BILLION #undef BACKGROUND_THREAD_MIN_INTERVAL_NS #ifdef JEMALLOC_HAVE_DLSYM #include <dlfcn.h> #endif static bool pthread_create_fptr_init(void) { if (pthread_create_fptr != NULL) { return false; } /* * Try the next symbol first, because 1) when lazy_lock is in use we * have a wrapper for pthread_create; and 2) the application may define * its own wrapper as well (and can call malloc within the wrapper). */ #ifdef JEMALLOC_HAVE_DLSYM pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); #else pthread_create_fptr = NULL; #endif if (pthread_create_fptr == NULL) { if (config_lazy_lock) { malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " "\"pthread_create\")\n"); abort(); } else { /* Fall back to the default symbol. */ pthread_create_fptr = pthread_create; } } return false; } /* * When lazy lock is enabled, we need to make sure isthreaded is set before * taking any background_thread locks. This is called early in ctl (instead of * waiting for the pthread_create calls to trigger it) because the mutex is * required before creating background threads.
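 * The dlsym(RTLD_NEXT, ...) lookup above resolves the next definition
 * of the symbol after the current object, so an application-level
 * interposer is still honored. A minimal sketch of such an interposing
 * wrapper (hypothetical application code, assuming _GNU_SOURCE and
 * <dlfcn.h>):
 *
 *   typedef int (*pthread_create_t)(pthread_t *,
 *       const pthread_attr_t *, void *(*)(void *), void *);
 *   int
 *   pthread_create(pthread_t *thd, const pthread_attr_t *attr,
 *       void *(*fn)(void *), void *arg) {
 *       pthread_create_t real =
 *           (pthread_create_t)dlsym(RTLD_NEXT, "pthread_create");
 *       return real(thd, attr, fn, arg);
 *   }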
*/ void background_thread_ctl_init(tsdn_t *tsdn) { malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER pthread_create_fptr_init(); pthread_create_wrapper_init(); #endif } #endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ bool background_thread_boot0(void) { if (!have_background_thread && opt_background_thread) { malloc_printf("<jemalloc>: option background_thread currently " "supports pthread only\n"); return true; } #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER if ((config_lazy_lock || opt_background_thread) && pthread_create_fptr_init()) { return true; } #endif return false; } bool background_thread_boot1(tsdn_t *tsdn) { #ifdef JEMALLOC_BACKGROUND_THREAD assert(have_background_thread); assert(narenas_total_get() > 0); if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) { opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD; } max_background_threads = opt_max_background_threads; background_thread_enabled_set(tsdn, opt_background_thread); if (malloc_mutex_init(&background_thread_lock, "background_thread_global", WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, malloc_mutex_rank_exclusive)) { return true; } background_thread_info = (background_thread_info_t *)base_alloc(tsdn, b0get(), opt_max_background_threads * sizeof(background_thread_info_t), CACHELINE); if (background_thread_info == NULL) { return true; } for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; /* Thread mutex is rank_inclusive because of thread0. */ if (malloc_mutex_init(&info->mtx, "background_thread", WITNESS_RANK_BACKGROUND_THREAD, malloc_mutex_address_ordered)) { return true; } if (pthread_cond_init(&info->cond, NULL)) { return true; } malloc_mutex_lock(tsdn, &info->mtx); info->state = background_thread_stopped; background_thread_info_init(tsdn, info); malloc_mutex_unlock(tsdn, &info->mtx); } #endif return false; } jemalloc-sys-0.3.2/rep/src/base.c010064400007650000024000000362511344617474000150110ustar0000000000000000#define JEMALLOC_BASE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sz.h" /******************************************************************************/ /* Data. */ static base_t *b0; metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT; const char *metadata_thp_mode_names[] = { "disabled", "auto", "always" }; /******************************************************************************/ static inline bool metadata_thp_madvise(void) { return (metadata_thp_enabled() && (init_system_thp_mode == thp_mode_default)); } static void * base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { void *addr; bool zero = true; bool commit = true; /* Use huge page sizes and alignment regardless of opt_metadata_thp. */ assert(size == HUGEPAGE_CEILING(size)); size_t alignment = HUGEPAGE; if (extent_hooks == &extent_hooks_default) { addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); } else { /* No arena context as we are creating new arenas. */ tsd_t *tsd = tsdn_null(tsdn) ?
tsd_fetch() : tsdn_tsd(tsdn); pre_reentrancy(tsd, NULL); addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment, &zero, &commit, ind); post_reentrancy(tsd); } return addr; } static void base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size) { /* * Cascade through dalloc, decommit, purge_forced, and purge_lazy, * stopping at first success. This cascade is performed for consistency * with the cascade in extent_dalloc_wrapper() because an application's * custom hooks may not support e.g. dalloc. This function is only ever * called as a side effect of arena destruction, so although it might * seem pointless to do anything besides dalloc here, the application * may in fact want the end state of all associated virtual memory to be * in some consistent-but-allocated state. */ if (extent_hooks == &extent_hooks_default) { if (!extent_dalloc_mmap(addr, size)) { goto label_done; } if (!pages_decommit(addr, size)) { goto label_done; } if (!pages_purge_forced(addr, size)) { goto label_done; } if (!pages_purge_lazy(addr, size)) { goto label_done; } /* Nothing worked. This should never happen. */ not_reached(); } else { tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); pre_reentrancy(tsd, NULL); if (extent_hooks->dalloc != NULL && !extent_hooks->dalloc(extent_hooks, addr, size, true, ind)) { goto label_post_reentrancy; } if (extent_hooks->decommit != NULL && !extent_hooks->decommit(extent_hooks, addr, size, 0, size, ind)) { goto label_post_reentrancy; } if (extent_hooks->purge_forced != NULL && !extent_hooks->purge_forced(extent_hooks, addr, size, 0, size, ind)) { goto label_post_reentrancy; } if (extent_hooks->purge_lazy != NULL && !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, ind)) { goto label_post_reentrancy; } /* Nothing worked. That's the application's problem. */ label_post_reentrancy: post_reentrancy(tsd); } label_done: if (metadata_thp_madvise()) { /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */ assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && (size & HUGEPAGE_MASK) == 0); pages_nohuge(addr, size); } } static void base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, size_t size) { size_t sn; sn = *extent_sn_next; (*extent_sn_next)++; extent_binit(extent, addr, size, sn); } static size_t base_get_num_blocks(base_t *base, bool with_new_block) { base_block_t *b = base->blocks; assert(b != NULL); size_t n_blocks = with_new_block ? 2 : 1; while (b->next != NULL) { n_blocks++; b = b->next; } return n_blocks; } static void base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { assert(opt_metadata_thp == metadata_thp_auto); malloc_mutex_assert_owner(tsdn, &base->mtx); if (base->auto_thp_switched) { return; } /* Called when adding a new block. */ bool should_switch; if (base_ind_get(base) != 0) { should_switch = (base_get_num_blocks(base, true) == BASE_AUTO_THP_THRESHOLD); } else { should_switch = (base_get_num_blocks(base, true) == BASE_AUTO_THP_THRESHOLD_A0); } if (!should_switch) { return; } base->auto_thp_switched = true; assert(!config_stats || base->n_thp == 0); /* Make the initial blocks THP lazily. 
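 * Blocks created before the switch flipped are upgraded in place here;
 * on Linux builds pages_huge() is expected to reduce to a transparent
 * huge page hint along the lines of (an assumption about the usual
 * configuration, not a guaranteed implementation):
 *
 *   madvise(block, block->size, MADV_HUGEPAGE);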
*/ base_block_t *block = base->blocks; while (block != NULL) { assert((block->size & HUGEPAGE_MASK) == 0); pages_huge(block, block->size); if (config_stats) { base->n_thp += HUGEPAGE_CEILING(block->size - extent_bsize_get(&block->extent)) >> LG_HUGEPAGE; } block = block->next; assert(block == NULL || (base_ind_get(base) == 0)); } } static void * base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, size_t alignment) { void *ret; assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); assert(size == ALIGNMENT_CEILING(size, alignment)); *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), alignment) - (uintptr_t)extent_addr_get(extent); ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); assert(extent_bsize_get(extent) >= *gap_size + size); extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + *gap_size + size), extent_bsize_get(extent) - *gap_size - size, extent_sn_get(extent)); return ret; } static void base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, void *addr, size_t size) { if (extent_bsize_get(extent) > 0) { /* * Compute the index for the largest size class that does not * exceed extent's size. */ szind_t index_floor = sz_size2index(extent_bsize_get(extent) + 1) - 1; extent_heap_insert(&base->avail[index_floor], extent); } if (config_stats) { base->allocated += size; /* * Add one PAGE to base_resident for every page boundary that is * crossed by the new allocation. Adjust n_thp similarly when * metadata_thp is enabled. */ base->resident += PAGE_CEILING((uintptr_t)addr + size) - PAGE_CEILING((uintptr_t)addr - gap_size); assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); if (metadata_thp_madvise() && (opt_metadata_thp == metadata_thp_always || base->auto_thp_switched)) { base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size) - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> LG_HUGEPAGE; assert(base->mapped >= base->n_thp << LG_HUGEPAGE); } } } static void * base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, size_t alignment) { void *ret; size_t gap_size; ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); base_extent_bump_alloc_post(base, extent, gap_size, ret, size); return ret; } /* * Allocate a block of virtual memory that is large enough to start with a * base_block_t header, followed by an object of specified size and alignment. * On success a pointer to the initialized base_block_t header is returned. */ static base_block_t * base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, size_t alignment) { alignment = ALIGNMENT_CEILING(alignment, QUANTUM); size_t usize = ALIGNMENT_CEILING(size, alignment); size_t header_size = sizeof(base_block_t); size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size; /* * Create increasingly larger blocks in order to limit the total number * of disjoint virtual memory ranges. Choose the next size in the page * size class series (skipping size classes that are not a multiple of * HUGEPAGE), or a size large enough to satisfy the requested size and * alignment, whichever is larger. */ size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size + usize)); pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ? 
*pind_last + 1 : *pind_last; size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); size_t block_size = (min_block_size > next_block_size) ? min_block_size : next_block_size; base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, block_size); if (block == NULL) { return NULL; } if (metadata_thp_madvise()) { void *addr = (void *)block; assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && (block_size & HUGEPAGE_MASK) == 0); if (opt_metadata_thp == metadata_thp_always) { pages_huge(addr, block_size); } else if (opt_metadata_thp == metadata_thp_auto && base != NULL) { /* base != NULL indicates this is not a new base. */ malloc_mutex_lock(tsdn, &base->mtx); base_auto_thp_switch(tsdn, base); if (base->auto_thp_switched) { pages_huge(addr, block_size); } malloc_mutex_unlock(tsdn, &base->mtx); } } *pind_last = sz_psz2ind(block_size); block->size = block_size; block->next = NULL; assert(block_size >= header_size); base_extent_init(extent_sn_next, &block->extent, (void *)((uintptr_t)block + header_size), block_size - header_size); return block; } /* * Allocate an extent that is at least as large as specified size, with * specified alignment. */ static extent_t * base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { malloc_mutex_assert_owner(tsdn, &base->mtx); extent_hooks_t *extent_hooks = base_extent_hooks_get(base); /* * Drop mutex during base_block_alloc(), because an extent hook will be * called. */ malloc_mutex_unlock(tsdn, &base->mtx); base_block_t *block = base_block_alloc(tsdn, base, extent_hooks, base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, alignment); malloc_mutex_lock(tsdn, &base->mtx); if (block == NULL) { return NULL; } block->next = base->blocks; base->blocks = block; if (config_stats) { base->allocated += sizeof(base_block_t); base->resident += PAGE_CEILING(sizeof(base_block_t)); base->mapped += block->size; if (metadata_thp_madvise() && !(opt_metadata_thp == metadata_thp_auto && !base->auto_thp_switched)) { assert(base->n_thp > 0); base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >> LG_HUGEPAGE; } assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); assert(base->n_thp << LG_HUGEPAGE <= base->mapped); } return &block->extent; } base_t * b0get(void) { return b0; } base_t * base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { pszind_t pind_last = 0; size_t extent_sn_next = 0; base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind, &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); if (block == NULL) { return NULL; } size_t gap_size; size_t base_alignment = CACHELINE; size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, &gap_size, base_size, base_alignment); base->ind = ind; atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, malloc_mutex_rank_exclusive)) { base_unmap(tsdn, extent_hooks, ind, block, block->size); return NULL; } base->pind_last = pind_last; base->extent_sn_next = extent_sn_next; base->blocks = block; base->auto_thp_switched = false; for (szind_t i = 0; i < SC_NSIZES; i++) { extent_heap_new(&base->avail[i]); } if (config_stats) { base->allocated = sizeof(base_block_t); base->resident = PAGE_CEILING(sizeof(base_block_t)); base->mapped = block->size; base->n_thp = (opt_metadata_thp == metadata_thp_always) && metadata_thp_madvise() ? 
HUGEPAGE_CEILING(sizeof(base_block_t)) >> LG_HUGEPAGE : 0; assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); assert(base->n_thp << LG_HUGEPAGE <= base->mapped); } base_extent_bump_alloc_post(base, &block->extent, gap_size, base, base_size); return base; } void base_delete(tsdn_t *tsdn, base_t *base) { extent_hooks_t *extent_hooks = base_extent_hooks_get(base); base_block_t *next = base->blocks; do { base_block_t *block = next; next = block->next; base_unmap(tsdn, extent_hooks, base_ind_get(base), block, block->size); } while (next != NULL); } extent_hooks_t * base_extent_hooks_get(base_t *base) { return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, ATOMIC_ACQUIRE); } extent_hooks_t * base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); return old_extent_hooks; } static void * base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, size_t *esn) { alignment = QUANTUM_CEILING(alignment); size_t usize = ALIGNMENT_CEILING(size, alignment); size_t asize = usize + alignment - QUANTUM; extent_t *extent = NULL; malloc_mutex_lock(tsdn, &base->mtx); for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) { extent = extent_heap_remove_first(&base->avail[i]); if (extent != NULL) { /* Use existing space. */ break; } } if (extent == NULL) { /* Try to allocate more space. */ extent = base_extent_alloc(tsdn, base, usize, alignment); } void *ret; if (extent == NULL) { ret = NULL; goto label_return; } ret = base_extent_bump_alloc(base, extent, usize, alignment); if (esn != NULL) { *esn = extent_sn_get(extent); } label_return: malloc_mutex_unlock(tsdn, &base->mtx); return ret; } /* * base_alloc() returns zeroed memory, which is always demand-zeroed for the * auto arenas, in order to make multi-page sparse data structures such as radix * tree nodes efficient with respect to physical memory usage. Upon success a * pointer to at least size bytes with specified alignment is returned. Note * that size is rounded up to the nearest multiple of alignment to avoid false * sharing. 
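 * A minimal internal-usage sketch (my_meta_t is a hypothetical type;
 * assumes a valid tsdn and base):
 *
 *   my_meta_t *m = (my_meta_t *)base_alloc(tsdn, base,
 *       sizeof(my_meta_t), CACHELINE);
 *   if (m == NULL) {
 *       ... metadata allocation failed ...
 *   }
 *
 * The returned memory is zeroed and is never individually freed; it
 * remains live until base_delete() unmaps the whole base.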
*/ void * base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { return base_alloc_impl(tsdn, base, size, alignment, NULL); } extent_t * base_alloc_extent(tsdn_t *tsdn, base_t *base) { size_t esn; extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), CACHELINE, &esn); if (extent == NULL) { return NULL; } extent_esn_set(extent, esn); return extent; } void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, size_t *mapped, size_t *n_thp) { cassert(config_stats); malloc_mutex_lock(tsdn, &base->mtx); assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); *allocated = base->allocated; *resident = base->resident; *mapped = base->mapped; *n_thp = base->n_thp; malloc_mutex_unlock(tsdn, &base->mtx); } void base_prefork(tsdn_t *tsdn, base_t *base) { malloc_mutex_prefork(tsdn, &base->mtx); } void base_postfork_parent(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_parent(tsdn, &base->mtx); } void base_postfork_child(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_child(tsdn, &base->mtx); } bool base_boot(tsdn_t *tsdn) { b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); return (b0 == NULL); } jemalloc-sys-0.3.2/rep/src/bin.c010064400007650000024000000046701344617474000146470ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/bin.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/witness.h" bin_info_t bin_infos[SC_NBINS]; static void bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], bin_info_t bin_infos[SC_NBINS]) { for (unsigned i = 0; i < SC_NBINS; i++) { bin_info_t *bin_info = &bin_infos[i]; sc_t *sc = &sc_data->sc[i]; bin_info->reg_size = ((size_t)1U << sc->lg_base) + ((size_t)sc->ndelta << sc->lg_delta); bin_info->slab_size = (sc->pgs << LG_PAGE); bin_info->nregs = (uint32_t)(bin_info->slab_size / bin_info->reg_size); bin_info->n_shards = bin_shard_sizes[i]; bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER( bin_info->nregs); bin_info->bitmap_info = bitmap_info; } } bool bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size, size_t end_size, size_t nshards) { if (nshards > BIN_SHARDS_MAX || nshards == 0) { return true; } if (start_size > SC_SMALL_MAXCLASS) { return false; } if (end_size > SC_SMALL_MAXCLASS) { end_size = SC_SMALL_MAXCLASS; } /* Compute the index since this may happen before sz init. */ szind_t ind1 = sz_size2index_compute(start_size); szind_t ind2 = sz_size2index_compute(end_size); for (unsigned i = ind1; i <= ind2; i++) { bin_shard_sizes[i] = (unsigned)nshards; } return false; } void bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) { /* Load the default number of shards. 
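 * Every small size class starts out with N_BIN_SHARDS_DEFAULT shards;
 * bin_update_shard_size() above can then override a contiguous range
 * of size classes, e.g. (sketch with hypothetical values):
 *
 *   bin_update_shard_size(bin_shard_sizes, 64, 128, 8);
 *
 * which requests 8 shards for every bin whose class falls within
 * [64, 128] bytes.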
*/ for (unsigned i = 0; i < SC_NBINS; i++) { bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT; } } void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { assert(sc_data->initialized); bin_infos_init(sc_data, bin_shard_sizes, bin_infos); } bool bin_init(bin_t *bin) { if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, malloc_mutex_rank_exclusive)) { return true; } bin->slabcur = NULL; extent_heap_new(&bin->slabs_nonfull); extent_list_init(&bin->slabs_full); if (config_stats) { memset(&bin->stats, 0, sizeof(bin_stats_t)); } return false; } void bin_prefork(tsdn_t *tsdn, bin_t *bin) { malloc_mutex_prefork(tsdn, &bin->lock); } void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) { malloc_mutex_postfork_parent(tsdn, &bin->lock); } void bin_postfork_child(tsdn_t *tsdn, bin_t *bin) { malloc_mutex_postfork_child(tsdn, &bin->lock); } jemalloc-sys-0.3.2/rep/src/bitmap.c010064400007650000024000000061741344617474000153540ustar0000000000000000#define JEMALLOC_BITMAP_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" /******************************************************************************/ #ifdef BITMAP_USE_TREE void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; size_t group_count; assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); /* * Compute the number of groups necessary to store nbits bits, and * progressively work upward through the levels until reaching a level * that requires only one group. */ binfo->levels[0].group_offset = 0; group_count = BITMAP_BITS2GROUPS(nbits); for (i = 1; group_count > 1; i++) { assert(i < BITMAP_MAX_LEVELS); binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; group_count = BITMAP_BITS2GROUPS(group_count); } binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); binfo->nlevels = i; binfo->nbits = nbits; } static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { return binfo->levels[binfo->nlevels].group_offset; } void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { size_t extra; unsigned i; /* * Bits are actually inverted with regard to the external bitmap * interface. */ if (fill) { /* The "filled" bitmap starts out with all 0 bits. */ memset(bitmap, 0, bitmap_size(binfo)); return; } /* * The "empty" bitmap starts out with all 1 bits, except for trailing * unused bits (if any). Note that each group uses bit 0 to correspond * to the first logical bit in the group, so extra bits are the most * significant bits of the last group. 
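 * Worked example, assuming 64-bit groups: for nbits == 10,
 * extra == (64 - (10 & 63)) & 63 == 54, so the all-ones last group is
 * shifted right by 54 and exactly the 10 low-order (logical) bits
 * remain set.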
*/ memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { bitmap[binfo->levels[1].group_offset - 1] >>= extra; } for (i = 1; i < binfo->nlevels; i++) { size_t group_count = binfo->levels[i].group_offset - binfo->levels[i-1].group_offset; extra = (BITMAP_GROUP_NBITS - (group_count & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; } } } #else /* BITMAP_USE_TREE */ void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); binfo->ngroups = BITMAP_BITS2GROUPS(nbits); binfo->nbits = nbits; } static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { return binfo->ngroups; } void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { size_t extra; if (fill) { memset(bitmap, 0, bitmap_size(binfo)); return; } memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { bitmap[binfo->ngroups - 1] >>= extra; } } #endif /* BITMAP_USE_TREE */ size_t bitmap_size(const bitmap_info_t *binfo) { return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); } jemalloc-sys-0.3.2/rep/src/ckh.c010064400007650000024000000345771344617474000146550ustar0000000000000000/* ******************************************************************************* * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash * functions are employed. The original cuckoo hashing algorithm was described * in: * * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms * 51(2):122-144. * * Generalization of cuckoo hashing was discussed in: * * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical * alternative to traditional hash tables. In Proceedings of the 7th * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, * January 2006. * * This implementation uses precisely two hash functions because that is the * fewest that can work, and supporting multiple hashes is an implementation * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) * that shows approximate expected maximum load factors for various * configurations: * * | #cells/bucket | * #hashes | 1 | 2 | 4 | 8 | * --------+-------+-------+-------+-------+ * 1 | 0.006 | 0.006 | 0.03 | 0.12 | * 2 | 0.49 | 0.86 |>0.93< |>0.96< | * 3 | 0.91 | 0.97 | 0.98 | 0.999 | * 4 | 0.97 | 0.99 | 0.999 | | * * The number of cells per bucket is chosen such that a bucket fits in one cache * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, * respectively. * ******************************************************************************/ #define JEMALLOC_CKH_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/util.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); /******************************************************************************/ /* * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. */ static size_t ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; if (cell->key != NULL && ckh->keycomp(key, cell->key)) { return (bucket << LG_CKH_BUCKET_CELLS) + i; } } return SIZE_T_MAX; } /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ static size_t ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); ckh->hash(key, hashes); /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); if (cell != SIZE_T_MAX) { return cell; } /* Search secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); return cell; } static bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, const void *data) { ckhc_t *cell; unsigned offset, i; /* * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. */ offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; if (cell->key == NULL) { cell->key = key; cell->data = data; ckh->count++; return false; } } return true; } /* * No space is available in bucket. Randomly evict an item, then try to find an * alternate location for that item. Iteratively repeat this * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ static bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; unsigned i; bucket = argbucket; key = *argkey; data = *argdata; while (true) { /* * Choose a random item within the bucket to evict. This is * critical to correct function, because without (eventually) * evicting all items within a bucket during iteration, it * would be possible to get stuck in an infinite loop if there * were an item for which both hashes indicated the same * bucket. */ i = (unsigned)prng_lg_range_u64(&ckh->prng_state, LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); /* Swap cell->{key,data} and {key,data} (evict). */ tkey = cell->key; tdata = cell->data; cell->key = key; cell->data = data; key = tkey; data = tdata; #ifdef CKH_COUNT ckh->nrelocs++; #endif /* Find the alternate bucket for the evicted item. */ ckh->hash(key, hashes); tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (tbucket == bucket) { tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); /* * It may be that (tbucket == bucket) still, if the * item's hashes both indicate this bucket. However, * we are guaranteed to eventually escape this bucket * during iteration, assuming pseudo-random item * selection (true randomness would make infinite * looping a remote possibility). 
The reason we can * never get trapped forever is that there are two * cases: * * 1) This bucket == argbucket, so we will quickly * detect an eviction cycle and terminate. * 2) An item was evicted to this bucket from another, * which means that at least one item in this bucket * has hashes that indicate distinct buckets. */ } /* Check for a cycle. */ if (tbucket == argbucket) { *argkey = key; *argdata = data; return true; } bucket = tbucket; if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } } } static bool ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; ckh->hash(key, hashes); /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } /* * Try to find a place for this item via iterative eviction/relocation. */ return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); } /* * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. */ static bool ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; count = ckh->count; ckh->count = 0; for (i = nins = 0; nins < count; i++) { if (aTab[i].key != NULL) { key = aTab[i].key; data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; return true; } nins++; } } return false; } static bool ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT ckh->ngrows++; #endif /* * It is possible (though unlikely, given well behaved hashes) that the * table will have to be doubled more than once in order to create a * usable table. */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; while (true) { size_t usize; lg_curcells++; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { ret = true; goto label_return; } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; } /* Swap in new table. */ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: return ret; } static void ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; size_t usize; unsigned lg_prevbuckets, lg_curcells; /* * It is possible (though unlikely, given well behaved hashes) that the * table rebuild will fail. */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return; } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't * prevent this or future operations from proceeding. */ return; } /* Swap in new table. 
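 * As in ckh_grow(), the old table must stay intact until ckh_rebuild()
 * succeeds, so the pointers are swapped rather than the old table
 * being freed up front; if the rebuild fails, the swap is undone below
 * and the shrink attempt is abandoned.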
*/ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif return; } /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT ckh->nshrinkfails++; #endif } bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; assert(minitems > 0); assert(hash != NULL); assert(keycomp != NULL); #ifdef CKH_COUNT ckh->ngrows = 0; ckh->nshrinks = 0; ckh->nshrinkfails = 0; ckh->ninserts = 0; ckh->nrelocs = 0; #endif ckh->prng_state = 42; /* Value doesn't really matter. */ ckh->count = 0; /* * Find the minimum power of 2 that is large enough to fit minitems * entries. We are using (2+,2) cuckoo hashing, which has an expected * maximum load factor of at least ~0.86, so 0.75 is a conservative load * factor that will typically allow mincells items to fit without ever * growing the table. */ assert(LG_CKH_BUCKET_CELLS > 0); mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; lg_mincells++) { /* Do nothing. */ } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; ckh->keycomp = keycomp; usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { ret = true; goto label_return; } ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; } ret = false; label_return: return ret; } void ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE malloc_printf( "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," " nrelocs: %"FMTu64"\n", __func__, ckh, (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks, (unsigned long long)ckh->nshrinkfails, (unsigned long long)ckh->ninserts, (unsigned long long)ckh->nrelocs); #endif idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); if (config_debug) { memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); } } size_t ckh_count(ckh_t *ckh) { assert(ckh != NULL); return ckh->count; } bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { if (key != NULL) { *key = (void *)ckh->tab[i].key; } if (data != NULL) { *data = (void *)ckh->tab[i].data; } *tabind = i + 1; return false; } } return true; } bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); assert(ckh_search(ckh, key, NULL, NULL)); #ifdef CKH_COUNT ckh->ninserts++; #endif while (ckh_try_insert(ckh, &key, &data)) { if (ckh_grow(tsd, ckh)) { ret = true; goto label_return; } } ret = false; label_return: return ret; } bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { if (key != NULL) { *key = (void *)ckh->tab[cell].key; } if (data != NULL) { *data = (void *)ckh->tab[cell].data; } ckh->tab[cell].key 
= NULL; ckh->tab[cell].data = NULL; /* Not necessary. */ ckh->count--; /* Try to halve the table if it is less than 1/4 full. */ if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets > ckh->lg_minbuckets) { /* Ignore error due to OOM. */ ckh_shrink(tsd, ckh); } return false; } return true; } bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { if (key != NULL) { *key = (void *)ckh->tab[cell].key; } if (data != NULL) { *data = (void *)ckh->tab[cell].data; } return false; } return true; } void ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool ckh_string_keycomp(const void *k1, const void *k2) { assert(k1 != NULL); assert(k2 != NULL); return !strcmp((char *)k1, (char *)k2); } void ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; } u; assert(sizeof(u.v) == sizeof(u.i)); u.v = key; hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); } bool ckh_pointer_keycomp(const void *k1, const void *k2) { return (k1 == k2); } jemalloc-sys-0.3.2/rep/src/ctl.c010064400007650000024000002525431344617474000146650ustar0000000000000000#define JEMALLOC_CTL_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ /* * ctl_mtx protects the following: * - ctl_stats->* */ static malloc_mutex_t ctl_mtx; static bool ctl_initialized; static ctl_stats_t *ctl_stats; static ctl_arenas_t *ctl_arenas; /******************************************************************************/ /* Helpers for named and indexed nodes. */ static const ctl_named_node_t * ctl_named_node(const ctl_node_t *node) { return ((node->named) ? (const ctl_named_node_t *)node : NULL); } static const ctl_named_node_t * ctl_named_children(const ctl_named_node_t *node, size_t index) { const ctl_named_node_t *children = ctl_named_node(node->children); return (children ? &children[index] : NULL); } static const ctl_indexed_node_t * ctl_indexed_node(const ctl_node_t *node) { return (!node->named ? (const ctl_indexed_node_t *)node : NULL); } /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ #define CTL_PROTO(n) \ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen); #define INDEX_PROTO(n) \ static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ const size_t *mib, size_t miblen, size_t i); CTL_PROTO(version) CTL_PROTO(epoch) CTL_PROTO(background_thread) CTL_PROTO(max_background_threads) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_prof_name) CTL_PROTO(thread_prof_active) CTL_PROTO(thread_arena) CTL_PROTO(thread_allocated) CTL_PROTO(thread_allocatedp) CTL_PROTO(thread_deallocated) CTL_PROTO(thread_deallocatedp) CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) CTL_PROTO(config_malloc_conf) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_stats) CTL_PROTO(config_utrace) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) CTL_PROTO(opt_abort_conf) CTL_PROTO(opt_metadata_thp) CTL_PROTO(opt_retain) CTL_PROTO(opt_dss) CTL_PROTO(opt_narenas) CTL_PROTO(opt_percpu_arena) CTL_PROTO(opt_oversize_threshold) CTL_PROTO(opt_background_thread) CTL_PROTO(opt_max_background_threads) CTL_PROTO(opt_dirty_decay_ms) CTL_PROTO(opt_muzzy_decay_ms) CTL_PROTO(opt_stats_print) CTL_PROTO(opt_stats_print_opts) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) CTL_PROTO(opt_utrace) CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_tcache) CTL_PROTO(opt_thp) CTL_PROTO(opt_lg_extent_max_active_fit) CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_active) CTL_PROTO(opt_prof_thread_active_init) CTL_PROTO(opt_lg_prof_sample) CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_accum) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) CTL_PROTO(arena_i_initialized) CTL_PROTO(arena_i_decay) CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_reset) CTL_PROTO(arena_i_destroy) CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_dirty_decay_ms) CTL_PROTO(arena_i_muzzy_decay_ms) CTL_PROTO(arena_i_extent_hooks) CTL_PROTO(arena_i_retain_grow_limit) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_slab_size) CTL_PROTO(arenas_bin_i_nshards) INDEX_PROTO(arenas_bin_i) CTL_PROTO(arenas_lextent_i_size) INDEX_PROTO(arenas_lextent_i) CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_dirty_decay_ms) CTL_PROTO(arenas_muzzy_decay_ms) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nlextents) CTL_PROTO(arenas_create) CTL_PROTO(arenas_lookup) CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) CTL_PROTO(prof_gdump) CTL_PROTO(prof_reset) CTL_PROTO(prof_interval) CTL_PROTO(lg_prof_sample) CTL_PROTO(prof_log_start) CTL_PROTO(prof_log_stop) CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) CTL_PROTO(stats_arenas_i_small_nrequests) CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_curregs) CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nslabs) 
CTL_PROTO(stats_arenas_i_bins_j_nreslabs) CTL_PROTO(stats_arenas_i_bins_j_curslabs) INDEX_PROTO(stats_arenas_i_bins_j) CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) CTL_PROTO(stats_arenas_i_lextents_j_nrequests) CTL_PROTO(stats_arenas_i_lextents_j_curlextents) INDEX_PROTO(stats_arenas_i_lextents_j) CTL_PROTO(stats_arenas_i_extents_j_ndirty) CTL_PROTO(stats_arenas_i_extents_j_nmuzzy) CTL_PROTO(stats_arenas_i_extents_j_nretained) CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes) CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes) CTL_PROTO(stats_arenas_i_extents_j_retained_bytes) INDEX_PROTO(stats_arenas_i_extents_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_uptime) CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dirty_decay_ms) CTL_PROTO(stats_arenas_i_muzzy_decay_ms) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_pmuzzy) CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_retained) CTL_PROTO(stats_arenas_i_extent_avail) CTL_PROTO(stats_arenas_i_dirty_npurge) CTL_PROTO(stats_arenas_i_dirty_nmadvise) CTL_PROTO(stats_arenas_i_dirty_purged) CTL_PROTO(stats_arenas_i_muzzy_npurge) CTL_PROTO(stats_arenas_i_muzzy_nmadvise) CTL_PROTO(stats_arenas_i_muzzy_purged) CTL_PROTO(stats_arenas_i_base) CTL_PROTO(stats_arenas_i_internal) CTL_PROTO(stats_arenas_i_metadata_thp) CTL_PROTO(stats_arenas_i_tcache_bytes) CTL_PROTO(stats_arenas_i_resident) INDEX_PROTO(stats_arenas_i) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) CTL_PROTO(stats_background_thread_num_threads) CTL_PROTO(stats_background_thread_num_runs) CTL_PROTO(stats_background_thread_run_interval) CTL_PROTO(stats_metadata) CTL_PROTO(stats_metadata_thp) CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) CTL_PROTO(stats_retained) CTL_PROTO(experimental_hooks_install) CTL_PROTO(experimental_hooks_remove) #define MUTEX_STATS_CTL_PROTO_GEN(n) \ CTL_PROTO(stats_##n##_num_ops) \ CTL_PROTO(stats_##n##_num_wait) \ CTL_PROTO(stats_##n##_num_spin_acq) \ CTL_PROTO(stats_##n##_num_owner_switch) \ CTL_PROTO(stats_##n##_total_wait_time) \ CTL_PROTO(stats_##n##_max_wait_time) \ CTL_PROTO(stats_##n##_max_num_thds) /* Global mutexes. */ #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) MUTEX_PROF_GLOBAL_MUTEXES #undef OP /* Per arena mutexes. */ #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx) MUTEX_PROF_ARENA_MUTEXES #undef OP /* Arena bin mutexes. */ MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) #undef MUTEX_STATS_CTL_PROTO_GEN CTL_PROTO(stats_mutexes_reset) /******************************************************************************/ /* mallctl tree. */ #define NAME(n) {true}, n #define CHILD(t, c) \ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ (ctl_node_t *)c##_node, \ NULL #define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. 
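 * Each {NAME(...), CTL(...)} entry below binds one path component to
 * its handler, so e.g. "opt.narenas" resolves through opt_node to
 * opt_narenas_ctl. Reading such a node through the public API looks
 * like this (standard mallctl usage, sketched from the caller's side):
 *
 *   unsigned narenas;
 *   size_t sz = sizeof(narenas);
 *   if (mallctl("opt.narenas", &narenas, &sz, NULL, 0) == 0) {
 *       ... narenas now holds the configured option value ...
 *   }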
*/ #define INDEX(i) {false}, i##_index static const ctl_named_node_t thread_tcache_node[] = { {NAME("enabled"), CTL(thread_tcache_enabled)}, {NAME("flush"), CTL(thread_tcache_flush)} }; static const ctl_named_node_t thread_prof_node[] = { {NAME("name"), CTL(thread_prof_name)}, {NAME("active"), CTL(thread_prof_active)} }; static const ctl_named_node_t thread_node[] = { {NAME("arena"), CTL(thread_arena)}, {NAME("allocated"), CTL(thread_allocated)}, {NAME("allocatedp"), CTL(thread_allocatedp)}, {NAME("deallocated"), CTL(thread_deallocated)}, {NAME("deallocatedp"), CTL(thread_deallocatedp)}, {NAME("tcache"), CHILD(named, thread_tcache)}, {NAME("prof"), CHILD(named, thread_prof)} }; static const ctl_named_node_t config_node[] = { {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, {NAME("debug"), CTL(config_debug)}, {NAME("fill"), CTL(config_fill)}, {NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("malloc_conf"), CTL(config_malloc_conf)}, {NAME("prof"), CTL(config_prof)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, {NAME("stats"), CTL(config_stats)}, {NAME("utrace"), CTL(config_utrace)}, {NAME("xmalloc"), CTL(config_xmalloc)} }; static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, {NAME("abort_conf"), CTL(opt_abort_conf)}, {NAME("metadata_thp"), CTL(opt_metadata_thp)}, {NAME("retain"), CTL(opt_retain)}, {NAME("dss"), CTL(opt_dss)}, {NAME("narenas"), CTL(opt_narenas)}, {NAME("percpu_arena"), CTL(opt_percpu_arena)}, {NAME("oversize_threshold"), CTL(opt_oversize_threshold)}, {NAME("background_thread"), CTL(opt_background_thread)}, {NAME("max_background_threads"), CTL(opt_max_background_threads)}, {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, {NAME("stats_print"), CTL(opt_stats_print)}, {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, {NAME("utrace"), CTL(opt_utrace)}, {NAME("xmalloc"), CTL(opt_xmalloc)}, {NAME("tcache"), CTL(opt_tcache)}, {NAME("thp"), CTL(opt_thp)}, {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, {NAME("prof"), CTL(opt_prof)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_active"), CTL(opt_prof_active)}, {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, {NAME("prof_gdump"), CTL(opt_prof_gdump)}, {NAME("prof_final"), CTL(opt_prof_final)}, {NAME("prof_leak"), CTL(opt_prof_leak)}, {NAME("prof_accum"), CTL(opt_prof_accum)} }; static const ctl_named_node_t tcache_node[] = { {NAME("create"), CTL(tcache_create)}, {NAME("flush"), CTL(tcache_flush)}, {NAME("destroy"), CTL(tcache_destroy)} }; static const ctl_named_node_t arena_i_node[] = { {NAME("initialized"), CTL(arena_i_initialized)}, {NAME("decay"), CTL(arena_i_decay)}, {NAME("purge"), CTL(arena_i_purge)}, {NAME("reset"), CTL(arena_i_reset)}, {NAME("destroy"), CTL(arena_i_destroy)}, {NAME("dss"), CTL(arena_i_dss)}, {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)} }; static const ctl_named_node_t super_arena_i_node[] = { {NAME(""), CHILD(named, arena_i)} }; static const ctl_indexed_node_t arena_node[] = { {INDEX(arena_i)} }; static const ctl_named_node_t 
arenas_bin_i_node[] = { {NAME("size"), CTL(arenas_bin_i_size)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}, {NAME("nshards"), CTL(arenas_bin_i_nshards)} }; static const ctl_named_node_t super_arenas_bin_i_node[] = { {NAME(""), CHILD(named, arenas_bin_i)} }; static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; static const ctl_named_node_t arenas_lextent_i_node[] = { {NAME("size"), CTL(arenas_lextent_i_size)} }; static const ctl_named_node_t super_arenas_lextent_i_node[] = { {NAME(""), CHILD(named, arenas_lextent_i)} }; static const ctl_indexed_node_t arenas_lextent_node[] = { {INDEX(arenas_lextent_i)} }; static const ctl_named_node_t arenas_node[] = { {NAME("narenas"), CTL(arenas_narenas)}, {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, {NAME("quantum"), CTL(arenas_quantum)}, {NAME("page"), CTL(arenas_page)}, {NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("nbins"), CTL(arenas_nbins)}, {NAME("nhbins"), CTL(arenas_nhbins)}, {NAME("bin"), CHILD(indexed, arenas_bin)}, {NAME("nlextents"), CTL(arenas_nlextents)}, {NAME("lextent"), CHILD(indexed, arenas_lextent)}, {NAME("create"), CTL(arenas_create)}, {NAME("lookup"), CTL(arenas_lookup)} }; static const ctl_named_node_t prof_node[] = { {NAME("thread_active_init"), CTL(prof_thread_active_init)}, {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, {NAME("gdump"), CTL(prof_gdump)}, {NAME("reset"), CTL(prof_reset)}, {NAME("interval"), CTL(prof_interval)}, {NAME("lg_sample"), CTL(lg_prof_sample)}, {NAME("log_start"), CTL(prof_log_start)}, {NAME("log_stop"), CTL(prof_log_stop)} }; static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} }; static const ctl_named_node_t stats_arenas_i_large_node[] = { {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; #define MUTEX_PROF_DATA_NODE(prefix) \ static const ctl_named_node_t stats_##prefix##_node[] = { \ {NAME("num_ops"), \ CTL(stats_##prefix##_num_ops)}, \ {NAME("num_wait"), \ CTL(stats_##prefix##_num_wait)}, \ {NAME("num_spin_acq"), \ CTL(stats_##prefix##_num_spin_acq)}, \ {NAME("num_owner_switch"), \ CTL(stats_##prefix##_num_owner_switch)}, \ {NAME("total_wait_time"), \ CTL(stats_##prefix##_total_wait_time)}, \ {NAME("max_wait_time"), \ CTL(stats_##prefix##_max_wait_time)}, \ {NAME("max_num_thds"), \ CTL(stats_##prefix##_max_num_thds)} \ /* Note that # of current waiting thread not provided. 
*/ \ }; MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} }; static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_bins_j)} }; static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} }; static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} }; static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { {INDEX(stats_arenas_i_lextents_j)} }; static const ctl_named_node_t stats_arenas_i_extents_j_node[] = { {NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)}, {NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)}, {NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)}, {NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)}, {NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)}, {NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)} }; static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_extents_j)} }; static const ctl_indexed_node_t stats_arenas_i_extents_node[] = { {INDEX(stats_arenas_i_extents_j)} }; #define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) MUTEX_PROF_ARENA_MUTEXES #undef OP static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { #define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, MUTEX_PROF_ARENA_MUTEXES #undef OP }; static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("uptime"), CTL(stats_arenas_i_uptime)}, {NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("retained"), CTL(stats_arenas_i_retained)}, {NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)}, {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, {NAME("base"), CTL(stats_arenas_i_base)}, {NAME("internal"), CTL(stats_arenas_i_internal)}, {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, {NAME("tcache_bytes"), 
CTL(stats_arenas_i_tcache_bytes)}, {NAME("resident"), CTL(stats_arenas_i_resident)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)}, {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { {NAME(""), CHILD(named, stats_arenas_i)} }; static const ctl_indexed_node_t stats_arenas_node[] = { {INDEX(stats_arenas_i)} }; static const ctl_named_node_t stats_background_thread_node[] = { {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, {NAME("run_interval"), CTL(stats_background_thread_run_interval)} }; #define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) MUTEX_PROF_GLOBAL_MUTEXES #undef OP static const ctl_named_node_t stats_mutexes_node[] = { #define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, MUTEX_PROF_GLOBAL_MUTEXES #undef OP {NAME("reset"), CTL(stats_mutexes_reset)} }; #undef MUTEX_PROF_DATA_NODE static const ctl_named_node_t stats_node[] = { {NAME("allocated"), CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("metadata"), CTL(stats_metadata)}, {NAME("metadata_thp"), CTL(stats_metadata_thp)}, {NAME("resident"), CTL(stats_resident)}, {NAME("mapped"), CTL(stats_mapped)}, {NAME("retained"), CTL(stats_retained)}, {NAME("background_thread"), CHILD(named, stats_background_thread)}, {NAME("mutexes"), CHILD(named, stats_mutexes)}, {NAME("arenas"), CHILD(indexed, stats_arenas)} }; static const ctl_named_node_t hooks_node[] = { {NAME("install"), CTL(experimental_hooks_install)}, {NAME("remove"), CTL(experimental_hooks_remove)}, }; static const ctl_named_node_t experimental_node[] = { {NAME("hooks"), CHILD(named, hooks)} }; static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, {NAME("background_thread"), CTL(background_thread)}, {NAME("max_background_threads"), CTL(max_background_threads)}, {NAME("thread"), CHILD(named, thread)}, {NAME("config"), CHILD(named, config)}, {NAME("opt"), CHILD(named, opt)}, {NAME("tcache"), CHILD(named, tcache)}, {NAME("arena"), CHILD(indexed, arena)}, {NAME("arenas"), CHILD(named, arenas)}, {NAME("prof"), CHILD(named, prof)}, {NAME("stats"), CHILD(named, stats)}, {NAME("experimental"), CHILD(named, experimental)} }; static const ctl_named_node_t super_root_node[] = { {NAME(""), CHILD(named, root)} }; #undef NAME #undef CHILD #undef CTL #undef INDEX /******************************************************************************/ /* * Sets *dst + *src non-atomically. This is safe, since everything is * synchronized by the ctl mutex. */ static void ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { #ifdef JEMALLOC_ATOMIC_U64 uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); #else *dst += *src; #endif } /* Likewise: with ctl mutex synchronization, reading is simple. 
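When
 * JEMALLOC_ATOMIC_U64 is not defined, arena_stats_u64_t is a plain uint64_t
 * (hence the direct read below), and the same mutex provides exclusion.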
*/ static uint64_t ctl_arena_stats_read_u64(arena_stats_u64_t *p) { #ifdef JEMALLOC_ATOMIC_U64 return atomic_load_u64(p, ATOMIC_RELAXED); #else return *p; #endif } static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); } /******************************************************************************/ static unsigned arenas_i2a_impl(size_t i, bool compat, bool validate) { unsigned a; switch (i) { case MALLCTL_ARENAS_ALL: a = 0; break; case MALLCTL_ARENAS_DESTROYED: a = 1; break; default: if (compat && i == ctl_arenas->narenas) { /* * Provide deprecated backward compatibility for * accessing the merged stats at index narenas rather * than via MALLCTL_ARENAS_ALL. This is scheduled for * removal in 6.0.0. */ a = 0; } else if (validate && i >= ctl_arenas->narenas) { a = UINT_MAX; } else { /* * This function should never be called for an index * more than one past the range of indices that have * initialized ctl data. */ assert(i < ctl_arenas->narenas || (!validate && i == ctl_arenas->narenas)); a = (unsigned)i + 2; } break; } return a; } static unsigned arenas_i2a(size_t i) { return arenas_i2a_impl(i, true, false); } static ctl_arena_t * arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { ctl_arena_t *ret; assert(!compat || !init); ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)]; if (init && ret == NULL) { if (config_stats) { struct container_s { ctl_arena_t ctl_arena; ctl_arena_stats_t astats; }; struct container_s *cont = (struct container_s *)base_alloc(tsd_tsdn(tsd), b0get(), sizeof(struct container_s), QUANTUM); if (cont == NULL) { return NULL; } ret = &cont->ctl_arena; ret->astats = &cont->astats; } else { ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(), sizeof(ctl_arena_t), QUANTUM); if (ret == NULL) { return NULL; } } ret->arena_ind = (unsigned)i; ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret; } assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); return ret; } static ctl_arena_t * arenas_i(size_t i) { ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false); assert(ret != NULL); return ret; } static void ctl_arena_clear(ctl_arena_t *ctl_arena) { ctl_arena->nthreads = 0; ctl_arena->dss = dss_prec_names[dss_prec_limit]; ctl_arena->dirty_decay_ms = -1; ctl_arena->muzzy_decay_ms = -1; ctl_arena->pactive = 0; ctl_arena->pdirty = 0; ctl_arena->pmuzzy = 0; if (config_stats) { memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); ctl_arena->astats->allocated_small = 0; ctl_arena->astats->nmalloc_small = 0; ctl_arena->astats->ndalloc_small = 0; ctl_arena->astats->nrequests_small = 0; memset(ctl_arena->astats->bstats, 0, SC_NBINS * sizeof(bin_stats_t)); memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) * sizeof(arena_stats_large_t)); memset(ctl_arena->astats->estats, 0, SC_NPSIZES * sizeof(arena_stats_extents_t)); } } static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { unsigned i; if (config_stats) { arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, &ctl_arena->dss, &ctl_arena->dirty_decay_ms, &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->pdirty, &ctl_arena->pmuzzy, &ctl_arena->astats->astats, ctl_arena->astats->bstats, ctl_arena->astats->lstats, ctl_arena->astats->estats); for (i = 0; i < SC_NBINS; i++) { ctl_arena->astats->allocated_small += ctl_arena->astats->bstats[i].curregs * 
sz_index2size(i); ctl_arena->astats->nmalloc_small += ctl_arena->astats->bstats[i].nmalloc; ctl_arena->astats->ndalloc_small += ctl_arena->astats->bstats[i].ndalloc; ctl_arena->astats->nrequests_small += ctl_arena->astats->bstats[i].nrequests; } } else { arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, &ctl_arena->dss, &ctl_arena->dirty_decay_ms, &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->pdirty, &ctl_arena->pmuzzy); } } static void ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, bool destroyed) { unsigned i; if (!destroyed) { ctl_sdarena->nthreads += ctl_arena->nthreads; ctl_sdarena->pactive += ctl_arena->pactive; ctl_sdarena->pdirty += ctl_arena->pdirty; ctl_sdarena->pmuzzy += ctl_arena->pmuzzy; } else { assert(ctl_arena->nthreads == 0); assert(ctl_arena->pactive == 0); assert(ctl_arena->pdirty == 0); assert(ctl_arena->pmuzzy == 0); } if (config_stats) { ctl_arena_stats_t *sdstats = ctl_sdarena->astats; ctl_arena_stats_t *astats = ctl_arena->astats; if (!destroyed) { accum_atomic_zu(&sdstats->astats.mapped, &astats->astats.mapped); accum_atomic_zu(&sdstats->astats.retained, &astats->astats.retained); accum_atomic_zu(&sdstats->astats.extent_avail, &astats->astats.extent_avail); } ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, &astats->astats.decay_dirty.npurge); ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, &astats->astats.decay_dirty.nmadvise); ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, &astats->astats.decay_dirty.purged); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, &astats->astats.decay_muzzy.npurge); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, &astats->astats.decay_muzzy.nmadvise); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, &astats->astats.decay_muzzy.purged); #define OP(mtx) malloc_mutex_prof_merge( \ &(sdstats->astats.mutex_prof_data[ \ arena_prof_mutex_##mtx]), \ &(astats->astats.mutex_prof_data[ \ arena_prof_mutex_##mtx])); MUTEX_PROF_ARENA_MUTEXES #undef OP if (!destroyed) { accum_atomic_zu(&sdstats->astats.base, &astats->astats.base); accum_atomic_zu(&sdstats->astats.internal, &astats->astats.internal); accum_atomic_zu(&sdstats->astats.resident, &astats->astats.resident); accum_atomic_zu(&sdstats->astats.metadata_thp, &astats->astats.metadata_thp); } else { assert(atomic_load_zu( &astats->astats.internal, ATOMIC_RELAXED) == 0); } if (!destroyed) { sdstats->allocated_small += astats->allocated_small; } else { assert(astats->allocated_small == 0); } sdstats->nmalloc_small += astats->nmalloc_small; sdstats->ndalloc_small += astats->ndalloc_small; sdstats->nrequests_small += astats->nrequests_small; if (!destroyed) { accum_atomic_zu(&sdstats->astats.allocated_large, &astats->astats.allocated_large); } else { assert(atomic_load_zu(&astats->astats.allocated_large, ATOMIC_RELAXED) == 0); } ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large, &astats->astats.nmalloc_large); ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large, &astats->astats.ndalloc_large); ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, &astats->astats.nrequests_large); accum_atomic_zu(&sdstats->astats.tcache_bytes, &astats->astats.tcache_bytes); if (ctl_arena->arena_ind == 0) { sdstats->astats.uptime = astats->astats.uptime; } /* Merge bin stats. 
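Event counters (nmalloc, ndalloc,
 * nrequests, nfills, nflushes, nslabs, reslabs) always accumulate; the
 * occupancy gauges (curregs, curslabs) accumulate only for live arenas and
 * are asserted to be zero once an arena has been destroyed.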
*/ for (i = 0; i < SC_NBINS; i++) { sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; sdstats->bstats[i].nrequests += astats->bstats[i].nrequests; if (!destroyed) { sdstats->bstats[i].curregs += astats->bstats[i].curregs; } else { assert(astats->bstats[i].curregs == 0); } sdstats->bstats[i].nfills += astats->bstats[i].nfills; sdstats->bstats[i].nflushes += astats->bstats[i].nflushes; sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; if (!destroyed) { sdstats->bstats[i].curslabs += astats->bstats[i].curslabs; } else { assert(astats->bstats[i].curslabs == 0); } malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, &astats->bstats[i].mutex_data); } /* Merge stats for large allocations. */ for (i = 0; i < SC_NSIZES - SC_NBINS; i++) { ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, &astats->lstats[i].nmalloc); ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, &astats->lstats[i].ndalloc); ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests, &astats->lstats[i].nrequests); if (!destroyed) { sdstats->lstats[i].curlextents += astats->lstats[i].curlextents; } else { assert(astats->lstats[i].curlextents == 0); } } /* Merge extents stats. */ for (i = 0; i < SC_NPSIZES; i++) { accum_atomic_zu(&sdstats->estats[i].ndirty, &astats->estats[i].ndirty); accum_atomic_zu(&sdstats->estats[i].nmuzzy, &astats->estats[i].nmuzzy); accum_atomic_zu(&sdstats->estats[i].nretained, &astats->estats[i].nretained); accum_atomic_zu(&sdstats->estats[i].dirty_bytes, &astats->estats[i].dirty_bytes); accum_atomic_zu(&sdstats->estats[i].muzzy_bytes, &astats->estats[i].muzzy_bytes); accum_atomic_zu(&sdstats->estats[i].retained_bytes, &astats->estats[i].retained_bytes); } } } static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, unsigned i, bool destroyed) { ctl_arena_t *ctl_arena = arenas_i(i); ctl_arena_clear(ctl_arena); ctl_arena_stats_amerge(tsdn, ctl_arena, arena); /* Merge into sum stats as well. */ ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); } static unsigned ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { unsigned arena_ind; ctl_arena_t *ctl_arena; if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != NULL) { ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); arena_ind = ctl_arena->arena_ind; } else { arena_ind = ctl_arenas->narenas; } /* Trigger stats allocation. */ if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { return UINT_MAX; } /* Initialize new arena. */ if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { return UINT_MAX; } if (arena_ind == ctl_arenas->narenas) { ctl_arenas->narenas++; } return arena_ind; } static void ctl_background_thread_stats_read(tsdn_t *tsdn) { background_thread_stats_t *stats = &ctl_stats->background_thread; if (!have_background_thread || background_thread_stats_read(tsdn, stats)) { memset(stats, 0, sizeof(background_thread_stats_t)); nstime_init(&stats->run_interval, 0); } } static void ctl_refresh(tsdn_t *tsdn) { unsigned i; ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). 
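 * Refresh is driven by a write to the "epoch" mallctl (see epoch_ctl()
 * below); a typical stats consumer bumps the epoch and then reads the
 * refreshed values, e.g.:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);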
*/ ctl_arena_clear(ctl_sarena); for (i = 0; i < ctl_arenas->narenas; i++) { tarenas[i] = arena_get(tsdn, i, false); } for (i = 0; i < ctl_arenas->narenas; i++) { ctl_arena_t *ctl_arena = arenas_i(i); bool initialized = (tarenas[i] != NULL); ctl_arena->initialized = initialized; if (initialized) { ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, false); } } if (config_stats) { ctl_stats->allocated = ctl_sarena->astats->allocated_small + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, ATOMIC_RELAXED); ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); ctl_stats->metadata = atomic_load_zu( &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + atomic_load_zu(&ctl_sarena->astats->astats.internal, ATOMIC_RELAXED); ctl_stats->metadata_thp = atomic_load_zu( &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED); ctl_stats->resident = atomic_load_zu( &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); ctl_stats->mapped = atomic_load_zu( &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); ctl_stats->retained = atomic_load_zu( &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); ctl_background_thread_stats_read(tsdn); #define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ malloc_mutex_lock(tsdn, &mtx); \ malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ malloc_mutex_unlock(tsdn, &mtx); if (config_prof && opt_prof) { READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, bt2gctx_mtx); } if (have_background_thread) { READ_GLOBAL_MUTEX_PROF_DATA( global_prof_mutex_background_thread, background_thread_lock); } else { memset(&ctl_stats->mutex_prof_data[ global_prof_mutex_background_thread], 0, sizeof(mutex_prof_data_t)); } /* We own ctl mutex already. */ malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], &ctl_mtx); #undef READ_GLOBAL_MUTEX_PROF_DATA } ctl_arenas->epoch++; } static bool ctl_init(tsd_t *tsd) { bool ret; tsdn_t *tsdn = tsd_tsdn(tsd); malloc_mutex_lock(tsdn, &ctl_mtx); if (!ctl_initialized) { ctl_arena_t *ctl_sarena, *ctl_darena; unsigned i; /* * Allocate demand-zeroed space for pointers to the full * range of supported arena indices. */ if (ctl_arenas == NULL) { ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, b0get(), sizeof(ctl_arenas_t), QUANTUM); if (ctl_arenas == NULL) { ret = true; goto label_return; } } if (config_stats && ctl_stats == NULL) { ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), sizeof(ctl_stats_t), QUANTUM); if (ctl_stats == NULL) { ret = true; goto label_return; } } /* * Allocate space for the current full range of arenas * here rather than doing it lazily elsewhere, in order * to limit when OOM-caused errors can occur. */ if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, true)) == NULL) { ret = true; goto label_return; } ctl_sarena->initialized = true; if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, false, true)) == NULL) { ret = true; goto label_return; } ctl_arena_clear(ctl_darena); /* * Don't toggle ctl_darena to initialized until an arena is * actually destroyed, so that arena..initialized can be used * to query whether the stats are relevant. 
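 * (That query is the "arena.<i>.initialized" mallctl, handled by
 * arena_i_initialized_ctl() below.)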
*/ ctl_arenas->narenas = narenas_total_get(); for (i = 0; i < ctl_arenas->narenas; i++) { if (arenas_i_impl(tsd, i, false, true) == NULL) { ret = true; goto label_return; } } ql_new(&ctl_arenas->destroyed); ctl_refresh(tsdn); ctl_initialized = true; } ret = false; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; } static int ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; const ctl_named_node_t *node; elm = name; /* Equivalent to strchrnul(). */ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); if (elen == 0) { ret = ENOENT; goto label_return; } node = super_root_node; for (i = 0; i < *depthp; i++) { assert(node); assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { const ctl_named_node_t *pnode = node; /* Children are named. */ for (j = 0; j < node->nchildren; j++) { const ctl_named_node_t *child = ctl_named_children(node, j); if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; } mibp[i] = j; break; } } if (node == pnode) { ret = ENOENT; goto label_return; } } else { uintmax_t index; const ctl_indexed_node_t *inode; /* Children are indexed. */ index = malloc_strtoumax(elm, NULL, 10); if (index == UINTMAX_MAX || index > SIZE_T_MAX) { ret = ENOENT; goto label_return; } inode = ctl_indexed_node(node->children); node = inode->index(tsdn, mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; } mibp[i] = (size_t)index; } if (node->ctl != NULL) { /* Terminal node. */ if (*dot != '\0') { /* * The name contains more elements than are * in this path through the tree. */ ret = ENOENT; goto label_return; } /* Complete lookup successful. */ *depthp = i + 1; break; } /* Update elm. */ if (*dot == '\0') { /* No more elements. */ ret = ENOENT; goto label_return; } elm = &dot[1]; dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); } ret = 0; label_return: return ret; } int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); if (ret != 0) { goto label_return; } node = ctl_named_node(nodes[depth-1]); if (node != NULL && node->ctl) { ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); } else { /* The name refers to a partial path through the ctl tree. */ ret = ENOENT; } label_return: return(ret); } int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { int ret; if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); label_return: return(ret); } int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } /* Iterate down the tree. 
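Each mib[i] picks the next node: a
 * positional child slot for named nodes, or a raw index (arena, bin, or
 * size class) for indexed nodes; e.g. the MIB obtained for
 * "arenas.bin.4.size" replays as root -> "arenas" -> "bin" -> 4 -> "size"
 * with no string parsing.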
*/ node = super_root_node; for (i = 0; i < miblen; i++) { assert(node); assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { /* Children are named. */ if (node->nchildren <= mib[i]) { ret = ENOENT; goto label_return; } node = ctl_named_children(node, mib[i]); } else { const ctl_indexed_node_t *inode; /* Indexed element. */ inode = ctl_indexed_node(node->children); node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; } } } /* Call the ctl function. */ if (node && node->ctl) { ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); } else { /* Partial MIB. */ ret = ENOENT; } label_return: return(ret); } bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, malloc_mutex_rank_exclusive)) { return true; } ctl_initialized = false; return false; } void ctl_prefork(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &ctl_mtx); } void ctl_postfork_parent(tsdn_t *tsdn) { malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void ctl_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &ctl_mtx); } /******************************************************************************/ /* *_ctl() functions. */ #define READONLY() do { \ if (newp != NULL || newlen != 0) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) #define WRITEONLY() do { \ if (oldp != NULL || oldlenp != NULL) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) #define READ_XOR_WRITE() do { \ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ newlen != 0)) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) #define READ(v, t) do { \ if (oldp != NULL && oldlenp != NULL) { \ if (*oldlenp != sizeof(t)) { \ size_t copylen = (sizeof(t) <= *oldlenp) \ ? sizeof(t) : *oldlenp; \ memcpy(oldp, (void *)&(v), copylen); \ ret = EINVAL; \ goto label_return; \ } \ *(t *)oldp = (v); \ } \ } while (0) #define WRITE(v, t) do { \ if (newp != NULL) { \ if (newlen != sizeof(t)) { \ ret = EINVAL; \ goto label_return; \ } \ (v) = *(t *)newp; \ } \ } while (0) #define MIB_UNSIGNED(v, i) do { \ if (mib[i] > UINT_MAX) { \ ret = EFAULT; \ goto label_return; \ } \ v = (unsigned)mib[i]; \ } while (0) /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. 
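 * For example, CTL_RO_NL_GEN(opt_retain, opt_retain, bool) below expands to
 * roughly:
 *
 *	static int
 *	opt_retain_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *	    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
 *		int ret;
 *		bool oldval;
 *
 *		READONLY();
 *		oldval = opt_retain;
 *		READ(oldval, bool);
 *
 *		ret = 0;
 *	label_return:
 *		return ret;
 *	}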
*/ #define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ if (l) { \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ if (l) { \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ } \ return ret; \ } #define CTL_RO_CGEN(c, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return ret; \ } #define CTL_RO_GEN(n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return ret; \ } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate during the call. */ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } #define CTL_RO_NL_GEN(n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ if (!(c)) { \ return ENOENT; \ } \ READONLY(); \ oldval = (m(tsd)); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } #define CTL_RO_CONFIG_GEN(n, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = n; \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return ret; \ } /******************************************************************************/ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); if (newp != NULL) { ctl_refresh(tsd_tsdn(tsd)); } READ(ctl_arenas->epoch, uint64_t); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!have_background_thread) { return ENOENT; } background_thread_ctl_init(tsd_tsdn(tsd)); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); if (newp == NULL) { oldval = background_thread_enabled(); READ(oldval, bool); } else { if (newlen != sizeof(bool)) { ret 
= EINVAL; goto label_return; } oldval = background_thread_enabled(); READ(oldval, bool); bool newval = *(bool *)newp; if (newval == oldval) { ret = 0; goto label_return; } background_thread_enabled_set(tsd_tsdn(tsd), newval); if (newval) { if (background_threads_enable(tsd)) { ret = EFAULT; goto label_return; } } else { if (background_threads_disable(tsd)) { ret = EFAULT; goto label_return; } } } ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t oldval; if (!have_background_thread) { return ENOENT; } background_thread_ctl_init(tsd_tsdn(tsd)); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); if (newp == NULL) { oldval = max_background_threads; READ(oldval, size_t); } else { if (newlen != sizeof(size_t)) { ret = EINVAL; goto label_return; } oldval = max_background_threads; READ(oldval, size_t); size_t newval = *(size_t *)newp; if (newval == oldval) { ret = 0; goto label_return; } if (newval > opt_max_background_threads) { ret = EINVAL; goto label_return; } if (background_thread_enabled()) { background_thread_enabled_set(tsd_tsdn(tsd), false); if (background_threads_disable(tsd)) { ret = EFAULT; goto label_return; } max_background_threads = newval; background_thread_enabled_set(tsd_tsdn(tsd), true); if (background_threads_enable(tsd)) { ret = EFAULT; goto label_return; } } else { max_background_threads = newval; } } ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } /******************************************************************************/ CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) CTL_RO_CONFIG_GEN(config_debug, bool) CTL_RO_CONFIG_GEN(config_fill, bool) CTL_RO_CONFIG_GEN(config_lazy_lock, bool) CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) CTL_RO_CONFIG_GEN(config_prof, bool) CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) CTL_RO_CONFIG_GEN(config_stats, bool) CTL_RO_CONFIG_GEN(config_utrace, bool) CTL_RO_CONFIG_GEN(config_xmalloc, bool) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], const char *) CTL_RO_NL_GEN(opt_retain, opt_retain, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], const char *) CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t) CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) CTL_RO_NL_GEN(opt_tcache, 
opt_tcache, bool) CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *) CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, size_t) CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, opt_prof_thread_active_init, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; arena_t *oldarena; unsigned newind, oldind; oldarena = arena_choose(tsd, NULL); if (oldarena == NULL) { return EAGAIN; } newind = oldind = arena_ind_get(oldarena); WRITE(newind, unsigned); READ(oldind, unsigned); if (newind != oldind) { arena_t *newarena; if (newind >= narenas_total_get()) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { /* * If perCPU arena is enabled, thread_arena * control is not allowed for the auto arena * range. */ ret = EPERM; goto label_return; } } /* Initialize arena if necessary. */ newarena = arena_get(tsd_tsdn(tsd), newind, true); if (newarena == NULL) { ret = EAGAIN; goto label_return; } /* Set new arena/tcache associations. 
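A thread can rebind itself with,
 * e.g.:
 *
 *	unsigned ind = 3;
 *	mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));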
*/ arena_migrate(tsd, oldind, newind); if (tcache_available(tsd)) { tcache_arena_reassociate(tsd_tsdn(tsd), tsd_tcachep_get(tsd), newarena); } } ret = 0; label_return: return ret; } CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, uint64_t *) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, tsd_thread_deallocatedp_get, uint64_t *) static int thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; oldval = tcache_enabled_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } tcache_enabled_set(tsd, *(bool *)newp); } READ(oldval, bool); ret = 0; label_return: return ret; } static int thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!tcache_available(tsd)) { ret = EFAULT; goto label_return; } READONLY(); WRITEONLY(); tcache_flush(tsd); ret = 0; label_return: return ret; } static int thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_prof) { return ENOENT; } READ_XOR_WRITE(); if (newp != NULL) { if (newlen != sizeof(const char *)) { ret = EINVAL; goto label_return; } if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != 0) { goto label_return; } } else { const char *oldname = prof_thread_name_get(tsd); READ(oldname, const char *); } ret = 0; label_return: return ret; } static int thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) { return ENOENT; } oldval = prof_thread_active_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } if (prof_thread_active_set(tsd, *(bool *)newp)) { ret = EAGAIN; goto label_return; } } READ(oldval, bool); ret = 0; label_return: return ret; } /******************************************************************************/ static int tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; READONLY(); if (tcaches_create(tsd, &tcache_ind)) { ret = EFAULT; goto label_return; } READ(tcache_ind, unsigned); ret = 0; label_return: return ret; } static int tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_flush(tsd, tcache_ind); ret = 0; label_return: return ret; } static int tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_destroy(tsd, tcache_ind); ret = 0; label_return: return ret; } /******************************************************************************/ static int arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsdn_t 
*tsdn = tsd_tsdn(tsd); unsigned arena_ind; bool initialized; READONLY(); MIB_UNSIGNED(arena_ind, 1); malloc_mutex_lock(tsdn, &ctl_mtx); initialized = arenas_i(arena_ind)->initialized; malloc_mutex_unlock(tsdn, &ctl_mtx); READ(initialized, bool); ret = 0; label_return: return ret; } static void arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { malloc_mutex_lock(tsdn, &ctl_mtx); { unsigned narenas = ctl_arenas->narenas; /* * Access via index narenas is deprecated, and scheduled for * removal in 6.0.0. */ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { unsigned i; VARIABLE_ARRAY(arena_t *, tarenas, narenas); for (i = 0; i < narenas; i++) { tarenas[i] = arena_get(tsdn, i, false); } /* * No further need to hold ctl_mtx, since narenas and * tarenas contain everything needed below. */ malloc_mutex_unlock(tsdn, &ctl_mtx); for (i = 0; i < narenas; i++) { if (tarenas[i] != NULL) { arena_decay(tsdn, tarenas[i], false, all); } } } else { arena_t *tarena; assert(arena_ind < narenas); tarena = arena_get(tsdn, arena_ind, false); /* No further need to hold ctl_mtx. */ malloc_mutex_unlock(tsdn, &ctl_mtx); if (tarena != NULL) { arena_decay(tsdn, tarena, false, all); } } } } static int arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; READONLY(); WRITEONLY(); MIB_UNSIGNED(arena_ind, 1); arena_i_decay(tsd_tsdn(tsd), arena_ind, false); ret = 0; label_return: return ret; } static int arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; READONLY(); WRITEONLY(); MIB_UNSIGNED(arena_ind, 1); arena_i_decay(tsd_tsdn(tsd), arena_ind, true); ret = 0; label_return: return ret; } static int arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, arena_t **arena) { int ret; READONLY(); WRITEONLY(); MIB_UNSIGNED(*arena_ind, 1); *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); if (*arena == NULL || arena_is_auto(*arena)) { ret = EFAULT; goto label_return; } ret = 0; label_return: return ret; } static void arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { /* Temporarily disable the background thread during arena reset. 
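The paired
 * arena_reset_finish_background_thread() below restores the state to
 * background_thread_started once the reset completes.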
*/ if (have_background_thread) { malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); if (background_thread_enabled()) { background_thread_info_t *info = background_thread_info_get(arena_ind); assert(info->state == background_thread_started); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); info->state = background_thread_paused; malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } } } static void arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { if (have_background_thread) { if (background_thread_enabled()) { background_thread_info_t *info = background_thread_info_get(arena_ind); assert(info->state == background_thread_paused); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); info->state = background_thread_started; malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); } } static int arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { return ret; } arena_reset_prepare_background_thread(tsd, arena_ind); arena_reset(tsd, arena); arena_reset_finish_background_thread(tsd, arena_ind); return ret; } static int arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; ctl_arena_t *ctl_darena, *ctl_arena; ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { goto label_return; } if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, true) != 0) { ret = EFAULT; goto label_return; } arena_reset_prepare_background_thread(tsd, arena_ind); /* Merge stats after resetting and purging arena. */ arena_reset(tsd, arena); arena_decay(tsd_tsdn(tsd), arena, false, true); ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED); ctl_darena->initialized = true; ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true); /* Destroy arena. */ arena_destroy(tsd, arena); ctl_arena = arenas_i(arena_ind); ctl_arena->initialized = false; /* Record arena index for later recycling via arenas.create. */ ql_elm_new(ctl_arena, destroyed_link); ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link); arena_reset_finish_background_thread(tsd, arena_ind); assert(ret == 0); label_return: return ret; } static int arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *dss = NULL; unsigned arena_ind; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); MIB_UNSIGNED(arena_ind, 1); if (dss != NULL) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strcmp(dss_prec_names[i], dss) == 0) { dss_prec = i; match = true; break; } } if (!match) { ret = EINVAL; goto label_return; } } /* * Access via index narenas is deprecated, and scheduled for removal in * 6.0.0. 
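 * New callers should pass MALLCTL_ARENAS_ALL rather than the arena count to
 * address the merged view.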
*/ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == ctl_arenas->narenas) { if (dss_prec != dss_prec_limit && extent_dss_prec_set(dss_prec)) { ret = EFAULT; goto label_return; } dss_prec_old = extent_dss_prec_get(); } else { arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL || (dss_prec != dss_prec_limit && arena_dss_prec_set(arena, dss_prec))) { ret = EFAULT; goto label_return; } dss_prec_old = arena_dss_prec_get(arena); } dss = dss_prec_names[dss_prec_old]; READ(dss, const char *); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; unsigned arena_ind; arena_t *arena; MIB_UNSIGNED(arena_ind, 1); arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { ret = EFAULT; goto label_return; } if (oldp != NULL && oldlenp != NULL) { size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : arena_muzzy_decay_ms_get(arena); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) { /* * By default the huge arena purges eagerly. If it is * set to non-zero decay time afterwards, background * thread might be needed. */ if (background_thread_create(tsd, arena_ind)) { ret = EFAULT; goto label_return; } } if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), arena, *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return ret; } static int arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, true); } static int arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, false); } static int arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); MIB_UNSIGNED(arena_ind, 1); if (arena_ind < narenas_total_get()) { extent_hooks_t *old_extent_hooks; arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { if (arena_ind >= narenas_auto) { ret = EFAULT; goto label_return; } old_extent_hooks = (extent_hooks_t *)&extent_hooks_default; READ(old_extent_hooks, extent_hooks_t *); if (newp != NULL) { /* Initialize a new arena as a side effect. 
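That is, writing
 * "arena.<i>.extent_hooks" for an as-yet-uncreated automatic arena both
 * installs the hooks and arena_init()s the arena in one step.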
*/ extent_hooks_t *new_extent_hooks JEMALLOC_CC_SILENCE_INIT(NULL); WRITE(new_extent_hooks, extent_hooks_t *); arena = arena_init(tsd_tsdn(tsd), arena_ind, new_extent_hooks); if (arena == NULL) { ret = EFAULT; goto label_return; } } } else { if (newp != NULL) { extent_hooks_t *new_extent_hooks JEMALLOC_CC_SILENCE_INIT(NULL); WRITE(new_extent_hooks, extent_hooks_t *); old_extent_hooks = extent_hooks_set(tsd, arena, new_extent_hooks); READ(old_extent_hooks, extent_hooks_t *); } else { old_extent_hooks = extent_hooks_get(arena); READ(old_extent_hooks, extent_hooks_t *); } } } else { ret = EFAULT; goto label_return; } ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; if (!opt_retain) { /* Only relevant when retain is enabled. */ return ENOENT; } malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); MIB_UNSIGNED(arena_ind, 1); if (arena_ind < narenas_total_get() && (arena = arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { size_t old_limit, new_limit; if (newp != NULL) { WRITE(new_limit, size_t); } bool err = arena_retain_grow_limit_get_set(tsd, arena, &old_limit, newp != NULL ? &new_limit : NULL); if (!err) { READ(old_limit, size_t); ret = 0; } else { ret = EFAULT; } } else { ret = EFAULT; } label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static const ctl_named_node_t * arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; malloc_mutex_lock(tsdn, &ctl_mtx); switch (i) { case MALLCTL_ARENAS_ALL: case MALLCTL_ARENAS_DESTROYED: break; default: if (i > ctl_arenas->narenas) { ret = NULL; goto label_return; } break; } ret = super_arena_i_node; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; } /******************************************************************************/ static int arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; goto label_return; } narenas = ctl_arenas->narenas; READ(narenas, unsigned); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; if (oldp != NULL && oldlenp != NULL) { size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : arena_muzzy_decay_ms_default_get()); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } if (dirty ? 
arena_dirty_decay_ms_default_set(*(ssize_t *)newp) : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return ret; } static int arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, true); } static int arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, false); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned) CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t) static const ctl_named_node_t * arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > SC_NBINS) { return NULL; } return super_arenas_bin_i_node; } CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned) CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]), size_t) static const ctl_named_node_t * arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > SC_NSIZES - SC_NBINS) { return NULL; } return super_arenas_lextent_i_node; } static int arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; extent_hooks_t *extent_hooks; unsigned arena_ind; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); extent_hooks = (extent_hooks_t *)&extent_hooks_default; WRITE(extent_hooks, extent_hooks_t *); if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { ret = EAGAIN; goto label_return; } READ(arena_ind, unsigned); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } static int arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; void *ptr; extent_t *extent; arena_t *arena; ptr = NULL; ret = EINVAL; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(ptr, void *); extent = iealloc(tsd_tsdn(tsd), ptr); if (extent == NULL) goto label_return; arena = extent_arena_get(extent); if (arena == NULL) goto label_return; arena_ind = arena_ind_get(arena); READ(arena_ind, unsigned); ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } /******************************************************************************/ static int prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) { return ENOENT; } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_thread_active_init_set(tsd_tsdn(tsd), *(bool *)newp); } else { oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: return ret; } static int prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if 
(!config_prof) { return ENOENT; } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); } else { oldval = prof_active_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: return ret; } static int prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; if (!config_prof) { return ENOENT; } WRITEONLY(); WRITE(filename, const char *); if (prof_mdump(tsd, filename)) { ret = EFAULT; goto label_return; } ret = 0; label_return: return ret; } static int prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) { return ENOENT; } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); } else { oldval = prof_gdump_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: return ret; } static int prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; if (!config_prof) { return ENOENT; } WRITEONLY(); WRITE(lg_sample, size_t); if (lg_sample >= (sizeof(uint64_t) << 3)) { lg_sample = (sizeof(uint64_t) << 3) - 1; } prof_reset(tsd, lg_sample); ret = 0; label_return: return ret; } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) static int prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; if (!config_prof) { return ENOENT; } WRITEONLY(); WRITE(filename, const char *); if (prof_log_start(tsd_tsdn(tsd), filename)) { ret = EFAULT; goto label_return; } ret = 0; label_return: return ret; } static int prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { if (!config_prof) { return ENOENT; } if (prof_log_stop(tsd_tsdn(tsd))) { return EFAULT; } return 0; } /******************************************************************************/ CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, ctl_stats->background_thread.num_threads, size_t) CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, ctl_stats->background_thread.num_runs, uint64_t) CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, ssize_t) CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, ssize_t) CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_uptime, nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), 
uint64_t) CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_retained, atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail, atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_base, atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_internal, atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp, atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_resident, atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, arenas_i(mib[2])->astats->allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, arenas_i(mib[2])->astats->nmalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, arenas_i(mib[2])->astats->ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, arenas_i(mib[2])->astats->nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) /* * Note: "nmalloc" here instead of "nrequests" in the read. This is intentional. */ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */ /* Lock profiling related APIs below. 
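 *
 * Each profiled mutex exposes, via RO_MUTEX_CTL_GEN below, the counters
 * num_ops, num_wait, num_spin_acq, num_owner_switch, total_wait_time,
 * max_wait_time, and max_num_thds. A consumer-side sketch (added for
 * illustration; assumes a stats-enabled build, and uses the global ctl
 * mutex as the example leaf):
 *
 *	uint64_t nwait;
 *	size_t sz = sizeof(nwait);
 *	mallctl("stats.mutexes.ctl.num_wait", &nwait, &sz, NULL, 0);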
*/ #define RO_MUTEX_CTL_GEN(n, l) \ CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ l.n_lock_ops, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ l.n_wait_times, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ l.n_spin_acquired, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ l.n_owner_switches, uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ nstime_ns(&l.tot_wait_time), uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ nstime_ns(&l.max_wait_time), uint64_t) \ CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ l.max_n_thds, uint32_t) /* Global mutexes. */ #define OP(mtx) \ RO_MUTEX_CTL_GEN(mutexes_##mtx, \ ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) MUTEX_PROF_GLOBAL_MUTEXES #undef OP /* Per arena mutexes */ #define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) MUTEX_PROF_ARENA_MUTEXES #undef OP /* tcache bin mutex */ RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) #undef RO_MUTEX_CTL_GEN /* Resets all mutex stats, including global, arena and bin mutexes. */ static int stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { if (!config_stats) { return ENOENT; } tsdn_t *tsdn = tsd_tsdn(tsd); #define MUTEX_PROF_RESET(mtx) \ malloc_mutex_lock(tsdn, &mtx); \ malloc_mutex_prof_data_reset(tsdn, &mtx); \ malloc_mutex_unlock(tsdn, &mtx); /* Global mutexes: ctl and prof. */ MUTEX_PROF_RESET(ctl_mtx); if (have_background_thread) { MUTEX_PROF_RESET(background_thread_lock); } if (config_prof && opt_prof) { MUTEX_PROF_RESET(bt2gctx_mtx); } /* Per arena mutexes. */ unsigned n = narenas_total_get(); for (unsigned i = 0; i < n; i++) { arena_t *arena = arena_get(tsdn, i, false); if (!arena) { continue; } MUTEX_PROF_RESET(arena->large_mtx); MUTEX_PROF_RESET(arena->extent_avail_mtx); MUTEX_PROF_RESET(arena->extents_dirty.mtx); MUTEX_PROF_RESET(arena->extents_muzzy.mtx); MUTEX_PROF_RESET(arena->extents_retained.mtx); MUTEX_PROF_RESET(arena->decay_dirty.mtx); MUTEX_PROF_RESET(arena->decay_muzzy.mtx); MUTEX_PROF_RESET(arena->tcache_ql_mtx); MUTEX_PROF_RESET(arena->base->mtx); for (szind_t i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_t *bin = &arena->bins[i].bin_shards[j]; MUTEX_PROF_RESET(bin->lock); } } } #undef MUTEX_PROF_RESET return 0; } CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills, arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes, arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs, arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) 
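/*
 * Aside (illustrative, not part of the upstream source): the CTL_RO_*
 * readers above are what back the public mallctl() stats namespace. A
 * minimal consumer sketch; writing "epoch" refreshes the cached stats
 * snapshot that the subsequent read returns:
 *
 *	uint64_t epoch = 1;
 *	size_t allocated, sz = sizeof(allocated);
 *	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */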
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
	if (j > SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_bins_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)

static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	if (j > SC_NSIZES - SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_lextents_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].ndirty, ATOMIC_RELAXED),
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, ATOMIC_RELAXED),
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].nretained, ATOMIC_RELAXED),
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, ATOMIC_RELAXED),
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, ATOMIC_RELAXED),
    size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, ATOMIC_RELAXED),
    size_t);

static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	if (j >= SC_NPSIZES) {
		return NULL;
	}
	return super_stats_arenas_i_extents_j_node;
}

static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
	const ctl_named_node_t *ret;
	size_t a;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	a = arenas_i2a_impl(i, true, true);
	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	if (oldp == NULL || oldlenp == NULL || newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	/*
	 * Note: this is a *private* struct. This is an experimental interface;
	 * forcing the user to know the jemalloc internals well enough to
	 * extract the ABI hopefully ensures nobody gets too comfortable with
	 * this API, which can change at a moment's notice.
	 */
	hooks_t hooks;
	WRITE(hooks, hooks_t);

	void *handle = hook_install(tsd_tsdn(tsd), &hooks);
	if (handle == NULL) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(handle, void *);

	ret = 0;
label_return:
	return ret;
}

static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	WRITEONLY();
	void *handle = NULL;
	WRITE(handle, void *);
	if (handle == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	hook_remove(tsd_tsdn(tsd), handle);
	ret = 0;
label_return:
	return ret;
}

/******************************************************************************/
/* jemalloc-sys-0.3.2/rep/src/div.c */
/******************************************************************************/

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/div.h"

#include "jemalloc/internal/assert.h"

/*
 * Suppose we have n = q * d, all integers. We know n and d, and want
 * q = n / d.
 *
 * For any k, we have (here, all division is exact; not C-style rounding):
 * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
 * r = (-2^k) mod d.
 *
 * Expanding this out:
 * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
 *     = floor(n / d + (r / d) * (n / 2^k)).
 *
 * The fractional part of n / d is 0 (because of the assumption that d divides
 * n exactly), so we have:
 * ... = n / d + floor((r / d) * (n / 2^k))
 *
 * So that our initial expression is equal to the quantity we seek, so long as
 * (r / d) * (n / 2^k) < 1.
 *
 * r is a remainder mod d, so r < d and r / d < 1 always. We can make
 * n / 2^k < 1 by setting k = 32. This gets us a value of magic that works.
 */
void
div_init(div_info_t *div_info, size_t d) {
	/* Nonsensical. */
	assert(d != 0);
	/*
	 * This would make the value of magic too high to fit into a uint32_t
	 * (we would want magic = 2^32 exactly). This would mess with code gen
	 * on 32-bit machines.
	 */
	assert(d != 1);

	uint64_t two_to_k = ((uint64_t)1 << 32);
	uint32_t magic = (uint32_t)(two_to_k / d);

	/*
	 * We want magic = ceil(2^k / d), but C gives us floor. We have to
	 * increment it unless the result was exact (i.e. unless d is a power
	 * of two).
	 */
	if (two_to_k % d != 0) {
		magic++;
	}
	div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
	div_info->d = d;
#endif
}

/******************************************************************************/
/* jemalloc-sys-0.3.2/rep/src/extent.c */
/******************************************************************************/

#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected.
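 *
 * The pool bounds lock memory by hashing each extent's address onto a fixed
 * set of mutexes, at the cost of occasional false sharing between unrelated
 * extents. The idea as a stand-alone sketch (hypothetical code, not the
 * actual mutex_pool implementation):
 *
 *	static pthread_mutex_t pool[256];
 *	static pthread_mutex_t *
 *	lock_for(const void *addr) {
 *		size_t h = ((uintptr_t)addr >> 4) ^ ((uintptr_t)addr >> 17);
 *		return &pool[h & 255];
 *	}
 *
 * (The shifts discard the low always-zero alignment bits and fold higher
 * bits into the index.)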
*/ mutex_pool_t extent_mutex_pool; size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; static const bitmap_info_t extents_bitmap_info = BITMAP_INFO_INITIALIZER(SC_NPSIZES+1); static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained); static bool extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); #ifdef PAGES_CAN_PURGE_LAZY static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); #endif static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained); #ifdef PAGES_CAN_PURGE_FORCED static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); #endif static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained); #ifdef JEMALLOC_MAPS_COALESCE static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind); #endif static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, bool growing_retained); #ifdef JEMALLOC_MAPS_COALESCE static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind); #endif static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, bool growing_retained); const extent_hooks_t extent_hooks_default = { extent_alloc_default, extent_dalloc_default, extent_destroy_default, extent_commit_default, extent_decommit_default #ifdef PAGES_CAN_PURGE_LAZY , extent_purge_lazy_default #else , NULL #endif #ifdef PAGES_CAN_PURGE_FORCED , extent_purge_forced_default #else , NULL #endif #ifdef JEMALLOC_MAPS_COALESCE , extent_split_default, extent_merge_default #endif }; /* Used exclusively for gdump triggering. */ static atomic_zu_t curpages; static atomic_zu_t highpages; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
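 *
 * (Re the extent_hooks_default table just above: an application can supply
 * its own extent_hooks_t instead, e.g. when creating an arena. Sketch,
 * where my_hooks is a user-defined table:
 *
 *	extent_hooks_t *hooks = &my_hooks;
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.create", &ind, &sz, &hooks, sizeof(hooks));
 *
 * That request is handled by arenas_create_ctl in ctl.c above, which WRITEs
 * the hooks pointer and hands it to ctl_arena_init.)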
*/ static void extent_deregister(tsdn_t *tsdn, extent_t *extent); static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained); static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent, bool *coalesced, bool growing_retained); static void extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, bool growing_retained); /******************************************************************************/ #define ATTR_NONE /* does nothing */ ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link, extent_esnead_comp) #undef ATTR_NONE typedef enum { lock_result_success, lock_result_failure, lock_result_no_extent } lock_result_t; static lock_result_t extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, extent_t **result, bool inactive_only) { extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, elm, true); /* Slab implies active extents and should be skipped. */ if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn, &extents_rtree, elm, true))) { return lock_result_no_extent; } /* * It's possible that the extent changed out from under us, and with it * the leaf->extent mapping. We have to recheck while holding the lock. */ extent_lock(tsdn, extent1); extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, elm, true); if (extent1 == extent2) { *result = extent1; return lock_result_success; } else { extent_unlock(tsdn, extent1); return lock_result_failure; } } /* * Returns a pool-locked extent_t * if there's one associated with the given * address, and NULL otherwise. 
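 *
 * Both this function and the try-lock above rely on a validate-after-lock
 * pattern: read the rtree mapping, lock the extent it names, then re-read to
 * confirm the mapping still refers to the same extent. Schematically (with
 * hypothetical helpers):
 *
 *	e = map_read(addr);
 *	lock(e);
 *	if (map_read(addr) != e) {
 *		unlock(e);
 *		retry, or report that no extent is mapped;
 *	}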
*/ static extent_t * extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr, bool inactive_only) { extent_t *ret = NULL; rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)addr, false, false); if (elm == NULL) { return NULL; } lock_result_t lock_result; do { lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret, inactive_only); } while (lock_result == lock_result_failure); return ret; } extent_t * extent_alloc(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); extent_t *extent = extent_avail_first(&arena->extent_avail); if (extent == NULL) { malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); return base_alloc_extent(tsdn, arena->base); } extent_avail_remove(&arena->extent_avail, extent); atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); return extent; } void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); extent_avail_insert(&arena->extent_avail, extent); atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); } extent_hooks_t * extent_hooks_get(arena_t *arena) { return base_extent_hooks_get(arena->base); } extent_hooks_t * extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { background_thread_info_t *info; if (have_background_thread) { info = arena_background_thread_info_get(arena); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); } extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); if (have_background_thread) { malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); } return ret; } static void extent_hooks_assure_initialized(arena_t *arena, extent_hooks_t **r_extent_hooks) { if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { *r_extent_hooks = extent_hooks_get(arena); } } #ifndef JEMALLOC_JET static #endif size_t extent_size_quantize_floor(size_t size) { size_t ret; pszind_t pind; assert(size > 0); assert((size & PAGE_MASK) == 0); pind = sz_psz2ind(size - sz_large_pad + 1); if (pind == 0) { /* * Avoid underflow. This short-circuit would also do the right * thing for all sizes in the range for which there are * PAGE-spaced size classes, but it's simplest to just handle * the one case that would cause erroneous results. */ return size; } ret = sz_pind2sz(pind - 1) + sz_large_pad; assert(ret <= size); return ret; } #ifndef JEMALLOC_JET static #endif size_t extent_size_quantize_ceil(size_t size) { size_t ret; assert(size > 0); assert(size - sz_large_pad <= SC_LARGE_MAXCLASS); assert((size & PAGE_MASK) == 0); ret = extent_size_quantize_floor(size); if (ret < size) { /* * Skip a quantization that may have an adequately large extent, * because under-sized extents may be mixed in. This only * happens when an unusual size is requested, i.e. for aligned * allocation, and is just one of several places where linear * search would potentially find sufficiently aligned available * memory somewhere lower. */ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + sz_large_pad; } return ret; } /* Generate pairing heap functions. 
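 *
 * ph_gen expands to concrete heap operations carrying the given prefix; the
 * ones this file relies on are extent_heap_new, extent_heap_empty,
 * extent_heap_first, extent_heap_insert, and extent_heap_remove, with heap
 * order given by extent_snad_comp (serial number, then address).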
*/ ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, bool delay_coalesce) { if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, malloc_mutex_rank_exclusive)) { return true; } for (unsigned i = 0; i < SC_NPSIZES + 1; i++) { extent_heap_new(&extents->heaps[i]); } bitmap_init(extents->bitmap, &extents_bitmap_info, true); extent_list_init(&extents->lru); atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); extents->state = state; extents->delay_coalesce = delay_coalesce; return false; } extent_state_t extents_state_get(const extents_t *extents) { return extents->state; } size_t extents_npages_get(extents_t *extents) { return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); } size_t extents_nextents_get(extents_t *extents, pszind_t pind) { return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED); } size_t extents_nbytes_get(extents_t *extents, pszind_t pind) { return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED); } static void extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) { size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED); atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED); cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED); atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED); } static void extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) { size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED); atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED); cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED); atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED); } static void extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { malloc_mutex_assert_owner(tsdn, &extents->mtx); assert(extent_state_get(extent) == extents->state); size_t size = extent_size_get(extent); size_t psz = extent_size_quantize_floor(size); pszind_t pind = sz_psz2ind(psz); if (extent_heap_empty(&extents->heaps[pind])) { bitmap_unset(extents->bitmap, &extents_bitmap_info, (size_t)pind); } extent_heap_insert(&extents->heaps[pind], extent); if (config_stats) { extents_stats_add(extents, pind, size); } extent_list_append(&extents->lru, extent); size_t npages = size >> LG_PAGE; /* * All modifications to npages hold the mutex (as asserted above), so we * don't need an atomic fetch-add; we can get by with a load followed by * a store. */ size_t cur_extents_npages = atomic_load_zu(&extents->npages, ATOMIC_RELAXED); atomic_store_zu(&extents->npages, cur_extents_npages + npages, ATOMIC_RELAXED); } static void extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { malloc_mutex_assert_owner(tsdn, &extents->mtx); assert(extent_state_get(extent) == extents->state); size_t size = extent_size_get(extent); size_t psz = extent_size_quantize_floor(size); pszind_t pind = sz_psz2ind(psz); extent_heap_remove(&extents->heaps[pind], extent); if (config_stats) { extents_stats_sub(extents, pind, size); } if (extent_heap_empty(&extents->heaps[pind])) { bitmap_set(extents->bitmap, &extents_bitmap_info, (size_t)pind); } extent_list_remove(&extents->lru, extent); size_t npages = size >> LG_PAGE; /* * As in extents_insert_locked, we hold extents->mtx and so don't need * atomic operations for updating extents->npages. 
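 *
 * In C11-atomics terms the update below is just (sketch):
 *
 *	n = atomic_load_explicit(&npages, memory_order_relaxed);
 *	atomic_store_explicit(&npages, n - delta, memory_order_relaxed);
 *
 * This is race-free because all writers serialize on extents->mtx;
 * lock-free readers (extents_npages_get) merely need some recent,
 * consistent value.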
*/ size_t cur_extents_npages = atomic_load_zu(&extents->npages, ATOMIC_RELAXED); assert(cur_extents_npages >= npages); atomic_store_zu(&extents->npages, cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); } /* * Find an extent with size [min_size, max_size) to satisfy the alignment * requirement. For each size, try only the first extent in the heap. */ static extent_t * extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, size_t alignment) { pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size)); pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size)); for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)pind); i < pind_max; i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)i+1)) { assert(i < SC_NPSIZES); assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); uintptr_t base = (uintptr_t)extent_base_get(extent); size_t candidate_size = extent_size_get(extent); assert(candidate_size >= min_size); uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, PAGE_CEILING(alignment)); if (base > next_align || base + candidate_size <= next_align) { /* Overflow or not crossing the next alignment. */ continue; } size_t leadsize = next_align - base; if (candidate_size - leadsize >= min_size) { return extent; } } return NULL; } /* Do any-best-fit extent selection, i.e. select any extent that best fits. */ static extent_t * extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, size_t size) { pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)pind); if (i < SC_NPSIZES + 1) { /* * In order to reduce fragmentation, avoid reusing and splitting * large extents for much smaller sizes. */ if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { return NULL; } assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); assert(extent_size_get(extent) >= size); return extent; } return NULL; } /* * Do first-fit extent selection, i.e. select the oldest/lowest extent that is * large enough. */ static extent_t * extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, size_t size) { extent_t *ret = NULL; pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)pind); i < SC_NPSIZES + 1; i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)i+1)) { assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); assert(extent_size_get(extent) >= size); if (ret == NULL || extent_snad_comp(extent, ret) < 0) { ret = extent; } if (i == SC_NPSIZES) { break; } assert(i < SC_NPSIZES); } return ret; } /* * Do {best,first}-fit extent selection, where the selection policy choice is * based on extents->delay_coalesce. Best-fit selection requires less * searching, but its layout policy is less stable and may cause higher virtual * memory fragmentation as a side effect. */ static extent_t * extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, size_t esize, size_t alignment) { malloc_mutex_assert_owner(tsdn, &extents->mtx); size_t max_size = esize + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. */ if (max_size < esize) { return NULL; } extent_t *extent = extents->delay_coalesce ? 
extents_best_fit_locked(tsdn, arena, extents, max_size) : extents_first_fit_locked(tsdn, arena, extents, max_size); if (alignment > PAGE && extent == NULL) { /* * max_size guarantees the alignment requirement but is rather * pessimistic. Next we try to satisfy the aligned allocation * with sizes in [esize, max_size). */ extent = extents_fit_alignment(extents, esize, max_size, alignment); } return extent; } static bool extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent) { extent_state_set(extent, extent_state_active); bool coalesced; extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, &coalesced, false); extent_state_set(extent, extents_state_get(extents)); if (!coalesced) { return true; } extents_insert_locked(tsdn, extents, extent); return false; } extent_t * extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { assert(size + pad != 0); assert(alignment != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr, size, pad, alignment, slab, szind, zero, commit, false); assert(extent == NULL || extent_dumpable_get(extent)); return extent; } void extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent) { assert(extent_base_get(extent) != NULL); assert(extent_size_get(extent) != 0); assert(extent_dumpable_get(extent)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_addr_set(extent, extent_base_get(extent)); extent_zeroed_set(extent, false); extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); } extent_t * extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); malloc_mutex_lock(tsdn, &extents->mtx); /* * Get the LRU coalesced extent, if any. If coalescing was delayed, * the loop will iterate until the LRU extent is fully coalesced. */ extent_t *extent; while (true) { /* Get the LRU extent, if any. */ extent = extent_list_first(&extents->lru); if (extent == NULL) { goto label_return; } /* Check the eviction limit. */ size_t extents_npages = atomic_load_zu(&extents->npages, ATOMIC_RELAXED); if (extents_npages <= npages_min) { extent = NULL; goto label_return; } extents_remove_locked(tsdn, extents, extent); if (!extents->delay_coalesce) { break; } /* Try to coalesce. */ if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent)) { break; } /* * The LRU extent was just coalesced and the result placed in * the LRU at its neighbor's position. Start over. */ } /* * Either mark the extent active or deregister it to protect against * concurrent operations. 
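 *
 * Roughly: dirty and muzzy extents stay registered in the rtree and are
 * handed to the caller in the active state, while retained extents are
 * leaving the extents subsystem altogether and are deregistered instead.
 * An already-active extent cannot appear on this path (see the switch
 * below).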
*/ switch (extents_state_get(extents)) { case extent_state_active: not_reached(); case extent_state_dirty: case extent_state_muzzy: extent_state_set(extent, extent_state_active); break; case extent_state_retained: extent_deregister(tsdn, extent); break; default: not_reached(); } label_return: malloc_mutex_unlock(tsdn, &extents->mtx); return extent; } static void extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, bool growing_retained) { /* * Leak extent after making sure its pages have already been purged, so * that this is only a virtual memory leak. */ if (extents_state_get(extents) == extent_state_dirty) { if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), growing_retained)) { extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), growing_retained); } } extent_dalloc(tsdn, arena, extent); } void extents_prefork(tsdn_t *tsdn, extents_t *extents) { malloc_mutex_prefork(tsdn, &extents->mtx); } void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { malloc_mutex_postfork_parent(tsdn, &extents->mtx); } void extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { malloc_mutex_postfork_child(tsdn, &extents->mtx); } static void extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_t *extent) { assert(extent_arena_get(extent) == arena); assert(extent_state_get(extent) == extent_state_active); extent_state_set(extent, extents_state_get(extents)); extents_insert_locked(tsdn, extents, extent); } static void extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_t *extent) { malloc_mutex_lock(tsdn, &extents->mtx); extent_deactivate_locked(tsdn, arena, extents, extent); malloc_mutex_unlock(tsdn, &extents->mtx); } static void extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_t *extent) { assert(extent_arena_get(extent) == arena); assert(extent_state_get(extent) == extents_state_get(extents)); extents_remove_locked(tsdn, extents, extent); extent_state_set(extent, extent_state_active); } static bool extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, const extent_t *extent, bool dependent, bool init_missing, rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent), dependent, init_missing); if (!dependent && *r_elm_a == NULL) { return true; } assert(*r_elm_a != NULL); *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_last_get(extent), dependent, init_missing); if (!dependent && *r_elm_b == NULL) { return true; } assert(*r_elm_b != NULL); return false; } static void extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); if (elm_b != NULL) { rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, slab); } } static void extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, szind_t szind) { assert(extent_slab_get(extent)); /* Register interior. 
*/ for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { rtree_write(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << LG_PAGE), extent, szind, true); } } static void extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { cassert(config_prof); /* prof_gdump() requirement. */ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (opt_prof && extent_state_get(extent) == extent_state_active) { size_t nadd = extent_size_get(extent) >> LG_PAGE; size_t cur = atomic_fetch_add_zu(&curpages, nadd, ATOMIC_RELAXED) + nadd; size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); while (cur > high && !atomic_compare_exchange_weak_zu( &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { /* * Don't refresh cur, because it may have decreased * since this thread lost the highpages update race. * Note that high is updated in case of CAS failure. */ } if (cur > high && prof_gdump_get_unlocked()) { prof_gdump(tsdn); } } } static void extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { cassert(config_prof); if (opt_prof && extent_state_get(extent) == extent_state_active) { size_t nsub = extent_size_get(extent) >> LG_PAGE; assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); } } static bool extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *elm_a, *elm_b; /* * We need to hold the lock to protect against a concurrent coalesce * operation that sees us in a partial state. */ extent_lock(tsdn, extent); if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, &elm_a, &elm_b)) { return true; } szind_t szind = extent_szind_get_maybe_invalid(extent); bool slab = extent_slab_get(extent); extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); if (slab) { extent_interior_register(tsdn, rtree_ctx, extent, szind); } extent_unlock(tsdn, extent); if (config_prof && gdump_add) { extent_gdump_add(tsdn, extent); } return false; } static bool extent_register(tsdn_t *tsdn, extent_t *extent) { return extent_register_impl(tsdn, extent, true); } static bool extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { return extent_register_impl(tsdn, extent, false); } static void extent_reregister(tsdn_t *tsdn, extent_t *extent) { bool err = extent_register(tsdn, extent); assert(!err); } /* * Removes all pointers to the given extent from the global rtree indices for * its interior. This is relevant for slab extents, for which we need to do * metadata lookups at places other than the head of the extent. We deregister * on the interior, then, when an extent moves from being an active slab to an * inactive state. */ static void extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent) { size_t i; assert(extent_slab_get(extent)); for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { rtree_clear(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << LG_PAGE)); } } /* * Removes all pointers to the given extent from the global rtree. 
*/ static void extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *elm_a, *elm_b; extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, &elm_a, &elm_b); extent_lock(tsdn, extent); extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false); if (extent_slab_get(extent)) { extent_interior_deregister(tsdn, rtree_ctx, extent); extent_slab_set(extent, false); } extent_unlock(tsdn, extent); if (config_prof && gdump) { extent_gdump_sub(tsdn, extent); } } static void extent_deregister(tsdn_t *tsdn, extent_t *extent) { extent_deregister_impl(tsdn, extent, true); } static void extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { extent_deregister_impl(tsdn, extent, false); } /* * Tries to find and remove an extent from extents that can be used for the * given allocation request. */ static extent_t * extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); assert(alignment > 0); if (config_debug && new_addr != NULL) { /* * Non-NULL new_addr has two use cases: * * 1) Recycle a known-extant extent, e.g. during purging. * 2) Perform in-place expanding reallocation. * * Regardless of use case, new_addr must either refer to a * non-existing extent, or to the base of an extant extent, * since only active slabs support interior lookups (which of * course cannot be recycled). */ assert(PAGE_ADDR2BASE(new_addr) == new_addr); assert(pad == 0); assert(alignment <= PAGE); } size_t esize = size + pad; malloc_mutex_lock(tsdn, &extents->mtx); extent_hooks_assure_initialized(arena, r_extent_hooks); extent_t *extent; if (new_addr != NULL) { extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr, false); if (extent != NULL) { /* * We might null-out extent to report an error, but we * still need to unlock the associated mutex after. */ extent_t *unlock_extent = extent; assert(extent_base_get(extent) == new_addr); if (extent_arena_get(extent) != arena || extent_size_get(extent) < esize || extent_state_get(extent) != extents_state_get(extents)) { extent = NULL; } extent_unlock(tsdn, unlock_extent); } } else { extent = extents_fit_locked(tsdn, arena, extents, esize, alignment); } if (extent == NULL) { malloc_mutex_unlock(tsdn, &extents->mtx); return NULL; } extent_activate_locked(tsdn, arena, extents, extent); malloc_mutex_unlock(tsdn, &extents->mtx); return extent; } /* * Given an allocation request and an extent guaranteed to be able to satisfy * it, this splits off lead and trail extents, leaving extent pointing to an * extent satisfying the allocation. * This function doesn't put lead or trail into any extents_t; it's the caller's * job to ensure that they can be reused. */ typedef enum { /* * Split successfully. lead, extent, and trail, are modified to extents * describing the ranges before, in, and after the given allocation. */ extent_split_interior_ok, /* * The extent can't satisfy the given allocation request. None of the * input extent_t *s are touched. */ extent_split_interior_cant_alloc, /* * In a potentially invalid state. Must leak (if *to_leak is non-NULL), * and salvage what's still salvageable (if *to_salvage is non-NULL). * None of lead, extent, or trail are valid. 
*/ extent_split_interior_error } extent_split_interior_result_t; static extent_split_interior_result_t extent_split_interior(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, /* The result of splitting, in case of success. */ extent_t **extent, extent_t **lead, extent_t **trail, /* The mess to clean up, in case of error. */ extent_t **to_leak, extent_t **to_salvage, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool growing_retained) { size_t esize = size + pad; size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); assert(new_addr == NULL || leadsize == 0); if (extent_size_get(*extent) < leadsize + esize) { return extent_split_interior_cant_alloc; } size_t trailsize = extent_size_get(*extent) - leadsize - esize; *lead = NULL; *trail = NULL; *to_leak = NULL; *to_salvage = NULL; /* Split the lead. */ if (leadsize != 0) { *lead = *extent; *extent = extent_split_impl(tsdn, arena, r_extent_hooks, *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind, slab, growing_retained); if (*extent == NULL) { *to_leak = *lead; *lead = NULL; return extent_split_interior_error; } } /* Split the trail. */ if (trailsize != 0) { *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, esize, szind, slab, trailsize, SC_NSIZES, false, growing_retained); if (*trail == NULL) { *to_leak = *extent; *to_salvage = *lead; *lead = NULL; *extent = NULL; return extent_split_interior_error; } } if (leadsize == 0 && trailsize == 0) { /* * Splitting causes szind to be set as a side effect, but no * splitting occurred. */ extent_szind_set(*extent, szind); if (szind != SC_NSIZES) { rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_addr_get(*extent), szind, slab); if (slab && extent_size_get(*extent) > PAGE) { rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_past_get(*extent) - (uintptr_t)PAGE, szind, slab); } } } return extent_split_interior_ok; } /* * This fulfills the indicated allocation request out of the given extent (which * the caller should have ensured was big enough). If there's any unused space * before or after the resulting allocation, that space is given its own extent * and put back into extents. */ static extent_t * extent_recycle_split(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, extent_t *extent, bool growing_retained) { extent_t *lead; extent_t *trail; extent_t *to_leak; extent_t *to_salvage; extent_split_interior_result_t result = extent_split_interior( tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, growing_retained); if (result == extent_split_interior_ok) { if (lead != NULL) { extent_deactivate(tsdn, arena, extents, lead); } if (trail != NULL) { extent_deactivate(tsdn, arena, extents, trail); } return extent; } else { /* * We should have picked an extent that was large enough to * fulfill our allocation request. 
*/ assert(result == extent_split_interior_error); if (to_salvage != NULL) { extent_deregister(tsdn, to_salvage); } if (to_leak != NULL) { void *leak = extent_base_get(to_leak); extent_deregister_no_gdump_sub(tsdn, to_leak); extents_leak(tsdn, arena, r_extent_hooks, extents, to_leak, growing_retained); assert(extent_lock_from_addr(tsdn, rtree_ctx, leak, false) == NULL); } return NULL; } unreachable(); } static bool extent_need_manual_zero(arena_t *arena) { /* * Need to manually zero the extent on repopulating if either; 1) non * default extent hooks installed (in which case the purge semantics may * change); or 2) transparent huge pages enabled. */ return (!arena_has_default_hooks(arena) || (opt_thp == thp_mode_always)); } /* * Tries to satisfy the given allocation request by reusing one of the extents * in the given extents_t. */ static extent_t * extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); assert(new_addr == NULL || !slab); assert(pad == 0 || !slab); assert(!*zero || !slab); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, rtree_ctx, extents, new_addr, size, pad, alignment, slab, growing_retained); if (extent == NULL) { return NULL; } extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, extents, new_addr, size, pad, alignment, slab, szind, extent, growing_retained); if (extent == NULL) { return NULL; } if (*commit && !extent_committed_get(extent)) { if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), growing_retained)) { extent_record(tsdn, arena, r_extent_hooks, extents, extent, growing_retained); return NULL; } if (!extent_need_manual_zero(arena)) { extent_zeroed_set(extent, true); } } if (extent_committed_get(extent)) { *commit = true; } if (extent_zeroed_get(extent)) { *zero = true; } if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); } assert(extent_state_get(extent) == extent_state_active); if (slab) { extent_slab_set(extent, slab); extent_interior_register(tsdn, rtree_ctx, extent, szind); } if (*zero) { void *addr = extent_base_get(extent); if (!extent_zeroed_get(extent)) { size_t size = extent_size_get(extent); if (extent_need_manual_zero(arena) || pages_purge_forced(addr, size)) { memset(addr, 0, size); } } else if (config_debug) { size_t *p = (size_t *)(uintptr_t)addr; /* Check the first page only. */ for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { assert(p[i] == 0); } } } return extent; } /* * If the caller specifies (!*zero), it is still possible to receive zeroed * memory, in which case *zero is toggled to true. arena_extent_alloc() takes * advantage of this to avoid demanding zeroed extents, but taking advantage of * them if they are returned. */ static void * extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { void *ret; assert(size != 0); assert(alignment != 0); /* "primary" dss. */ if (have_dss && dss_prec == dss_prec_primary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, commit)) != NULL) { return ret; } /* mmap. 
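 *
 * (Middle step of the ordering above: with dss precedence "primary" the
 * extent_alloc_dss attempt above runs first; with "secondary", mmap here
 * runs first and the dss attempt below is the fallback. Precedence is
 * settable per arena; sketch, where the leaf name "arena.0.dss" is
 * illustrative:
 *
 *	const char *dss = "secondary";
 *	mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(dss));
 * )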
*/ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) != NULL) { return ret; } /* "secondary" dss. */ if (have_dss && dss_prec == dss_prec_secondary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, commit)) != NULL) { return ret; } /* All strategies for allocation failed. */ return NULL; } static void * extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED)); if (have_madvise_huge && ret) { pages_set_thp_state(ret, size); } return ret; } static void * extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { tsdn_t *tsdn; arena_t *arena; tsdn = tsdn_fetch(); arena = arena_get(tsdn, arena_ind, false); /* * The arena we're allocating on behalf of must have been initialized * already. */ assert(arena != NULL); return extent_alloc_default_impl(tsdn, arena, new_addr, size, alignment, zero, commit); } static void extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); if (arena == arena_get(tsd_tsdn(tsd), 0, false)) { /* * The only legitimate case of customized extent hooks for a0 is * hooks with no allocation activities. One such example is to * place metadata on pre-allocated resources such as huge pages. * In that case, rely on reentrancy_level checks to catch * infinite recursions. */ pre_reentrancy(tsd, NULL); } else { pre_reentrancy(tsd, arena); } } static void extent_hook_post_reentrancy(tsdn_t *tsdn) { tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); post_reentrancy(tsd); } /* * If virtual memory is retained, create increasingly larger extents from which * to split requested extents in order to limit the total number of disjoint * virtual memory ranges retained by each arena. */ static extent_t * extent_grow_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); assert(pad == 0 || !slab); assert(!*zero || !slab); size_t esize = size + pad; size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. */ if (alloc_size_min < esize) { goto label_err; } /* * Find the next extent size in the series that would be large enough to * satisfy this request. */ pszind_t egn_skip = 0; size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); while (alloc_size < alloc_size_min) { egn_skip++; if (arena->extent_grow_next + egn_skip >= sz_psz2ind(SC_LARGE_MAXCLASS)) { /* Outside legal range. 
*/ goto label_err; } alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); } extent_t *extent = extent_alloc(tsdn, arena); if (extent == NULL) { goto label_err; } bool zeroed = false; bool committed = false; void *ptr; if (*r_extent_hooks == &extent_hooks_default) { ptr = extent_alloc_default_impl(tsdn, arena, NULL, alloc_size, PAGE, &zeroed, &committed); } else { extent_hook_pre_reentrancy(tsdn, arena); ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, alloc_size, PAGE, &zeroed, &committed, arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES, arena_extent_sn_next(arena), extent_state_active, zeroed, committed, true); if (ptr == NULL) { extent_dalloc(tsdn, arena, extent); goto label_err; } if (extent_register_no_gdump_add(tsdn, extent)) { extents_leak(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, true); goto label_err; } if (extent_zeroed_get(extent) && extent_committed_get(extent)) { *zero = true; } if (extent_committed_get(extent)) { *commit = true; } rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *lead; extent_t *trail; extent_t *to_leak; extent_t *to_salvage; extent_split_interior_result_t result = extent_split_interior( tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind, true); if (result == extent_split_interior_ok) { if (lead != NULL) { extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, lead, true); } if (trail != NULL) { extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, trail, true); } } else { /* * We should have allocated a sufficiently large extent; the * cant_alloc case should not occur. */ assert(result == extent_split_interior_error); if (to_salvage != NULL) { if (config_prof) { extent_gdump_add(tsdn, to_salvage); } extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, to_salvage, true); } if (to_leak != NULL) { extent_deregister_no_gdump_sub(tsdn, to_leak); extents_leak(tsdn, arena, r_extent_hooks, &arena->extents_retained, to_leak, true); } goto label_err; } if (*commit && !extent_committed_get(extent)) { if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), true)) { extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, true); goto label_err; } if (!extent_need_manual_zero(arena)) { extent_zeroed_set(extent, true); } } /* * Increment extent_grow_next if doing so wouldn't exceed the allowed * range. */ if (arena->extent_grow_next + egn_skip + 1 <= arena->retain_grow_limit) { arena->extent_grow_next += egn_skip + 1; } else { arena->extent_grow_next = arena->retain_grow_limit; } /* All opportunities for failure are past. */ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); if (config_prof) { /* Adjust gdump stats now that extent is final size. 
*/ extent_gdump_add(tsdn, extent); } if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); } if (slab) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_slab_set(extent, true); extent_interior_register(tsdn, rtree_ctx, extent, szind); } if (*zero && !extent_zeroed_get(extent)) { void *addr = extent_base_get(extent); size_t size = extent_size_get(extent); if (extent_need_manual_zero(arena) || pages_purge_forced(addr, size)) { memset(addr, 0, size); } } return extent; label_err: malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); return NULL; } static extent_t * extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { assert(size != 0); assert(alignment != 0); malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, &arena->extents_retained, new_addr, size, pad, alignment, slab, szind, zero, commit, true); if (extent != NULL) { malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); if (config_prof) { extent_gdump_add(tsdn, extent); } } else if (opt_retain && new_addr == NULL) { extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, pad, alignment, slab, szind, zero, commit); /* extent_grow_retained() always releases extent_grow_mtx. */ } else { malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); } malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); return extent; } static extent_t * extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { size_t esize = size + pad; extent_t *extent = extent_alloc(tsdn, arena); if (extent == NULL) { return NULL; } void *addr; if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, alignment, zero, commit); } else { extent_hook_pre_reentrancy(tsdn, arena); addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, esize, alignment, zero, commit, arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } if (addr == NULL) { extent_dalloc(tsdn, arena, extent); return NULL; } extent_init(extent, arena, addr, esize, slab, szind, arena_extent_sn_next(arena), extent_state_active, *zero, *commit, true); if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); } if (extent_register(tsdn, extent)) { extents_leak(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, false); return NULL; } return extent; } extent_t * extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_hooks_assure_initialized(arena, r_extent_hooks); extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, new_addr, size, pad, alignment, slab, szind, zero, commit); if (extent == NULL) { if (opt_retain && new_addr != NULL) { /* * When retain is enabled and new_addr is set, we do not * attempt extent_alloc_wrapper_hard which does mmap * that is very unlikely to succeed (unless it happens * to be at the end). 
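*/
/*
 * Editorial sketch (not part of jemalloc) of the fallback order this
 * wrapper implements, assuming default hooks:
 *
 *   extent_alloc_wrapper(new_addr, ...)
 *     1. extent_alloc_retained()      reuse, or grow, retained VM
 *     2. extent_alloc_wrapper_hard()  fresh mapping via hooks/mmap;
 *                                     skipped when opt_retain and
 *                                     new_addr != NULL (see above)
 */
/*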
*/ return NULL; } extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, new_addr, size, pad, alignment, slab, szind, zero, commit); } assert(extent == NULL || extent_dumpable_get(extent)); return extent; } static bool extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, const extent_t *outer) { assert(extent_arena_get(inner) == arena); if (extent_arena_get(outer) != arena) { return false; } assert(extent_state_get(inner) == extent_state_active); if (extent_state_get(outer) != extents->state) { return false; } if (extent_committed_get(inner) != extent_committed_get(outer)) { return false; } return true; } static bool extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *inner, extent_t *outer, bool forward, bool growing_retained) { assert(extent_can_coalesce(arena, extents, inner, outer)); extent_activate_locked(tsdn, arena, extents, outer); malloc_mutex_unlock(tsdn, &extents->mtx); bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, forward ? inner : outer, forward ? outer : inner, growing_retained); malloc_mutex_lock(tsdn, &extents->mtx); if (err) { extent_deactivate_locked(tsdn, arena, extents, outer); } return err; } static extent_t * extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent, bool *coalesced, bool growing_retained, bool inactive_only) { /* * We avoid checking / locking inactive neighbors for large size * classes, since they are eagerly coalesced on deallocation which can * cause lock contention. */ /* * Continue attempting to coalesce until failure, to protect against * races with other threads that are thwarted by this one. */ bool again; do { again = false; /* Try to coalesce forward. */ extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, extent_past_get(extent), inactive_only); if (next != NULL) { /* * extents->mtx only protects against races for * like-state extents, so call extent_can_coalesce() * before releasing next's pool lock. */ bool can_coalesce = extent_can_coalesce(arena, extents, extent, next); extent_unlock(tsdn, next); if (can_coalesce && !extent_coalesce(tsdn, arena, r_extent_hooks, extents, extent, next, true, growing_retained)) { if (extents->delay_coalesce) { /* Do minimal coalescing. */ *coalesced = true; return extent; } again = true; } } /* Try to coalesce backward. */ extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, extent_before_get(extent), inactive_only); if (prev != NULL) { bool can_coalesce = extent_can_coalesce(arena, extents, extent, prev); extent_unlock(tsdn, prev); if (can_coalesce && !extent_coalesce(tsdn, arena, r_extent_hooks, extents, extent, prev, false, growing_retained)) { extent = prev; if (extents->delay_coalesce) { /* Do minimal coalescing. 
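*/
/*
 * Worked example (editorial; the addresses are hypothetical): for
 * extent e spanning [0x200000, 0x208000) with like-state neighbors,
 *
 *   forward:  candidate at extent_past_get(e) == 0x208000; a
 *             successful merge keeps e as the surviving lead.
 *   backward: candidate owning page extent_before_get(e) == 0x1ff000;
 *             the survivor is prev, which is why extent was rebound to
 *             prev just above.
 */
/*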
*/ *coalesced = true; return extent; } again = true; } } } while (again); if (extents->delay_coalesce) { *coalesced = false; } return extent; } static extent_t * extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent, bool *coalesced, bool growing_retained) { return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, coalesced, growing_retained, false); } static extent_t * extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_t *extent, bool *coalesced, bool growing_retained) { return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, coalesced, growing_retained, true); } /* * Does the metadata management portions of putting an unused extent into the * given extents_t (coalesces, deregisters slab interiors, the heap operations). */ static void extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, bool growing_retained) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); assert((extents_state_get(extents) != extent_state_dirty && extents_state_get(extents) != extent_state_muzzy) || !extent_zeroed_get(extent)); malloc_mutex_lock(tsdn, &extents->mtx); extent_hooks_assure_initialized(arena, r_extent_hooks); extent_szind_set(extent, SC_NSIZES); if (extent_slab_get(extent)) { extent_interior_deregister(tsdn, rtree_ctx, extent); extent_slab_set(extent, false); } assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent), true) == extent); if (!extents->delay_coalesce) { extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, NULL, growing_retained); } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) { assert(extents == &arena->extents_dirty); /* Always coalesce large extents eagerly. */ bool coalesced; do { assert(extent_state_get(extent) == extent_state_active); extent = extent_try_coalesce_large(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, &coalesced, growing_retained); } while (coalesced); if (extent_size_get(extent) >= oversize_threshold) { /* Shortcut to purge the oversize extent eagerly. */ malloc_mutex_unlock(tsdn, &extents->mtx); arena_decay_extent(tsdn, arena, r_extent_hooks, extent); return; } } extent_deactivate_locked(tsdn, arena, extents, extent); malloc_mutex_unlock(tsdn, &extents->mtx); } void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (extent_register(tsdn, extent)) { extents_leak(tsdn, arena, &extent_hooks, &arena->extents_retained, extent, false); return; } extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); } static bool extent_may_dalloc(void) { /* With retain enabled, the default dalloc always fails. 
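*/
/*
 * Editorial note: opt_retain keeps virtual memory mapped for reuse, and
 * is typically enabled by default on 64-bit Linux builds.  With it on,
 * the default dalloc hook "fails" on purpose, so extent_dalloc_wrapper()
 * falls through to its decommit/purge path and records the extent in
 * extents_retained instead of unmapping it.
 */
/*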
*/ return !opt_retain; } static bool extent_dalloc_default_impl(void *addr, size_t size) { if (!have_dss || !extent_in_dss(addr)) { return extent_dalloc_mmap(addr, size); } return true; } static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { return extent_dalloc_default_impl(addr, size); } static bool extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { bool err; assert(extent_base_get(extent) != NULL); assert(extent_size_get(extent) != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_addr_set(extent, extent_base_get(extent)); extent_hooks_assure_initialized(arena, r_extent_hooks); /* Try to deallocate. */ if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ err = extent_dalloc_default_impl(extent_base_get(extent), extent_size_get(extent)); } else { extent_hook_pre_reentrancy(tsdn, arena); err = ((*r_extent_hooks)->dalloc == NULL || (*r_extent_hooks)->dalloc(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena))); extent_hook_post_reentrancy(tsdn); } if (!err) { extent_dalloc(tsdn, arena, extent); } return err; } void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { assert(extent_dumpable_get(extent)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Avoid calling the default extent_dalloc unless have to. */ if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) { /* * Deregister first to avoid a race with other allocating * threads, and reregister if deallocation fails. */ extent_deregister(tsdn, extent); if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { return; } extent_reregister(tsdn, extent); } if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } /* Try to decommit; purge if that fails. 
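*/
/*
 * Editorial summary of the cascade below (first match wins):
 *
 *   extent not committed                    -> zeroed = true
 *   decommit succeeds                       -> zeroed = true
 *   purge_forced hook present and succeeds  -> zeroed = true
 *   muzzy state, or purge_lazy succeeds     -> zeroed = false
 *   nothing applies                         -> zeroed = false
 */
/*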
*/ bool zeroed; if (!extent_committed_get(extent)) { zeroed = true; } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent))) { zeroed = true; } else if ((*r_extent_hooks)->purge_forced != NULL && !(*r_extent_hooks)->purge_forced(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), 0, extent_size_get(extent), arena_ind_get(arena))) { zeroed = true; } else if (extent_state_get(extent) == extent_state_muzzy || ((*r_extent_hooks)->purge_lazy != NULL && !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), 0, extent_size_get(extent), arena_ind_get(arena)))) { zeroed = false; } else { zeroed = false; } if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } extent_zeroed_set(extent, zeroed); if (config_prof) { extent_gdump_sub(tsdn, extent); } extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, extent, false); } static void extent_destroy_default_impl(void *addr, size_t size) { if (!have_dss || !extent_in_dss(addr)) { pages_unmap(addr, size); } } static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { extent_destroy_default_impl(addr, size); } void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { assert(extent_base_get(extent) != NULL); assert(extent_size_get(extent) != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Deregister first to avoid a race with other allocating threads. */ extent_deregister(tsdn, extent); extent_addr_set(extent, extent_base_get(extent)); extent_hooks_assure_initialized(arena, r_extent_hooks); /* Try to destroy; silently fail otherwise. */ if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ extent_destroy_default_impl(extent_base_get(extent), extent_size_get(extent)); } else if ((*r_extent_hooks)->destroy != NULL) { extent_hook_pre_reentrancy(tsdn, arena); (*r_extent_hooks)->destroy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } extent_dalloc(tsdn, arena, extent); } static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), length); } static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = ((*r_extent_hooks)->commit == NULL || (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } extent_committed_set(extent, extent_committed_get(extent) || !err); return err; } bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, length, false); } static bool extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), length); } bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = ((*r_extent_hooks)->decommit == NULL || (*r_extent_hooks)->decommit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } extent_committed_set(extent, extent_committed_get(extent) && err); return err; } #ifdef PAGES_CAN_PURGE_LAZY static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { assert(addr != NULL); assert((offset & PAGE_MASK) == 0); assert(length != 0); assert((length & PAGE_MASK) == 0); return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), length); } #endif static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->purge_lazy == NULL) { return true; } if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } return err; } bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, offset, length, false); } #ifdef PAGES_CAN_PURGE_FORCED static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { assert(addr != NULL); assert((offset & PAGE_MASK) == 0); assert(length != 0); assert((length & PAGE_MASK) == 0); return pages_purge_forced((void *)((uintptr_t)addr + (uintptr_t)offset), length); } #endif static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->purge_forced == NULL) { return true; } if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } return err; } bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length) { return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, offset, length, false); } #ifdef JEMALLOC_MAPS_COALESCE static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { return !maps_coalesce; } #endif /* * Accepts the extent to split, and the characteristics of each side of the * split. The 'a' parameters go with the 'lead' of the resulting pair of * extents (the lower addressed portion of the split), and the 'b' parameters go * with the trail (the higher addressed portion). This makes 'extent' the lead, * and returns the trail (except in case of error). */ static extent_t * extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, bool growing_retained) { assert(extent_size_get(extent) == size_a + size_b); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->split == NULL) { return NULL; } extent_t *trail = extent_alloc(tsdn, arena); if (trail == NULL) { goto label_error_a; } extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), extent_state_get(extent), extent_zeroed_get(extent), extent_committed_get(extent), extent_dumpable_get(extent)); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; { extent_t lead; extent_init(&lead, arena, extent_addr_get(extent), size_a, slab_a, szind_a, extent_sn_get(extent), extent_state_get(extent), extent_zeroed_get(extent), extent_committed_get(extent), extent_dumpable_get(extent)); extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, true, &lead_elm_a, &lead_elm_b); } rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, &trail_elm_a, &trail_elm_b); if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL || trail_elm_b == NULL) { goto label_error_b; } extent_lock2(tsdn, extent, trail); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), size_a + size_b, size_a, size_b, extent_committed_get(extent), arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { extent_hook_post_reentrancy(tsdn); } if (err) { goto label_error_c; } extent_size_set(extent, size_a); extent_szind_set(extent, szind_a); extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, szind_a, slab_a); extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, szind_b, slab_b); extent_unlock2(tsdn, extent, trail); return trail; label_error_c: extent_unlock2(tsdn, extent, trail); label_error_b: extent_dalloc(tsdn, arena, trail); label_error_a: return NULL; } extent_t * extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, szind_a, slab_a, size_b, szind_b, slab_b, false); } static bool extent_merge_default_impl(void *addr_a, void *addr_b) { if (!maps_coalesce) { return true; } if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { return true; } return false; } #ifdef JEMALLOC_MAPS_COALESCE static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { return extent_merge_default_impl(addr_a, addr_b); } #endif static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->merge == NULL) { return true; } bool err; if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. 
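*/
/*
 * Editorial note: the default merge only refuses when the two mappings
 * cannot be treated as one, i.e. when !maps_coalesce or when exactly
 * one side lies in the DSS (see extent_merge_default_impl above).  On
 * success, a absorbs b: the sizes are summed, szind resets to
 * SC_NSIZES, and sn becomes min(sn(a), sn(b)), keeping the older
 * serial number.
 */
/*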
*/ err = extent_merge_default_impl(extent_base_get(a), extent_base_get(b)); } else { extent_hook_pre_reentrancy(tsdn, arena); err = (*r_extent_hooks)->merge(*r_extent_hooks, extent_base_get(a), extent_size_get(a), extent_base_get(b), extent_size_get(b), extent_committed_get(a), arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } if (err) { return true; } /* * The rtree writes must happen while all the relevant elements are * owned, so the following code uses decomposed helper functions rather * than extent_{,de}register() to do things in the right order. */ rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, &a_elm_b); extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, &b_elm_b); extent_lock2(tsdn, a, b); if (a_elm_b != NULL) { rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, SC_NSIZES, false); } if (b_elm_b != NULL) { rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, SC_NSIZES, false); } else { b_elm_b = b_elm_a; } extent_size_set(a, extent_size_get(a) + extent_size_get(b)); extent_szind_set(a, SC_NSIZES); extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? extent_sn_get(a) : extent_sn_get(b)); extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES, false); extent_unlock2(tsdn, a, b); extent_dalloc(tsdn, extent_arena_get(b), b); return false; } bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); } bool extent_boot(void) { if (rtree_new(&extents_rtree, true)) { return true; } if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", WITNESS_RANK_EXTENT_POOL)) { return true; } if (have_dss) { extent_dss_boot(); } return false; } jemalloc-sys-0.3.2/rep/src/extent_dss.c010064400007650000024000000153441344617474000162570ustar0000000000000000#define JEMALLOC_EXTENT_DSS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/spin.h" /******************************************************************************/ /* Data. */ const char *opt_dss = DSS_DEFAULT; const char *dss_prec_names[] = { "disabled", "primary", "secondary", "N/A" }; /* * Current dss precedence default, used when creating new arenas. NB: This is * stored as unsigned rather than dss_prec_t because in principle there's no * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use * atomic operations to synchronize the setting. */ static atomic_u_t dss_prec_default = ATOMIC_INIT( (unsigned)DSS_PREC_DEFAULT); /* Base address of the DSS. */ static void *dss_base; /* Atomic boolean indicating whether a thread is currently extending DSS. */ static atomic_b_t dss_extending; /* Atomic boolean indicating whether the DSS is exhausted. */ static atomic_b_t dss_exhausted; /* Atomic current upper limit on DSS addresses. 
*/ static atomic_p_t dss_max; /******************************************************************************/ static void * extent_dss_sbrk(intptr_t increment) { #ifdef JEMALLOC_DSS return sbrk(increment); #else not_implemented(); return NULL; #endif } dss_prec_t extent_dss_prec_get(void) { dss_prec_t ret; if (!have_dss) { return dss_prec_disabled; } ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE); return ret; } bool extent_dss_prec_set(dss_prec_t dss_prec) { if (!have_dss) { return (dss_prec != dss_prec_disabled); } atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE); return false; } static void extent_dss_extending_start(void) { spin_t spinner = SPIN_INITIALIZER; while (true) { bool expected = false; if (atomic_compare_exchange_weak_b(&dss_extending, &expected, true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { break; } spin_adaptive(&spinner); } } static void extent_dss_extending_finish(void) { assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED)); atomic_store_b(&dss_extending, false, ATOMIC_RELEASE); } static void * extent_dss_max_update(void *new_addr) { /* * Get the current end of the DSS as max_cur and assure that dss_max is * up to date. */ void *max_cur = extent_dss_sbrk(0); if (max_cur == (void *)-1) { return NULL; } atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE); /* Fixed new_addr can only be supported if it is at the edge of DSS. */ if (new_addr != NULL && max_cur != new_addr) { return NULL; } return max_cur; } void * extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { extent_t *gap; cassert(have_dss); assert(size > 0); assert(alignment > 0); /* * sbrk() uses a signed increment argument, so take care not to * interpret a large allocation request as a negative increment. */ if ((intptr_t)size < 0) { return NULL; } gap = extent_alloc(tsdn, arena); if (gap == NULL) { return NULL; } extent_dss_extending_start(); if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) { /* * The loop is necessary to recover from races with other * threads that are using the DSS for something other than * malloc. */ while (true) { void *max_cur = extent_dss_max_update(new_addr); if (max_cur == NULL) { goto label_oom; } /* * Compute how much page-aligned gap space (if any) is * necessary to satisfy alignment. This space can be * recycled for later use. */ void *gap_addr_page = (void *)(PAGE_CEILING( (uintptr_t)max_cur)); void *ret = (void *)ALIGNMENT_CEILING( (uintptr_t)gap_addr_page, alignment); size_t gap_size_page = (uintptr_t)ret - (uintptr_t)gap_addr_page; if (gap_size_page != 0) { extent_init(gap, arena, gap_addr_page, gap_size_page, false, SC_NSIZES, arena_extent_sn_next(arena), extent_state_active, false, true, true); } /* * Compute the address just past the end of the desired * allocation space. */ void *dss_next = (void *)((uintptr_t)ret + size); if ((uintptr_t)ret < (uintptr_t)max_cur || (uintptr_t)dss_next < (uintptr_t)max_cur) { goto label_oom; /* Wrap-around. */ } /* Compute the increment, including subpage bytes. */ void *gap_addr_subpage = max_cur; size_t gap_size_subpage = (uintptr_t)ret - (uintptr_t)gap_addr_subpage; intptr_t incr = gap_size_subpage + size; assert((uintptr_t)max_cur + incr == (uintptr_t)ret + size); /* Try to allocate. */ void *dss_prev = extent_dss_sbrk(incr); if (dss_prev == max_cur) { /* Success. 
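*/
/*
 * Worked example (editorial; the numbers are hypothetical, 4 KiB
 * pages): with max_cur == 0x1001000, size == 8K, alignment == 64K:
 *
 *   gap_addr_page = PAGE_CEILING(0x1001000)          == 0x1001000
 *   ret           = ALIGNMENT_CEILING(gap_addr_page) == 0x1010000
 *   gap_size_page = 0x1010000 - 0x1001000            == 0xf000
 *   incr          = gap_size_subpage + size          == 0x11000
 *
 * sbrk(incr) moving the break from 0x1001000 to 0x1012000 yields the
 * aligned allocation at ret plus a recyclable page-aligned gap extent.
 */
/*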
*/ atomic_store_p(&dss_max, dss_next, ATOMIC_RELEASE); extent_dss_extending_finish(); if (gap_size_page != 0) { extent_dalloc_gap(tsdn, arena, gap); } else { extent_dalloc(tsdn, arena, gap); } if (!*commit) { *commit = pages_decommit(ret, size); } if (*zero && *commit) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; extent_t extent; extent_init(&extent, arena, ret, size, size, false, SC_NSIZES, extent_state_active, false, true, true); if (extent_purge_forced_wrapper(tsdn, arena, &extent_hooks, &extent, 0, size)) { memset(ret, 0, size); } } return ret; } /* * Failure, whether due to OOM or a race with a raw * sbrk() call from outside the allocator. */ if (dss_prev == (void *)-1) { /* OOM. */ atomic_store_b(&dss_exhausted, true, ATOMIC_RELEASE); goto label_oom; } } } label_oom: extent_dss_extending_finish(); extent_dalloc(tsdn, arena, gap); return NULL; } static bool extent_in_dss_helper(void *addr, void *max) { return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < (uintptr_t)max); } bool extent_in_dss(void *addr) { cassert(have_dss); return extent_in_dss_helper(addr, atomic_load_p(&dss_max, ATOMIC_ACQUIRE)); } bool extent_dss_mergeable(void *addr_a, void *addr_b) { void *max; cassert(have_dss); if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < (uintptr_t)dss_base) { return true; } max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE); return (extent_in_dss_helper(addr_a, max) == extent_in_dss_helper(addr_b, max)); } void extent_dss_boot(void) { cassert(have_dss); dss_base = extent_dss_sbrk(0); atomic_store_b(&dss_extending, false, ATOMIC_RELAXED); atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED); atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED); } /******************************************************************************/ jemalloc-sys-0.3.2/rep/src/extent_mmap.c010064400007650000024000000016111344617474000164100ustar0000000000000000#define JEMALLOC_EXTENT_MMAP_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_mmap.h" /******************************************************************************/ /* Data. 
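*/
/*
 * Editorial note: opt_retain (below) defaults to the configure-time
 * JEMALLOC_RETAIN setting, typically on for 64-bit Linux and off where
 * address space is scarce.  extent_dalloc_mmap() mirrors it: munmap
 * only happens when retain is off, and the "failed to deallocate"
 * return value is what routes extents into extents_retained.
 */
/*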
*/ bool opt_retain = #ifdef JEMALLOC_RETAIN true #else false #endif ; /******************************************************************************/ void * extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, PAGE), commit); if (ret == NULL) { return NULL; } assert(ret != NULL); if (*commit) { *zero = true; } return ret; } bool extent_dalloc_mmap(void *addr, size_t size) { if (!opt_retain) { pages_unmap(addr, size); } return opt_retain; } jemalloc-sys-0.3.2/rep/src/hash.c010064400007650000024000000002041344617474000150070ustar0000000000000000#define JEMALLOC_HASH_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" jemalloc-sys-0.3.2/rep/src/hook.c010064400007650000024000000131561344617474000150360ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/hook.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/seq.h" typedef struct hooks_internal_s hooks_internal_t; struct hooks_internal_s { hooks_t hooks; bool in_use; }; seq_define(hooks_internal_t, hooks) static atomic_u_t nhooks = ATOMIC_INIT(0); static seq_hooks_t hooks[HOOK_MAX]; static malloc_mutex_t hooks_mu; bool hook_boot() { return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK, malloc_mutex_rank_exclusive); } static void * hook_install_locked(hooks_t *to_install) { hooks_internal_t hooks_internal; for (int i = 0; i < HOOK_MAX; i++) { bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]); /* We hold mu; no concurrent access. */ assert(success); if (!hooks_internal.in_use) { hooks_internal.hooks = *to_install; hooks_internal.in_use = true; seq_store_hooks(&hooks[i], &hooks_internal); atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1, ATOMIC_RELAXED); return &hooks[i]; } } return NULL; } void * hook_install(tsdn_t *tsdn, hooks_t *to_install) { malloc_mutex_lock(tsdn, &hooks_mu); void *ret = hook_install_locked(to_install); if (ret != NULL) { tsd_global_slow_inc(tsdn); } malloc_mutex_unlock(tsdn, &hooks_mu); return ret; } static void hook_remove_locked(seq_hooks_t *to_remove) { hooks_internal_t hooks_internal; bool success = seq_try_load_hooks(&hooks_internal, to_remove); /* We hold mu; no concurrent access. */ assert(success); /* Should only remove hooks that were added. 
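*/
/*
 * Editorial usage sketch (hypothetical caller; not part of jemalloc):
 *
 *   hooks_t my_hooks = {.alloc_hook = my_alloc,
 *       .dalloc_hook = my_dalloc, .expand_hook = my_expand,
 *       .extra = NULL};
 *   void *handle = hook_install(tsdn, &my_hooks);  // NULL once all
 *                                                  // HOOK_MAX slots used
 *   ...
 *   hook_remove(tsdn, handle);
 *
 * Install/remove serialize on hooks_mu; readers are lock-free via
 * seq_try_load_hooks(), which is why removal just clears in_use.
 */
/*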
*/ assert(hooks_internal.in_use); hooks_internal.in_use = false; seq_store_hooks(to_remove, &hooks_internal); atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1, ATOMIC_RELAXED); } void hook_remove(tsdn_t *tsdn, void *opaque) { if (config_debug) { char *hooks_begin = (char *)&hooks[0]; char *hooks_end = (char *)&hooks[HOOK_MAX]; char *hook = (char *)opaque; assert(hooks_begin <= hook && hook < hooks_end && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0); } malloc_mutex_lock(tsdn, &hooks_mu); hook_remove_locked((seq_hooks_t *)opaque); tsd_global_slow_dec(tsdn); malloc_mutex_unlock(tsdn, &hooks_mu); } #define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \ for (int for_each_hook_counter = 0; \ for_each_hook_counter < HOOK_MAX; \ for_each_hook_counter++) { \ bool for_each_hook_success = seq_try_load_hooks( \ (hooks_internal_ptr), &hooks[for_each_hook_counter]); \ if (!for_each_hook_success) { \ continue; \ } \ if (!(hooks_internal_ptr)->in_use) { \ continue; \ } #define FOR_EACH_HOOK_END \ } static bool * hook_reentrantp() { /* * We prevent user reentrancy within hooks. This is basically just a * thread-local bool that triggers an early-exit. * * We don't fold in_hook into reentrancy. There are two reasons for * this: * - Right now, we turn on reentrancy during things like extent hook * execution. Allocating during extent hooks is not officially * supported, but we don't want to break it for the time being. These * sorts of allocations should probably still be hooked, though. * - If a hook allocates, we may want it to be relatively fast (after * all, it executes on every allocator operation). Turning on * reentrancy is a fairly heavyweight mode (disabling tcache, * redirecting to arena 0, etc.). It's possible we may one day want * to turn on reentrant mode here, if it proves too difficult to keep * this working. But that's fairly easy for us to see; OTOH, people * not using hooks because they're too slow is easy for us to miss. * * The tricky part is * that this code might get invoked even if we don't have access to tsd. * This function mimics getting a pointer to thread-local data, except * that it might secretly return a pointer to some global data if we * know that the caller will take the early-exit path. * If we return a bool that indicates that we are reentrant, then the * caller will go down the early exit path, leaving the global * untouched. 
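*/
/*
 * Editorial sketch of the guard protocol that HOOK_PROLOGUE /
 * HOOK_EPILOGUE (below) build on this function:
 *
 *   bool *in_hook = hook_reentrantp();
 *   if (*in_hook) return;   // a hook is already running; bail out
 *   *in_hook = true;        // cleared again by HOOK_EPILOGUE
 *
 * When tsd is unavailable the pointer aliases the static bool below,
 * which stays permanently true: such callers always take the early
 * exit and never write to it concurrently.
 */
/*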
*/ static bool in_hook_global = true; tsdn_t *tsdn = tsdn_fetch(); tcache_t *tcache = tsdn_tcachep_get(tsdn); if (tcache != NULL) { return &tcache->in_hook; } return &in_hook_global; } #define HOOK_PROLOGUE \ if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \ return; \ } \ bool *in_hook = hook_reentrantp(); \ if (*in_hook) { \ return; \ } \ *in_hook = true; #define HOOK_EPILOGUE \ *in_hook = false; void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, uintptr_t args_raw[3]) { HOOK_PROLOGUE hooks_internal_t hook; FOR_EACH_HOOK_BEGIN(&hook) hook_alloc h = hook.hooks.alloc_hook; if (h != NULL) { h(hook.hooks.extra, type, result, result_raw, args_raw); } FOR_EACH_HOOK_END HOOK_EPILOGUE } void hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) { HOOK_PROLOGUE hooks_internal_t hook; FOR_EACH_HOOK_BEGIN(&hook) hook_dalloc h = hook.hooks.dalloc_hook; if (h != NULL) { h(hook.hooks.extra, type, address, args_raw); } FOR_EACH_HOOK_END HOOK_EPILOGUE } void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize, size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) { HOOK_PROLOGUE hooks_internal_t hook; FOR_EACH_HOOK_BEGIN(&hook) hook_expand h = hook.hooks.expand_hook; if (h != NULL) { h(hook.hooks.extra, type, address, old_usize, new_usize, result_raw, args_raw); } FOR_EACH_HOOK_END HOOK_EPILOGUE } jemalloc-sys-0.3.2/rep/src/jemalloc.c010064400007650000024000003041701344617474000156630ustar0000000000000000#define JEMALLOC_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/hook.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/log.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/spin.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ /* Runtime configuration options. */ const char *je_malloc_conf #ifndef _WIN32 JEMALLOC_ATTR(weak) #endif ; bool opt_abort = #ifdef JEMALLOC_DEBUG true #else false #endif ; bool opt_abort_conf = #ifdef JEMALLOC_DEBUG true #else false #endif ; const char *opt_junk = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) "true" #else "false" #endif ; bool opt_junk_alloc = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; bool opt_junk_free = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; bool opt_utrace = false; bool opt_xmalloc = false; bool opt_zero = false; unsigned opt_narenas = 0; unsigned ncpus; /* Protects arenas initialization. */ malloc_mutex_t arenas_lock; /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. * * arenas[0..narenas_auto) are used for automatic multiplexing of threads and * arenas. arenas[narenas_auto..narenas_total) are only used if the application * takes some action to create them and allocate from them. * * Points to an arena_t. 
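*/
/*
 * Editorial illustration (hypothetical narenas_auto == 4):
 *
 *   arenas[0..3]                automatic arenas, multiplexed across
 *                               application threads
 *   arenas[4..narenas_total)    manual arenas, e.g. created via the
 *                               arenas.create mallctl
 *
 * Slots are atomic_p_t so lookups can race arena creation safely.
 */
/*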
*/ JEMALLOC_ALIGNED(CACHELINE) atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; static atomic_u_t narenas_total; /* Use narenas_total_*(). */ /* Below three are read-only after initialization. */ static arena_t *a0; /* arenas[0]. */ unsigned narenas_auto; unsigned manual_arena_base; typedef enum { malloc_init_uninitialized = 3, malloc_init_a0_initialized = 2, malloc_init_recursible = 1, malloc_init_initialized = 0 /* Common case --> jnz. */ } malloc_init_t; static malloc_init_t malloc_init_state = malloc_init_uninitialized; /* False should be the common case. Set to true to trigger initialization. */ bool malloc_slow = true; /* When malloc_slow is true, set the corresponding bits for sanity check. */ enum { flag_opt_junk_alloc = (1U), flag_opt_junk_free = (1U << 1), flag_opt_zero = (1U << 2), flag_opt_utrace = (1U << 3), flag_opt_xmalloc = (1U << 4) }; static uint8_t malloc_slow_flags; #ifdef JEMALLOC_THREADED_INIT /* Used to let the initializing thread recursively allocate. */ # define NO_INITIALIZER ((unsigned long)0) # define INITIALIZER pthread_self() # define IS_INITIALIZER (malloc_initializer == pthread_self()) static pthread_t malloc_initializer = NO_INITIALIZER; #else # define NO_INITIALIZER false # define INITIALIZER true # define IS_INITIALIZER malloc_initializer static bool malloc_initializer = NO_INITIALIZER; #endif /* Used to avoid initialization races. */ #ifdef _WIN32 #if _WIN32_WINNT >= 0x0600 static malloc_mutex_t init_lock = SRWLOCK_INIT; #else static malloc_mutex_t init_lock; static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI _init_init_lock(void) { /* * If another constructor in the same binary is using mallctl to e.g. * set up extent hooks, it may end up running before this one, and * malloc_init_hard will crash trying to lock the uninitialized lock. So * we force an initialization of the lock in malloc_init_hard as well. * We don't try to care about atomicity of the accessed to the * init_lock_initialized boolean, since it really only matters early in * the process creation, before any separate thread normally starts * doing anything. */ if (!init_lock_initialized) { malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, malloc_mutex_rank_exclusive); } init_lock_initialized = true; } #ifdef _MSC_VER # pragma section(".CRT$XCU", read) JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) static const void (WINAPI *init_init_lock)(void) = _init_init_lock; #endif #endif #else static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; #endif typedef struct { void *p; /* Input pointer (as in realloc(p, s)). */ size_t s; /* Request size. */ void *r; /* Result pointer. */ } malloc_utrace_t; #ifdef JEMALLOC_UTRACE # define UTRACE(a, b, c) do { \ if (unlikely(opt_utrace)) { \ int utrace_serrno = errno; \ malloc_utrace_t ut; \ ut.p = (a); \ ut.s = (b); \ ut.r = (c); \ utrace(&ut, sizeof(ut)); \ errno = utrace_serrno; \ } \ } while (0) #else # define UTRACE(a, b, c) #endif /* Whether encountered any invalid config options. */ static bool had_conf_error = false; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ static bool malloc_init_hard_a0(void); static bool malloc_init_hard(void); /******************************************************************************/ /* * Begin miscellaneous support functions. 
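*/
/*
 * Editorial note: the a0*() / bootstrap_*() helpers defined below
 * allocate through arena 0 with a NULL tsdn, e.g.
 *
 *   void *p = a0malloc(64);   // metadata-style allocation, no TLS
 *   a0dalloc(p);
 *
 * They exist for contexts (TLS bootstrap, FreeBSD libc) where the
 * ordinary i{d,}alloc() paths would touch thread-local state.
 */
/*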
*/
bool
malloc_initialized(void) {
	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
		return malloc_init_hard_a0();
	}
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init(void) {
	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
		return true;
	}
	return false;
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_internal) {
	if (unlikely(malloc_init_a0())) {
		return NULL;
	}

	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
	    is_internal, arena_get(TSDN_NULL, 0, true), true);
}

static void
a0idalloc(void *ptr, bool is_internal) {
	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}

void *
a0malloc(size_t size) {
	return a0ialloc(size, false, true);
}

void
a0dalloc(void *ptr) {
	a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size) {
	if (unlikely(size == 0)) {
		size = 1;
	}

	return a0ialloc(size, false, false);
}

void *
bootstrap_calloc(size_t num, size_t size) {
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return a0ialloc(num_size, true, false);
}

void
bootstrap_free(void *ptr) {
	if (unlikely(ptr == NULL)) {
		return;
	}

	a0idalloc(ptr, false);
}

void
arena_set(unsigned ind, arena_t *arena) {
	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}

static void
narenas_total_set(unsigned narenas) {
	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}

static void
narenas_total_inc(void) {
	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}

unsigned
narenas_total_get(void) {
	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind >= MALLOCX_ARENA_LIMIT) {
		return NULL;
	}
	if (ind == narenas_total_get()) {
		narenas_total_inc();
	}

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(arena_is_auto(arena));
		return arena;
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind, extent_hooks);

	return arena;
}

static void
arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
	if (ind == 0) {
		return;
	}
	/*
	 * Avoid creating a new background thread just for the huge arena,
	 * which purges eagerly by default.
	 */
	if (have_background_thread && !arena_is_huge(ind)) {
		if (background_thread_create(tsdn_tsd(tsdn), ind)) {
			malloc_printf("<jemalloc>: error in background thread "
			    "creation for arena %u. 
Abort.\n", ind); abort(); } } } arena_t * arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; malloc_mutex_lock(tsdn, &arenas_lock); arena = arena_init_locked(tsdn, ind, extent_hooks); malloc_mutex_unlock(tsdn, &arenas_lock); arena_new_create_background_thread(tsdn, ind); return arena; } static void arena_bind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_inc(arena, internal); if (internal) { tsd_iarena_set(tsd, arena); } else { tsd_arena_set(tsd, arena); unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1, ATOMIC_RELAXED); tsd_binshards_t *bins = tsd_binshardsp_get(tsd); for (unsigned i = 0; i < SC_NBINS; i++) { assert(bin_infos[i].n_shards > 0 && bin_infos[i].n_shards <= BIN_SHARDS_MAX); bins->binshard[i] = shard % bin_infos[i].n_shards; } } } void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; oldarena = arena_get(tsd_tsdn(tsd), oldind, false); newarena = arena_get(tsd_tsdn(tsd), newind, false); arena_nthreads_dec(oldarena, false); arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); } static void arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_dec(arena, internal); if (internal) { tsd_iarena_set(tsd, NULL); } else { tsd_arena_set(tsd, NULL); } } arena_tdata_t * arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { arena_tdata_t *tdata, *arenas_tdata_old; arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); unsigned narenas_tdata_old, i; unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); unsigned narenas_actual = narenas_total_get(); /* * Dissociate old tdata array (and set up for deallocation upon return) * if it's too small. */ if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { arenas_tdata_old = arenas_tdata; narenas_tdata_old = narenas_tdata; arenas_tdata = NULL; narenas_tdata = 0; tsd_arenas_tdata_set(tsd, arenas_tdata); tsd_narenas_tdata_set(tsd, narenas_tdata); } else { arenas_tdata_old = NULL; narenas_tdata_old = 0; } /* Allocate tdata array if it's missing. */ if (arenas_tdata == NULL) { bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { *arenas_tdata_bypassp = true; arenas_tdata = (arena_tdata_t *)a0malloc( sizeof(arena_tdata_t) * narenas_tdata); *arenas_tdata_bypassp = false; } if (arenas_tdata == NULL) { tdata = NULL; goto label_return; } assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); tsd_arenas_tdata_set(tsd, arenas_tdata); tsd_narenas_tdata_set(tsd, narenas_tdata); } /* * Copy to tdata array. It's possible that the actual number of arenas * has increased since narenas_total_get() was called above, but that * causes no correctness issues unless two threads concurrently execute * the arenas.create mallctl, which we trust mallctl synchronization to * prevent. */ /* Copy/initialize tickers. */ for (i = 0; i < narenas_actual; i++) { if (i < narenas_tdata_old) { ticker_copy(&arenas_tdata[i].decay_ticker, &arenas_tdata_old[i].decay_ticker); } else { ticker_init(&arenas_tdata[i].decay_ticker, DECAY_NTICKS_PER_UPDATE); } } if (narenas_tdata > narenas_actual) { memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) * (narenas_tdata - narenas_actual)); } /* Read the refreshed tdata array. 
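*/
/*
 * Editorial walk-through (hypothetical sizes): a thread holding a
 * 4-entry tdata array that asks for ind == 9 while 8 arenas exist
 * discards the old array, allocates max(narenas_actual, ind + 1) == 10
 * entries, copies the 4 live decay tickers, re-inits tickers 4..7, and
 * zeroes entries 8..9 before returning &arenas_tdata[9].
 */
/*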
*/ tdata = &arenas_tdata[ind]; label_return: if (arenas_tdata_old != NULL) { a0dalloc(arenas_tdata_old); } return tdata; } /* Slow path, called only by arena_choose(). */ arena_t * arena_choose_hard(tsd_t *tsd, bool internal) { arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { unsigned choose = percpu_arena_choose(); ret = arena_get(tsd_tsdn(tsd), choose, true); assert(ret != NULL); arena_bind(tsd, arena_ind_get(ret), false); arena_bind(tsd, arena_ind_get(ret), true); return ret; } if (narenas_auto > 1) { unsigned i, j, choose[2], first_null; bool is_new_arena[2]; /* * Determine binding for both non-internal and internal * allocation. * * choose[0]: For application allocation. * choose[1]: For internal metadata allocation. */ for (j = 0; j < 2; j++) { choose[j] = 0; is_new_arena[j] = false; } first_null = narenas_auto; malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ for (j = 0; j < 2; j++) { if (arena_nthreads_get(arena_get( tsd_tsdn(tsd), i, false), !!j) < arena_nthreads_get(arena_get( tsd_tsdn(tsd), choose[j], false), !!j)) { choose[j] = i; } } } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized * arena, in case all extant arenas are in use. * * NB: It is possible for there to be * discontinuities in terms of initialized * versus uninitialized arenas, due to the * "thread.arena" mallctl. */ first_null = i; } } for (j = 0; j < 2; j++) { if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), choose[j], false), !!j) == 0 || first_null == narenas_auto) { /* * Use an unloaded arena, or the least loaded * arena if all arenas are already initialized. */ if (!!j == internal) { ret = arena_get(tsd_tsdn(tsd), choose[j], false); } } else { arena_t *arena; /* Initialize a new arena. */ choose[j] = first_null; arena = arena_init_locked(tsd_tsdn(tsd), choose[j], (extent_hooks_t *)&extent_hooks_default); if (arena == NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); return NULL; } is_new_arena[j] = true; if (!!j == internal) { ret = arena; } } arena_bind(tsd, choose[j], !!j); } malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); for (j = 0; j < 2; j++) { if (is_new_arena[j]) { assert(choose[j] > 0); arena_new_create_background_thread( tsd_tsdn(tsd), choose[j]); } } } else { ret = arena_get(tsd_tsdn(tsd), 0, false); arena_bind(tsd, 0, false); arena_bind(tsd, 0, true); } return ret; } void iarena_cleanup(tsd_t *tsd) { arena_t *iarena; iarena = tsd_iarena_get(tsd); if (iarena != NULL) { arena_unbind(tsd, arena_ind_get(iarena), true); } } void arena_cleanup(tsd_t *tsd) { arena_t *arena; arena = tsd_arena_get(tsd); if (arena != NULL) { arena_unbind(tsd, arena_ind_get(arena), false); } } void arenas_tdata_cleanup(tsd_t *tsd) { arena_tdata_t *arenas_tdata; /* Prevent tsd->arenas_tdata from being (re)created. */ *tsd_arenas_tdata_bypassp_get(tsd) = true; arenas_tdata = tsd_arenas_tdata_get(tsd); if (arenas_tdata != NULL) { tsd_arenas_tdata_set(tsd, NULL); a0dalloc(arenas_tdata); } } static void stats_print_atexit(void) { if (config_stats) { tsdn_t *tsdn; unsigned narenas, i; tsdn = tsdn_fetch(); /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats * events. 
As a consequence, the final stats may be slightly * out of date by the time they are reported, if other threads * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_foreach(tcache, &arena->tcache_ql, link) { tcache_stats_merge(tsdn, tcache, arena); } malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } } } je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); } /* * Ensure that we don't hold any locks upon entry to or exit from allocator * code (in a "broad" sense that doesn't count a reentrant allocation as an * entrance or exit). */ JEMALLOC_ALWAYS_INLINE void check_entry_exit_locking(tsdn_t *tsdn) { if (!config_debug) { return; } if (tsdn_null(tsdn)) { return; } tsd_t *tsd = tsdn_tsd(tsdn); /* * It's possible we hold locks at entry/exit if we're in a nested * allocation. */ int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); if (reentrancy_level != 0) { return; } witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); } /* * End miscellaneous support functions. */ /******************************************************************************/ /* * Begin initialization functions. */ static char * jemalloc_secure_getenv(const char *name) { #ifdef JEMALLOC_HAVE_SECURE_GETENV return secure_getenv(name); #else # ifdef JEMALLOC_HAVE_ISSETUGID if (issetugid() != 0) { return NULL; } # endif return getenv(name); #endif } static unsigned malloc_ncpus(void) { long result; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) /* * glibc >= 2.6 has the CPU_COUNT macro. * * glibc's sysconf() uses isspace(). glibc allocates for the first time * *before* setting up the isspace tables. Therefore we need a * different method to get the number of CPUs. */ { cpu_set_t set; pthread_getaffinity_np(pthread_self(), sizeof(set), &set); result = CPU_COUNT(&set); } #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif return ((result == -1) ? 1 : (unsigned)result); } static void init_opt_stats_print_opts(const char *v, size_t vlen) { size_t opts_len = strlen(opt_stats_print_opts); assert(opts_len <= stats_print_tot_num_options); for (size_t i = 0; i < vlen; i++) { switch (v[i]) { #define OPTION(o, v, d, s) case o: break; STATS_PRINT_OPTIONS #undef OPTION default: continue; } if (strchr(opt_stats_print_opts, v[i]) != NULL) { /* Ignore repeated. */ continue; } opt_stats_print_opts[opts_len++] = v[i]; opt_stats_print_opts[opts_len] = '\0'; assert(opts_len <= stats_print_tot_num_options); } assert(opts_len == strlen(opt_stats_print_opts)); } /* Reads the next size pair in a multi-sized option. */ static bool malloc_conf_multi_sizes_next(const char **slab_size_segment_cur, size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) { const char *cur = *slab_size_segment_cur; char *end; uintmax_t um; set_errno(0); /* First number, then '-' */ um = malloc_strtoumax(cur, &end, 0); if (get_errno() != 0 || *end != '-') { return true; } *slab_start = (size_t)um; cur = end + 1; /* Second number, then ':' */ um = malloc_strtoumax(cur, &end, 0); if (get_errno() != 0 || *end != ':') { return true; } *slab_end = (size_t)um; cur = end + 1; /* Last number */ um = malloc_strtoumax(cur, &end, 0); if (get_errno() != 0) { return true; } *new_size = (size_t)um; /* Consume the separator if there is one. 
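*/
/*
 * Worked example (editorial; the values are hypothetical): parsing
 * v == "1-160:8|161-512:4" takes two calls through this function:
 *
 *   1st: *slab_start == 1,   *slab_end == 160, *new_size == 8,
 *        cursor advanced past '|' to "161-512:4"
 *   2nd: *slab_start == 161, *slab_end == 512, *new_size == 4,
 *        *vlen_left == 0, ending the caller's loop
 *
 * The "bin_shards" handler further down consumes exactly this shape.
 */
/*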
*/
	if (*end == '|') {
		end++;
	}

	*vlen_left -= end - *slab_size_segment_cur;
	*slab_size_segment_cur = end;

	return false;
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return true;
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return true;
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return false;
}

static void
malloc_abort_invalid_conf(void) {
	assert(opt_abort_conf);
	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
	    "value (see above).\n");
	abort();
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen) {
	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
	/* If abort_conf is set, error out after processing all options. */
	const char *experimental = "experimental_";
	if (strncmp(k, experimental, strlen(experimental)) == 0) {
		/* However, tolerate experimental features. */
		return;
	}
	had_conf_error = true;
}

static void
malloc_slow_flag_init(void) {
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	malloc_slow = (malloc_slow_flags != 0);
}

static void
malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. 
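*/
/*
 * Editorial summary of the four option sources this loop scans, in
 * increasing precedence (later iterations re-process and override):
 *
 *   i == 0: config_malloc_conf  (--with-malloc-conf, configure time)
 *   i == 1: je_malloc_conf      (set by the embedding program)
 *   i == 2: /etc/malloc.conf    (the name the symlink points at)
 *   i == 3: MALLOC_CONF         (environment, optionally prefixed)
 */
/*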
*/ buf[0] = '\0'; opts = buf; } break; case 2: { ssize_t linklen = 0; #ifndef _WIN32 int saved_errno = errno; const char *linkname = # ifdef JEMALLOC_PREFIX "/etc/"JEMALLOC_PREFIX"malloc.conf" # else "/etc/malloc.conf" # endif ; /* * Try to use the contents of the "/etc/malloc.conf" * symbolic link's name. */ #ifndef JEMALLOC_READLINKAT linklen = readlink(linkname, buf, sizeof(buf) - 1); #else linklen = readlinkat(AT_FDCWD, linkname, buf, sizeof(buf) - 1); #endif if (linklen == -1) { /* No configuration specified. */ linklen = 0; /* Restore errno. */ set_errno(saved_errno); } #endif buf[linklen] = '\0'; opts = buf; break; } case 3: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" #else "MALLOC_CONF" #endif ; if ((opts = jemalloc_secure_getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment * variable. */ } else { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; } default: not_reached(); buf[0] = '\0'; opts = buf; } while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) { #define CONF_MATCH(n) \ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) #define CONF_MATCH_VALUE(n) \ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) #define CONF_HANDLE_BOOL(o, n) \ if (CONF_MATCH(n)) { \ if (CONF_MATCH_VALUE("true")) { \ o = true; \ } else if (CONF_MATCH_VALUE("false")) { \ o = false; \ } else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ continue; \ } /* * One of the CONF_MIN macros below expands, in one of the use points, * to "unsigned integer < 0", which is always false, triggering the * GCC -Wtype-limits warning, which we disable here and re-enable below. */ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS #define CONF_MIN_no(um, min) false #define CONF_MIN_yes(um, min) ((um) < (min)) #define CONF_MAX_no(um, max) false #define CONF_MAX_yes(um, max) ((um) > (max)) #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ \ set_errno(0); \ um = malloc_strtoumax(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ if (CONF_MIN_##check_min(um, \ (t)(min))) { \ o = (t)(min); \ } else if ( \ CONF_MAX_##check_max(um, \ (t)(max))) { \ o = (t)(max); \ } else { \ o = (t)um; \ } \ } else { \ if (CONF_MIN_##check_min(um, \ (t)(min)) || \ CONF_MAX_##check_max(um, \ (t)(max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ } else { \ o = (t)um; \ } \ } \ continue; \ } #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ clip) \ CONF_HANDLE_T_U(unsigned, o, n, min, max, \ check_min, check_max, clip) #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ CONF_HANDLE_T_U(size_t, o, n, min, max, \ check_min, check_max, clip) #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (CONF_MATCH(n)) { \ long l; \ char *end; \ \ set_errno(0); \ l = strtol(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (l < (ssize_t)(min) || l > \ (ssize_t)(max)) { \ malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ } else { \ o = l; \ } \ continue; \ } #define CONF_HANDLE_CHAR_P(o, n, d) \ if (CONF_MATCH(n)) { \ size_t cpylen = (vlen <= \ sizeof(o)-1) ? 
vlen : \ sizeof(o)-1; \ strncpy(o, v, cpylen); \ o[cpylen] = '\0'; \ continue; \ } CONF_HANDLE_BOOL(opt_abort, "abort") CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") if (strncmp("metadata_thp", k, klen) == 0) { int i; bool match = false; for (i = 0; i < metadata_thp_mode_limit; i++) { if (strncmp(metadata_thp_mode_names[i], v, vlen) == 0) { opt_metadata_thp = i; match = true; break; } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strncmp(dss_prec_names[i], v, vlen) == 0) { if (extent_dss_prec_set(i)) { malloc_conf_error( "Error setting dss", k, klen, v, vlen); } else { opt_dss = dss_prec_names[i]; match = true; break; } } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, UINT_MAX, yes, no, false) if (CONF_MATCH("bin_shards")) { const char *bin_shards_segment_cur = v; size_t vlen_left = vlen; do { size_t size_start; size_t size_end; size_t nshards; bool err = malloc_conf_multi_sizes_next( &bin_shards_segment_cur, &vlen_left, &size_start, &size_end, &nshards); if (err || bin_update_shard_size( bin_shard_sizes, size_start, size_end, nshards)) { malloc_conf_error( "Invalid settings for " "bin_shards", k, klen, v, vlen); break; } } while (vlen_left > 0); continue; } CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : SSIZE_MAX); CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : SSIZE_MAX); CONF_HANDLE_BOOL(opt_stats_print, "stats_print") if (CONF_MATCH("stats_print_opts")) { init_opt_stats_print_opts(v, vlen); continue; } if (config_fill) { if (CONF_MATCH("junk")) { if (CONF_MATCH_VALUE("true")) { opt_junk = "true"; opt_junk_alloc = opt_junk_free = true; } else if (CONF_MATCH_VALUE("false")) { opt_junk = "false"; opt_junk_alloc = opt_junk_free = false; } else if (CONF_MATCH_VALUE("alloc")) { opt_junk = "alloc"; opt_junk_alloc = true; opt_junk_free = false; } else if (CONF_MATCH_VALUE("free")) { opt_junk = "free"; opt_junk_alloc = false; opt_junk_free = true; } else { malloc_conf_error( "Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_BOOL(opt_zero, "zero") } if (config_utrace) { CONF_HANDLE_BOOL(opt_utrace, "utrace") } if (config_xmalloc) { CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") } CONF_HANDLE_BOOL(opt_tcache, "tcache") CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", -1, (sizeof(size_t) << 3) - 1) /* * The runtime option of oversize_threshold remains * undocumented. It may be tweaked in the next major * release (6.0). The default value 8M is rather * conservative / safe. Tuning it further down may * improve fragmentation a bit more, but may also cause * contention on the huge arena. 
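 * For example (illustrative), oversize_threshold:1048576 would route
 * sufficiently large allocations (those at or above the threshold) to
 * the dedicated huge arena.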
*/ CONF_HANDLE_SIZE_T(opt_oversize_threshold, "oversize_threshold", SC_LARGE_MINCLASS, SC_LARGE_MAXCLASS, yes, yes, false) CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, "lg_extent_max_active_fit", 0, (sizeof(size_t) << 3), yes, yes, false) if (strncmp("percpu_arena", k, klen) == 0) { bool match = false; for (int i = percpu_arena_mode_names_base; i < percpu_arena_mode_names_limit; i++) { if (strncmp(percpu_arena_mode_names[i], v, vlen) == 0) { if (!have_percpu_arena) { malloc_conf_error( "No getcpu support", k, klen, v, vlen); } opt_percpu_arena = i; match = true; break; } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_BOOL(opt_background_thread, "background_thread"); CONF_HANDLE_SIZE_T(opt_max_background_threads, "max_background_threads", 1, opt_max_background_threads, yes, yes, true); if (CONF_MATCH("slab_sizes")) { bool err; const char *slab_size_segment_cur = v; size_t vlen_left = vlen; do { size_t slab_start; size_t slab_end; size_t pgs; err = malloc_conf_multi_sizes_next( &slab_size_segment_cur, &vlen_left, &slab_start, &slab_end, &pgs); if (!err) { sc_data_update_slab_size( sc_data, slab_start, slab_end, (int)pgs); } else { malloc_conf_error( "Invalid settings for " "slab_sizes", k, klen, v, vlen); } } while (!err && vlen_left > 0); continue; } if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof") CONF_HANDLE_CHAR_P(opt_prof_prefix, "prof_prefix", "jeprof") CONF_HANDLE_BOOL(opt_prof_active, "prof_active") CONF_HANDLE_BOOL(opt_prof_thread_active_init, "prof_thread_active_init") CONF_HANDLE_SIZE_T(opt_lg_prof_sample, "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - 1, no, yes, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, (sizeof(uint64_t) << 3) - 1) CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") CONF_HANDLE_BOOL(opt_prof_final, "prof_final") CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") CONF_HANDLE_BOOL(opt_prof_log, "prof_log") } if (config_log) { if (CONF_MATCH("log")) { size_t cpylen = ( vlen <= sizeof(log_var_names) ? vlen : sizeof(log_var_names) - 1); strncpy(log_var_names, v, cpylen); log_var_names[cpylen] = '\0'; continue; } } if (CONF_MATCH("thp")) { bool match = false; for (int i = 0; i < thp_mode_names_limit; i++) { if (strncmp(thp_mode_names[i],v, vlen) == 0) { if (!have_madvise_huge) { malloc_conf_error( "No THP support", k, klen, v, vlen); } opt_thp = i; match = true; break; } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } malloc_conf_error("Invalid conf pair", k, klen, v, vlen); #undef CONF_MATCH #undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL #undef CONF_MIN_no #undef CONF_MIN_yes #undef CONF_MAX_no #undef CONF_MAX_yes #undef CONF_HANDLE_T_U #undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P /* Re-enable diagnostic "-Wtype-limits" */ JEMALLOC_DIAGNOSTIC_POP } if (opt_abort_conf && had_conf_error) { malloc_abort_invalid_conf(); } } atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); } static bool malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == malloc_init_recursible)) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. */ return false; } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { /* Busy-wait until the initializing thread completes. 
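 * init_lock is released around each spin iteration below so that the
 * initializing thread can acquire it and make progress.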
*/ spin_t spinner = SPIN_INITIALIZER; do { malloc_mutex_unlock(TSDN_NULL, &init_lock); spin_adaptive(&spinner); malloc_mutex_lock(TSDN_NULL, &init_lock); } while (!malloc_initialized()); return false; } #endif return true; } static bool malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS sc_data_t sc_data = {0}; JEMALLOC_DIAGNOSTIC_POP /* * Ordering here is somewhat tricky; we need sc_boot() first, since that * determines what the size classes will be, and then * malloc_conf_init(), since any slab size tweaking will need to be done * before sz_boot and bin_boot, which assume that the values they read * out of sc_data_global are final. */ sc_boot(&sc_data); unsigned bin_shard_sizes[SC_NBINS]; bin_shard_sizes_boot(bin_shard_sizes); /* * prof_boot0 only initializes opt_prof_prefix. We need to do it before * we parse malloc_conf options, in case malloc_conf parsing overwrites * it. */ if (config_prof) { prof_boot0(); } malloc_conf_init(&sc_data, bin_shard_sizes); sz_boot(&sc_data); bin_boot(&sc_data, bin_shard_sizes); if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) { abort(); } } } if (pages_boot()) { return true; } if (base_boot(TSDN_NULL)) { return true; } if (extent_boot()) { return true; } if (ctl_boot()) { return true; } if (config_prof) { prof_boot1(); } arena_boot(&sc_data); if (tcache_boot(TSDN_NULL)) { return true; } if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, malloc_mutex_rank_exclusive)) { return true; } hook_boot(); /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ narenas_auto = 1; manual_arena_base = narenas_auto + 1; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); /* * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). */ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) == NULL) { return true; } a0 = arena_get(TSDN_NULL, 0, false); malloc_init_state = malloc_init_a0_initialized; return false; } static bool malloc_init_hard_a0(void) { bool ret; malloc_mutex_lock(TSDN_NULL, &init_lock); ret = malloc_init_hard_a0_locked(); malloc_mutex_unlock(TSDN_NULL, &init_lock); return ret; } /* Initialize data structures which may trigger recursive allocation. */ static bool malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; ncpus = malloc_ncpus(); #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ !defined(__native_client__)) /* LinuxThreads' pthread_atfork() allocates. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write("<jemalloc>: Error in pthread_atfork()\n"); if (opt_abort) { abort(); } return true; } #endif if (background_thread_boot0()) { return true; } return false; } static unsigned malloc_narenas_default(void) { assert(ncpus > 0); /* * For SMP systems, create more than one arena per CPU by * default. 
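 * Four arenas per CPU, e.g. 32 arenas on an 8-CPU system; uniprocessor
 * systems get a single arena.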
*/ if (ncpus > 1) { return ncpus << 2; } else { return 1; } } static percpu_arena_mode_t percpu_arena_as_initialized(percpu_arena_mode_t mode) { assert(!malloc_initialized()); assert(mode <= percpu_arena_disabled); if (mode != percpu_arena_disabled) { mode += percpu_arena_mode_enabled_base; } return mode; } static bool malloc_init_narenas(void) { assert(ncpus > 0); if (opt_percpu_arena != percpu_arena_disabled) { if (!have_percpu_arena || malloc_getcpu() < 0) { opt_percpu_arena = percpu_arena_disabled; malloc_printf("<jemalloc>: perCPU arena getcpu() not " "available. Setting narenas to %u.\n", opt_narenas ? opt_narenas : malloc_narenas_default()); if (opt_abort) { abort(); } } else { if (ncpus >= MALLOCX_ARENA_LIMIT) { malloc_printf("<jemalloc>: narenas w/ percpu" " arena beyond limit (%d)\n", ncpus); if (opt_abort) { abort(); } return true; } /* NB: opt_percpu_arena isn't fully initialized yet. */ if (percpu_arena_as_initialized(opt_percpu_arena) == per_phycpu_arena && ncpus % 2 != 0) { malloc_printf("<jemalloc>: invalid " "configuration -- per physical CPU arena " "with odd number (%u) of CPUs (no hyper " "threading?).\n", ncpus); if (opt_abort) abort(); } unsigned n = percpu_arena_ind_limit( percpu_arena_as_initialized(opt_percpu_arena)); if (opt_narenas < n) { /* * If narenas is specified with percpu_arena * enabled, actual narenas is set as the greater * of the two. percpu_arena_choose will be free * to use any of the arenas based on CPU * id. This is conservative (at a small cost) * but ensures correctness. * * If for some reason the ncpus determined at * boot is not the actual number (e.g. because * of affinity setting from numactl), reserving * narenas this way provides a workaround for * percpu_arena. */ opt_narenas = n; } } } if (opt_narenas == 0) { opt_narenas = malloc_narenas_default(); } assert(opt_narenas > 0); narenas_auto = opt_narenas; /* * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). */ if (narenas_auto >= MALLOCX_ARENA_LIMIT) { narenas_auto = MALLOCX_ARENA_LIMIT - 1; malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", narenas_auto); } narenas_total_set(narenas_auto); if (arena_init_huge()) { narenas_total_inc(); } manual_arena_base = narenas_total_get(); return false; } static void malloc_init_percpu(void) { opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); } static bool malloc_init_hard_finish(void) { if (malloc_mutex_boot()) { return true; } malloc_init_state = malloc_init_initialized; malloc_slow_flag_init(); return false; } static void malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { malloc_mutex_assert_owner(tsdn, &init_lock); malloc_mutex_unlock(tsdn, &init_lock); if (reentrancy_set) { assert(!tsdn_null(tsdn)); tsd_t *tsd = tsdn_tsd(tsdn); assert(tsd_reentrancy_level_get(tsd) > 0); post_reentrancy(tsd); } } static bool malloc_init_hard(void) { tsd_t *tsd; #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif malloc_mutex_lock(TSDN_NULL, &init_lock); #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ malloc_init_hard_cleanup(tsdn, reentrancy); \ return ret; if (!malloc_init_hard_needed()) { UNLOCK_RETURN(TSDN_NULL, false, false) } if (malloc_init_state != malloc_init_a0_initialized && malloc_init_hard_a0_locked()) { UNLOCK_RETURN(TSDN_NULL, true, false) } malloc_mutex_unlock(TSDN_NULL, &init_lock); /* Recursive allocation relies on functional tsd. 
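 * (malloc_tsd_boot0() must complete before malloc_init_hard_recursible(),
 * which may allocate, e.g. via pthread_atfork() on some platforms.)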
*/ tsd = malloc_tsd_boot0(); if (tsd == NULL) { return true; } if (malloc_init_hard_recursible()) { return true; } malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); /* Set reentrancy level to 1 during init. */ pre_reentrancy(tsd, NULL); /* Initialize narenas before prof_boot2 (for allocation). */ if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } if (config_prof && prof_boot2(tsd)) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } malloc_init_percpu(); if (malloc_init_hard_finish()) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } post_reentrancy(tsd); malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); witness_assert_lockless(witness_tsd_tsdn( tsd_witness_tsdp_get_unsafe(tsd))); malloc_tsd_boot1(); /* Update TSD after tsd_boot1. */ tsd = tsd_fetch(); if (opt_background_thread) { assert(have_background_thread); /* * Need to finish init & unlock first before creating background * threads (pthread_create depends on malloc). ctl_init (which * sets isthreaded) needs to be called without holding any lock. */ background_thread_ctl_init(tsd_tsdn(tsd)); if (background_thread_create(tsd, 0)) { return true; } } #undef UNLOCK_RETURN return false; } /* * End initialization functions. */ /******************************************************************************/ /* * Begin allocation-path internal functions and data structures. */ /* * Settings determined by the documented behavior of the allocation functions. */ typedef struct static_opts_s static_opts_t; struct static_opts_s { /* Whether or not allocation size may overflow. */ bool may_overflow; /* * Whether to assert that allocations are not of size 0 (after any * bumping). */ bool assert_nonempty_alloc; /* * Whether or not to modify the 'result' argument to malloc in case of * error. */ bool null_out_result_on_error; /* Whether to set errno when we encounter an error condition. */ bool set_errno_on_error; /* * The minimum valid alignment for functions requesting aligned storage. */ size_t min_alignment; /* The error string to use if we oom. */ const char *oom_string; /* The error string to use if the passed-in alignment is invalid. */ const char *invalid_alignment_string; /* * False if we're configured to skip some time-consuming operations. * * This isn't really a malloc "behavior", but it acts as a useful * summary of several other static (or at least, static after program * initialization) options. */ bool slow; /* * Return size. */ bool usize; }; JEMALLOC_ALWAYS_INLINE void static_opts_init(static_opts_t *static_opts) { static_opts->may_overflow = false; static_opts->assert_nonempty_alloc = false; static_opts->null_out_result_on_error = false; static_opts->set_errno_on_error = false; static_opts->min_alignment = 0; static_opts->oom_string = ""; static_opts->invalid_alignment_string = ""; static_opts->slow = false; static_opts->usize = false; } /* * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we * should have one constant here per magic value there. Note however that the * representations need not be related. 
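 * For example, MALLOCX_TCACHE_NONE in mallocx() flags is decoded to
 * TCACHE_IND_NONE below, whatever the two magic values happen to be.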
*/ #define TCACHE_IND_NONE ((unsigned)-1) #define TCACHE_IND_AUTOMATIC ((unsigned)-2) #define ARENA_IND_AUTOMATIC ((unsigned)-1) typedef struct dynamic_opts_s dynamic_opts_t; struct dynamic_opts_s { void **result; size_t usize; size_t num_items; size_t item_size; size_t alignment; bool zero; unsigned tcache_ind; unsigned arena_ind; }; JEMALLOC_ALWAYS_INLINE void dynamic_opts_init(dynamic_opts_t *dynamic_opts) { dynamic_opts->result = NULL; dynamic_opts->usize = 0; dynamic_opts->num_items = 0; dynamic_opts->item_size = 0; dynamic_opts->alignment = 0; dynamic_opts->zero = false; dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; } /* ind is ignored if dopts->alignment > 0. */ JEMALLOC_ALWAYS_INLINE void * imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, size_t size, size_t usize, szind_t ind) { tcache_t *tcache; arena_t *arena; /* Fill in the tcache. */ if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { if (likely(!sopts->slow)) { /* Getting tcache ptr unconditionally. */ tcache = tsd_tcachep_get(tsd); assert(tcache == tcache_get(tsd)); } else { tcache = tcache_get(tsd); } } else if (dopts->tcache_ind == TCACHE_IND_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, dopts->tcache_ind); } /* Fill in the arena. */ if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { /* * In case of automatic arena management, we defer arena * computation until as late as we can, hoping to fill the * allocation out of the tcache. */ arena = NULL; } else { arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); } if (unlikely(dopts->alignment != 0)) { return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, dopts->zero, tcache, arena); } return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, arena, sopts->slow); } JEMALLOC_ALWAYS_INLINE void * imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, size_t usize, szind_t ind) { void *ret; /* * For small allocations, sampling bumps the usize. If so, we allocate * from the ind_large bucket. */ szind_t ind_large; size_t bumped_usize = usize; if (usize <= SC_SMALL_MAXCLASS) { assert(((dopts->alignment == 0) ? sz_s2u(SC_LARGE_MINCLASS) : sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment)) == SC_LARGE_MINCLASS); ind_large = sz_size2index(SC_LARGE_MINCLASS); bumped_usize = sz_s2u(SC_LARGE_MINCLASS); ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, bumped_usize, ind_large); if (unlikely(ret == NULL)) { return NULL; } arena_prof_promote(tsd_tsdn(tsd), ret, usize); } else { ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); } return ret; } /* * Returns true if the allocation will overflow, and false otherwise. Sets * *size to the product either way. */ JEMALLOC_ALWAYS_INLINE bool compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, size_t *size) { /* * This function is just num_items * item_size, except that we may have * to check for overflow. */ if (!may_overflow) { assert(dopts->num_items == 1); *size = dopts->item_size; return false; } /* A size_t with its high-half bits all set to 1. */ static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); *size = dopts->item_size * dopts->num_items; if (unlikely(*size == 0)) { return (dopts->num_items != 0 && dopts->item_size != 0); } /* * We got a non-zero size, but we don't know if we overflowed to get * there. 
To avoid having to do a divide, we'll be clever and note that * if both A and B can be represented in N/2 bits, then their product * can be represented in N bits (without the possibility of overflow). */ if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { return false; } if (likely(*size / dopts->item_size == dopts->num_items)) { return false; } return true; } JEMALLOC_ALWAYS_INLINE int imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { /* Where the actual allocated memory will live. */ void *allocation = NULL; /* Filled in by compute_size_with_overflow below. */ size_t size = 0; /* * For unaligned allocations, we need only ind. For aligned * allocations, or in case of stats or profiling we need usize. * * These are actually dead stores, in that their values are reset before * any branch on their value is taken. Sometimes though, it's * convenient to pass them as arguments before this point. To avoid * undefined behavior then, we initialize them with dummy stores. */ szind_t ind = 0; size_t usize = 0; /* Reentrancy is only checked on slow path. */ int8_t reentrancy_level; /* Compute the amount of memory the user wants. */ if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, &size))) { goto label_oom; } /* Validate the user input. */ if (sopts->assert_nonempty_alloc) { assert (size != 0); } if (unlikely(dopts->alignment < sopts->min_alignment || (dopts->alignment & (dopts->alignment - 1)) != 0)) { goto label_invalid_alignment; } /* This is the beginning of the "core" algorithm. */ if (dopts->alignment == 0) { ind = sz_size2index(size); if (unlikely(ind >= SC_NSIZES)) { goto label_oom; } if (config_stats || (config_prof && opt_prof) || sopts->usize) { usize = sz_index2size(ind); dopts->usize = usize; assert(usize > 0 && usize <= SC_LARGE_MAXCLASS); } } else { usize = sz_sa2u(size, dopts->alignment); dopts->usize = usize; if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { goto label_oom; } } check_entry_exit_locking(tsd_tsdn(tsd)); /* * If we need to handle reentrancy, we can do it out of a * known-initialized arena (i.e. arena 0). */ reentrancy_level = tsd_reentrancy_level_get(tsd); if (sopts->slow && unlikely(reentrancy_level > 0)) { /* * We should never specify particular arenas or tcaches from * within our internal allocations. */ assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || dopts->tcache_ind == TCACHE_IND_NONE); assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); dopts->tcache_ind = TCACHE_IND_NONE; /* We know that arena 0 has already been initialized. */ dopts->arena_ind = 0; } /* If profiling is on, get our profiling context. */ if (config_prof && opt_prof) { /* * Note that if we're going down this path, usize must have been * initialized in the previous if statement. */ prof_tctx_t *tctx = prof_alloc_prep( tsd, usize, prof_active_get_unlocked(), true); alloc_ctx_t alloc_ctx; if (likely((uintptr_t)tctx == (uintptr_t)1U)) { alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS); allocation = imalloc_no_sample( sopts, dopts, tsd, usize, usize, ind); } else if ((uintptr_t)tctx > (uintptr_t)1U) { /* * Note that ind might still be 0 here. This is fine; * imalloc_sample ignores ind if dopts->alignment > 0. 
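 * (In the aligned case the size is derived from usize and
 * dopts->alignment alone; see imalloc_no_sample.)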
*/ allocation = imalloc_sample( sopts, dopts, tsd, usize, ind); alloc_ctx.slab = false; } else { allocation = NULL; } if (unlikely(allocation == NULL)) { prof_alloc_rollback(tsd, tctx, true); goto label_oom; } prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); } else { /* * If dopts->alignment > 0, then ind is still 0, but usize was * computed in the previous if statement. Down the positive * alignment path, imalloc_no_sample ignores ind and size * (relying only on usize). */ allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, ind); if (unlikely(allocation == NULL)) { goto label_oom; } } /* * Allocation has been done at this point. We still have some * post-allocation work to do though. */ assert(dopts->alignment == 0 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); if (config_stats) { assert(usize == isalloc(tsd_tsdn(tsd), allocation)); *tsd_thread_allocatedp_get(tsd) += usize; } if (sopts->slow) { UTRACE(0, size, allocation); } /* Success! */ check_entry_exit_locking(tsd_tsdn(tsd)); *dopts->result = allocation; return 0; label_oom: if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(sopts->oom_string); abort(); } if (sopts->slow) { UTRACE(NULL, size, NULL); } check_entry_exit_locking(tsd_tsdn(tsd)); if (sopts->set_errno_on_error) { set_errno(ENOMEM); } if (sopts->null_out_result_on_error) { *dopts->result = NULL; } return ENOMEM; /* * This label is only jumped to by one goto; we move it out of line * anyways to avoid obscuring the non-error paths, and for symmetry with * the oom case. */ label_invalid_alignment: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(sopts->invalid_alignment_string); abort(); } if (sopts->set_errno_on_error) { set_errno(EINVAL); } if (sopts->slow) { UTRACE(NULL, size, NULL); } check_entry_exit_locking(tsd_tsdn(tsd)); if (sopts->null_out_result_on_error) { *dopts->result = NULL; } return EINVAL; } JEMALLOC_ALWAYS_INLINE bool imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(sopts->oom_string); abort(); } UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); set_errno(ENOMEM); *dopts->result = NULL; return false; } return true; } /* Returns the errno-style error code of the allocation. */ JEMALLOC_ALWAYS_INLINE int imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { return ENOMEM; } /* We always need the tsd. Let's grab it right away. */ tsd_t *tsd = tsd_fetch(); assert(tsd); if (likely(tsd_fast(tsd))) { /* Fast and common path. */ tsd_assert_fast(tsd); sopts->slow = false; return imalloc_body(sopts, dopts, tsd); } else { if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { return ENOMEM; } sopts->slow = true; return imalloc_body(sopts, dopts, tsd); } } JEMALLOC_NOINLINE void * malloc_default(size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.malloc.entry", "size: %zu", size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; imalloc(&sopts, &dopts); /* * Note that this branch gets optimized away -- it immediately follows * the check on tsd_fast that sets sopts.slow. 
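 * As a consequence, allocation hooks are only invoked on the slow
 * path; the common fast path skips hook invocation entirely.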
*/ if (sopts.slow) { uintptr_t args[3] = {size}; hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args); } LOG("core.malloc.exit", "result: %p", ret); return ret; } /******************************************************************************/ /* * Begin malloc(3)-compatible functions. */ /* * malloc() fastpath. * * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit * tcache. If either of these is false, we tail-call to the slowpath, * malloc_default(). Tail-calling is used to avoid any caller-saved * registers. * * fastpath supports ticker and profiling, both of which will also * tail-call to the slowpath if they fire. */ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { LOG("core.malloc.entry", "size: %zu", size); if (tsd_get_allocates() && unlikely(!malloc_initialized())) { return malloc_default(size); } tsd_t *tsd = tsd_get(false); if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) { return malloc_default(size); } tcache_t *tcache = tsd_tcachep_get(tsd); if (unlikely(ticker_trytick(&tcache->gc_ticker))) { return malloc_default(size); } szind_t ind = sz_size2index_lookup(size); size_t usize; if (config_stats || config_prof) { usize = sz_index2size(ind); } /* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */ assert(ind < SC_NBINS); assert(size <= SC_SMALL_MAXCLASS); if (config_prof) { int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); bytes_until_sample -= usize; tsd_bytes_until_sample_set(tsd, bytes_until_sample); if (unlikely(bytes_until_sample < 0)) { /* * Avoid a prof_active check on the fastpath. * If prof_active is false, set bytes_until_sample to * a large value. If prof_active is set to true, * bytes_until_sample will be reset. 
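 * Using SSIZE_MAX keeps the fastpath decrement from going negative
 * again for a long time, effectively disabling sampling checks until
 * profiling is activated.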
*/ if (!prof_active) { tsd_bytes_until_sample_set(tsd, SSIZE_MAX); } return malloc_default(size); } } cache_bin_t *bin = tcache_small_bin_get(tcache, ind); bool tcache_success; void* ret = cache_bin_alloc_easy(bin, &tcache_success); if (tcache_success) { if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; bin->tstats.nrequests++; } if (config_prof) { tcache->prof_accumbytes += usize; } LOG("core.malloc.exit", "result: %p", ret); /* Fastpath success */ return ret; } return malloc_default(size); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) je_posix_memalign(void **memptr, size_t alignment, size_t size) { int ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " "size: %zu", memptr, alignment, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.min_alignment = sizeof(void *); sopts.oom_string = "<jemalloc>: Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; dopts.result = memptr; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = alignment; ret = imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment, (uintptr_t)size}; hook_invoke_alloc(hook_alloc_posix_memalign, *memptr, (uintptr_t)ret, args); } LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, *memptr); return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) je_aligned_alloc(size_t alignment, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", alignment, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.min_alignment = 1; sopts.oom_string = "<jemalloc>: Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = alignment; imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size}; hook_invoke_alloc(hook_alloc_aligned_alloc, ret, (uintptr_t)ret, args); } LOG("core.aligned_alloc.exit", "result: %p", ret); return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) je_calloc(size_t num, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.may_overflow = true; sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; dopts.result = &ret; dopts.num_items = num; dopts.item_size = size; dopts.zero = true; imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); } LOG("core.calloc.exit", "result: %p", ret); return ret; } static void * irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { void *p; if (tctx == NULL) { return NULL; } if (usize <= SC_SMALL_MAXCLASS) { p = iralloc(tsd, old_ptr, old_usize, SC_LARGE_MINCLASS, 0, false, hook_args); if (p == NULL) { return NULL; } 
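/*
 * The sampled small allocation was bumped to SC_LARGE_MINCLASS above;
 * promote it so that it is accounted at the originally requested usize.
 */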
arena_prof_promote(tsd_tsdn(tsd), p, usize); } else { p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, hook_args); } return p; } JEMALLOC_ALWAYS_INLINE void * irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); tctx = prof_alloc_prep(tsd, usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx, hook_args); } else { p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, hook_args); } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return NULL; } prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); return p; } JEMALLOC_ALWAYS_INLINE void ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { if (!slow_path) { tsd_assert_fast(tsd); } check_entry_exit_locking(tsd_tsdn(tsd)); if (tsd_reentrancy_level_get(tsd) != 0) { assert(slow_path); } assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); size_t usize; if (config_prof && opt_prof) { usize = sz_index2size(alloc_ctx.szind); prof_free(tsd, ptr, usize, &alloc_ctx); } else if (config_stats) { usize = sz_index2size(alloc_ctx.szind); } if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; } if (likely(!slow_path)) { idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, false); } else { idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, true); } } JEMALLOC_ALWAYS_INLINE void isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { if (!slow_path) { tsd_assert_fast(tsd); } check_entry_exit_locking(tsd_tsdn(tsd)); if (tsd_reentrancy_level_get(tsd) != 0) { assert(slow_path); } assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); alloc_ctx_t alloc_ctx, *ctx; if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { /* * When cache_oblivious is disabled and ptr is not page aligned, * the allocation was not sampled -- usize can be used to * determine szind directly. 
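 * (Sampled allocations are promoted to SC_LARGE_MINCLASS and are
 * therefore always page-aligned; see imalloc_sample.)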
*/ alloc_ctx.szind = sz_size2index(usize); alloc_ctx.slab = true; ctx = &alloc_ctx; if (config_debug) { alloc_ctx_t dbg_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, &dbg_ctx.slab); assert(dbg_ctx.szind == alloc_ctx.szind); assert(dbg_ctx.slab == alloc_ctx.slab); } } else if (config_prof && opt_prof) { rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind == sz_size2index(usize)); ctx = &alloc_ctx; } else { ctx = NULL; } if (config_prof && opt_prof) { prof_free(tsd, ptr, usize, ctx); } if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; } if (likely(!slow_path)) { isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); } else { isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); } } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t arg_size) { void *ret; tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; size_t size = arg_size; LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (unlikely(size == 0)) { if (ptr != NULL) { /* realloc(ptr, 0) is equivalent to free(ptr). */ UTRACE(ptr, 0, 0); tcache_t *tcache; tsd_t *tsd = tsd_fetch(); if (tsd_reentrancy_level_get(tsd) == 0) { tcache = tcache_get(tsd); } else { tcache = NULL; } uintptr_t args[3] = {(uintptr_t)ptr, size}; hook_invoke_dalloc(hook_dalloc_realloc, ptr, args); ifree(tsd, ptr, tcache, true); LOG("core.realloc.exit", "result: %p", NULL); return NULL; } size = 1; } if (likely(ptr != NULL)) { assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr, (uintptr_t)arg_size, 0, 0}}; alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { usize = sz_s2u(size); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { ret = NULL; } else { ret = irealloc_prof(tsd, ptr, old_usize, usize, &alloc_ctx, &hook_args); } } else { if (config_stats) { usize = sz_s2u(size); } ret = iralloc(tsd, ptr, old_usize, size, 0, false, &hook_args); } tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). 
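 * This branch funnels through the same imalloc() machinery as
 * je_malloc(), differing only in its error string and in the realloc
 * hook invoked below.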
*/ static_opts_t sopts; dynamic_opts_t dopts; static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.oom_string = "<jemalloc>: Error in realloc(): out of memory\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {(uintptr_t)ptr, arg_size}; hook_invoke_alloc(hook_alloc_realloc, ret, (uintptr_t)ret, args); } return ret; } if (unlikely(ret == NULL)) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in realloc(): " "out of memory\n"); abort(); } set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { tsd_t *tsd; assert(usize == isalloc(tsdn, ret)); tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); check_entry_exit_locking(tsdn); LOG("core.realloc.exit", "result: %p", ret); return ret; } JEMALLOC_NOINLINE void free_default(void *ptr) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { /* * We avoid setting up tsd fully (e.g. tcache, arena binding) * based on only free() calls -- other activities trigger the * minimal to full transition. This is because free() may * happen during thread shutdown after tls deallocation: if a * thread never had any malloc activities until then, a * fully-setup tsd won't be destructed properly. */ tsd_t *tsd = tsd_fetch_min(); check_entry_exit_locking(tsd_tsdn(tsd)); tcache_t *tcache; if (likely(tsd_fast(tsd))) { tsd_assert_fast(tsd); /* Unconditionally get tcache ptr on fast path. */ tcache = tsd_tcachep_get(tsd); ifree(tsd, ptr, tcache, false); } else { if (likely(tsd_reentrancy_level_get(tsd) == 0)) { tcache = tcache_get(tsd); } else { tcache = NULL; } uintptr_t args_raw[3] = {(uintptr_t)ptr}; hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); ifree(tsd, ptr, tcache, true); } check_entry_exit_locking(tsd_tsdn(tsd)); } } JEMALLOC_ALWAYS_INLINE bool free_fastpath(void *ptr, size_t size, bool size_hint) { tsd_t *tsd = tsd_get(false); if (unlikely(!tsd || !tsd_fast(tsd))) { return false; } tcache_t *tcache = tsd_tcachep_get(tsd); alloc_ctx_t alloc_ctx; /* * If !config_cache_oblivious, we can check PAGE alignment to * detect sampled objects. Otherwise addresses are * randomized, and we have to look it up in the rtree anyway. * See also isfree(). */ if (!size_hint || config_cache_oblivious) { rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); /* Note: profiled objects will have alloc_ctx.slab set */ if (!res || !alloc_ctx.slab) { return false; } } else { /* * Check for both sizes that are too large, and for sampled objects. * Sampled objects are always page-aligned. The sampled object check * will also check for null ptr. 
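 * (A NULL ptr is page-aligned by this test, so it is rejected here and
 * handled by the slow path.)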
*/ if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) { return false; } alloc_ctx.szind = sz_size2index_lookup(size); } if (unlikely(ticker_trytick(&tcache->gc_ticker))) { return false; } cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind); cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind]; if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) { return false; } if (config_stats) { size_t usize = sz_index2size(alloc_ctx.szind); *tsd_thread_deallocatedp_get(tsd) += usize; } return true; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) { LOG("core.free.entry", "ptr: %p", ptr); if (!free_fastpath(ptr, 0, false)) { free_default(ptr); } LOG("core.free.exit", ""); } /* * End malloc(3)-compatible functions. */ /******************************************************************************/ /* * Begin non-standard override functions. */ #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_memalign(size_t alignment, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.min_alignment = 1; sopts.oom_string = "<jemalloc>: Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; sopts.null_out_result_on_error = true; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = alignment; imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {alignment, size}; hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret, args); } LOG("core.memalign.exit", "result: %p", ret); return ret; } #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_valloc(size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.valloc.entry", "size: %zu\n", size); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.null_out_result_on_error = true; sopts.min_alignment = PAGE; sopts.oom_string = "<jemalloc>: Error allocating aligned memory: out of memory\n"; sopts.invalid_alignment_string = "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; dopts.alignment = PAGE; imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {size}; hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args); } LOG("core.valloc.exit", "result: %p\n", ret); return ret; } #endif #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) /* * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible * to inconsistently reference libc's malloc(3)-compatible functions * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). * * These definitions interpose hooks in glibc. The functions are actually * passed an extra argument for the caller return address, which will be * ignored. 
*/ JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; # endif # ifdef CPU_COUNT /* * To enable static linking with glibc, the libc specific malloc interface must * be implemented also, so none of glibc's malloc.o functions are added to the * link. */ # define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) /* To force macro expansion of je_ prefix before stringification. */ # define PREALIAS(je_fn) ALIAS(je_fn) # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_FREE void __libc_free(void* ptr) PREALIAS(je_free); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC void *__libc_malloc(size_t size) PREALIAS(je_malloc); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); # endif # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC void *__libc_valloc(size_t size) PREALIAS(je_valloc); # endif # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); # endif # undef PREALIAS # undef ALIAS # endif #endif /* * End non-standard override functions. */ /******************************************************************************/ /* * Begin non-standard functions. */ #ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API #define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y #define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \ JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) typedef struct { void *ptr; size_t size; } smallocx_return_t; JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN smallocx_return_t JEMALLOC_NOTHROW /* * The attribute JEMALLOC_ATTR(malloc) cannot be used due to: * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488 */ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) (size_t size, int flags) { /* * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be * used here because it makes writing beyond the `size` * of the `ptr` undefined behavior, but the objective * of this function is to allow writing beyond `size` * up to `smallocx_return_t::size`. 
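 * For example, a caller may request size bytes, receive ret.size >= size,
 * and legitimately use all ret.size bytes of the allocation.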
*/ smallocx_return_t ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.assert_nonempty_alloc = true; sopts.null_out_result_on_error = true; sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; sopts.usize = true; dopts.result = &ret.ptr; dopts.num_items = 1; dopts.item_size = size; if (unlikely(flags != 0)) { if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); } dopts.zero = MALLOCX_ZERO_GET(flags); if ((flags & MALLOCX_TCACHE_MASK) != 0) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { dopts.tcache_ind = TCACHE_IND_NONE; } else { dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); } } else { dopts.tcache_ind = TCACHE_IND_AUTOMATIC; } if ((flags & MALLOCX_ARENA_MASK) != 0) dopts.arena_ind = MALLOCX_ARENA_GET(flags); } imalloc(&sopts, &dopts); assert(dopts.usize == je_nallocx(size, flags)); ret.size = dopts.usize; LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size); return ret; } #undef JEMALLOC_SMALLOCX_CONCAT_HELPER #undef JEMALLOC_SMALLOCX_CONCAT_HELPER2 #endif JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_mallocx(size_t size, int flags) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); static_opts_init(&sopts); dynamic_opts_init(&dopts); sopts.assert_nonempty_alloc = true; sopts.null_out_result_on_error = true; sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; if (unlikely(flags != 0)) { if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); } dopts.zero = MALLOCX_ZERO_GET(flags); if ((flags & MALLOCX_TCACHE_MASK) != 0) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { dopts.tcache_ind = TCACHE_IND_NONE; } else { dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); } } else { dopts.tcache_ind = TCACHE_IND_AUTOMATIC; } if ((flags & MALLOCX_ARENA_MASK) != 0) dopts.arena_ind = MALLOCX_ARENA_GET(flags); } imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {size, flags}; hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret, args); } LOG("core.mallocx.exit", "result: %p", ret); return ret; } static void * irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { void *p; if (tctx == NULL) { return NULL; } if (usize <= SC_SMALL_MAXCLASS) { p = iralloct(tsdn, old_ptr, old_usize, SC_LARGE_MINCLASS, alignment, zero, tcache, arena, hook_args); if (p == NULL) { return NULL; } arena_prof_promote(tsdn, p, usize); } else { p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, tcache, arena, hook_args); } return p; } JEMALLOC_ALWAYS_INLINE void * irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache, arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); tctx = prof_alloc_prep(tsd, *usize, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, 
*usize, alignment, zero, tcache, arena, tctx, hook_args); } else { p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, zero, tcache, arena, hook_args); } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, false); return NULL; } if (p == old_ptr && alignment != 0) { /* * The allocation did not move, so it is possible that the size * class is smaller than would guarantee the requested * alignment, and that the alignment constraint was * serendipitously satisfied. Additionally, old_usize may not * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. */ *usize = isalloc(tsd_tsdn(tsd), p); } prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, old_usize, old_tctx); return p; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_rallocx(void *ptr, size_t size, int flags) { void *p; tsd_t *tsd; size_t usize; size_t old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; arena_t *arena; tcache_t *tcache; LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); assert(ptr != NULL); assert(size != 0); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(arena == NULL)) { goto label_oom; } } else { arena = NULL; } if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } } else { tcache = tcache_get(tsd); } alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags, 0}}; if (config_prof && opt_prof) { usize = (alignment == 0) ? 
sz_s2u(size) : sz_sa2u(size, alignment); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { goto label_oom; } p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, zero, tcache, arena, &alloc_ctx, &hook_args); if (unlikely(p == NULL)) { goto label_oom; } } else { p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, zero, tcache, arena, &hook_args); if (unlikely(p == NULL)) { goto label_oom; } if (config_stats) { usize = isalloc(tsd_tsdn(tsd), p); } } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.rallocx.exit", "result: %p", p); return p; label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); abort(); } UTRACE(ptr, size, 0); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.rallocx.exit", "result: %p", NULL); return NULL; } JEMALLOC_ALWAYS_INLINE size_t ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t newsize; if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero, &newsize)) { return old_usize; } return newsize; } static size_t ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; if (tctx == NULL) { return old_usize; } usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, zero); return usize; } JEMALLOC_ALWAYS_INLINE size_t ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { size_t usize_max, usize; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. * Therefore, compute its maximum possible value and use that in * prof_alloc_prep() to decide whether to capture a backtrace. * prof_realloc() will use the actual usize to decide whether to sample. */ if (alignment == 0) { usize_max = sz_s2u(size+extra); assert(usize_max > 0 && usize_max <= SC_LARGE_MAXCLASS); } else { usize_max = sz_sa2u(size+extra, alignment); if (unlikely(usize_max == 0 || usize_max > SC_LARGE_MAXCLASS)) { /* * usize_max is out of range, and chances are that * allocation will fail, but use the maximum possible * value and carry on with prof_alloc_prep(), just in * case allocation succeeds. 
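 * (Only the backtrace-capture decision uses this over-estimate;
 * prof_realloc() bases the final sampling decision on the actual
 * usize.)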
*/ usize_max = SC_LARGE_MAXCLASS; } } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero, tctx); } else { usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero); } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); return usize; } prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, old_tctx); return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; size_t usize, old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " "flags: %d", ptr, size, extra, flags); assert(ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); /* * The API explicitly absolves itself of protecting against (size + * extra) numerical overflow, but we may need to clamp extra to avoid * exceeding SC_LARGE_MAXCLASS. * * Ordinarily, size limit checking is handled deeper down, but here we * have to check as part of (size + extra) clamping, since we need the * clamped value in the above helper functions. */ if (unlikely(size > SC_LARGE_MAXCLASS)) { usize = old_usize; goto label_not_resized; } if (unlikely(SC_LARGE_MAXCLASS - size < extra)) { extra = SC_LARGE_MAXCLASS - size; } if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, alignment, zero, &alloc_ctx); } else { usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero); } if (unlikely(usize == old_usize)) { goto label_not_resized; } if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } label_not_resized: if (unlikely(!tsd_fast(tsd))) { uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags}; hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, usize, (uintptr_t)usize, args); } UTRACE(ptr, size, ptr); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.xallocx.exit", "result: %zu", usize); return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, int flags) { size_t usize; tsdn_t *tsdn; LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); assert(malloc_initialized() || IS_INITIALIZER); assert(ptr != NULL); tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); if (config_debug || force_ivsalloc) { usize = ivsalloc(tsdn, ptr); assert(force_ivsalloc || usize != 0); } else { usize = isalloc(tsdn, ptr); } check_entry_exit_locking(tsdn); LOG("core.sallocx.exit", "result: %zu", usize); return usize; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags) { LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch(); bool fast = tsd_fast(tsd); check_entry_exit_locking(tsd_tsdn(tsd)); tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { /* Not allowed to be reentrant and 
specify a custom tcache. */ assert(tsd_reentrancy_level_get(tsd) == 0); if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } } else { if (likely(fast)) { tcache = tsd_tcachep_get(tsd); assert(tcache == tcache_get(tsd)); } else { if (likely(tsd_reentrancy_level_get(tsd) == 0)) { tcache = tcache_get(tsd); } else { tcache = NULL; } } } UTRACE(ptr, 0, 0); if (likely(fast)) { tsd_assert_fast(tsd); ifree(tsd, ptr, tcache, false); } else { uintptr_t args_raw[3] = {(uintptr_t)ptr, flags}; hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw); ifree(tsd, ptr, tcache, true); } check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.dallocx.exit", ""); } JEMALLOC_ALWAYS_INLINE size_t inallocx(tsdn_t *tsdn, size_t size, int flags) { check_entry_exit_locking(tsdn); size_t usize; if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { usize = sz_s2u(size); } else { usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); } check_entry_exit_locking(tsdn); return usize; } JEMALLOC_NOINLINE void sdallocx_default(void *ptr, size_t size, int flags) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch(); bool fast = tsd_fast(tsd); size_t usize = inallocx(tsd_tsdn(tsd), size, flags); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); check_entry_exit_locking(tsd_tsdn(tsd)); tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { /* Not allowed to be reentrant and specify a custom tcache. */ assert(tsd_reentrancy_level_get(tsd) == 0); if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } } else { if (likely(fast)) { tcache = tsd_tcachep_get(tsd); assert(tcache == tcache_get(tsd)); } else { if (likely(tsd_reentrancy_level_get(tsd) == 0)) { tcache = tcache_get(tsd); } else { tcache = NULL; } } } UTRACE(ptr, 0, 0); if (likely(fast)) { tsd_assert_fast(tsd); isfree(tsd, ptr, usize, tcache, false); } else { uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags}; hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw); isfree(tsd, ptr, usize, tcache, true); } check_entry_exit_locking(tsd_tsdn(tsd)); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags) { LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); if (flags !=0 || !free_fastpath(ptr, size, true)) { sdallocx_default(ptr, size, flags); } LOG("core.sdallocx.exit", ""); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { size_t usize; tsdn_t *tsdn; assert(size != 0); if (unlikely(malloc_init())) { LOG("core.nallocx.exit", "result: %zu", ZU(0)); return 0; } tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); usize = inallocx(tsdn, size, flags); if (unlikely(usize > SC_LARGE_MAXCLASS)) { LOG("core.nallocx.exit", "result: %zu", ZU(0)); return 0; } check_entry_exit_locking(tsdn); LOG("core.nallocx.exit", "result: %zu", usize); return usize; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; LOG("core.mallctl.entry", "name: %s", name); if (unlikely(malloc_init())) { LOG("core.mallctl.exit", "result: %d", EAGAIN); return EAGAIN; } tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.mallctl.exit", "result: %d", ret); return ret; } 
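/*
 * Editor's usage sketch (illustrative only, not part of upstream jemalloc):
 * a caller-side tour of the non-standard entry points defined above, using
 * the je_-prefixed names this crate builds. The mallctl names "epoch" and
 * "stats.allocated" are the documented ones; everything else here is
 * hypothetical demo code and is never called.
 */
static void
example_nonstd_api_usage(void) {
	/* 100 bytes, 64-byte aligned, zero-filled. */
	void *p = je_mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	if (p == NULL) {
		return;
	}
	/* Grow (possibly moving) to 400 bytes, keeping the alignment. */
	void *q = je_rallocx(p, 400, MALLOCX_ALIGN(64));
	if (q == NULL) {
		je_dallocx(p, 0);
		return;
	}
	p = q;
	/* Try to grow in place toward 400 + 600 bytes; returns the
	 * resulting usable size whether or not the resize happened. */
	size_t usize = je_xallocx(p, 400, 600, 0);
	/* Sized deallocation lets the free fast path skip the size lookup. */
	je_sdallocx(p, usize, 0);

	/* Refresh the stats snapshot, then read the global counter. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	je_mallctl("epoch", &epoch, &sz, &epoch, sz);
	size_t allocated;
	sz = sizeof(allocated);
	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		malloc_printf("<jemalloc>: allocated: %zu\n", allocated);
	}
}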
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { int ret; LOG("core.mallctlnametomib.entry", "name: %s", name); if (unlikely(malloc_init())) { LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); return EAGAIN; } tsd_t *tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_nametomib(tsd, name, mibp, miblenp); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.mallctlnametomib.exit", "result: %d", ret); return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; LOG("core.mallctlbymib.entry", ""); if (unlikely(malloc_init())) { LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); return EAGAIN; } tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.mallctlbymib.exit", "result: %d", ret); return ret; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { tsdn_t *tsdn; LOG("core.malloc_stats_print.entry", ""); tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); stats_print(write_cb, cbopaque, opts); check_entry_exit_locking(tsdn); LOG("core.malloc_stats_print.exit", ""); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; tsdn_t *tsdn; LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); assert(malloc_initialized() || IS_INITIALIZER); tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); if (unlikely(ptr == NULL)) { ret = 0; } else { if (config_debug || force_ivsalloc) { ret = ivsalloc(tsdn, ptr); assert(force_ivsalloc || ret != 0); } else { ret = isalloc(tsdn, ptr); } } check_entry_exit_locking(tsdn); LOG("core.malloc_usable_size.exit", "result: %zu", ret); return ret; } /* * End non-standard functions. */ /******************************************************************************/ /* * The following functions are used by threading libraries for protection of * malloc during fork(). */ /* * If an application creates a thread before doing any allocation in the main * thread, then calls fork(2) in the main thread followed by memory allocation * in the child process, a race can occur that results in deadlock within the * child: the main thread may have forked while the created thread had * partially initialized the allocator. Ordinarily jemalloc prevents * fork/malloc races via the following functions it registers during * initialization using pthread_atfork(), but of course that does no good if * the allocator isn't fully initialized at fork time. The following library * constructor is a partial solution to this problem. It may still be possible * to trigger the deadlock described above, but doing so would involve forking * via a library constructor that runs before jemalloc's runs. */ #ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void jemalloc_constructor(void) { malloc_init(); } #endif #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_prefork(void) #else JEMALLOC_EXPORT void _malloc_prefork(void) #endif { tsd_t *tsd; unsigned i, j, narenas; arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) { return; } #endif assert(malloc_initialized()); tsd = tsd_fetch(); narenas = narenas_total_get(); witness_prefork(tsd_witness_tsdp_get(tsd)); /* Acquire all mutexes in a safe order. 
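	 * (Editor's note: these handlers take effect because initialization
	 * registers them, on platforms without JEMALLOC_MUTEX_INIT_CB,
	 * roughly as
	 *     pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	 *         jemalloc_postfork_child);
	 * so everything below runs around every fork(2) in the process.)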
*/ ctl_prefork(tsd_tsdn(tsd)); tcache_prefork(tsd_tsdn(tsd)); malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); if (have_background_thread) { background_thread_prefork0(tsd_tsdn(tsd)); } prof_prefork0(tsd_tsdn(tsd)); if (have_background_thread) { background_thread_prefork1(tsd_tsdn(tsd)); } /* Break arena prefork into stages to preserve lock order. */ for (i = 0; i < 8; i++) { for (j = 0; j < narenas; j++) { if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != NULL) { switch (i) { case 0: arena_prefork0(tsd_tsdn(tsd), arena); break; case 1: arena_prefork1(tsd_tsdn(tsd), arena); break; case 2: arena_prefork2(tsd_tsdn(tsd), arena); break; case 3: arena_prefork3(tsd_tsdn(tsd), arena); break; case 4: arena_prefork4(tsd_tsdn(tsd), arena); break; case 5: arena_prefork5(tsd_tsdn(tsd), arena); break; case 6: arena_prefork6(tsd_tsdn(tsd), arena); break; case 7: arena_prefork7(tsd_tsdn(tsd), arena); break; default: not_reached(); } } } } prof_prefork1(tsd_tsdn(tsd)); tsd_prefork(tsd); } #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_postfork_parent(void) #else JEMALLOC_EXPORT void _malloc_postfork(void) #endif { tsd_t *tsd; unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) { return; } #endif assert(malloc_initialized()); tsd = tsd_fetch(); tsd_postfork_parent(tsd); witness_postfork_parent(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { arena_postfork_parent(tsd_tsdn(tsd), arena); } } prof_postfork_parent(tsd_tsdn(tsd)); if (have_background_thread) { background_thread_postfork_parent(tsd_tsdn(tsd)); } malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); tcache_postfork_parent(tsd_tsdn(tsd)); ctl_postfork_parent(tsd_tsdn(tsd)); } void jemalloc_postfork_child(void) { tsd_t *tsd; unsigned i, narenas; assert(malloc_initialized()); tsd = tsd_fetch(); tsd_postfork_child(tsd); witness_postfork_child(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { arena_postfork_child(tsd_tsdn(tsd), arena); } } prof_postfork_child(tsd_tsdn(tsd)); if (have_background_thread) { background_thread_postfork_child(tsd_tsdn(tsd)); } malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); tcache_postfork_child(tsd_tsdn(tsd)); ctl_postfork_child(tsd_tsdn(tsd)); } /******************************************************************************/ jemalloc-sys-0.3.2/rep/src/jemalloc_cpp.cpp010064400007650000024000000057231344617474100170700ustar0000000000000000#include #include #define JEMALLOC_CPP_CPP_ #ifdef __cplusplus extern "C" { #endif #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #ifdef __cplusplus } #endif // All operators in this file are exported. // Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt // thunk? // // extern __typeof (sdallocx) sdallocx_int // __attribute ((alias ("sdallocx"), // visibility ("hidden"))); // // ... but it needs to work with jemalloc namespaces. 
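// Editor's note (illustrative): with these operators linked in, a plain
//     T *t = new T; delete t;
// routes through je_malloc()/je_free() below, and a compiler that emits
// C++14 sized deallocation instead reaches the sized overloads, i.e.
// je_sdallocx(ptr, sizeof(T), 0), skipping the usable-size lookup.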
void *operator new(std::size_t size); void *operator new[](std::size_t size); void *operator new(std::size_t size, const std::nothrow_t &) noexcept; void *operator new[](std::size_t size, const std::nothrow_t &) noexcept; void operator delete(void *ptr) noexcept; void operator delete[](void *ptr) noexcept; void operator delete(void *ptr, const std::nothrow_t &) noexcept; void operator delete[](void *ptr, const std::nothrow_t &) noexcept; #if __cpp_sized_deallocation >= 201309 /* C++14's sized-delete operators. */ void operator delete(void *ptr, std::size_t size) noexcept; void operator delete[](void *ptr, std::size_t size) noexcept; #endif JEMALLOC_NOINLINE static void * handleOOM(std::size_t size, bool nothrow) { void *ptr = nullptr; while (ptr == nullptr) { std::new_handler handler; // GCC-4.8 and clang 4.0 do not have std::get_new_handler. { static std::mutex mtx; std::lock_guard lock(mtx); handler = std::set_new_handler(nullptr); std::set_new_handler(handler); } if (handler == nullptr) break; try { handler(); } catch (const std::bad_alloc &) { break; } ptr = je_malloc(size); } if (ptr == nullptr && !nothrow) std::__throw_bad_alloc(); return ptr; } template JEMALLOC_ALWAYS_INLINE void * newImpl(std::size_t size) noexcept(IsNoExcept) { void *ptr = je_malloc(size); if (likely(ptr != nullptr)) return ptr; return handleOOM(size, IsNoExcept); } void * operator new(std::size_t size) { return newImpl(size); } void * operator new[](std::size_t size) { return newImpl(size); } void * operator new(std::size_t size, const std::nothrow_t &) noexcept { return newImpl(size); } void * operator new[](std::size_t size, const std::nothrow_t &) noexcept { return newImpl(size); } void operator delete(void *ptr) noexcept { je_free(ptr); } void operator delete[](void *ptr) noexcept { je_free(ptr); } void operator delete(void *ptr, const std::nothrow_t &) noexcept { je_free(ptr); } void operator delete[](void *ptr, const std::nothrow_t &) noexcept { je_free(ptr); } #if __cpp_sized_deallocation >= 201309 void operator delete(void *ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) { return; } je_sdallocx(ptr, size, /*flags=*/0); } void operator delete[](void *ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) { return; } je_sdallocx(ptr, size, /*flags=*/0); } #endif // __cpp_sized_deallocation jemalloc-sys-0.3.2/rep/src/large.c010064400007650000024000000270521344617474100151710ustar0000000000000000#define JEMALLOC_LARGE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/util.h" /******************************************************************************/ void * large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { assert(usize == sz_s2u(usize)); return large_palloc(tsdn, arena, usize, CACHELINE, zero); } void * large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { size_t ausize; extent_t *extent; bool is_zeroed; UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); assert(!tsdn_null(tsdn) || arena != NULL); ausize = sz_sa2u(usize, alignment); if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) { return NULL; } if (config_fill && unlikely(opt_zero)) { zero = true; } /* * Copy zero into is_zeroed and pass the copy when allocating the * extent, so that it is possible to make correct junk/zero fill 
* decisions below, even if is_zeroed ends up true when zero is false. */ is_zeroed = zero; if (likely(!tsdn_null(tsdn))) { arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize); } if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, arena, usize, alignment, &is_zeroed)) == NULL) { return NULL; } /* See comments in arena_bin_slabs_full_insert(). */ if (!arena_is_auto(arena)) { /* Insert extent into large. */ malloc_mutex_lock(tsdn, &arena->large_mtx); extent_list_append(&arena->large, extent); malloc_mutex_unlock(tsdn, &arena->large_mtx); } if (config_prof && arena_prof_accum(tsdn, arena, usize)) { prof_idump(tsdn); } if (zero) { assert(is_zeroed); } else if (config_fill && unlikely(opt_junk_alloc)) { memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, extent_usize_get(extent)); } arena_decay_tick(tsdn, arena); return extent_addr_get(extent); } static void large_dalloc_junk_impl(void *ptr, size_t size) { memset(ptr, JEMALLOC_FREE_JUNK, size); } large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl; static void large_dalloc_maybe_junk_impl(void *ptr, size_t size) { if (config_fill && have_dss && unlikely(opt_junk_free)) { /* * Only bother junk filling if the extent isn't about to be * unmapped. */ if (opt_retain || (have_dss && extent_in_dss(ptr))) { large_dalloc_junk(ptr, size); } } } large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk = large_dalloc_maybe_junk_impl; static bool large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { arena_t *arena = extent_arena_get(extent); size_t oldusize = extent_usize_get(extent); extent_hooks_t *extent_hooks = extent_hooks_get(arena); size_t diff = extent_size_get(extent) - (usize + sz_large_pad); assert(oldusize > usize); if (extent_hooks->split == NULL) { return true; } /* Split excess pages. */ if (diff != 0) { extent_t *trail = extent_split_wrapper(tsdn, arena, &extent_hooks, extent, usize + sz_large_pad, sz_size2index(usize), false, diff, SC_NSIZES, false); if (trail == NULL) { return true; } if (config_fill && unlikely(opt_junk_free)) { large_dalloc_maybe_junk(extent_addr_get(trail), extent_size_get(trail)); } arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail); } arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); return false; } static bool large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, bool zero) { arena_t *arena = extent_arena_get(extent); size_t oldusize = extent_usize_get(extent); extent_hooks_t *extent_hooks = extent_hooks_get(arena); size_t trailsize = usize - oldusize; if (extent_hooks->merge == NULL) { return true; } if (config_fill && unlikely(opt_zero)) { zero = true; } /* * Copy zero into is_zeroed_trail and pass the copy when allocating the * extent, so that it is possible to make correct junk/zero fill * decisions below, even if is_zeroed_trail ends up true when zero is * false. 
*/ bool is_zeroed_trail = zero; bool commit = true; extent_t *trail; bool new_mapping; if ((trail = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_dirty, extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL || (trail = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) { if (config_stats) { new_mapping = false; } } else { if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) { return true; } if (config_stats) { new_mapping = true; } } if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); return true; } rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); szind_t szind = sz_size2index(usize); extent_szind_set(extent, szind); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_addr_get(extent), szind, false); if (config_stats && new_mapping) { arena_stats_mapped_add(tsdn, &arena->stats, trailsize); } if (zero) { if (config_cache_oblivious) { /* * Zero the trailing bytes of the original allocation's * last page, since they are in an indeterminate state. * There will always be trailing bytes, because ptr's * offset from the beginning of the extent is a multiple * of CACHELINE in [0 .. PAGE). */ void *zbase = (void *) ((uintptr_t)extent_addr_get(extent) + oldusize); void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + PAGE)); size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; assert(nzero > 0); memset(zbase, 0, nzero); } assert(is_zeroed_trail); } else if (config_fill && unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize), JEMALLOC_ALLOC_JUNK, usize - oldusize); } arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); return false; } bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, size_t usize_max, bool zero) { size_t oldusize = extent_usize_get(extent); /* The following should have been caught by callers. */ assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ assert(oldusize >= SC_LARGE_MINCLASS && usize_max >= SC_LARGE_MINCLASS); if (usize_max > oldusize) { /* Attempt to expand the allocation in-place. */ if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } /* Try again, this time with usize_min. */ if (usize_min < usize_max && usize_min > oldusize && large_ralloc_no_move_expand(tsdn, extent, usize_min, zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } } /* * Avoid moving the allocation if the existing extent size accommodates * the new size. */ if (oldusize >= usize_min && oldusize <= usize_max) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } /* Attempt to shrink the allocation in-place. 
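	 * (Editor's summary of the order of attempts above: expand in place
	 * to usize_max, then to usize_min; keep the extent as-is when
	 * oldusize already falls in [usize_min, usize_max]; only then try
	 * shrinking. Returning true means the caller must move the
	 * allocation.)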
*/ if (oldusize > usize_max) { if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { arena_decay_tick(tsdn, extent_arena_get(extent)); return false; } } return true; } static void * large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { if (alignment <= CACHELINE) { return large_malloc(tsdn, arena, usize, zero); } return large_palloc(tsdn, arena, usize, alignment, zero); } void * large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args) { extent_t *extent = iealloc(tsdn, ptr); size_t oldusize = extent_usize_get(extent); /* The following should have been caught by callers. */ assert(usize > 0 && usize <= SC_LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ assert(oldusize >= SC_LARGE_MINCLASS && usize >= SC_LARGE_MINCLASS); /* Try to avoid moving the allocation. */ if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { hook_invoke_expand(hook_args->is_realloc ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize, usize, (uintptr_t)ptr, hook_args->args); return extent_addr_get(extent); } /* * usize and old size are different enough that we need to use a * different size class. In that case, fall back to allocating new * space and copying. */ void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero); if (ret == NULL) { return NULL; } hook_invoke_alloc(hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, hook_args->args); hook_invoke_dalloc(hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); size_t copysize = (usize < oldusize) ? usize : oldusize; memcpy(ret, extent_addr_get(extent), copysize); isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); return ret; } /* * junked_locked indicates whether the extent's data have been junk-filled, and * whether the arena's large_mtx is currently held. */ static void large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, bool junked_locked) { if (!junked_locked) { /* See comments in arena_bin_slabs_full_insert(). */ if (!arena_is_auto(arena)) { malloc_mutex_lock(tsdn, &arena->large_mtx); extent_list_remove(&arena->large, extent); malloc_mutex_unlock(tsdn, &arena->large_mtx); } large_dalloc_maybe_junk(extent_addr_get(extent), extent_usize_get(extent)); } else { /* Only hold the large_mtx if necessary. 
 */
		if (!arena_is_auto(arena)) {
			malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
		}
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}

static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}

nstime_t
large_prof_alloc_time_get(const extent_t *extent) {
	return extent_prof_alloc_time_get(extent);
}

void
large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
	extent_prof_alloc_time_set(extent, t);
}

jemalloc-sys-0.3.2/rep/src/log.c

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/log.h"

char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
atomic_b_t log_init_done = ATOMIC_INIT(false);

/*
 * Returns a pointer one past the end of the current segment, i.e. to the '|'
 * delimiter or to the terminating '\0'.
 */
static const char *
log_var_extract_segment(const char *segment_begin) {
	const char *end;
	for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
	}
	return end;
}

static bool
log_var_matches_segment(const char *segment_begin, const char *segment_end,
    const char *log_var_begin, const char *log_var_end) {
	assert(segment_begin <= segment_end);
	assert(log_var_begin < log_var_end);

	ptrdiff_t segment_len = segment_end - segment_begin;
	ptrdiff_t log_var_len = log_var_end - log_var_begin;
	/* The special '.' segment matches everything. */
	if (segment_len == 1 && *segment_begin == '.') {
		return true;
	}
	if (segment_len == log_var_len) {
		return strncmp(segment_begin, log_var_begin, segment_len) == 0;
	} else if (segment_len < log_var_len) {
		return strncmp(segment_begin, log_var_begin, segment_len) == 0
		    && log_var_begin[segment_len] == '.';
	} else {
		return false;
	}
}

unsigned
log_var_update_state(log_var_t *log_var) {
	const char *log_var_begin = log_var->name;
	const char *log_var_end = log_var->name + strlen(log_var->name);

	/* Pointer to the beginning of the current segment. */
	const char *segment_begin = log_var_names;

	/*
	 * If log_init_done is false, we haven't parsed the malloc conf yet. To
	 * avoid log-spew, we default to not displaying anything.
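	 * (Editor's worked example: with MALLOC_CONF containing
	 * log:core.malloc|arena, log_var_names holds "core.malloc|arena";
	 * "core.malloc.entry" is then enabled by the prefix rule in
	 * log_var_matches_segment(), "core.free" matches neither segment,
	 * and a lone "." segment would enable every log variable.)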
*/ if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) { return LOG_INITIALIZED_NOT_ENABLED; } while (true) { const char *segment_end = log_var_extract_segment( segment_begin); assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE); if (log_var_matches_segment(segment_begin, segment_end, log_var_begin, log_var_end)) { atomic_store_u(&log_var->state, LOG_ENABLED, ATOMIC_RELAXED); return LOG_ENABLED; } if (*segment_end == '\0') { /* Hit the end of the segment string with no match. */ atomic_store_u(&log_var->state, LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED); return LOG_INITIALIZED_NOT_ENABLED; } /* Otherwise, skip the delimiter and continue. */ segment_begin = segment_end + 1; } } jemalloc-sys-0.3.2/rep/src/malloc_io.c010064400007650000024000000345141344617474100160360ustar0000000000000000#define JEMALLOC_MALLOC_IO_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/util.h" #ifdef assert # undef assert #endif #ifdef not_reached # undef not_reached #endif #ifdef not_implemented # undef not_implemented #endif #ifdef assert_not_implemented # undef assert_not_implemented #endif /* * Define simple versions of assertion macros that won't recurse in case * of assertion failures in malloc_*printf(). */ #define assert(e) do { \ if (config_debug && !(e)) { \ malloc_write(": Failed assertion\n"); \ abort(); \ } \ } while (0) #define not_reached() do { \ if (config_debug) { \ malloc_write(": Unreachable code reached\n"); \ abort(); \ } \ unreachable(); \ } while (0) #define not_implemented() do { \ if (config_debug) { \ malloc_write(": Not implemented\n"); \ abort(); \ } \ } while (0) #define assert_not_implemented(e) do { \ if (unlikely(config_debug && !(e))) { \ not_implemented(); \ } \ } while (0) /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static void wrtmessage(void *cbopaque, const char *s); #define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); #define D2S_BUFSIZE (1 + U2S_BUFSIZE) static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); #define O2S_BUFSIZE (1 + U2S_BUFSIZE) static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); #define X2S_BUFSIZE (2 + U2S_BUFSIZE) static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p); /******************************************************************************/ /* malloc_message() setup. */ static void wrtmessage(void *cbopaque, const char *s) { malloc_write_fd(STDERR_FILENO, s, strlen(s)); } JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); /* * Wrapper around malloc_message() that avoids the need for * je_malloc_message(...) throughout the code. */ void malloc_write(const char *s) { if (je_malloc_message != NULL) { je_malloc_message(NULL, s); } else { wrtmessage(NULL, s); } } /* * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so * provide a wrapper. 
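 * (Editor's cross-reference: typical use mirrors os_pages_unmap() in
 * pages.c,
 *
 *     char buf[BUFERROR_BUF];
 *     buferror(get_errno(), buf, sizeof(buf));
 *
 * with the glibc/POSIX strerror_r() split hidden behind the wrapper.)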
*/ int buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPSTR)buf, (DWORD)buflen, NULL); return 0; #elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } return 0; #else return strerror_r(err, buf, buflen); #endif } uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { uintmax_t ret, digit; unsigned b; bool neg; const char *p, *ns; p = nptr; if (base < 0 || base == 1 || base > 36) { ns = p; set_errno(EINVAL); ret = UINTMAX_MAX; goto label_return; } b = base; /* Swallow leading whitespace and get sign, if any. */ neg = false; while (true) { switch (*p) { case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': p++; break; case '-': neg = true; /* Fall through. */ case '+': p++; /* Fall through. */ default: goto label_prefix; } } /* Get prefix, if any. */ label_prefix: /* * Note where the first non-whitespace/sign character is so that it is * possible to tell whether any digits are consumed (e.g., " 0" vs. * " -x"). */ ns = p; if (*p == '0') { switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': if (b == 0) { b = 8; } if (b == 8) { p++; } break; case 'X': case 'x': switch (p[2]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': if (b == 0) { b = 16; } if (b == 16) { p += 2; } break; default: break; } break; default: p++; ret = 0; goto label_return; } } if (b == 0) { b = 10; } /* Convert. */ ret = 0; while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { uintmax_t pret = ret; ret *= b; ret += digit; if (ret < pret) { /* Overflow. */ set_errno(ERANGE); ret = UINTMAX_MAX; goto label_return; } p++; } if (neg) { ret = (uintmax_t)(-((intmax_t)ret)); } if (p == ns) { /* No conversion performed. */ set_errno(EINVAL); ret = UINTMAX_MAX; goto label_return; } label_return: if (endptr != NULL) { if (p == ns) { /* No characters were converted. */ *endptr = (char *)nptr; } else { *endptr = (char *)p; } } return ret; } static char * u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { unsigned i; i = U2S_BUFSIZE - 1; s[i] = '\0'; switch (base) { case 10: do { i--; s[i] = "0123456789"[x % (uint64_t)10]; x /= (uint64_t)10; } while (x > 0); break; case 16: { const char *digits = (uppercase) ? "0123456789ABCDEF" : "0123456789abcdef"; do { i--; s[i] = digits[x & 0xf]; x >>= 4; } while (x > 0); break; } default: { const char *digits = (uppercase) ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" : "0123456789abcdefghijklmnopqrstuvwxyz"; assert(base >= 2 && base <= 36); do { i--; s[i] = digits[x % (uint64_t)base]; x /= (uint64_t)base; } while (x > 0); }} *slen_p = U2S_BUFSIZE - 1 - i; return &s[i]; } static char * d2s(intmax_t x, char sign, char *s, size_t *slen_p) { bool neg; if ((neg = (x < 0))) { x = -x; } s = u2s(x, 10, false, s, slen_p); if (neg) { sign = '-'; } switch (sign) { case '-': if (!neg) { break; } /* Fall through. 
*/ case ' ': case '+': s--; (*slen_p)++; *s = sign; break; default: not_reached(); } return s; } static char * o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { s = u2s(x, 8, false, s, slen_p); if (alt_form && *s != '0') { s--; (*slen_p)++; *s = '0'; } return s; } static char * x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { s = u2s(x, 16, uppercase, s, slen_p); if (alt_form) { s -= 2; (*slen_p) += 2; memcpy(s, uppercase ? "0X" : "0x", 2); } return s; } size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { size_t i; const char *f; #define APPEND_C(c) do { \ if (i < size) { \ str[i] = (c); \ } \ i++; \ } while (0) #define APPEND_S(s, slen) do { \ if (i < size) { \ size_t cpylen = (slen <= size - i) ? slen : size - i; \ memcpy(&str[i], s, cpylen); \ } \ i += slen; \ } while (0) #define APPEND_PADDED_S(s, slen, width, left_justify) do { \ /* Left padding. */ \ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ (size_t)width - slen : 0); \ if (!left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ } \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ } \ } \ } while (0) #define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ break; \ case '?' | 0x80: \ val = va_arg(ap, unsigned int); \ break; \ case 'l': \ val = va_arg(ap, long); \ break; \ case 'l' | 0x80: \ val = va_arg(ap, unsigned long); \ break; \ case 'q': \ val = va_arg(ap, long long); \ break; \ case 'q' | 0x80: \ val = va_arg(ap, unsigned long long); \ break; \ case 'j': \ val = va_arg(ap, intmax_t); \ break; \ case 'j' | 0x80: \ val = va_arg(ap, uintmax_t); \ break; \ case 't': \ val = va_arg(ap, ptrdiff_t); \ break; \ case 'z': \ val = va_arg(ap, ssize_t); \ break; \ case 'z' | 0x80: \ val = va_arg(ap, size_t); \ break; \ case 'p': /* Synthetic; used for %p. */ \ val = va_arg(ap, uintptr_t); \ break; \ default: \ not_reached(); \ val = 0; \ } \ } while (0) i = 0; f = format; while (true) { switch (*f) { case '\0': goto label_out; case '%': { bool alt_form = false; bool left_justify = false; bool plus_space = false; bool plus_plus = false; int prec = -1; int width = -1; unsigned char len = '?'; char *s; size_t slen; f++; /* Flags. */ while (true) { switch (*f) { case '#': assert(!alt_form); alt_form = true; break; case '-': assert(!left_justify); left_justify = true; break; case ' ': assert(!plus_space); plus_space = true; break; case '+': assert(!plus_plus); plus_plus = true; break; default: goto label_width; } f++; } /* Width. */ label_width: switch (*f) { case '*': width = va_arg(ap, int); f++; if (width < 0) { left_justify = true; width = -width; } break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uwidth; set_errno(0); uwidth = malloc_strtoumax(f, (char **)&f, 10); assert(uwidth != UINTMAX_MAX || get_errno() != ERANGE); width = (int)uwidth; break; } default: break; } /* Width/precision separator. */ if (*f == '.') { f++; } else { goto label_length; } /* Precision. 
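	 * (Editor's note: prec is parsed for every conversion, but of the
	 * specifiers handled below only %s consumes it, as a byte limit on
	 * the argument string; numeric precision is accepted and ignored.)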
*/ switch (*f) { case '*': prec = va_arg(ap, int); f++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uprec; set_errno(0); uprec = malloc_strtoumax(f, (char **)&f, 10); assert(uprec != UINTMAX_MAX || get_errno() != ERANGE); prec = (int)uprec; break; } default: break; } /* Length. */ label_length: switch (*f) { case 'l': f++; if (*f == 'l') { len = 'q'; f++; } else { len = 'l'; } break; case 'q': case 'j': case 't': case 'z': len = *f; f++; break; default: break; } /* Conversion specifier. */ switch (*f) { case '%': /* %% */ APPEND_C(*f); f++; break; case 'd': case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; GET_ARG_NUMERIC(val, len); s = d2s(val, (plus_plus ? '+' : (plus_space ? ' ' : '-')), buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'o': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[O2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = o2s(val, alt_form, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'u': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[U2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = u2s(val, 10, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'x': case 'X': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = x2s(val, alt_form, *f == 'X', buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'c': { unsigned char val; char buf[2]; assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); val = va_arg(ap, int); buf[0] = val; buf[1] = '\0'; APPEND_PADDED_S(buf, 1, width, left_justify); f++; break; } case 's': assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); slen = (prec < 0) ? strlen(s) : (size_t)prec; APPEND_PADDED_S(s, slen, width, left_justify); f++; break; case 'p': { uintmax_t val; char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, 'p'); s = x2s(val, true, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } default: not_reached(); } break; } default: { APPEND_C(*f); f++; break; }} } label_out: if (i < size) { str[i] = '\0'; } else { str[size - 1] = '\0'; } #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC return i; } JEMALLOC_FORMAT_PRINTF(3, 4) size_t malloc_snprintf(char *str, size_t size, const char *format, ...) { size_t ret; va_list ap; va_start(ap, format); ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); return ret; } void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { /* * The caller did not provide an alternate write_cb callback * function, so use the default one. malloc_write() is an * inline function, so use malloc_message() directly here. */ write_cb = (je_malloc_message != NULL) ? je_malloc_message : wrtmessage; cbopaque = NULL; } malloc_vsnprintf(buf, sizeof(buf), format, ap); write_cb(cbopaque, buf); } /* * Print to a callback function in such a way as to (hopefully) avoid memory * allocation. */ JEMALLOC_FORMAT_PRINTF(3, 4) void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(write_cb, cbopaque, format, ap); va_end(ap); } /* Print to stderr in such a way as to avoid memory allocation. 
*/ JEMALLOC_FORMAT_PRINTF(1, 2) void malloc_printf(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); } /* * Restore normal assertion macros, in order to make it possible to compile all * C files as a single concatenation. */ #undef assert #undef not_reached #undef not_implemented #undef assert_not_implemented #include "jemalloc/internal/assert.h" jemalloc-sys-0.3.2/rep/src/mutex.c010064400007650000024000000131551344617474100152400ustar0000000000000000#define JEMALLOC_MUTEX_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/spin.h" #ifndef _CRT_SPINCOUNT #define _CRT_SPINCOUNT 4000 #endif /******************************************************************************/ /* Data. */ #ifdef JEMALLOC_LAZY_LOCK bool isthreaded = false; #endif #ifdef JEMALLOC_MUTEX_INIT_CB static bool postpone_init = true; static malloc_mutex_t *postponed_mutexes = NULL; #endif /******************************************************************************/ /* * We intercept pthread_create() calls in order to toggle isthreaded if the * process goes multi-threaded. */ #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg) { return pthread_create_wrapper(thread, attr, start_routine, arg); } #endif /******************************************************************************/ #ifdef JEMALLOC_MUTEX_INIT_CB JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif void malloc_mutex_lock_slow(malloc_mutex_t *mutex) { mutex_prof_data_t *data = &mutex->prof_data; nstime_t before = NSTIME_ZERO_INITIALIZER; if (ncpus == 1) { goto label_spin_done; } int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; do { spin_cpu_spinwait(); if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED) && !malloc_mutex_trylock_final(mutex)) { data->n_spin_acquired++; return; } } while (cnt++ < max_cnt); if (!config_stats) { /* Only spin is useful when stats is off. */ malloc_mutex_lock_final(mutex); return; } label_spin_done: nstime_update(&before); /* Copy before to after to avoid clock skews. */ nstime_t after; nstime_copy(&after, &before); uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED) + 1; /* One last try as above two calls may take quite some cycles. */ if (!malloc_mutex_trylock_final(mutex)) { atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); data->n_spin_acquired++; return; } /* True slow path. */ malloc_mutex_lock_final(mutex); /* Update more slow-path only counters. 
*/ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); nstime_update(&after); nstime_t delta; nstime_copy(&delta, &after); nstime_subtract(&delta, &before); data->n_wait_times++; nstime_add(&data->tot_wait_time, &delta); if (nstime_compare(&data->max_wait_time, &delta) < 0) { nstime_copy(&data->max_wait_time, &delta); } if (n_thds > data->max_n_thds) { data->max_n_thds = n_thds; } } static void mutex_prof_data_init(mutex_prof_data_t *data) { memset(data, 0, sizeof(mutex_prof_data_t)); nstime_init(&data->max_wait_time, 0); nstime_init(&data->tot_wait_time, 0); data->prev_owner = NULL; } void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_assert_owner(tsdn, mutex); mutex_prof_data_init(&mutex->prof_data); } static int mutex_addr_comp(const witness_t *witness1, void *mutex1, const witness_t *witness2, void *mutex2) { assert(mutex1 != NULL); assert(mutex2 != NULL); uintptr_t mu1int = (uintptr_t)mutex1; uintptr_t mu2int = (uintptr_t)mutex2; if (mu1int < mu2int) { return -1; } else if (mu1int == mu2int) { return 0; } else { return 1; } } bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { mutex_prof_data_init(&mutex->prof_data); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, _CRT_SPINCOUNT)) { return true; } # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) if (postpone_init) { mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, bootstrap_calloc) != 0) { return true; } } #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) { return true; } pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); return true; } pthread_mutexattr_destroy(&attr); #endif if (config_debug) { mutex->lock_order = lock_order; if (lock_order == malloc_mutex_address_ordered) { witness_init(&mutex->witness, name, rank, mutex_addr_comp, mutex); } else { witness_init(&mutex->witness, name, rank, NULL, NULL); } } return false; } void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_lock(tsdn, mutex); } void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_unlock(tsdn, mutex); } void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB malloc_mutex_unlock(tsdn, mutex); #else if (malloc_mutex_init(mutex, mutex->witness.name, mutex->witness.rank, mutex->lock_order)) { malloc_printf(": Error re-initializing mutex in " "child\n"); if (opt_abort) { abort(); } } #endif } bool malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, bootstrap_calloc) != 0) { return true; } postponed_mutexes = postponed_mutexes->postponed_next; } #endif return false; } jemalloc-sys-0.3.2/rep/src/mutex_pool.c010064400007650000024000000007411344617474100162660ustar0000000000000000#define JEMALLOC_MUTEX_POOL_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { for (int i = 
0; i < MUTEX_POOL_SIZE; ++i) { if (malloc_mutex_init(&pool->mutexes[i], name, rank, malloc_mutex_address_ordered)) { return true; } } return false; } jemalloc-sys-0.3.2/rep/src/nstime.c010064400007650000024000000066531344617474100154020ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/assert.h" #define BILLION UINT64_C(1000000000) #define MILLION UINT64_C(1000000) void nstime_init(nstime_t *time, uint64_t ns) { time->ns = ns; } void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { time->ns = sec * BILLION + nsec; } uint64_t nstime_ns(const nstime_t *time) { return time->ns; } uint64_t nstime_msec(const nstime_t *time) { return time->ns / MILLION; } uint64_t nstime_sec(const nstime_t *time) { return time->ns / BILLION; } uint64_t nstime_nsec(const nstime_t *time) { return time->ns % BILLION; } void nstime_copy(nstime_t *time, const nstime_t *source) { *time = *source; } int nstime_compare(const nstime_t *a, const nstime_t *b) { return (a->ns > b->ns) - (a->ns < b->ns); } void nstime_add(nstime_t *time, const nstime_t *addend) { assert(UINT64_MAX - time->ns >= addend->ns); time->ns += addend->ns; } void nstime_iadd(nstime_t *time, uint64_t addend) { assert(UINT64_MAX - time->ns >= addend); time->ns += addend; } void nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { assert(nstime_compare(time, subtrahend) >= 0); time->ns -= subtrahend->ns; } void nstime_isubtract(nstime_t *time, uint64_t subtrahend) { assert(time->ns >= subtrahend); time->ns -= subtrahend; } void nstime_imultiply(nstime_t *time, uint64_t multiplier) { assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); time->ns *= multiplier; } void nstime_idivide(nstime_t *time, uint64_t divisor) { assert(divisor != 0); time->ns /= divisor; } uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor) { assert(divisor->ns != 0); return time->ns / divisor->ns; } #ifdef _WIN32 # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { FILETIME ft; uint64_t ticks_100ns; GetSystemTimeAsFileTime(&ft); ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; nstime_init(time, ticks_100ns * 100); } #elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE) # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); nstime_init2(time, ts.tv_sec, ts.tv_nsec); } #elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC) # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); nstime_init2(time, ts.tv_sec, ts.tv_nsec); } #elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME) # define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { nstime_init(time, mach_absolute_time()); } #else # define NSTIME_MONOTONIC false static void nstime_get(nstime_t *time) { struct timeval tv; gettimeofday(&tv, NULL); nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); } #endif static bool nstime_monotonic_impl(void) { return NSTIME_MONOTONIC; #undef NSTIME_MONOTONIC } nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; static bool nstime_update_impl(nstime_t *time) { nstime_t old_time; nstime_copy(&old_time, time); nstime_get(time); /* Handle non-monotonic clocks. 
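	 * (Editor's note: if the source clock stepped backwards, the old
	 * timestamp is restored and true is returned, so a caller can
	 * discard the bogus interval instead of computing a negative one.)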
*/ if (unlikely(nstime_compare(&old_time, time) > 0)) { nstime_copy(time, &old_time); return true; } return false; } nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl; jemalloc-sys-0.3.2/rep/src/pages.c010064400007650000024000000360541344617474100152000ustar0000000000000000#define JEMALLOC_PAGES_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/malloc_io.h" #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT #include #ifdef __FreeBSD__ #include #endif #endif /******************************************************************************/ /* Data. */ /* Actual operating system page size, detected during bootstrap, <= PAGE. */ static size_t os_page; #ifndef _WIN32 # define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) # define PAGES_PROT_DECOMMIT (PROT_NONE) static int mmap_flags; #endif static bool os_overcommits; const char *thp_mode_names[] = { "default", "always", "never", "not supported" }; thp_mode_t opt_thp = THP_MODE_DEFAULT; thp_mode_t init_system_thp_mode; /* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */ static bool pages_can_purge_lazy_runtime = true; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ static void os_pages_unmap(void *addr, size_t size); /******************************************************************************/ static void * os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); assert(ALIGNMENT_CEILING(size, os_page) == size); assert(size != 0); if (os_overcommits) { *commit = true; } void *ret; #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is * given, it fails and returns NULL. */ ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), PAGE_READWRITE); #else /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ { int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; ret = mmap(addr, size, prot, mmap_flags, -1, 0); } assert(ret != NULL); if (ret == MAP_FAILED) { ret = NULL; } else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. 
*/ os_pages_unmap(ret, size); ret = NULL; } #endif assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && ret == addr)); return ret; } static void * os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, bool *commit) { void *ret = (void *)((uintptr_t)addr + leadsize); assert(alloc_size >= leadsize + size); #ifdef _WIN32 os_pages_unmap(addr, alloc_size); void *new_addr = os_pages_map(ret, size, PAGE, commit); if (new_addr == ret) { return ret; } if (new_addr != NULL) { os_pages_unmap(new_addr, size); } return NULL; #else size_t trailsize = alloc_size - leadsize - size; if (leadsize != 0) { os_pages_unmap(addr, leadsize); } if (trailsize != 0) { os_pages_unmap((void *)((uintptr_t)ret + size), trailsize); } return ret; #endif } static void os_pages_unmap(void *addr, size_t size) { assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); assert(ALIGNMENT_CEILING(size, os_page) == size); #ifdef _WIN32 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) #else if (munmap(addr, size) == -1) #endif { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); malloc_printf(": Error in " #ifdef _WIN32 "VirtualFree" #else "munmap" #endif "(): %s\n", buf); if (opt_abort) { abort(); } } } static void * pages_map_slow(size_t size, size_t alignment, bool *commit) { size_t alloc_size = size + alignment - os_page; /* Beware size_t wrap-around. */ if (alloc_size < size) { return NULL; } void *ret; do { void *pages = os_pages_map(NULL, alloc_size, alignment, commit); if (pages == NULL) { return NULL; } size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages; ret = os_pages_trim(pages, alloc_size, leadsize, size, commit); } while (ret == NULL); assert(ret != NULL); assert(PAGE_ADDR2BASE(ret) == ret); return ret; } void * pages_map(void *addr, size_t size, size_t alignment, bool *commit) { assert(alignment >= PAGE); assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr); #if defined(__FreeBSD__) && defined(MAP_EXCL) /* * FreeBSD has mechanisms both to mmap at specific address without * touching existing mappings, and to mmap with specific alignment. */ { if (os_overcommits) { *commit = true; } int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; int flags = mmap_flags; if (addr != NULL) { flags |= MAP_FIXED | MAP_EXCL; } else { unsigned alignment_bits = ffs_zu(alignment); assert(alignment_bits > 1); flags |= MAP_ALIGNED(alignment_bits - 1); } void *ret = mmap(addr, size, prot, flags, -1, 0); if (ret == MAP_FAILED) { ret = NULL; } return ret; } #endif /* * Ideally, there would be a way to specify alignment to mmap() (like * NetBSD has), but in the absence of such a feature, we have to work * hard to efficiently create aligned mappings. The reliable, but * slow method is to create a mapping that is over-sized, then trim the * excess. However, that always results in one or two calls to * os_pages_unmap(), and it can leave holes in the process's virtual * memory map if memory grows downward. * * Optimistically try mapping precisely the right amount before falling * back to the slow method, with the expectation that the optimistic * approach works most of the time. 
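	 * (Editor's worked example, assuming 4 KiB OS pages: a 16 KiB
	 * request with 64 KiB alignment over-sizes to 16K + 64K - 4K =
	 * 76 KiB in pages_map_slow(); the misaligned lead and the excess
	 * trail are unmapped, leaving a 64 KiB-aligned 16 KiB mapping.)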
*/ void *ret = os_pages_map(addr, size, os_page, commit); if (ret == NULL || ret == addr) { return ret; } assert(addr == NULL); if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) { os_pages_unmap(ret, size); return pages_map_slow(size, alignment, commit); } assert(PAGE_ADDR2BASE(ret) == ret); return ret; } void pages_unmap(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); os_pages_unmap(addr, size); } static bool pages_commit_impl(void *addr, size_t size, bool commit) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); if (os_overcommits) { return true; } #ifdef _WIN32 return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); #else { int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, -1, 0); if (result == MAP_FAILED) { return true; } if (result != addr) { /* * We succeeded in mapping memory, but not in the right * place. */ os_pages_unmap(result, size); return true; } return false; } #endif } bool pages_commit(void *addr, size_t size) { return pages_commit_impl(addr, size, true); } bool pages_decommit(void *addr, size_t size) { return pages_commit_impl(addr, size, false); } bool pages_purge_lazy(void *addr, size_t size) { assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); assert(PAGE_CEILING(size) == size); if (!pages_can_purge_lazy) { return true; } if (!pages_can_purge_lazy_runtime) { /* * Built with lazy purge enabled, but detected it was not * supported on the current system. */ return true; } #ifdef _WIN32 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); return false; #elif defined(JEMALLOC_PURGE_MADVISE_FREE) return (madvise(addr, size, # ifdef MADV_FREE MADV_FREE # else JEMALLOC_MADV_FREE # endif ) != 0); #elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) return (madvise(addr, size, MADV_DONTNEED) != 0); #else not_reached(); #endif } bool pages_purge_forced(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); if (!pages_can_purge_forced) { return true; } #if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) return (madvise(addr, size, MADV_DONTNEED) != 0); #elif defined(JEMALLOC_MAPS_COALESCE) /* Try to overlay a new demand-zeroed mapping. 
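 *
 * (Reader's note, not upstream text: on JEMALLOC_MAPS_COALESCE platforms,
 * pages_commit() re-mmap()s the range in place,
 *
 *   mmap(addr, size, PAGES_PROT_COMMIT, mmap_flags | MAP_FIXED, -1, 0);
 *
 * and replacing a private anonymous mapping discards its old contents,
 * so the kernel hands back demand-zeroed pages, which is exactly the
 * effect a forced purge requires.)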
*/ return pages_commit(addr, size); #else not_reached(); #endif } static bool pages_huge_impl(void *addr, size_t size, bool aligned) { if (aligned) { assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size); } #ifdef JEMALLOC_HAVE_MADVISE_HUGE return (madvise(addr, size, MADV_HUGEPAGE) != 0); #else return true; #endif } bool pages_huge(void *addr, size_t size) { return pages_huge_impl(addr, size, true); } static bool pages_huge_unaligned(void *addr, size_t size) { return pages_huge_impl(addr, size, false); } static bool pages_nohuge_impl(void *addr, size_t size, bool aligned) { if (aligned) { assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size); } #ifdef JEMALLOC_HAVE_MADVISE_HUGE return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); #else return false; #endif } bool pages_nohuge(void *addr, size_t size) { return pages_nohuge_impl(addr, size, true); } static bool pages_nohuge_unaligned(void *addr, size_t size) { return pages_nohuge_impl(addr, size, false); } bool pages_dontdump(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); #ifdef JEMALLOC_MADVISE_DONTDUMP return madvise(addr, size, MADV_DONTDUMP) != 0; #else return false; #endif } bool pages_dodump(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); #ifdef JEMALLOC_MADVISE_DONTDUMP return madvise(addr, size, MADV_DODUMP) != 0; #else return false; #endif } static size_t os_page_detect(void) { #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); return si.dwPageSize; #elif defined(__FreeBSD__) /* * This returns the value obtained from * the auxv vector, avoiding a syscall. */ return getpagesize(); #else long result = sysconf(_SC_PAGESIZE); if (result == -1) { return LG_PAGE; } return (size_t)result; #endif } #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT static bool os_overcommits_sysctl(void) { int vm_overcommit; size_t sz; sz = sizeof(vm_overcommit); #if defined(__FreeBSD__) && defined(VM_OVERCOMMIT) int mib[2]; mib[0] = CTL_VM; mib[1] = VM_OVERCOMMIT; if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) { return false; /* Error. */ } #else if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { return false; /* Error. */ } #endif return ((vm_overcommit & 0x3) == 0); } #endif #ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY /* * Use syscall(2) rather than {open,read,close}(2) when possible to avoid * reentry during bootstrapping if another library has interposed system call * wrappers. */ static bool os_overcommits_proc(void) { int fd; char buf[1]; #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) #if defined(O_CLOEXEC) fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); #else fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); if (fd != -1) { fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); } #endif #elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat) #if defined(O_CLOEXEC) fd = (int)syscall(SYS_openat, AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); #else fd = (int)syscall(SYS_openat, AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY); if (fd != -1) { fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); } #endif #else #if defined(O_CLOEXEC) fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); #else fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); if (fd != -1) { fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); } #endif #endif if (fd == -1) { return false; /* Error. 
*/ } ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) syscall(SYS_close, fd); #else close(fd); #endif if (nread < 1) { return false; /* Error. */ } /* * /proc/sys/vm/overcommit_memory meanings: * 0: Heuristic overcommit. * 1: Always overcommit. * 2: Never overcommit. */ return (buf[0] == '0' || buf[0] == '1'); } #endif void pages_set_thp_state (void *ptr, size_t size) { if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) { return; } assert(opt_thp != thp_mode_not_supported && init_system_thp_mode != thp_mode_not_supported); if (opt_thp == thp_mode_always && init_system_thp_mode != thp_mode_never) { assert(init_system_thp_mode == thp_mode_default); pages_huge_unaligned(ptr, size); } else if (opt_thp == thp_mode_never) { assert(init_system_thp_mode == thp_mode_default || init_system_thp_mode == thp_mode_always); pages_nohuge_unaligned(ptr, size); } } static void init_thp_state(void) { if (!have_madvise_huge) { if (metadata_thp_enabled() && opt_abort) { malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n"); abort(); } goto label_error; } static const char sys_state_madvise[] = "always [madvise] never\n"; static const char sys_state_always[] = "[always] madvise never\n"; static const char sys_state_never[] = "always madvise [never]\n"; char buf[sizeof(sys_state_madvise)]; #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) int fd = (int)syscall(SYS_open, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); #else int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); #endif if (fd == -1) { goto label_error; } ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) syscall(SYS_close, fd); #else close(fd); #endif if (nread < 0) { goto label_error; } if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) { init_system_thp_mode = thp_mode_default; } else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) { init_system_thp_mode = thp_mode_always; } else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) { init_system_thp_mode = thp_mode_never; } else { goto label_error; } return; label_error: opt_thp = init_system_thp_mode = thp_mode_not_supported; } bool pages_boot(void) { os_page = os_page_detect(); if (os_page > PAGE) { malloc_write("<jemalloc>: Unsupported system page size\n"); if (opt_abort) { abort(); } return true; } #ifndef _WIN32 mmap_flags = MAP_PRIVATE | MAP_ANON; #endif #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT os_overcommits = os_overcommits_sysctl(); #elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) os_overcommits = os_overcommits_proc(); # ifdef MAP_NORESERVE if (os_overcommits) { mmap_flags |= MAP_NORESERVE; } # endif #else os_overcommits = false; #endif init_thp_state(); #ifdef __FreeBSD__ /* * FreeBSD doesn't need the check; madvise(2) is known to work. */ #else /* Detect lazy purge runtime support.
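 *
 * The probe that follows deserves a sketch (explanatory note, not
 * upstream text): map one scratch page, attempt a single lazy purge, and
 * record any failure in pages_can_purge_lazy_runtime:
 *
 *   void *p = os_pages_map(NULL, PAGE, PAGE, &committed);
 *   if (pages_purge_lazy(p, PAGE)) {   (returns true on failure)
 *       pages_can_purge_lazy_runtime = false;
 *   }
 *   os_pages_unmap(p, PAGE);
 *
 * This catches kernels whose headers advertise MADV_FREE but whose
 * running version rejects it.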
*/ if (pages_can_purge_lazy) { bool committed = false; void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed); if (madv_free_page == NULL) { return true; } assert(pages_can_purge_lazy_runtime); if (pages_purge_lazy(madv_free_page, PAGE)) { pages_can_purge_lazy_runtime = false; } os_pages_unmap(madv_free_page, PAGE); } #endif return false; } jemalloc-sys-0.3.2/rep/src/prng.c010064400007650000024000000002041344617474100150350ustar0000000000000000#define JEMALLOC_PRNG_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" jemalloc-sys-0.3.2/rep/src/prof.c010064400007650000024000002301641344617474100150450ustar0000000000000000#define JEMALLOC_PROF_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/emitter.h" /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND #define UNW_LOCAL_ONLY #include <libunwind.h> #endif #ifdef JEMALLOC_PROF_LIBGCC /* * We have a circular dependency -- jemalloc_internal.h tells us if we should * use libgcc's unwinding functionality, but after we've included that, we've * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. */ #undef _Unwind_Backtrace #include <unwind.h> #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) #endif /******************************************************************************/ /* Data. */ bool opt_prof = false; bool opt_prof_active = true; bool opt_prof_thread_active_init = true; size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; bool opt_prof_gdump = false; bool opt_prof_final = false; bool opt_prof_leak = false; bool opt_prof_accum = false; bool opt_prof_log = false; char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* * Initialized as opt_prof_active, and accessed via * prof_active_[gs]et{_unlocked,}(). */ bool prof_active; static malloc_mutex_t prof_active_mtx; /* * Initialized as opt_prof_thread_active_init, and accessed via * prof_thread_active_init_[gs]et(). */ static bool prof_thread_active_init; static malloc_mutex_t prof_thread_active_init_mtx; /* * Initialized as opt_prof_gdump, and accessed via * prof_gdump_[gs]et{_unlocked,}(). */ bool prof_gdump_val; static malloc_mutex_t prof_gdump_mtx; uint64_t prof_interval = 0; size_t lg_prof_sample; typedef enum prof_logging_state_e prof_logging_state_t; enum prof_logging_state_e { prof_logging_state_stopped, prof_logging_state_started, prof_logging_state_dumping }; /* * - stopped: log_start never called, or previous log_stop has completed. * - started: log_start called, log_stop not called yet. Allocations are logged. * - dumping: log_stop called but not finished; samples are not logged anymore. */ prof_logging_state_t prof_logging_state = prof_logging_state_stopped; #ifdef JEMALLOC_JET static bool prof_log_dummy = false; #endif /* Incremented for every log file that is output. */ static uint64_t log_seq = 0; static char log_filename[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* Timestamp for most recent call to log_start().
*/ static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER; /* Increment these when adding to the log_bt and log_thr linked lists. */ static size_t log_bt_index = 0; static size_t log_thr_index = 0; /* Linked list node definitions. These are only used in prof.c. */ typedef struct prof_bt_node_s prof_bt_node_t; struct prof_bt_node_s { prof_bt_node_t *next; size_t index; prof_bt_t bt; /* Variable size backtrace vector pointed to by bt. */ void *vec[1]; }; typedef struct prof_thr_node_s prof_thr_node_t; struct prof_thr_node_s { prof_thr_node_t *next; size_t index; uint64_t thr_uid; /* Variable size based on thr_name_sz. */ char name[1]; }; typedef struct prof_alloc_node_s prof_alloc_node_t; /* This is output when logging sampled allocations. */ struct prof_alloc_node_s { prof_alloc_node_t *next; /* Indices into an array of thread data. */ size_t alloc_thr_ind; size_t free_thr_ind; /* Indices into an array of backtraces. */ size_t alloc_bt_ind; size_t free_bt_ind; uint64_t alloc_time_ns; uint64_t free_time_ns; size_t usize; }; /* * Created on the first call to prof_log_start and deleted on prof_log_stop. * These are the backtraces and threads that have already been logged by an * allocation. */ static bool log_tables_initialized = false; static ckh_t log_bt_node_set; static ckh_t log_thr_node_set; /* Store linked lists for logged data. */ static prof_bt_node_t *log_bt_first = NULL; static prof_bt_node_t *log_bt_last = NULL; static prof_thr_node_t *log_thr_first = NULL; static prof_thr_node_t *log_thr_last = NULL; static prof_alloc_node_t *log_alloc_first = NULL; static prof_alloc_node_t *log_alloc_last = NULL; /* Protects the prof_logging_state and any log_{...} variable. */ static malloc_mutex_t log_mtx; /* * Table of mutexes that are shared among gctx's. These are leaf locks, so * there is no problem with using them for more than one gctx at the same time. * The primary motivation for this sharing though is that gctx's are ephemeral, * and destroying mutexes causes complications for systems that allocate when * creating/destroying mutexes. */ static malloc_mutex_t *gctx_locks; static atomic_u_t cum_gctxs; /* Atomic counter. */ /* * Table of mutexes that are shared among tdata's. No operations require * holding multiple tdata locks, so there is no problem with using them for more * than one tdata at the same time, even though a gctx lock may be acquired * while holding a tdata lock. */ static malloc_mutex_t *tdata_locks; /* * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data * structure that knows about all backtraces currently captured. */ static ckh_t bt2gctx; /* Non static to enable profiling. */ malloc_mutex_t bt2gctx_mtx; /* * Tree of all extant prof_tdata_t structures, regardless of state, * {attached,detached,expired}. */ static prof_tdata_tree_t tdatas; static malloc_mutex_t tdatas_mtx; static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; static malloc_mutex_t prof_dump_seq_mtx; static uint64_t prof_dump_seq; static uint64_t prof_dump_iseq; static uint64_t prof_dump_mseq; static uint64_t prof_dump_useq; /* * This buffer is rather large for stack allocation, so use a single buffer for * all profile dumps. */ static malloc_mutex_t prof_dump_mtx; static char prof_dump_buf[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PROF_DUMP_BUFSIZE #else 1 #endif ]; static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. 
*/ static bool prof_booted = false; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /* Hashtable functions for log_bt_node_set and log_thr_node_set. */ static void prof_thr_node_hash(const void *key, size_t r_hash[2]); static bool prof_thr_node_keycomp(const void *k1, const void *k2); static void prof_bt_node_hash(const void *key, size_t r_hash[2]); static bool prof_bt_node_keycomp(const void *k1, const void *k2); /******************************************************************************/ /* Red-black trees. */ static int prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; uint64_t b_thr_uid = b->thr_uid; int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); if (ret == 0) { uint64_t a_thr_discrim = a->thr_discrim; uint64_t b_thr_discrim = b->thr_discrim; ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < b_thr_discrim); if (ret == 0) { uint64_t a_tctx_uid = a->tctx_uid; uint64_t b_tctx_uid = b->tctx_uid; ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid); } } return ret; } rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link, prof_tctx_comp) static int prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { unsigned a_len = a->bt.len; unsigned b_len = b->bt.len; unsigned comp_len = (a_len < b_len) ? a_len : b_len; int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); if (ret == 0) { ret = (a_len > b_len) - (a_len < b_len); } return ret; } rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, prof_gctx_comp) static int prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { int ret; uint64_t a_uid = a->thr_uid; uint64_t b_uid = b->thr_uid; ret = ((a_uid > b_uid) - (a_uid < b_uid)); if (ret == 0) { uint64_t a_discrim = a->thr_discrim; uint64_t b_discrim = b->thr_discrim; ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); } return ret; } rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, prof_tdata_comp) /******************************************************************************/ void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; cassert(config_prof); if (updated) { /* * Compute a new sample threshold. This isn't very important in * practice, because this function is rarely executed, so the * potential for sample bias is minimal except in contrived * programs. */ tdata = prof_tdata_get(tsd, true); if (tdata != NULL) { prof_sample_threshold_update(tdata); } } if ((uintptr_t)tctx > (uintptr_t)1U) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } } void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { prof_tctx_set(tsdn, ptr, usize, NULL, tctx); /* Get the current time and set this in the extent_t. We'll read this * when free() is called. 
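 *
 * (Aside, not upstream text: the free path reads this timestamp back via
 * prof_alloc_time_get() and pairs it with a fresh nstime_update(), so a
 * sampled allocation's lifetime can be recorded as
 * free_time_ns - alloc_time_ns in the prof_alloc_node_t that
 * prof_try_log() emits.)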
*/ nstime_t t = NSTIME_ZERO_INITIALIZER; nstime_update(&t); prof_alloc_time_set(tsdn, ptr, NULL, t); malloc_mutex_lock(tsdn, tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { tctx->cnts.accumobjs++; tctx->cnts.accumbytes += usize; } tctx->prepared = false; malloc_mutex_unlock(tsdn, tctx->tdata->lock); } static size_t prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { assert(prof_logging_state == prof_logging_state_started); malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); prof_bt_node_t dummy_node; dummy_node.bt = *bt; prof_bt_node_t *node; /* See if this backtrace is already cached in the table. */ if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), (void **)(&node), NULL)) { size_t sz = offsetof(prof_bt_node_t, vec) + (bt->len * sizeof(void *)); prof_bt_node_t *new_node = (prof_bt_node_t *) ialloc(tsd, sz, sz_size2index(sz), false, true); if (log_bt_first == NULL) { log_bt_first = new_node; log_bt_last = new_node; } else { log_bt_last->next = new_node; log_bt_last = new_node; } new_node->next = NULL; new_node->index = log_bt_index; /* * Copy the backtrace: bt is inside a tdata or gctx, which * might die before prof_log_stop is called. */ new_node->bt.len = bt->len; memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); new_node->bt.vec = new_node->vec; log_bt_index++; ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); return new_node->index; } else { return node->index; } } static size_t prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { assert(prof_logging_state == prof_logging_state_started); malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); prof_thr_node_t dummy_node; dummy_node.thr_uid = thr_uid; prof_thr_node_t *node; /* See if this thread is already cached in the table. */ if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), (void **)(&node), NULL)) { size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; prof_thr_node_t *new_node = (prof_thr_node_t *) ialloc(tsd, sz, sz_size2index(sz), false, true); if (log_thr_first == NULL) { log_thr_first = new_node; log_thr_last = new_node; } else { log_thr_last->next = new_node; log_thr_last = new_node; } new_node->next = NULL; new_node->index = log_thr_index; new_node->thr_uid = thr_uid; strcpy(new_node->name, name); log_thr_index++; ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); return new_node->index; } else { return node->index; } } static void prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); if (cons_tdata == NULL) { /* * We decide not to log these allocations. cons_tdata will be * NULL only when the current thread is in a weird state (e.g. * it's being destroyed). 
*/ return; } malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); if (prof_logging_state != prof_logging_state_started) { goto label_done; } if (!log_tables_initialized) { bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, prof_bt_node_hash, prof_bt_node_keycomp); bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, prof_thr_node_hash, prof_thr_node_keycomp); if (err1 || err2) { goto label_done; } log_tables_initialized = true; } nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr, (alloc_ctx_t *)NULL); nstime_t free_time = NSTIME_ZERO_INITIALIZER; nstime_update(&free_time); prof_alloc_node_t *new_node = (prof_alloc_node_t *) ialloc(tsd, sizeof(prof_alloc_node_t), sz_size2index(sizeof(prof_alloc_node_t)), false, true); const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? "" : tctx->tdata->thread_name; const char *cons_thr_name = prof_thread_name_get(tsd); prof_bt_t bt; /* Initialize the backtrace, using the buffer in tdata to store it. */ bt_init(&bt, cons_tdata->vec); prof_backtrace(&bt); prof_bt_t *cons_bt = &bt; /* We haven't destroyed tctx yet, so gctx should be good to read. */ prof_bt_t *prod_bt = &tctx->gctx->bt; new_node->next = NULL; new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, prod_thr_name); new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, cons_thr_name); new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); new_node->alloc_time_ns = nstime_ns(&alloc_time); new_node->free_time_ns = nstime_ns(&free_time); new_node->usize = usize; if (log_alloc_first == NULL) { log_alloc_first = new_node; log_alloc_last = new_node; } else { log_alloc_last->next = new_node; log_alloc_last = new_node; } label_done: malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); } void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; prof_try_log(tsd, ptr, usize, tctx); if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } void bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); bt->vec = vec; bt->len = 0; } static void prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); if (tdata != NULL) { assert(!tdata->enq); tdata->enq = true; } malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); } static void prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; assert(tdata->enq); tdata->enq = false; idump = tdata->enq_idump; tdata->enq_idump = false; gdump = tdata->enq_gdump; tdata->enq_gdump = false; if (idump) { prof_idump(tsd_tsdn(tsd)); } if (gdump) { prof_gdump(tsd_tsdn(tsd)); } } } #ifdef JEMALLOC_PROF_LIBUNWIND void prof_backtrace(prof_bt_t *bt) { int nframes; cassert(config_prof); assert(bt->len == 0); assert(bt->vec != NULL); nframes = unw_backtrace(bt->vec, PROF_BT_MAX); if (nframes <= 0) { return; } bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); return _URC_NO_REASON; } static _Unwind_Reason_Code prof_unwind_callback(struct 
_Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; void *ip; cassert(config_prof); ip = (void *)_Unwind_GetIP(context); if (ip == NULL) { return _URC_END_OF_STACK; } data->bt->vec[data->bt->len] = ip; data->bt->len++; if (data->bt->len == data->max) { return _URC_END_OF_STACK; } return _URC_NO_REASON; } void prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); _Unwind_Backtrace(prof_unwind_callback, &data); } #elif (defined(JEMALLOC_PROF_GCC)) void prof_backtrace(prof_bt_t *bt) { #define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ void *p; \ if (__builtin_frame_address(i) == 0) { \ return; \ } \ p = __builtin_return_address(i); \ if (p == NULL) { \ return; \ } \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ } else { \ return; \ } cassert(config_prof); BT_FRAME(0) BT_FRAME(1) BT_FRAME(2) BT_FRAME(3) BT_FRAME(4) BT_FRAME(5) BT_FRAME(6) BT_FRAME(7) BT_FRAME(8) BT_FRAME(9) BT_FRAME(10) BT_FRAME(11) BT_FRAME(12) BT_FRAME(13) BT_FRAME(14) BT_FRAME(15) BT_FRAME(16) BT_FRAME(17) BT_FRAME(18) BT_FRAME(19) BT_FRAME(20) BT_FRAME(21) BT_FRAME(22) BT_FRAME(23) BT_FRAME(24) BT_FRAME(25) BT_FRAME(26) BT_FRAME(27) BT_FRAME(28) BT_FRAME(29) BT_FRAME(30) BT_FRAME(31) BT_FRAME(32) BT_FRAME(33) BT_FRAME(34) BT_FRAME(35) BT_FRAME(36) BT_FRAME(37) BT_FRAME(38) BT_FRAME(39) BT_FRAME(40) BT_FRAME(41) BT_FRAME(42) BT_FRAME(43) BT_FRAME(44) BT_FRAME(45) BT_FRAME(46) BT_FRAME(47) BT_FRAME(48) BT_FRAME(49) BT_FRAME(50) BT_FRAME(51) BT_FRAME(52) BT_FRAME(53) BT_FRAME(54) BT_FRAME(55) BT_FRAME(56) BT_FRAME(57) BT_FRAME(58) BT_FRAME(59) BT_FRAME(60) BT_FRAME(61) BT_FRAME(62) BT_FRAME(63) BT_FRAME(64) BT_FRAME(65) BT_FRAME(66) BT_FRAME(67) BT_FRAME(68) BT_FRAME(69) BT_FRAME(70) BT_FRAME(71) BT_FRAME(72) BT_FRAME(73) BT_FRAME(74) BT_FRAME(75) BT_FRAME(76) BT_FRAME(77) BT_FRAME(78) BT_FRAME(79) BT_FRAME(80) BT_FRAME(81) BT_FRAME(82) BT_FRAME(83) BT_FRAME(84) BT_FRAME(85) BT_FRAME(86) BT_FRAME(87) BT_FRAME(88) BT_FRAME(89) BT_FRAME(90) BT_FRAME(91) BT_FRAME(92) BT_FRAME(93) BT_FRAME(94) BT_FRAME(95) BT_FRAME(96) BT_FRAME(97) BT_FRAME(98) BT_FRAME(99) BT_FRAME(100) BT_FRAME(101) BT_FRAME(102) BT_FRAME(103) BT_FRAME(104) BT_FRAME(105) BT_FRAME(106) BT_FRAME(107) BT_FRAME(108) BT_FRAME(109) BT_FRAME(110) BT_FRAME(111) BT_FRAME(112) BT_FRAME(113) BT_FRAME(114) BT_FRAME(115) BT_FRAME(116) BT_FRAME(117) BT_FRAME(118) BT_FRAME(119) BT_FRAME(120) BT_FRAME(121) BT_FRAME(122) BT_FRAME(123) BT_FRAME(124) BT_FRAME(125) BT_FRAME(126) BT_FRAME(127) #undef BT_FRAME } #else void prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } static prof_gctx_t * prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (gctx == NULL) { return NULL; } gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ gctx->nlimbo = 1; tctx_tree_new(&gctx->tctxs); /* Duplicate bt. 
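 *
 * (Layout note, not upstream text: gctx and its backtrace vector share a
 * single allocation of size
 *
 *   offsetof(prof_gctx_t, vec) + bt->len * sizeof(void *)
 *
 * so the copy below just points gctx->bt.vec at the trailing gctx->vec
 * storage; there is no second allocation to manage or free.)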
*/ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; return gctx; } static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, prof_tdata_t *tdata) { cassert(config_prof); /* * Check that gctx is still unused by any thread cache before destroying * it. prof_lookup() increments gctx->nlimbo in order to avoid a race * condition with this function, as does prof_tctx_destroy() in order to * avoid a race between the main body of prof_tctx_destroy() and entry * into this function. */ prof_enter(tsd, tdata_self); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { not_reached(); } prof_leave(tsd, tdata_self); /* Destroy gctx. */ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); } else { /* * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). */ gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); } } static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); if (opt_prof_accum) { return false; } if (tctx->cnts.curobjs != 0) { return false; } if (tctx->prepared) { return false; } return true; } static bool prof_gctx_should_destroy(prof_gctx_t *gctx) { if (opt_prof_accum) { return false; } if (!tctx_tree_empty(&gctx->tctxs)) { return false; } if (gctx->nlimbo != 0) { return false; } return true; } static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); assert(tctx->cnts.accumobjs == 0); assert(tctx->cnts.accumbytes == 0); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: tctx_tree_remove(&gctx->tctxs, tctx); destroy_tctx = true; if (prof_gctx_should_destroy(gctx)) { /* * Increment gctx->nlimbo in order to keep another * thread from winning the race to destroy gctx while * this one has gctx->lock dropped. Without this, it * would be possible for another thread to: * * 1) Sample an allocation associated with gctx. * 2) Deallocate the sampled object. * 3) Successfully prof_gctx_try_destroy(gctx). * * The result would be that gctx no longer exists by the * time this thread accesses it in * prof_gctx_try_destroy(). */ gctx->nlimbo++; destroy_gctx = true; } else { destroy_gctx = false; } break; case prof_tctx_state_dumping: /* * A dumping thread needs tctx to remain valid until dumping * has finished. Change state such that the dumping thread will * complete destruction during a late dump iteration phase. 
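 *
 * (To summarize the tctx lifecycle, an editorial note rather than
 * upstream text:
 *
 *   nominal   -> dumping    when a dump snapshots the tctx
 *   dumping   -> purgatory  when the tctx is freed mid-dump
 *   purgatory -> destroyed  by prof_tctx_finish_iter() after the dump
 *
 * A nominal tctx may be torn down immediately; a dumping one must park
 * in purgatory so the dump iteration never touches freed memory.)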
*/ tctx->state = prof_tctx_state_purgatory; destroy_tctx = false; destroy_gctx = false; break; default: not_reached(); destroy_tctx = false; destroy_gctx = false; } malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); if (destroy_gctx) { prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, tdata); } malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, false); } if (destroy_tctx) { idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); } } static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { prof_gctx_t *p; void *v; } gctx, tgctx; union { prof_bt_t *p; void *v; } btkey; bool new_gctx; prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ prof_leave(tsd, tdata); tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); if (tgctx.v == NULL) { return true; } prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { gctx.p = tgctx.p; btkey.p = &gctx.p->bt; if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { /* OOM. */ prof_leave(tsd, tdata); idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, true, true); return true; } new_gctx = true; } else { new_gctx = false; } } else { tgctx.v = NULL; new_gctx = false; } if (!new_gctx) { /* * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); new_gctx = false; if (tgctx.v != NULL) { /* Lost race to insert. */ idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, true); } } prof_leave(tsd, tdata); *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; return false; } prof_tctx_t * prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { prof_tctx_t *p; void *v; } ret; prof_tdata_t *tdata; bool not_found; cassert(config_prof); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return NULL; } malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); if (!not_found) { /* Note double negative! */ ret.p->prepared = true; } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { void *btkey; prof_gctx_t *gctx; bool new_gctx, error; /* * This thread's cache lacks bt. Look for it in the global * cache. */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, &new_gctx)) { return NULL; } /* Link a prof_tctx_t into gctx for this thread. 
*/ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, arena_ichoose(tsd, NULL), true); if (ret.p == NULL) { if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } return NULL; } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; ret.p->thr_discrim = tdata->thr_discrim; memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); ret.p->gctx = gctx; ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; ret.p->state = prof_tctx_state_initializing; malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (error) { if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); return NULL; } malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } return ret.p; } /* * The bodies of this function and prof_leakcheck() are compiled out unless heap * profiling is enabled, so that it is possible to compile jemalloc with * floating point support completely disabled. Avoiding floating point code is * important on memory-constrained systems, but it also enables a workaround for * versions of glibc that don't properly save/restore floating point registers * during dynamic lazy symbol loading (which internally calls into whatever * malloc implementation happens to be integrated into the application). Note * that some compilers (e.g. gcc 4.8) may use floating point registers for fast * memory moves, so jemalloc must be compiled with such optimizations disabled * (e.g. * -mno-sse) in order for the workaround to be complete. */ void prof_sample_threshold_update(prof_tdata_t *tdata) { #ifdef JEMALLOC_PROF if (!config_prof) { return; } if (lg_prof_sample == 0) { tsd_bytes_until_sample_set(tsd_fetch(), 0); return; } /* * Compute sample interval as a geometrically distributed random * variable with mean (2^lg_prof_sample). 
* * __ __ * | log(u) | 1 * tdata->bytes_until_sample = | -------- |, where p = --------------- * | log(1-p) | lg_prof_sample * 2 * * For more information on the math, see: * * Non-Uniform Random Variate Generation * Luc Devroye * Springer-Verlag, New York, 1986 * pp 500 * (http://luc.devroye.org/rnbookindex.html) */ uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53); double u = (double)r * (1.0/9007199254740992.0L); uint64_t bytes_until_sample = (uint64_t)(log(u) / log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + (uint64_t)1U; if (bytes_until_sample > SSIZE_MAX) { bytes_until_sample = SSIZE_MAX; } tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample); #endif } #ifdef JEMALLOC_JET static prof_tdata_t * prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { size_t *tdata_count = (size_t *)arg; (*tdata_count)++; return NULL; } size_t prof_tdata_count(void) { size_t tdata_count = 0; tsdn_t *tsdn; tsdn = tsdn_fetch(); malloc_mutex_lock(tsdn, &tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, (void *)&tdata_count); malloc_mutex_unlock(tsdn, &tdatas_mtx); return tdata_count; } size_t prof_bt_count(void) { size_t bt_count; tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return 0; } malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); return bt_count; } #endif static int prof_dump_open_impl(bool propagate_err, const char *filename) { int fd; fd = creat(filename, 0644); if (fd == -1 && !propagate_err) { malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", filename); if (opt_abort) { abort(); } } return fd; } prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; static bool prof_dump_flush(bool propagate_err) { bool ret = false; ssize_t err; cassert(config_prof); err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); if (err == -1) { if (!propagate_err) { malloc_write("<jemalloc>: write() failed during heap " "profile flush\n"); if (opt_abort) { abort(); } } ret = true; } prof_dump_buf_end = 0; return ret; } static bool prof_dump_close(bool propagate_err) { bool ret; assert(prof_dump_fd != -1); ret = prof_dump_flush(propagate_err); close(prof_dump_fd); prof_dump_fd = -1; return ret; } static bool prof_dump_write(bool propagate_err, const char *s) { size_t i, slen, n; cassert(config_prof); i = 0; slen = strlen(s); while (i < slen) { /* Flush the buffer if it is full. */ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { if (prof_dump_flush(propagate_err) && propagate_err) { return true; } } if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { /* Finish writing. */ n = slen - i; } else { /* Write as much of s as will fit. */ n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; } memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); prof_dump_buf_end += n; i += n; } return false; } JEMALLOC_FORMAT_PRINTF(2, 3) static bool prof_dump_printf(bool propagate_err, const char *format, ...)
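/*
 * (Usage note, not upstream text: prof_dump_printf() formats into a
 * PROF_PRINTF_BUFSIZE stack buffer and routes the result through
 * prof_dump_write(), so output accumulates in prof_dump_buf and is only
 * flushed to prof_dump_fd when the buffer fills. A typical call looks
 * like
 *
 *   prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n",
 *       (uint64_t)1U << lg_prof_sample);
 *
 * and reports a write error only when propagate_err is set.)
 */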
{ bool ret; va_list ap; char buf[PROF_PRINTF_BUFSIZE]; va_start(ap, format); malloc_vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); ret = prof_dump_write(propagate_err, buf); return ret; } static void prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: malloc_mutex_unlock(tsdn, tctx->gctx->lock); return; case prof_tctx_state_nominal: tctx->state = prof_tctx_state_dumping; malloc_mutex_unlock(tsdn, tctx->gctx->lock); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { tdata->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; tdata->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; } break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: not_reached(); } } static void prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { malloc_mutex_assert_owner(tsdn, gctx->lock); gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; } } static prof_tctx_t * prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); break; default: not_reached(); } return NULL; } struct prof_tctx_dump_iter_arg_s { tsdn_t *tsdn; bool propagate_err; }; static prof_tctx_t * prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { struct prof_tctx_dump_iter_arg_s *arg = (struct prof_tctx_dump_iter_arg_s *)opaque; malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: case prof_tctx_state_nominal: /* Not captured by this dump. */ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: if (prof_dump_printf(arg->propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes)) { return tctx; } break; default: not_reached(); } return NULL; } static prof_tctx_t * prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ break; case prof_tctx_state_dumping: tctx->state = prof_tctx_state_nominal; break; case prof_tctx_state_purgatory: ret = tctx; goto label_return; default: not_reached(); } ret = NULL; label_return: return ret; } static void prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); malloc_mutex_lock(tsdn, gctx->lock); /* * Increment nlimbo so that gctx won't go away before dump. * Additionally, link gctx into the dump list so that it is included in * prof_dump()'s second pass. 
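 *
 * (In other words, an explanatory note rather than upstream text:
 * prof_dump() is a two-pass walk. Pass one, prof_dump_prep(), links
 * every gctx into this dump tree and merges per-thread tctx counters
 * into it; pass two, prof_gctx_dump_iter(), prints each linked gctx.
 * Holding nlimbo across both passes keeps a concurrently freed gctx
 * alive until it has been printed.)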
*/ gctx->nlimbo++; gctx_tree_insert(gctxs, gctx); memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); malloc_mutex_unlock(tsdn, gctx->lock); } struct prof_gctx_merge_iter_arg_s { tsdn_t *tsdn; size_t leak_ngctx; }; static prof_gctx_t * prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { struct prof_gctx_merge_iter_arg_s *arg = (struct prof_gctx_merge_iter_arg_s *)opaque; malloc_mutex_lock(arg->tsdn, gctx->lock); tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, (void *)arg->tsdn); if (gctx->cnt_summed.curobjs != 0) { arg->leak_ngctx++; } malloc_mutex_unlock(arg->tsdn, gctx->lock); return NULL; } static void prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); prof_gctx_t *gctx; /* * Standard tree iteration won't work here, because as soon as we * decrement gctx->nlimbo and unlock gctx, another thread can * concurrently destroy it, which will corrupt the tree. Therefore, * tear down the tree one node at a time during iteration. */ while ((gctx = gctx_tree_first(gctxs)) != NULL) { gctx_tree_remove(gctxs, gctx); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); { prof_tctx_t *next; next = NULL; do { prof_tctx_t *to_destroy = tctx_tree_iter(&gctx->tctxs, next, prof_tctx_finish_iter, (void *)tsd_tsdn(tsd)); if (to_destroy != NULL) { next = tctx_tree_next(&gctx->tctxs, to_destroy); tctx_tree_remove(&gctx->tctxs, to_destroy); idalloctm(tsd_tsdn(tsd), to_destroy, NULL, NULL, true, true); } else { next = NULL; } } while (next != NULL); } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } else { malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } } } struct prof_tdata_merge_iter_arg_s { tsdn_t *tsdn; prof_cnt_t cnt_all; }; static prof_tdata_t * prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *opaque) { struct prof_tdata_merge_iter_arg_s *arg = (struct prof_tdata_merge_iter_arg_s *)opaque; malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; union { prof_tctx_t *p; void *v; } tctx; tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) { prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); } arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } } else { tdata->dumping = false; } malloc_mutex_unlock(arg->tsdn, tdata->lock); return NULL; } static prof_tdata_t * prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { bool propagate_err = *(bool *)arg; if (!tdata->dumping) { return NULL; } if (prof_dump_printf(propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", tdata->thr_uid, tdata->cnt_summed.curobjs, tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, tdata->cnt_summed.accumbytes, (tdata->thread_name != NULL) ? " " : "", (tdata->thread_name != NULL) ? 
tdata->thread_name : "")) { return tdata; } return NULL; } static bool prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { return true; } malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); malloc_mutex_unlock(tsdn, &tdatas_mtx); return ret; } prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; static bool prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); malloc_mutex_assert_owner(tsdn, gctx->lock); /* Avoid dumping such gctx's that have no useful data. */ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { assert(gctx->cnt_summed.curobjs == 0); assert(gctx->cnt_summed.curbytes == 0); assert(gctx->cnt_summed.accumobjs == 0); assert(gctx->cnt_summed.accumbytes == 0); ret = false; goto label_return; } if (prof_dump_printf(propagate_err, "@")) { ret = true; goto label_return; } for (i = 0; i < bt->len; i++) { if (prof_dump_printf(propagate_err, " %#"FMTxPTR, (uintptr_t)bt->vec[i])) { ret = true; goto label_return; } } if (prof_dump_printf(propagate_err, "\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { ret = true; goto label_return; } prof_tctx_dump_iter_arg.tsdn = tsdn; prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } ret = false; label_return: return ret; } #ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int prof_open_maps(const char *format, ...) { int mfd; va_list ap; char filename[PATH_MAX + 1]; va_start(ap, format); malloc_vsnprintf(filename, sizeof(filename), format, ap); va_end(ap); #if defined(O_CLOEXEC) mfd = open(filename, O_RDONLY | O_CLOEXEC); #else mfd = open(filename, O_RDONLY); if (mfd != -1) { fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); } #endif return mfd; } #endif static int prof_getpid(void) { #ifdef _WIN32 return GetCurrentProcessId(); #else return getpid(); #endif } static bool prof_dump_maps(bool propagate_err) { bool ret; int mfd; cassert(config_prof); #ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); #elif defined(_WIN32) mfd = -1; // Not implemented #else { int pid = prof_getpid(); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); if (mfd == -1) { mfd = prof_open_maps("/proc/%d/maps", pid); } } #endif if (mfd != -1) { ssize_t nread; if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && propagate_err) { ret = true; goto label_return; } nread = 0; do { prof_dump_buf_end += nread; if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { /* Make space in prof_dump_buf before read(). 
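 *
 * (The surrounding loop, sketched here for clarity and not upstream
 * text, appends the maps file directly into the dump buffer:
 *
 *   do {
 *       prof_dump_buf_end += nread;
 *       flush when prof_dump_buf_end == PROF_DUMP_BUFSIZE;
 *       nread = malloc_read_fd(mfd, &prof_dump_buf[prof_dump_buf_end],
 *           PROF_DUMP_BUFSIZE - prof_dump_buf_end);
 *   } while (nread > 0);
 *
 * so the maps data travels through the same buffered stream as the
 * profile records, with no intermediate copy.)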
*/ if (prof_dump_flush(propagate_err) && propagate_err) { ret = true; goto label_return; } } nread = malloc_read_fd(mfd, &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE - prof_dump_buf_end); } while (nread > 0); } else { ret = true; goto label_return; } ret = false; label_return: if (mfd != -1) { close(mfd); } return ret; } /* * See prof_sample_threshold_update() comment for why the body of this function * is conditionally compiled. */ static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, const char *filename) { #ifdef JEMALLOC_PROF /* * Scaling is equivalent to AdjustSamples() in jeprof, but the result may * differ slightly from what jeprof reports, because here we scale the * summary values, whereas jeprof scales each context individually and * reports the sums of the scaled values. */ if (cnt_all->curbytes != 0) { double sample_period = (double)((uint64_t)1 << lg_prof_sample); double ratio = (((double)cnt_all->curbytes) / (double)cnt_all->curobjs) / sample_period; double scale_factor = 1.0 / (1.0 - exp(-ratio)); uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) * scale_factor); uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * scale_factor); malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); malloc_printf( "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", filename); } #endif } struct prof_gctx_dump_iter_arg_s { tsdn_t *tsdn; bool propagate_err; }; static prof_gctx_t * prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { prof_gctx_t *ret; struct prof_gctx_dump_iter_arg_s *arg = (struct prof_gctx_dump_iter_arg_s *)opaque; malloc_mutex_lock(arg->tsdn, gctx->lock); if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, gctxs)) { ret = gctx; goto label_return; } ret = NULL; label_return: malloc_mutex_unlock(arg->tsdn, gctx->lock); return ret; } static void prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, prof_gctx_tree_t *gctxs) { size_t tabind; union { prof_gctx_t *p; void *v; } gctx; prof_enter(tsd, tdata); /* * Put gctx's in limbo and clear their counters in preparation for * summing. */ gctx_tree_new(gctxs); for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); } /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. */ prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)prof_tdata_merge_iter_arg); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); /* Merge tctx stats into gctx's.
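 *
 * (Explanatory note, not upstream text: this second merge folds the
 * per-thread snapshots taken above into each gctx's cnt_summed, and
 * counts in leak_ngctx every gctx whose curobjs remains nonzero; that
 * count is what prof_leakcheck() later reports as ">= N contexts".)
 */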
prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); prof_gctx_merge_iter_arg->leak_ngctx = 0; gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, (void *)prof_gctx_merge_iter_arg); prof_leave(tsd, tdata); } static bool prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck, prof_tdata_t *tdata, struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, prof_gctx_tree_t *gctxs) { /* Create dump file. */ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { return true; } /* Dump profile header. */ if (prof_dump_header(tsd_tsdn(tsd), propagate_err, &prof_tdata_merge_iter_arg->cnt_all)) { goto label_write_error; } /* Dump per gctx profile stats. */ prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); prof_gctx_dump_iter_arg->propagate_err = propagate_err; if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, (void *)prof_gctx_dump_iter_arg) != NULL) { goto label_write_error; } /* Dump /proc/<pid>/maps if possible. */ if (prof_dump_maps(propagate_err)) { goto label_write_error; } if (prof_dump_close(propagate_err)) { return true; } return false; label_write_error: prof_dump_close(propagate_err); return true; } static bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) { cassert(config_prof); assert(tsd_reentrancy_level_get(tsd) == 0); prof_tdata_t * tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return true; } pre_reentrancy(tsd, NULL); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); prof_gctx_tree_t gctxs; struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, &gctxs); bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, &prof_gctx_dump_iter_arg, &gctxs); prof_gctx_finish(tsd, &gctxs); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); post_reentrancy(tsd); if (err) { return true; } if (leakcheck) { prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, prof_gctx_merge_iter_arg.leak_ngctx, filename); } return false; } #ifdef JEMALLOC_JET void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, uint64_t *accumbytes) { tsd_t *tsd; prof_tdata_t *tdata; struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; prof_gctx_tree_t gctxs; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { if (curobjs != NULL) { *curobjs = 0; } if (curbytes != NULL) { *curbytes = 0; } if (accumobjs != NULL) { *accumobjs = 0; } if (accumbytes != NULL) { *accumbytes = 0; } return; } prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, &gctxs); prof_gctx_finish(tsd, &gctxs); if (curobjs != NULL) { *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; } if (curbytes != NULL) { *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; } if (accumobjs != NULL) { *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; } if (accumbytes != NULL) { *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; } } #endif #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) #define VSEQ_INVALID UINT64_C(0xffffffffffffffff) static void prof_dump_filename(char *filename, char v, uint64_t vseq) { cassert(config_prof); if (vseq !=
VSEQ_INVALID) { /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c%"FMTu64".heap", opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); } else { /* "<prefix>.<pid>.<seq>.<v>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c.heap", opt_prof_prefix, prof_getpid(), prof_dump_seq, v); } prof_dump_seq++; } static void prof_fdump(void) { tsd_t *tsd; char filename[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); if (!prof_booted) { return; } tsd = tsd_fetch(); assert(tsd_reentrancy_level_get(tsd) == 0); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, opt_prof_leak); } bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { cassert(config_prof); #ifndef JEMALLOC_ATOMIC_U64 if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { return true; } prof_accum->accumbytes = 0; #else atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); #endif return false; } void prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { return; } tsd = tsdn_tsd(tsdn); if (tsd_reentrancy_level_get(tsd) > 0) { return; } tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return; } if (tdata->enq) { tdata->enq_idump = true; return; } if (opt_prof_prefix[0] != '\0') { char filename[PATH_MAX + 1]; malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } bool prof_mdump(tsd_t *tsd, const char *filename) { cassert(config_prof); assert(tsd_reentrancy_level_get(tsd) == 0); if (!opt_prof || !prof_booted) { return true; } char filename_buf[DUMP_FILENAME_BUFSIZE]; if (filename == NULL) { /* No filename specified, so automatically generate one.
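 *
 * (For example, illustrative rather than upstream text: with
 * opt_prof_prefix "jeprof", pid 1234, dump sequence 7, and mseq 0,
 * prof_dump_filename() yields
 *
 *   jeprof.1234.7.m0.heap
 *
 * where the letter encodes the trigger: 'm' manual, 'i' interval,
 * 'u' gdump, 'f' final.)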
*/ if (opt_prof_prefix[0] == '\0') { return true; } malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } return prof_dump(tsd, true, filename, false); } void prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { return; } tsd = tsdn_tsd(tsdn); if (tsd_reentrancy_level_get(tsd) > 0) { return; } tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return; } if (tdata->enq) { tdata->enq_gdump = true; return; } if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } static void prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); } static bool prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); if (bt1->len != bt2->len) { return false; } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } static void prof_bt_node_hash(const void *key, size_t r_hash[2]) { const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; prof_bt_hash((void *)(&bt_node->bt), r_hash); } static bool prof_bt_node_keycomp(const void *k1, const void *k2) { const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; return prof_bt_keycomp((void *)(&bt_node1->bt), (void *)(&bt_node2->bt)); } static void prof_thr_node_hash(const void *key, size_t r_hash[2]) { const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash); } static bool prof_thr_node_keycomp(const void *k1, const void *k2) { const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; return thr_node1->thr_uid == thr_node2->thr_uid; } static uint64_t prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; malloc_mutex_lock(tsdn, &next_thr_uid_mtx); thr_uid = next_thr_uid; next_thr_uid++; malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); return thr_uid; } static prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active) { prof_tdata_t *tdata; cassert(config_prof); /* Initialize an empty cache for this thread. 
*/ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) { return NULL; } tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; tdata->thr_discrim = thr_discrim; tdata->thread_name = thread_name; tdata->attached = true; tdata->expired = false; tdata->tctx_uid_next = 0; if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) { idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); return NULL; } tdata->prng_state = (uint64_t)(uintptr_t)tdata; prof_sample_threshold_update(tdata); tdata->enq = false; tdata->enq_idump = false; tdata->enq_gdump = false; tdata->dumping = false; tdata->active = active; malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_insert(&tdatas, tdata); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); return tdata; } prof_tdata_t * prof_tdata_init(tsd_t *tsd) { return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); } static bool prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) { return false; } if (ckh_count(&tdata->bt2tctx) != 0) { return false; } return true; } static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsdn, tdata->lock); return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_remove(&tdatas, tdata); assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); if (tdata->thread_name != NULL) { idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, true); } ckh_delete(tsd, &tdata->bt2tctx); idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); } static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } static void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. */ if (!destroy_tdata) { tdata->attached = false; } tsd_prof_tdata_set(tsd, NULL); } else { destroy_tdata = false; } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, true); } } prof_tdata_t * prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; bool active = tdata->active; prof_tdata_detach(tsd, tdata); return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, active); } static bool prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tsdn, tdata->lock); if (!tdata->expired) { tdata->expired = true; destroy_tdata = tdata->attached ? 
false : prof_tdata_should_destroy(tsdn, tdata, false); } else { destroy_tdata = false; } malloc_mutex_unlock(tsdn, tdata->lock); return destroy_tdata; } static prof_tdata_t * prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); } void prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; assert(lg_sample < (sizeof(uint64_t) << 3)); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); lg_prof_sample = lg_sample; next = NULL; do { prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, prof_tdata_reset_iter, (void *)tsd); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); } else { next = NULL; } } while (next != NULL); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); } void prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; if (!config_prof) { return; } tdata = tsd_prof_tdata_get(tsd); if (tdata != NULL) { prof_tdata_detach(tsd, tdata); } } bool prof_active_get(tsdn_t *tsdn) { bool prof_active_current; malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_current = prof_active; malloc_mutex_unlock(tsdn, &prof_active_mtx); return prof_active_current; } bool prof_active_set(tsdn_t *tsdn, bool active) { bool prof_active_old; malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_old = prof_active; prof_active = active; malloc_mutex_unlock(tsdn, &prof_active_mtx); return prof_active_old; } #ifdef JEMALLOC_JET size_t prof_log_bt_count(void) { size_t cnt = 0; prof_bt_node_t *node = log_bt_first; while (node != NULL) { cnt++; node = node->next; } return cnt; } size_t prof_log_alloc_count(void) { size_t cnt = 0; prof_alloc_node_t *node = log_alloc_first; while (node != NULL) { cnt++; node = node->next; } return cnt; } size_t prof_log_thr_count(void) { size_t cnt = 0; prof_thr_node_t *node = log_thr_first; while (node != NULL) { cnt++; node = node->next; } return cnt; } bool prof_log_is_logging(void) { return prof_logging_state == prof_logging_state_started; } bool prof_log_rep_check(void) { if (prof_logging_state == prof_logging_state_stopped && log_tables_initialized) { return true; } if (log_bt_last != NULL && log_bt_last->next != NULL) { return true; } if (log_thr_last != NULL && log_thr_last->next != NULL) { return true; } if (log_alloc_last != NULL && log_alloc_last->next != NULL) { return true; } size_t bt_count = prof_log_bt_count(); size_t thr_count = prof_log_thr_count(); size_t alloc_count = prof_log_alloc_count(); if (prof_logging_state == prof_logging_state_stopped) { if (bt_count != 0 || thr_count != 0 || alloc_count != 0) { return true; } } prof_alloc_node_t *node = log_alloc_first; while (node != NULL) { if (node->alloc_bt_ind >= bt_count) { return true; } if (node->free_bt_ind >= bt_count) { return true; } if (node->alloc_thr_ind >= thr_count) { return true; } if (node->free_thr_ind >= thr_count) { return true; } if (node->alloc_time_ns > node->free_time_ns) { return true; } node = node->next; } return false; } void prof_log_dummy_set(bool new_value) { prof_log_dummy = new_value; } #endif
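/*
 * prof_log_start()/prof_log_stop() below gate the allocation logger; both
 * return true on failure, and a NULL filename makes prof_log_start() derive
 * "<opt_prof_prefix>.<pid>.<seq>.json" itself.  A minimal sketch of the
 * round trip (the "example_" wrapper is illustrative, not jemalloc API):
 */
static bool
example_log_roundtrip(tsdn_t *tsdn) {
	if (prof_log_start(tsdn, NULL)) {	/* NULL => auto-named file. */
		return true;
	}
	/* ... run the workload; sampled allocs/frees get recorded ... */
	return prof_log_stop(tsdn);	/* Emits the JSON and closes it. */
}
bool prof_log_start(tsdn_t *tsdn, const char *filename) { if (!opt_prof || !prof_booted) { return true; } bool ret = false; size_t buf_size = PATH_MAX + 1; malloc_mutex_lock(tsdn, &log_mtx); if (prof_logging_state != prof_logging_state_stopped) { ret = true; } else if (filename == NULL) { /* Make default 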
name. */ malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json", opt_prof_prefix, prof_getpid(), log_seq); log_seq++; prof_logging_state = prof_logging_state_started; } else if (strlen(filename) >= buf_size) { ret = true; } else { strcpy(log_filename, filename); prof_logging_state = prof_logging_state_started; } if (!ret) { nstime_update(&log_start_timestamp); } malloc_mutex_unlock(tsdn, &log_mtx); return ret; } /* Used as an atexit function to stop logging on exit. */ static void prof_log_stop_final(void) { tsd_t *tsd = tsd_fetch(); prof_log_stop(tsd_tsdn(tsd)); } struct prof_emitter_cb_arg_s { int fd; ssize_t ret; }; static void prof_emitter_write_cb(void *opaque, const char *to_write) { struct prof_emitter_cb_arg_s *arg = (struct prof_emitter_cb_arg_s *)opaque; size_t bytes = strlen(to_write); #ifdef JEMALLOC_JET if (prof_log_dummy) { return; } #endif arg->ret = write(arg->fd, (void *)to_write, bytes); } /* * prof_log_emit_{...} goes through the appropriate linked list, emitting each * node to the json and deallocating it. */ static void prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { emitter_json_array_kv_begin(emitter, "threads"); prof_thr_node_t *thr_node = log_thr_first; prof_thr_node_t *thr_old_node; while (thr_node != NULL) { emitter_json_object_begin(emitter); emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, &thr_node->thr_uid); char *thr_name = thr_node->name; emitter_json_kv(emitter, "thr_name", emitter_type_string, &thr_name); emitter_json_object_end(emitter); thr_old_node = thr_node; thr_node = thr_node->next; idalloc(tsd, thr_old_node); } emitter_json_array_end(emitter); } static void prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { emitter_json_array_kv_begin(emitter, "stack_traces"); prof_bt_node_t *bt_node = log_bt_first; prof_bt_node_t *bt_old_node; /* * Calculate how many hex digits we need: twice number of bytes, two for * "0x", and then one more for terminating '\0'. 
*/ char buf[2 * sizeof(intptr_t) + 3]; size_t buf_sz = sizeof(buf); while (bt_node != NULL) { emitter_json_array_begin(emitter); size_t i; for (i = 0; i < bt_node->bt.len; i++) { malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); char *trace_str = buf; emitter_json_value(emitter, emitter_type_string, &trace_str); } emitter_json_array_end(emitter); bt_old_node = bt_node; bt_node = bt_node->next; idalloc(tsd, bt_old_node); } emitter_json_array_end(emitter); } static void prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { emitter_json_array_kv_begin(emitter, "allocations"); prof_alloc_node_t *alloc_node = log_alloc_first; prof_alloc_node_t *alloc_old_node; while (alloc_node != NULL) { emitter_json_object_begin(emitter); emitter_json_kv(emitter, "alloc_thread", emitter_type_size, &alloc_node->alloc_thr_ind); emitter_json_kv(emitter, "free_thread", emitter_type_size, &alloc_node->free_thr_ind); emitter_json_kv(emitter, "alloc_trace", emitter_type_size, &alloc_node->alloc_bt_ind); emitter_json_kv(emitter, "free_trace", emitter_type_size, &alloc_node->free_bt_ind); emitter_json_kv(emitter, "alloc_timestamp", emitter_type_uint64, &alloc_node->alloc_time_ns); emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, &alloc_node->free_time_ns); emitter_json_kv(emitter, "usize", emitter_type_uint64, &alloc_node->usize); emitter_json_object_end(emitter); alloc_old_node = alloc_node; alloc_node = alloc_node->next; idalloc(tsd, alloc_old_node); } emitter_json_array_end(emitter); } static void prof_log_emit_metadata(emitter_t *emitter) { emitter_json_object_kv_begin(emitter, "info"); nstime_t now = NSTIME_ZERO_INITIALIZER; nstime_update(&now); uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); char *vers = JEMALLOC_VERSION; emitter_json_kv(emitter, "version", emitter_type_string, &vers); emitter_json_kv(emitter, "lg_sample_rate", emitter_type_int, &lg_prof_sample); int pid = prof_getpid(); emitter_json_kv(emitter, "pid", emitter_type_int, &pid); emitter_json_object_end(emitter); }
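/*
 * The emitter only needs a write callback (cf. prof_emitter_write_cb()
 * above), so the JSON can be routed anywhere.  A minimal alternative sink
 * that buffers into memory, truncating on overflow -- illustrative only,
 * not part of jemalloc:
 */
#include <string.h>
struct example_buf_sink {
	char *buf;
	size_t len, cap;
};
static void
example_buf_write_cb(void *opaque, const char *to_write) {
	struct example_buf_sink *sink = (struct example_buf_sink *)opaque;
	size_t n = strlen(to_write);
	if (n > sink->cap - sink->len) {
		n = sink->cap - sink->len;	/* Drop bytes past the end. */
	}
	memcpy(sink->buf + sink->len, to_write, n);
	sink->len += n;
}
bool prof_log_stop(tsdn_t *tsdn) { if (!opt_prof || !prof_booted) { return true; } tsd_t *tsd = tsdn_tsd(tsdn); malloc_mutex_lock(tsdn, &log_mtx); if (prof_logging_state != prof_logging_state_started) { malloc_mutex_unlock(tsdn, &log_mtx); return true; } /* * Set the state to dumping. We'll set it to stopped when we're done. * Since other threads won't be able to start/stop/log when the state is * dumping, we don't have to hold the lock during the whole method. */ prof_logging_state = prof_logging_state_dumping; malloc_mutex_unlock(tsdn, &log_mtx); emitter_t emitter; /* Create a file. */ int fd; #ifdef JEMALLOC_JET if (prof_log_dummy) { fd = 0; } else { fd = creat(log_filename, 0644); } #else fd = creat(log_filename, 0644); #endif if (fd == -1) { malloc_printf("<jemalloc>: creat() for log file \"%s\" " " failed with %d\n", log_filename, errno); if (opt_abort) { abort(); } return true; } /* Emit to json. */ struct prof_emitter_cb_arg_s arg; arg.fd = fd; emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, (void *)(&arg)); emitter_json_object_begin(&emitter); prof_log_emit_metadata(&emitter); prof_log_emit_threads(tsd, &emitter); prof_log_emit_traces(tsd, &emitter); prof_log_emit_allocs(tsd, &emitter); emitter_json_object_end(&emitter); /* Reset global state. 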
*/ if (log_tables_initialized) { ckh_delete(tsd, &log_bt_node_set); ckh_delete(tsd, &log_thr_node_set); } log_tables_initialized = false; log_bt_index = 0; log_thr_index = 0; log_bt_first = NULL; log_bt_last = NULL; log_thr_first = NULL; log_thr_last = NULL; log_alloc_first = NULL; log_alloc_last = NULL; malloc_mutex_lock(tsdn, &log_mtx); prof_logging_state = prof_logging_state_stopped; malloc_mutex_unlock(tsdn, &log_mtx); #ifdef JEMALLOC_JET if (prof_log_dummy) { return false; } #endif return close(fd); } const char * prof_thread_name_get(tsd_t *tsd) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return ""; } return (tdata->thread_name != NULL ? tdata->thread_name : ""); } static char * prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { char *ret; size_t size; if (thread_name == NULL) { return NULL; } size = strlen(thread_name) + 1; if (size == 1) { return ""; } ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (ret == NULL) { return NULL; } memcpy(ret, thread_name, size); return ret; } int prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; unsigned i; char *s; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return EAGAIN; } /* Validate input. */ if (thread_name == NULL) { return EFAULT; } for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; if (!isgraph(c) && !isblank(c)) { return EFAULT; } } s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); if (s == NULL) { return EAGAIN; } if (tdata->thread_name != NULL) { idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, true); tdata->thread_name = NULL; } if (strlen(s) > 0) { tdata->thread_name = s; } return 0; } bool prof_thread_active_get(tsd_t *tsd) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return false; } return tdata->active; } bool prof_thread_active_set(tsd_t *tsd, bool active) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return true; } tdata->active = active; return false; } bool prof_thread_active_init_get(tsdn_t *tsdn) { bool active_init; malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init = prof_thread_active_init; malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return active_init; } bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { bool active_init_old; malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return active_init_old; } bool prof_gdump_get(tsdn_t *tsdn) { bool prof_gdump_current; malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_current = prof_gdump_val; malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return prof_gdump_current; } bool prof_gdump_set(tsdn_t *tsdn, bool gdump) { bool prof_gdump_old; malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return prof_gdump_old; } void prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, sizeof(PROF_PREFIX_DEFAULT)); } void prof_boot1(void) { cassert(config_prof); /* * opt_prof must be in its final state before any arenas are * initialized, so this function must be executed early. */ if (opt_prof_leak && !opt_prof) { /* * Enable opt_prof, but in such a way that profiles are never * automatically dumped. 
*/ opt_prof = true; opt_prof_gdump = false; } else if (opt_prof) { if (opt_lg_prof_interval >= 0) { prof_interval = (((uint64_t)1U) << opt_lg_prof_interval); } } }
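/*
 * opt_lg_prof_interval is the log2 of the average bytes of allocation
 * activity between interval-triggered ('i') dumps, with a negative value
 * (the default, -1) meaning disabled; e.g. lg_prof_interval:30 yields
 * roughly one dump per GiB allocated.  The same derivation as a standalone
 * sketch, not jemalloc API:
 */
#include <stdint.h>
static uint64_t
example_prof_interval(int lg_interval) {
	/* Negative disables interval dumps; 0 stands in for "never". */
	return (lg_interval >= 0) ? ((uint64_t)1U << lg_interval) : 0;
}
bool prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { unsigned i; lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; if (malloc_mutex_init(&prof_active_mtx, "prof_active", WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { return true; } prof_gdump_val = opt_prof_gdump; if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { return true; } prof_thread_active_init = opt_prof_thread_active_init; if (malloc_mutex_init(&prof_thread_active_init_mtx, "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, malloc_mutex_rank_exclusive)) { return true; } if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) { return true; } if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { return true; } tdata_tree_new(&tdatas); if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { return true; } next_thr_uid = 0; if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { return true; } if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) { abort(); } } if (opt_prof_log) { prof_log_start(tsd_tsdn(tsd), NULL); } if (atexit(prof_log_stop_final) != 0) { malloc_write("<jemalloc>: Error in atexit() " "for logging\n"); if (opt_abort) { abort(); } } if (malloc_mutex_init(&log_mtx, "prof_log", WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { return true; } if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, prof_bt_node_hash, prof_bt_node_keycomp)) { return true; } if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, prof_thr_node_hash, prof_thr_node_keycomp)) { return true; } log_tables_initialized = true; gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE); if (gctx_locks == NULL) { return true; } for (i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", WITNESS_RANK_PROF_GCTX, malloc_mutex_rank_exclusive)) { return true; } } tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE); if (tdata_locks == NULL) { return true; } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", WITNESS_RANK_PROF_TDATA, malloc_mutex_rank_exclusive)) { return true; } } #ifdef JEMALLOC_PROF_LIBGCC /* * Cause the backtracing machinery to allocate its internal * state before enabling profiling. 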
*/ _Unwind_Backtrace(prof_unwind_init_callback, NULL); #endif } prof_booted = true; return false; } void prof_prefork0(tsdn_t *tsdn) { if (config_prof && opt_prof) { unsigned i; malloc_mutex_prefork(tsdn, &prof_dump_mtx); malloc_mutex_prefork(tsdn, &bt2gctx_mtx); malloc_mutex_prefork(tsdn, &tdatas_mtx); for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_prefork(tsdn, &tdata_locks[i]); } for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_prefork(tsdn, &gctx_locks[i]); } } } void prof_prefork1(tsdn_t *tsdn) { if (config_prof && opt_prof) { malloc_mutex_prefork(tsdn, &prof_active_mtx); malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); malloc_mutex_prefork(tsdn, &prof_gdump_mtx); malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); } } void prof_postfork_parent(tsdn_t *tsdn) { if (config_prof && opt_prof) { unsigned i; malloc_mutex_postfork_parent(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); } malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void prof_postfork_child(tsdn_t *tsdn) { if (config_prof && opt_prof) { unsigned i; malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); malloc_mutex_postfork_child(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); } malloc_mutex_postfork_child(tsdn, &tdatas_mtx); malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } /******************************************************************************/ jemalloc-sys-0.3.2/rep/src/rtree.c010064400007650000024000000211271344617474100152150ustar0000000000000000#define JEMALLOC_RTREE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" /* * Only the most significant bits of keys passed to rtree_{read,write}() are * used. */ bool rtree_new(rtree_t *rtree, bool zeroed) { #ifdef JEMALLOC_JET if (!zeroed) { memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */ } #else assert(zeroed); #endif if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, malloc_mutex_rank_exclusive)) { return true; } return false; } static rtree_node_elm_t * rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * sizeof(rtree_node_elm_t), CACHELINE); } rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; static void rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { /* Nodes are never deleted during normal operation. 
*/ not_reached(); } rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = rtree_node_dalloc_impl; static rtree_leaf_elm_t * rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * sizeof(rtree_leaf_elm_t), CACHELINE); } rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; static void rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { /* Leaves are never deleted during normal operation. */ not_reached(); } rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = rtree_leaf_dalloc_impl; #ifdef JEMALLOC_JET # if RTREE_HEIGHT > 1 static void rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, unsigned level) { size_t nchildren = ZU(1) << rtree_levels[level].bits; if (level + 2 < RTREE_HEIGHT) { for (size_t i = 0; i < nchildren; i++) { rtree_node_elm_t *node = (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, ATOMIC_RELAXED); if (node != NULL) { rtree_delete_subtree(tsdn, rtree, node, level + 1); } } } else { for (size_t i = 0; i < nchildren; i++) { rtree_leaf_elm_t *leaf = (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, ATOMIC_RELAXED); if (leaf != NULL) { rtree_leaf_dalloc(tsdn, rtree, leaf); } } } if (subtree != rtree->root) { rtree_node_dalloc(tsdn, rtree, subtree); } } # endif void rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { # if RTREE_HEIGHT > 1 rtree_delete_subtree(tsdn, rtree, rtree->root, 0); # endif } #endif
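/*
 * rtree_node_init()/rtree_leaf_init() below lazily publish a child under
 * init_lock: the pointer is stored with release semantics so that readers
 * who load it with acquire semantics (and never take the lock) also see the
 * child's initialized contents.  The same publication pattern on a toy
 * slot, using C11 atomics directly -- a sketch, not jemalloc code:
 */
#include <stdatomic.h>
#include <stdbool.h>
static int *
example_lazy_read(_Atomic(int *) *slot, bool dependent) {
	/*
	 * A "dependent" read follows a pointer the caller already proved
	 * was published, so relaxed ordering suffices; otherwise pair an
	 * acquire load with the publisher's release store.
	 */
	return dependent
	    ? atomic_load_explicit(slot, memory_order_relaxed)
	    : atomic_load_explicit(slot, memory_order_acquire);
}
static rtree_node_elm_t * rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, atomic_p_t *elmp) { malloc_mutex_lock(tsdn, &rtree->init_lock); /* * If *elmp is non-null, then it was initialized with the init lock * held, so we can get by with 'relaxed' here. */ rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); if (node == NULL) { node = rtree_node_alloc(tsdn, rtree, ZU(1) << rtree_levels[level].bits); if (node == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; } /* * Even though we hold the lock, a later reader might not; we * need release semantics. */ atomic_store_p(elmp, node, ATOMIC_RELEASE); } malloc_mutex_unlock(tsdn, &rtree->init_lock); return node; } static rtree_leaf_elm_t * rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { malloc_mutex_lock(tsdn, &rtree->init_lock); /* * If *elmp is non-null, then it was initialized with the init lock * held, so we can get by with 'relaxed' here. */ rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); if (leaf == NULL) { leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << rtree_levels[RTREE_HEIGHT-1].bits); if (leaf == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; } /* * Even though we hold the lock, a later reader might not; we * need release semantics. 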
*/ atomic_store_p(elmp, leaf, ATOMIC_RELEASE); } malloc_mutex_unlock(tsdn, &rtree->init_lock); return leaf; } static bool rtree_node_valid(rtree_node_elm_t *node) { return ((uintptr_t)node != (uintptr_t)0); } static bool rtree_leaf_valid(rtree_leaf_elm_t *leaf) { return ((uintptr_t)leaf != (uintptr_t)0); } static rtree_node_elm_t * rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *node; if (dependent) { node = (rtree_node_elm_t *)atomic_load_p(&elm->child, ATOMIC_RELAXED); } else { node = (rtree_node_elm_t *)atomic_load_p(&elm->child, ATOMIC_ACQUIRE); } assert(!dependent || node != NULL); return node; } static rtree_node_elm_t * rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent) { rtree_node_elm_t *node; node = rtree_child_node_tryread(elm, dependent); if (!dependent && unlikely(!rtree_node_valid(node))) { node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); } assert(!dependent || node != NULL); return node; } static rtree_leaf_elm_t * rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_leaf_elm_t *leaf; if (dependent) { leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, ATOMIC_RELAXED); } else { leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, ATOMIC_ACQUIRE); } assert(!dependent || leaf != NULL); return leaf; } static rtree_leaf_elm_t * rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent) { rtree_leaf_elm_t *leaf; leaf = rtree_child_leaf_tryread(elm, dependent); if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { leaf = rtree_leaf_init(tsdn, rtree, &elm->child); } assert(!dependent || leaf != NULL); return leaf; } rtree_leaf_elm_t * rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing) { rtree_node_elm_t *node; rtree_leaf_elm_t *leaf; #if RTREE_HEIGHT > 1 node = rtree->root; #else leaf = rtree->root; #endif if (config_debug) { uintptr_t leafkey = rtree_leafkey(key); for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { assert(rtree_ctx->cache[i].leafkey != leafkey); } for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { assert(rtree_ctx->l2_cache[i].leafkey != leafkey); } } #define RTREE_GET_CHILD(level) { \ assert(level < RTREE_HEIGHT-1); \ if (level != 0 && !dependent && \ unlikely(!rtree_node_valid(node))) { \ return NULL; \ } \ uintptr_t subkey = rtree_subkey(key, level); \ if (level + 2 < RTREE_HEIGHT) { \ node = init_missing ? \ rtree_child_node_read(tsdn, rtree, \ &node[subkey], level, dependent) : \ rtree_child_node_tryread(&node[subkey], \ dependent); \ } else { \ leaf = init_missing ? \ rtree_child_leaf_read(tsdn, rtree, \ &node[subkey], level, dependent) : \ rtree_child_leaf_tryread(&node[subkey], \ dependent); \ } \ } /* * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss): * (1) evict last entry in L2 cache; (2) move the collision slot from L1 * cache down to L2; and 3) fill L1. 
*/ #define RTREE_GET_LEAF(level) { \ assert(level == RTREE_HEIGHT-1); \ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ return NULL; \ } \ if (RTREE_CTX_NCACHE_L2 > 1) { \ memmove(&rtree_ctx->l2_cache[1], \ &rtree_ctx->l2_cache[0], \ sizeof(rtree_ctx_cache_elm_t) * \ (RTREE_CTX_NCACHE_L2 - 1)); \ } \ size_t slot = rtree_cache_direct_map(key); \ rtree_ctx->l2_cache[0].leafkey = \ rtree_ctx->cache[slot].leafkey; \ rtree_ctx->l2_cache[0].leaf = \ rtree_ctx->cache[slot].leaf; \ uintptr_t leafkey = rtree_leafkey(key); \ rtree_ctx->cache[slot].leafkey = leafkey; \ rtree_ctx->cache[slot].leaf = leaf; \ uintptr_t subkey = rtree_subkey(key, level); \ return &leaf[subkey]; \ } if (RTREE_HEIGHT > 1) { RTREE_GET_CHILD(0) } if (RTREE_HEIGHT > 2) { RTREE_GET_CHILD(1) } if (RTREE_HEIGHT > 3) { for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { RTREE_GET_CHILD(i) } } RTREE_GET_LEAF(RTREE_HEIGHT-1) #undef RTREE_GET_CHILD #undef RTREE_GET_LEAF not_reached(); } void rtree_ctx_data_init(rtree_ctx_t *ctx) { for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { rtree_ctx_cache_elm_t *cache = &ctx->cache[i]; cache->leafkey = RTREE_LEAFKEY_INVALID; cache->leaf = NULL; } for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i]; cache->leafkey = RTREE_LEAFKEY_INVALID; cache->leaf = NULL; } } jemalloc-sys-0.3.2/rep/src/sc.c010064400007650000024000000203611344617474100145000ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/sc.h" /* * This module computes the size classes used to satisfy allocations. The logic * here was ported more or less line-by-line from a shell script, and because of * that is not the most idiomatic C. Eventually we should fix this, but for now * at least the damage is compartmentalized to this file. */ sc_data_t sc_data_global; static size_t reg_size_compute(int lg_base, int lg_delta, int ndelta) { return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); } /* Returns the number of pages in the slab. */ static int slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) { size_t page = (ZU(1) << lg_page); size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta); size_t try_slab_size = page; size_t try_nregs = try_slab_size / reg_size; size_t perfect_slab_size = 0; bool perfect = false; /* * This loop continues until we find the least common multiple of the * page size and size class size. Size classes are all of the form * base + ndelta * delta == (ndelta + base/ndelta) * delta, which is * (ndelta + ngroup) * delta. The way we choose slabbing strategies * means that delta is at most the page size and ndelta < ngroup. So * the loop executes for at most 2 * ngroup - 1 iterations, which is * also the bound on the number of pages in a slab chosen by default. * With the current default settings, this is at most 7. */ while (!perfect) { perfect_slab_size = try_slab_size; size_t perfect_nregs = try_nregs; try_slab_size += page; try_nregs = try_slab_size / reg_size; if (perfect_slab_size == perfect_nregs * reg_size) { perfect = true; } } return (int)(perfect_slab_size / page); } static void size_class( /* Output. */ sc_t *sc, /* Configuration decisions. */ int lg_max_lookup, int lg_page, int lg_ngroup, /* Inputs specific to the size class. 
*/ int index, int lg_base, int lg_delta, int ndelta) { sc->index = index; sc->lg_base = lg_base; sc->lg_delta = lg_delta; sc->ndelta = ndelta; sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta) % (ZU(1) << lg_page) == 0); size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); if (index == 0) { assert(!sc->psz); } if (size < (ZU(1) << (lg_page + lg_ngroup))) { sc->bin = true; sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta); } else { sc->bin = false; sc->pgs = 0; } if (size <= (ZU(1) << lg_max_lookup)) { sc->lg_delta_lookup = lg_delta; } else { sc->lg_delta_lookup = 0; } } static void size_classes( /* Output. */ sc_data_t *sc_data, /* Determined by the system. */ size_t lg_ptr_size, int lg_quantum, /* Configuration decisions. */ int lg_tiny_min, int lg_max_lookup, int lg_page, int lg_ngroup) { int ptr_bits = (1 << lg_ptr_size) * 8; int ngroup = (1 << lg_ngroup); int ntiny = 0; int nlbins = 0; int lg_tiny_maxclass = (unsigned)-1; int nbins = 0; int npsizes = 0; int index = 0; int ndelta = 0; int lg_base = lg_tiny_min; int lg_delta = lg_base; /* Outputs that we update as we go. */ size_t lookup_maxclass = 0; size_t small_maxclass = 0; int lg_large_minclass = 0; size_t large_maxclass = 0; /* Tiny size classes. */ while (lg_base < lg_quantum) { sc_t *sc = &sc_data->sc[index]; size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, lg_base, lg_delta, ndelta); if (sc->lg_delta_lookup != 0) { nlbins = index + 1; } if (sc->psz) { npsizes++; } if (sc->bin) { nbins++; } ntiny++; /* Final written value is correct. */ lg_tiny_maxclass = lg_base; index++; lg_delta = lg_base; lg_base++; } /* First non-tiny (pseudo) group. */ if (ntiny != 0) { sc_t *sc = &sc_data->sc[index]; /* * See the note in sc.h; the first non-tiny size class has an * unusual encoding. */ lg_base--; ndelta = 1; size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, lg_base, lg_delta, ndelta); index++; lg_base++; lg_delta++; if (sc->psz) { npsizes++; } if (sc->bin) { nbins++; } } while (ndelta < ngroup) { sc_t *sc = &sc_data->sc[index]; size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, lg_base, lg_delta, ndelta); index++; ndelta++; if (sc->psz) { npsizes++; } if (sc->bin) { nbins++; } } /* All remaining groups. */ lg_base = lg_base + lg_ngroup; while (lg_base < ptr_bits - 1) { ndelta = 1; int ndelta_limit; if (lg_base == ptr_bits - 2) { ndelta_limit = ngroup - 1; } else { ndelta_limit = ngroup; } while (ndelta <= ndelta_limit) { sc_t *sc = &sc_data->sc[index]; size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, lg_base, lg_delta, ndelta); if (sc->lg_delta_lookup != 0) { nlbins = index + 1; /* Final written value is correct. */ lookup_maxclass = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); } if (sc->psz) { npsizes++; } if (sc->bin) { nbins++; /* Final written value is correct. */ small_maxclass = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); if (lg_ngroup > 0) { lg_large_minclass = lg_base + 1; } else { lg_large_minclass = lg_base + 2; } } large_maxclass = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); index++; ndelta++; } lg_base++; lg_delta++; } /* Additional outputs. */ int nsizes = index; unsigned lg_ceil_nsizes = lg_ceil(nsizes); /* Fill in the output data. 
*/ sc_data->ntiny = ntiny; sc_data->nlbins = nlbins; sc_data->nbins = nbins; sc_data->nsizes = nsizes; sc_data->lg_ceil_nsizes = lg_ceil_nsizes; sc_data->npsizes = npsizes; sc_data->lg_tiny_maxclass = lg_tiny_maxclass; sc_data->lookup_maxclass = lookup_maxclass; sc_data->small_maxclass = small_maxclass; sc_data->lg_large_minclass = lg_large_minclass; sc_data->large_minclass = (ZU(1) << lg_large_minclass); sc_data->large_maxclass = large_maxclass; /* * We compute these values in two ways: * - Incrementally, as above. * - In macros, in sc.h. * The computation is easier when done incrementally, but putting it in * a constant makes it available to the fast paths without having to * touch the extra global cacheline. We assert, however, that the two * computations are equivalent. */ assert(sc_data->npsizes == SC_NPSIZES); assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS); assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS); assert(sc_data->large_minclass == SC_LARGE_MINCLASS); assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS); assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS); /* * In the allocation fastpath, we want to assume that we can * unconditionally subtract the requested allocation size from * a ssize_t, and detect passing through 0 correctly. This * results in optimal generated code. For this to work, the * maximum allocation size must be less than SSIZE_MAX. */ assert(SC_LARGE_MAXCLASS < SSIZE_MAX); } void sc_data_init(sc_data_t *sc_data) { assert(!sc_data->initialized); int lg_max_lookup = 12; size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN, lg_max_lookup, LG_PAGE, 2); sc_data->initialized = true; }
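/*
 * Each size class is (1 << lg_base) + (ndelta << lg_delta), mirroring
 * reg_size_compute() above; e.g. lg_base = 13, lg_delta = 11, ndelta = 3
 * gives 8192 + 3 * 2048 = 14336, the third class in the group above 8 KiB.
 * A worked sketch of the same arithmetic, illustrative only:
 */
#include <stddef.h>
static size_t
example_reg_size(int lg_base, int lg_delta, int ndelta) {
	return ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
}
static void sc_data_update_sc_slab_size(sc_t *sc, size_t reg_size, size_t pgs_guess) { size_t min_pgs = reg_size / PAGE; if (reg_size % PAGE != 0) { min_pgs++; } /* * BITMAP_MAXBITS is actually determined by putting the smallest * possible size-class on one page, so this can never be 0. 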
*/ size_t max_pgs = BITMAP_MAXBITS * reg_size / PAGE; assert(min_pgs <= max_pgs); assert(min_pgs > 0); assert(max_pgs >= 1); if (pgs_guess < min_pgs) { sc->pgs = (int)min_pgs; } else if (pgs_guess > max_pgs) { sc->pgs = (int)max_pgs; } else { sc->pgs = (int)pgs_guess; } } void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) { assert(data->initialized); for (int i = 0; i < data->nsizes; i++) { sc_t *sc = &data->sc[i]; if (!sc->bin) { break; } size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta, sc->ndelta); if (begin <= reg_size && reg_size <= end) { sc_data_update_sc_slab_size(sc, reg_size, pgs); } } } void sc_boot(sc_data_t *data) { sc_data_init(data); } jemalloc-sys-0.3.2/rep/src/stats.c010064400007650000024000001336761344617474100152470ustar0000000000000000#define JEMALLOC_STATS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/emitter.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_prof.h" const char *global_mutex_names[mutex_prof_num_global_mutexes] = { #define OP(mtx) #mtx, MUTEX_PROF_GLOBAL_MUTEXES #undef OP }; const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { #define OP(mtx) #mtx, MUTEX_PROF_ARENA_MUTEXES #undef OP }; #define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_GET(n, i, v, t) do { \ size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_M4_GET(n, i, j, v, t) do { \ size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ mib[4] = (j); \ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0)
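/*
 * The CTL_M2_GET/CTL_M2_M4_GET macros above rewrite the placeholder name
 * components: "stats.arenas.0.pactive" is first translated to a MIB, then
 * mib[2] is overwritten with the real arena index before the by-MIB read.
 * The same trick through the public mallctl*() API -- a sketch with error
 * checks elided, not part of this file:
 */
#include <jemalloc/jemalloc.h>
static size_t
example_read_pactive(unsigned arena_ind) {
	size_t mib[7];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t pactive, sz = sizeof(pactive);
	mallctlnametomib("stats.arenas.0.pactive", mib, &miblen);
	mib[2] = arena_ind;	/* Patch in the arena of interest. */
	mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0);
	return pactive;
}
/******************************************************************************/ /* Data. */ bool opt_stats_print = false; char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; /******************************************************************************/ static uint64_t rate_per_second(uint64_t value, uint64_t uptime_ns) { uint64_t billion = 1000000000; if (uptime_ns == 0 || value == 0) { return 0; } if (uptime_ns < billion) { return value; } else { uint64_t uptime_s = uptime_ns / billion; return value / uptime_s; } } /* Calculate x.yyy and output a string (takes a fixed sized char array). */ static bool get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { if (divisor == 0 || dividend > divisor) { /* The rate is not supposed to be greater than 1. 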
*/ return true; } if (dividend > 0) { assert(UINT64_MAX / dividend >= 1000); } unsigned n = (unsigned)((dividend * 1000) / divisor); if (n < 10) { malloc_snprintf(str, 6, "0.00%u", n); } else if (n < 100) { malloc_snprintf(str, 6, "0.0%u", n); } else if (n < 1000) { malloc_snprintf(str, 6, "0.%u", n); } else { malloc_snprintf(str, 6, "1"); } return false; } #define MUTEX_CTL_STR_MAX_LENGTH 128 static void gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, const char *mutex, const char *counter) { malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); } static void mutex_stats_init_cols(emitter_row_t *row, const char *table_name, emitter_col_t *name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; emitter_col_t *col; if (name != NULL) { emitter_col_init(name, row); name->justify = emitter_justify_left; name->width = 21; name->type = emitter_type_title; name->str_val = table_name; } #define WIDTH_uint32_t 12 #define WIDTH_uint64_t 16 #define OP(counter, counter_type, human, derived, base_counter) \ col = &col_##counter_type[k_##counter_type]; \ ++k_##counter_type; \ emitter_col_init(col, row); \ col->justify = emitter_justify_right; \ col->width = derived ? 8 : WIDTH_##counter_type; \ col->type = emitter_type_title; \ col->str_val = human; MUTEX_PROF_COUNTERS #undef OP #undef WIDTH_uint32_t #undef WIDTH_uint64_t col_uint64_t[mutex_counter_total_wait_time_ps].width = 10; } static void mutex_stats_read_global(const char *name, emitter_col_t *col_name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], uint64_t uptime) { char cmd[MUTEX_CTL_STR_MAX_LENGTH]; col_name->str_val = name; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, counter_type, human, derived, base_counter) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ if (!derived) { \ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ "mutexes", name, #counter); \ CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \ } else { \ emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } static void mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind, const char *name, emitter_col_t *col_name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], uint64_t uptime) { char cmd[MUTEX_CTL_STR_MAX_LENGTH]; col_name->str_val = name; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, counter_type, human, derived, base_counter) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ if (!derived) { \ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\ CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \ } else { \ emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ dst->counter_type##_val = 
rate_per_second(base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } static void mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], uint64_t uptime) { char cmd[MUTEX_CTL_STR_MAX_LENGTH]; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, counter_type, human, derived, base_counter) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ if (!derived) { \ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ "arenas.0.bins.0","mutex", #counter); \ CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ (counter_type *)&dst->bool_val, counter_type); \ } else { \ emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } /* "row" can be NULL to avoid emitting in table mode. */ static void mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { if (row != NULL) { emitter_table_row(emitter, row); } mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; emitter_col_t *col; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 #define OP(counter, type, human, derived, base_counter) \ if (!derived) { \ col = &col_##type[k_##type]; \ ++k_##type; \ emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \ (const void *)&col->bool_val); \ } MUTEX_PROF_COUNTERS; #undef OP #undef EMITTER_TYPE_uint32_t #undef EMITTER_TYPE_uint64_t } #define COL(row_name, column_name, left_or_right, col_width, etype) \ emitter_col_t col_##column_name; \ emitter_col_init(&col_##column_name, &row_name); \ col_##column_name.justify = emitter_justify_##left_or_right; \ col_##column_name.width = col_width; \ col_##column_name.type = emitter_type_##etype; #define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \ COL(row_name, column_name, left_or_right, col_width, etype) \ emitter_col_t header_##column_name; \ emitter_col_init(&header_##column_name, &header_##row_name); \ header_##column_name.justify = emitter_justify_##left_or_right; \ header_##column_name.width = col_width; \ header_##column_name.type = emitter_type_title; \ header_##column_name.str_val = human ? 
human : #column_name; static void stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) { size_t page; bool in_gap, in_gap_prev; unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); CTL_GET("arenas.nbins", &nbins, unsigned); emitter_row_t header_row; emitter_row_init(&header_row); emitter_row_t row; emitter_row_init(&row); COL_HDR(row, size, NULL, right, 20, size) COL_HDR(row, ind, NULL, right, 4, unsigned) COL_HDR(row, allocated, NULL, right, 13, uint64) COL_HDR(row, nmalloc, NULL, right, 13, uint64) COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, ndalloc, NULL, right, 13, uint64) COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, nrequests, NULL, right, 13, uint64) COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64) COL_HDR(row, nshards, NULL, right, 9, unsigned) COL_HDR(row, curregs, NULL, right, 13, size) COL_HDR(row, curslabs, NULL, right, 13, size) COL_HDR(row, regs, NULL, right, 5, unsigned) COL_HDR(row, pgs, NULL, right, 4, size) /* To buffer a right- and left-justified column. */ COL_HDR(row, justify_spacer, NULL, right, 1, title) COL_HDR(row, util, NULL, right, 6, title) COL_HDR(row, nfills, NULL, right, 13, uint64) COL_HDR(row, nfills_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, nflushes, NULL, right, 13, uint64) COL_HDR(row, nflushes_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, nslabs, NULL, right, 13, uint64) COL_HDR(row, nreslabs, NULL, right, 13, uint64) COL_HDR(row, nreslabs_ps, "(#/sec)", right, 8, uint64) /* Don't want to actually print the name. */ header_justify_spacer.str_val = " "; col_justify_spacer.str_val = " "; emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters]; emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters]; emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters]; emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters]; if (mutex) { mutex_stats_init_cols(&row, NULL, NULL, col_mutex64, col_mutex32); mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64, header_mutex32); } /* * We print a "bins:" header as part of the table row; we need to adjust * the header size column to compensate. 
*/ header_size.width -=5; emitter_table_printf(emitter, "bins:"); emitter_table_row(emitter, &header_row); emitter_json_array_kv_begin(emitter, "bins"); for (j = 0, in_gap = false; j < nbins; j++) { uint64_t nslabs; size_t reg_size, slab_size, curregs; size_t curslabs; uint32_t nregs, nshards; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nreslabs; CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, uint64_t); in_gap_prev = in_gap; in_gap = (nslabs == 0); if (in_gap_prev && !in_gap) { emitter_table_printf(emitter, " ---\n"); } CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, size_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, &nrequests, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, size_t); if (mutex) { mutex_stats_read_arena_bin(i, j, col_mutex64, col_mutex32, uptime); } emitter_json_object_begin(emitter); emitter_json_kv(emitter, "nmalloc", emitter_type_uint64, &nmalloc); emitter_json_kv(emitter, "ndalloc", emitter_type_uint64, &ndalloc); emitter_json_kv(emitter, "curregs", emitter_type_size, &curregs); emitter_json_kv(emitter, "nrequests", emitter_type_uint64, &nrequests); emitter_json_kv(emitter, "nfills", emitter_type_uint64, &nfills); emitter_json_kv(emitter, "nflushes", emitter_type_uint64, &nflushes); emitter_json_kv(emitter, "nreslabs", emitter_type_uint64, &nreslabs); emitter_json_kv(emitter, "curslabs", emitter_type_size, &curslabs); if (mutex) { emitter_json_object_kv_begin(emitter, "mutex"); mutex_stats_emit(emitter, NULL, col_mutex64, col_mutex32); emitter_json_object_end(emitter); } emitter_json_object_end(emitter); size_t availregs = nregs * curslabs; char util[6]; if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util)) { if (availregs == 0) { malloc_snprintf(util, sizeof(util), "1"); } else if (curregs > availregs) { /* * Race detected: the counters were read in * separate mallctl calls and concurrent * operations happened in between. In this case * no meaningful utilization can be computed. 
*/ malloc_snprintf(util, sizeof(util), " race"); } else { not_reached(); } } col_size.size_val = reg_size; col_ind.unsigned_val = j; col_allocated.size_val = curregs * reg_size; col_nmalloc.uint64_val = nmalloc; col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime); col_ndalloc.uint64_val = ndalloc; col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); col_nrequests.uint64_val = nrequests; col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); col_nshards.unsigned_val = nshards; col_curregs.size_val = curregs; col_curslabs.size_val = curslabs; col_regs.unsigned_val = nregs; col_pgs.size_val = slab_size / page; col_util.str_val = util; col_nfills.uint64_val = nfills; col_nfills_ps.uint64_val = rate_per_second(nfills, uptime); col_nflushes.uint64_val = nflushes; col_nflushes_ps.uint64_val = rate_per_second(nflushes, uptime); col_nslabs.uint64_val = nslabs; col_nreslabs.uint64_val = nreslabs; col_nreslabs_ps.uint64_val = rate_per_second(nreslabs, uptime); /* * Note that mutex columns were initialized above, if mutex == * true. */ emitter_table_row(emitter, &row); } emitter_json_array_end(emitter); /* Close "bins". */ if (in_gap) { emitter_table_printf(emitter, " ---\n"); } } static void stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { unsigned nbins, nlextents, j; bool in_gap, in_gap_prev; CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlextents", &nlextents, unsigned); emitter_row_t header_row; emitter_row_init(&header_row); emitter_row_t row; emitter_row_init(&row); COL_HDR(row, size, NULL, right, 20, size) COL_HDR(row, ind, NULL, right, 4, unsigned) COL_HDR(row, allocated, NULL, right, 13, size) COL_HDR(row, nmalloc, NULL, right, 13, uint64) COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, ndalloc, NULL, right, 13, uint64) COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, nrequests, NULL, right, 13, uint64) COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, curlextents, NULL, right, 13, size) /* As with bins, we label the large extents table. 
*/ header_size.width -= 6; emitter_table_printf(emitter, "large:"); emitter_table_row(emitter, &header_row); emitter_json_array_kv_begin(emitter, "lextents"); for (j = 0, in_gap = false; j < nlextents; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t lextent_size, curlextents; CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, &nrequests, uint64_t); in_gap_prev = in_gap; in_gap = (nrequests == 0); if (in_gap_prev && !in_gap) { emitter_table_printf(emitter, " ---\n"); } CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, &curlextents, size_t); emitter_json_object_begin(emitter); emitter_json_kv(emitter, "curlextents", emitter_type_size, &curlextents); emitter_json_object_end(emitter); col_size.size_val = lextent_size; col_ind.unsigned_val = nbins + j; col_allocated.size_val = curlextents * lextent_size; col_nmalloc.uint64_val = nmalloc; col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime); col_ndalloc.uint64_val = ndalloc; col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); col_nrequests.uint64_val = nrequests; col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); col_curlextents.size_val = curlextents; if (!in_gap) { emitter_table_row(emitter, &row); } } emitter_json_array_end(emitter); /* Close "lextents". */ if (in_gap) { emitter_table_printf(emitter, " ---\n"); } } static void stats_arena_extents_print(emitter_t *emitter, unsigned i) { unsigned j; bool in_gap, in_gap_prev; emitter_row_t header_row; emitter_row_init(&header_row); emitter_row_t row; emitter_row_init(&row); COL_HDR(row, size, NULL, right, 20, size) COL_HDR(row, ind, NULL, right, 4, unsigned) COL_HDR(row, ndirty, NULL, right, 13, size) COL_HDR(row, dirty, NULL, right, 13, size) COL_HDR(row, nmuzzy, NULL, right, 13, size) COL_HDR(row, muzzy, NULL, right, 13, size) COL_HDR(row, nretained, NULL, right, 13, size) COL_HDR(row, retained, NULL, right, 13, size) COL_HDR(row, ntotal, NULL, right, 13, size) COL_HDR(row, total, NULL, right, 13, size) /* Label this section. 
	/* Label this section. */
	header_size.width -= 8;
	emitter_table_printf(emitter, "extents:");
	emitter_table_row(emitter, &header_row);
	emitter_json_array_kv_begin(emitter, "extents");

	in_gap = false;
	for (j = 0; j < SC_NPSIZES; j++) {
		size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
		    muzzy_bytes, retained_bytes, total_bytes;
		CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j,
		    &ndirty, size_t);
		CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j,
		    &nmuzzy, size_t);
		CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j,
		    &nretained, size_t);
		CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j,
		    &dirty_bytes, size_t);
		CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j,
		    &muzzy_bytes, size_t);
		CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j,
		    &retained_bytes, size_t);
		total = ndirty + nmuzzy + nretained;
		total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;

		in_gap_prev = in_gap;
		in_gap = (total == 0);

		if (in_gap_prev && !in_gap) {
			emitter_table_printf(emitter, " ---\n");
		}

		emitter_json_object_begin(emitter);
		emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty);
		emitter_json_kv(emitter, "nmuzzy", emitter_type_size, &nmuzzy);
		emitter_json_kv(emitter, "nretained", emitter_type_size,
		    &nretained);
		emitter_json_kv(emitter, "dirty_bytes", emitter_type_size,
		    &dirty_bytes);
		emitter_json_kv(emitter, "muzzy_bytes", emitter_type_size,
		    &muzzy_bytes);
		emitter_json_kv(emitter, "retained_bytes", emitter_type_size,
		    &retained_bytes);
		emitter_json_object_end(emitter);

		col_size.size_val = sz_pind2sz(j);
		col_ind.size_val = j;
		col_ndirty.size_val = ndirty;
		col_dirty.size_val = dirty_bytes;
		col_nmuzzy.size_val = nmuzzy;
		col_muzzy.size_val = muzzy_bytes;
		col_nretained.size_val = nretained;
		col_retained.size_val = retained_bytes;
		col_ntotal.size_val = total;
		col_total.size_val = total_bytes;

		if (!in_gap) {
			emitter_table_row(emitter, &row);
		}
	}
	emitter_json_array_end(emitter); /* Close "extents". */

	if (in_gap) {
		emitter_table_printf(emitter, " ---\n");
	}
}

static void
stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind,
    uint64_t uptime) {
	emitter_row_t row;
	emitter_col_t col_name;
	emitter_col_t col64[mutex_prof_num_uint64_t_counters];
	emitter_col_t col32[mutex_prof_num_uint32_t_counters];

	emitter_row_init(&row);
	mutex_stats_init_cols(&row, "", &col_name, col64, col32);

	emitter_json_object_kv_begin(emitter, "mutexes");
	emitter_table_row(emitter, &row);

	for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
	    i++) {
		const char *name = arena_mutex_names[i];
		emitter_json_object_kv_begin(emitter, name);
		mutex_stats_read_arena(arena_ind, i, name, &col_name, col64,
		    col32, uptime);
		mutex_stats_emit(emitter, &row, col64, col32);
		emitter_json_object_end(emitter); /* Close the mutex dict. */
	}
	emitter_json_object_end(emitter); /* End "mutexes". */
}
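/*
 * Illustrative sketch (an assumption about the emitted shape, not a verbatim
 * dump): in JSON mode the function above produces one dict per arena mutex,
 * keyed by its name from arena_mutex_names, with the counter values filled in
 * by mutex_stats_read_arena()/mutex_stats_emit():
 *
 *	"mutexes": {
 *		"<mutex name>": { "<counter>": <value>, ... },
 *		...
 *	}
 *
 * In table mode the same data is laid out as one row per mutex, using the
 * columns initialized by mutex_stats_init_cols().
 */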
static void
stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
    bool mutex, bool extents) {
	unsigned nthreads;
	const char *dss;
	ssize_t dirty_decay_ms, muzzy_decay_ms;
	size_t page, pactive, pdirty, pmuzzy, mapped, retained;
	size_t base, internal, resident, metadata_thp, extent_avail;
	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
	size_t small_allocated;
	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
	size_t large_allocated;
	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
	size_t tcache_bytes;
	uint64_t uptime;

	CTL_GET("arenas.page", &page, size_t);

	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
	emitter_kv(emitter, "nthreads", "assigned threads",
	    emitter_type_unsigned, &nthreads);

	CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
	emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64,
	    &uptime);

	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
	emitter_kv(emitter, "dss", "dss allocation precedence",
	    emitter_type_string, &dss);

	CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
	CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
	CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);

	emitter_row_t decay_row;
	emitter_row_init(&decay_row);

	/* JSON-style emission. */
	emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize,
	    &dirty_decay_ms);
	emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize,
	    &muzzy_decay_ms);
	emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive);
	emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty);
	emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy);
	emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64,
	    &dirty_npurge);
	emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64,
	    &dirty_nmadvise);
	emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64,
	    &dirty_purged);
	emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64,
	    &muzzy_npurge);
	emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64,
	    &muzzy_nmadvise);
	emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64,
	    &muzzy_purged);

	/* Table-style emission. */
	COL(decay_row, decay_type, right, 9, title);
	col_decay_type.str_val = "decaying:";

	COL(decay_row, decay_time, right, 6, title);
	col_decay_time.str_val = "time";

	COL(decay_row, decay_npages, right, 13, title);
	col_decay_npages.str_val = "npages";

	COL(decay_row, decay_sweeps, right, 13, title);
	col_decay_sweeps.str_val = "sweeps";

	COL(decay_row, decay_madvises, right, 13, title);
	col_decay_madvises.str_val = "madvises";

	COL(decay_row, decay_purged, right, 13, title);
	col_decay_purged.str_val = "purged";

	/* Title row. */
	emitter_table_row(emitter, &decay_row);
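	/*
	 * Illustrative sketch of the table-mode output built here (column
	 * widths 9/6/13/13/13/13 as configured above; the values are
	 * invented):
	 *
	 *	decaying:   time       npages       sweeps     madvises       purged
	 *	   dirty:  10000            2            3            3          200
	 *	   muzzy:      0            0            0            0            0
	 *
	 * The "dirty:" and "muzzy:" rows are filled in immediately below.
	 */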
	/* Dirty row. */
	col_decay_type.str_val = "dirty:";

	if (dirty_decay_ms >= 0) {
		col_decay_time.type = emitter_type_ssize;
		col_decay_time.ssize_val = dirty_decay_ms;
	} else {
		col_decay_time.type = emitter_type_title;
		col_decay_time.str_val = "N/A";
	}

	col_decay_npages.type = emitter_type_size;
	col_decay_npages.size_val = pdirty;

	col_decay_sweeps.type = emitter_type_uint64;
	col_decay_sweeps.uint64_val = dirty_npurge;

	col_decay_madvises.type = emitter_type_uint64;
	col_decay_madvises.uint64_val = dirty_nmadvise;

	col_decay_purged.type = emitter_type_uint64;
	col_decay_purged.uint64_val = dirty_purged;

	emitter_table_row(emitter, &decay_row);

	/* Muzzy row. */
	col_decay_type.str_val = "muzzy:";

	if (muzzy_decay_ms >= 0) {
		col_decay_time.type = emitter_type_ssize;
		col_decay_time.ssize_val = muzzy_decay_ms;
	} else {
		col_decay_time.type = emitter_type_title;
		col_decay_time.str_val = "N/A";
	}

	col_decay_npages.type = emitter_type_size;
	col_decay_npages.size_val = pmuzzy;

	col_decay_sweeps.type = emitter_type_uint64;
	col_decay_sweeps.uint64_val = muzzy_npurge;

	col_decay_madvises.type = emitter_type_uint64;
	col_decay_madvises.uint64_val = muzzy_nmadvise;

	col_decay_purged.type = emitter_type_uint64;
	col_decay_purged.uint64_val = muzzy_purged;

	emitter_table_row(emitter, &decay_row);

	/* Small / large / total allocation counts. */
	emitter_row_t alloc_count_row;
	emitter_row_init(&alloc_count_row);

	COL(alloc_count_row, count_title, left, 21, title);
	col_count_title.str_val = "";

	COL(alloc_count_row, count_allocated, right, 16, title);
	col_count_allocated.str_val = "allocated";

	COL(alloc_count_row, count_nmalloc, right, 16, title);
	col_count_nmalloc.str_val = "nmalloc";
	COL(alloc_count_row, count_nmalloc_ps, right, 8, title);
	col_count_nmalloc_ps.str_val = "(#/sec)";

	COL(alloc_count_row, count_ndalloc, right, 16, title);
	col_count_ndalloc.str_val = "ndalloc";
	COL(alloc_count_row, count_ndalloc_ps, right, 8, title);
	col_count_ndalloc_ps.str_val = "(#/sec)";

	COL(alloc_count_row, count_nrequests, right, 16, title);
	col_count_nrequests.str_val = "nrequests";
	COL(alloc_count_row, count_nrequests_ps, right, 10, title);
	col_count_nrequests_ps.str_val = "(#/sec)";

	emitter_table_row(emitter, &alloc_count_row);

	col_count_nmalloc_ps.type = emitter_type_uint64;
	col_count_ndalloc_ps.type = emitter_type_uint64;
	col_count_nrequests_ps.type = emitter_type_uint64;

#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype)		\
	CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i,	\
	    &small_or_large##_##name, valtype##_t);			\
	emitter_json_kv(emitter, #name, emitter_type_##valtype,		\
	    &small_or_large##_##name);					\
	col_count_##name.type = emitter_type_##valtype;			\
	col_count_##name.valtype##_val = small_or_large##_##name;

	emitter_json_object_kv_begin(emitter, "small");
	col_count_title.str_val = "small:";

	GET_AND_EMIT_ALLOC_STAT(small, allocated, size)
	GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64)
	col_count_nmalloc_ps.uint64_val =
	    rate_per_second(col_count_nmalloc.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64)
	col_count_ndalloc_ps.uint64_val =
	    rate_per_second(col_count_ndalloc.uint64_val, uptime);
	GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64)
	col_count_nrequests_ps.uint64_val =
	    rate_per_second(col_count_nrequests.uint64_val, uptime);

	emitter_table_row(emitter, &alloc_count_row);
	emitter_json_object_end(emitter); /* Close "small". */
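	/*
	 * Illustrative macro expansion (derived mechanically from the #define
	 * above): GET_AND_EMIT_ALLOC_STAT(small, allocated, size) becomes
	 *
	 *	CTL_M2_GET("stats.arenas.0.small.allocated", i,
	 *	    &small_allocated, size_t);
	 *	emitter_json_kv(emitter, "allocated", emitter_type_size,
	 *	    &small_allocated);
	 *	col_count_allocated.type = emitter_type_size;
	 *	col_count_allocated.size_val = small_allocated;
	 *
	 * i.e. one mallctl read feeds both the JSON key and the table column.
	 */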
*/ emitter_json_object_kv_begin(emitter, "large"); col_count_title.str_val = "large:"; GET_AND_EMIT_ALLOC_STAT(large, allocated, size) GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64) col_count_nmalloc_ps.uint64_val = rate_per_second(col_count_nmalloc.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64) col_count_ndalloc_ps.uint64_val = rate_per_second(col_count_ndalloc.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64) col_count_nrequests_ps.uint64_val = rate_per_second(col_count_nrequests.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_json_object_end(emitter); /* Close "large". */ #undef GET_AND_EMIT_ALLOC_STAT /* Aggregated small + large stats are emitter only in table mode. */ col_count_title.str_val = "total:"; col_count_allocated.size_val = small_allocated + large_allocated; col_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc; col_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc; col_count_nrequests.uint64_val = small_nrequests + large_nrequests; col_count_nmalloc_ps.uint64_val = rate_per_second(col_count_nmalloc.uint64_val, uptime); col_count_ndalloc_ps.uint64_val = rate_per_second(col_count_ndalloc.uint64_val, uptime); col_count_nrequests_ps.uint64_val = rate_per_second(col_count_nrequests.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_row_t mem_count_row; emitter_row_init(&mem_count_row); emitter_col_t mem_count_title; emitter_col_init(&mem_count_title, &mem_count_row); mem_count_title.justify = emitter_justify_left; mem_count_title.width = 21; mem_count_title.type = emitter_type_title; mem_count_title.str_val = ""; emitter_col_t mem_count_val; emitter_col_init(&mem_count_val, &mem_count_row); mem_count_val.justify = emitter_justify_right; mem_count_val.width = 16; mem_count_val.type = emitter_type_title; mem_count_val.str_val = ""; emitter_table_row(emitter, &mem_count_row); mem_count_val.type = emitter_type_size; /* Active count in bytes is emitted only in table mode. */ mem_count_title.str_val = "active:"; mem_count_val.size_val = pactive * page; emitter_table_row(emitter, &mem_count_row); #define GET_AND_EMIT_MEM_STAT(stat) \ CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \ emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \ mem_count_title.str_val = #stat":"; \ mem_count_val.size_val = stat; \ emitter_table_row(emitter, &mem_count_row); GET_AND_EMIT_MEM_STAT(mapped) GET_AND_EMIT_MEM_STAT(retained) GET_AND_EMIT_MEM_STAT(base) GET_AND_EMIT_MEM_STAT(internal) GET_AND_EMIT_MEM_STAT(metadata_thp) GET_AND_EMIT_MEM_STAT(tcache_bytes) GET_AND_EMIT_MEM_STAT(resident) GET_AND_EMIT_MEM_STAT(extent_avail) #undef GET_AND_EMIT_MEM_STAT if (mutex) { stats_arena_mutexes_print(emitter, i, uptime); } if (bins) { stats_arena_bins_print(emitter, mutex, i, uptime); } if (large) { stats_arena_lextents_print(emitter, i, uptime); } if (extents) { stats_arena_extents_print(emitter, i); } } static void stats_general_print(emitter_t *emitter) { const char *cpv; bool bv, bv2; unsigned uv; uint32_t u32v; uint64_t u64v; ssize_t ssv, ssv2; size_t sv, bsz, usz, ssz, sssz, cpsz; bsz = sizeof(bool); usz = sizeof(unsigned); ssz = sizeof(size_t); sssz = sizeof(ssize_t); cpsz = sizeof(const char *); CTL_GET("version", &cpv, const char *); emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv); /* config. 
*/ emitter_dict_begin(emitter, "config", "Build-time option settings"); #define CONFIG_WRITE_BOOL(name) \ do { \ CTL_GET("config."#name, &bv, bool); \ emitter_kv(emitter, #name, "config."#name, \ emitter_type_bool, &bv); \ } while (0) CONFIG_WRITE_BOOL(cache_oblivious); CONFIG_WRITE_BOOL(debug); CONFIG_WRITE_BOOL(fill); CONFIG_WRITE_BOOL(lazy_lock); emitter_kv(emitter, "malloc_conf", "config.malloc_conf", emitter_type_string, &config_malloc_conf); CONFIG_WRITE_BOOL(prof); CONFIG_WRITE_BOOL(prof_libgcc); CONFIG_WRITE_BOOL(prof_libunwind); CONFIG_WRITE_BOOL(stats); CONFIG_WRITE_BOOL(utrace); CONFIG_WRITE_BOOL(xmalloc); #undef CONFIG_WRITE_BOOL emitter_dict_end(emitter); /* Close "config" dict. */ /* opt. */ #define OPT_WRITE(name, var, size, emitter_type) \ if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \ 0) { \ emitter_kv(emitter, name, "opt."name, emitter_type, \ &var); \ } #define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \ altname) \ if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \ 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \ == 0) { \ emitter_kv_note(emitter, name, "opt."name, \ emitter_type, &var1, altname, emitter_type, \ &var2); \ } #define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool) #define OPT_WRITE_BOOL_MUTABLE(name, altname) \ OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname) #define OPT_WRITE_UNSIGNED(name) \ OPT_WRITE(name, uv, usz, emitter_type_unsigned) #define OPT_WRITE_SIZE_T(name) \ OPT_WRITE(name, sv, ssz, emitter_type_size) #define OPT_WRITE_SSIZE_T(name) \ OPT_WRITE(name, ssv, sssz, emitter_type_ssize) #define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \ OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \ altname) #define OPT_WRITE_CHAR_P(name) \ OPT_WRITE(name, cpv, cpsz, emitter_type_string) emitter_dict_begin(emitter, "opt", "Run-time option settings"); OPT_WRITE_BOOL("abort") OPT_WRITE_BOOL("abort_conf") OPT_WRITE_BOOL("retain") OPT_WRITE_CHAR_P("dss") OPT_WRITE_UNSIGNED("narenas") OPT_WRITE_CHAR_P("percpu_arena") OPT_WRITE_SIZE_T("oversize_threshold") OPT_WRITE_CHAR_P("metadata_thp") OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread") OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms") OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms") OPT_WRITE_SIZE_T("lg_extent_max_active_fit") OPT_WRITE_CHAR_P("junk") OPT_WRITE_BOOL("zero") OPT_WRITE_BOOL("utrace") OPT_WRITE_BOOL("xmalloc") OPT_WRITE_BOOL("tcache") OPT_WRITE_SSIZE_T("lg_tcache_max") OPT_WRITE_CHAR_P("thp") OPT_WRITE_BOOL("prof") OPT_WRITE_CHAR_P("prof_prefix") OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active") OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init", "prof.thread_active_init") OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample") OPT_WRITE_BOOL("prof_accum") OPT_WRITE_SSIZE_T("lg_prof_interval") OPT_WRITE_BOOL("prof_gdump") OPT_WRITE_BOOL("prof_final") OPT_WRITE_BOOL("prof_leak") OPT_WRITE_BOOL("stats_print") OPT_WRITE_CHAR_P("stats_print_opts") emitter_dict_end(emitter); #undef OPT_WRITE #undef OPT_WRITE_MUTABLE #undef OPT_WRITE_BOOL #undef OPT_WRITE_BOOL_MUTABLE #undef OPT_WRITE_UNSIGNED #undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_SSIZE_T_MUTABLE #undef OPT_WRITE_CHAR_P /* prof. 
	/* prof. */
	if (config_prof) {
		emitter_dict_begin(emitter, "prof", "Profiling settings");

		CTL_GET("prof.thread_active_init", &bv, bool);
		emitter_kv(emitter, "thread_active_init",
		    "prof.thread_active_init", emitter_type_bool, &bv);

		CTL_GET("prof.active", &bv, bool);
		emitter_kv(emitter, "active", "prof.active",
		    emitter_type_bool, &bv);

		CTL_GET("prof.gdump", &bv, bool);
		emitter_kv(emitter, "gdump", "prof.gdump",
		    emitter_type_bool, &bv);

		CTL_GET("prof.interval", &u64v, uint64_t);
		emitter_kv(emitter, "interval", "prof.interval",
		    emitter_type_uint64, &u64v);

		CTL_GET("prof.lg_sample", &ssv, ssize_t);
		emitter_kv(emitter, "lg_sample", "prof.lg_sample",
		    emitter_type_ssize, &ssv);

		emitter_dict_end(emitter); /* Close "prof". */
	}

	/* arenas. */
	/*
	 * The json output sticks arena info into an "arenas" dict; the table
	 * output puts them at the top-level.
	 */
	emitter_json_object_kv_begin(emitter, "arenas");

	CTL_GET("arenas.narenas", &uv, unsigned);
	emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv);

	/*
	 * Decay settings are emitted only in json mode; in table mode, they're
	 * emitted as notes with the opt output, above.
	 */
	CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
	emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv);

	CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
	emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv);

	CTL_GET("arenas.quantum", &sv, size_t);
	emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv);

	CTL_GET("arenas.page", &sv, size_t);
	emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv);

	if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
		emitter_kv(emitter, "tcache_max",
		    "Maximum thread-cached size class", emitter_type_size,
		    &sv);
	}

	unsigned nbins;
	CTL_GET("arenas.nbins", &nbins, unsigned);
	emitter_kv(emitter, "nbins", "Number of bin size classes",
	    emitter_type_unsigned, &nbins);

	unsigned nhbins;
	CTL_GET("arenas.nhbins", &nhbins, unsigned);
	emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
	    emitter_type_unsigned, &nhbins);

	/*
	 * We do enough mallctls in a loop that we actually want to omit them
	 * (not just omit the printing).
	 */
	if (emitter->output == emitter_output_json) {
		emitter_json_array_kv_begin(emitter, "bin");
		for (unsigned i = 0; i < nbins; i++) {
			emitter_json_object_begin(emitter);

			CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
			emitter_json_kv(emitter, "size", emitter_type_size,
			    &sv);

			CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
			emitter_json_kv(emitter, "nregs", emitter_type_uint32,
			    &u32v);

			CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
			emitter_json_kv(emitter, "slab_size",
			    emitter_type_size, &sv);

			CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t);
			emitter_json_kv(emitter, "nshards",
			    emitter_type_uint32, &u32v);

			emitter_json_object_end(emitter);
		}
		emitter_json_array_end(emitter); /* Close "bin". */
	}

	unsigned nlextents;
	CTL_GET("arenas.nlextents", &nlextents, unsigned);
	emitter_kv(emitter, "nlextents", "Number of large size classes",
	    emitter_type_unsigned, &nlextents);

	if (emitter->output == emitter_output_json) {
		emitter_json_array_kv_begin(emitter, "lextent");
		for (unsigned i = 0; i < nlextents; i++) {
			emitter_json_object_begin(emitter);

			CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
			emitter_json_kv(emitter, "size", emitter_type_size,
			    &sv);

			emitter_json_object_end(emitter);
		}
		emitter_json_array_end(emitter); /* Close "lextent". */
	}
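	/*
	 * Illustrative sketch of the "arenas" JSON dict assembled above
	 * (abridged; the values are invented):
	 *
	 *	"arenas": {
	 *		"narenas": 8,
	 *		"dirty_decay_ms": 10000,
	 *		"muzzy_decay_ms": 0,
	 *		"quantum": 16,
	 *		"page": 4096,
	 *		"bin": [ {"size": 8, "nregs": 512, ...}, ... ],
	 *		"lextent": [ {"size": 16384}, ... ]
	 *	}
	 */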
	emitter_json_object_end(emitter); /* Close "arenas" */
}

static void
stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
    bool unmerged, bool bins, bool large, bool mutex, bool extents) {
	/*
	 * These should be deleted.  We keep them around for a while, to aid in
	 * the transition to the emitter code.
	 */
	size_t allocated, active, metadata, metadata_thp, resident, mapped,
	    retained;
	size_t num_background_threads;
	uint64_t background_thread_num_runs, background_thread_run_interval;

	CTL_GET("stats.allocated", &allocated, size_t);
	CTL_GET("stats.active", &active, size_t);
	CTL_GET("stats.metadata", &metadata, size_t);
	CTL_GET("stats.metadata_thp", &metadata_thp, size_t);
	CTL_GET("stats.resident", &resident, size_t);
	CTL_GET("stats.mapped", &mapped, size_t);
	CTL_GET("stats.retained", &retained, size_t);

	if (have_background_thread) {
		CTL_GET("stats.background_thread.num_threads",
		    &num_background_threads, size_t);
		CTL_GET("stats.background_thread.num_runs",
		    &background_thread_num_runs, uint64_t);
		CTL_GET("stats.background_thread.run_interval",
		    &background_thread_run_interval, uint64_t);
	} else {
		num_background_threads = 0;
		background_thread_num_runs = 0;
		background_thread_run_interval = 0;
	}

	/* Generic global stats. */
	emitter_json_object_kv_begin(emitter, "stats");
	emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated);
	emitter_json_kv(emitter, "active", emitter_type_size, &active);
	emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata);
	emitter_json_kv(emitter, "metadata_thp", emitter_type_size,
	    &metadata_thp);
	emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
	emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
	emitter_json_kv(emitter, "retained", emitter_type_size, &retained);

	emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
	    "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
	    "retained: %zu\n", allocated, active, metadata, metadata_thp,
	    resident, mapped, retained);

	/* Background thread stats. */
	emitter_json_object_kv_begin(emitter, "background_thread");
	emitter_json_kv(emitter, "num_threads", emitter_type_size,
	    &num_background_threads);
	emitter_json_kv(emitter, "num_runs", emitter_type_uint64,
	    &background_thread_num_runs);
	emitter_json_kv(emitter, "run_interval", emitter_type_uint64,
	    &background_thread_run_interval);
	emitter_json_object_end(emitter); /* Close "background_thread". */

	emitter_table_printf(emitter, "Background threads: %zu, "
	    "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n",
	    num_background_threads, background_thread_num_runs,
	    background_thread_run_interval);

	if (mutex) {
		emitter_row_t row;
		emitter_col_t name;
		emitter_col_t col64[mutex_prof_num_uint64_t_counters];
		emitter_col_t col32[mutex_prof_num_uint32_t_counters];
		uint64_t uptime;

		emitter_row_init(&row);
		mutex_stats_init_cols(&row, "", &name, col64, col32);

		emitter_table_row(emitter, &row);
		emitter_json_object_kv_begin(emitter, "mutexes");

		CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);

		for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
			mutex_stats_read_global(global_mutex_names[i], &name,
			    col64, col32, uptime);
			emitter_json_object_kv_begin(emitter,
			    global_mutex_names[i]);
			mutex_stats_emit(emitter, &row, col64, col32);
			emitter_json_object_end(emitter);
		}
		emitter_json_object_end(emitter); /* Close "mutexes". */
	}
	emitter_json_object_end(emitter); /* Close "stats". */
	if (merged || destroyed || unmerged) {
		unsigned narenas;

		emitter_json_object_kv_begin(emitter, "stats.arenas");

		CTL_GET("arenas.narenas", &narenas, unsigned);
		size_t mib[3];
		size_t miblen = sizeof(mib) / sizeof(size_t);
		size_t sz;
		VARIABLE_ARRAY(bool, initialized, narenas);
		bool destroyed_initialized;
		unsigned i, j, ninitialized;

		xmallctlnametomib("arena.0.initialized", mib, &miblen);
		for (i = ninitialized = 0; i < narenas; i++) {
			mib[1] = i;
			sz = sizeof(bool);
			xmallctlbymib(mib, miblen, &initialized[i], &sz,
			    NULL, 0);
			if (initialized[i]) {
				ninitialized++;
			}
		}
		mib[1] = MALLCTL_ARENAS_DESTROYED;
		sz = sizeof(bool);
		xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
		    NULL, 0);

		/* Merged stats. */
		if (merged && (ninitialized > 1 || !unmerged)) {
			/* Print merged arena stats. */
			emitter_table_printf(emitter, "Merged arenas stats:\n");
			emitter_json_object_kv_begin(emitter, "merged");
			stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
			    large, mutex, extents);
			emitter_json_object_end(emitter); /* Close "merged". */
		}

		/* Destroyed stats. */
		if (destroyed_initialized && destroyed) {
			/* Print destroyed arena stats. */
			emitter_table_printf(emitter,
			    "Destroyed arenas stats:\n");
			emitter_json_object_kv_begin(emitter, "destroyed");
			stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
			    bins, large, mutex, extents);
			emitter_json_object_end(emitter); /* Close "destroyed". */
		}

		/* Unmerged stats. */
		if (unmerged) {
			for (i = j = 0; i < narenas; i++) {
				if (initialized[i]) {
					char arena_ind_str[20];
					malloc_snprintf(arena_ind_str,
					    sizeof(arena_ind_str), "%u", i);
					emitter_json_object_kv_begin(emitter,
					    arena_ind_str);
					emitter_table_printf(emitter,
					    "arenas[%s]:\n", arena_ind_str);
					stats_arena_print(emitter, i, bins,
					    large, mutex, extents);
					/* Close the arena-index dict. */
					emitter_json_object_end(emitter);
				}
			}
		}
		emitter_json_object_end(emitter); /* Close "stats.arenas". */
	}
}

void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts) {
	int err;
	uint64_t epoch;
	size_t u64sz;
#define OPTION(o, v, d, s) bool v = d;
	STATS_PRINT_OPTIONS
#undef OPTION

	/*
	 * Refresh stats, in case mallctl() was called by the application.
	 *
	 * Check for OOM here, since refreshing the ctl cache can trigger
	 * allocation.  In practice, none of the subsequent mallctl()-related
	 * calls in this function will cause OOM if this one succeeds.
	 */
	epoch = 1;
	u64sz = sizeof(uint64_t);
	err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
	    sizeof(uint64_t));
	if (err != 0) {
		if (err == EAGAIN) {
			malloc_write("<jemalloc>: Memory allocation failure "
			    "in mallctl(\"epoch\", ...)\n");
			return;
		}
		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
		    "...)\n");
		abort();
	}

	if (opts != NULL) {
		for (unsigned i = 0; opts[i] != '\0'; i++) {
			switch (opts[i]) {
#define OPTION(o, v, d, s) case o: v = s; break;
				STATS_PRINT_OPTIONS
#undef OPTION
			default:;
			}
		}
	}

	emitter_t emitter;
	emitter_init(&emitter,
	    json ? emitter_output_json : emitter_output_table, write_cb,
	    cbopaque);
	emitter_begin(&emitter);
	emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
	emitter_json_object_kv_begin(&emitter, "jemalloc");

	if (general) {
		stats_general_print(&emitter);
	}
	if (config_stats) {
		stats_print_helper(&emitter, merged, destroyed, unmerged,
		    bins, large, mutex, extents);
	}

	emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
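	/*
	 * Illustrative usage sketch (application side, not part of this
	 * file): this function backs the public malloc_stats_print() entry
	 * point, so the output above is typically requested as
	 *
	 *	malloc_stats_print(NULL, NULL, "J");   // JSON, all sections
	 *	malloc_stats_print(NULL, NULL, "gbl"); // table; skip general,
	 *	                                       // bins, and large
	 *
	 * where each option character toggles one of the STATS_PRINT_OPTIONS
	 * parsed in the loop above.
	 */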
*/ emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n"); emitter_end(&emitter); } jemalloc-sys-0.3.2/rep/src/sz.c010064400007650000024000000033711344617474100145310ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/sz.h" JEMALLOC_ALIGNED(CACHELINE) size_t sz_pind2sz_tab[SC_NPSIZES+1]; static void sz_boot_pind2sz_tab(const sc_data_t *sc_data) { int pind = 0; for (unsigned i = 0; i < SC_NSIZES; i++) { const sc_t *sc = &sc_data->sc[i]; if (sc->psz) { sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base) + (ZU(sc->ndelta) << sc->lg_delta); pind++; } } for (int i = pind; i <= (int)SC_NPSIZES; i++) { sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE; } } JEMALLOC_ALIGNED(CACHELINE) size_t sz_index2size_tab[SC_NSIZES]; static void sz_boot_index2size_tab(const sc_data_t *sc_data) { for (unsigned i = 0; i < SC_NSIZES; i++) { const sc_t *sc = &sc_data->sc[i]; sz_index2size_tab[i] = (ZU(1) << sc->lg_base) + (ZU(sc->ndelta) << (sc->lg_delta)); } } /* * To keep this table small, we divide sizes by the tiny min size, which gives * the smallest interval for which the result can change. */ JEMALLOC_ALIGNED(CACHELINE) uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1]; static void sz_boot_size2index_tab(const sc_data_t *sc_data) { size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1; size_t dst_ind = 0; for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max; sc_ind++) { const sc_t *sc = &sc_data->sc[sc_ind]; size_t sz = (ZU(1) << sc->lg_base) + (ZU(sc->ndelta) << sc->lg_delta); size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1) >> SC_LG_TINY_MIN); for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) { sz_size2index_tab[dst_ind] = sc_ind; } } } void sz_boot(const sc_data_t *sc_data) { sz_boot_pind2sz_tab(sc_data); sz_boot_index2size_tab(sc_data); sz_boot_size2index_tab(sc_data); } jemalloc-sys-0.3.2/rep/src/tcache.c010064400007650000024000000532531344617474100153300ustar0000000000000000#define JEMALLOC_TCACHE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sc.h" /******************************************************************************/ /* Data. */ bool opt_tcache = true; ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; cache_bin_info_t *tcache_bin_info; static unsigned stack_nelms; /* Total stack elms per tcache. */ unsigned nhbins; size_t tcache_maxclass; tcaches_t *tcaches; /* Index of first element within tcaches that has never been used. */ static unsigned tcaches_past; /* Head of singly linked list tracking available tcaches elements. */ static tcaches_t *tcaches_avail; /* Protects tcaches{,_past,_avail}. */ static malloc_mutex_t tcaches_mtx; /******************************************************************************/ size_t tcache_salloc(tsdn_t *tsdn, const void *ptr) { return arena_salloc(tsdn, ptr); } void tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { szind_t binind = tcache->next_gc_bin; cache_bin_t *tbin; if (binind < SC_NBINS) { tbin = tcache_small_bin_get(tcache, binind); } else { tbin = tcache_large_bin_get(tcache, binind); } if (tbin->low_water > 0) { /* * Flush (ceiling) 3/4 of the objects below the low water mark. */ if (binind < SC_NBINS) { tcache_bin_flush_small(tsd, tcache, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2)); /* * Reduce fill count by 2X. 
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such
			 * that the fill count is always at least 1.
			 */
			cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
			if ((tbin_info->ncached_max >>
			    (tcache->lg_fill_div[binind] + 1)) >= 1) {
				tcache->lg_fill_div[binind]++;
			}
		} else {
			tcache_bin_flush_large(tsd, tbin, binind,
			    tbin->ncached - tbin->low_water +
			    (tbin->low_water >> 2), tcache);
		}
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X for small bins.  Make sure
		 * lg_fill_div stays greater than 0.
		 */
		if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
			tcache->lg_fill_div[binind]--;
		}
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins) {
		tcache->next_gc_bin = 0;
	}
}

void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
	void *ret;

	assert(tcache->arena != NULL);
	arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof) {
		tcache->prof_accumbytes = 0;
	}
	ret = cache_bin_alloc_easy(tbin, tcache_success);

	return ret;
}

/* Enabled with --enable-extra-size-check. */
#ifdef JEMALLOC_EXTRA_SIZE_CHECK
static void
tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
    size_t nflush, extent_t **extents){
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	/*
	 * Verify that the items in the tcache all have the correct size; this
	 * is useful for catching sized deallocation bugs, also to fail early
	 * instead of corrupting metadata.  Since this can be turned on for opt
	 * builds, avoid the branch in the loop.
	 */
	szind_t szind;
	size_t sz_sum = binind * nflush;
	for (unsigned i = 0 ; i < nflush; i++) {
		rtree_extent_szind_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
		    &extents[i], &szind);
		sz_sum -= szind;
	}
	if (sz_sum != 0) {
		abort();
	}
}
#endif

void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem) {
	bool merged_stats = false;

	assert(binind < SC_NBINS);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);

#ifndef JEMALLOC_EXTRA_SIZE_CHECK
	/* Look up extent once per item. */
	for (unsigned i = 0 ; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}
#else
	tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
	    item_extent);
#endif
	while (nflush > 0) {
		/* Lock the arena bin associated with the first object. */
		extent_t *extent = item_extent[0];
		unsigned bin_arena_ind = extent_arena_ind_get(extent);
		arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
		    false);
		unsigned binshard = extent_binshard_get(extent);
		assert(binshard < bin_infos[binind].n_shards);
		bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(tsd_tsdn(tsd), arena,
			    tcache->prof_accumbytes)) {
				prof_idump(tsd_tsdn(tsd));
			}
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (config_stats && bin_arena == arena && !merged_stats) {
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_ind_get(extent) == bin_arena_ind
			    && extent_binshard_get(extent) == binshard) {
				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
				    bin_arena, bin, binind, extent, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		unsigned binshard;
		bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
		    &binshard);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache) {
	bool merged_stats = false;

	assert(binind < nhbins);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *tcache_arena = tcache->arena;
	assert(tcache_arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);

#ifndef JEMALLOC_EXTRA_SIZE_CHECK
	/* Look up extent once per item. */
	for (unsigned i = 0 ; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}
#else
	tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
	    item_extent);
#endif
	while (nflush > 0) {
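		/*
		 * Pass structure (descriptive note): as in
		 * tcache_bin_flush_small() above, each iteration locks the
		 * arena owning the first remaining object, disposes of every
		 * object belonging to that arena, and compacts the rest to
		 * the front (ndeferred) for a later pass.  E.g. with owners
		 * {A, B, A, C}: pass 1 frees the two A objects and defers
		 * {B, C}; pass 2 frees B; pass 3 frees C and nflush reaches 0.
		 */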
		/* Lock the arena associated with the first object. */
		extent_t *extent = item_extent[0];
		unsigned locked_arena_ind = extent_arena_ind_get(extent);
		arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
		    locked_arena_ind, false);
		bool idump;

		if (config_prof) {
			idump = false;
		}

		bool lock_large = !arena_is_auto(locked_arena);
		if (lock_large) {
			malloc_mutex_lock(tsd_tsdn(tsd),
			    &locked_arena->large_mtx);
		}
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			assert(ptr != NULL);
			extent = item_extent[i];
			if (extent_arena_ind_get(extent) == locked_arena_ind) {
				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
				    extent);
			}
		}
		if ((config_prof || config_stats) &&
		    (locked_arena == tcache_arena)) {
			if (config_prof) {
				idump = arena_prof_accum(tsd_tsdn(tsd),
				    tcache_arena, tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena_stats_large_nrequests_add(tsd_tsdn(tsd),
				    &tcache_arena->stats, binind,
				    tbin->tstats.nrequests);
				tbin->tstats.nrequests = 0;
			}
		}
		if (lock_large) {
			malloc_mutex_unlock(tsd_tsdn(tsd),
			    &locked_arena->large_mtx);
		}

		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_ind_get(extent) == locked_arena_ind) {
				large_dalloc_finish(tsd_tsdn(tsd), extent);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		if (config_prof && idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
		    ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_stats_large_nrequests_add(tsd_tsdn(tsd),
		    &tcache_arena->stats, binind, tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	assert(tcache->arena == NULL);
	tcache->arena = arena;

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);

		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		cache_bin_array_descriptor_init(
		    &tcache->cache_bin_array_descriptor, tcache->bins_small,
		    tcache->bins_large);
		ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);

		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
}

static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		ql_remove(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);
		tcache_stats_merge(tsdn, tcache, arena);
		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
	tcache->arena = NULL;
}

void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	tcache_arena_dissociate(tsdn, tcache);
	tcache_arena_associate(tsdn, tcache, arena);
}

bool
tsd_tcache_enabled_data_init(tsd_t *tsd) {
	/* Called upon tsd initialization. */
	tsd_tcache_enabled_set(tsd, opt_tcache);
	tsd_slow_update(tsd);

	if (opt_tcache) {
		/* Trigger tcache init. */
		tsd_tcache_data_init(tsd);
	}

	return false;
}

/* Initialize auto tcache (embedded in TSD). */
static void
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
	memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
	tcache->prof_accumbytes = 0;
	tcache->next_gc_bin = 0;
	tcache->arena = NULL;

	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

	size_t stack_offset = 0;
	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
	memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins -
	    SC_NBINS));
	unsigned i = 0;
	for (; i < SC_NBINS; i++) {
		tcache->lg_fill_div[i] = 1;
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		/*
		 * avail points past the available space.  Allocations will
		 * access the slots toward higher addresses (for the benefit
		 * of prefetch).
		 */
		tcache_small_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack +
		    (uintptr_t)stack_offset);
	}
	for (; i < nhbins; i++) {
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		tcache_large_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack +
		    (uintptr_t)stack_offset);
	}
	assert(stack_offset == stack_nelms * sizeof(void *));
}

/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
	assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
	size_t size = stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
	    NULL, true, arena_get(TSDN_NULL, 0, true));
	if (avail_array == NULL) {
		return true;
	}

	tcache_init(tsd, tcache, avail_array);
	/*
	 * Initialization is a bit tricky here.  After malloc init is done, all
	 * threads can rely on arena_choose and associate tcache accordingly.
	 * However, the thread that does actual malloc bootstrapping relies on
	 * functional tsd, and it can only rely on a0.  In that case, we
	 * associate its tcache to a0 temporarily, and later on
	 * arena_choose_hard() will re-associate properly.
	 */
	tcache->arena = NULL;
	arena_t *arena;
	if (!malloc_initialized()) {
		/* If in initialization, assign to a0. */
		arena = arena_get(tsd_tsdn(tsd), 0, false);
		tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
	} else {
		arena = arena_choose(tsd, NULL);
		/* This may happen if thread.tcache.enabled is used. */
		if (tcache->arena == NULL) {
			tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
		}
	}
	assert(arena == tcache->arena);

	return false;
}

/* Created manual tcache for tcache.create mallctl. */
tcache_t *
tcache_create_explicit(tsd_t *tsd) {
	tcache_t *tcache;
	size_t size, stack_offset;

	size = sizeof(tcache_t);
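	/*
	 * Memory layout sketch (illustrative): a manual tcache is one
	 * allocation holding the struct followed by the per-bin pointer
	 * stacks, unlike the auto tcache whose struct lives in TSD and whose
	 * stacks are a separate allocation (see tsd_tcache_data_init()):
	 *
	 *	[ tcache_t | padding to pointer alignment | avail stacks ]
	 *	^ tcache     ^ stack_offset
	 */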
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
	    arena_get(TSDN_NULL, 0, true));
	if (tcache == NULL) {
		return NULL;
	}

	tcache_init(tsd, tcache,
	    (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
	tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));

	return tcache;
}

static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
	assert(tcache->arena != NULL);

	for (unsigned i = 0; i < SC_NBINS; i++) {
		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}
	for (unsigned i = SC_NBINS; i < nhbins; i++) {
		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
	    tcache->prof_accumbytes)) {
		prof_idump(tsd_tsdn(tsd));
	}
}

void
tcache_flush(tsd_t *tsd) {
	assert(tcache_available(tsd));
	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
}

static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
	tcache_flush_cache(tsd, tcache);
	arena_t *arena = tcache->arena;
	tcache_arena_dissociate(tsd_tsdn(tsd), tcache);

	if (tsd_tcache) {
		/* Release the avail array for the TSD embedded auto tcache. */
		void *avail_array =
		    (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
		    (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
		idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
	} else {
		/* Release both the tcache struct and avail array. */
		idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
	}

	/*
	 * The deallocation and tcache flush above may not trigger decay since
	 * we are on the tcache shutdown path (potentially with non-nominal
	 * tsd).  Manually trigger decay to avoid pathological cases.  Also
	 * include arena 0 because the tcache array is allocated from it.
	 */
	arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false), false,
	    false);

	if (arena_nthreads_get(arena, false) == 0 &&
	    !background_thread_enabled()) {
		/* Force purging when no threads assigned to the arena anymore. */
		arena_decay(tsd_tsdn(tsd), arena, false, true);
	} else {
		arena_decay(tsd_tsdn(tsd), arena, false, false);
	}
}

/* For auto tcache (embedded in TSD) only. */
void
tcache_cleanup(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get(tsd);
	if (!tcache_available(tsd)) {
		assert(tsd_tcache_enabled_get(tsd) == false);
		if (config_debug) {
			assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
		}
		return;
	}
	assert(tsd_tcache_enabled_get(tsd));
	assert(tcache_small_bin_get(tcache, 0)->avail != NULL);

	tcache_destroy(tsd, tcache, true);
	if (config_debug) {
		tcache_small_bin_get(tcache, 0)->avail = NULL;
	}
}

void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	unsigned i;

	cassert(config_stats);
*/ for (i = 0; i < SC_NBINS; i++) { cache_bin_t *tbin = tcache_small_bin_get(tcache, i); unsigned binshard; bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard); bin->stats.nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(tsdn, &bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { cache_bin_t *tbin = tcache_large_bin_get(tcache, i); arena_stats_large_nrequests_add(tsdn, &arena->stats, i, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } static bool tcaches_create_prep(tsd_t *tsd) { bool err; malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches == NULL) { tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE); if (tcaches == NULL) { err = true; goto label_return; } } if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { err = true; goto label_return; } err = false; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); return err; } bool tcaches_create(tsd_t *tsd, unsigned *r_ind) { witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); bool err; if (tcaches_create_prep(tsd)) { err = true; goto label_return; } tcache_t *tcache = tcache_create_explicit(tsd); if (tcache == NULL) { err = true; goto label_return; } tcaches_t *elm; malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches_avail != NULL) { elm = tcaches_avail; tcaches_avail = tcaches_avail->next; elm->tcache = tcache; *r_ind = (unsigned)(elm - tcaches); } else { elm = &tcaches[tcaches_past]; elm->tcache = tcache; *r_ind = tcaches_past; tcaches_past++; } malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); err = false; label_return: witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); return err; } static tcache_t * tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); if (elm->tcache == NULL) { return NULL; } tcache_t *tcache = elm->tcache; if (allow_reinit) { elm->tcache = TCACHES_ELM_NEED_REINIT; } else { elm->tcache = NULL; } if (tcache == TCACHES_ELM_NEED_REINIT) { return NULL; } return tcache; } void tcaches_flush(tsd_t *tsd, unsigned ind) { malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true); malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); if (tcache != NULL) { /* Destroy the tcache; recreate in tcaches_get() if needed. */ tcache_destroy(tsd, tcache, false); } } void tcaches_destroy(tsd_t *tsd, unsigned ind) { malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); tcaches_t *elm = &tcaches[ind]; tcache_t *tcache = tcaches_elm_remove(tsd, elm, false); elm->next = tcaches_avail; tcaches_avail = elm; malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); if (tcache != NULL) { tcache_destroy(tsd, tcache, false); } } bool tcache_boot(tsdn_t *tsdn) { /* If necessary, clamp opt_lg_tcache_max. */ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SC_SMALL_MAXCLASS) { tcache_maxclass = SC_SMALL_MAXCLASS; } else { tcache_maxclass = (ZU(1) << opt_lg_tcache_max); } if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, malloc_mutex_rank_exclusive)) { return true; } nhbins = sz_size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. 
	/* Initialize tcache_bin_info. */
	tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
	    * sizeof(cache_bin_info_t), CACHELINE);
	if (tcache_bin_info == NULL) {
		return true;
	}
	stack_nelms = 0;
	unsigned i;
	for (i = 0; i < SC_NBINS; i++) {
		if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((bin_infos[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (bin_infos[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return false;
}

void
tcache_prefork(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_prefork(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_parent(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_child(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
	}
}
jemalloc-sys-0.3.2/rep/src/test_hooks.c
#include "jemalloc/internal/jemalloc_preamble.h"

/*
 * The hooks are a little bit screwy -- they're not genuinely exported in the
 * sense that we want them available to end-users, but we do want them visible
 * from outside the generated library, so that we can use them in test code.
 */
JEMALLOC_EXPORT
void (*test_hooks_arena_new_hook)() = NULL;

JEMALLOC_EXPORT
void (*test_hooks_libc_hook)() = NULL;
jemalloc-sys-0.3.2/rep/src/ticker.c
#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
jemalloc-sys-0.3.2/rep/src/tsd.c
#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"

/******************************************************************************/
/* Data. */

static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];

/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS

#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false;
bool tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
pthread_key_t tsd_tsd;
bool tsd_booted = false;
#elif (defined(_WIN32))
DWORD tsd_tsd;
tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
bool tsd_booted = false;
#else

/*
 * This contains a mutex, but it's pretty convenient to allow the mutex code to
 * have a dependency on tsd.  So we define the struct here, and only refer to
 * it by pointer in the header.
 */
struct tsd_init_head_s {
	ql_head(tsd_init_block_t) blocks;
	malloc_mutex_t lock;
};

pthread_key_t tsd_tsd;
tsd_init_head_t tsd_init_head = {
	ql_head_initializer(blocks),
	MALLOC_MUTEX_INITIALIZER
};

tsd_wrapper_t tsd_boot_wrapper = {
	false,
	TSD_INITIALIZER
};
bool tsd_booted = false;
#endif

JEMALLOC_DIAGNOSTIC_POP

/******************************************************************************/

/* A list of all the tsds in the nominal state. */
typedef ql_head(tsd_t) tsd_list_t;
static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
static malloc_mutex_t tsd_nominal_tsds_lock;

/* How many slow-path-enabling features are turned on. */
static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);

static bool
tsd_in_nominal_list(tsd_t *tsd) {
	tsd_t *tsd_list;
	bool found = false;
	/*
	 * We don't know that tsd is nominal; it might not be safe to get data
	 * out of it here.
	 */
	malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
	ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
		if (tsd == tsd_list) {
			found = true;
			break;
		}
	}
	malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock);
	return found;
}

static void
tsd_add_nominal(tsd_t *tsd) {
	assert(!tsd_in_nominal_list(tsd));
	assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
	ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link);
	malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
	ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}

static void
tsd_remove_nominal(tsd_t *tsd) {
	assert(tsd_in_nominal_list(tsd));
	assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
	malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
	ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}

static void
tsd_force_recompute(tsdn_t *tsdn) {
	/*
	 * The stores to tsd->state here need to synchronize with the exchange
	 * in tsd_slow_update.
	 */
	atomic_fence(ATOMIC_RELEASE);
	malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
	tsd_t *remote_tsd;
	ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
		assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
		    <= tsd_state_nominal_max);
		tsd_atomic_store(&remote_tsd->state,
		    tsd_state_nominal_recompute, ATOMIC_RELAXED);
	}
	malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
}

void
tsd_global_slow_inc(tsdn_t *tsdn) {
	atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
	/*
	 * We unconditionally force a recompute, even if the global slow count
	 * was already positive.  If we didn't, then it would be possible for
	 * us to return to the user, have the user synchronize externally with
	 * some other thread, and then have that other thread not have picked
	 * up the update yet (since the original incrementing thread might
	 * still be making its way through the tsd list).
	 */
	tsd_force_recompute(tsdn);
}

void
tsd_global_slow_dec(tsdn_t *tsdn) {
	atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
	/* See the note in ..._inc(). */
	tsd_force_recompute(tsdn);
}

static bool
tsd_local_slow(tsd_t *tsd) {
	return !tsd_tcache_enabled_get(tsd)
	    || tsd_reentrancy_level_get(tsd) > 0;
}

bool
tsd_global_slow() {
	return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
}

/******************************************************************************/

static uint8_t
tsd_state_compute(tsd_t *tsd) {
	if (!tsd_nominal(tsd)) {
		return tsd_state_get(tsd);
	}
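	/*
	 * Descriptive note: a thread is forced onto the slow path when any of
	 * three conditions below holds -- the process-wide malloc_slow flag,
	 * a per-thread condition (tcache disabled or nonzero reentrancy, per
	 * tsd_local_slow()), or a global feature counted by
	 * tsd_global_slow_count.  tsd_global_slow_inc() bumps the counter and
	 * then marks every nominal tsd tsd_state_nominal_recompute; each
	 * thread re-runs this computation from tsd_slow_update() /
	 * tsd_fetch_slow() the next time it inspects its state.
	 */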
	/* We're in *a* nominal state; but which one? */
	if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) {
		return tsd_state_nominal_slow;
	} else {
		return tsd_state_nominal;
	}
}

void
tsd_slow_update(tsd_t *tsd) {
	uint8_t old_state;
	do {
		uint8_t new_state = tsd_state_compute(tsd);
		old_state = tsd_atomic_exchange(&tsd->state, new_state,
		    ATOMIC_ACQUIRE);
	} while (old_state == tsd_state_nominal_recompute);
}

void
tsd_state_set(tsd_t *tsd, uint8_t new_state) {
	/* Only the tsd module can change the state *to* recompute. */
	assert(new_state != tsd_state_nominal_recompute);
	uint8_t old_state = tsd_atomic_load(&tsd->state, ATOMIC_RELAXED);
	if (old_state > tsd_state_nominal_max) {
		/*
		 * Not currently in the nominal list, but it might need to be
		 * inserted there.
		 */
		assert(!tsd_in_nominal_list(tsd));
		tsd_atomic_store(&tsd->state, new_state, ATOMIC_RELAXED);
		if (new_state <= tsd_state_nominal_max) {
			tsd_add_nominal(tsd);
		}
	} else {
		/*
		 * We're currently nominal.  If the new state is non-nominal,
		 * great; we take ourselves off the list and just enter the new
		 * state.
		 */
		assert(tsd_in_nominal_list(tsd));
		if (new_state > tsd_state_nominal_max) {
			tsd_remove_nominal(tsd);
			tsd_atomic_store(&tsd->state, new_state,
			    ATOMIC_RELAXED);
		} else {
			/*
			 * This is the tricky case.  We're transitioning from
			 * one nominal state to another.  The caller can't know
			 * about any races that are occurring at the same time,
			 * so we always have to recompute no matter what.
			 */
			tsd_slow_update(tsd);
		}
	}
}

static bool
tsd_data_init(tsd_t *tsd) {
	/*
	 * We initialize the rtree context first (before the tcache), since the
	 * tcache initialization depends on it.
	 */
	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));

	/*
	 * A nondeterministic seed based on the address of tsd reduces
	 * the likelihood of lockstep non-uniform cache index
	 * utilization among identical concurrent processes, but at the
	 * cost of test repeatability.  For debug builds, instead use a
	 * deterministic seed.
	 */
	*tsd_offset_statep_get(tsd) = config_debug ? 0 :
	    (uint64_t)(uintptr_t)tsd;

	return tsd_tcache_enabled_data_init(tsd);
}

static void
assert_tsd_data_cleanup_done(tsd_t *tsd) {
	assert(!tsd_nominal(tsd));
	assert(!tsd_in_nominal_list(tsd));
	assert(*tsd_arenap_get_unsafe(tsd) == NULL);
	assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
	assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
	assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
	assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
	assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
}

static bool
tsd_data_init_nocleanup(tsd_t *tsd) {
	assert(tsd_state_get(tsd) == tsd_state_reincarnated ||
	    tsd_state_get(tsd) == tsd_state_minimal_initialized);
	/*
	 * During reincarnation, there is no guarantee that the cleanup
	 * function will be called (deallocation may happen after all tsd
	 * destructors).  We set up tsd in a way that no cleanup is needed.
	 */
	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
	*tsd_arenas_tdata_bypassp_get(tsd) = true;
	*tsd_tcache_enabledp_get_unsafe(tsd) = false;
	*tsd_reentrancy_levelp_get(tsd) = 1;
	assert_tsd_data_cleanup_done(tsd);

	return false;
}

tsd_t *
tsd_fetch_slow(tsd_t *tsd, bool minimal) {
	assert(!tsd_fast(tsd));

	if (tsd_state_get(tsd) == tsd_state_nominal_slow) {
		/*
		 * On slow path but no work needed.  Note that we can't
		 * necessarily *assert* that we're slow, because we might be
		 * slow because of an asynchronous modification to global
		 * state, which might be asynchronously modified *back*.
		 */
*/ } else if (tsd_state_get(tsd) == tsd_state_nominal_recompute) { tsd_slow_update(tsd); } else if (tsd_state_get(tsd) == tsd_state_uninitialized) { if (!minimal) { if (tsd_booted) { tsd_state_set(tsd, tsd_state_nominal); tsd_slow_update(tsd); /* Trigger cleanup handler registration. */ tsd_set(tsd); tsd_data_init(tsd); } } else { tsd_state_set(tsd, tsd_state_minimal_initialized); tsd_set(tsd); tsd_data_init_nocleanup(tsd); } } else if (tsd_state_get(tsd) == tsd_state_minimal_initialized) { if (!minimal) { /* Switch to fully initialized. */ tsd_state_set(tsd, tsd_state_nominal); assert(*tsd_reentrancy_levelp_get(tsd) >= 1); (*tsd_reentrancy_levelp_get(tsd))--; tsd_slow_update(tsd); tsd_data_init(tsd); } else { assert_tsd_data_cleanup_done(tsd); } } else if (tsd_state_get(tsd) == tsd_state_purgatory) { tsd_state_set(tsd, tsd_state_reincarnated); tsd_set(tsd); tsd_data_init_nocleanup(tsd); } else { assert(tsd_state_get(tsd) == tsd_state_reincarnated); } return tsd; } void * malloc_tsd_malloc(size_t size) { return a0malloc(CACHELINE_CEILING(size)); } void malloc_tsd_dalloc(void *wrapper) { a0dalloc(wrapper); } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) #ifndef _WIN32 JEMALLOC_EXPORT #endif void _malloc_thread_cleanup(void) { bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; for (i = 0; i < ncleanups; i++) { pending[i] = true; } do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); if (pending[i]) { again = true; } } } } while (again); } #endif void malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } static void tsd_do_data_cleanup(tsd_t *tsd) { prof_tdata_cleanup(tsd); iarena_cleanup(tsd); arena_cleanup(tsd); arenas_tdata_cleanup(tsd); tcache_cleanup(tsd); witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); } void tsd_cleanup(void *arg) { tsd_t *tsd = (tsd_t *)arg; switch (tsd_state_get(tsd)) { case tsd_state_uninitialized: /* Do nothing. */ break; case tsd_state_minimal_initialized: /* This implies the thread only did free() in its lifetime. */ /* Fall through. */ case tsd_state_reincarnated: /* * Reincarnated means another destructor deallocated memory * after the destructor was called. Cleanup isn't required but * is still called for testing and completeness. */ assert_tsd_data_cleanup_done(tsd); /* Fall through. */ case tsd_state_nominal: case tsd_state_nominal_slow: tsd_do_data_cleanup(tsd); tsd_state_set(tsd, tsd_state_purgatory); tsd_set(tsd); break; case tsd_state_purgatory: /* * The previous time this destructor was called, we set the * state to tsd_state_purgatory so that other destructors * wouldn't cause re-creation of the tsd. This time, do * nothing, and do not request another callback. */ break; default: not_reached(); } #ifdef JEMALLOC_JET test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); int *data = tsd_test_datap_get_unsafe(tsd); if (test_callback != NULL) { test_callback(data); } #endif } tsd_t * malloc_tsd_boot0(void) { tsd_t *tsd; ncleanups = 0; if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock", WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) { return NULL; } if (tsd_boot0()) { return NULL; } tsd = tsd_fetch(); *tsd_arenas_tdata_bypassp_get(tsd) = true; return tsd; } void malloc_tsd_boot1(void) { tsd_boot1(); tsd_t *tsd = tsd_fetch(); /* malloc_slow has been set properly. Update tsd_slow. 
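* (boot0 left *tsd_arenas_tdata_bypassp_get(tsd) set to true; now that the * allocator is fully booted, recompute the slow-path state and clear the * bypass flag.)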
*/ tsd_slow_update(tsd); *tsd_arenas_tdata_bypassp_get(tsd) = false; } #ifdef _WIN32 static BOOL WINAPI _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { #ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: isthreaded = true; break; #endif case DLL_THREAD_DETACH: _malloc_thread_cleanup(); break; default: break; } return true; } /* * We need to be able to say "read" here (in the "pragma section"), but have * hooked "read". We won't read for the rest of the file, so we can get away * with unhooking. */ #ifdef read # undef read #endif #ifdef _MSC_VER # ifdef _M_IX86 # pragma comment(linker, "/INCLUDE:__tls_used") # pragma comment(linker, "/INCLUDE:_tls_callback") # else # pragma comment(linker, "/INCLUDE:_tls_used") # pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) ) # endif # pragma section(".CRT$XLY",long,read) #endif JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void * tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ malloc_mutex_lock(TSDN_NULL, &head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { malloc_mutex_unlock(TSDN_NULL, &head->lock); return iter->data; } } /* Insert block into list. */ ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); return NULL; } void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); } #endif void tsd_prefork(tsd_t *tsd) { malloc_mutex_prefork(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); } void tsd_postfork_parent(tsd_t *tsd) { malloc_mutex_postfork_parent(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); } void tsd_postfork_child(tsd_t *tsd) { malloc_mutex_postfork_child(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); ql_new(&tsd_nominal_tsds); if (tsd_state_get(tsd) <= tsd_state_nominal_max) { tsd_add_nominal(tsd); } } jemalloc-sys-0.3.2/rep/src/witness.c010064400007650000024000000046351344617474100155750ustar0000000000000000#define JEMALLOC_WITNESS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/malloc_io.h" void witness_init(witness_t *witness, const char *name, witness_rank_t rank, witness_comp_t *comp, void *opaque) { witness->name = name; witness->rank = rank; witness->comp = comp; witness->opaque = opaque; } static void witness_lock_error_impl(const witness_list_t *witnesses, const witness_t *witness) { witness_t *w; malloc_printf(": Lock rank order reversal:"); ql_foreach(w, witnesses, link) { malloc_printf(" %s(%u)", w->name, w->rank); } malloc_printf(" %s(%u)\n", witness->name, witness->rank); abort(); } witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; static void witness_owner_error_impl(const witness_t *witness) { malloc_printf(": Should own %s(%u)\n", witness->name, witness->rank); abort(); } witness_owner_error_t *JET_MUTABLE witness_owner_error = witness_owner_error_impl; static void witness_not_owner_error_impl(const witness_t *witness) { 
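/* The inverse of witness_owner_error_impl above: report a lock that is held but should not be. */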
malloc_printf(": Should not own %s(%u)\n", witness->name, witness->rank); abort(); } witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = witness_not_owner_error_impl; static void witness_depth_error_impl(const witness_list_t *witnesses, witness_rank_t rank_inclusive, unsigned depth) { witness_t *w; malloc_printf(": Should own %u lock%s of rank >= %u:", depth, (depth != 1) ? "s" : "", rank_inclusive); ql_foreach(w, witnesses, link) { malloc_printf(" %s(%u)", w->name, w->rank); } malloc_printf("\n"); abort(); } witness_depth_error_t *JET_MUTABLE witness_depth_error = witness_depth_error_impl; void witnesses_cleanup(witness_tsd_t *witness_tsd) { witness_assert_lockless(witness_tsd_tsdn(witness_tsd)); /* Do nothing. */ } void witness_prefork(witness_tsd_t *witness_tsd) { if (!config_debug) { return; } witness_tsd->forking = true; } void witness_postfork_parent(witness_tsd_t *witness_tsd) { if (!config_debug) { return; } witness_tsd->forking = false; } void witness_postfork_child(witness_tsd_t *witness_tsd) { if (!config_debug) { return; } #ifndef JEMALLOC_MUTEX_INIT_CB witness_list_t *witnesses; witnesses = &witness_tsd->witnesses; ql_new(witnesses); #endif witness_tsd->forking = false; } jemalloc-sys-0.3.2/rep/src/zone.c010064400007650000024000000351411344617474100150500ustar0000000000000000#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #ifndef JEMALLOC_ZONE # error "This source file is for zones on Darwin (OS X)." #endif /* Definitions of the following structs in malloc/malloc.h might be too old * for the built binary to run on newer versions of OSX. So use the newest * possible version of those structs. */ typedef struct _malloc_zone_t { void *reserved1; void *reserved2; size_t (*size)(struct _malloc_zone_t *, const void *); void *(*malloc)(struct _malloc_zone_t *, size_t); void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); void *(*valloc)(struct _malloc_zone_t *, size_t); void (*free)(struct _malloc_zone_t *, void *); void *(*realloc)(struct _malloc_zone_t *, void *, size_t); void (*destroy)(struct _malloc_zone_t *); const char *zone_name; unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); struct malloc_introspection_t *introspect; unsigned version; void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); } malloc_zone_t; typedef struct { vm_address_t address; vm_size_t size; } vm_range_t; typedef struct malloc_statistics_t { unsigned blocks_in_use; size_t size_in_use; size_t max_size_in_use; size_t size_allocated; } malloc_statistics_t; typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); typedef struct malloc_introspection_t { kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); size_t (*good_size)(malloc_zone_t *, size_t); boolean_t (*check)(malloc_zone_t *); void (*print)(malloc_zone_t *, boolean_t); void (*log)(malloc_zone_t *, void *); void (*force_lock)(malloc_zone_t *); void (*force_unlock)(malloc_zone_t *); void (*statistics)(malloc_zone_t *, malloc_statistics_t *); boolean_t (*zone_locked)(malloc_zone_t *); boolean_t (*enable_discharge_checking)(malloc_zone_t *); boolean_t 
(*disable_discharge_checking)(malloc_zone_t *); void (*discharge)(malloc_zone_t *, void *); #ifdef __BLOCKS__ void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); #else void *enumerate_unavailable_without_blocks; #endif void (*reinit_lock)(malloc_zone_t *); } malloc_introspection_t; extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); extern malloc_zone_t *malloc_default_zone(void); extern void malloc_zone_register(malloc_zone_t *zone); extern void malloc_zone_unregister(malloc_zone_t *zone); /* * The malloc_default_purgeable_zone() function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ static malloc_zone_t *default_zone, *purgeable_zone; static malloc_zone_t jemalloc_zone; static struct malloc_introspection_t jemalloc_zone_introspect; static pid_t zone_force_lock_pid = -1; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static size_t zone_size(malloc_zone_t *zone, const void *ptr); static void *zone_malloc(malloc_zone_t *zone, size_t size); static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); static void *zone_valloc(malloc_zone_t *zone, size_t size); static void zone_free(malloc_zone_t *zone, void *ptr); static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); static void *zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size); static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size); static void zone_destroy(malloc_zone_t *zone); static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested); static void zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed); static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); static size_t zone_good_size(malloc_zone_t *zone, size_t size); static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); static boolean_t zone_check(malloc_zone_t *zone); static void zone_print(malloc_zone_t *zone, boolean_t verbose); static void zone_log(malloc_zone_t *zone, void *address); static void zone_force_lock(malloc_zone_t *zone); static void zone_force_unlock(malloc_zone_t *zone); static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats); static boolean_t zone_locked(malloc_zone_t *zone); static void zone_reinit_lock(malloc_zone_t *zone); /******************************************************************************/ /* * Functions. */ static size_t zone_size(malloc_zone_t *zone, const void *ptr) { /* * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If * we knew that all pointers were owned by *some* zone, we could split * our zone into two parts, and use one as the default allocator and * the other as the default deallocator/reallocator. Since that will * not work in practice, we must check all pointers to assure that they * reside within a mapped extent before determining size. 
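* ivsalloc() returns 0 for pointers that jemalloc does not own, which is * also how the free/realloc hooks below decide whether to forward a * pointer to je_free()/je_realloc() or to the system allocator.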
*/ return ivsalloc(tsdn_fetch(), ptr); } static void * zone_malloc(malloc_zone_t *zone, size_t size) { return je_malloc(size); } static void * zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { return je_calloc(num, size); } static void * zone_valloc(malloc_zone_t *zone, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, PAGE, size); return ret; } static void zone_free(malloc_zone_t *zone, void *ptr) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { je_free(ptr); return; } free(ptr); } static void * zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { return je_realloc(ptr, size); } return realloc(ptr, size); } static void * zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, alignment, size); return ret; } static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { size_t alloc_size; alloc_size = ivsalloc(tsdn_fetch(), ptr); if (alloc_size != 0) { assert(alloc_size == size); je_free(ptr); return; } free(ptr); } static void zone_destroy(malloc_zone_t *zone) { /* This function should never be called. */ not_reached(); } static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) { unsigned i; for (i = 0; i < num_requested; i++) { results[i] = je_malloc(size); if (!results[i]) break; } return i; } static void zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed) { unsigned i; for (i = 0; i < num_to_be_freed; i++) { zone_free(zone, to_be_freed[i]); to_be_freed[i] = NULL; } } static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { return 0; } static size_t zone_good_size(malloc_zone_t *zone, size_t size) { if (size == 0) { size = 1; } return sz_s2u(size); } static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) { return KERN_SUCCESS; } static boolean_t zone_check(malloc_zone_t *zone) { return true; } static void zone_print(malloc_zone_t *zone, boolean_t verbose) { } static void zone_log(malloc_zone_t *zone, void *address) { } static void zone_force_lock(malloc_zone_t *zone) { if (isthreaded) { /* * See the note in zone_force_unlock, below, to see why we need * this. */ assert(zone_force_lock_pid == -1); zone_force_lock_pid = getpid(); jemalloc_prefork(); } } static void zone_force_unlock(malloc_zone_t *zone) { /* * zone_force_lock and zone_force_unlock are the entry points to the * forking machinery on OS X. The tricky thing is, the child is not * allowed to unlock mutexes locked in the parent, even if owned by the * forking thread (and the mutex type we use in OS X will fail an assert * if we try). In the child, we can get away with reinitializing all * the mutexes, which has the effect of unlocking them. In the parent, * doing this would mean we wouldn't wake any waiters blocked on the * mutexes we unlock. So, we record the pid of the current thread in * zone_force_lock, and use that to detect if we're in the parent or * child here, to decide which unlock logic we need. 
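* Concretely (see below): getpid() == zone_force_lock_pid means we are in * the parent, so run jemalloc_postfork_parent(); any other pid means we * are in the child, so run jemalloc_postfork_child(), which has the * effect of reinitializing, and thereby unlocking, the mutexes.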
*/ if (isthreaded) { assert(zone_force_lock_pid != -1); if (getpid() == zone_force_lock_pid) { jemalloc_postfork_parent(); } else { jemalloc_postfork_child(); } zone_force_lock_pid = -1; } } static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { /* We make no effort to actually fill the values */ stats->blocks_in_use = 0; stats->size_in_use = 0; stats->max_size_in_use = 0; stats->size_allocated = 0; } static boolean_t zone_locked(malloc_zone_t *zone) { /* Pretend no lock is being held */ return false; } static void zone_reinit_lock(malloc_zone_t *zone) { /* As of OSX 10.12, this function is only used when force_unlock would * be used if the zone version were < 9. So just use force_unlock. */ zone_force_unlock(zone); } static void zone_init(void) { jemalloc_zone.size = zone_size; jemalloc_zone.malloc = zone_malloc; jemalloc_zone.calloc = zone_calloc; jemalloc_zone.valloc = zone_valloc; jemalloc_zone.free = zone_free; jemalloc_zone.realloc = zone_realloc; jemalloc_zone.destroy = zone_destroy; jemalloc_zone.zone_name = "jemalloc_zone"; jemalloc_zone.batch_malloc = zone_batch_malloc; jemalloc_zone.batch_free = zone_batch_free; jemalloc_zone.introspect = &jemalloc_zone_introspect; jemalloc_zone.version = 9; jemalloc_zone.memalign = zone_memalign; jemalloc_zone.free_definite_size = zone_free_definite_size; jemalloc_zone.pressure_relief = zone_pressure_relief; jemalloc_zone_introspect.enumerator = zone_enumerator; jemalloc_zone_introspect.good_size = zone_good_size; jemalloc_zone_introspect.check = zone_check; jemalloc_zone_introspect.print = zone_print; jemalloc_zone_introspect.log = zone_log; jemalloc_zone_introspect.force_lock = zone_force_lock; jemalloc_zone_introspect.force_unlock = zone_force_unlock; jemalloc_zone_introspect.statistics = zone_statistics; jemalloc_zone_introspect.zone_locked = zone_locked; jemalloc_zone_introspect.enable_discharge_checking = NULL; jemalloc_zone_introspect.disable_discharge_checking = NULL; jemalloc_zone_introspect.discharge = NULL; #ifdef __BLOCKS__ jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; #else jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; #endif jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; } static malloc_zone_t * zone_default_get(void) { malloc_zone_t **zones = NULL; unsigned int num_zones = 0; /* * On OSX 10.12, malloc_default_zone returns a special zone that is not * present in the list of registered zones. That zone uses a "lite zone" * if one is present (apparently enabled when malloc stack logging is * enabled), or the first registered zone otherwise. In practice this * means unless malloc stack logging is enabled, the first registered * zone is the default. So get the list of zones to get the first one, * instead of relying on malloc_default_zone. */ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &num_zones)) { /* * Reset the value in case the failure happened after it was * set. */ num_zones = 0; } if (num_zones) { return zones[0]; } return malloc_default_zone(); } /* As written, this function can only promote jemalloc_zone. */ static void zone_promote(void) { malloc_zone_t *zone; do { /* * Unregister and reregister the default zone. On OSX >= 10.6, * unregistering takes the last registered zone and places it * at the location of the specified zone. Unregistering the * default zone thus makes the last registered one the default. * On OSX < 10.6, unregistering shifts all registered zones. * The first registered zone then becomes the default. 
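* Either way, the do/while loop below simply repeats the * unregister/register dance until zone_default_get() reports * &jemalloc_zone as the default.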
*/ malloc_zone_unregister(default_zone); malloc_zone_register(default_zone); /* * On OSX 10.6, having the default purgeable zone appear before * the default zone makes some things crash because it thinks it * owns the default zone allocated pointers. We thus * unregister/re-register it in order to ensure it's always * after the default zone. On OSX < 10.6, there is no purgeable * zone, so this does nothing. On OSX >= 10.6, unregistering * replaces the purgeable zone with the last registered zone * above, i.e. the default zone. Registering it again then puts * it at the end, obviously after the default zone. */ if (purgeable_zone != NULL) { malloc_zone_unregister(purgeable_zone); malloc_zone_register(purgeable_zone); } zone = zone_default_get(); } while (zone != &jemalloc_zone); } JEMALLOC_ATTR(constructor) void zone_register(void) { /* * If something else replaced the system default zone allocator, don't * register jemalloc's. */ default_zone = zone_default_get(); if (!default_zone->zone_name || strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { return; } /* * The default purgeable zone is created lazily by OSX's libc. It uses * the default zone when it is created for "small" allocations * (< 15 KiB), but assumes the default zone is a scalable_zone. This * obviously fails when the default zone is the jemalloc zone, so * malloc_default_purgeable_zone() is called beforehand so that the * default purgeable zone is created when the default zone is still * a scalable_zone. As purgeable zones only exist on >= 10.6, we need * to check for the existence of malloc_default_purgeable_zone() at * run time. */ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : malloc_default_purgeable_zone(); /* Register the custom zone. At this point it won't be the default. */ zone_init(); malloc_zone_register(&jemalloc_zone); /* Promote the custom zone to be default. */ zone_promote(); } jemalloc-sys-0.3.2/rep/test/include/test/SFMT-alti.h010064400007650000024000000134271344617474100203770ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-alti.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) * pseudorandom number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt */ #ifndef SFMT_ALTI_H #define SFMT_ALTI_H /** * This function represents the recursion formula in AltiVec and BIG ENDIAN. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @return output */ JEMALLOC_ALWAYS_INLINE vector unsigned int vec_recursion(vector unsigned int a, vector unsigned int b, vector unsigned int c, vector unsigned int d) { const vector unsigned int sl1 = ALTI_SL1; const vector unsigned int sr1 = ALTI_SR1; #ifdef ONLY64 const vector unsigned int mask = ALTI_MSK64; const vector unsigned char perm_sl = ALTI_SL2_PERM64; const vector unsigned char perm_sr = ALTI_SR2_PERM64; #else const vector unsigned int mask = ALTI_MSK; const vector unsigned char perm_sl = ALTI_SL2_PERM; const vector unsigned char perm_sr = ALTI_SR2_PERM; #endif vector unsigned int v, w, x, y, z; x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); v = a; y = vec_sr(b, sr1); z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); w = vec_sl(d, sl1); z = vec_xor(z, w); y = vec_and(y, mask); v = vec_xor(v, x); z = vec_xor(z, y); z = vec_xor(z, v); return z; } /** * This function fills the internal state array with pseudorandom * integers. */ static inline void gen_rand_all(sfmt_t *ctx) { int i; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. 
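* * @note (editorial, not part of the original SFMT documentation) The * state copy-back loop below, 'for (j = 0; j < 2 * N - size; j++)', is * only meaningful when size >= N, i.e. the caller must request at least * one full internal-state array's worth of 128-bit blocks.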
*/ static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j].s = array[j + size - N].s; } for (; i < size; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; ctx->sfmt[j++].s = r; r1 = r2; r2 = r; } } #ifndef ONLY64 #if defined(__APPLE__) #define ALTI_SWAP (vector unsigned char) \ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) #else #define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} #endif /** * This function swaps the high and low 32 bits of 64-bit integers in a * user-specified array. * * @param array a 128-bit array to be swapped. * @param size size of the 128-bit array. */ static inline void swap(w128_t *array, int size) { int i; const vector unsigned char perm = ALTI_SWAP; for (i = 0; i < size; i++) { array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); } } #endif #endif jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params.h010064400007650000024000000102761344617474100207300ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS_H #define SFMT_PARAMS_H #if !defined(MEXP) #ifdef __GNUC__ #warning "MEXP is not defined. I assume MEXP is 19937." #endif #define MEXP 19937 #endif /*----------------- BASIC DEFINITIONS -----------------*/ /** Mersenne Exponent. 
The period of the sequence * is a multiple of 2^MEXP-1. * #define MEXP 19937 */ /** SFMT generator has an internal state array of 128-bit integers, * and N is its size. */ #define N (MEXP / 128 + 1) /** N32 is the size of internal state array when regarded as an array * of 32-bit integers.*/ #define N32 (N * 4) /** N64 is the size of internal state array when regarded as an array * of 64-bit integers.*/ #define N64 (N * 2) /*---------------------- the parameters of SFMT following definitions are in paramsXXXX.h file. ----------------------*/ /** the pick up position of the array. #define POS1 122 */ /** the parameter of shift left as four 32-bit registers. #define SL1 18 */ /** the parameter of shift left as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SL2 1 */ /** the parameter of shift right as four 32-bit registers. #define SR1 11 */ /** the parameter of shift right as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SR2 1 */ /** A bitmask, used in the recursion. These parameters are introduced * to break symmetry of SIMD. #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U */ /** These definitions are part of a 128-bit period certification vector. #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xc98e126aU */ #if MEXP == 607 #include "test/SFMT-params607.h" #elif MEXP == 1279 #include "test/SFMT-params1279.h" #elif MEXP == 2281 #include "test/SFMT-params2281.h" #elif MEXP == 4253 #include "test/SFMT-params4253.h" #elif MEXP == 11213 #include "test/SFMT-params11213.h" #elif MEXP == 19937 #include "test/SFMT-params19937.h" #elif MEXP == 44497 #include "test/SFMT-params44497.h" #elif MEXP == 86243 #include "test/SFMT-params86243.h" #elif MEXP == 132049 #include "test/SFMT-params132049.h" #elif MEXP == 216091 #include "test/SFMT-params216091.h" #else #ifdef __GNUC__ #error "MEXP is not valid." #undef MEXP #else #undef MEXP #endif #endif #endif /* SFMT_PARAMS_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params11213.h010064400007650000024000000067561344617474100213300ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS11213_H #define SFMT_PARAMS11213_H #define POS1 68 #define SL1 14 #define SL2 3 #define SR1 7 #define SR2 3 #define MSK1 0xeffff7fbU #define MSK2 0xffffffefU #define MSK3 0xdfdfbfffU #define MSK4 0x7fffdbfdU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xe8148000U #define PARITY4 0xd0c7afa3U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" #endif /* SFMT_PARAMS11213_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params1279.h010064400007650000024000000067401344617474100212540ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS1279_H #define SFMT_PARAMS1279_H #define POS1 7 #define SL1 14 #define SL2 3 #define SR1 5 #define SR2 1 #define MSK1 0xf7fefffdU #define MSK2 0x7fefcfffU #define MSK3 0xaff3ef3fU #define MSK4 0xb5ffff7fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x20000000U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" #endif /* SFMT_PARAMS1279_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params132049.h010064400007650000024000000067541344617474100214210ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS132049_H #define SFMT_PARAMS132049_H #define POS1 110 #define SL1 19 #define SL2 1 #define SR1 21 #define SR2 1 #define MSK1 0xffffbb5fU #define MSK2 0xfb6ebf95U #define MSK3 0xfffefffaU #define MSK4 0xcff77fffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xcb520000U #define PARITY4 0xc7e91c7dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" #endif /* SFMT_PARAMS132049_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params19937.h010064400007650000024000000067501344617474100213470ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS19937_H #define SFMT_PARAMS19937_H #define POS1 122 #define SL1 18 #define SL2 1 #define SR1 11 #define SR2 1 #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x13c9e684U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" #endif /* SFMT_PARAMS19937_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params216091.h010064400007650000024000000067561344617474100214230ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS216091_H #define SFMT_PARAMS216091_H #define POS1 627 #define SL1 11 #define SL2 3 #define SR1 10 #define SR2 1 #define MSK1 0xbff7bff7U #define MSK2 0xbfffffffU #define MSK3 0xbffffa7fU #define MSK4 0xffddfbfbU #define PARITY1 0xf8000001U #define PARITY2 0x89e80709U #define PARITY3 0x3bd2b64bU #define PARITY4 0x0c64b1e4U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" #endif /* SFMT_PARAMS216091_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params2281.h010064400007650000024000000067401344617474100212460ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS2281_H #define SFMT_PARAMS2281_H #define POS1 12 #define SL1 19 #define SL2 1 #define SR1 5 #define SR2 1 #define MSK1 0xbff7ffbfU #define MSK2 0xfdfffffeU #define MSK3 0xf7ffef7fU #define MSK4 0xf2f7cbbfU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x41dfa600U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" #endif /* SFMT_PARAMS2281_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params4253.h010064400007650000024000000067401344617474100212470ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS4253_H #define SFMT_PARAMS4253_H #define POS1 17 #define SL1 20 #define SL2 1 #define SR1 7 #define SR2 1 #define MSK1 0x9f7bffffU #define MSK2 0x9fffff5fU #define MSK3 0x3efffffbU #define MSK4 0xfffff7bbU #define PARITY1 0xa8000001U #define PARITY2 0xaf5390a3U #define PARITY3 0xb740b3f8U #define PARITY4 0x6c11486dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" #endif /* SFMT_PARAMS4253_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params44497.h010064400007650000024000000067561344617474100213540ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS44497_H #define SFMT_PARAMS44497_H #define POS1 330 #define SL1 5 #define SL2 3 #define SR1 9 #define SR2 3 #define MSK1 0xeffffffbU #define MSK2 0xdfbebfffU #define MSK3 0xbfbf7befU #define MSK4 0x9ffd7bffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xa3ac4000U #define PARITY4 0xecc1327aU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" #endif /* SFMT_PARAMS44497_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params607.h010064400007650000024000000067461344617474100211740ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS607_H #define SFMT_PARAMS607_H #define POS1 2 #define SL1 15 #define SL2 3 #define SR1 13 #define SR2 3 #define MSK1 0xfdff37ffU #define MSK2 0xef7f3f7dU #define MSK3 0xff777b7dU #define MSK4 0x7ff7fb2fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x5986f054U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" #endif /* SFMT_PARAMS607_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-params86243.h010064400007650000024000000067541344617474100213450ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS86243_H #define SFMT_PARAMS86243_H #define POS1 366 #define SL1 6 #define SL2 7 #define SR1 19 #define SR2 1 #define MSK1 0xfdbffbffU #define MSK2 0xbff7ff3fU #define MSK3 0xfd77efffU #define MSK4 0xbf9ff3ffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xe9528d85U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) #define ALTI_SL2_PERM64 \ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" #endif /* SFMT_PARAMS86243_H */ jemalloc-sys-0.3.2/rep/test/include/test/SFMT-sse2.h010064400007650000024000000121231344617474100203120ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-sse2.h * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * @note We assume LITTLE ENDIAN in this file * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #ifndef SFMT_SSE2_H #define SFMT_SSE2_H /** * This function represents the recursion formula. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @param mask 128-bit mask * @return output */ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { __m128i v, x, y, z; x = _mm_load_si128(a); y = _mm_srli_epi32(*b, SR1); z = _mm_srli_si128(c, SR2); v = _mm_slli_epi32(d, SL1); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, v); x = _mm_slli_si128(x, SL2); y = _mm_and_si128(y, mask); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, y); return z; } /** * This function fills the internal state array with pseudorandom * integers. */ static inline void gen_rand_all(sfmt_t *ctx) { int i; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated.
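 *
 * @note The loops below proceed in phases: the first outputs recurse over
 * the existing internal state, later outputs recurse over the array
 * itself, and the final N 128-bit words are also written back into
 * ctx->sfmt so that a subsequent call continues the same stream.  Callers
 * presumably reach this through fill_array32()/fill_array64() (declared
 * in SFMT.h below), which enforce the get_min_array_size32()/
 * get_min_array_size64() constraints.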
*/ static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { r = _mm_load_si128(&array[j + size - N].si); _mm_store_si128(&ctx->sfmt[j].si, r); } for (; i < size; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); _mm_store_si128(&ctx->sfmt[j++].si, r); r1 = r2; r2 = r; } } #endif jemalloc-sys-0.3.2/rep/test/include/test/SFMT.h010064400007650000024000000123231344617474100174420ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom * number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt * * @note We assume that your system has inttypes.h. 
If your system * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, * and you have to define PRIu64 and PRIx64 in this file as follows: * @verbatim typedef unsigned int uint32_t typedef unsigned long long uint64_t #define PRIu64 "llu" #define PRIx64 "llx" @endverbatim * uint32_t must be exactly a 32-bit unsigned integer type (no more, no * less), and uint64_t must be exactly a 64-bit unsigned integer type. * PRIu64 and PRIx64 are used by the printf function to print a 64-bit * unsigned int in decimal and in hexadecimal format, respectively. */ #ifndef SFMT_H #define SFMT_H typedef struct sfmt_s sfmt_t; uint32_t gen_rand32(sfmt_t *ctx); uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); uint64_t gen_rand64(sfmt_t *ctx); uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); void fill_array32(sfmt_t *ctx, uint32_t *array, int size); void fill_array64(sfmt_t *ctx, uint64_t *array, int size); sfmt_t *init_gen_rand(uint32_t seed); sfmt_t *init_by_array(uint32_t *init_key, int key_length); void fini_gen_rand(sfmt_t *ctx); const char *get_idstring(void); int get_min_array_size32(void); int get_min_array_size64(void); /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ static inline double to_real1(uint32_t v) { return v * (1.0/4294967295.0); /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ static inline double genrand_real1(sfmt_t *ctx) { return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ static inline double to_real2(uint32_t v) { return v * (1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ static inline double genrand_real2(sfmt_t *ctx) { return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ static inline double to_real3(uint32_t v) { return (((double)v) + 0.5)*(1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ static inline double genrand_real3(sfmt_t *ctx) { return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution */ static inline double to_res53(uint64_t v) { return v * (1.0/18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32-bit integers */ static inline double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ static inline double genrand_res53(sfmt_t *ctx) { return to_res53(gen_rand64(ctx)); } /** generates a random number on [0,1) with 53-bit resolution using 32-bit integers. */ static inline double genrand_res53_mix(sfmt_t *ctx) { uint32_t x, y; x = gen_rand32(ctx); y = gen_rand32(ctx); return to_res53_mix(x, y); } #endif jemalloc-sys-0.3.2/rep/test/include/test/btalloc.h010064400007650000024000000014561344617474100203160ustar0000000000000000/* btalloc() provides a mechanism for allocating via permuted backtraces.
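Each bit of the "bits" argument steers one level of the generated call
tree: bits & 0x1U selects btalloc_0() or btalloc_1() as the callee, and the
remaining bits are shifted down one position for the next level, so every
distinct bits value reaches the final mallocx() call through a distinct
backtrace. For example, btalloc(42, 0x5U) and btalloc(42, 0x3U) allocate
42 bytes through different call paths; heap profiling tests presumably use
this to generate many distinct stack traces.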
*/ void *btalloc(size_t size, unsigned bits); #define btalloc_n_proto(n) \ void *btalloc_##n(size_t size, unsigned bits); btalloc_n_proto(0) btalloc_n_proto(1) #define btalloc_n_gen(n) \ void * \ btalloc_##n(size_t size, unsigned bits) { \ void *p; \ \ if (bits == 0) { \ p = mallocx(size, 0); \ } else { \ switch (bits & 0x1U) { \ case 0: \ p = (btalloc_0(size, bits >> 1)); \ break; \ case 1: \ p = (btalloc_1(size, bits >> 1)); \ break; \ default: not_reached(); \ } \ } \ /* Intentionally sabotage tail call optimization. */ \ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ return p; \ } jemalloc-sys-0.3.2/rep/test/include/test/extent_hooks.h010064400007650000024000000230351344617474100214050ustar0000000000000000/* * Boilerplate code used for testing extent hooks via interception and * passthrough. */ static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind); static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind); static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind); static extent_hooks_t *default_hooks; static extent_hooks_t hooks = { extent_alloc_hook, extent_dalloc_hook, extent_destroy_hook, extent_commit_hook, extent_decommit_hook, extent_purge_lazy_hook, extent_purge_forced_hook, extent_split_hook, extent_merge_hook }; /* Control whether hook functions pass calls through to default hooks. */ static bool try_alloc = true; static bool try_dalloc = true; static bool try_destroy = true; static bool try_commit = true; static bool try_decommit = true; static bool try_purge_lazy = true; static bool try_purge_forced = true; static bool try_split = true; static bool try_merge = true; /* Set to false prior to operations, then introspect after operations. */ static bool called_alloc; static bool called_dalloc; static bool called_destroy; static bool called_commit; static bool called_decommit; static bool called_purge_lazy; static bool called_purge_forced; static bool called_split; static bool called_merge; /* Set to false prior to operations, then introspect after operations. */ static bool did_alloc; static bool did_dalloc; static bool did_destroy; static bool did_commit; static bool did_decommit; static bool did_purge_lazy; static bool did_purge_forced; static bool did_split; static bool did_merge; #if 0 # define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) #else # define TRACE_HOOK(fmt, ...) 
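/*
 * Sketch of typical usage (the mallocx() call is illustrative, not part of
 * this header): a test calls extent_hooks_prep() to capture default_hooks,
 * installs &hooks through the "arena.0.extent_hooks" mallctl, resets the
 * called_* / did_* flags to false, performs an arena operation such as
 * mallocx(...), and then asserts on e.g. called_alloc and did_alloc.
 * Setting a try_* flag to false makes the corresponding wrapper report
 * failure without delegating to default_hooks, which exercises error
 * paths.  Flipping the "#if 0" above to "#if 1" logs every hook invocation
 * via malloc_printf().
 */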
#endif static void * extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { void *ret; TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, " "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks, new_addr, size, alignment, *zero ? "true" : "false", *commit ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook, "Wrong hook function"); called_alloc = true; if (!try_alloc) { return NULL; } ret = default_hooks->alloc(default_hooks, new_addr, size, alignment, zero, commit, 0); did_alloc = (ret != NULL); return ret; } static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook, "Wrong hook function"); called_dalloc = true; if (!try_dalloc) { return true; } err = default_hooks->dalloc(default_hooks, addr, size, committed, 0); did_dalloc = !err; return err; } static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->destroy, extent_destroy_hook, "Wrong hook function"); called_destroy = true; if (!try_destroy) { return; } default_hooks->destroy(default_hooks, addr, size, committed, 0); did_destroy = true; } static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->commit, extent_commit_hook, "Wrong hook function"); called_commit = true; if (!try_commit) { return true; } err = default_hooks->commit(default_hooks, addr, size, offset, length, 0); did_commit = !err; return err; } static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook, "Wrong hook function"); called_decommit = true; if (!try_decommit) { return true; } err = default_hooks->decommit(default_hooks, addr, size, offset, length, 0); did_decommit = !err; return err; } static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu 
arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook, "Wrong hook function"); called_purge_lazy = true; if (!try_purge_lazy) { return true; } err = default_hooks->purge_lazy == NULL || default_hooks->purge_lazy(default_hooks, addr, size, offset, length, 0); did_purge_lazy = !err; return err; } static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, offset, length, arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook, "Wrong hook function"); called_purge_forced = true; if (!try_purge_forced) { return true; } err = default_hooks->purge_forced == NULL || default_hooks->purge_forced(default_hooks, addr, size, offset, length, 0); did_purge_forced = !err; return err; } static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, " "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, addr, size, size_a, size_b, committed ? "true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->split, extent_split_hook, "Wrong hook function"); called_split = true; if (!try_split) { return true; } err = (default_hooks->split == NULL || default_hooks->split(default_hooks, addr, size, size_a, size_b, committed, 0)); did_split = !err; return err; } static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { bool err; TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p " "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, addr_a, size_a, addr_b, size_b, committed ? 
"true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->merge, extent_merge_hook, "Wrong hook function"); assert_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b, "Extents not mergeable"); called_merge = true; if (!try_merge) { return true; } err = (default_hooks->merge == NULL || default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b, committed, 0)); did_merge = !err; return err; } static void extent_hooks_prep(void) { size_t sz; sz = sizeof(default_hooks); assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz, NULL, 0), 0, "Unexpected mallctl() error"); } jemalloc-sys-0.3.2/rep/test/include/test/jemalloc_test.h010064400007650000024000000107121344617502700215140ustar0000000000000000#ifdef __cplusplus extern "C" { #endif #include #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include #include #include #include #include #include #ifdef _WIN32 # include "msvc_compat/strings.h" #endif #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" #else # include #endif #include "test/jemalloc_test_defs.h" #if defined(JEMALLOC_OSATOMIC) # include #endif #if defined(HAVE_ALTIVEC) && !defined(__APPLE__) # include #endif #ifdef HAVE_SSE2 # include #endif /******************************************************************************/ /* * For unit tests, expose all public and private interfaces. */ #ifdef JEMALLOC_UNIT_TEST # define JEMALLOC_JET # define JEMALLOC_MANGLE # include "jemalloc/internal/jemalloc_preamble.h" # include "jemalloc/internal/jemalloc_internal_includes.h" /******************************************************************************/ /* * For integration tests, expose the public jemalloc interfaces, but only * expose the minimum necessary internal utility code (to avoid re-implementing * essentially identical code within the test infrastructure). */ #elif defined(JEMALLOC_INTEGRATION_TEST) || \ defined(JEMALLOC_INTEGRATION_CPP_TEST) # define JEMALLOC_MANGLE # include "jemalloc/jemalloc.h" # include "jemalloc/internal/jemalloc_internal_defs.h" # include "jemalloc/internal/jemalloc_internal_macros.h" static const bool config_debug = #ifdef JEMALLOC_DEBUG true #else false #endif ; # define JEMALLOC_N(n) je_##n # include "jemalloc/internal/private_namespace.h" # include "jemalloc/internal/test_hooks.h" /* Hermetic headers. */ # include "jemalloc/internal/assert.h" # include "jemalloc/internal/malloc_io.h" # include "jemalloc/internal/nstime.h" # include "jemalloc/internal/util.h" /* Non-hermetic headers. */ # include "jemalloc/internal/qr.h" # include "jemalloc/internal/ql.h" /******************************************************************************/ /* * For stress tests, expose the public jemalloc interfaces with name mangling * so that they can be tested as e.g. malloc() and free(). Also expose the * public jemalloc interfaces with jet_ prefixes, so that stress tests can use * a separate allocator for their internal data structures. 
*/ #elif defined(JEMALLOC_STRESS_TEST) # include "jemalloc/jemalloc.h" # include "jemalloc/jemalloc_protos_jet.h" # define JEMALLOC_JET # include "jemalloc/internal/jemalloc_preamble.h" # include "jemalloc/internal/jemalloc_internal_includes.h" # include "jemalloc/internal/public_unnamespace.h" # undef JEMALLOC_JET # include "jemalloc/jemalloc_rename.h" # define JEMALLOC_MANGLE # ifdef JEMALLOC_STRESS_TESTLIB # include "jemalloc/jemalloc_mangle_jet.h" # else # include "jemalloc/jemalloc_mangle.h" # endif /******************************************************************************/ /* * This header does dangerous things, the effects of which only test code * should be subject to. */ #else # error "This header cannot be included outside a testing context" #endif /******************************************************************************/ /* * Common test utilities. */ #include "test/btalloc.h" #include "test/math.h" #include "test/mtx.h" #include "test/mq.h" #include "test/test.h" #include "test/timer.h" #include "test/thd.h" #define MEXP 19937 #include "test/SFMT.h" /******************************************************************************/ /* * Define always-enabled assertion macros, so that test assertions execute even * if assertions are disabled in the library code. */ #undef assert #undef not_reached #undef not_implemented #undef assert_not_implemented #define assert(e) do { \ if (!(e)) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #define not_reached() do { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define not_implemented() do { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define assert_not_implemented(e) do { \ if (!(e)) { \ not_implemented(); \ } \ } while (0) #ifdef __cplusplus } #endif jemalloc-sys-0.3.2/rep/test/include/test/jemalloc_test.h.in010064400007650000024000000107721344617474100221310ustar0000000000000000#ifdef __cplusplus extern "C" { #endif #include <limits.h> #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include <stdlib.h> #include <stdarg.h> #include <stdbool.h> #include <errno.h> #include <math.h> #include <string.h> #ifdef _WIN32 # include "msvc_compat/strings.h" #endif #ifdef _WIN32 # include <windows.h> # include "msvc_compat/windows_extra.h" #else # include <pthread.h> #endif #include "test/jemalloc_test_defs.h" #if defined(JEMALLOC_OSATOMIC) # include <libkern/OSAtomic.h> #endif #if defined(HAVE_ALTIVEC) && !defined(__APPLE__) # include <altivec.h> #endif #ifdef HAVE_SSE2 # include <emmintrin.h> #endif /******************************************************************************/ /* * For unit tests, expose all public and private interfaces. */ #ifdef JEMALLOC_UNIT_TEST # define JEMALLOC_JET # define JEMALLOC_MANGLE # include "jemalloc/internal/jemalloc_preamble.h" # include "jemalloc/internal/jemalloc_internal_includes.h" /******************************************************************************/ /* * For integration tests, expose the public jemalloc interfaces, but only * expose the minimum necessary internal utility code (to avoid re-implementing * essentially identical code within the test infrastructure).
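 * (This is the autoconf template variant of the header: configure
 * presumably substitutes the @install_suffix@ and @private_namespace@
 * placeholders seen below when generating jemalloc_test.h.)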
*/ #elif defined(JEMALLOC_INTEGRATION_TEST) || \ defined(JEMALLOC_INTEGRATION_CPP_TEST) # define JEMALLOC_MANGLE # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/internal/jemalloc_internal_defs.h" # include "jemalloc/internal/jemalloc_internal_macros.h" static const bool config_debug = #ifdef JEMALLOC_DEBUG true #else false #endif ; # define JEMALLOC_N(n) @private_namespace@##n # include "jemalloc/internal/private_namespace.h" # include "jemalloc/internal/test_hooks.h" /* Hermetic headers. */ # include "jemalloc/internal/assert.h" # include "jemalloc/internal/malloc_io.h" # include "jemalloc/internal/nstime.h" # include "jemalloc/internal/util.h" /* Non-hermetic headers. */ # include "jemalloc/internal/qr.h" # include "jemalloc/internal/ql.h" /******************************************************************************/ /* * For stress tests, expose the public jemalloc interfaces with name mangling * so that they can be tested as e.g. malloc() and free(). Also expose the * public jemalloc interfaces with jet_ prefixes, so that stress tests can use * a separate allocator for their internal data structures. */ #elif defined(JEMALLOC_STRESS_TEST) # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/jemalloc_protos_jet.h" # define JEMALLOC_JET # include "jemalloc/internal/jemalloc_preamble.h" # include "jemalloc/internal/jemalloc_internal_includes.h" # include "jemalloc/internal/public_unnamespace.h" # undef JEMALLOC_JET # include "jemalloc/jemalloc_rename.h" # define JEMALLOC_MANGLE # ifdef JEMALLOC_STRESS_TESTLIB # include "jemalloc/jemalloc_mangle_jet.h" # else # include "jemalloc/jemalloc_mangle.h" # endif /******************************************************************************/ /* * This header does dangerous things, the effects of which only test code * should be subject to. */ #else # error "This header cannot be included outside a testing context" #endif /******************************************************************************/ /* * Common test utilities. */ #include "test/btalloc.h" #include "test/math.h" #include "test/mtx.h" #include "test/mq.h" #include "test/test.h" #include "test/timer.h" #include "test/thd.h" #define MEXP 19937 #include "test/SFMT.h" /******************************************************************************/ /* * Define always-enabled assertion macros, so that test assertions execute even * if assertions are disabled in the library code. */ #undef assert #undef not_reached #undef not_implemented #undef assert_not_implemented #define assert(e) do { \ if (!(e)) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #define not_reached() do { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define not_implemented() do { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define assert_not_implemented(e) do { \ if (!(e)) { \ not_implemented(); \ } \ } while (0) #ifdef __cplusplus } #endif jemalloc-sys-0.3.2/rep/test/include/test/jemalloc_test_defs.h010064400007650000024000000006231344617503000225110ustar0000000000000000/* test/include/test/jemalloc_test_defs.h. Generated from jemalloc_test_defs.h.in by configure. */ #include "jemalloc/internal/jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" /* * For use by SFMT.
configure.ac doesn't actually define HAVE_SSE2 because its * dependencies are notoriously unportable in practice. */ /* #undef HAVE_SSE2 */ /* #undef HAVE_ALTIVEC */ jemalloc-sys-0.3.2/rep/test/include/test/jemalloc_test_defs.h.in010064400007650000024000000004421344617474100231230ustar0000000000000000#include "jemalloc/internal/jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" /* * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its * dependencies are notoriously unportable in practice. */ #undef HAVE_SSE2 #undef HAVE_ALTIVEC jemalloc-sys-0.3.2/rep/test/include/test/math.h010064400007650000024000000172721344617474100176320ustar0000000000000000/* * Compute the natural log of Gamma(x), accurate to 10 decimal places. * * This implementation is based on: * * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function * [S14]. Communications of the ACM 9(9):684. */ static inline double ln_gamma(double x) { double f, z; assert(x > 0.0); if (x < 7.0) { f = 1.0; z = x; while (z < 7.0) { f *= z; z += 1.0; } x = z; f = -log(f); } else { f = 0.0; } z = 1.0 / (x * x); return f + (x-0.5) * log(x) - x + 0.918938533204673 + (((-0.000595238095238 * z + 0.000793650793651) * z - 0.002777777777778) * z + 0.083333333333333) / x; } /* * Compute the incomplete Gamma ratio for [0..x], where p is the shape * parameter, and ln_gamma_p is ln_gamma(p). * * This implementation is based on: * * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. * Applied Statistics 19:285-287. */ static inline double i_gamma(double x, double p, double ln_gamma_p) { double acu, factor, oflo, gin, term, rn, a, b, an, dif; double pn[6]; unsigned i; assert(p > 0.0); assert(x >= 0.0); if (x == 0.0) { return 0.0; } acu = 1.0e-10; oflo = 1.0e30; gin = 0.0; factor = exp(p * log(x) - x - ln_gamma_p); if (x <= 1.0 || x < p) { /* Calculation by series expansion. */ gin = 1.0; term = 1.0; rn = p; while (true) { rn += 1.0; term *= x / rn; gin += term; if (term <= acu) { gin *= factor / p; return gin; } } } else { /* Calculation by continued fraction. */ a = 1.0 - p; b = a + x + 1.0; term = 0.0; pn[0] = 1.0; pn[1] = x; pn[2] = x + 1.0; pn[3] = x * b; gin = pn[2] / pn[3]; while (true) { a += 1.0; b += 2.0; term += 1.0; an = a * term; for (i = 0; i < 2; i++) { pn[i+4] = b * pn[i+2] - an * pn[i]; } if (pn[5] != 0.0) { rn = pn[4] / pn[5]; dif = fabs(gin - rn); if (dif <= acu && dif <= acu * rn) { gin = 1.0 - factor * gin; return gin; } gin = rn; } for (i = 0; i < 4; i++) { pn[i] = pn[i+2]; } if (fabs(pn[4]) >= oflo) { for (i = 0; i < 4; i++) { pn[i] /= oflo; } } } } } /* * Given a value p in [0..1] of the lower tail area of the normal distribution, * compute the limit on the definite integral from [-inf..z] that satisfies p, * accurate to 16 decimal places. * * This implementation is based on: * * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal * distribution. Applied Statistics 37(3):477-484. */ static inline double pt_norm(double p) { double q, r, ret; assert(p > 0.0 && p < 1.0); q = p - 0.5; if (fabs(q) <= 0.425) { /* p close to 1/2. 
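This is the central branch of AS 241: for |q| <= 0.425 (i.e. 0.075 <= p
<= 0.925) a single rational approximation in r = 0.180625 - q*q suffices;
the else-branch below handles the tails via r = sqrt(-log(min(p, 1 - p))).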
*/ r = 0.180625 - q * q; return q * (((((((2.5090809287301226727e3 * r + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) * r + 3.3871328727963666080e0) / (((((((5.2264952788528545610e3 * r + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) * r + 1.0); } else { if (q < 0.0) { r = p; } else { r = 1.0 - p; } assert(r > 0.0); r = sqrt(-log(r)); if (r <= 5.0) { /* p neither close to 1/2 nor 0 or 1. */ r -= 1.6; ret = ((((((((7.74545014278341407640e-4 * r + 2.27238449892691845833e-2) * r + 2.41780725177450611770e-1) * r + 1.27045825245236838258e0) * r + 3.64784832476320460504e0) * r + 5.76949722146069140550e0) * r + 4.63033784615654529590e0) * r + 1.42343711074968357734e0) / (((((((1.05075007164441684324e-9 * r + 5.47593808499534494600e-4) * r + 1.51986665636164571966e-2) * r + 1.48103976427480074590e-1) * r + 6.89767334985100004550e-1) * r + 1.67638483018380384940e0) * r + 2.05319162663775882187e0) * r + 1.0)); } else { /* p near 0 or 1. */ r -= 5.0; ret = ((((((((2.01033439929228813265e-7 * r + 2.71155556874348757815e-5) * r + 1.24266094738807843860e-3) * r + 2.65321895265761230930e-2) * r + 2.96560571828504891230e-1) * r + 1.78482653991729133580e0) * r + 5.46378491116411436990e0) * r + 6.65790464350110377720e0) / (((((((2.04426310338993978564e-15 * r + 1.42151175831644588870e-7) * r + 1.84631831751005468180e-5) * r + 7.86869131145613259100e-4) * r + 1.48753612908506148525e-2) * r + 1.36929880922735805310e-1) * r + 5.99832206555887937690e-1) * r + 1.0)); } if (q < 0.0) { ret = -ret; } return ret; } } /* * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute * the upper limit on the definite integral from [0..z] that satisfies p, * accurate to 12 decimal places. * * This implementation is based on: * * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of * the Chi^2 distribution. Applied Statistics 24(3):385-388. * * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. */ static inline double pt_chi2(double p, double df, double ln_gamma_df_2) { double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; unsigned i; assert(p >= 0.0 && p < 1.0); assert(df > 0.0); e = 5.0e-7; aa = 0.6931471805; xx = 0.5 * df; c = xx - 1.0; if (df < -1.24 * log(p)) { /* Starting approximation for small Chi^2. */ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); if (ch - e < 0.0) { return ch; } } else { if (df > 0.32) { x = pt_norm(p); /* * Starting approximation using Wilson and Hilferty * estimate. */ p1 = 0.222222 / df; ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); /* Starting approximation for p tending to 1. */ if (ch > 2.2 * df + 6.0) { ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + ln_gamma_df_2); } } else { ch = 0.4; a = log(1.0 - p); while (true) { q = ch; p1 = 1.0 + ch * (4.67 + ch); p2 = ch * (6.73 + ch * (6.66 + ch)); t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch * (13.32 + 3.0 * ch)) / p2; ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + c * aa) * p2 / p1) / t; if (fabs(q / ch - 1.0) - 0.01 <= 0.0) { break; } } } } for (i = 0; i < 20; i++) { /* Calculation of seven-term Taylor series. 
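Each of the up-to-20 passes refines ch with the seven-term Taylor
correction of AS 91 (coefficients s1..s6 below), stopping once the
relative change satisfies |q / ch - 1| <= e, with e = 5.0e-7.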
*/ q = ch; p1 = 0.5 * ch; if (p1 < 0.0) { return -1.0; } p2 = p - i_gamma(p1, xx, ln_gamma_df_2); t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; a = 0.5 * t - b * c; s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + 60.0 * a))))) / 420.0; s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * a)))) / 2520.0; s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * (889.0 + 1740.0 * a))) / 5040.0; s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - b * (s4 - b * (s5 - b * s6)))))); if (fabs(q / ch - 1.0) <= e) { break; } } return ch; } /* * Given a value p in [0..1] and Gamma distribution shape and scale parameters, * compute the upper limit on the definite integral from [0..z] that satisfies * p. */ static inline double pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale; } jemalloc-sys-0.3.2/rep/test/include/test/mq.h010064400007650000024000000055031344617474100173100ustar0000000000000000void mq_nanosleep(unsigned ns); /* * Simple templated message queue implementation that relies on only mutexes for * synchronization (which reduces portability issues). Given the following * setup: * * typedef struct mq_msg_s mq_msg_t; * struct mq_msg_s { * mq_msg(mq_msg_t) link; * [message data] * }; * mq_gen(, mq_, mq_t, mq_msg_t, link) * * The API is as follows: * * bool mq_init(mq_t *mq); * void mq_fini(mq_t *mq); * unsigned mq_count(mq_t *mq); * mq_msg_t *mq_tryget(mq_t *mq); * mq_msg_t *mq_get(mq_t *mq); * void mq_put(mq_t *mq, mq_msg_t *msg); * * The message queue linkage embedded in each message is to be treated as * externally opaque (no need to initialize or clean up externally). mq_fini() * does not perform any cleanup of messages, since it knows nothing of their * payloads. */ #define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) #define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ typedef struct { \ mtx_t lock; \ ql_head(a_mq_msg_type) msgs; \ unsigned count; \ } a_mq_type; \ a_attr bool \ a_prefix##init(a_mq_type *mq) { \ \ if (mtx_init(&mq->lock)) { \ return true; \ } \ ql_new(&mq->msgs); \ mq->count = 0; \ return false; \ } \ a_attr void \ a_prefix##fini(a_mq_type *mq) { \ mtx_fini(&mq->lock); \ } \ a_attr unsigned \ a_prefix##count(a_mq_type *mq) { \ unsigned count; \ \ mtx_lock(&mq->lock); \ count = mq->count; \ mtx_unlock(&mq->lock); \ return count; \ } \ a_attr a_mq_msg_type * \ a_prefix##tryget(a_mq_type *mq) { \ a_mq_msg_type *msg; \ \ mtx_lock(&mq->lock); \ msg = ql_first(&mq->msgs); \ if (msg != NULL) { \ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ mq->count--; \ } \ mtx_unlock(&mq->lock); \ return msg; \ } \ a_attr a_mq_msg_type * \ a_prefix##get(a_mq_type *mq) { \ a_mq_msg_type *msg; \ unsigned ns; \ \ msg = a_prefix##tryget(mq); \ if (msg != NULL) { \ return msg; \ } \ \ ns = 1; \ while (true) { \ mq_nanosleep(ns); \ msg = a_prefix##tryget(mq); \ if (msg != NULL) { \ return msg; \ } \ if (ns < 1000*1000*1000) { \ /* Double sleep time, up to max 1 second. 
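That is, mq_get() polls mq_tryget() with exponentially growing \
sleeps (1ns, 2ns, 4ns, ..., capped at 1s), bounding both wakeup \
latency and idle spinning while keeping the design mutex-only and \
portable. \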
*/ \ ns <<= 1; \ if (ns > 1000*1000*1000) { \ ns = 1000*1000*1000; \ } \ } \ } \ } \ a_attr void \ a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ \ mtx_lock(&mq->lock); \ ql_elm_new(msg, a_field); \ ql_tail_insert(&mq->msgs, msg, a_field); \ mq->count++; \ mtx_unlock(&mq->lock); \ } jemalloc-sys-0.3.2/rep/test/include/test/mtx.h010064400007650000024000000010241344617474100174750ustar0000000000000000/* * mtx is a slightly simplified version of malloc_mutex. This code duplication * is unfortunate, but there are allocator bootstrapping considerations that * would leak into the test infrastructure if malloc_mutex were used directly * in tests. */ typedef struct { #ifdef _WIN32 CRITICAL_SECTION lock; #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock lock; #else pthread_mutex_t lock; #endif } mtx_t; bool mtx_init(mtx_t *mtx); void mtx_fini(mtx_t *mtx); void mtx_lock(mtx_t *mtx); void mtx_unlock(mtx_t *mtx); jemalloc-sys-0.3.2/rep/test/include/test/test.h010064400007650000024000000322341344617474100176530ustar0000000000000000#define ASSERT_BUFSIZE 256 #define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ t a_ = (a); \ t b_ = (b); \ if (!(a_ cmp b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) " #cmp " (%s) --> " \ "%" pri " " #neg_cmp " %" pri ": ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_, b_); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ ==, "p", __VA_ARGS__) #define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ ==, "p", __VA_ARGS__) #define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) #define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) #define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) #define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) #define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) #define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) #define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) #define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) #define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) #define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) #define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) #define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) #define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) #define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) #define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) #define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) #define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) #define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) #define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) #define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) #define assert_u_lt(a, b, ...) 
assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) #define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) #define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) #define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) #define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ !=, "ld", __VA_ARGS__) #define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ ==, "ld", __VA_ARGS__) #define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ >=, "ld", __VA_ARGS__) #define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ >, "ld", __VA_ARGS__) #define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ <, "ld", __VA_ARGS__) #define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ <=, "ld", __VA_ARGS__) #define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ a, b, ==, !=, "lu", __VA_ARGS__) #define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ a, b, !=, ==, "lu", __VA_ARGS__) #define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ a, b, <, >=, "lu", __VA_ARGS__) #define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ a, b, <=, >, "lu", __VA_ARGS__) #define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ a, b, >=, <, "lu", __VA_ARGS__) #define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ a, b, >, <=, "lu", __VA_ARGS__) #define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ !=, "qd", __VA_ARGS__) #define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ ==, "qd", __VA_ARGS__) #define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ >=, "qd", __VA_ARGS__) #define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ >, "qd", __VA_ARGS__) #define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ <, "qd", __VA_ARGS__) #define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ <=, "qd", __VA_ARGS__) #define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ a, b, ==, !=, "qu", __VA_ARGS__) #define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ a, b, !=, ==, "qu", __VA_ARGS__) #define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ a, b, <, >=, "qu", __VA_ARGS__) #define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ a, b, <=, >, "qu", __VA_ARGS__) #define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ a, b, >=, <, "qu", __VA_ARGS__) #define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ a, b, >, <=, "qu", __VA_ARGS__) #define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ !=, "jd", __VA_ARGS__) #define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ ==, "jd", __VA_ARGS__) #define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ >=, "jd", __VA_ARGS__) #define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ >, "jd", __VA_ARGS__) #define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ <, "jd", __VA_ARGS__) #define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ <=, "jd", __VA_ARGS__) #define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ !=, "ju", __VA_ARGS__) #define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ ==, "ju", __VA_ARGS__) #define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ >=, "ju", __VA_ARGS__) #define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ >, "ju", __VA_ARGS__) #define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ <, "ju", __VA_ARGS__) #define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ <=, "ju", __VA_ARGS__) #define assert_zd_eq(a, b, ...) 
assert_cmp(ssize_t, a, b, ==, \ !=, "zd", __VA_ARGS__) #define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ ==, "zd", __VA_ARGS__) #define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ >=, "zd", __VA_ARGS__) #define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ >, "zd", __VA_ARGS__) #define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ <, "zd", __VA_ARGS__) #define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ <=, "zd", __VA_ARGS__) #define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ !=, "zu", __VA_ARGS__) #define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ ==, "zu", __VA_ARGS__) #define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ >=, "zu", __VA_ARGS__) #define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ >, "zu", __VA_ARGS__) #define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ <, "zu", __VA_ARGS__) #define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ <=, "zu", __VA_ARGS__) #define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ !=, FMTd32, __VA_ARGS__) #define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ ==, FMTd32, __VA_ARGS__) #define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ >=, FMTd32, __VA_ARGS__) #define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ >, FMTd32, __VA_ARGS__) #define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ <, FMTd32, __VA_ARGS__) #define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ <=, FMTd32, __VA_ARGS__) #define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ !=, FMTu32, __VA_ARGS__) #define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ ==, FMTu32, __VA_ARGS__) #define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ >=, FMTu32, __VA_ARGS__) #define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ >, FMTu32, __VA_ARGS__) #define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ <, FMTu32, __VA_ARGS__) #define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ <=, FMTu32, __VA_ARGS__) #define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ !=, FMTd64, __VA_ARGS__) #define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ ==, FMTd64, __VA_ARGS__) #define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ >=, FMTd64, __VA_ARGS__) #define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ >, FMTd64, __VA_ARGS__) #define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ <, FMTd64, __VA_ARGS__) #define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ <=, FMTd64, __VA_ARGS__) #define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ !=, FMTu64, __VA_ARGS__) #define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ ==, FMTu64, __VA_ARGS__) #define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ >=, FMTu64, __VA_ARGS__) #define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ >, FMTu64, __VA_ARGS__) #define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ <, FMTu64, __VA_ARGS__) #define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ <=, FMTu64, __VA_ARGS__) #define assert_b_eq(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ == b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) == (%s) --> %s != %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? 
"true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_b_ne(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ != b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) != (%s) --> %s == %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) #define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) #define assert_str_eq(a, b, ...) do { \ if (strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) same as (%s) --> " \ "\"%s\" differs from \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_str_ne(a, b, ...) do { \ if (!strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) differs from (%s) --> " \ "\"%s\" same as \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_not_reached(...) do { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Unreachable code reached: ", \ __func__, __FILE__, __LINE__); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } while (0) /* * If this enum changes, corresponding changes in test/test.sh.in are also * necessary. */ typedef enum { test_status_pass = 0, test_status_skip = 1, test_status_fail = 2, test_status_count = 3 } test_status_t; typedef void (test_t)(void); #define TEST_BEGIN(f) \ static void \ f(void) { \ p_test_init(#f); #define TEST_END \ goto label_test_end; \ label_test_end: \ p_test_fini(); \ } #define test(...) \ p_test(__VA_ARGS__, NULL) #define test_no_reentrancy(...) \ p_test_no_reentrancy(__VA_ARGS__, NULL) #define test_no_malloc_init(...) \ p_test_no_malloc_init(__VA_ARGS__, NULL) #define test_skip_if(e) do { \ if (e) { \ test_skip("%s:%s:%d: Test skipped: (%s)", \ __func__, __FILE__, __LINE__, #e); \ goto label_test_end; \ } \ } while (0) bool test_is_reentrant(); void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); /* For private use by macros. */ test_status_t p_test(test_t *t, ...); test_status_t p_test_no_reentrancy(test_t *t, ...); test_status_t p_test_no_malloc_init(test_t *t, ...); void p_test_init(const char *name); void p_test_fini(void); void p_test_fail(const char *prefix, const char *message); jemalloc-sys-0.3.2/rep/test/include/test/thd.h010064400007650000024000000003401344617474100174440ustar0000000000000000/* Abstraction layer for threading in tests. 
*/ #ifdef _WIN32 typedef HANDLE thd_t; #else typedef pthread_t thd_t; #endif void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); void thd_join(thd_t thd, void **ret); jemalloc-sys-0.3.2/rep/test/include/test/timer.h010064400007650000024000000004701344617474100200110ustar0000000000000000/* Simple timer, for use in benchmark reporting. */ typedef struct { nstime_t t0; nstime_t t1; } timedelta_t; void timer_start(timedelta_t *timer); void timer_stop(timedelta_t *timer); uint64_t timer_usec(const timedelta_t *timer); void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); jemalloc-sys-0.3.2/rep/test/integration/MALLOCX_ARENA.c010064400007650000024000000026411344617474100206140ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 10 static bool have_dss = #ifdef JEMALLOC_DSS true #else false #endif ; void * thd_start(void *arg) { unsigned thread_ind = (unsigned)(uintptr_t)arg; unsigned arena_ind; void *p; size_t sz; sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Error in arenas.create"); if (thread_ind % 4 != 3) { size_t mib[3]; size_t miblen = sizeof(mib) / sizeof(size_t); const char *dss_precs[] = {"disabled", "primary", "secondary"}; unsigned prec_ind = thread_ind % (sizeof(dss_precs)/sizeof(char*)); const char *dss = dss_precs[prec_ind]; int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT; assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Error in mallctlnametomib()"); mib[1] = arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, sizeof(const char *)), expected_err, "Error in mallctlbymib()"); } p = mallocx(1, MALLOCX_ARENA(arena_ind)); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); return NULL; } TEST_BEGIN(test_MALLOCX_ARENA) { thd_t thds[NTHREADS]; unsigned i; for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)(uintptr_t)i); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } } TEST_END int main(void) { return test( test_MALLOCX_ARENA); } jemalloc-sys-0.3.2/rep/test/integration/aligned_alloc.c010064400007650000024000000072521344617474100213670ustar0000000000000000#include "test/jemalloc_test.h" #define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. */ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; alignment = 0; set_errno(0); p = aligned_alloc(alignment, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment); for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { set_errno(0); p = aligned_alloc(alignment + 1, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END /* * GCC "-Walloc-size-larger-than" warning detects when one of the memory * allocation functions is called with a size larger than the maximum size that * they support. Here we want to explicitly test that the allocation functions * do indeed fail properly when this is the case, which triggers the warning. * Therefore we disable the warning for these tests. 
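 *
 * (How the guard works, as an aside: JEMALLOC_DIAGNOSTIC_PUSH and
 * JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN below, with the
 * matching JEMALLOC_DIAGNOSTIC_POP after the tests, presumably expand to
 * pragmas along the lines of
 *
 *   _Pragma("GCC diagnostic push")
 *   _Pragma("GCC diagnostic ignored \"-Walloc-size-larger-than=\"")
 *   ...
 *   _Pragma("GCC diagnostic pop")
 *
 * on compilers that support them, and to nothing elsewhere; the exact
 * expansion is an assumption here and lives in jemalloc's internal macro
 * headers rather than in this file.)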
*/ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); } TEST_END /* Re-enable the "-Walloc-size-larger-than=" warning */ JEMALLOC_DIAGNOSTIC_POP TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { ps[i] = aligned_alloc(alignment, size); if (ps[i] == NULL) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } purge(); } #undef NITER } TEST_END TEST_BEGIN(test_zero_alloc) { void *res = aligned_alloc(8, 0); assert(res); size_t usable = malloc_usable_size(res); assert(usable > 0); free(res); } TEST_END int main(void) { return test( test_alignment_errors, test_oom_errors, test_alignment_and_size, test_zero_alloc); } jemalloc-sys-0.3.2/rep/test/integration/allocated.c010064400007650000024000000060021344617474100205320ustar0000000000000000#include "test/jemalloc_test.h" static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; void * thd_start(void *arg) { int err; void *p; uint64_t a0, a1, d0, d1; uint64_t *ap0, *ap1, *dp0, *dp1; size_t sz, usize; sz = sizeof(a0); if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*ap0, a0, "\"thread.allocatedp\" should provide a pointer to internal " "storage"); sz = sizeof(d0); if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(dp0); if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*dp0, d0, "\"thread.deallocatedp\" should provide a 
pointer to internal " "storage"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() error"); sz = sizeof(a1); mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); sz = sizeof(ap1); mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); assert_u64_eq(*ap1, a1, "Dereferenced \"thread.allocatedp\" value should equal " "\"thread.allocated\" value"); assert_ptr_eq(ap0, ap1, "Pointer returned by \"thread.allocatedp\" should not change"); usize = malloc_usable_size(p); assert_u64_le(a0 + usize, a1, "Allocated memory counter should increase by at least the amount " "explicitly allocated"); free(p); sz = sizeof(d1); mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); sz = sizeof(dp1); mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); assert_u64_eq(*dp1, d1, "Dereferenced \"thread.deallocatedp\" value should equal " "\"thread.deallocated\" value"); assert_ptr_eq(dp0, dp1, "Pointer returned by \"thread.deallocatedp\" should not change"); assert_u64_le(d0 + usize, d1, "Deallocated memory counter should increase by at least the amount " "explicitly deallocated"); return NULL; label_ENOENT: assert_false(config_stats, "ENOENT should only be returned if stats are disabled"); test_skip("\"thread.allocated\" mallctl not available"); return NULL; } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread); } jemalloc-sys-0.3.2/rep/test/integration/cpp/basic.cpp010064400007650000024000000006551344617474100210150ustar0000000000000000#include <memory> #include "test/jemalloc_test.h" TEST_BEGIN(test_basic) { auto foo = new long(4); assert_ptr_not_null(foo, "Unexpected new failure"); delete foo; // Test nullptr handling. foo = nullptr; delete foo; auto bar = new long; assert_ptr_not_null(bar, "Unexpected new failure"); delete bar; // Test nullptr handling. bar = nullptr; delete bar; } TEST_END int main() { return test( test_basic); } jemalloc-sys-0.3.2/rep/test/integration/extent.c010064400007650000024000000176461344617474100201250ustar0000000000000000#include "test/jemalloc_test.h" #include "test/extent_hooks.h" static bool check_background_thread_enabled(void) { bool enabled; size_t sz = sizeof(bool); int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL, 0); if (ret == ENOENT) { return false; } assert_d_eq(ret, 0, "Unexpected mallctl error"); return enabled; } static void test_extent_body(unsigned arena_ind) { void *p; size_t large0, large1, large2, sz; size_t purge_mib[3]; size_t purge_miblen; int flags; bool xallocx_success_a, xallocx_success_b, xallocx_success_c; flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; /* Get large size classes. */ sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected arenas.lextent.0.size failure"); assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL, 0), 0, "Unexpected arenas.lextent.1.size failure"); assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL, 0), 0, "Unexpected arenas.lextent.2.size failure"); /* Test dalloc/decommit/purge cascade. 
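 *
 * (Expected chain, as read from the assertions below rather than from any
 * spec: shrinking the two-extent allocation via xallocx() makes jemalloc
 * split the extent; the explicit arena purge then drives the trailing
 * part through the dalloc and decommit hooks -- both set to refuse here
 * while try_dalloc and try_decommit are false -- leaving a lazy or forced
 * purge to actually reclaim the pages. That is why the called_ and did_
 * flags from test/extent_hooks.h are checked in exactly that order.)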
*/ purge_miblen = sizeof(purge_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), 0, "Unexpected mallctlnametomib() failure"); purge_mib[1] = (size_t)arena_ind; called_alloc = false; try_alloc = true; try_dalloc = false; try_decommit = false; p = mallocx(large0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_true(called_alloc, "Expected alloc call"); called_dalloc = false; called_decommit = false; did_purge_lazy = false; did_purge_forced = false; called_split = false; xallocx_success_a = (xallocx(p, large0, 0, flags) == large0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_a) { assert_true(called_dalloc, "Expected dalloc call"); assert_true(called_decommit, "Expected decommit call"); assert_true(did_purge_lazy || did_purge_forced, "Expected purge"); } assert_true(called_split, "Expected split call"); dallocx(p, flags); try_dalloc = true; /* Test decommit/commit and observe split/merge. */ try_dalloc = false; try_decommit = true; p = mallocx(large0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_decommit = false; did_commit = false; called_split = false; did_split = false; did_merge = false; xallocx_success_b = (xallocx(p, large0, 0, flags) == large0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_b) { assert_true(did_split, "Expected split"); } xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2); if (did_split) { assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); } if (xallocx_success_b && xallocx_success_c) { assert_true(did_merge, "Expected merge"); } dallocx(p, flags); try_dalloc = true; try_decommit = false; /* Make sure non-large allocation succeeds. */ p = mallocx(42, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, flags); } static void test_manual_hook_auto_arena(void) { unsigned narenas; size_t old_size, new_size, sz; size_t hooks_mib[3]; size_t hooks_miblen; extent_hooks_t *new_hooks, *old_hooks; extent_hooks_prep(); sz = sizeof(unsigned); /* Get number of auto arenas. */ assert_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); if (narenas == 1) { return; } /* Install custom extent hooks on arena 1 (might not be initialized). */ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); hooks_mib[1] = 1; old_size = sizeof(extent_hooks_t *); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, (void *)&new_hooks, new_size), 0, "Unexpected extent_hooks error"); static bool auto_arena_created = false; if (old_hooks != &hooks) { assert_b_eq(auto_arena_created, false, "Expected auto arena 1 created only once."); auto_arena_created = true; } } static void test_manual_hook_body(void) { unsigned arena_ind; size_t old_size, new_size, sz; size_t hooks_mib[3]; size_t hooks_miblen; extent_hooks_t *new_hooks, *old_hooks; extent_hooks_prep(); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); /* Install custom extent hooks. 
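 *
 * (For orientation: "hooks" is defined in test/extent_hooks.h. An
 * extent_hooks_t is jemalloc's table of extent callbacks; a plausible
 * sketch of such a table -- field order following the public
 * extent_hooks_t definition, so treat the layout as an assumption -- is:
 *
 *   static extent_hooks_t hooks = {
 *       extent_alloc_hook, extent_dalloc_hook, extent_destroy_hook,
 *       extent_commit_hook, extent_decommit_hook, extent_purge_lazy_hook,
 *       extent_purge_forced_hook, extent_split_hook, extent_merge_hook
 *   };
 *
 * Installing it goes through the arena.<i>.extent_hooks mallctl, which is
 * what the mallctlbymib() call below performs.)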
*/ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); hooks_mib[1] = (size_t)arena_ind; old_size = sizeof(extent_hooks_t *); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, (void *)&new_hooks, new_size), 0, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->alloc, extent_alloc_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->commit, extent_commit_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->decommit, extent_decommit_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->split, extent_split_hook, "Unexpected extent_hooks error"); assert_ptr_ne(old_hooks->merge, extent_merge_hook, "Unexpected extent_hooks error"); if (!check_background_thread_enabled()) { test_extent_body(arena_ind); } /* Restore extent hooks. */ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error"); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, NULL, 0), 0, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->alloc, default_hooks->alloc, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->commit, default_hooks->commit, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->decommit, default_hooks->decommit, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->split, default_hooks->split, "Unexpected extent_hooks error"); assert_ptr_eq(old_hooks->merge, default_hooks->merge, "Unexpected extent_hooks error"); } TEST_BEGIN(test_extent_manual_hook) { test_manual_hook_auto_arena(); test_manual_hook_body(); /* Test failure paths. 
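 *
 * Each try_ toggle below makes the corresponding custom hook report
 * failure, so these runs check that jemalloc degrades gracefully: e.g.
 * with try_split false an in-place shrink cannot split the extent, and
 * the test body must still pass.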
*/ try_split = false; test_manual_hook_body(); try_merge = false; test_manual_hook_body(); try_purge_lazy = false; try_purge_forced = false; test_manual_hook_body(); try_split = try_merge = try_purge_lazy = try_purge_forced = true; } TEST_END TEST_BEGIN(test_extent_auto_hook) { unsigned arena_ind; size_t new_size, sz; extent_hooks_t *new_hooks; extent_hooks_prep(); sz = sizeof(unsigned); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure"); test_skip_if(check_background_thread_enabled()); test_extent_body(arena_ind); } TEST_END int main(void) { return test( test_extent_manual_hook, test_extent_auto_hook); } jemalloc-sys-0.3.2/rep/test/integration/extent.sh010064400007650000024000000001271344617474100203030ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="junk:false" fi jemalloc-sys-0.3.2/rep/test/integration/malloc.c010064400007650000024000000003641344617474100200560ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_zero_alloc) { void *res = malloc(0); assert(res); size_t usable = malloc_usable_size(res); assert(usable > 0); free(res); } TEST_END int main(void) { return test( test_zero_alloc); } jemalloc-sys-0.3.2/rep/test/integration/mallocx.c010064400007650000024000000157711344617474100202560ustar0000000000000000#include "test/jemalloc_test.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. */ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } /* * GCC "-Walloc-size-larger-than" warning detects when one of the memory * allocation functions is called with a size larger than the maximum size that * they support. Here we want to explicitly test that the allocation functions * do indeed fail properly when this is the case, which triggers the warning. * Therefore we disable the warning for these tests. 
*/ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { size_t largemax; largemax = get_large_size(get_nlarge()-1); assert_ptr_null(mallocx(largemax+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", largemax+1); assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(mallocx(SIZE_T_MAX, 0), "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); } TEST_END static void * remote_alloc(void *arg) { unsigned arena; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); size_t large_sz; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, NULL, 0), 0, "Unexpected mallctl failure"); void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); void **ret = (void **)arg; *ret = ptr; return NULL; } TEST_BEGIN(test_remote_free) { thd_t thd; void *ret; thd_create(&thd, remote_alloc, (void *)&ret); thd_join(thd, NULL); assert_ptr_not_null(ret, "Unexpected mallocx failure"); /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */ dallocx(ret, 0); mallctl("thread.tcache.flush", NULL, NULL, NULL, 0); } TEST_END TEST_BEGIN(test_oom) { size_t largemax; bool oom; void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. */ largemax = get_large_size(get_nlarge()-1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0)); if (ptrs[i] == NULL) { oom = true; } } assert_true(oom, "Expected OOM during series of calls to mallocx(size=%zu, 0)", largemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { if (ptrs[i] != NULL) { dallocx(ptrs[i], 0); } } purge(); #if LG_SIZEOF_PTR == 3 assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x8000000000000000ULL)), "Expected OOM for mallocx()"); assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x80000000)), "Expected OOM for mallocx()"); #else assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), "Expected OOM for mallocx()"); #endif } TEST_END /* Re-enable the "-Walloc-size-larger-than=" warning */ JEMALLOC_DIAGNOSTIC_POP TEST_BEGIN(test_basic) { #define MAXSZ (((size_t)1) << 23) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { size_t nsz, rsz; void *p; nsz = nallocx(sz, 0); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=0) error", sz); rsz = sallocx(p, 0); assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); dallocx(p, 0); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=0) error", sz); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error", nsz); rsz = sallocx(p, 0); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); dallocx(p, 0); purge(); } #undef MAXSZ } TEST_END TEST_BEGIN(test_alignment_and_size) { const char *percpu_arena; size_t sz = sizeof(percpu_arena); if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || strcmp(percpu_arena, 
"disabled") != 0) { test_skip("test_alignment_and_size skipped: " "not working with percpu arena."); }; #define MAXALIGN (((size_t)1) << 23) #define NITER 4 size_t nsz, rsz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO | MALLOCX_ARENA(0)); assert_zu_ne(nsz, 0, "nallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO | MALLOCX_ARENA(0)); assert_ptr_not_null(ps[i], "mallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); rsz = sallocx(ps[i], 0); assert_zu_ge(rsz, sz, "Real size smaller than expected for " "alignment=%zu, size=%zu", alignment, sz); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch for " "alignment=%zu, size=%zu", alignment, sz); assert_ptr_null( (void *)((uintptr_t)ps[i] & (alignment-1)), "%p inadequately aligned for" " alignment=%zu, size=%zu", ps[i], alignment, sz); total += rsz; if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { dallocx(ps[i], 0); ps[i] = NULL; } } } purge(); } #undef MAXALIGN #undef NITER } TEST_END int main(void) { return test( test_overflow, test_oom, test_remote_free, test_basic, test_alignment_and_size); } jemalloc-sys-0.3.2/rep/test/integration/mallocx.sh010064400007650000024000000001271344617474100204330ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="junk:false" fi jemalloc-sys-0.3.2/rep/test/integration/overflow.c010064400007650000024000000035561344617474100204600ustar0000000000000000#include "test/jemalloc_test.h" /* * GCC "-Walloc-size-larger-than" warning detects when one of the memory * allocation functions is called with a size larger than the maximum size that * they support. Here we want to explicitly test that the allocation functions * do indeed fail properly when this is the case, which triggers the warning. * Therefore we disable the warning for these tests. 
*/ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; void *p; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nlextents - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); assert_ptr_null(malloc(max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(malloc(SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); assert_ptr_null(calloc(1, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(calloc(1, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() OOM"); assert_ptr_null(realloc(p, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(realloc(p, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); free(p); } TEST_END /* Re-enable the "-Walloc-size-larger-than=" warning */ JEMALLOC_DIAGNOSTIC_POP int main(void) { return test( test_overflow); } jemalloc-sys-0.3.2/rep/test/integration/posix_memalign.c010064400007650000024000000055021344617474100216210ustar0000000000000000#include "test/jemalloc_test.h" #define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. 
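 *
 * ("Purging" means asking the arena to hand its dirty pages back to the
 * OS. The arena.0.purge mallctl takes neither input nor output, which is
 * why every pointer/size argument in the helper below is NULL or 0.)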
*/ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; for (alignment = 0; alignment < sizeof(void *); alignment++) { assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, "Expected error for invalid alignment %zu", alignment); } for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { err = posix_memalign(&ps[i], alignment, size); if (err) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } purge(); } #undef NITER } TEST_END int main(void) { return test( test_alignment_errors, test_oom_errors, test_alignment_and_size); } jemalloc-sys-0.3.2/rep/test/integration/rallocx.c010064400007650000024000000145371344617474100202620ustar0000000000000000#include "test/jemalloc_test.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return ret; } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } TEST_BEGIN(test_grow_and_shrink) { void *p, *q; size_t tsz; #define NCYCLES 3 unsigned i, j; #define NSZS 1024 size_t szs[NSZS]; #define MAXSZ ZU(12 * 1024 * 1024) p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); szs[0] = sallocx(p, 0); for (i = 0; i < NCYCLES; i++) { for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { q = rallocx(p, szs[j-1]+1, 0); assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j-1], szs[j-1]+1); szs[j] = sallocx(q, 0); assert_zu_ne(szs[j], szs[j-1]+1, "Expected size to be at least: %zu", szs[j-1]+1); p = q; } for (j--; j > 0; j--) { q = rallocx(p, szs[j-1], 0); assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j], szs[j-1]); tsz = sallocx(q, 0); assert_zu_eq(tsz, szs[j-1], "Expected size=%zu, got size=%zu", szs[j-1], tsz); p = q; } } dallocx(p, 0); #undef MAXSZ #undef NSZS #undef NCYCLES } TEST_END static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { bool ret = false; const uint8_t *buf = (const uint8_t *)p; size_t i; for (i = 0; i < len; i++) { uint8_t b = buf[offset+i]; if (b != c) { test_fail("Allocation at %p (len=%zu) contains %#x " "rather than %#x at offset %zu", p, len, b, c, offset+i); ret = true; } } return ret; } TEST_BEGIN(test_zero) { void *p, *q; size_t psz, qsz, i, j; size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; #define FILL_BYTE 0xaaU #define RANGE 2048 for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { size_t start_size = start_sizes[i]; p = mallocx(start_size, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); psz = sallocx(p, 0); assert_false(validate_fill(p, 0, 0, psz), "Expected zeroed memory"); memset(p, FILL_BYTE, psz); assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); for (j = 1; j < RANGE; j++) { q = rallocx(p, start_size+j, MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error"); qsz = sallocx(q, 0); if (q != p || qsz != psz) { assert_false(validate_fill(q, FILL_BYTE, 0, psz), "Expected filled memory"); assert_false(validate_fill(q, 0, psz, qsz-psz), "Expected zeroed memory"); } if (psz != qsz) { memset((void *)((uintptr_t)q+psz), FILL_BYTE, qsz-psz); psz = qsz; } p = q; } assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); dallocx(p, 0); } #undef FILL_BYTE } TEST_END TEST_BEGIN(test_align) { void *p, *q; size_t align; #define MAX_ALIGN (ZU(1) << 25) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { q = rallocx(p, 1, MALLOCX_ALIGN(align)); assert_ptr_not_null(q, "Unexpected rallocx() error for align=%zu", align); assert_ptr_null( (void *)((uintptr_t)q & (align-1)), "%p inadequately aligned for align=%zu", q, align); p = q; } dallocx(p, 0); #undef MAX_ALIGN } TEST_END TEST_BEGIN(test_lg_align_and_zero) { void *p, *q; unsigned lg_align; size_t sz; #define MAX_LG_ALIGN 25 #define MAX_VALIDATE (ZU(1) << 22) lg_align = 0; p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error for lg_align=%u", lg_align); assert_ptr_null( (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), "%p inadequately 
aligned for lg_align=%u", q, lg_align); sz = sallocx(q, 0); if ((sz << 1) <= MAX_VALIDATE) { assert_false(validate_fill(q, 0, 0, sz), "Expected zeroed memory"); } else { assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), "Expected zeroed memory"); assert_false(validate_fill( (void *)((uintptr_t)q+sz-MAX_VALIDATE), 0, 0, MAX_VALIDATE), "Expected zeroed memory"); } p = q; } dallocx(p, 0); #undef MAX_VALIDATE #undef MAX_LG_ALIGN } TEST_END /* * GCC "-Walloc-size-larger-than" warning detects when one of the memory * allocation functions is called with a size larger than the maximum size that * they support. Here we want to explicitly test that the allocation functions * do indeed fail properly when this is the case, which triggers the warning. * Therefore we disable the warning for these tests. */ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { size_t largemax; void *p; largemax = get_large_size(get_nlarge()-1); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_ptr_null(rallocx(p, largemax+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); dallocx(p, 0); } TEST_END /* Re-enable the "-Walloc-size-larger-than=" warning */ JEMALLOC_DIAGNOSTIC_POP int main(void) { return test( test_grow_and_shrink, test_zero, test_align, test_lg_align_and_zero, test_overflow); } jemalloc-sys-0.3.2/rep/test/integration/sdallocx.c010064400007650000024000000020311344617474100204110ustar0000000000000000#include "test/jemalloc_test.h" #define MAXALIGN (((size_t)1) << 22) #define NITER 3 TEST_BEGIN(test_basic) { void *ptr = mallocx(64, 0); sdallocx(ptr, 64, 0); } TEST_END TEST_BEGIN(test_alignment_and_size) { size_t nsz, sz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); total += nsz; if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { sdallocx(ps[i], sz, MALLOCX_ALIGN(alignment)); ps[i] = NULL; } } } } } TEST_END int main(void) { return test_no_reentrancy( test_basic, test_alignment_and_size); } jemalloc-sys-0.3.2/rep/test/integration/slab_sizes.c010064400007650000024000000043271344617474100207500ustar0000000000000000#include "test/jemalloc_test.h" /* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. 
*/ TEST_BEGIN(test_slab_sizes) { unsigned nbins; size_t page; size_t sizemib[4]; size_t slabmib[4]; size_t len; len = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, "nbins mallctl failure"); len = sizeof(page); assert_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0, "page mallctl failure"); len = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0, "bin size mallctlnametomib failure"); len = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len), 0, "slab size mallctlnametomib failure"); size_t biggest_slab_seen = 0; for (unsigned i = 0; i < nbins; i++) { size_t bin_size; size_t slab_size; len = sizeof(size_t); sizemib[2] = i; slabmib[2] = i; assert_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len, NULL, 0), 0, "bin size mallctlbymib failure"); len = sizeof(size_t); assert_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len, NULL, 0), 0, "slab size mallctlbymib failure"); if (bin_size < 100) { /* * Then we should be as close to 17 as possible. Since * not all page sizes are valid (because of bitmap * limitations on the number of items in a slab), we * should at least make sure that the number of pages * goes up. */ assert_zu_ge(slab_size, biggest_slab_seen, "Slab sizes should go up"); biggest_slab_seen = slab_size; } else if ( (100 <= bin_size && bin_size < 128) || (128 < bin_size && bin_size <= 200)) { assert_zu_eq(slab_size, page, "Forced-small slabs should be small"); } else if (bin_size == 128) { assert_zu_eq(slab_size, 2 * page, "Forced-2-page slab should be 2 pages"); } else if (200 < bin_size && bin_size <= 4096) { assert_zu_ge(slab_size, biggest_slab_seen, "Slab sizes should go up"); biggest_slab_seen = slab_size; } } /* * For any reasonable configuration, 17 pages should be a valid slab * size for 4096-byte items. */ assert_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target"); } TEST_END int main(void) { return test( test_slab_sizes); } jemalloc-sys-0.3.2/rep/test/integration/slab_sizes.sh010064400007650000024000000001531344617474100211310ustar0000000000000000#!/bin/sh # Some screwy-looking slab sizes. export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2" jemalloc-sys-0.3.2/rep/test/integration/smallocx.c010064400007650000024000000201211344617474100204220ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/jemalloc_macros.h" #define STR_HELPER(x) #x #define STR(x) STR_HELPER(x) #ifndef JEMALLOC_VERSION_GID_IDENT #error "JEMALLOC_VERSION_GID_IDENT not defined" #endif #define JOIN(x, y) x ## y #define JOIN2(x, y) JOIN(x, y) #define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT) typedef struct { void *ptr; size_t size; } smallocx_return_t; extern smallocx_return_t smallocx(size_t size, int flags); static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return ret; } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. */ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } /* * GCC "-Walloc-size-larger-than" warning detects when one of the memory * allocation functions is called with a size larger than the maximum size that * they support. Here we want to explicitly test that the allocation functions * do indeed fail properly when this is the case, which triggers the warning. * Therefore we disable the warning for these tests. */ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { size_t largemax; largemax = get_large_size(get_nlarge()-1); assert_ptr_null(smallocx(largemax+1, 0).ptr, "Expected OOM for smallocx(size=%#zx, 0)", largemax+1); assert_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr, "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(smallocx(SIZE_T_MAX, 0).ptr, "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr, "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); } TEST_END static void * remote_alloc(void *arg) { unsigned arena; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); size_t large_sz; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, NULL, 0), 0, "Unexpected mallctl failure"); smallocx_return_t r = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); void *ptr = r.ptr; assert_zu_eq(r.size, nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE), "Expected smalloc(size,flags).size == nallocx(size,flags)"); void **ret = (void **)arg; *ret = ptr; return NULL; } TEST_BEGIN(test_remote_free) { thd_t thd; void *ret; thd_create(&thd, remote_alloc, (void *)&ret); thd_join(thd, NULL); assert_ptr_not_null(ret, "Unexpected smallocx failure"); /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */ dallocx(ret, 0); mallctl("thread.tcache.flush", NULL, NULL, NULL, 0); } TEST_END TEST_BEGIN(test_oom) { size_t largemax; bool oom; void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. 
*/ largemax = get_large_size(get_nlarge()-1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { ptrs[i] = smallocx(largemax, 0).ptr; if (ptrs[i] == NULL) { oom = true; } } assert_true(oom, "Expected OOM during series of calls to smallocx(size=%zu, 0)", largemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { if (ptrs[i] != NULL) { dallocx(ptrs[i], 0); } } purge(); #if LG_SIZEOF_PTR == 3 assert_ptr_null(smallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x8000000000000000ULL)).ptr, "Expected OOM for smallocx()"); assert_ptr_null(smallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x80000000)).ptr, "Expected OOM for smallocx()"); #else assert_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr, "Expected OOM for smallocx()"); #endif } TEST_END /* Re-enable the "-Walloc-size-larger-than=" warning */ JEMALLOC_DIAGNOSTIC_POP TEST_BEGIN(test_basic) { #define MAXSZ (((size_t)1) << 23) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { smallocx_return_t ret; size_t nsz, rsz, smz; void *p; nsz = nallocx(sz, 0); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); ret = smallocx(sz, 0); p = ret.ptr; smz = ret.size; assert_ptr_not_null(p, "Unexpected smallocx(size=%zx, flags=0) error", sz); rsz = sallocx(p, 0); assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch"); dallocx(p, 0); ret = smallocx(sz, 0); p = ret.ptr; smz = ret.size; assert_ptr_not_null(p, "Unexpected smallocx(size=%zx, flags=0) error", sz); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); assert_zu_ne(smz, 0, "Unexpected smallocx() error"); ret = smallocx(sz, MALLOCX_ZERO); p = ret.ptr; assert_ptr_not_null(p, "Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error", nsz); rsz = sallocx(p, 0); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch"); dallocx(p, 0); purge(); } #undef MAXSZ } TEST_END TEST_BEGIN(test_alignment_and_size) { const char *percpu_arena; size_t sz = sizeof(percpu_arena); if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || strcmp(percpu_arena, "disabled") != 0) { test_skip("test_alignment_and_size skipped: " "not working with percpu arena."); }; #define MAXALIGN (((size_t)1) << 23) #define NITER 4 size_t nsz, rsz, smz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); assert_zu_ne(nsz, 0, "nallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); smallocx_return_t ret = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); ps[i] = ret.ptr; assert_ptr_not_null(ps[i], "smallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); rsz = sallocx(ps[i], 0); smz = ret.size; assert_zu_ge(rsz, sz, "Real size smaller than expected for " "alignment=%zu, size=%zu", alignment, sz); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch for " "alignment=%zu, size=%zu", alignment, sz); assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch for " "alignment=%zu, size=%zu", alignment, sz); assert_ptr_null( (void *)((uintptr_t)ps[i] & (alignment-1)), "%p inadequately aligned for" " alignment=%zu, 
size=%zu", ps[i], alignment, sz); total += rsz; if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { dallocx(ps[i], 0); ps[i] = NULL; } } } purge(); } #undef MAXALIGN #undef NITER } TEST_END int main(void) { return test( test_overflow, test_oom, test_remote_free, test_basic, test_alignment_and_size); } jemalloc-sys-0.3.2/rep/test/integration/smallocx.sh010064400007650000024000000001311344617474100206110ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="junk:false" fi jemalloc-sys-0.3.2/rep/test/integration/thread_arena.c010064400007650000024000000034571344617474100212320ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 10 void * thd_start(void *arg) { unsigned main_arena_ind = *(unsigned *)arg; void *p; unsigned arena_ind; size_t size; int err; p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); free(p); size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, (void *)&main_arena_ind, sizeof(main_arena_ind)))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } assert_u_eq(arena_ind, main_arena_ind, "Arena index should be same as for main thread"); return NULL; } static void mallctl_failure(int err) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } TEST_BEGIN(test_thread_arena) { void *p; int err; thd_t thds[NTHREADS]; unsigned i; p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); unsigned arena_ind, old_arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Arena creation failure"); size_t size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size, (void *)&arena_ind, sizeof(arena_ind))) != 0) { mallctl_failure(err); } for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)&arena_ind); } for (i = 0; i < NTHREADS; i++) { intptr_t join_ret; thd_join(thds[i], (void *)&join_ret); assert_zd_eq(join_ret, 0, "Unexpected thread join error"); } free(p); } TEST_END int main(void) { return test( test_thread_arena); } jemalloc-sys-0.3.2/rep/test/integration/thread_tcache_enabled.c010064400007650000024000000044651344617474100230450ustar0000000000000000#include "test/jemalloc_test.h" void * thd_start(void *arg) { bool e0, e1; size_t sz = sizeof(bool); assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); if (e0) { e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); } e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, 
"Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); return NULL; } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread); } jemalloc-sys-0.3.2/rep/test/integration/xallocx.c010064400007650000024000000234241344617474100202630ustar0000000000000000#include "test/jemalloc_test.h" /* * Use a separate arena for xallocx() extension/contraction tests so that * internal allocation e.g. by heap profiling can't interpose allocations where * xallocx() would ordinarily be able to extend. */ static unsigned arena_ind(void) { static unsigned ind = 0; if (ind == 0) { size_t sz = sizeof(ind); assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL, 0), 0, "Unexpected mallctl failure creating arena"); } return ind; } TEST_BEGIN(test_same_size) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_no_move) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, sz-42, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_no_move_fail) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz + 5, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nsmall(void) { return get_nsizes_impl("arenas.nbins"); } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return ret; } static size_t get_small_size(size_t ind) { return get_size_impl("arenas.bin.0.size", ind); } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } TEST_BEGIN(test_size) { size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test smallest supported size. */ assert_zu_eq(xallocx(p, 1, 0, 0), small0, "Unexpected xallocx() behavior"); /* Test largest supported size. */ assert_zu_le(xallocx(p, largemax, 0, 0), largemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_size_extra_overflow) { size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, largemax, 1, 0), largemax, "Unexpected xallocx() behavior"); /* Test overflow such that largemax-size underflows. */ assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_small) { size_t small0, small1, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_large) { int flags = MALLOCX_ARENA(arena_ind()); size_t smallmax, large1, large2, large3, largemax; void *p; /* Get size classes. */ smallmax = get_small_size(get_nsmall()-1); large1 = get_large_size(1); large2 = get_large_size(2); large3 = get_large_size(3); largemax = get_large_size(get_nlarge()-1); p = mallocx(large3, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, smallmax, 0, flags), large1, "Unexpected xallocx() behavior"); if (xallocx(p, large3, 0, flags) != large3) { p = rallocx(p, large3, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } /* Test size decrease with non-zero extra. 
*/ assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ assert_zu_le(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, largemax+1, 0, flags), large3, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); if (xallocx(p, large3, 0, flags) != large3) { p = rallocx(p, large3, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } /* Test size+extra overflow. */ assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax, "Unexpected xallocx() behavior"); dallocx(p, flags); } TEST_END static void print_filled_extents(const void *p, uint8_t c, size_t len) { const uint8_t *pc = (const uint8_t *)p; size_t i, range0; uint8_t c0; malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); range0 = 0; c0 = pc[0]; for (i = 0; i < len; i++) { if (pc[i] != c0) { malloc_printf(" %#x[%zu..%zu)", c0, range0, i); range0 = i; c0 = pc[i]; } } malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i); } static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { const uint8_t *pc = (const uint8_t *)p; bool err; size_t i; for (i = offset, err = false; i < offset+len; i++) { if (pc[i] != c) { err = true; } } if (err) { print_filled_extents(p, c, offset + len); } return err; } static void test_zero(size_t szmin, size_t szmax) { int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; size_t sz, nsz; void *p; #define FILL_BYTE 0x7aU sz = szmax; p = mallocx(sz, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", sz); /* * Fill with non-zero so that non-debug builds are more likely to detect * errors. */ memset(p, FILL_BYTE, sz); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); /* Shrink in place so that we can expect growing in place to succeed. 
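 * (Having just vacated the tail of the extent, the incremental xallocx()
 * growth attempts in the loop below are expected to succeed without
 * falling back to rallocx().)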
*/ sz = szmin; if (xallocx(p, sz, 0, flags) != sz) { p = rallocx(p, sz, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); for (sz = szmin; sz < szmax; sz = nsz) { nsz = nallocx(sz+1, flags); if (xallocx(p, sz+1, 0, flags) != nsz) { p = rallocx(p, sz+1, flags); assert_ptr_not_null(p, "Unexpected rallocx() failure"); } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); assert_false(validate_fill(p, 0x00, sz, nsz-sz), "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); assert_false(validate_fill(p, FILL_BYTE, 0, nsz), "Memory not filled: nsz=%zu", nsz); } dallocx(p, flags); } TEST_BEGIN(test_zero_large) { size_t large0, large1; /* Get size classes. */ large0 = get_large_size(0); large1 = get_large_size(1); test_zero(large1, large0 * 2); } TEST_END int main(void) { return test( test_same_size, test_extra_no_move, test_no_move_fail, test_size, test_size_extra_overflow, test_extra_small, test_extra_large, test_zero_large); } jemalloc-sys-0.3.2/rep/test/integration/xallocx.sh010064400007650000024000000001271344617474100204460ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="junk:false" fi jemalloc-sys-0.3.2/rep/test/src/SFMT.c010064400007650000024000000501701344617474100156240ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.c * @brief SIMD oriented Fast Mersenne Twister(SFMT) * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. 
* * The new BSD License is applied to this software, see LICENSE.txt */ #define SFMT_C_ #include "test/jemalloc_test.h" #include "test/SFMT-params.h" #if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(ONLY64) && !defined(BIG_ENDIAN64) #if defined(__GNUC__) #error "-DONLY64 must be specified with -DBIG_ENDIAN64" #endif #undef ONLY64 #endif /*------------------------------------------------------ 128-bit SIMD data type for Altivec, SSE2 or standard C ------------------------------------------------------*/ #if defined(HAVE_ALTIVEC) /** 128-bit data structure */ union W128_T { vector unsigned int s; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #elif defined(HAVE_SSE2) /** 128-bit data structure */ union W128_T { __m128i si; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #else /** 128-bit data structure */ struct W128_T { uint32_t u[4]; }; /** 128-bit data type */ typedef struct W128_T w128_t; #endif struct sfmt_s { /** the 128-bit internal state array */ w128_t sfmt[N]; /** index counter to the 32-bit internal state array */ int idx; /** a flag: it is 0 if and only if the internal state is not yet * initialized. */ int initialized; }; /*-------------------------------------- FILE GLOBAL VARIABLES internal state, index counter and flag --------------------------------------*/ /** a parity check vector which certificate the period of 2^{MEXP} */ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; /*---------------- STATIC FUNCTIONS ----------------*/ static inline int idxof(int i); #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) static inline void rshift128(w128_t *out, w128_t const *in, int shift); static inline void lshift128(w128_t *out, w128_t const *in, int shift); #endif static inline void gen_rand_all(sfmt_t *ctx); static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); static inline uint32_t func1(uint32_t x); static inline uint32_t func2(uint32_t x); static void period_certification(sfmt_t *ctx); #if defined(BIG_ENDIAN64) && !defined(ONLY64) static inline void swap(w128_t *array, int size); #endif #if defined(HAVE_ALTIVEC) #include "test/SFMT-alti.h" #elif defined(HAVE_SSE2) #include "test/SFMT-sse2.h" #endif /** * This function simulate a 64-bit index of LITTLE ENDIAN * in BIG ENDIAN machine. */ #ifdef ONLY64 static inline int idxof(int i) { return i ^ 1; } #else static inline int idxof(int i) { return i; } #endif /** * This function simulates SIMD 128-bit right shift by the standard C. * The 128-bit integer given in in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. 
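 * (The shift is carried out on two 64-bit halves; the
 * ol |= th << (64 - shift * 8) term propagates the bits that cross the
 * half boundary.)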
* @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif /** * This function simulates SIMD 128-bit left shift by the standard C. * The 128-bit integer given in in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. * @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #ifdef ONLY64 static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif #endif /** * This function represents the recursion formula. 
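 * Each output word is computed as
 *   r = a ^ lshift128(a, SL2) ^ ((b >> SR1) & MSK) ^ rshift128(c, SR2)
 *       ^ (d << SL1),
 * with the per-word shifts and masks applied lane by lane.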
* @param r output * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] ^ (d->u[3] << SL1); } #else static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1); } #endif #endif #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) /** * This function fills the internal state array with pseudorandom * integers. */ static inline void gen_rand_all(sfmt_t *ctx) { int i; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } for (; i < N; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. 
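 * size is assumed to be at least N; the public callers fill_array32() and
 * fill_array64() assert this before converting their element counts into
 * 128-bit units.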
*/ static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < N; i++) { do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < size - N; i++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j] = array[j + size - N]; } for (; i < size; i++, j++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; ctx->sfmt[j] = array[i]; } } #endif #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) static inline void swap(w128_t *array, int size) { int i; uint32_t x, y; for (i = 0; i < size; i++) { x = array[i].u[0]; y = array[i].u[2]; array[i].u[0] = array[i].u[1]; array[i].u[2] = array[i].u[3]; array[i].u[1] = x; array[i].u[3] = y; } } #endif /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func1(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1664525UL; } /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func2(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1566083941UL; } /** * This function certificate the period of 2^{MEXP} */ static void period_certification(sfmt_t *ctx) { int inner = 0; int i, j; uint32_t work; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; for (i = 0; i < 4; i++) inner ^= psfmt32[idxof(i)] & parity[i]; for (i = 16; i > 0; i >>= 1) inner ^= inner >> i; inner &= 1; /* check OK */ if (inner == 1) { return; } /* check NG, and modification */ for (i = 0; i < 4; i++) { work = 1; for (j = 0; j < 32; j++) { if ((work & parity[i]) != 0) { psfmt32[idxof(i)] ^= work; return; } work = work << 1; } } } /*---------------- PUBLIC FUNCTIONS ----------------*/ /** * This function returns the identification string. * The string shows the word size, the Mersenne exponent, * and all parameters of this generator. */ const char *get_idstring(void) { return IDSTR; } /** * This function returns the minimum size of array used for \b * fill_array32() function. * @return minimum size of array used for fill_array32() function. */ int get_min_array_size32(void) { return N32; } /** * This function returns the minimum size of array used for \b * fill_array64() function. * @return minimum size of array used for fill_array64() function. */ int get_min_array_size64(void) { return N64; } #ifndef ONLY64 /** * This function generates and returns 32-bit pseudorandom number. * init_gen_rand or init_by_array must be called before this function. * @return 32-bit pseudorandom number */ uint32_t gen_rand32(sfmt_t *ctx) { uint32_t r; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; assert(ctx->initialized); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } r = psfmt32[ctx->idx++]; return r; } /* Generate a random integer in [0..limit). */ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { uint32_t ret, above; above = 0xffffffffU - (0xffffffffU % limit); while (1) { ret = gen_rand32(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #endif /** * This function generates and returns 64-bit pseudorandom number. 
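 * Each call consumes two consecutive 32-bit state words, which is why
 * ctx->idx must remain even (see the assertion in the body).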
* init_gen_rand or init_by_array must be called before this function. * The function gen_rand64 should not be called after gen_rand32, * unless an initialization is again executed. * @return 64-bit pseudorandom number */ uint64_t gen_rand64(sfmt_t *ctx) { #if defined(BIG_ENDIAN64) && !defined(ONLY64) uint32_t r1, r2; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; #else uint64_t r; uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; #endif assert(ctx->initialized); assert(ctx->idx % 2 == 0); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } #if defined(BIG_ENDIAN64) && !defined(ONLY64) r1 = psfmt32[ctx->idx]; r2 = psfmt32[ctx->idx + 1]; ctx->idx += 2; return ((uint64_t)r2 << 32) | r1; #else r = psfmt64[ctx->idx / 2]; ctx->idx += 2; return r; #endif } /* Generate a random integer in [0..limit). */ uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { uint64_t ret, above; above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); while (1) { ret = gen_rand64(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #ifndef ONLY64 /** * This function generates pseudorandom 32-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 624 and a * multiple of four. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 32-bit integers are filled * by this function. The pointer to the array must be \b "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 32-bit pseudorandom integers to be * generated. size must be a multiple of 4, and greater than or equal * to (MEXP / 128 + 1) * 4. * * @note \b memalign or \b posix_memalign is available to get aligned * memory. Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 4 == 0); assert(size >= N32); gen_rand_array(ctx, (w128_t *)array, size / 4); ctx->idx = N32; } #endif /** * This function generates pseudorandom 64-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 312 and a * multiple of two. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 64-bit integers are filled * by this function. The pointer to the array must be "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 64-bit pseudorandom integers to be * generated. size must be a multiple of 2, and greater than or equal * to (MEXP / 128 + 1) * 2 * * @note \b memalign or \b posix_memalign is available to get aligned * memory. 
Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 2 == 0); assert(size >= N64); gen_rand_array(ctx, (w128_t *)array, size / 2); ctx->idx = N32; #if defined(BIG_ENDIAN64) && !defined(ONLY64) swap((w128_t *)array, size /2); #endif } /** * This function initializes the internal state array with a 32-bit * integer seed. * * @param seed a 32-bit integer used as the seed. */ sfmt_t *init_gen_rand(uint32_t seed) { void *p; sfmt_t *ctx; int i; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; psfmt32[idxof(0)] = seed; for (i = 1; i < N32; i++) { psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] ^ (psfmt32[idxof(i - 1)] >> 30)) + i; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } /** * This function initializes the internal state array, * with an array of 32-bit integers used as the seeds * @param init_key the array of 32-bit integers, used as a seed. * @param key_length the length of init_key. */ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { void *p; sfmt_t *ctx; int i, j, count; uint32_t r; int lag; int mid; int size = N * 4; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; if (size >= 623) { lag = 11; } else if (size >= 68) { lag = 7; } else if (size >= 39) { lag = 5; } else { lag = 3; } mid = (size - lag) / 2; memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); if (key_length + 1 > N32) { count = key_length + 1; } else { count = N32; } r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] ^ psfmt32[idxof(N32 - 1)]); psfmt32[idxof(mid)] += r; r += key_length; psfmt32[idxof(mid + lag)] += r; psfmt32[idxof(0)] = r; count--; for (i = 1, j = 0; (j < count) && (j < key_length); j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += init_key[j] + i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (; j < count; j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (j = 0; j < N32; j++) { r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] ^= r; r -= i; psfmt32[idxof((i + mid + lag) % N32)] ^= r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } void fini_gen_rand(sfmt_t *ctx) { assert(ctx != NULL); ctx->initialized = 0; free(ctx); } jemalloc-sys-0.3.2/rep/test/src/btalloc.c010064400007650000024000000001571344617474100164730ustar0000000000000000#include "test/jemalloc_test.h" void * btalloc(size_t size, unsigned bits) { return btalloc_0(size, bits); } jemalloc-sys-0.3.2/rep/test/src/btalloc_0.c010064400007650000024000000000621344617474100167050ustar0000000000000000#include "test/jemalloc_test.h" btalloc_n_gen(0) jemalloc-sys-0.3.2/rep/test/src/btalloc_1.c010064400007650000024000000000621344617474100167060ustar0000000000000000#include "test/jemalloc_test.h" btalloc_n_gen(1) 
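The SFMT helpers defined in test/src/SFMT.c above constitute the test suite's private PRNG API: init_gen_rand()/init_by_array() allocate and seed a generator, gen_rand32()/gen_rand64() and their *_range() variants draw values from it, and fini_gen_rand() releases it. A minimal usage sketch follows (a hypothetical test body, not part of the crate; it assumes the usual test/jemalloc_test.h harness, including its assert_u32_lt macro):

#include "test/jemalloc_test.h"

TEST_BEGIN(test_sfmt_usage_sketch) {
	/* Seed a generator with a fixed 32-bit seed for reproducibility. */
	sfmt_t *ctx = init_gen_rand(12345);
	assert_ptr_not_null(ctx, "Unexpected init_gen_rand() failure");
	for (int i = 0; i < 1000; i++) {
		/* Rejection sampling keeps the modulo reduction unbiased. */
		uint32_t r = gen_rand32_range(ctx, 100);
		assert_u32_lt(r, 100, "Value out of range: %u", r);
	}
	fini_gen_rand(ctx);
}
TEST_END

int
main(void) {
	return test(test_sfmt_usage_sketch);
}

(gen_rand32_range(), defined above, retries any raw draw that falls in the biased tail of the 32-bit range before applying the final modulo, so the result is uniform over [0..limit).)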
jemalloc-sys-0.3.2/rep/test/src/math.c010064400007650000024000000000601344617474100157750ustar0000000000000000
#define MATH_C_
#include "test/jemalloc_test.h"
jemalloc-sys-0.3.2/rep/test/src/mq.c010064400007650000024000000007121344617474100154650ustar0000000000000000
#include "test/jemalloc_test.h"

/*
 * Sleep for approximately ns nanoseconds.  No lower or upper bound on sleep
 * time is guaranteed.
 */
void
mq_nanosleep(unsigned ns) {
	assert(ns <= 1000*1000*1000);

#ifdef _WIN32
	Sleep(ns / 1000);
#else
	{
		struct timespec timeout;

		if (ns < 1000*1000*1000) {
			timeout.tv_sec = 0;
			timeout.tv_nsec = ns;
		} else {
			timeout.tv_sec = 1;
			timeout.tv_nsec = 0;
		}
		nanosleep(&timeout, NULL);
	}
#endif
}
jemalloc-sys-0.3.2/rep/test/src/mtx.c010064400007650000024000000022171344617474100156620ustar0000000000000000
#include "test/jemalloc_test.h"

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

bool
mtx_init(mtx_t *mtx) {
#ifdef _WIN32
	if (!InitializeCriticalSectionAndSpinCount(&mtx->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mtx->lock = OS_UNFAIR_LOCK_INIT;
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
	if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	return false;
}

void
mtx_fini(mtx_t *mtx) {
#ifdef _WIN32
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#else
	pthread_mutex_destroy(&mtx->lock);
#endif
}

void
mtx_lock(mtx_t *mtx) {
#ifdef _WIN32
	EnterCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	os_unfair_lock_lock(&mtx->lock);
#else
	pthread_mutex_lock(&mtx->lock);
#endif
}

void
mtx_unlock(mtx_t *mtx) {
#ifdef _WIN32
	LeaveCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	os_unfair_lock_unlock(&mtx->lock);
#else
	pthread_mutex_unlock(&mtx->lock);
#endif
}
jemalloc-sys-0.3.2/rep/test/src/test.c010064400007650000024000000116441344617474100160350ustar0000000000000000
#include "test/jemalloc_test.h"

/* Test status state. */
static unsigned test_count = 0;
static test_status_t test_counts[test_status_count] = {0, 0, 0};
static test_status_t test_status = test_status_pass;
static const char *test_name = "";

/* Reentrancy testing helpers. */
#define NUM_REENTRANT_ALLOCS 20
typedef enum {
	non_reentrant = 0,
	libc_reentrant = 1,
	arena_new_reentrant = 2
} reentrancy_t;
static reentrancy_t reentrancy;

static bool libc_hook_ran = false;
static bool arena_new_hook_ran = false;

static const char *
reentrancy_t_str(reentrancy_t r) {
	switch (r) {
	case non_reentrant:
		return "non-reentrant";
	case libc_reentrant:
		return "libc-reentrant";
	case arena_new_reentrant:
		return "arena_new-reentrant";
	default:
		unreachable();
	}
}

static void
do_hook(bool *hook_ran, void (**hook)()) {
	*hook_ran = true;
	*hook = NULL;

	size_t alloc_size = 1;
	for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
		free(malloc(alloc_size));
		alloc_size *= 2;
	}
}

static void
libc_reentrancy_hook() {
	do_hook(&libc_hook_ran, &test_hooks_libc_hook);
}

static void
arena_new_reentrancy_hook() {
	do_hook(&arena_new_hook_ran, &test_hooks_arena_new_hook);
}

/* Actual test infrastructure. */
bool
test_is_reentrant() {
	return reentrancy != non_reentrant;
}

JEMALLOC_FORMAT_PRINTF(1, 2)
void
test_skip(const char *format, ...)
{ va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); malloc_printf("\n"); test_status = test_status_skip; } JEMALLOC_FORMAT_PRINTF(1, 2) void test_fail(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); malloc_printf("\n"); test_status = test_status_fail; } static const char * test_status_string(test_status_t test_status) { switch (test_status) { case test_status_pass: return "pass"; case test_status_skip: return "skip"; case test_status_fail: return "fail"; default: not_reached(); } } void p_test_init(const char *name) { test_count++; test_status = test_status_pass; test_name = name; } void p_test_fini(void) { test_counts[test_status]++; malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy), test_status_string(test_status)); } static void check_global_slow(test_status_t *status) { #ifdef JEMALLOC_UNIT_TEST /* * This check needs to peek into tsd internals, which is why it's only * exposed in unit tests. */ if (tsd_global_slow()) { malloc_printf("Testing increased global slow count\n"); *status = test_status_fail; } #endif } static test_status_t p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) { test_status_t ret; if (do_malloc_init) { /* * Make sure initialization occurs prior to running tests. * Tests are special because they may use internal facilities * prior to triggering initialization as a side effect of * calling into the public API. */ if (nallocx(1, 0) == 0) { malloc_printf("Initialization error"); return test_status_fail; } } ret = test_status_pass; for (; t != NULL; t = va_arg(ap, test_t *)) { /* Non-reentrant run. */ reentrancy = non_reentrant; test_hooks_arena_new_hook = test_hooks_libc_hook = NULL; t(); if (test_status > ret) { ret = test_status; } check_global_slow(&ret); /* Reentrant run. */ if (do_reentrant) { reentrancy = libc_reentrant; test_hooks_arena_new_hook = NULL; test_hooks_libc_hook = &libc_reentrancy_hook; t(); if (test_status > ret) { ret = test_status; } check_global_slow(&ret); reentrancy = arena_new_reentrant; test_hooks_libc_hook = NULL; test_hooks_arena_new_hook = &arena_new_reentrancy_hook; t(); if (test_status > ret) { ret = test_status; } check_global_slow(&ret); } } malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", test_status_string(test_status_pass), test_counts[test_status_pass], test_count, test_status_string(test_status_skip), test_counts[test_status_skip], test_count, test_status_string(test_status_fail), test_counts[test_status_fail], test_count); return ret; } test_status_t p_test(test_t *t, ...) { test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); ret = p_test_impl(true, true, t, ap); va_end(ap); return ret; } test_status_t p_test_no_reentrancy(test_t *t, ...) { test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); ret = p_test_impl(true, false, t, ap); va_end(ap); return ret; } test_status_t p_test_no_malloc_init(test_t *t, ...) { test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); /* * We also omit reentrancy from bootstrapping tests, since we don't * (yet) care about general reentrancy during bootstrapping. 
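 * Passing do_reentrant=false means p_test_impl() runs each test exactly
 * once, in non_reentrant mode.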
*/ ret = p_test_impl(false, false, t, ap); va_end(ap); return ret; } void p_test_fail(const char *prefix, const char *message) { malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); test_status = test_status_fail; } jemalloc-sys-0.3.2/rep/test/src/thd.c010064400007650000024000000013671344617474100156360ustar0000000000000000#include "test/jemalloc_test.h" #ifdef _WIN32 void thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); if (*thd == NULL) { test_fail("Error in CreateThread()\n"); } } void thd_join(thd_t thd, void **ret) { if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { DWORD exit_code; GetExitCodeThread(thd, (LPDWORD) &exit_code); *ret = (void *)(uintptr_t)exit_code; } } #else void thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { if (pthread_create(thd, NULL, proc, arg) != 0) { test_fail("Error in pthread_create()\n"); } } void thd_join(thd_t thd, void **ret) { pthread_join(thd, ret); } #endif jemalloc-sys-0.3.2/rep/test/src/timer.c010064400007650000024000000021031344617474100161640ustar0000000000000000#include "test/jemalloc_test.h" void timer_start(timedelta_t *timer) { nstime_init(&timer->t0, 0); nstime_update(&timer->t0); } void timer_stop(timedelta_t *timer) { nstime_copy(&timer->t1, &timer->t0); nstime_update(&timer->t1); } uint64_t timer_usec(const timedelta_t *timer) { nstime_t delta; nstime_copy(&delta, &timer->t1); nstime_subtract(&delta, &timer->t0); return nstime_ns(&delta) / 1000; } void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { uint64_t t0 = timer_usec(a); uint64_t t1 = timer_usec(b); uint64_t mult; size_t i = 0; size_t j, n; /* Whole. */ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); i += n; if (i >= buflen) { return; } mult = 1; for (j = 0; j < n; j++) { mult *= 10; } /* Decimal. */ n = malloc_snprintf(&buf[i], buflen-i, "."); i += n; /* Fraction. */ while (i < buflen-1) { uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 >= 5)) ? 1 : 0; n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, (t0 * mult / t1) % 10 + round); i += n; mult *= 10; } } jemalloc-sys-0.3.2/rep/test/stress/hookbench.c010064400007650000024000000032061344617474100175450ustar0000000000000000#include "test/jemalloc_test.h" static void noop_alloc_hook(void *extra, hook_alloc_t type, void *result, uintptr_t result_raw, uintptr_t args_raw[3]) { } static void noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]) { } static void noop_expand_hook(void *extra, hook_expand_t type, void *address, size_t old_usize, size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) { } static void malloc_free_loop(int iters) { for (int i = 0; i < iters; i++) { void *p = mallocx(1, 0); free(p); } } static void test_hooked(int iters) { hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook, NULL}; int err; void *handles[HOOK_MAX]; size_t sz = sizeof(handles[0]); for (int i = 0; i < HOOK_MAX; i++) { err = mallctl("experimental.hooks.install", &handles[i], &sz, &hooks, sizeof(hooks)); assert(err == 0); timedelta_t timer; timer_start(&timer); malloc_free_loop(iters); timer_stop(&timer); malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1, i + 1 == 1 ? 
"" : "s", timer_usec(&timer)); } for (int i = 0; i < HOOK_MAX; i++) { err = mallctl("experimental.hooks.remove", NULL, NULL, &handles[i], sizeof(handles[i])); assert(err == 0); } } static void test_unhooked(int iters) { timedelta_t timer; timer_start(&timer); malloc_free_loop(iters); timer_stop(&timer); malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer)); } int main(void) { /* Initialize */ free(mallocx(1, 0)); int iters = 10 * 1000 * 1000; malloc_printf("Benchmarking hooks with %d iterations:\n", iters); test_hooked(iters); test_unhooked(iters); } jemalloc-sys-0.3.2/rep/test/stress/microbench.c010064400007650000024000000061731344617474100177240ustar0000000000000000#include "test/jemalloc_test.h" static inline void time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void)) { uint64_t i; for (i = 0; i < nwarmup; i++) { func(); } timer_start(timer); for (i = 0; i < niter; i++) { func(); } timer_stop(timer); } void compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, void (*func_a), const char *name_b, void (*func_b)) { timedelta_t timer_a, timer_b; char ratio_buf[6]; void *p; p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); return; } time_func(&timer_a, nwarmup, niter, func_a); time_func(&timer_b, nwarmup, niter, func_b); timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf)); malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, " "%s=%"FMTu64"us, ratio=1:%s\n", niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b), ratio_buf); dallocx(p, 0); } static void malloc_free(void) { /* The compiler can optimize away free(malloc(1))! */ void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } free(p); } static void mallocx_free(void) { void *p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); return; } free(p); } TEST_BEGIN(test_malloc_vs_mallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc", malloc_free, "mallocx", mallocx_free); } TEST_END static void malloc_dallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } dallocx(p, 0); } static void malloc_sdallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } sdallocx(p, 1, 0); } TEST_BEGIN(test_free_vs_dallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, "dallocx", malloc_dallocx); } TEST_END TEST_BEGIN(test_dallocx_vs_sdallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, "sdallocx", malloc_sdallocx); } TEST_END static void malloc_mus_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } malloc_usable_size(p); free(p); } static void malloc_sallocx_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } if (sallocx(p, 0) < 1) { test_fail("Unexpected sallocx() failure"); } free(p); } TEST_BEGIN(test_mus_vs_sallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", malloc_mus_free, "sallocx", malloc_sallocx_free); } TEST_END static void malloc_nallocx_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } if (nallocx(1, 0) < 1) { test_fail("Unexpected nallocx() failure"); } free(p); } TEST_BEGIN(test_sallocx_vs_nallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", malloc_sallocx_free, "nallocx", malloc_nallocx_free); } TEST_END int main(void) { return 
test_no_reentrancy( test_malloc_vs_mallocx, test_free_vs_dallocx, test_dallocx_vs_sdallocx, test_mus_vs_sallocx, test_sallocx_vs_nallocx); } jemalloc-sys-0.3.2/rep/test/test.sh010064400007650000024000000044461344617502700154360ustar0000000000000000#!/bin/sh case macho in macho) export DYLD_FALLBACK_LIBRARY_PATH="lib" ;; pecoff) export PATH="${PATH}:lib" ;; *) ;; esac # Make a copy of the JE_MALLOC_CONF passed in to this script, so # it can be repeatedly concatenated with per test settings. export MALLOC_CONF_ALL=${JE_MALLOC_CONF} # Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL. export_malloc_conf() { if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then export JE_MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}" else export JE_MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}" fi } # Corresponds to test_status_t. pass_code=0 skip_code=1 fail_code=2 pass_count=0 skip_count=0 fail_count=0 for t in $@; do if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then echo fi echo "=== ${t} ===" if [ -e "${t}.sh" ] ; then # Source the shell script corresponding to the test in a subshell and # execute the test. This allows the shell script to set MALLOC_CONF, which # is then used to set JE_MALLOC_CONF (thus allowing the # per test shell script to ignore the JE_ detail). enable_fill=1 \ enable_prof=0 \ . ${t}.sh && \ export_malloc_conf && \ $JEMALLOC_TEST_PREFIX ${t} /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ else export MALLOC_CONF= && \ export_malloc_conf && \ $JEMALLOC_TEST_PREFIX ${t} /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ /Users/gnzlbg/projects/sideprojects/jemallocator/jemalloc-sys/rep/ fi result_code=$? case ${result_code} in ${pass_code}) pass_count=$((pass_count+1)) ;; ${skip_code}) skip_count=$((skip_count+1)) ;; ${fail_code}) fail_count=$((fail_count+1)) ;; *) echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2 echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2 exit 1 esac done total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` echo echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" if [ ${fail_count} -eq 0 ] ; then exit 0 else exit 1 fi jemalloc-sys-0.3.2/rep/test/test.sh.in010064400007650000024000000043621344617474100160420ustar0000000000000000#!/bin/sh case @abi@ in macho) export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" ;; pecoff) export PATH="${PATH}:@objroot@lib" ;; *) ;; esac # Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so # it can be repeatedly concatenated with per test settings. export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF} # Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL. export_malloc_conf() { if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}" else export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}" fi } # Corresponds to test_status_t. pass_code=0 skip_code=1 fail_code=2 pass_count=0 skip_count=0 fail_count=0 for t in $@; do if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then echo fi echo "=== ${t} ===" if [ -e "@srcroot@${t}.sh" ] ; then # Source the shell script corresponding to the test in a subshell and # execute the test. 
This allows the shell script to set MALLOC_CONF, which # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail). enable_fill=@enable_fill@ \ enable_prof=@enable_prof@ \ . @srcroot@${t}.sh && \ export_malloc_conf && \ $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ else export MALLOC_CONF= && \ export_malloc_conf && \ $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ fi result_code=$? case ${result_code} in ${pass_code}) pass_count=$((pass_count+1)) ;; ${skip_code}) skip_count=$((skip_count+1)) ;; ${fail_code}) fail_count=$((fail_count+1)) ;; *) echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2 echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2 exit 1 esac done total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` echo echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" if [ ${fail_count} -eq 0 ] ; then exit 0 else exit 1 fi jemalloc-sys-0.3.2/rep/test/unit/SFMT.c010064400007650000024000002530701344617474100160200ustar0000000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "test/jemalloc_test.h" #define BLOCK_SIZE 10000 #define BLOCK_SIZE64 (BLOCK_SIZE / 2) #define COUNT_1 1000 #define COUNT_2 700 static const uint32_t init_gen_rand_32_expected[] = { 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U, 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, 1104807822U, 3186961098U, 
315764646U, 2163680838U, 3574508994U, 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U, 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U, 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, 
1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U, 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, 2276901713U, 365637751U, 2260718426U, 1394775634U, 
1791172338U, 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U }; static const uint32_t init_by_array_32_expected[] = { 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U, 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, 2315569495U, 2729518615U, 564745877U, 
1263517638U, 3157185798U, 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U, 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, 805161465U, 
1116186205U, 2829002754U, 2352620120U, 620121516U, 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U, 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, 
681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U, 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, 2994377493U, 4038063986U, 1918458672U, 
2983166794U, 4200449033U, 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U }; static const uint64_t init_gen_rand_64_expected[] = { KQU(16924766246869039260), KQU( 8201438687333352714), KQU( 2265290287015001750), KQU(18397264611805473832), KQU( 3375255223302384358), KQU( 6345559975416828796), KQU(18229739242790328073), KQU( 7596792742098800905), KQU( 255338647169685981), KQU( 2052747240048610300), KQU(18328151576097299343), KQU(12472905421133796567), KQU(11315245349717600863), KQU(16594110197775871209), KQU(15708751964632456450), KQU(10452031272054632535), KQU(11097646720811454386), KQU( 4556090668445745441), KQU(17116187693090663106), KQU(14931526836144510645), KQU( 9190752218020552591), KQU( 9625800285771901401), KQU(13995141077659972832), KQU( 5194209094927829625), KQU( 4156788379151063303), KQU( 8523452593770139494), KQU(14082382103049296727), KQU( 2462601863986088483), KQU( 3030583461592840678), KQU( 5221622077872827681), KQU( 3084210671228981236), KQU(13956758381389953823), KQU(13503889856213423831), KQU(15696904024189836170), KQU( 4612584152877036206), KQU( 6231135538447867881), KQU(10172457294158869468), KQU( 6452258628466708150), KQU(14044432824917330221), KQU( 370168364480044279), KQU(10102144686427193359), KQU( 667870489994776076), KQU( 2732271956925885858), KQU(18027788905977284151), KQU(15009842788582923859), KQU( 7136357960180199542), KQU(15901736243475578127), KQU(16951293785352615701), KQU(10551492125243691632), KQU(17668869969146434804), KQU(13646002971174390445), KQU( 9804471050759613248), KQU( 5511670439655935493), KQU(18103342091070400926), KQU(17224512747665137533), KQU(15534627482992618168), KQU( 1423813266186582647), KQU(15821176807932930024), KQU( 30323369733607156), KQU(11599382494723479403), KQU( 653856076586810062), KQU( 3176437395144899659), KQU(14028076268147963917), KQU(16156398271809666195), KQU( 3166955484848201676), KQU( 5746805620136919390), KQU(17297845208891256593), KQU(11691653183226428483), KQU(17900026146506981577), KQU(15387382115755971042), KQU(16923567681040845943), KQU( 8039057517199388606), KQU(11748409241468629263), KQU( 794358245539076095), KQU(13438501964693401242), KQU(14036803236515618962), KQU( 5252311215205424721), KQU(17806589612915509081), KQU( 6802767092397596006), KQU(14212120431184557140), KQU( 1072951366761385712), KQU(13098491780722836296), KQU( 9466676828710797353), KQU(12673056849042830081), KQU(12763726623645357580), KQU(16468961652999309493), KQU(15305979875636438926), KQU(17444713151223449734), KQU( 5692214267627883674), KQU(13049589139196151505), KQU( 880115207831670745), KQU( 1776529075789695498), KQU(16695225897801466485), KQU(10666901778795346845), KQU( 6164389346722833869), KQU( 2863817793264300475), 
KQU( 9464049921886304754), KQU( 3993566636740015468), KQU( 9983749692528514136), KQU(16375286075057755211), KQU(16042643417005440820), KQU(11445419662923489877), KQU( 7999038846885158836), KQU( 6721913661721511535), KQU( 5363052654139357320), KQU( 1817788761173584205), KQU(13290974386445856444), KQU( 4650350818937984680), KQU( 8219183528102484836), KQU( 1569862923500819899), KQU( 4189359732136641860), KQU(14202822961683148583), KQU( 4457498315309429058), KQU(13089067387019074834), KQU(11075517153328927293), KQU(10277016248336668389), KQU( 7070509725324401122), KQU(17808892017780289380), KQU(13143367339909287349), KQU( 1377743745360085151), KQU( 5749341807421286485), KQU(14832814616770931325), KQU( 7688820635324359492), KQU(10960474011539770045), KQU( 81970066653179790), KQU(12619476072607878022), KQU( 4419566616271201744), KQU(15147917311750568503), KQU( 5549739182852706345), KQU( 7308198397975204770), KQU(13580425496671289278), KQU(17070764785210130301), KQU( 8202832846285604405), KQU( 6873046287640887249), KQU( 6927424434308206114), KQU( 6139014645937224874), KQU(10290373645978487639), KQU(15904261291701523804), KQU( 9628743442057826883), KQU(18383429096255546714), KQU( 4977413265753686967), KQU( 7714317492425012869), KQU( 9025232586309926193), KQU(14627338359776709107), KQU(14759849896467790763), KQU(10931129435864423252), KQU( 4588456988775014359), KQU(10699388531797056724), KQU( 468652268869238792), KQU( 5755943035328078086), KQU( 2102437379988580216), KQU( 9986312786506674028), KQU( 2654207180040945604), KQU( 8726634790559960062), KQU( 100497234871808137), KQU( 2800137176951425819), KQU( 6076627612918553487), KQU( 5780186919186152796), KQU( 8179183595769929098), KQU( 6009426283716221169), KQU( 2796662551397449358), KQU( 1756961367041986764), KQU( 6972897917355606205), KQU(14524774345368968243), KQU( 2773529684745706940), KQU( 4853632376213075959), KQU( 4198177923731358102), KQU( 8271224913084139776), KQU( 2741753121611092226), KQU(16782366145996731181), KQU(15426125238972640790), KQU(13595497100671260342), KQU( 3173531022836259898), KQU( 6573264560319511662), KQU(18041111951511157441), KQU( 2351433581833135952), KQU( 3113255578908173487), KQU( 1739371330877858784), KQU(16046126562789165480), KQU( 8072101652214192925), KQU(15267091584090664910), KQU( 9309579200403648940), KQU( 5218892439752408722), KQU(14492477246004337115), KQU(17431037586679770619), KQU( 7385248135963250480), KQU( 9580144956565560660), KQU( 4919546228040008720), KQU(15261542469145035584), KQU(18233297270822253102), KQU( 5453248417992302857), KQU( 9309519155931460285), KQU(10342813012345291756), KQU(15676085186784762381), KQU(15912092950691300645), KQU( 9371053121499003195), KQU( 9897186478226866746), KQU(14061858287188196327), KQU( 122575971620788119), KQU(12146750969116317754), KQU( 4438317272813245201), KQU( 8332576791009527119), KQU(13907785691786542057), KQU(10374194887283287467), KQU( 2098798755649059566), KQU( 3416235197748288894), KQU( 8688269957320773484), KQU( 7503964602397371571), KQU(16724977015147478236), KQU( 9461512855439858184), KQU(13259049744534534727), KQU( 3583094952542899294), KQU( 8764245731305528292), KQU(13240823595462088985), KQU(13716141617617910448), KQU(18114969519935960955), KQU( 2297553615798302206), KQU( 4585521442944663362), KQU(17776858680630198686), KQU( 4685873229192163363), KQU( 152558080671135627), KQU(15424900540842670088), KQU(13229630297130024108), KQU(17530268788245718717), KQU(16675633913065714144), KQU( 3158912717897568068), KQU(15399132185380087288), KQU( 
7401418744515677872), KQU(13135412922344398535), KQU( 6385314346100509511), KQU(13962867001134161139), KQU(10272780155442671999), KQU(12894856086597769142), KQU(13340877795287554994), KQU(12913630602094607396), KQU(12543167911119793857), KQU(17343570372251873096), KQU(10959487764494150545), KQU( 6966737953093821128), KQU(13780699135496988601), KQU( 4405070719380142046), KQU(14923788365607284982), KQU( 2869487678905148380), KQU( 6416272754197188403), KQU(15017380475943612591), KQU( 1995636220918429487), KQU( 3402016804620122716), KQU(15800188663407057080), KQU(11362369990390932882), KQU(15262183501637986147), KQU(10239175385387371494), KQU( 9352042420365748334), KQU( 1682457034285119875), KQU( 1724710651376289644), KQU( 2038157098893817966), KQU( 9897825558324608773), KQU( 1477666236519164736), KQU(16835397314511233640), KQU(10370866327005346508), KQU(10157504370660621982), KQU(12113904045335882069), KQU(13326444439742783008), KQU(11302769043000765804), KQU(13594979923955228484), KQU(11779351762613475968), KQU( 3786101619539298383), KQU( 8021122969180846063), KQU(15745904401162500495), KQU(10762168465993897267), KQU(13552058957896319026), KQU(11200228655252462013), KQU( 5035370357337441226), KQU( 7593918984545500013), KQU( 5418554918361528700), KQU( 4858270799405446371), KQU( 9974659566876282544), KQU(18227595922273957859), KQU( 2772778443635656220), KQU(14285143053182085385), KQU( 9939700992429600469), KQU(12756185904545598068), KQU( 2020783375367345262), KQU( 57026775058331227), KQU( 950827867930065454), KQU( 6602279670145371217), KQU( 2291171535443566929), KQU( 5832380724425010313), KQU( 1220343904715982285), KQU(17045542598598037633), KQU(15460481779702820971), KQU(13948388779949365130), KQU(13975040175430829518), KQU(17477538238425541763), KQU(11104663041851745725), KQU(15860992957141157587), KQU(14529434633012950138), KQU( 2504838019075394203), KQU( 7512113882611121886), KQU( 4859973559980886617), KQU( 1258601555703250219), KQU(15594548157514316394), KQU( 4516730171963773048), KQU(11380103193905031983), KQU( 6809282239982353344), KQU(18045256930420065002), KQU( 2453702683108791859), KQU( 977214582986981460), KQU( 2006410402232713466), KQU( 6192236267216378358), KQU( 3429468402195675253), KQU(18146933153017348921), KQU(17369978576367231139), KQU( 1246940717230386603), KQU(11335758870083327110), KQU(14166488801730353682), KQU( 9008573127269635732), KQU(10776025389820643815), KQU(15087605441903942962), KQU( 1359542462712147922), KQU(13898874411226454206), KQU(17911176066536804411), KQU( 9435590428600085274), KQU( 294488509967864007), KQU( 8890111397567922046), KQU( 7987823476034328778), KQU(13263827582440967651), KQU( 7503774813106751573), KQU(14974747296185646837), KQU( 8504765037032103375), KQU(17340303357444536213), KQU( 7704610912964485743), KQU( 8107533670327205061), KQU( 9062969835083315985), KQU(16968963142126734184), KQU(12958041214190810180), KQU( 2720170147759570200), KQU( 2986358963942189566), KQU(14884226322219356580), KQU( 286224325144368520), KQU(11313800433154279797), KQU(18366849528439673248), KQU(17899725929482368789), KQU( 3730004284609106799), KQU( 1654474302052767205), KQU( 5006698007047077032), KQU( 8196893913601182838), KQU(15214541774425211640), KQU(17391346045606626073), KQU( 8369003584076969089), KQU( 3939046733368550293), KQU(10178639720308707785), KQU( 2180248669304388697), KQU( 62894391300126322), KQU( 9205708961736223191), KQU( 6837431058165360438), KQU( 3150743890848308214), KQU(17849330658111464583), KQU(12214815643135450865), KQU(13410713840519603402), 
KQU( 3200778126692046802), KQU(13354780043041779313), KQU( 800850022756886036), KQU(15660052933953067433), KQU( 6572823544154375676), KQU(11030281857015819266), KQU(12682241941471433835), KQU(11654136407300274693), KQU( 4517795492388641109), KQU( 9757017371504524244), KQU(17833043400781889277), KQU(12685085201747792227), KQU(10408057728835019573), KQU( 98370418513455221), KQU( 6732663555696848598), KQU(13248530959948529780), KQU( 3530441401230622826), KQU(18188251992895660615), KQU( 1847918354186383756), KQU( 1127392190402660921), KQU(11293734643143819463), KQU( 3015506344578682982), KQU(13852645444071153329), KQU( 2121359659091349142), KQU( 1294604376116677694), KQU( 5616576231286352318), KQU( 7112502442954235625), KQU(11676228199551561689), KQU(12925182803007305359), KQU( 7852375518160493082), KQU( 1136513130539296154), KQU( 5636923900916593195), KQU( 3221077517612607747), KQU(17784790465798152513), KQU( 3554210049056995938), KQU(17476839685878225874), KQU( 3206836372585575732), KQU( 2765333945644823430), KQU(10080070903718799528), KQU( 5412370818878286353), KQU( 9689685887726257728), KQU( 8236117509123533998), KQU( 1951139137165040214), KQU( 4492205209227980349), KQU(16541291230861602967), KQU( 1424371548301437940), KQU( 9117562079669206794), KQU(14374681563251691625), KQU(13873164030199921303), KQU( 6680317946770936731), KQU(15586334026918276214), KQU(10896213950976109802), KQU( 9506261949596413689), KQU( 9903949574308040616), KQU( 6038397344557204470), KQU( 174601465422373648), KQU(15946141191338238030), KQU(17142225620992044937), KQU( 7552030283784477064), KQU( 2947372384532947997), KQU( 510797021688197711), KQU( 4962499439249363461), KQU( 23770320158385357), KQU( 959774499105138124), KQU( 1468396011518788276), KQU( 2015698006852312308), KQU( 4149400718489980136), KQU( 5992916099522371188), KQU(10819182935265531076), KQU(16189787999192351131), KQU( 342833961790261950), KQU(12470830319550495336), KQU(18128495041912812501), KQU( 1193600899723524337), KQU( 9056793666590079770), KQU( 2154021227041669041), KQU( 4963570213951235735), KQU( 4865075960209211409), KQU( 2097724599039942963), KQU( 2024080278583179845), KQU(11527054549196576736), KQU(10650256084182390252), KQU( 4808408648695766755), KQU( 1642839215013788844), KQU(10607187948250398390), KQU( 7076868166085913508), KQU( 730522571106887032), KQU(12500579240208524895), KQU( 4484390097311355324), KQU(15145801330700623870), KQU( 8055827661392944028), KQU( 5865092976832712268), KQU(15159212508053625143), KQU( 3560964582876483341), KQU( 4070052741344438280), KQU( 6032585709886855634), KQU(15643262320904604873), KQU( 2565119772293371111), KQU( 318314293065348260), KQU(15047458749141511872), KQU( 7772788389811528730), KQU( 7081187494343801976), KQU( 6465136009467253947), KQU(10425940692543362069), KQU( 554608190318339115), KQU(14796699860302125214), KQU( 1638153134431111443), KQU(10336967447052276248), KQU( 8412308070396592958), KQU( 4004557277152051226), KQU( 8143598997278774834), KQU(16413323996508783221), KQU(13139418758033994949), KQU( 9772709138335006667), KQU( 2818167159287157659), KQU(17091740573832523669), KQU(14629199013130751608), KQU(18268322711500338185), KQU( 8290963415675493063), KQU( 8830864907452542588), KQU( 1614839084637494849), KQU(14855358500870422231), KQU( 3472996748392519937), KQU(15317151166268877716), KQU( 5825895018698400362), KQU(16730208429367544129), KQU(10481156578141202800), KQU( 4746166512382823750), KQU(12720876014472464998), KQU( 8825177124486735972), KQU(13733447296837467838), KQU( 6412293741681359625), 
KQU( 8313213138756135033), KQU(11421481194803712517), KQU( 7997007691544174032), KQU( 6812963847917605930), KQU( 9683091901227558641), KQU(14703594165860324713), KQU( 1775476144519618309), KQU( 2724283288516469519), KQU( 717642555185856868), KQU( 8736402192215092346), KQU(11878800336431381021), KQU( 4348816066017061293), KQU( 6115112756583631307), KQU( 9176597239667142976), KQU(12615622714894259204), KQU(10283406711301385987), KQU( 5111762509485379420), KQU( 3118290051198688449), KQU( 7345123071632232145), KQU( 9176423451688682359), KQU( 4843865456157868971), KQU(12008036363752566088), KQU(12058837181919397720), KQU( 2145073958457347366), KQU( 1526504881672818067), KQU( 3488830105567134848), KQU(13208362960674805143), KQU( 4077549672899572192), KQU( 7770995684693818365), KQU( 1398532341546313593), KQU(12711859908703927840), KQU( 1417561172594446813), KQU(17045191024194170604), KQU( 4101933177604931713), KQU(14708428834203480320), KQU(17447509264469407724), KQU(14314821973983434255), KQU(17990472271061617265), KQU( 5087756685841673942), KQU(12797820586893859939), KQU( 1778128952671092879), KQU( 3535918530508665898), KQU( 9035729701042481301), KQU(14808661568277079962), KQU(14587345077537747914), KQU(11920080002323122708), KQU( 6426515805197278753), KQU( 3295612216725984831), KQU(11040722532100876120), KQU(12305952936387598754), KQU(16097391899742004253), KQU( 4908537335606182208), KQU(12446674552196795504), KQU(16010497855816895177), KQU( 9194378874788615551), KQU( 3382957529567613384), KQU( 5154647600754974077), KQU( 9801822865328396141), KQU( 9023662173919288143), KQU(17623115353825147868), KQU( 8238115767443015816), KQU(15811444159859002560), KQU( 9085612528904059661), KQU( 6888601089398614254), KQU( 258252992894160189), KQU( 6704363880792428622), KQU( 6114966032147235763), KQU(11075393882690261875), KQU( 8797664238933620407), KQU( 5901892006476726920), KQU( 5309780159285518958), KQU(14940808387240817367), KQU(14642032021449656698), KQU( 9808256672068504139), KQU( 3670135111380607658), KQU(11211211097845960152), KQU( 1474304506716695808), KQU(15843166204506876239), KQU( 7661051252471780561), KQU(10170905502249418476), KQU( 7801416045582028589), KQU( 2763981484737053050), KQU( 9491377905499253054), KQU(16201395896336915095), KQU( 9256513756442782198), KQU( 5411283157972456034), KQU( 5059433122288321676), KQU( 4327408006721123357), KQU( 9278544078834433377), KQU( 7601527110882281612), KQU(11848295896975505251), KQU(12096998801094735560), KQU(14773480339823506413), KQU(15586227433895802149), KQU(12786541257830242872), KQU( 6904692985140503067), KQU( 5309011515263103959), KQU(12105257191179371066), KQU(14654380212442225037), KQU( 2556774974190695009), KQU( 4461297399927600261), KQU(14888225660915118646), KQU(14915459341148291824), KQU( 2738802166252327631), KQU( 6047155789239131512), KQU(12920545353217010338), KQU(10697617257007840205), KQU( 2751585253158203504), KQU(13252729159780047496), KQU(14700326134672815469), KQU(14082527904374600529), KQU(16852962273496542070), KQU(17446675504235853907), KQU(15019600398527572311), KQU(12312781346344081551), KQU(14524667935039810450), KQU( 5634005663377195738), KQU(11375574739525000569), KQU( 2423665396433260040), KQU( 5222836914796015410), KQU( 4397666386492647387), KQU( 4619294441691707638), KQU( 665088602354770716), KQU(13246495665281593610), KQU( 6564144270549729409), KQU(10223216188145661688), KQU( 3961556907299230585), KQU(11543262515492439914), KQU(16118031437285993790), KQU( 7143417964520166465), KQU(13295053515909486772), KQU( 
40434666004899675), KQU(17127804194038347164), KQU( 8599165966560586269), KQU( 8214016749011284903), KQU(13725130352140465239), KQU( 5467254474431726291), KQU( 7748584297438219877), KQU(16933551114829772472), KQU( 2169618439506799400), KQU( 2169787627665113463), KQU(17314493571267943764), KQU(18053575102911354912), KQU(11928303275378476973), KQU(11593850925061715550), KQU(17782269923473589362), KQU( 3280235307704747039), KQU( 6145343578598685149), KQU(17080117031114086090), KQU(18066839902983594755), KQU( 6517508430331020706), KQU( 8092908893950411541), KQU(12558378233386153732), KQU( 4476532167973132976), KQU(16081642430367025016), KQU( 4233154094369139361), KQU( 8693630486693161027), KQU(11244959343027742285), KQU(12273503967768513508), KQU(14108978636385284876), KQU( 7242414665378826984), KQU( 6561316938846562432), KQU( 8601038474994665795), KQU(17532942353612365904), KQU(17940076637020912186), KQU( 7340260368823171304), KQU( 7061807613916067905), KQU(10561734935039519326), KQU(17990796503724650862), KQU( 6208732943911827159), KQU( 359077562804090617), KQU(14177751537784403113), KQU(10659599444915362902), KQU(15081727220615085833), KQU(13417573895659757486), KQU(15513842342017811524), KQU(11814141516204288231), KQU( 1827312513875101814), KQU( 2804611699894603103), KQU(17116500469975602763), KQU(12270191815211952087), KQU(12256358467786024988), KQU(18435021722453971267), KQU( 671330264390865618), KQU( 476504300460286050), KQU(16465470901027093441), KQU( 4047724406247136402), KQU( 1322305451411883346), KQU( 1388308688834322280), KQU( 7303989085269758176), KQU( 9323792664765233642), KQU( 4542762575316368936), KQU(17342696132794337618), KQU( 4588025054768498379), KQU(13415475057390330804), KQU(17880279491733405570), KQU(10610553400618620353), KQU( 3180842072658960139), KQU(13002966655454270120), KQU( 1665301181064982826), KQU( 7083673946791258979), KQU( 190522247122496820), KQU(17388280237250677740), KQU( 8430770379923642945), KQU(12987180971921668584), KQU( 2311086108365390642), KQU( 2870984383579822345), KQU(14014682609164653318), KQU(14467187293062251484), KQU( 192186361147413298), KQU(15171951713531796524), KQU( 9900305495015948728), KQU(17958004775615466344), KQU(14346380954498606514), KQU(18040047357617407096), KQU( 5035237584833424532), KQU(15089555460613972287), KQU( 4131411873749729831), KQU( 1329013581168250330), KQU(10095353333051193949), KQU(10749518561022462716), KQU( 9050611429810755847), KQU(15022028840236655649), KQU( 8775554279239748298), KQU(13105754025489230502), KQU(15471300118574167585), KQU( 89864764002355628), KQU( 8776416323420466637), KQU( 5280258630612040891), KQU( 2719174488591862912), KQU( 7599309137399661994), KQU(15012887256778039979), KQU(14062981725630928925), KQU(12038536286991689603), KQU( 7089756544681775245), KQU(10376661532744718039), KQU( 1265198725901533130), KQU(13807996727081142408), KQU( 2935019626765036403), KQU( 7651672460680700141), KQU( 3644093016200370795), KQU( 2840982578090080674), KQU(17956262740157449201), KQU(18267979450492880548), KQU(11799503659796848070), KQU( 9942537025669672388), KQU(11886606816406990297), KQU( 5488594946437447576), KQU( 7226714353282744302), KQU( 3784851653123877043), KQU( 878018453244803041), KQU(12110022586268616085), KQU( 734072179404675123), KQU(11869573627998248542), KQU( 469150421297783998), KQU( 260151124912803804), KQU(11639179410120968649), KQU( 9318165193840846253), KQU(12795671722734758075), KQU(15318410297267253933), KQU( 691524703570062620), KQU( 5837129010576994601), KQU(15045963859726941052), KQU( 
5850056944932238169), KQU(12017434144750943807), KQU( 7447139064928956574), KQU( 3101711812658245019), KQU(16052940704474982954), KQU(18195745945986994042), KQU( 8932252132785575659), KQU(13390817488106794834), KQU(11582771836502517453), KQU( 4964411326683611686), KQU( 2195093981702694011), KQU(14145229538389675669), KQU(16459605532062271798), KQU( 866316924816482864), KQU( 4593041209937286377), KQU( 8415491391910972138), KQU( 4171236715600528969), KQU(16637569303336782889), KQU( 2002011073439212680), KQU(17695124661097601411), KQU( 4627687053598611702), KQU( 7895831936020190403), KQU( 8455951300917267802), KQU( 2923861649108534854), KQU( 8344557563927786255), KQU( 6408671940373352556), KQU(12210227354536675772), KQU(14294804157294222295), KQU(10103022425071085127), KQU(10092959489504123771), KQU( 6554774405376736268), KQU(12629917718410641774), KQU( 6260933257596067126), KQU( 2460827021439369673), KQU( 2541962996717103668), KQU( 597377203127351475), KQU( 5316984203117315309), KQU( 4811211393563241961), KQU(13119698597255811641), KQU( 8048691512862388981), KQU(10216818971194073842), KQU( 4612229970165291764), KQU(10000980798419974770), KQU( 6877640812402540687), KQU( 1488727563290436992), KQU( 2227774069895697318), KQU(11237754507523316593), KQU(13478948605382290972), KQU( 1963583846976858124), KQU( 5512309205269276457), KQU( 3972770164717652347), KQU( 3841751276198975037), KQU(10283343042181903117), KQU( 8564001259792872199), KQU(16472187244722489221), KQU( 8953493499268945921), KQU( 3518747340357279580), KQU( 4003157546223963073), KQU( 3270305958289814590), KQU( 3966704458129482496), KQU( 8122141865926661939), KQU(14627734748099506653), KQU(13064426990862560568), KQU( 2414079187889870829), KQU( 5378461209354225306), KQU(10841985740128255566), KQU( 538582442885401738), KQU( 7535089183482905946), KQU(16117559957598879095), KQU( 8477890721414539741), KQU( 1459127491209533386), KQU(17035126360733620462), KQU( 8517668552872379126), KQU(10292151468337355014), KQU(17081267732745344157), KQU(13751455337946087178), KQU(14026945459523832966), KQU( 6653278775061723516), KQU(10619085543856390441), KQU( 2196343631481122885), KQU(10045966074702826136), KQU(10082317330452718282), KQU( 5920859259504831242), KQU( 9951879073426540617), KQU( 7074696649151414158), KQU(15808193543879464318), KQU( 7385247772746953374), KQU( 3192003544283864292), KQU(18153684490917593847), KQU(12423498260668568905), KQU(10957758099756378169), KQU(11488762179911016040), KQU( 2099931186465333782), KQU(11180979581250294432), KQU( 8098916250668367933), KQU( 3529200436790763465), KQU(12988418908674681745), KQU( 6147567275954808580), KQU( 3207503344604030989), KQU(10761592604898615360), KQU( 229854861031893504), KQU( 8809853962667144291), KQU(13957364469005693860), KQU( 7634287665224495886), KQU(12353487366976556874), KQU( 1134423796317152034), KQU( 2088992471334107068), KQU( 7393372127190799698), KQU( 1845367839871058391), KQU( 207922563987322884), KQU(11960870813159944976), KQU(12182120053317317363), KQU(17307358132571709283), KQU(13871081155552824936), KQU(18304446751741566262), KQU( 7178705220184302849), KQU(10929605677758824425), KQU(16446976977835806844), KQU(13723874412159769044), KQU( 6942854352100915216), KQU( 1726308474365729390), KQU( 2150078766445323155), KQU(15345558947919656626), KQU(12145453828874527201), KQU( 2054448620739726849), KQU( 2740102003352628137), KQU(11294462163577610655), KQU( 756164283387413743), KQU(17841144758438810880), KQU(10802406021185415861), KQU( 8716455530476737846), KQU( 6321788834517649606), 
KQU(14681322910577468426), KQU(17330043563884336387), KQU(12701802180050071614), KQU(14695105111079727151), KQU( 5112098511654172830), KQU( 4957505496794139973), KQU( 8270979451952045982), KQU(12307685939199120969), KQU(12425799408953443032), KQU( 8376410143634796588), KQU(16621778679680060464), KQU( 3580497854566660073), KQU( 1122515747803382416), KQU( 857664980960597599), KQU( 6343640119895925918), KQU(12878473260854462891), KQU(10036813920765722626), KQU(14451335468363173812), KQU( 5476809692401102807), KQU(16442255173514366342), KQU(13060203194757167104), KQU(14354124071243177715), KQU(15961249405696125227), KQU(13703893649690872584), KQU( 363907326340340064), KQU( 6247455540491754842), KQU(12242249332757832361), KQU( 156065475679796717), KQU( 9351116235749732355), KQU( 4590350628677701405), KQU( 1671195940982350389), KQU(13501398458898451905), KQU( 6526341991225002255), KQU( 1689782913778157592), KQU( 7439222350869010334), KQU(13975150263226478308), KQU(11411961169932682710), KQU(17204271834833847277), KQU( 541534742544435367), KQU( 6591191931218949684), KQU( 2645454775478232486), KQU( 4322857481256485321), KQU( 8477416487553065110), KQU(12902505428548435048), KQU( 971445777981341415), KQU(14995104682744976712), KQU( 4243341648807158063), KQU( 8695061252721927661), KQU( 5028202003270177222), KQU( 2289257340915567840), KQU(13870416345121866007), KQU(13994481698072092233), KQU( 6912785400753196481), KQU( 2278309315841980139), KQU( 4329765449648304839), KQU( 5963108095785485298), KQU( 4880024847478722478), KQU(16015608779890240947), KQU( 1866679034261393544), KQU( 914821179919731519), KQU( 9643404035648760131), KQU( 2418114953615593915), KQU( 944756836073702374), KQU(15186388048737296834), KQU( 7723355336128442206), KQU( 7500747479679599691), KQU(18013961306453293634), KQU( 2315274808095756456), KQU(13655308255424029566), KQU(17203800273561677098), KQU( 1382158694422087756), KQU( 5090390250309588976), KQU( 517170818384213989), KQU( 1612709252627729621), KQU( 1330118955572449606), KQU( 300922478056709885), KQU(18115693291289091987), KQU(13491407109725238321), KQU(15293714633593827320), KQU( 5151539373053314504), KQU( 5951523243743139207), KQU(14459112015249527975), KQU( 5456113959000700739), KQU( 3877918438464873016), KQU(12534071654260163555), KQU(15871678376893555041), KQU(11005484805712025549), KQU(16353066973143374252), KQU( 4358331472063256685), KQU( 8268349332210859288), KQU(12485161590939658075), KQU(13955993592854471343), KQU( 5911446886848367039), KQU(14925834086813706974), KQU( 6590362597857994805), KQU( 1280544923533661875), KQU( 1637756018947988164), KQU( 4734090064512686329), KQU(16693705263131485912), KQU( 6834882340494360958), KQU( 8120732176159658505), KQU( 2244371958905329346), KQU(10447499707729734021), KQU( 7318742361446942194), KQU( 8032857516355555296), KQU(14023605983059313116), KQU( 1032336061815461376), KQU( 9840995337876562612), KQU( 9869256223029203587), KQU(12227975697177267636), KQU(12728115115844186033), KQU( 7752058479783205470), KQU( 729733219713393087), KQU(12954017801239007622) }; static const uint64_t init_by_array_64_expected[] = { KQU( 2100341266307895239), KQU( 8344256300489757943), KQU(15687933285484243894), KQU( 8268620370277076319), KQU(12371852309826545459), KQU( 8800491541730110238), KQU(18113268950100835773), KQU( 2886823658884438119), KQU( 3293667307248180724), KQU( 9307928143300172731), KQU( 7688082017574293629), KQU( 900986224735166665), KQU( 9977972710722265039), KQU( 6008205004994830552), KQU( 546909104521689292), KQU( 7428471521869107594), 
KQU(14777563419314721179), KQU(16116143076567350053), KQU( 5322685342003142329), KQU( 4200427048445863473), KQU( 4693092150132559146), KQU(13671425863759338582), KQU( 6747117460737639916), KQU( 4732666080236551150), KQU( 5912839950611941263), KQU( 3903717554504704909), KQU( 2615667650256786818), KQU(10844129913887006352), KQU(13786467861810997820), KQU(14267853002994021570), KQU(13767807302847237439), KQU(16407963253707224617), KQU( 4802498363698583497), KQU( 2523802839317209764), KQU( 3822579397797475589), KQU( 8950320572212130610), KQU( 3745623504978342534), KQU(16092609066068482806), KQU( 9817016950274642398), KQU(10591660660323829098), KQU(11751606650792815920), KQU( 5122873818577122211), KQU(17209553764913936624), KQU( 6249057709284380343), KQU(15088791264695071830), KQU(15344673071709851930), KQU( 4345751415293646084), KQU( 2542865750703067928), KQU(13520525127852368784), KQU(18294188662880997241), KQU( 3871781938044881523), KQU( 2873487268122812184), KQU(15099676759482679005), KQU(15442599127239350490), KQU( 6311893274367710888), KQU( 3286118760484672933), KQU( 4146067961333542189), KQU(13303942567897208770), KQU( 8196013722255630418), KQU( 4437815439340979989), KQU(15433791533450605135), KQU( 4254828956815687049), KQU( 1310903207708286015), KQU(10529182764462398549), KQU(14900231311660638810), KQU( 9727017277104609793), KQU( 1821308310948199033), KQU(11628861435066772084), KQU( 9469019138491546924), KQU( 3145812670532604988), KQU( 9938468915045491919), KQU( 1562447430672662142), KQU(13963995266697989134), KQU( 3356884357625028695), KQU( 4499850304584309747), KQU( 8456825817023658122), KQU(10859039922814285279), KQU( 8099512337972526555), KQU( 348006375109672149), KQU(11919893998241688603), KQU( 1104199577402948826), KQU(16689191854356060289), KQU(10992552041730168078), KQU( 7243733172705465836), KQU( 5668075606180319560), KQU(18182847037333286970), KQU( 4290215357664631322), KQU( 4061414220791828613), KQU(13006291061652989604), KQU( 7140491178917128798), KQU(12703446217663283481), KQU( 5500220597564558267), KQU(10330551509971296358), KQU(15958554768648714492), KQU( 5174555954515360045), KQU( 1731318837687577735), KQU( 3557700801048354857), KQU(13764012341928616198), KQU(13115166194379119043), KQU( 7989321021560255519), KQU( 2103584280905877040), KQU( 9230788662155228488), KQU(16396629323325547654), KQU( 657926409811318051), KQU(15046700264391400727), KQU( 5120132858771880830), KQU( 7934160097989028561), KQU( 6963121488531976245), KQU(17412329602621742089), KQU(15144843053931774092), KQU(17204176651763054532), KQU(13166595387554065870), KQU( 8590377810513960213), KQU( 5834365135373991938), KQU( 7640913007182226243), KQU( 3479394703859418425), KQU(16402784452644521040), KQU( 4993979809687083980), KQU(13254522168097688865), KQU(15643659095244365219), KQU( 5881437660538424982), KQU(11174892200618987379), KQU( 254409966159711077), KQU(17158413043140549909), KQU( 3638048789290376272), KQU( 1376816930299489190), KQU( 4622462095217761923), KQU(15086407973010263515), KQU(13253971772784692238), KQU( 5270549043541649236), KQU(11182714186805411604), KQU(12283846437495577140), KQU( 5297647149908953219), KQU(10047451738316836654), KQU( 4938228100367874746), KQU(12328523025304077923), KQU( 3601049438595312361), KQU( 9313624118352733770), KQU(13322966086117661798), KQU(16660005705644029394), KQU(11337677526988872373), KQU(13869299102574417795), KQU(15642043183045645437), KQU( 3021755569085880019), KQU( 4979741767761188161), KQU(13679979092079279587), KQU( 3344685842861071743), 
KQU(13947960059899588104), KQU( 305806934293368007), KQU( 5749173929201650029), KQU(11123724852118844098), KQU(15128987688788879802), KQU(15251651211024665009), KQU( 7689925933816577776), KQU(16732804392695859449), KQU(17087345401014078468), KQU(14315108589159048871), KQU( 4820700266619778917), KQU(16709637539357958441), KQU( 4936227875177351374), KQU( 2137907697912987247), KQU(11628565601408395420), KQU( 2333250549241556786), KQU( 5711200379577778637), KQU( 5170680131529031729), KQU(12620392043061335164), KQU( 95363390101096078), KQU( 5487981914081709462), KQU( 1763109823981838620), KQU( 3395861271473224396), KQU( 1300496844282213595), KQU( 6894316212820232902), KQU(10673859651135576674), KQU( 5911839658857903252), KQU(17407110743387299102), KQU( 8257427154623140385), KQU(11389003026741800267), KQU( 4070043211095013717), KQU(11663806997145259025), KQU(15265598950648798210), KQU( 630585789434030934), KQU( 3524446529213587334), KQU( 7186424168495184211), KQU(10806585451386379021), KQU(11120017753500499273), KQU( 1586837651387701301), KQU(17530454400954415544), KQU( 9991670045077880430), KQU( 7550997268990730180), KQU( 8640249196597379304), KQU( 3522203892786893823), KQU(10401116549878854788), KQU(13690285544733124852), KQU( 8295785675455774586), KQU(15535716172155117603), KQU( 3112108583723722511), KQU(17633179955339271113), KQU(18154208056063759375), KQU( 1866409236285815666), KQU(13326075895396412882), KQU( 8756261842948020025), KQU( 6281852999868439131), KQU(15087653361275292858), KQU(10333923911152949397), KQU( 5265567645757408500), KQU(12728041843210352184), KQU( 6347959327507828759), KQU( 154112802625564758), KQU(18235228308679780218), KQU( 3253805274673352418), KQU( 4849171610689031197), KQU(17948529398340432518), KQU(13803510475637409167), KQU(13506570190409883095), KQU(15870801273282960805), KQU( 8451286481299170773), KQU( 9562190620034457541), KQU( 8518905387449138364), KQU(12681306401363385655), KQU( 3788073690559762558), KQU( 5256820289573487769), KQU( 2752021372314875467), KQU( 6354035166862520716), KQU( 4328956378309739069), KQU( 449087441228269600), KQU( 5533508742653090868), KQU( 1260389420404746988), KQU(18175394473289055097), KQU( 1535467109660399420), KQU( 8818894282874061442), KQU(12140873243824811213), KQU(15031386653823014946), KQU( 1286028221456149232), KQU( 6329608889367858784), KQU( 9419654354945132725), KQU( 6094576547061672379), KQU(17706217251847450255), KQU( 1733495073065878126), KQU(16918923754607552663), KQU( 8881949849954945044), KQU(12938977706896313891), KQU(14043628638299793407), KQU(18393874581723718233), KQU( 6886318534846892044), KQU(14577870878038334081), KQU(13541558383439414119), KQU(13570472158807588273), KQU(18300760537910283361), KQU( 818368572800609205), KQU( 1417000585112573219), KQU(12337533143867683655), KQU(12433180994702314480), KQU( 778190005829189083), KQU(13667356216206524711), KQU( 9866149895295225230), KQU(11043240490417111999), KQU( 1123933826541378598), KQU( 6469631933605123610), KQU(14508554074431980040), KQU(13918931242962026714), KQU( 2870785929342348285), KQU(14786362626740736974), KQU(13176680060902695786), KQU( 9591778613541679456), KQU( 9097662885117436706), KQU( 749262234240924947), KQU( 1944844067793307093), KQU( 4339214904577487742), KQU( 8009584152961946551), KQU(16073159501225501777), KQU( 3335870590499306217), KQU(17088312653151202847), KQU( 3108893142681931848), KQU(16636841767202792021), KQU(10423316431118400637), KQU( 8008357368674443506), KQU(11340015231914677875), KQU(17687896501594936090), KQU(15173627921763199958), 
KQU( 542569482243721959), KQU(15071714982769812975), KQU( 4466624872151386956), KQU( 1901780715602332461), KQU( 9822227742154351098), KQU( 1479332892928648780), KQU( 6981611948382474400), KQU( 7620824924456077376), KQU(14095973329429406782), KQU( 7902744005696185404), KQU(15830577219375036920), KQU(10287076667317764416), KQU(12334872764071724025), KQU( 4419302088133544331), KQU(14455842851266090520), KQU(12488077416504654222), KQU( 7953892017701886766), KQU( 6331484925529519007), KQU( 4902145853785030022), KQU(17010159216096443073), KQU(11945354668653886087), KQU(15112022728645230829), KQU(17363484484522986742), KQU( 4423497825896692887), KQU( 8155489510809067471), KQU( 258966605622576285), KQU( 5462958075742020534), KQU( 6763710214913276228), KQU( 2368935183451109054), KQU(14209506165246453811), KQU( 2646257040978514881), KQU( 3776001911922207672), KQU( 1419304601390147631), KQU(14987366598022458284), KQU( 3977770701065815721), KQU( 730820417451838898), KQU( 3982991703612885327), KQU( 2803544519671388477), KQU(17067667221114424649), KQU( 2922555119737867166), KQU( 1989477584121460932), KQU(15020387605892337354), KQU( 9293277796427533547), KQU(10722181424063557247), KQU(16704542332047511651), KQU( 5008286236142089514), KQU(16174732308747382540), KQU(17597019485798338402), KQU(13081745199110622093), KQU( 8850305883842258115), KQU(12723629125624589005), KQU( 8140566453402805978), KQU(15356684607680935061), KQU(14222190387342648650), KQU(11134610460665975178), KQU( 1259799058620984266), KQU(13281656268025610041), KQU( 298262561068153992), KQU(12277871700239212922), KQU(13911297774719779438), KQU(16556727962761474934), KQU(17903010316654728010), KQU( 9682617699648434744), KQU(14757681836838592850), KQU( 1327242446558524473), KQU(11126645098780572792), KQU( 1883602329313221774), KQU( 2543897783922776873), KQU(15029168513767772842), KQU(12710270651039129878), KQU(16118202956069604504), KQU(15010759372168680524), KQU( 2296827082251923948), KQU(10793729742623518101), KQU(13829764151845413046), KQU(17769301223184451213), KQU( 3118268169210783372), KQU(17626204544105123127), KQU( 7416718488974352644), KQU(10450751996212925994), KQU( 9352529519128770586), KQU( 259347569641110140), KQU( 8048588892269692697), KQU( 1774414152306494058), KQU(10669548347214355622), KQU(13061992253816795081), KQU(18432677803063861659), KQU( 8879191055593984333), KQU(12433753195199268041), KQU(14919392415439730602), KQU( 6612848378595332963), KQU( 6320986812036143628), KQU(10465592420226092859), KQU( 4196009278962570808), KQU( 3747816564473572224), KQU(17941203486133732898), KQU( 2350310037040505198), KQU( 5811779859134370113), KQU(10492109599506195126), KQU( 7699650690179541274), KQU( 1954338494306022961), KQU(14095816969027231152), KQU( 5841346919964852061), KQU(14945969510148214735), KQU( 3680200305887550992), KQU( 6218047466131695792), KQU( 8242165745175775096), KQU(11021371934053307357), KQU( 1265099502753169797), KQU( 4644347436111321718), KQU( 3609296916782832859), KQU( 8109807992218521571), KQU(18387884215648662020), KQU(14656324896296392902), KQU(17386819091238216751), KQU(17788300878582317152), KQU( 7919446259742399591), KQU( 4466613134576358004), KQU(12928181023667938509), KQU(13147446154454932030), KQU(16552129038252734620), KQU( 8395299403738822450), KQU(11313817655275361164), KQU( 434258809499511718), KQU( 2074882104954788676), KQU( 7929892178759395518), KQU( 9006461629105745388), KQU( 5176475650000323086), KQU(11128357033468341069), KQU(12026158851559118955), KQU(14699716249471156500), KQU( 
448982497120206757), KQU( 4156475356685519900), KQU( 6063816103417215727), KQU(10073289387954971479), KQU( 8174466846138590962), KQU( 2675777452363449006), KQU( 9090685420572474281), KQU( 6659652652765562060), KQU(12923120304018106621), KQU(11117480560334526775), KQU( 937910473424587511), KQU( 1838692113502346645), KQU(11133914074648726180), KQU( 7922600945143884053), KQU(13435287702700959550), KQU( 5287964921251123332), KQU(11354875374575318947), KQU(17955724760748238133), KQU(13728617396297106512), KQU( 4107449660118101255), KQU( 1210269794886589623), KQU(11408687205733456282), KQU( 4538354710392677887), KQU(13566803319341319267), KQU(17870798107734050771), KQU( 3354318982568089135), KQU( 9034450839405133651), KQU(13087431795753424314), KQU( 950333102820688239), KQU( 1968360654535604116), KQU(16840551645563314995), KQU( 8867501803892924995), KQU(11395388644490626845), KQU( 1529815836300732204), KQU(13330848522996608842), KQU( 1813432878817504265), KQU( 2336867432693429560), KQU(15192805445973385902), KQU( 2528593071076407877), KQU( 128459777936689248), KQU( 9976345382867214866), KQU( 6208885766767996043), KQU(14982349522273141706), KQU( 3099654362410737822), KQU(13776700761947297661), KQU( 8806185470684925550), KQU( 8151717890410585321), KQU( 640860591588072925), KQU(14592096303937307465), KQU( 9056472419613564846), KQU(14861544647742266352), KQU(12703771500398470216), KQU( 3142372800384138465), KQU( 6201105606917248196), KQU(18337516409359270184), KQU(15042268695665115339), KQU(15188246541383283846), KQU(12800028693090114519), KQU( 5992859621101493472), KQU(18278043971816803521), KQU( 9002773075219424560), KQU( 7325707116943598353), KQU( 7930571931248040822), KQU( 5645275869617023448), KQU( 7266107455295958487), KQU( 4363664528273524411), KQU(14313875763787479809), KQU(17059695613553486802), KQU( 9247761425889940932), KQU(13704726459237593128), KQU( 2701312427328909832), KQU(17235532008287243115), KQU(14093147761491729538), KQU( 6247352273768386516), KQU( 8268710048153268415), KQU( 7985295214477182083), KQU(15624495190888896807), KQU( 3772753430045262788), KQU( 9133991620474991698), KQU( 5665791943316256028), KQU( 7551996832462193473), KQU(13163729206798953877), KQU( 9263532074153846374), KQU( 1015460703698618353), KQU(17929874696989519390), KQU(18257884721466153847), KQU(16271867543011222991), KQU( 3905971519021791941), KQU(16814488397137052085), KQU( 1321197685504621613), KQU( 2870359191894002181), KQU(14317282970323395450), KQU(13663920845511074366), KQU( 2052463995796539594), KQU(14126345686431444337), KQU( 1727572121947022534), KQU(17793552254485594241), KQU( 6738857418849205750), KQU( 1282987123157442952), KQU(16655480021581159251), KQU( 6784587032080183866), KQU(14726758805359965162), KQU( 7577995933961987349), KQU(12539609320311114036), KQU(10789773033385439494), KQU( 8517001497411158227), KQU(10075543932136339710), KQU(14838152340938811081), KQU( 9560840631794044194), KQU(17445736541454117475), KQU(10633026464336393186), KQU(15705729708242246293), KQU( 1117517596891411098), KQU( 4305657943415886942), KQU( 4948856840533979263), KQU(16071681989041789593), KQU(13723031429272486527), KQU( 7639567622306509462), KQU(12670424537483090390), KQU( 9715223453097197134), KQU( 5457173389992686394), KQU( 289857129276135145), KQU(17048610270521972512), KQU( 692768013309835485), KQU(14823232360546632057), KQU(18218002361317895936), KQU( 3281724260212650204), KQU(16453957266549513795), KQU( 8592711109774511881), KQU( 929825123473369579), KQU(15966784769764367791), KQU( 9627344291450607588), 
KQU(10849555504977813287), KQU( 9234566913936339275), KQU( 6413807690366911210), KQU(10862389016184219267), KQU(13842504799335374048), KQU( 1531994113376881174), KQU( 2081314867544364459), KQU(16430628791616959932), KQU( 8314714038654394368), KQU( 9155473892098431813), KQU(12577843786670475704), KQU( 4399161106452401017), KQU( 1668083091682623186), KQU( 1741383777203714216), KQU( 2162597285417794374), KQU(15841980159165218736), KQU( 1971354603551467079), KQU( 1206714764913205968), KQU( 4790860439591272330), KQU(14699375615594055799), KQU( 8374423871657449988), KQU(10950685736472937738), KQU( 697344331343267176), KQU(10084998763118059810), KQU(12897369539795983124), KQU(12351260292144383605), KQU( 1268810970176811234), KQU( 7406287800414582768), KQU( 516169557043807831), KQU( 5077568278710520380), KQU( 3828791738309039304), KQU( 7721974069946943610), KQU( 3534670260981096460), KQU( 4865792189600584891), KQU(16892578493734337298), KQU( 9161499464278042590), KQU(11976149624067055931), KQU(13219479887277343990), KQU(14161556738111500680), KQU(14670715255011223056), KQU( 4671205678403576558), KQU(12633022931454259781), KQU(14821376219869187646), KQU( 751181776484317028), KQU( 2192211308839047070), KQU(11787306362361245189), KQU(10672375120744095707), KQU( 4601972328345244467), KQU(15457217788831125879), KQU( 8464345256775460809), KQU(10191938789487159478), KQU( 6184348739615197613), KQU(11425436778806882100), KQU( 2739227089124319793), KQU( 461464518456000551), KQU( 4689850170029177442), KQU( 6120307814374078625), KQU(11153579230681708671), KQU( 7891721473905347926), KQU(10281646937824872400), KQU( 3026099648191332248), KQU( 8666750296953273818), KQU(14978499698844363232), KQU(13303395102890132065), KQU( 8182358205292864080), KQU(10560547713972971291), KQU(11981635489418959093), KQU( 3134621354935288409), KQU(11580681977404383968), KQU(14205530317404088650), KQU( 5997789011854923157), KQU(13659151593432238041), KQU(11664332114338865086), KQU( 7490351383220929386), KQU( 7189290499881530378), KQU(15039262734271020220), KQU( 2057217285976980055), KQU( 555570804905355739), KQU(11235311968348555110), KQU(13824557146269603217), KQU(16906788840653099693), KQU( 7222878245455661677), KQU( 5245139444332423756), KQU( 4723748462805674292), KQU(12216509815698568612), KQU(17402362976648951187), KQU(17389614836810366768), KQU( 4880936484146667711), KQU( 9085007839292639880), KQU(13837353458498535449), KQU(11914419854360366677), KQU(16595890135313864103), KQU( 6313969847197627222), KQU(18296909792163910431), KQU(10041780113382084042), KQU( 2499478551172884794), KQU(11057894246241189489), KQU( 9742243032389068555), KQU(12838934582673196228), KQU(13437023235248490367), KQU(13372420669446163240), KQU( 6752564244716909224), KQU( 7157333073400313737), KQU(12230281516370654308), KQU( 1182884552219419117), KQU( 2955125381312499218), KQU(10308827097079443249), KQU( 1337648572986534958), KQU(16378788590020343939), KQU( 108619126514420935), KQU( 3990981009621629188), KQU( 5460953070230946410), KQU( 9703328329366531883), KQU(13166631489188077236), KQU( 1104768831213675170), KQU( 3447930458553877908), KQU( 8067172487769945676), KQU( 5445802098190775347), KQU( 3244840981648973873), KQU(17314668322981950060), KQU( 5006812527827763807), KQU(18158695070225526260), KQU( 2824536478852417853), KQU(13974775809127519886), KQU( 9814362769074067392), KQU(17276205156374862128), KQU(11361680725379306967), KQU( 3422581970382012542), KQU(11003189603753241266), KQU(11194292945277862261), KQU( 6839623313908521348), 
KQU(11935326462707324634), KQU( 1611456788685878444), KQU(13112620989475558907), KQU( 517659108904450427), KQU(13558114318574407624), KQU(15699089742731633077), KQU( 4988979278862685458), KQU( 8111373583056521297), KQU( 3891258746615399627), KQU( 8137298251469718086), KQU(12748663295624701649), KQU( 4389835683495292062), KQU( 5775217872128831729), KQU( 9462091896405534927), KQU( 8498124108820263989), KQU( 8059131278842839525), KQU(10503167994254090892), KQU(11613153541070396656), KQU(18069248738504647790), KQU( 570657419109768508), KQU( 3950574167771159665), KQU( 5514655599604313077), KQU( 2908460854428484165), KQU(10777722615935663114), KQU(12007363304839279486), KQU( 9800646187569484767), KQU( 8795423564889864287), KQU(14257396680131028419), KQU( 6405465117315096498), KQU( 7939411072208774878), KQU(17577572378528990006), KQU(14785873806715994850), KQU(16770572680854747390), KQU(18127549474419396481), KQU(11637013449455757750), KQU(14371851933996761086), KQU( 3601181063650110280), KQU( 4126442845019316144), KQU(10198287239244320669), KQU(18000169628555379659), KQU(18392482400739978269), KQU( 6219919037686919957), KQU( 3610085377719446052), KQU( 2513925039981776336), KQU(16679413537926716955), KQU(12903302131714909434), KQU( 5581145789762985009), KQU(12325955044293303233), KQU(17216111180742141204), KQU( 6321919595276545740), KQU( 3507521147216174501), KQU( 9659194593319481840), KQU(11473976005975358326), KQU(14742730101435987026), KQU( 492845897709954780), KQU(16976371186162599676), KQU(17712703422837648655), KQU( 9881254778587061697), KQU( 8413223156302299551), KQU( 1563841828254089168), KQU( 9996032758786671975), KQU( 138877700583772667), KQU(13003043368574995989), KQU( 4390573668650456587), KQU( 8610287390568126755), KQU(15126904974266642199), KQU( 6703637238986057662), KQU( 2873075592956810157), KQU( 6035080933946049418), KQU(13382846581202353014), KQU( 7303971031814642463), KQU(18418024405307444267), KQU( 5847096731675404647), KQU( 4035880699639842500), KQU(11525348625112218478), KQU( 3041162365459574102), KQU( 2604734487727986558), KQU(15526341771636983145), KQU(14556052310697370254), KQU(12997787077930808155), KQU( 9601806501755554499), KQU(11349677952521423389), KQU(14956777807644899350), KQU(16559736957742852721), KQU(12360828274778140726), KQU( 6685373272009662513), KQU(16932258748055324130), KQU(15918051131954158508), KQU( 1692312913140790144), KQU( 546653826801637367), KQU( 5341587076045986652), KQU(14975057236342585662), KQU(12374976357340622412), KQU(10328833995181940552), KQU(12831807101710443149), KQU(10548514914382545716), KQU( 2217806727199715993), KQU(12627067369242845138), KQU( 4598965364035438158), KQU( 150923352751318171), KQU(14274109544442257283), KQU( 4696661475093863031), KQU( 1505764114384654516), KQU(10699185831891495147), KQU( 2392353847713620519), KQU( 3652870166711788383), KQU( 8640653276221911108), KQU( 3894077592275889704), KQU( 4918592872135964845), KQU(16379121273281400789), KQU(12058465483591683656), KQU(11250106829302924945), KQU( 1147537556296983005), KQU( 6376342756004613268), KQU(14967128191709280506), KQU(18007449949790627628), KQU( 9497178279316537841), KQU( 7920174844809394893), KQU(10037752595255719907), KQU(15875342784985217697), KQU(15311615921712850696), KQU( 9552902652110992950), KQU(14054979450099721140), KQU( 5998709773566417349), KQU(18027910339276320187), KQU( 8223099053868585554), KQU( 7842270354824999767), KQU( 4896315688770080292), KQU(12969320296569787895), KQU( 2674321489185759961), KQU( 4053615936864718439), 
KQU(11349775270588617578), KQU( 4743019256284553975), KQU( 5602100217469723769), KQU(14398995691411527813), KQU( 7412170493796825470), KQU( 836262406131744846), KQU( 8231086633845153022), KQU( 5161377920438552287), KQU( 8828731196169924949), KQU(16211142246465502680), KQU( 3307990879253687818), KQU( 5193405406899782022), KQU( 8510842117467566693), KQU( 6070955181022405365), KQU(14482950231361409799), KQU(12585159371331138077), KQU( 3511537678933588148), KQU( 2041849474531116417), KQU(10944936685095345792), KQU(18303116923079107729), KQU( 2720566371239725320), KQU( 4958672473562397622), KQU( 3032326668253243412), KQU(13689418691726908338), KQU( 1895205511728843996), KQU( 8146303515271990527), KQU(16507343500056113480), KQU( 473996939105902919), KQU( 9897686885246881481), KQU(14606433762712790575), KQU( 6732796251605566368), KQU( 1399778120855368916), KQU( 935023885182833777), KQU(16066282816186753477), KQU( 7291270991820612055), KQU(17530230393129853844), KQU(10223493623477451366), KQU(15841725630495676683), KQU(17379567246435515824), KQU( 8588251429375561971), KQU(18339511210887206423), KQU(17349587430725976100), KQU(12244876521394838088), KQU( 6382187714147161259), KQU(12335807181848950831), KQU(16948885622305460665), KQU(13755097796371520506), KQU(14806740373324947801), KQU( 4828699633859287703), KQU( 8209879281452301604), KQU(12435716669553736437), KQU(13970976859588452131), KQU( 6233960842566773148), KQU(12507096267900505759), KQU( 1198713114381279421), KQU(14989862731124149015), KQU(15932189508707978949), KQU( 2526406641432708722), KQU( 29187427817271982), KQU( 1499802773054556353), KQU(10816638187021897173), KQU( 5436139270839738132), KQU( 6659882287036010082), KQU( 2154048955317173697), KQU(10887317019333757642), KQU(16281091802634424955), KQU(10754549879915384901), KQU(10760611745769249815), KQU( 2161505946972504002), KQU( 5243132808986265107), KQU(10129852179873415416), KQU( 710339480008649081), KQU( 7802129453068808528), KQU(17967213567178907213), KQU(15730859124668605599), KQU(13058356168962376502), KQU( 3701224985413645909), KQU(14464065869149109264), KQU( 9959272418844311646), KQU(10157426099515958752), KQU(14013736814538268528), KQU(17797456992065653951), KQU(17418878140257344806), KQU(15457429073540561521), KQU( 2184426881360949378), KQU( 2062193041154712416), KQU( 8553463347406931661), KQU( 4913057625202871854), KQU( 2668943682126618425), KQU(17064444737891172288), KQU( 4997115903913298637), KQU(12019402608892327416), KQU(17603584559765897352), KQU(11367529582073647975), KQU( 8211476043518436050), KQU( 8676849804070323674), KQU(18431829230394475730), KQU(10490177861361247904), KQU( 9508720602025651349), KQU( 7409627448555722700), KQU( 5804047018862729008), KQU(11943858176893142594), KQU(11908095418933847092), KQU( 5415449345715887652), KQU( 1554022699166156407), KQU( 9073322106406017161), KQU( 7080630967969047082), KQU(18049736940860732943), KQU(12748714242594196794), KQU( 1226992415735156741), KQU(17900981019609531193), KQU(11720739744008710999), KQU( 3006400683394775434), KQU(11347974011751996028), KQU( 3316999628257954608), KQU( 8384484563557639101), KQU(18117794685961729767), KQU( 1900145025596618194), KQU(17459527840632892676), KQU( 5634784101865710994), KQU( 7918619300292897158), KQU( 3146577625026301350), KQU( 9955212856499068767), KQU( 1873995843681746975), KQU( 1561487759967972194), KQU( 8322718804375878474), KQU(11300284215327028366), KQU( 4667391032508998982), KQU( 9820104494306625580), KQU(17922397968599970610), KQU( 1784690461886786712), 
KQU(14940365084341346821), KQU( 5348719575594186181), KQU(10720419084507855261), KQU(14210394354145143274), KQU( 2426468692164000131), KQU(16271062114607059202), KQU(14851904092357070247), KQU( 6524493015693121897), KQU( 9825473835127138531), KQU(14222500616268569578), KQU(15521484052007487468), KQU(14462579404124614699), KQU(11012375590820665520), KQU(11625327350536084927), KQU(14452017765243785417), KQU( 9989342263518766305), KQU( 3640105471101803790), KQU( 4749866455897513242), KQU(13963064946736312044), KQU(10007416591973223791), KQU(18314132234717431115), KQU( 3286596588617483450), KQU( 7726163455370818765), KQU( 7575454721115379328), KQU( 5308331576437663422), KQU(18288821894903530934), KQU( 8028405805410554106), KQU(15744019832103296628), KQU( 149765559630932100), KQU( 6137705557200071977), KQU(14513416315434803615), KQU(11665702820128984473), KQU( 218926670505601386), KQU( 6868675028717769519), KQU(15282016569441512302), KQU( 5707000497782960236), KQU( 6671120586555079567), KQU( 2194098052618985448), KQU(16849577895477330978), KQU(12957148471017466283), KQU( 1997805535404859393), KQU( 1180721060263860490), KQU(13206391310193756958), KQU(12980208674461861797), KQU( 3825967775058875366), KQU(17543433670782042631), KQU( 1518339070120322730), KQU(16344584340890991669), KQU( 2611327165318529819), KQU(11265022723283422529), KQU( 4001552800373196817), KQU(14509595890079346161), KQU( 3528717165416234562), KQU(18153222571501914072), KQU( 9387182977209744425), KQU(10064342315985580021), KQU(11373678413215253977), KQU( 2308457853228798099), KQU( 9729042942839545302), KQU( 7833785471140127746), KQU( 6351049900319844436), KQU(14454610627133496067), KQU(12533175683634819111), KQU(15570163926716513029), KQU(13356980519185762498) }; TEST_BEGIN(test_gen_rand_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; uint32_t r32; sfmt_t *ctx; assert_d_le(get_min_array_size32(), BLOCK_SIZE, "Array size too small"); ctx = init_gen_rand(1234); fill_array32(ctx, array32, BLOCK_SIZE); fill_array32(ctx, array32_2, BLOCK_SIZE); fini_gen_rand(ctx); ctx = init_gen_rand(1234); for (i = 0; i < BLOCK_SIZE; i++) { if (i < COUNT_1) { assert_u32_eq(array32[i], init_gen_rand_32_expected[i], "Output mismatch for i=%d", i); } r32 = gen_rand32(ctx); assert_u32_eq(r32, array32[i], "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); } for (i = 0; i < COUNT_2; i++) { r32 = gen_rand32(ctx); assert_u32_eq(r32, array32_2[i], "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], r32); } fini_gen_rand(ctx); } TEST_END TEST_BEGIN(test_by_array_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; uint32_t r32; sfmt_t *ctx; assert_d_le(get_min_array_size32(), BLOCK_SIZE, "Array size too small"); ctx = init_by_array(ini, 4); fill_array32(ctx, array32, BLOCK_SIZE); fill_array32(ctx, array32_2, BLOCK_SIZE); fini_gen_rand(ctx); ctx = init_by_array(ini, 4); for (i = 0; i < BLOCK_SIZE; i++) { if (i < COUNT_1) { assert_u32_eq(array32[i], init_by_array_32_expected[i], "Output mismatch for i=%d", i); } r32 = gen_rand32(ctx); assert_u32_eq(r32, array32[i], "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); } for (i = 0; i < COUNT_2; i++) { r32 = gen_rand32(ctx); assert_u32_eq(r32, array32_2[i], "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], r32); } fini_gen_rand(ctx); } TEST_END TEST_BEGIN(test_gen_rand_64) { 
uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; uint64_t r; sfmt_t *ctx; assert_d_le(get_min_array_size64(), BLOCK_SIZE64, "Array size too small"); ctx = init_gen_rand(4321); fill_array64(ctx, array64, BLOCK_SIZE64); fill_array64(ctx, array64_2, BLOCK_SIZE64); fini_gen_rand(ctx); ctx = init_gen_rand(4321); for (i = 0; i < BLOCK_SIZE64; i++) { if (i < COUNT_1) { assert_u64_eq(array64[i], init_gen_rand_64_expected[i], "Output mismatch for i=%d", i); } r = gen_rand64(ctx); assert_u64_eq(r, array64[i], "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i, array64[i], r); } for (i = 0; i < COUNT_2; i++) { r = gen_rand64(ctx); assert_u64_eq(r, array64_2[i], "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i, array64_2[i], r); } fini_gen_rand(ctx); } TEST_END TEST_BEGIN(test_by_array_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; uint64_t r; uint32_t ini[] = {5, 4, 3, 2, 1}; sfmt_t *ctx; assert_d_le(get_min_array_size64(), BLOCK_SIZE64, "Array size too small"); ctx = init_by_array(ini, 5); fill_array64(ctx, array64, BLOCK_SIZE64); fill_array64(ctx, array64_2, BLOCK_SIZE64); fini_gen_rand(ctx); ctx = init_by_array(ini, 5); for (i = 0; i < BLOCK_SIZE64; i++) { if (i < COUNT_1) { assert_u64_eq(array64[i], init_by_array_64_expected[i], "Output mismatch for i=%d", i); } r = gen_rand64(ctx); assert_u64_eq(r, array64[i], "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i, array64[i], r); } for (i = 0; i < COUNT_2; i++) { r = gen_rand64(ctx); assert_u64_eq(r, array64_2[i], "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i, array64_2[i], r); } fini_gen_rand(ctx); } TEST_END int main(void) { return test( test_gen_rand_32, test_by_array_32, test_gen_rand_64, test_by_array_64); } jemalloc-sys-0.3.2/rep/test/unit/a0.c010064400007650000024000000003451344617474100155420ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_a0) { void *p; p = a0malloc(1); assert_ptr_not_null(p, "Unexpected a0malloc() error"); a0dalloc(p); } TEST_END int main(void) { return test_no_malloc_init( test_a0); } jemalloc-sys-0.3.2/rep/test/unit/arena_reset.c010064400007650000024000000211201344617474100175240ustar0000000000000000#ifndef ARENA_RESET_PROF_C_ #include "test/jemalloc_test.h" #endif #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/rtree.h" #include "test/extent_hooks.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return ret; } static unsigned get_nsmall(void) { return get_nsizes_impl("arenas.nbins"); } static unsigned get_nlarge(void) { return get_nsizes_impl("arenas.nlextents"); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } static size_t get_small_size(size_t ind) { return get_size_impl("arenas.bin.0.size", ind); } static size_t get_large_size(size_t ind) { return get_size_impl("arenas.lextent.0.size", ind); } /* Like ivsalloc(), but safe to call on discarded allocations. 
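(It returns 0 when the rtree lookup finds no active extent for the pointer, rather than assuming a live allocation; see the checks in the body below.)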
*/ static size_t vsalloc(tsdn_t *tsdn, const void *ptr) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); extent_t *extent; szind_t szind; if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, false, &extent, &szind)) { return 0; } if (extent == NULL) { return 0; } if (extent_state_get(extent) != extent_state_active) { return 0; } if (szind == SC_NSIZES) { return 0; } return sz_index2size(szind); } static unsigned do_arena_create(extent_hooks_t *h) { unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, "Unexpected mallctl() failure"); return arena_ind; } static void do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { #define NLARGE 32 unsigned nsmall, nlarge, i; size_t sz; int flags; tsdn_t *tsdn; flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; nsmall = get_nsmall(); nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge(); *nptrs = nsmall + nlarge; *ptrs = (void **)malloc(*nptrs * sizeof(void *)); assert_ptr_not_null(*ptrs, "Unexpected malloc() failure"); /* Allocate objects with a wide range of sizes. */ for (i = 0; i < nsmall; i++) { sz = get_small_size(i); (*ptrs)[i] = mallocx(sz, flags); assert_ptr_not_null((*ptrs)[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } for (i = 0; i < nlarge; i++) { sz = get_large_size(i); (*ptrs)[nsmall + i] = mallocx(sz, flags); assert_ptr_not_null((*ptrs)[nsmall + i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } tsdn = tsdn_fetch(); /* Verify allocations. */ for (i = 0; i < *nptrs; i++) { assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0, "Allocation should have queryable size"); } } static void do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { tsdn_t *tsdn; unsigned i; tsdn = tsdn_fetch(); if (have_background_thread) { malloc_mutex_lock(tsdn, &background_thread_info_get(arena_ind)->mtx); } /* Verify allocations no longer exist. 
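(Probed with vsalloc() rather than ivsalloc(), since the latter is not safe to call on pointers whose extents were discarded by reset/destroy.)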
*/ for (i = 0; i < nptrs; i++) { assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0, "Allocation should no longer exist"); } if (have_background_thread) { malloc_mutex_unlock(tsdn, &background_thread_info_get(arena_ind)->mtx); } free(ptrs); } static void do_arena_reset_destroy(const char *name, unsigned arena_ind) { size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib(name, mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } static void do_arena_reset(unsigned arena_ind) { do_arena_reset_destroy("arena.0.reset", arena_ind); } static void do_arena_destroy(unsigned arena_ind) { do_arena_reset_destroy("arena.0.destroy", arena_ind); } TEST_BEGIN(test_arena_reset) { unsigned arena_ind; void **ptrs; unsigned nptrs; arena_ind = do_arena_create(NULL); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); do_arena_reset(arena_ind); do_arena_reset_post(ptrs, nptrs, arena_ind); } TEST_END static bool arena_i_initialized(unsigned arena_ind, bool refresh) { bool initialized; size_t mib[3]; size_t miblen, sz; if (refresh) { uint64_t epoch = 1; assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); } miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; sz = sizeof(initialized); assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL, 0), 0, "Unexpected mallctlbymib() failure"); return initialized; } TEST_BEGIN(test_arena_destroy_initial) { assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), "Destroyed arena stats should not be initialized"); } TEST_END TEST_BEGIN(test_arena_destroy_hooks_default) { unsigned arena_ind, arena_ind_another, arena_ind_prev; void **ptrs; unsigned nptrs; arena_ind = do_arena_create(NULL); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); assert_false(arena_i_initialized(arena_ind, false), "Arena stats should not be initialized"); assert_true(arena_i_initialized(arena_ind, true), "Arena stats should be initialized"); /* * Create another arena before destroying one, to better verify arena * index reuse. */ arena_ind_another = do_arena_create(NULL); do_arena_destroy(arena_ind); assert_false(arena_i_initialized(arena_ind, true), "Arena stats should not be initialized"); assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), "Destroyed arena stats should be initialized"); do_arena_reset_post(ptrs, nptrs, arena_ind); arena_ind_prev = arena_ind; arena_ind = do_arena_create(NULL); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); assert_u_eq(arena_ind, arena_ind_prev, "Arena index should have been recycled"); do_arena_destroy(arena_ind); do_arena_reset_post(ptrs, nptrs, arena_ind); do_arena_destroy(arena_ind_another); } TEST_END /* * Actually unmap extents, regardless of opt_retain, so that attempts to access * a destroyed arena's memory will segfault. */ static bool extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? 
"true" : "false", arena_ind); assert_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap, "Wrong hook function"); called_dalloc = true; if (!try_dalloc) { return true; } pages_unmap(addr, size); did_dalloc = true; return false; } static extent_hooks_t hooks_orig; static extent_hooks_t hooks_unmap = { extent_alloc_hook, extent_dalloc_unmap, /* dalloc */ extent_destroy_hook, extent_commit_hook, extent_decommit_hook, extent_purge_lazy_hook, extent_purge_forced_hook, extent_split_hook, extent_merge_hook }; TEST_BEGIN(test_arena_destroy_hooks_unmap) { unsigned arena_ind; void **ptrs; unsigned nptrs; extent_hooks_prep(); try_decommit = false; memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t)); did_alloc = false; arena_ind = do_arena_create(&hooks); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); assert_true(did_alloc, "Expected alloc"); assert_false(arena_i_initialized(arena_ind, false), "Arena stats should not be initialized"); assert_true(arena_i_initialized(arena_ind, true), "Arena stats should be initialized"); did_dalloc = false; do_arena_destroy(arena_ind); assert_true(did_dalloc, "Expected dalloc"); assert_false(arena_i_initialized(arena_ind, true), "Arena stats should not be initialized"); assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), "Destroyed arena stats should be initialized"); do_arena_reset_post(ptrs, nptrs, arena_ind); memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); } TEST_END int main(void) { return test( test_arena_reset, test_arena_destroy_initial, test_arena_destroy_hooks_default, test_arena_destroy_hooks_unmap); } jemalloc-sys-0.3.2/rep/test/unit/arena_reset_prof.c010064400007650000024000000001261344617474100205550ustar0000000000000000#include "test/jemalloc_test.h" #define ARENA_RESET_PROF_C_ #include "arena_reset.c" jemalloc-sys-0.3.2/rep/test/unit/arena_reset_prof.sh010064400007650000024000000000731344617474100207460ustar0000000000000000#!/bin/sh export MALLOC_CONF="prof:true,lg_prof_sample:0" jemalloc-sys-0.3.2/rep/test/unit/atomic.c010064400007650000024000000156561344617474100165310ustar0000000000000000#include "test/jemalloc_test.h" /* * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for * bool, etc. The one exception is that the short name for void * is "p" in * some places and "ptr" in others. In the long run it would be nice to unify * these, but in the short run we'll use this shim. */ #define assert_p_eq assert_ptr_eq /* * t: the non-atomic type, like "uint32_t". * ta: the short name for the type, like "u32". * val[1,2,3]: Values of the given type. The CAS tests use val2 for expected, * and val3 for desired. */ #define DO_TESTS(t, ta, val1, val2, val3) do { \ t val; \ t expected; \ bool success; \ /* This (along with the load below) also tests ATOMIC_LOAD. */ \ atomic_##ta##_t atom = ATOMIC_INIT(val1); \ \ /* ATOMIC_INIT and load. */ \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, "Load or init failed"); \ \ /* Store. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val2, val, "Store failed"); \ \ /* Exchange. 
*/ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val2, val, "Exchange store invalid value"); \ \ /* \ * Weak CAS. Spurious failures are allowed, so we loop a few \ * times. \ */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ success = false; \ for (int i = 0; i < 10 && !success; i++) { \ expected = val2; \ success = atomic_compare_exchange_weak_##ta(&atom, \ &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, expected, \ "CAS should update expected"); \ } \ assert_b_eq(val1 == val2, success, \ "Weak CAS did the wrong state update"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ if (success) { \ assert_##ta##_eq(val3, val, \ "Successful CAS should update atomic"); \ } else { \ assert_##ta##_eq(val1, val, \ "Unsuccessful CAS should not update atomic"); \ } \ \ /* Strong CAS. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ expected = val2; \ success = atomic_compare_exchange_strong_##ta(&atom, &expected, \ val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ assert_b_eq(val1 == val2, success, \ "Strong CAS did the wrong state update"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ if (success) { \ assert_##ta##_eq(val3, val, \ "Successful CAS should update atomic"); \ } else { \ assert_##ta##_eq(val1, val, \ "Unsuccessful CAS should not update atomic"); \ } \ \ \ } while (0) #define DO_INTEGER_TESTS(t, ta, val1, val2) do { \ atomic_##ta##_t atom; \ t val; \ \ /* Fetch-add. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-add should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 + val2, val, \ "Fetch-add should update atomic"); \ \ /* Fetch-sub. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-sub should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 - val2, val, \ "Fetch-sub should update atomic"); \ \ /* Fetch-and. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-and should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 & val2, val, \ "Fetch-and should update atomic"); \ \ /* Fetch-or. */ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-or should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 | val2, val, \ "Fetch-or should update atomic"); \ \ /* Fetch-xor. 
*/ \ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \ assert_##ta##_eq(val1, val, \ "Fetch-xor should return previous value"); \ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ assert_##ta##_eq(val1 ^ val2, val, \ "Fetch-xor should update atomic"); \ } while (0) #define TEST_STRUCT(t, ta) \ typedef struct { \ t val1; \ t val2; \ t val3; \ } ta##_test_t; #define TEST_CASES(t) { \ {(t)-1, (t)-1, (t)-2}, \ {(t)-1, (t) 0, (t)-2}, \ {(t)-1, (t) 1, (t)-2}, \ \ {(t) 0, (t)-1, (t)-2}, \ {(t) 0, (t) 0, (t)-2}, \ {(t) 0, (t) 1, (t)-2}, \ \ {(t) 1, (t)-1, (t)-2}, \ {(t) 1, (t) 0, (t)-2}, \ {(t) 1, (t) 1, (t)-2}, \ \ {(t)0, (t)-(1 << 22), (t)-2}, \ {(t)0, (t)(1 << 22), (t)-2}, \ {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ {(t)(1 << 22), (t)(1 << 22), (t)-2} \ } #define TEST_BODY(t, ta) do { \ const ta##_test_t tests[] = TEST_CASES(t); \ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ ta##_test_t test = tests[i]; \ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ } \ } while (0) #define INTEGER_TEST_BODY(t, ta) do { \ const ta##_test_t tests[] = TEST_CASES(t); \ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ ta##_test_t test = tests[i]; \ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \ } \ } while (0) TEST_STRUCT(uint64_t, u64); TEST_BEGIN(test_atomic_u64) { #if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) test_skip("64-bit atomic operations not supported"); #else INTEGER_TEST_BODY(uint64_t, u64); #endif } TEST_END TEST_STRUCT(uint32_t, u32); TEST_BEGIN(test_atomic_u32) { INTEGER_TEST_BODY(uint32_t, u32); } TEST_END TEST_STRUCT(void *, p); TEST_BEGIN(test_atomic_p) { TEST_BODY(void *, p); } TEST_END TEST_STRUCT(size_t, zu); TEST_BEGIN(test_atomic_zu) { INTEGER_TEST_BODY(size_t, zu); } TEST_END TEST_STRUCT(ssize_t, zd); TEST_BEGIN(test_atomic_zd) { INTEGER_TEST_BODY(ssize_t, zd); } TEST_END TEST_STRUCT(unsigned, u); TEST_BEGIN(test_atomic_u) { INTEGER_TEST_BODY(unsigned, u); } TEST_END int main(void) { return test( test_atomic_u64, test_atomic_u32, test_atomic_p, test_atomic_zu, test_atomic_zd, test_atomic_u); } jemalloc-sys-0.3.2/rep/test/unit/background_thread.c010064400007650000024000000061461344617474100207150ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/util.h" static void test_switch_background_thread_ctl(bool new_val) { bool e0, e1; size_t sz = sizeof(bool); e1 = new_val; assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, &e1, sz), 0, "Unexpected mallctl() failure"); assert_b_eq(e0, !e1, "background_thread should be %d before.\n", !e1); if (e1) { assert_zu_gt(n_background_threads, 0, "Number of background threads should be non zero.\n"); } else { assert_zu_eq(n_background_threads, 0, "Number of background threads should be zero.\n"); } } static void test_repeat_background_thread_ctl(bool before) { bool e0, e1; size_t sz = sizeof(bool); e1 = before; assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, &e1, sz), 0, "Unexpected mallctl() failure"); assert_b_eq(e0, before, "background_thread should be %d.\n", before); if (e1) { assert_zu_gt(n_background_threads, 0, "Number of background threads should be non zero.\n"); } else { assert_zu_eq(n_background_threads, 0, "Number of background threads should be zero.\n"); } } TEST_BEGIN(test_background_thread_ctl) { test_skip_if(!have_background_thread); bool e0, e1; size_t sz = sizeof(bool); assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz, NULL, 
0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("background_thread", (void *)&e1, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_b_eq(e0, e1, "Default and opt.background_thread does not match.\n"); if (e0) { test_switch_background_thread_ctl(false); } assert_zu_eq(n_background_threads, 0, "Number of background threads should be 0.\n"); for (unsigned i = 0; i < 4; i++) { test_switch_background_thread_ctl(true); test_repeat_background_thread_ctl(true); test_repeat_background_thread_ctl(true); test_switch_background_thread_ctl(false); test_repeat_background_thread_ctl(false); test_repeat_background_thread_ctl(false); } } TEST_END TEST_BEGIN(test_background_thread_running) { test_skip_if(!have_background_thread); test_skip_if(!config_stats); #if defined(JEMALLOC_BACKGROUND_THREAD) tsd_t *tsd = tsd_fetch(); background_thread_info_t *info = &background_thread_info[0]; test_repeat_background_thread_ctl(false); test_switch_background_thread_ctl(true); assert_b_eq(info->state, background_thread_started, "Background_thread did not start.\n"); nstime_t start, now; nstime_init(&start, 0); nstime_update(&start); bool ran = false; while (true) { malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); if (info->tot_n_runs > 0) { ran = true; } malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); if (ran) { break; } nstime_init(&now, 0); nstime_update(&now); nstime_subtract(&now, &start); assert_u64_lt(nstime_sec(&now), 1000, "Background threads did not run for 1000 seconds."); sleep(1); } test_switch_background_thread_ctl(false); #endif } TEST_END int main(void) { /* Background_thread creation tests reentrancy naturally. */ return test_no_reentrancy( test_background_thread_ctl, test_background_thread_running); } jemalloc-sys-0.3.2/rep/test/unit/background_thread_enable.c010064400007650000024000000054341344617474100222220ustar0000000000000000#include "test/jemalloc_test.h" const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20"; TEST_BEGIN(test_deferred) { test_skip_if(!have_background_thread); unsigned id; size_t sz_u = sizeof(unsigned); /* * 10 here is somewhat arbitrary, except insofar as we want to ensure * that the number of background threads is smaller than the number of * arenas. I'll ragequit long before we have to spin up 10 threads per * cpu to handle background purging, so this is a conservative * approximation. 
*/ for (unsigned i = 0; i < 10 * ncpus; i++) { assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, "Failed to create arena"); } bool enable = true; size_t sz_b = sizeof(bool); assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to enable background threads"); enable = false; assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to disable background threads"); } TEST_END TEST_BEGIN(test_max_background_threads) { test_skip_if(!have_background_thread); size_t max_n_thds; size_t opt_max_n_thds; size_t sz_m = sizeof(max_n_thds); assert_d_eq(mallctl("opt.max_background_threads", &opt_max_n_thds, &sz_m, NULL, 0), 0, "Failed to get opt.max_background_threads"); assert_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL, 0), 0, "Failed to get max background threads"); assert_zu_eq(opt_max_n_thds, max_n_thds, "max_background_threads and " "opt.max_background_threads should match"); assert_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds, sz_m), 0, "Failed to set max background threads"); unsigned id; size_t sz_u = sizeof(unsigned); for (unsigned i = 0; i < 10 * ncpus; i++) { assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, "Failed to create arena"); } bool enable = true; size_t sz_b = sizeof(bool); assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to enable background threads"); assert_zu_eq(n_background_threads, max_n_thds, "Number of background threads should not change.\n"); size_t new_max_thds = max_n_thds - 1; if (new_max_thds > 0) { assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds, sz_m), 0, "Failed to set max background threads"); assert_zu_eq(n_background_threads, new_max_thds, "Number of background threads should decrease by 1.\n"); } new_max_thds = 1; assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds, sz_m), 0, "Failed to set max background threads"); assert_zu_eq(n_background_threads, new_max_thds, "Number of background threads should be 1.\n"); } TEST_END int main(void) { return test_no_reentrancy( test_deferred, test_max_background_threads); } jemalloc-sys-0.3.2/rep/test/unit/base.c010064400007650000024000000150201344617474100161500ustar0000000000000000#include "test/jemalloc_test.h" #include "test/extent_hooks.h" static extent_hooks_t hooks_null = { extent_alloc_hook, NULL, /* dalloc */ NULL, /* destroy */ NULL, /* commit */ NULL, /* decommit */ NULL, /* purge_lazy */ NULL, /* purge_forced */ NULL, /* split */ NULL /* merge */ }; static extent_hooks_t hooks_not_null = { extent_alloc_hook, extent_dalloc_hook, extent_destroy_hook, NULL, /* commit */ extent_decommit_hook, extent_purge_lazy_hook, extent_purge_forced_hook, NULL, /* split */ NULL /* merge */ }; TEST_BEGIN(test_base_hooks_default) { base_t *base; size_t allocated0, allocated1, resident, mapped, n_thp; tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); if (config_stats) { base_stats_get(tsdn, base, &allocated0, &resident, &mapped, &n_thp); assert_zu_ge(allocated0, sizeof(base_t), "Base header should count as allocated"); if (opt_metadata_thp == metadata_thp_always) { assert_zu_gt(n_thp, 0, "Base should have 1 THP at least."); } } assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), "Unexpected base_alloc() failure"); if (config_stats) { base_stats_get(tsdn, base, &allocated1, &resident, &mapped, &n_thp); assert_zu_ge(allocated1 - allocated0, 42, "At least 42 bytes were allocated by base_alloc()"); } 
base_delete(tsdn, base); } TEST_END TEST_BEGIN(test_base_hooks_null) { extent_hooks_t hooks_orig; base_t *base; size_t allocated0, allocated1, resident, mapped, n_thp; extent_hooks_prep(); try_dalloc = false; try_destroy = true; try_decommit = false; try_purge_lazy = false; try_purge_forced = false; memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t)); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); base = base_new(tsdn, 0, &hooks); assert_ptr_not_null(base, "Unexpected base_new() failure"); if (config_stats) { base_stats_get(tsdn, base, &allocated0, &resident, &mapped, &n_thp); assert_zu_ge(allocated0, sizeof(base_t), "Base header should count as allocated"); if (opt_metadata_thp == metadata_thp_always) { assert_zu_gt(n_thp, 0, "Base should have 1 THP at least."); } } assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), "Unexpected base_alloc() failure"); if (config_stats) { base_stats_get(tsdn, base, &allocated1, &resident, &mapped, &n_thp); assert_zu_ge(allocated1 - allocated0, 42, "At least 42 bytes were allocated by base_alloc()"); } base_delete(tsdn, base); memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); } TEST_END TEST_BEGIN(test_base_hooks_not_null) { extent_hooks_t hooks_orig; base_t *base; void *p, *q, *r, *r_exp; extent_hooks_prep(); try_dalloc = false; try_destroy = true; try_decommit = false; try_purge_lazy = false; try_purge_forced = false; memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t)); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); did_alloc = false; base = base_new(tsdn, 0, &hooks); assert_ptr_not_null(base, "Unexpected base_new() failure"); assert_true(did_alloc, "Expected alloc"); /* * Check for tight packing at specified alignment under simple * conditions. */ { const size_t alignments[] = { 1, QUANTUM, QUANTUM << 1, CACHELINE, CACHELINE << 1, }; unsigned i; for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { size_t alignment = alignments[i]; size_t align_ceil = ALIGNMENT_CEILING(alignment, QUANTUM); p = base_alloc(tsdn, base, 1, alignment); assert_ptr_not_null(p, "Unexpected base_alloc() failure"); assert_ptr_eq(p, (void *)(ALIGNMENT_CEILING((uintptr_t)p, alignment)), "Expected quantum alignment"); q = base_alloc(tsdn, base, alignment, alignment); assert_ptr_not_null(q, "Unexpected base_alloc() failure"); assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q, "Minimal allocation should take up %zu bytes", align_ceil); r = base_alloc(tsdn, base, 1, alignment); assert_ptr_not_null(r, "Unexpected base_alloc() failure"); assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r, "Minimal allocation should take up %zu bytes", align_ceil); } } /* * Allocate an object that cannot fit in the first block, then verify * that the first block's remaining space is considered for subsequent * allocation. */ assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM, "Remainder insufficient for test"); /* Use up all but one quantum of block. 
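Repeated QUANTUM-sized allocations leave exactly one quantum free, so the subsequent QUANTUM + 1 request cannot fit and must spill into a fresh block.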
*/ while (extent_bsize_get(&base->blocks->extent) > QUANTUM) { p = base_alloc(tsdn, base, QUANTUM, QUANTUM); assert_ptr_not_null(p, "Unexpected base_alloc() failure"); } r_exp = extent_addr_get(&base->blocks->extent); assert_zu_eq(base->extent_sn_next, 1, "One extant block expected"); q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM); assert_ptr_not_null(q, "Unexpected base_alloc() failure"); assert_ptr_ne(q, r_exp, "Expected allocation from new block"); assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); r = base_alloc(tsdn, base, QUANTUM, QUANTUM); assert_ptr_not_null(r, "Unexpected base_alloc() failure"); assert_ptr_eq(r, r_exp, "Expected allocation from first block"); assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); /* * Check for proper alignment support when normal blocks are too small. */ { const size_t alignments[] = { HUGEPAGE, HUGEPAGE << 1 }; unsigned i; for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { size_t alignment = alignments[i]; p = base_alloc(tsdn, base, QUANTUM, alignment); assert_ptr_not_null(p, "Unexpected base_alloc() failure"); assert_ptr_eq(p, (void *)(ALIGNMENT_CEILING((uintptr_t)p, alignment)), "Expected %zu-byte alignment", alignment); } } called_dalloc = called_destroy = called_decommit = called_purge_lazy = called_purge_forced = false; base_delete(tsdn, base); assert_true(called_dalloc, "Expected dalloc call"); assert_true(!called_destroy, "Unexpected destroy call"); assert_true(called_decommit, "Expected decommit call"); assert_true(called_purge_lazy, "Expected purge_lazy call"); assert_true(called_purge_forced, "Expected purge_forced call"); try_dalloc = true; try_destroy = true; try_decommit = true; try_purge_lazy = true; try_purge_forced = true; memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); } TEST_END int main(void) { return test( test_base_hooks_default, test_base_hooks_null, test_base_hooks_not_null); } jemalloc-sys-0.3.2/rep/test/unit/binshard.c010064400007650000024000000071201344617474100170320ustar0000000000000000#include "test/jemalloc_test.h" /* Config -- "narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" */ #define NTHREADS 16 #define REMOTE_NALLOC 256 static void * thd_producer(void *varg) { void **mem = varg; unsigned arena, i; size_t sz; sz = sizeof(arena); /* Remote arena. */ assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); for (i = 0; i < REMOTE_NALLOC / 2; i++) { mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena)); } /* Remote bin. */ for (; i < REMOTE_NALLOC; i++) { mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(0)); } return NULL; } TEST_BEGIN(test_producer_consumer) { thd_t thds[NTHREADS]; void *mem[NTHREADS][REMOTE_NALLOC]; unsigned i; /* Create producer threads to allocate. */ for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_producer, mem[i]); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } /* Remote deallocation by the current thread. */ for (i = 0; i < NTHREADS; i++) { for (unsigned j = 0; j < REMOTE_NALLOC; j++) { assert_ptr_not_null(mem[i][j], "Unexpected remote allocation failure"); dallocx(mem[i][j], 0); } } } TEST_END static void * thd_start(void *varg) { void *ptr, *ptr2; extent_t *extent; unsigned shard1, shard2; tsdn_t *tsdn = tsdn_fetch(); /* Try triggering allocations from sharded bins. 
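Under this test's MALLOC_CONF (bin_shards:1-160:16|129-512:4|256-256:8), size 1 maps to as many as 16 shards and size 129 to 4, so any nonzero shard id proves a sharded bin was used.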
*/ for (unsigned i = 0; i < 1024; i++) { ptr = mallocx(1, MALLOCX_TCACHE_NONE); ptr2 = mallocx(129, MALLOCX_TCACHE_NONE); extent = iealloc(tsdn, ptr); shard1 = extent_binshard_get(extent); dallocx(ptr, 0); assert_u_lt(shard1, 16, "Unexpected bin shard used"); extent = iealloc(tsdn, ptr2); shard2 = extent_binshard_get(extent); dallocx(ptr2, 0); assert_u_lt(shard2, 4, "Unexpected bin shard used"); if (shard1 > 0 || shard2 > 0) { /* Triggered sharded bin usage. */ return (void *)(uintptr_t)shard1; } } return NULL; } TEST_BEGIN(test_bin_shard_mt) { thd_t thds[NTHREADS]; unsigned i; for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, NULL); } bool sharded = false; for (i = 0; i < NTHREADS; i++) { void *ret; thd_join(thds[i], &ret); if (ret != NULL) { sharded = true; } } assert_b_eq(sharded, true, "Did not find sharded bins"); } TEST_END TEST_BEGIN(test_bin_shard) { unsigned nbins, i; size_t mib[4], mib2[4]; size_t miblen, miblen2, len; len = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, "Unexpected mallctl() failure"); miblen = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); miblen2 = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0, "Unexpected mallctlnametomib() failure"); for (i = 0; i < nbins; i++) { uint32_t nshards; size_t size, sz1, sz2; mib[2] = i; sz1 = sizeof(nshards); assert_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1, NULL, 0), 0, "Unexpected mallctlbymib() failure"); mib2[2] = i; sz2 = sizeof(size); assert_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2, NULL, 0), 0, "Unexpected mallctlbymib() failure"); if (size >= 1 && size <= 128) { assert_u_eq(nshards, 16, "Unexpected nshards"); } else if (size == 256) { assert_u_eq(nshards, 8, "Unexpected nshards"); } else if (size > 128 && size <= 512) { assert_u_eq(nshards, 4, "Unexpected nshards"); } else { assert_u_eq(nshards, 1, "Unexpected nshards"); } } } TEST_END int main(void) { return test_no_reentrancy( test_bin_shard, test_bin_shard_mt, test_producer_consumer); } jemalloc-sys-0.3.2/rep/test/unit/binshard.sh010064400007650000024000000001221344617474100172150ustar0000000000000000#!/bin/sh export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" jemalloc-sys-0.3.2/rep/test/unit/bit_util.c010064400007650000024000000060061344617474100170550ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/bit_util.h" #define TEST_POW2_CEIL(t, suf, pri) do { \ unsigned i, pow2; \ t x; \ \ assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ \ for (i = 0; i < sizeof(t) * 8; i++) { \ assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ << i, "Unexpected result"); \ } \ \ for (i = 2; i < sizeof(t) * 8; i++) { \ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ ((t)1) << i, "Unexpected result"); \ } \ \ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ ((t)1) << (i+1), "Unexpected result"); \ } \ \ for (pow2 = 1; pow2 < 25; pow2++) { \ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ x++) { \ assert_##suf##_eq(pow2_ceil_##suf(x), \ ((t)1) << pow2, \ "Unexpected result, x=%"pri, x); \ } \ } \ } while (0) TEST_BEGIN(test_pow2_ceil_u64) { TEST_POW2_CEIL(uint64_t, u64, FMTu64); } TEST_END TEST_BEGIN(test_pow2_ceil_u32) { TEST_POW2_CEIL(uint32_t, u32, FMTu32); } TEST_END TEST_BEGIN(test_pow2_ceil_zu) { TEST_POW2_CEIL(size_t, zu, "zu"); } TEST_END void assert_lg_ceil_range(size_t 
input, unsigned answer) { if (input == 1) { assert_u_eq(0, answer, "Got %u as lg_ceil of 1", answer); return; } assert_zu_le(input, (ZU(1) << answer), "Got %u as lg_ceil of %zu", answer, input); assert_zu_gt(input, (ZU(1) << (answer - 1)), "Got %u as lg_ceil of %zu", answer, input); } void assert_lg_floor_range(size_t input, unsigned answer) { if (input == 1) { assert_u_eq(0, answer, "Got %u as lg_floor of 1", answer); return; } assert_zu_ge(input, (ZU(1) << answer), "Got %u as lg_floor of %zu", answer, input); assert_zu_lt(input, (ZU(1) << (answer + 1)), "Got %u as lg_floor of %zu", answer, input); } TEST_BEGIN(test_lg_ceil_floor) { for (size_t i = 1; i < 10 * 1000 * 1000; i++) { assert_lg_ceil_range(i, lg_ceil(i)); assert_lg_ceil_range(i, LG_CEIL(i)); assert_lg_floor_range(i, lg_floor(i)); assert_lg_floor_range(i, LG_FLOOR(i)); } for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) { for (size_t j = 0; j < (1 << 4); j++) { size_t num1 = ((size_t)1 << i) - j * ((size_t)1 << (i - 4)); size_t num2 = ((size_t)1 << i) + j * ((size_t)1 << (i - 4)); assert_zu_ne(num1, 0, "Invalid lg argument"); assert_zu_ne(num2, 0, "Invalid lg argument"); assert_lg_ceil_range(num1, lg_ceil(num1)); assert_lg_ceil_range(num1, LG_CEIL(num1)); assert_lg_ceil_range(num2, lg_ceil(num2)); assert_lg_ceil_range(num2, LG_CEIL(num2)); assert_lg_floor_range(num1, lg_floor(num1)); assert_lg_floor_range(num1, LG_FLOOR(num1)); assert_lg_floor_range(num2, lg_floor(num2)); assert_lg_floor_range(num2, LG_FLOOR(num2)); } } } TEST_END int main(void) { return test( test_pow2_ceil_u64, test_pow2_ceil_u32, test_pow2_ceil_zu, test_lg_ceil_floor); } jemalloc-sys-0.3.2/rep/test/unit/bitmap.c010064400007650000024000000257411344617474100165250ustar0000000000000000#include "test/jemalloc_test.h" #define NBITS_TAB \ NB( 1) \ NB( 2) \ NB( 3) \ NB( 4) \ NB( 5) \ NB( 6) \ NB( 7) \ NB( 8) \ NB( 9) \ NB(10) \ NB(11) \ NB(12) \ NB(13) \ NB(14) \ NB(15) \ NB(16) \ NB(17) \ NB(18) \ NB(19) \ NB(20) \ NB(21) \ NB(22) \ NB(23) \ NB(24) \ NB(25) \ NB(26) \ NB(27) \ NB(28) \ NB(29) \ NB(30) \ NB(31) \ NB(32) \ \ NB(33) \ NB(34) \ NB(35) \ NB(36) \ NB(37) \ NB(38) \ NB(39) \ NB(40) \ NB(41) \ NB(42) \ NB(43) \ NB(44) \ NB(45) \ NB(46) \ NB(47) \ NB(48) \ NB(49) \ NB(50) \ NB(51) \ NB(52) \ NB(53) \ NB(54) \ NB(55) \ NB(56) \ NB(57) \ NB(58) \ NB(59) \ NB(60) \ NB(61) \ NB(62) \ NB(63) \ NB(64) \ NB(65) \ \ NB(126) \ NB(127) \ NB(128) \ NB(129) \ NB(130) \ \ NB(254) \ NB(255) \ NB(256) \ NB(257) \ NB(258) \ \ NB(510) \ NB(511) \ NB(512) \ NB(513) \ NB(514) \ \ NB(1024) \ NB(2048) \ NB(4096) \ NB(8192) \ NB(16384) \ static void test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { bitmap_info_t binfo_dyn; bitmap_info_init(&binfo_dyn, nbits); assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn), "Unexpected difference between static and dynamic initialization, " "nbits=%zu", nbits); assert_zu_eq(binfo->nbits, binfo_dyn.nbits, "Unexpected difference between static and dynamic initialization, " "nbits=%zu", nbits); #ifdef BITMAP_USE_TREE assert_u_eq(binfo->nlevels, binfo_dyn.nlevels, "Unexpected difference between static and dynamic initialization, " "nbits=%zu", nbits); { unsigned i; for (i = 0; i < binfo->nlevels; i++) { assert_zu_eq(binfo->levels[i].group_offset, binfo_dyn.levels[i].group_offset, "Unexpected difference between static and dynamic " "initialization, nbits=%zu, level=%u", nbits, i); } } #else assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups, "Unexpected difference between static and dynamic initialization"); #endif } 
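/*
 * Illustrative sketch, not part of the original suite (the helper name is
 * hypothetical): the minimal bitmap lifecycle that the tests below exercise
 * piecemeal, using only the bitmap API already present in this file.
 */
static inline void
example_bitmap_lifecycle(void) {
	bitmap_info_t binfo;
	bitmap_t *bitmap;

	/* Describe a 10-bit bitmap, then allocate backing storage for it. */
	bitmap_info_init(&binfo, 10);
	bitmap = (bitmap_t *)malloc(bitmap_size(&binfo));
	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");

	bitmap_init(bitmap, &binfo, false);	/* All bits start unset. */
	bitmap_set(bitmap, &binfo, 3);
	/* bitmap_sfu() finds, sets, and returns the first unset bit. */
	assert_zu_eq(bitmap_sfu(bitmap, &binfo), 0,
	    "First unset bit should be bit 0");
	/* bitmap_ffu() reports the first unset bit at or above min_bit. */
	assert_zu_eq(bitmap_ffu(bitmap, &binfo, 1), 1,
	    "Bit 1 should be the first unset bit at or above 1");
	assert_true(bitmap_get(bitmap, &binfo, 3), "Bit 3 should be set");
	free(bitmap);
}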
TEST_BEGIN(test_bitmap_initializer) { #define NB(nbits) { \ if (nbits <= BITMAP_MAXBITS) { \ bitmap_info_t binfo = \ BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_initializer_body(&binfo, nbits); \ } \ } NBITS_TAB #undef NB } TEST_END static size_t test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits, size_t prev_size) { size_t size = bitmap_size(binfo); assert_zu_ge(size, (nbits >> 3), "Bitmap size is smaller than expected"); assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected"); return size; } TEST_BEGIN(test_bitmap_size) { size_t nbits, prev_size; prev_size = 0; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); prev_size = test_bitmap_size_body(&binfo, nbits, prev_size); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ prev_size = test_bitmap_size_body(&binfo, nbits, \ prev_size); \ } prev_size = 0; NBITS_TAB #undef NB } TEST_END static void test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); for (i = 0; i < nbits; i++) { assert_false(bitmap_get(bitmap, binfo, i), "Bit should be unset"); } bitmap_init(bitmap, binfo, true); for (i = 0; i < nbits; i++) { assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set"); } free(bitmap); } TEST_BEGIN(test_bitmap_init) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_init_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_init_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END static void test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); for (i = 0; i < nbits; i++) { bitmap_set(bitmap, binfo, i); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); free(bitmap); } TEST_BEGIN(test_bitmap_set) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_set_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_set_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END static void test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); for (i = 0; i < nbits; i++) { bitmap_set(bitmap, binfo, i); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); for (i = 0; i < nbits; i++) { bitmap_unset(bitmap, binfo, i); } for (i = 0; i < nbits; i++) { bitmap_set(bitmap, binfo, i); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); free(bitmap); } TEST_BEGIN(test_bitmap_unset) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_unset_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_unset_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END static void test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { bitmap_t *bitmap = (bitmap_t 
*)malloc(bitmap_size(binfo)); assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); /* Iteratively set bits starting at the beginning. */ for (size_t i = 0; i < nbits; i++) { assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be just after previous first unset " "bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, "First unset bit should be just after previous first unset " "bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "First unset bit should be just after previous first unset " "bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "First unset bit should be just after previous first unset " "bit"); } assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); /* * Iteratively unset bits starting at the end, and verify that * bitmap_sfu() reaches the unset bits. */ for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */ bitmap_unset(bitmap, binfo, i); assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be the bit previously unset"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, "First unset bit should be the bit previously unset"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "First unset bit should be the bit previously unset"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "First unset bit should be the bit previously unset"); bitmap_unset(bitmap, binfo, i); } assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset"); /* * Iteratively set bits starting at the beginning, and verify that * bitmap_sfu() looks past them. */ for (size_t i = 1; i < nbits; i++) { bitmap_set(bitmap, binfo, i - 1); assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be just after the bit previously " "set"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, "First unset bit should be just after the bit previously " "set"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "First unset bit should be just after the bit previously " "set"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "First unset bit should be just after the bit previously " "set"); bitmap_unset(bitmap, binfo, i); } assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1, "First unset bit should be the last bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1), nbits - 1, "First unset bit should be the last bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1, "First unset bit should be the last bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1, "First unset bit should be the last bit"); assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); /* * Bubble a "usu" pattern through the bitmap and verify that * bitmap_ffu() finds the correct bit for all five min_bit cases. 
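 * (A "usu" window is unset/set/unset: bits i and i+2 are cleared while
 * bit i+1 stays set, so the probes below cover min_bit values i-1, i,
 * i+1, i+2, and i+3.)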
*/ if (nbits >= 3) { for (size_t i = 0; i < nbits-2; i++) { bitmap_unset(bitmap, binfo, i); bitmap_unset(bitmap, binfo, i+2); if (i > 0) { assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, "Unexpected first unset bit"); } assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2, "Unexpected first unset bit"); if (i + 3 < nbits) { assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3), nbits, "Unexpected first unset bit"); } assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "Unexpected first unset bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2, "Unexpected first unset bit"); } } /* * Unset the last bit, bubble another unset bit through the bitmap, and * verify that bitmap_ffu() finds the correct bit for all four min_bit * cases. */ if (nbits >= 3) { bitmap_unset(bitmap, binfo, nbits-1); for (size_t i = 0; i < nbits-1; i++) { bitmap_unset(bitmap, binfo, i); if (i > 0) { assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, "Unexpected first unset bit"); } assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1, "Unexpected first unset bit"); assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1), nbits-1, "Unexpected first unset bit"); assert_zu_eq(bitmap_sfu(bitmap, binfo), i, "Unexpected first unset bit"); } assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1, "Unexpected first unset bit"); } free(bitmap); } TEST_BEGIN(test_bitmap_xfu) { size_t nbits; for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); test_bitmap_xfu_body(&binfo, nbits); } #define NB(nbits) { \ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ test_bitmap_xfu_body(&binfo, nbits); \ } NBITS_TAB #undef NB } TEST_END int main(void) { return test( test_bitmap_initializer, test_bitmap_size, test_bitmap_init, test_bitmap_set, test_bitmap_unset, test_bitmap_xfu); } jemalloc-sys-0.3.2/rep/test/unit/ckh.c010064400007650000024000000125461344617474100160150ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_new_delete) { tsd_t *tsd; ckh_t ckh; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_count_insert_search_remove) { tsd_t *tsd; ckh_t ckh; const char *strs[] = { "a string", "A string", "a string.", "A string." }; const char *missing = "A string not in the hash table."; size_t i; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), "Unexpected ckh_new() error"); assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); /* Insert. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { ckh_insert(tsd, &ckh, strs[i], strs[i]); assert_zu_eq(ckh_count(&ckh), i+1, "ckh_count() should return %zu, but it returned %zu", i+1, ckh_count(&ckh)); } /* Search. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { union { void *p; const char *s; } k, v; void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; vp = (i & 2) ? &v.p : NULL; k.p = NULL; v.p = NULL; assert_false(ckh_search(&ckh, strs[i], kp, vp), "Unexpected ckh_search() error"); ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? 
strs[i] : (const char *)NULL; assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); } assert_true(ckh_search(&ckh, missing, NULL, NULL), "Unexpected ckh_search() success"); /* Remove. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { union { void *p; const char *s; } k, v; void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; vp = (i & 2) ? &v.p : NULL; k.p = NULL; v.p = NULL; assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp), "Unexpected ckh_remove() error"); ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? strs[i] : (const char *)NULL; assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); assert_zu_eq(ckh_count(&ckh), sizeof(strs)/sizeof(const char *) - i - 1, "ckh_count() should return %zu, but it returned %zu", sizeof(strs)/sizeof(const char *) - i - 1, ckh_count(&ckh)); } ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_insert_iter_remove) { #define NITEMS ZU(1000) tsd_t *tsd; ckh_t ckh; void **p[NITEMS]; void *q, *r; size_t i; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); for (i = 0; i < NITEMS; i++) { p[i] = mallocx(i+1, 0); assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); } for (i = 0; i < NITEMS; i++) { size_t j; for (j = i; j < NITEMS; j++) { assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), "Unexpected ckh_insert() failure"); assert_false(ckh_search(&ckh, p[j], &q, &r), "Unexpected ckh_search() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); } assert_zu_eq(ckh_count(&ckh), NITEMS, "ckh_count() should return %zu, but it returned %zu", NITEMS, ckh_count(&ckh)); for (j = i + 1; j < NITEMS; j++) { assert_false(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() success"); } { bool seen[NITEMS]; size_t tabind; memset(seen, 0, sizeof(seen)); for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) { size_t k; assert_ptr_eq(q, r, "Key and val not equal"); for (k = 0; k < NITEMS; k++) { if (p[k] == q) { assert_false(seen[k], "Item %zu already seen", k); seen[k] = true; break; } } } for (j = 0; j < i + 1; j++) { assert_true(seen[j], "Item %zu not seen", j); } for (; j < NITEMS; j++) { assert_false(seen[j], "Item %zu seen", j); } } } for (i = 0; i < NITEMS; i++) { assert_false(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[i], q, "Key pointer mismatch"); assert_ptr_eq(p[i], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() success"); dallocx(p[i], 0); } assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); ckh_delete(tsd, &ckh); #undef NITEMS } TEST_END int main(void) { return test( test_new_delete, test_count_insert_search_remove, test_insert_iter_remove); } 
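/*
 * Illustrative sketch, not part of the original suite (the helper name is
 * hypothetical): one complete insert/search/remove round trip through the
 * cuckoo hash, using only the ckh API exercised above. Note that the
 * ckh_* functions return false on success.
 */
static inline void
example_ckh_roundtrip(void) {
	tsd_t *tsd = tsd_fetch();
	ckh_t ckh;
	const char *key = "example key";
	void *v;

	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
	    ckh_string_keycomp), "Unexpected ckh_new() error");
	/* Store the key as its own value, as the tests above do. */
	assert_false(ckh_insert(tsd, &ckh, key, key),
	    "Unexpected ckh_insert() error");
	assert_zu_eq(ckh_count(&ckh), 1, "Table should hold one item");
	assert_false(ckh_search(&ckh, key, NULL, &v),
	    "Unexpected ckh_search() error");
	assert_ptr_eq((void *)key, v, "Value mismatch");
	assert_false(ckh_remove(tsd, &ckh, key, NULL, &v),
	    "Unexpected ckh_remove() error");
	assert_zu_eq(ckh_count(&ckh), 0, "Table should be empty");
	ckh_delete(tsd, &ckh);
}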
jemalloc-sys-0.3.2/rep/test/unit/decay.c010064400007650000024000000440141344617474100163300ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ticker.h" static nstime_monotonic_t *nstime_monotonic_orig; static nstime_update_t *nstime_update_orig; static unsigned nupdates_mock; static nstime_t time_mock; static bool monotonic_mock; static bool check_background_thread_enabled(void) { bool enabled; size_t sz = sizeof(bool); int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); if (ret == ENOENT) { return false; } assert_d_eq(ret, 0, "Unexpected mallctl error"); return enabled; } static bool nstime_monotonic_mock(void) { return monotonic_mock; } static bool nstime_update_mock(nstime_t *time) { nupdates_mock++; if (monotonic_mock) { nstime_copy(time, &time_mock); } return !monotonic_mock; } static unsigned do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0, "Unexpected mallctlbymib() failure"); assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0, "Unexpected mallctlbymib() failure"); return arena_ind; } static void do_arena_destroy(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } void do_epoch(void) { uint64_t epoch = 1; assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); } void do_purge(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } void do_decay(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } static uint64_t get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; uint64_t npurge = 0; size_t sz = sizeof(npurge); assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0), config_stats ? 
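/* Purge statistics exist only when jemalloc is built with stats support; without config_stats the stats.arenas.<i>.*_npurge nodes are absent and ENOENT is the expected mallctl result. */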
0 : ENOENT, "Unexpected mallctlbymib() failure"); return npurge; } static uint64_t get_arena_dirty_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind); } static uint64_t get_arena_dirty_purged(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind); } static uint64_t get_arena_muzzy_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); } static uint64_t get_arena_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); } static size_t get_arena_pdirty(unsigned arena_ind) { do_epoch(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; size_t pdirty; size_t sz = sizeof(pdirty); assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0, "Unexpected mallctlbymib() failure"); return pdirty; } static size_t get_arena_pmuzzy(unsigned arena_ind) { do_epoch(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; size_t pmuzzy; size_t sz = sizeof(pmuzzy); assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0, "Unexpected mallctlbymib() failure"); return pmuzzy; } static void * do_mallocx(size_t size, int flags) { void *p = mallocx(size, flags); assert_ptr_not_null(p, "Unexpected mallocx() failure"); return p; } static void generate_dirty(unsigned arena_ind, size_t size) { int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; void *p = do_mallocx(size, flags); dallocx(p, flags); } TEST_BEGIN(test_decay_ticks) { test_skip_if(check_background_thread_enabled()); ticker_t *decay_ticker; unsigned tick0, tick1, arena_ind; size_t sz, large0; void *p; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); /* Set up a manually managed arena for test. */ arena_ind = do_arena_create(0, 0); /* Migrate to the new arena, and get the ticker. */ unsigned old_arena_ind; size_t sz_arena_ind = sizeof(old_arena_ind); assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0, "Unexpected mallctl() failure"); decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind); assert_ptr_not_null(decay_ticker, "Unexpected failure getting decay ticker"); /* * Test the standard APIs using a large size class, since we can't * control tcache interactions for small size classes (except by * completely disabling tcache for the entire test program). */ /* malloc(). */ tick0 = ticker_read(decay_ticker); p = malloc(large0); assert_ptr_not_null(p, "Unexpected malloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); /* free(). */ tick0 = ticker_read(decay_ticker); free(p); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()"); /* calloc(). 
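 * Expected to tick the decay ticker on the allocation path, just like malloc().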
*/ tick0 = ticker_read(decay_ticker); p = calloc(1, large0); assert_ptr_not_null(p, "Unexpected calloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); free(p); /* posix_memalign(). */ tick0 = ticker_read(decay_ticker); assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0, "Unexpected posix_memalign() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during posix_memalign()"); free(p); /* aligned_alloc(). */ tick0 = ticker_read(decay_ticker); p = aligned_alloc(sizeof(size_t), large0); assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during aligned_alloc()"); free(p); /* realloc(). */ /* Allocate. */ tick0 = ticker_read(decay_ticker); p = realloc(NULL, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Reallocate. */ tick0 = ticker_read(decay_ticker); p = realloc(p, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Deallocate. */ tick0 = ticker_read(decay_ticker); realloc(p, 0); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* * Test the *allocx() APIs using large and small size classes, with * tcache explicitly disabled. */ { unsigned i; size_t allocx_sizes[2]; allocx_sizes[0] = large0; allocx_sizes[1] = 1; for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { sz = allocx_sizes[i]; /* mallocx(). */ tick0 = ticker_read(decay_ticker); p = mallocx(sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during mallocx() (sz=%zu)", sz); /* rallocx(). */ tick0 = ticker_read(decay_ticker); p = rallocx(p, sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected rallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during rallocx() (sz=%zu)", sz); /* xallocx(). */ tick0 = ticker_read(decay_ticker); xallocx(p, sz, 0, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during xallocx() (sz=%zu)", sz); /* dallocx(). */ tick0 = ticker_read(decay_ticker); dallocx(p, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during dallocx() (sz=%zu)", sz); /* sdallocx(). */ p = mallocx(sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick0 = ticker_read(decay_ticker); sdallocx(p, sz, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during sdallocx() " "(sz=%zu)", sz); } } /* * Test tcache fill/flush interactions for large and small size classes, * using an explicit tcache. 
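 * A flush should tick the ticker only when the freed object actually resides in the tcache (sz <= tcache_max); larger objects bypass it, as asserted below.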
*/ unsigned tcache_ind, i; size_t tcache_sizes[2]; tcache_sizes[0] = large0; tcache_sizes[1] = 1; size_t tcache_max, sz_tcache_max; sz_tcache_max = sizeof(tcache_max); assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, NULL, 0), 0, "Unexpected mallctl failure"); for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { sz = tcache_sizes[i]; /* tcache fill. */ tick0 = ticker_read(decay_ticker); p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during tcache fill " "(sz=%zu)", sz); /* tcache flush. */ dallocx(p, MALLOCX_TCACHE(tcache_ind)); tick0 = ticker_read(decay_ticker); assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tcache_ind, sizeof(unsigned)), 0, "Unexpected mallctl failure"); tick1 = ticker_read(decay_ticker); /* Will only tick if it's in tcache. */ if (sz <= tcache_max) { assert_u32_ne(tick1, tick0, "Expected ticker to tick during tcache " "flush (sz=%zu)", sz); } else { assert_u32_eq(tick1, tick0, "Unexpected ticker tick during tcache " "flush (sz=%zu)", sz); } } } TEST_END static void decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt, uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) { #define NINTERVALS 101 nstime_t time, update_interval, decay_ms, deadline; nstime_init(&time, 0); nstime_update(&time); nstime_init2(&decay_ms, dt, 0); nstime_copy(&deadline, &time); nstime_add(&deadline, &decay_ms); nstime_init2(&update_interval, dt, 0); nstime_idivide(&update_interval, NINTERVALS); /* * Keep q's slab from being deallocated during the looping below. If a * cached slab were to repeatedly come and go during looping, it could * prevent the decay backlog ever becoming empty. */ void *p = do_mallocx(1, flags); uint64_t dirty_npurge1, muzzy_npurge1; do { for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) { void *q = do_mallocx(1, flags); dallocx(q, flags); } dirty_npurge1 = get_arena_dirty_npurge(arena_ind); muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind); nstime_add(&time_mock, &update_interval); nstime_update(&time); } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 == dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) || !terminate_asap)); dallocx(p, flags); if (config_stats) { assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 + muzzy_npurge0, "Expected purging to occur"); } #undef NINTERVALS } TEST_BEGIN(test_decay_ticker) { test_skip_if(check_background_thread_enabled()); #define NPS 2048 ssize_t ddt = opt_dirty_decay_ms; ssize_t mdt = opt_muzzy_decay_ms; unsigned arena_ind = do_arena_create(ddt, mdt); int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); void *ps[NPS]; size_t large; /* * Allocate a bunch of large objects, pause the clock, deallocate every * other object (to fragment virtual memory), restore the clock, then * [md]allocx() in a tight loop while advancing time rapidly to verify * the ticker triggers purging. 
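 * The clock is mocked via nstime_update_mock(), so the decay deadline can be crossed deterministically without real sleeps.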
*/ size_t tcache_max; size_t sz = sizeof(size_t); assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL, 0), 0, "Unexpected mallctl failure"); large = nallocx(tcache_max + 1, flags); do_purge(arena_ind); uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind); uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind); for (unsigned i = 0; i < NPS; i++) { ps[i] = do_mallocx(large, flags); } nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); monotonic_mock = true; nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (unsigned i = 0; i < NPS; i += 2) { dallocx(ps[i], flags); unsigned nupdates0 = nupdates_mock; do_decay(arena_ind); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0, muzzy_npurge0, true); decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0, muzzy_npurge0, false); do_arena_destroy(arena_ind); nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; #undef NPS } TEST_END TEST_BEGIN(test_decay_nonmonotonic) { test_skip_if(check_background_thread_enabled()); #define NPS (SMOOTHSTEP_NSTEPS + 1) int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); void *ps[NPS]; uint64_t npurge0 = 0; uint64_t npurge1 = 0; size_t sz, large0; unsigned i, nupdates0; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); do_epoch(); sz = sizeof(uint64_t); npurge0 = get_arena_npurge(0); nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); monotonic_mock = false; nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (i = 0; i < NPS; i++) { ps[i] = mallocx(large0, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } for (i = 0; i < NPS; i++) { dallocx(ps[i], flags); nupdates0 = nupdates_mock; assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.decay failure"); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } do_epoch(); sz = sizeof(uint64_t); npurge1 = get_arena_npurge(0); if (config_stats) { assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); } nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; #undef NPS } TEST_END TEST_BEGIN(test_decay_now) { test_skip_if(check_background_thread_enabled()); unsigned arena_ind = do_arena_create(0, 0); assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; /* Verify that dirty/muzzy pages never linger after deallocation. 
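 * (The arena was created with dirty_decay_ms = muzzy_decay_ms = 0, which requests immediate purging on deallocation.)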
*/ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { size_t size = sizes[i]; generate_dirty(arena_ind, size); assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); } do_arena_destroy(arena_ind); } TEST_END TEST_BEGIN(test_decay_never) { test_skip_if(check_background_thread_enabled() || !config_stats); unsigned arena_ind = do_arena_create(-1, -1); int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; void *ptrs[sizeof(sizes)/sizeof(size_t)]; for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { ptrs[i] = do_mallocx(sizes[i], flags); } /* Verify that each deallocation generates additional dirty pages. */ size_t pdirty_prev = get_arena_pdirty(arena_ind); size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind); assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages"); assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages"); for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { dallocx(ptrs[i], flags); size_t pdirty = get_arena_pdirty(arena_ind); size_t pmuzzy = get_arena_pmuzzy(arena_ind); assert_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind), pdirty_prev, "Expected dirty pages to increase."); assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages"); pdirty_prev = pdirty; } do_arena_destroy(arena_ind); } TEST_END int main(void) { return test( test_decay_ticks, test_decay_ticker, test_decay_nonmonotonic, test_decay_now, test_decay_never); } jemalloc-sys-0.3.2/rep/test/unit/decay.sh010064400007650000024000000001301344617474100165070ustar0000000000000000#!/bin/sh export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0" jemalloc-sys-0.3.2/rep/test/unit/div.c010064400007650000024000000012651344617474100160260ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/div.h" TEST_BEGIN(test_div_exhaustive) { for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) { div_info_t div_info; div_init(&div_info, divisor); size_t max = 1000 * divisor; if (max < 1000 * 1000) { max = 1000 * 1000; } for (size_t dividend = 0; dividend < 1000 * divisor; dividend += divisor) { size_t quotient = div_compute( &div_info, dividend); assert_zu_eq(dividend, quotient * divisor, "With divisor = %zu, dividend = %zu, " "got quotient %zu", divisor, dividend, quotient); } } } TEST_END int main(void) { return test_no_reentrancy( test_div_exhaustive); } jemalloc-sys-0.3.2/rep/test/unit/emitter.c010064400007650000024000000271071344617474100167200ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/emitter.h" /* * This is so useful for debugging and feature work, we'll leave printing * functionality committed but disabled by default. */ /* Print the text as it will appear. */ static bool print_raw = false; /* Print the text escaped, so it can be copied back into the test case. */ static bool print_escaped = false; typedef struct buf_descriptor_s buf_descriptor_t; struct buf_descriptor_s { char *buf; size_t len; bool mid_quote; }; /* * Forwards all writes to the passed-in buf_v (which should be cast from a * buf_descriptor_t *). 
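 * Installed as the emitter's write callback so each test can capture emitter output into a plain buffer for comparison against the expected strings.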
*/ static void forwarding_cb(void *buf_descriptor_v, const char *str) { buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v; if (print_raw) { malloc_printf("%s", str); } if (print_escaped) { const char *it = str; while (*it != '\0') { if (!buf_descriptor->mid_quote) { malloc_printf("\""); buf_descriptor->mid_quote = true; } switch (*it) { case '\\': malloc_printf("\\"); break; case '\"': malloc_printf("\\\""); break; case '\t': malloc_printf("\\t"); break; case '\n': malloc_printf("\\n\"\n"); buf_descriptor->mid_quote = false; break; default: malloc_printf("%c", *it); } it++; } } size_t written = malloc_snprintf(buf_descriptor->buf, buf_descriptor->len, "%s", str); assert_zu_eq(written, strlen(str), "Buffer overflow!"); buf_descriptor->buf += written; buf_descriptor->len -= written; assert_zu_gt(buf_descriptor->len, 0, "Buffer out of space!"); } static void assert_emit_output(void (*emit_fn)(emitter_t *), const char *expected_json_output, const char *expected_table_output) { emitter_t emitter; char buf[MALLOC_PRINTF_BUFSIZE]; buf_descriptor_t buf_descriptor; buf_descriptor.buf = buf; buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; buf_descriptor.mid_quote = false; emitter_init(&emitter, emitter_output_json, &forwarding_cb, &buf_descriptor); (*emit_fn)(&emitter); assert_str_eq(expected_json_output, buf, "json output failure"); buf_descriptor.buf = buf; buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; buf_descriptor.mid_quote = false; emitter_init(&emitter, emitter_output_table, &forwarding_cb, &buf_descriptor); (*emit_fn)(&emitter); assert_str_eq(expected_table_output, buf, "table output failure"); } static void emit_dict(emitter_t *emitter) { bool b_false = false; bool b_true = true; int i_123 = 123; const char *str = "a string"; emitter_begin(emitter); emitter_dict_begin(emitter, "foo", "This is the foo table:"); emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false); emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true); emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123, "note_key1", emitter_type_string, &str); emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str, "note_key2", emitter_type_bool, &b_false); emitter_dict_end(emitter); emitter_end(emitter); } static const char *dict_json = "{\n" "\t\"foo\": {\n" "\t\t\"abc\": false,\n" "\t\t\"def\": true,\n" "\t\t\"ghi\": 123,\n" "\t\t\"jkl\": \"a string\"\n" "\t}\n" "}\n"; static const char *dict_table = "This is the foo table:\n" " ABC: false\n" " DEF: true\n" " GHI: 123 (note_key1: \"a string\")\n" " JKL: \"a string\" (note_key2: false)\n"; TEST_BEGIN(test_dict) { assert_emit_output(&emit_dict, dict_json, dict_table); } TEST_END static void emit_table_printf(emitter_t *emitter) { emitter_begin(emitter); emitter_table_printf(emitter, "Table note 1\n"); emitter_table_printf(emitter, "Table note 2 %s\n", "with format string"); emitter_end(emitter); } static const char *table_printf_json = "{\n" "}\n"; static const char *table_printf_table = "Table note 1\n" "Table note 2 with format string\n"; TEST_BEGIN(test_table_printf) { assert_emit_output(&emit_table_printf, table_printf_json, table_printf_table); } TEST_END static void emit_nested_dict(emitter_t *emitter) { int val = 123; emitter_begin(emitter); emitter_dict_begin(emitter, "json1", "Dict 1"); emitter_dict_begin(emitter, "json2", "Dict 2"); emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val); emitter_dict_end(emitter); /* Close 2 */ emitter_dict_begin(emitter, "json3", "Dict 3"); emitter_dict_end(emitter); /* 
Close 3 */ emitter_dict_end(emitter); /* Close 1 */ emitter_dict_begin(emitter, "json4", "Dict 4"); emitter_kv(emitter, "primitive", "Another primitive", emitter_type_int, &val); emitter_dict_end(emitter); /* Close 4 */ emitter_end(emitter); } static const char *nested_object_json = "{\n" "\t\"json1\": {\n" "\t\t\"json2\": {\n" "\t\t\t\"primitive\": 123\n" "\t\t},\n" "\t\t\"json3\": {\n" "\t\t}\n" "\t},\n" "\t\"json4\": {\n" "\t\t\"primitive\": 123\n" "\t}\n" "}\n"; static const char *nested_object_table = "Dict 1\n" " Dict 2\n" " A primitive: 123\n" " Dict 3\n" "Dict 4\n" " Another primitive: 123\n"; TEST_BEGIN(test_nested_dict) { assert_emit_output(&emit_nested_dict, nested_object_json, nested_object_table); } TEST_END static void emit_types(emitter_t *emitter) { bool b = false; int i = -123; unsigned u = 123; ssize_t zd = -456; size_t zu = 456; const char *str = "string"; uint32_t u32 = 789; uint64_t u64 = 10000000000ULL; emitter_begin(emitter); emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b); emitter_kv(emitter, "k2", "K2", emitter_type_int, &i); emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u); emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd); emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu); emitter_kv(emitter, "k6", "K6", emitter_type_string, &str); emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32); emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64); /* * We don't test the title type, since it's only used for tables. It's * tested in the emitter_table_row tests. */ emitter_end(emitter); } static const char *types_json = "{\n" "\t\"k1\": false,\n" "\t\"k2\": -123,\n" "\t\"k3\": 123,\n" "\t\"k4\": -456,\n" "\t\"k5\": 456,\n" "\t\"k6\": \"string\",\n" "\t\"k7\": 789,\n" "\t\"k8\": 10000000000\n" "}\n"; static const char *types_table = "K1: false\n" "K2: -123\n" "K3: 123\n" "K4: -456\n" "K5: 456\n" "K6: \"string\"\n" "K7: 789\n" "K8: 10000000000\n"; TEST_BEGIN(test_types) { assert_emit_output(&emit_types, types_json, types_table); } TEST_END static void emit_modal(emitter_t *emitter) { int val = 123; emitter_begin(emitter); emitter_dict_begin(emitter, "j0", "T0"); emitter_json_key(emitter, "j1"); emitter_json_object_begin(emitter); emitter_kv(emitter, "i1", "I1", emitter_type_int, &val); emitter_json_kv(emitter, "i2", emitter_type_int, &val); emitter_table_kv(emitter, "I3", emitter_type_int, &val); emitter_table_dict_begin(emitter, "T1"); emitter_kv(emitter, "i4", "I4", emitter_type_int, &val); emitter_json_object_end(emitter); /* Close j1 */ emitter_kv(emitter, "i5", "I5", emitter_type_int, &val); emitter_table_dict_end(emitter); /* Close T1 */ emitter_kv(emitter, "i6", "I6", emitter_type_int, &val); emitter_dict_end(emitter); /* Close j0 / T0 */ emitter_end(emitter); } const char *modal_json = "{\n" "\t\"j0\": {\n" "\t\t\"j1\": {\n" "\t\t\t\"i1\": 123,\n" "\t\t\t\"i2\": 123,\n" "\t\t\t\"i4\": 123\n" "\t\t},\n" "\t\t\"i5\": 123,\n" "\t\t\"i6\": 123\n" "\t}\n" "}\n"; const char *modal_table = "T0\n" " I1: 123\n" " I3: 123\n" " T1\n" " I4: 123\n" " I5: 123\n" " I6: 123\n"; TEST_BEGIN(test_modal) { assert_emit_output(&emit_modal, modal_json, modal_table); } TEST_END static void emit_json_arr(emitter_t *emitter) { int ival = 123; emitter_begin(emitter); emitter_json_key(emitter, "dict"); emitter_json_object_begin(emitter); emitter_json_key(emitter, "arr"); emitter_json_array_begin(emitter); emitter_json_object_begin(emitter); emitter_json_kv(emitter, "foo", emitter_type_int, &ival); emitter_json_object_end(emitter); /* Close arr[0] */ /* 
arr[1] and arr[2] are primitives. */ emitter_json_value(emitter, emitter_type_int, &ival); emitter_json_value(emitter, emitter_type_int, &ival); emitter_json_object_begin(emitter); emitter_json_kv(emitter, "bar", emitter_type_int, &ival); emitter_json_kv(emitter, "baz", emitter_type_int, &ival); emitter_json_object_end(emitter); /* Close arr[3]. */ emitter_json_array_end(emitter); /* Close arr. */ emitter_json_object_end(emitter); /* Close dict. */ emitter_end(emitter); } static const char *json_array_json = "{\n" "\t\"dict\": {\n" "\t\t\"arr\": [\n" "\t\t\t{\n" "\t\t\t\t\"foo\": 123\n" "\t\t\t},\n" "\t\t\t123,\n" "\t\t\t123,\n" "\t\t\t{\n" "\t\t\t\t\"bar\": 123,\n" "\t\t\t\t\"baz\": 123\n" "\t\t\t}\n" "\t\t]\n" "\t}\n" "}\n"; static const char *json_array_table = ""; TEST_BEGIN(test_json_arr) { assert_emit_output(&emit_json_arr, json_array_json, json_array_table); } TEST_END static void emit_json_nested_array(emitter_t *emitter) { int ival = 123; char *sval = "foo"; emitter_begin(emitter); emitter_json_array_begin(emitter); emitter_json_array_begin(emitter); emitter_json_value(emitter, emitter_type_int, &ival); emitter_json_value(emitter, emitter_type_string, &sval); emitter_json_value(emitter, emitter_type_int, &ival); emitter_json_value(emitter, emitter_type_string, &sval); emitter_json_array_end(emitter); emitter_json_array_begin(emitter); emitter_json_value(emitter, emitter_type_int, &ival); emitter_json_array_end(emitter); emitter_json_array_begin(emitter); emitter_json_value(emitter, emitter_type_string, &sval); emitter_json_value(emitter, emitter_type_int, &ival); emitter_json_array_end(emitter); emitter_json_array_begin(emitter); emitter_json_array_end(emitter); emitter_json_array_end(emitter); emitter_end(emitter); } static const char *json_nested_array_json = "{\n" "\t[\n" "\t\t[\n" "\t\t\t123,\n" "\t\t\t\"foo\",\n" "\t\t\t123,\n" "\t\t\t\"foo\"\n" "\t\t],\n" "\t\t[\n" "\t\t\t123\n" "\t\t],\n" "\t\t[\n" "\t\t\t\"foo\",\n" "\t\t\t123\n" "\t\t],\n" "\t\t[\n" "\t\t]\n" "\t]\n" "}\n"; TEST_BEGIN(test_json_nested_arr) { assert_emit_output(&emit_json_nested_array, json_nested_array_json, json_array_table); } TEST_END static void emit_table_row(emitter_t *emitter) { emitter_begin(emitter); emitter_row_t row; emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}}; abc.str_val = "ABC title"; emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}}; def.str_val = "DEF title"; emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}}; ghi.str_val = "GHI"; emitter_row_init(&row); emitter_col_init(&abc, &row); emitter_col_init(&def, &row); emitter_col_init(&ghi, &row); emitter_table_row(emitter, &row); abc.type = emitter_type_int; def.type = emitter_type_bool; ghi.type = emitter_type_int; abc.int_val = 123; def.bool_val = true; ghi.int_val = 456; emitter_table_row(emitter, &row); abc.int_val = 789; def.bool_val = false; ghi.int_val = 1011; emitter_table_row(emitter, &row); abc.type = emitter_type_string; abc.str_val = "a string"; def.bool_val = false; ghi.type = emitter_type_title; ghi.str_val = "ghi"; emitter_table_row(emitter, &row); emitter_end(emitter); } static const char *table_row_json = "{\n" "}\n"; static const char *table_row_table = "ABC title DEF title GHI\n" "123 true 456\n" "789 false 1011\n" "\"a string\" false ghi\n"; TEST_BEGIN(test_table_row) { assert_emit_output(&emit_table_row, table_row_json, table_row_table); } TEST_END int main(void) { return test_no_reentrancy( test_dict, test_table_printf, 
test_nested_dict, test_types, test_modal, test_json_arr, test_json_nested_arr, test_table_row); } jemalloc-sys-0.3.2/rep/test/unit/extent_quantize.c010064400007650000024000000103511344617474100204670ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_small_extent_size) { unsigned nbins, i; size_t sz, extent_size; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all small size classes, get their extent sizes, and * verify that the quantized size is the same as the extent size. */ sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); for (i = 0; i < nbins; i++) { mib[2] = i; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); assert_zu_eq(extent_size, extent_size_quantize_floor(extent_size), "Small extent quantization should be a no-op " "(extent_size=%zu)", extent_size); assert_zu_eq(extent_size, extent_size_quantize_ceil(extent_size), "Small extent quantization should be a no-op " "(extent_size=%zu)", extent_size); } } TEST_END TEST_BEGIN(test_large_extent_size) { bool cache_oblivious; unsigned nlextents, i; size_t sz, extent_size_prev, ceil_prev; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all large size classes, get their extent sizes, and * verify that the quantized size is the same as the extent size. */ sz = sizeof(bool); assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, &sz, NULL, 0), 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); for (i = 0; i < nlextents; i++) { size_t lextent_size, extent_size, floor, ceil; mib[2] = i; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); extent_size = cache_oblivious ? 
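/* With cache-oblivious large allocations enabled, jemalloc backs each large size class with one extra page, so the true extent size is the size-class size plus PAGE. */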
lextent_size + PAGE : lextent_size; floor = extent_size_quantize_floor(extent_size); ceil = extent_size_quantize_ceil(extent_size); assert_zu_eq(extent_size, floor, "Extent quantization should be a no-op for precise size " "(lextent_size=%zu, extent_size=%zu)", lextent_size, extent_size); assert_zu_eq(extent_size, ceil, "Extent quantization should be a no-op for precise size " "(lextent_size=%zu, extent_size=%zu)", lextent_size, extent_size); if (i > 0) { assert_zu_eq(extent_size_prev, extent_size_quantize_floor(extent_size - PAGE), "Floor should be a precise size"); if (extent_size_prev < ceil_prev) { assert_zu_eq(ceil_prev, extent_size, "Ceiling should be a precise size " "(extent_size_prev=%zu, ceil_prev=%zu, " "extent_size=%zu)", extent_size_prev, ceil_prev, extent_size); } } if (i + 1 < nlextents) { extent_size_prev = floor; ceil_prev = extent_size_quantize_ceil(extent_size + PAGE); } } } TEST_END TEST_BEGIN(test_monotonic) { #define SZ_MAX ZU(4 * 1024 * 1024) unsigned i; size_t floor_prev, ceil_prev; floor_prev = 0; ceil_prev = 0; for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) { size_t extent_size, floor, ceil; extent_size = i << LG_PAGE; floor = extent_size_quantize_floor(extent_size); ceil = extent_size_quantize_ceil(extent_size); assert_zu_le(floor, extent_size, "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)", floor, extent_size, ceil); assert_zu_ge(ceil, extent_size, "Ceiling should be >= (floor=%zu, extent_size=%zu, " "ceil=%zu)", floor, extent_size, ceil); assert_zu_le(floor_prev, floor, "Floor should be monotonic " "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)", floor_prev, floor, extent_size, ceil); assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)", floor, extent_size, ceil_prev, ceil); floor_prev = floor; ceil_prev = ceil; } } TEST_END int main(void) { return test( test_small_extent_size, test_large_extent_size, test_monotonic); } jemalloc-sys-0.3.2/rep/test/unit/fork.c010064400007650000024000000056571344617474100162160ustar0000000000000000#include "test/jemalloc_test.h" #ifndef _WIN32 #include #endif #ifndef _WIN32 static void wait_for_child_exit(int pid) { int status; while (true) { if (waitpid(pid, &status, 0) == -1) { test_fail("Unexpected waitpid() failure."); } if (WIFSIGNALED(status)) { test_fail("Unexpected child termination due to " "signal %d", WTERMSIG(status)); break; } if (WIFEXITED(status)) { if (WEXITSTATUS(status) != 0) { test_fail("Unexpected child exit value %d", WEXITSTATUS(status)); } break; } } } #endif TEST_BEGIN(test_fork) { #ifndef _WIN32 void *p; pid_t pid; /* Set up a manually managed arena for test. */ unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); /* Migrate to the new arena. */ unsigned old_arena_ind; sz = sizeof(old_arena_ind); assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&arena_ind, sizeof(arena_ind)), 0, "Unexpected mallctl() failure"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); pid = fork(); free(p); p = malloc(64); assert_ptr_not_null(p, "Unexpected malloc() failure"); free(p); if (pid == -1) { /* Error. */ test_fail("Unexpected fork() failure"); } else if (pid == 0) { /* Child. 
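 * Both processes already exercised malloc()/free() after the fork; the child simply exits with success and the parent reaps it.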
*/ _exit(0); } else { wait_for_child_exit(pid); } #else test_skip("fork(2) is irrelevant to Windows"); #endif } TEST_END #ifndef _WIN32 static void * do_fork_thd(void *arg) { malloc(1); int pid = fork(); if (pid == -1) { /* Error. */ test_fail("Unexpected fork() failure"); } else if (pid == 0) { /* Child. */ char *args[] = {"true", NULL}; execvp(args[0], args); test_fail("Exec failed"); } else { /* Parent */ wait_for_child_exit(pid); } return NULL; } #endif #ifndef _WIN32 static void do_test_fork_multithreaded() { thd_t child; thd_create(&child, do_fork_thd, NULL); do_fork_thd(NULL); thd_join(child, NULL); } #endif TEST_BEGIN(test_fork_multithreaded) { #ifndef _WIN32 /* * We've seen bugs involving hanging on arenas_lock (though the same * class of bugs can happen on any mutex). The bugs are intermittent * though, so we want to run the test multiple times. Since we hold the * arenas lock only early in the process lifetime, we can't just run * this test in a loop (since, after all the arenas are initialized, we * won't acquire arenas_lock any further). We therefore repeat the test * with multiple processes. */ for (int i = 0; i < 100; i++) { int pid = fork(); if (pid == -1) { /* Error. */ test_fail("Unexpected fork() failure,"); } else if (pid == 0) { /* Child. */ do_test_fork_multithreaded(); _exit(0); } else { wait_for_child_exit(pid); } } #else test_skip("fork(2) is irrelevant to Windows"); #endif } TEST_END int main(void) { return test_no_reentrancy( test_fork, test_fork_multithreaded); } jemalloc-sys-0.3.2/rep/test/unit/hash.c010064400007650000024000000116741344617474100161740ustar0000000000000000/* * This file is based on code that is part of SMHasher * (https://code.google.com/p/smhasher/), and is subject to the MIT license * (http://www.opensource.org/licenses/mit-license.php). Both email addresses * associated with the source code's revision history belong to Austin Appleby, * and the revision history ranges from 2010 to 2012. Therefore the copyright * and license are here taken to be: * * Copyright (c) 2010-2012 Austin Appleby * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "test/jemalloc_test.h" #include "jemalloc/internal/hash.h" typedef enum { hash_variant_x86_32, hash_variant_x86_128, hash_variant_x64_128 } hash_variant_t; static int hash_variant_bits(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return 32; case hash_variant_x86_128: return 128; case hash_variant_x64_128: return 128; default: not_reached(); } } static const char * hash_variant_string(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return "hash_x86_32"; case hash_variant_x86_128: return "hash_x86_128"; case hash_variant_x64_128: return "hash_x64_128"; default: not_reached(); } } #define KEY_SIZE 256 static void hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { const int hashbytes = hash_variant_bits(variant) / 8; const int hashes_size = hashbytes * 256; VARIABLE_ARRAY(uint8_t, hashes, hashes_size); VARIABLE_ARRAY(uint8_t, final, hashbytes); unsigned i; uint32_t computed, expected; memset(key, 0, KEY_SIZE); memset(hashes, 0, hashes_size); memset(final, 0, hashbytes); /* * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the * seed. */ for (i = 0; i < 256; i++) { key[i] = (uint8_t)i; switch (variant) { case hash_variant_x86_32: { uint32_t out; out = hash_x86_32(key, i, 256-i); memcpy(&hashes[i*hashbytes], &out, hashbytes); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } default: not_reached(); } } /* Hash the result array. */ switch (variant) { case hash_variant_x86_32: { uint32_t out = hash_x86_32(hashes, hashes_size, 0); memcpy(final, &out, sizeof(out)); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } default: not_reached(); } computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | (final[3] << 24); switch (variant) { #ifdef JEMALLOC_BIG_ENDIAN case hash_variant_x86_32: expected = 0x6213303eU; break; case hash_variant_x86_128: expected = 0x266820caU; break; case hash_variant_x64_128: expected = 0xcc622b6fU; break; #else case hash_variant_x86_32: expected = 0xb0f57ee3U; break; case hash_variant_x86_128: expected = 0xb3ece62aU; break; case hash_variant_x64_128: expected = 0x6384ba69U; break; #endif default: not_reached(); } assert_u32_eq(computed, expected, "Hash mismatch for %s(): expected %#x but got %#x", hash_variant_string(variant), expected, computed); } static void hash_variant_verify(hash_variant_t variant) { #define MAX_ALIGN 16 uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; unsigned i; for (i = 0; i < MAX_ALIGN; i++) { hash_variant_verify_key(variant, &key[i]); } #undef MAX_ALIGN } #undef KEY_SIZE TEST_BEGIN(test_hash_x86_32) { hash_variant_verify(hash_variant_x86_32); } TEST_END TEST_BEGIN(test_hash_x86_128) { hash_variant_verify(hash_variant_x86_128); } TEST_END TEST_BEGIN(test_hash_x64_128) { hash_variant_verify(hash_variant_x64_128); } TEST_END int main(void) { return test( test_hash_x86_32, test_hash_x86_128, test_hash_x64_128); } jemalloc-sys-0.3.2/rep/test/unit/hook.c010064400007650000024000000446141344617474100162110ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/hook.h" static void *arg_extra; static 
int arg_type; static void *arg_result; static void *arg_address; static size_t arg_old_usize; static size_t arg_new_usize; static uintptr_t arg_result_raw; static uintptr_t arg_args_raw[4]; static int call_count = 0; static void reset_args() { arg_extra = NULL; arg_type = 12345; arg_result = NULL; arg_address = NULL; arg_old_usize = 0; arg_new_usize = 0; arg_result_raw = 0; memset(arg_args_raw, 77, sizeof(arg_args_raw)); } static void alloc_free_size(size_t sz) { void *ptr = mallocx(1, 0); free(ptr); ptr = mallocx(1, 0); free(ptr); ptr = mallocx(1, MALLOCX_TCACHE_NONE); dallocx(ptr, MALLOCX_TCACHE_NONE); } /* * We want to support a degree of user reentrancy. This tests a variety of * allocation scenarios. */ static void be_reentrant() { /* Let's make sure the tcache is non-empty if enabled. */ alloc_free_size(1); alloc_free_size(1024); alloc_free_size(64 * 1024); alloc_free_size(256 * 1024); alloc_free_size(1024 * 1024); /* Some reallocation. */ void *ptr = mallocx(129, 0); ptr = rallocx(ptr, 130, 0); free(ptr); ptr = mallocx(2 * 1024 * 1024, 0); free(ptr); ptr = mallocx(1 * 1024 * 1024, 0); ptr = rallocx(ptr, 2 * 1024 * 1024, 0); free(ptr); ptr = mallocx(1, 0); ptr = rallocx(ptr, 1000, 0); free(ptr); } static void set_args_raw(uintptr_t *args_raw, int nargs) { memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs); } static void assert_args_raw(uintptr_t *args_raw_expected, int nargs) { int cmp = memcmp(args_raw_expected, arg_args_raw, sizeof(uintptr_t) * nargs); assert_d_eq(cmp, 0, "Raw args mismatch"); } static void reset() { call_count = 0; reset_args(); } static void test_alloc_hook(void *extra, hook_alloc_t type, void *result, uintptr_t result_raw, uintptr_t args_raw[3]) { call_count++; arg_extra = extra; arg_type = (int)type; arg_result = result; arg_result_raw = result_raw; set_args_raw(args_raw, 3); be_reentrant(); } static void test_dalloc_hook(void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]) { call_count++; arg_extra = extra; arg_type = (int)type; arg_address = address; set_args_raw(args_raw, 3); be_reentrant(); } static void test_expand_hook(void *extra, hook_expand_t type, void *address, size_t old_usize, size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) { call_count++; arg_extra = extra; arg_type = (int)type; arg_address = address; arg_old_usize = old_usize; arg_new_usize = new_usize; arg_result_raw = result_raw; set_args_raw(args_raw, 4); be_reentrant(); } TEST_BEGIN(test_hooks_basic) { /* Just verify that the record their arguments correctly. 
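 * That is, verify that each installed hook receives the user's extra pointer, the operation-type tag, the (raw) result, and the raw argument array.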
*/ hooks_t hooks = { &test_alloc_hook, &test_dalloc_hook, &test_expand_hook, (void *)111}; void *handle = hook_install(TSDN_NULL, &hooks); uintptr_t args_raw[4] = {10, 20, 30, 40}; /* Alloc */ reset_args(); hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333, args_raw); assert_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer"); assert_d_eq((int)hook_alloc_posix_memalign, arg_type, "Passed wrong alloc type"); assert_ptr_eq((void *)222, arg_result, "Passed wrong result address"); assert_u64_eq(333, arg_result_raw, "Passed wrong result"); assert_args_raw(args_raw, 3); /* Dalloc */ reset_args(); hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw); assert_d_eq((int)hook_dalloc_sdallocx, arg_type, "Passed wrong dalloc type"); assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer"); assert_ptr_eq((void *)222, arg_address, "Passed wrong address"); assert_args_raw(args_raw, 3); /* Expand */ reset_args(); hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555, args_raw); assert_d_eq((int)hook_expand_xallocx, arg_type, "Passed wrong expand type"); assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer"); assert_ptr_eq((void *)222, arg_address, "Passed wrong address"); assert_zu_eq(333, arg_old_usize, "Passed wrong old usize"); assert_zu_eq(444, arg_new_usize, "Passed wrong new usize"); assert_zu_eq(555, arg_result_raw, "Passed wrong result"); assert_args_raw(args_raw, 4); hook_remove(TSDN_NULL, handle); } TEST_END TEST_BEGIN(test_hooks_null) { /* Null hooks should be ignored, not crash. */ hooks_t hooks1 = {NULL, NULL, NULL, NULL}; hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL}; hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL}; hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL}; void *handle1 = hook_install(TSDN_NULL, &hooks1); void *handle2 = hook_install(TSDN_NULL, &hooks2); void *handle3 = hook_install(TSDN_NULL, &hooks3); void *handle4 = hook_install(TSDN_NULL, &hooks4); assert_ptr_ne(handle1, NULL, "Hook installation failed"); assert_ptr_ne(handle2, NULL, "Hook installation failed"); assert_ptr_ne(handle3, NULL, "Hook installation failed"); assert_ptr_ne(handle4, NULL, "Hook installation failed"); uintptr_t args_raw[4] = {10, 20, 30, 40}; call_count = 0; hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw); assert_d_eq(call_count, 1, "Called wrong number of times"); call_count = 0; hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw); assert_d_eq(call_count, 1, "Called wrong number of times"); call_count = 0; hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw); assert_d_eq(call_count, 1, "Called wrong number of times"); hook_remove(TSDN_NULL, handle1); hook_remove(TSDN_NULL, handle2); hook_remove(TSDN_NULL, handle3); hook_remove(TSDN_NULL, handle4); } TEST_END TEST_BEGIN(test_hooks_remove) { hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL}; void *handle = hook_install(TSDN_NULL, &hooks); assert_ptr_ne(handle, NULL, "Hook installation failed"); call_count = 0; uintptr_t args_raw[4] = {10, 20, 30, 40}; hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw); assert_d_eq(call_count, 1, "Hook not invoked"); call_count = 0; hook_remove(TSDN_NULL, handle); hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL); assert_d_eq(call_count, 0, "Hook invoked after removal"); } TEST_END TEST_BEGIN(test_hooks_alloc_simple) { /* "Simple" in the sense that we're not in a realloc variant. 
*/ hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123}; void *handle = hook_install(TSDN_NULL, &hooks); assert_ptr_ne(handle, NULL, "Hook installation failed"); /* Stop malloc from being optimized away. */ volatile int err; void *volatile ptr; /* malloc */ reset(); ptr = malloc(1); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); free(ptr); /* posix_memalign */ reset(); err = posix_memalign((void **)&ptr, 1024, 1); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_posix_memalign, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument"); assert_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument"); free(ptr); /* aligned_alloc */ reset(); ptr = aligned_alloc(1024, 1); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_aligned_alloc, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); free(ptr); /* calloc */ reset(); ptr = calloc(11, 13); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument"); free(ptr); /* memalign */ #ifdef JEMALLOC_OVERRIDE_MEMALIGN reset(); ptr = memalign(1024, 1); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); free(ptr); #endif /* JEMALLOC_OVERRIDE_MEMALIGN */ /* valloc */ #ifdef JEMALLOC_OVERRIDE_VALLOC reset(); ptr = valloc(1); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); free(ptr); #endif /* JEMALLOC_OVERRIDE_VALLOC */ /* mallocx */ reset(); ptr = mallocx(1, MALLOCX_LG_ALIGN(10)); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); 
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1], "Wrong flags"); free(ptr); hook_remove(TSDN_NULL, handle); } TEST_END TEST_BEGIN(test_hooks_dalloc_simple) { /* "Simple" in the sense that we're not in a realloc variant. */ hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123}; void *handle = hook_install(TSDN_NULL, &hooks); assert_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; /* free() */ reset(); ptr = malloc(1); free(ptr); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); /* dallocx() */ reset(); ptr = malloc(1); dallocx(ptr, MALLOCX_TCACHE_NONE); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1], "Wrong raw arg"); /* sdallocx() */ reset(); ptr = malloc(1); sdallocx(ptr, 1, MALLOCX_TCACHE_NONE); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg"); assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2], "Wrong raw arg"); hook_remove(TSDN_NULL, handle); } TEST_END TEST_BEGIN(test_hooks_expand_simple) { /* "Simple" in the sense that we're not in a realloc variant. 
*/ hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123}; void *handle = hook_install(TSDN_NULL, &hooks); assert_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; /* xallocx() */ reset(); ptr = malloc(1); size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong pointer expanded"); assert_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize"); assert_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize"); assert_u64_eq(new_usize, arg_result_raw, "Wrong result"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg"); assert_u64_eq(100, arg_args_raw[1], "Wrong arg"); assert_u64_eq(200, arg_args_raw[2], "Wrong arg"); assert_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg"); hook_remove(TSDN_NULL, handle); } TEST_END TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) { hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook, (void *)123}; void *handle = hook_install(TSDN_NULL, &hooks); assert_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; /* realloc(NULL, size) as malloc */ reset(); ptr = realloc(NULL, 1); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); free(ptr); /* realloc(ptr, 0) as free */ ptr = malloc(1); reset(); realloc(ptr, 0); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_dalloc_realloc, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong raw arg"); /* realloc(NULL, 0) as malloc(0) */ reset(); ptr = realloc(NULL, 0); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type"); assert_ptr_eq(ptr, arg_result, "Wrong result"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument"); free(ptr); hook_remove(TSDN_NULL, handle); } TEST_END static void do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags, int expand_type, int dalloc_type) { hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook, (void *)123}; void *handle = hook_install(TSDN_NULL, &hooks); assert_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; void *volatile ptr2; /* Realloc in-place, small. 
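 * (129 and 130 round up to the same small size class, so the reallocation is expected to stay in place.)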
*/ ptr = malloc(129); reset(); ptr2 = ralloc(ptr, 130, flags); assert_ptr_eq(ptr, ptr2, "Small realloc moved"); assert_d_eq(call_count, 1, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, expand_type, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong address"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument"); free(ptr); /* * Realloc in-place, large. Since we can't guarantee the large case * across all platforms, we stay resilient to moving results. */ ptr = malloc(2 * 1024 * 1024); free(ptr); ptr2 = malloc(1 * 1024 * 1024); reset(); ptr = ralloc(ptr2, 2 * 1024 * 1024, flags); /* ptr is the new address, ptr2 is the old address. */ if (ptr == ptr2) { assert_d_eq(call_count, 1, "Hook not called"); assert_d_eq(arg_type, expand_type, "Wrong hook type"); } else { assert_d_eq(call_count, 2, "Wrong hooks called"); assert_ptr_eq(ptr, arg_result, "Wrong address"); assert_d_eq(arg_type, dalloc_type, "Wrong hook type"); } assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_ptr_eq(ptr2, arg_address, "Wrong address"); assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1], "Wrong argument"); free(ptr); /* Realloc with move, small. */ ptr = malloc(8); reset(); ptr2 = ralloc(ptr, 128, flags); assert_ptr_ne(ptr, ptr2, "Small realloc didn't move"); assert_d_eq(call_count, 2, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, dalloc_type, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong address"); assert_ptr_eq(ptr2, arg_result, "Wrong address"); assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument"); free(ptr2); /* Realloc with move, large. */ ptr = malloc(1); reset(); ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags); assert_ptr_ne(ptr, ptr2, "Large realloc didn't move"); assert_d_eq(call_count, 2, "Hook not called"); assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); assert_d_eq(arg_type, dalloc_type, "Wrong hook type"); assert_ptr_eq(ptr, arg_address, "Wrong address"); assert_ptr_eq(ptr2, arg_result, "Wrong address"); assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw, "Wrong raw result"); assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1], "Wrong argument"); free(ptr2); hook_remove(TSDN_NULL, handle); } static void * realloc_wrapper(void *ptr, size_t size, UNUSED int flags) { return realloc(ptr, size); } TEST_BEGIN(test_hooks_realloc) { do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc, hook_dalloc_realloc); } TEST_END TEST_BEGIN(test_hooks_rallocx) { do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx, hook_dalloc_rallocx); } TEST_END int main(void) { /* We assert on call counts. 
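 * A reentrant test harness would invoke the hooks extra times and skew those counts, hence test_no_reentrancy().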
*/ return test_no_reentrancy( test_hooks_basic, test_hooks_null, test_hooks_remove, test_hooks_alloc_simple, test_hooks_dalloc_simple, test_hooks_expand_simple, test_hooks_realloc_as_malloc_or_free, test_hooks_realloc, test_hooks_rallocx); } jemalloc-sys-0.3.2/rep/test/unit/huge.c010064400007650000024000000072051344617474100161740ustar0000000000000000#include "test/jemalloc_test.h" /* Threshold: 2 << 20 = 2097152. */ const char *malloc_conf = "oversize_threshold:2097152"; #define HUGE_SZ (2 << 20) #define SMALL_SZ (8) TEST_BEGIN(huge_bind_thread) { unsigned arena1, arena2; size_t sz = sizeof(unsigned); /* Bind to a manual arena. */ assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0, "Failed to create arena"); assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena1, sizeof(arena1)), 0, "Fail to bind thread"); void *ptr = mallocx(HUGE_SZ, 0); assert_ptr_not_null(ptr, "Fail to allocate huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_eq(arena1, arena2, "Wrong arena used after binding"); dallocx(ptr, 0); /* Switch back to arena 0. */ test_skip_if(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); arena2 = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena2, sizeof(arena2)), 0, "Fail to bind thread"); ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_eq(arena2, 0, "Wrong arena used after binding"); dallocx(ptr, MALLOCX_TCACHE_NONE); /* Then huge allocation should use the huge arena. */ ptr = mallocx(HUGE_SZ, 0); assert_ptr_not_null(ptr, "Fail to allocate huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_ne(arena2, 0, "Wrong arena used after binding"); assert_u_ne(arena1, arena2, "Wrong arena used after binding"); dallocx(ptr, 0); } TEST_END TEST_BEGIN(huge_mallocx) { unsigned arena1, arena2; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0, "Failed to create arena"); void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1)); assert_ptr_not_null(huge, "Fail to allocate huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge, sizeof(huge)), 0, "Unexpected mallctl() failure"); assert_u_eq(arena1, arena2, "Wrong arena used for mallocx"); dallocx(huge, MALLOCX_ARENA(arena1)); void *huge2 = mallocx(HUGE_SZ, 0); assert_ptr_not_null(huge2, "Fail to allocate huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2, sizeof(huge2)), 0, "Unexpected mallctl() failure"); assert_u_ne(arena1, arena2, "Huge allocation should not come from the manual arena."); assert_u_ne(arena2, 0, "Huge allocation should not come from arena 0."); dallocx(huge2, 0); } TEST_END TEST_BEGIN(huge_allocation) { unsigned arena1, arena2; void *ptr = mallocx(HUGE_SZ, 0); assert_ptr_not_null(ptr, "Fail to allocate huge size"); size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0"); dallocx(ptr, 0); ptr = mallocx(HUGE_SZ >> 1, 0); assert_ptr_not_null(ptr, "Fail to allocate half huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_ne(arena1, arena2, "Wrong arena used for half huge"); dallocx(ptr, 0); ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
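/* The small allocation above bypasses the tcache (MALLOCX_TCACHE_NONE) so that arenas.lookup reports the owning arena directly; it should land in a regular automatic arena rather than the dedicated huge arena found above. */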
assert_ptr_not_null(ptr, "Fail to allocate small size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_ne(arena1, arena2, "Huge and small should be from different arenas"); dallocx(ptr, 0); } TEST_END int main(void) { return test( huge_allocation, huge_mallocx, huge_bind_thread); } jemalloc-sys-0.3.2/rep/test/unit/junk.c010064400007650000024000000070341344617474100162130ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/util.h" static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; static large_dalloc_junk_t *large_dalloc_junk_orig; static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig; static void *watch_for_junking; static bool saw_junking; static void watch_junking(void *p) { watch_for_junking = p; saw_junking = false; } static void arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) { size_t i; arena_dalloc_junk_small_orig(ptr, bin_info); for (i = 0; i < bin_info->reg_size; i++) { assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, bin_info->reg_size); } if (ptr == watch_for_junking) { saw_junking = true; } } static void large_dalloc_junk_intercept(void *ptr, size_t usize) { size_t i; large_dalloc_junk_orig(ptr, usize); for (i = 0; i < usize; i++) { assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, usize); } if (ptr == watch_for_junking) { saw_junking = true; } } static void large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) { large_dalloc_maybe_junk_orig(ptr, usize); if (ptr == watch_for_junking) { saw_junking = true; } } static void test_junk(size_t sz_min, size_t sz_max) { uint8_t *s; size_t sz_prev, sz, i; if (opt_junk_free) { arena_dalloc_junk_small_orig = arena_dalloc_junk_small; arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; large_dalloc_junk_orig = large_dalloc_junk; large_dalloc_junk = large_dalloc_junk_intercept; large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk; large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept; } sz_prev = 0; s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { assert_u_eq(s[0], 'a', "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); assert_u_eq(s[sz_prev-1], 'a', "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { if (opt_junk_alloc) { assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK, "Newly allocated byte %zu/%zu isn't " "junk-filled", i, sz); } s[i] = 'a'; } if (xallocx(s, sz+1, 0, 0) == sz) { uint8_t *t; watch_junking(s); t = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)t, "Unexpected rallocx() failure"); assert_zu_ge(sallocx(t, 0), sz+1, "Unexpectedly small rallocx() result"); if (!background_thread_enabled()) { assert_ptr_ne(s, t, "Unexpected in-place rallocx()"); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be " "junk-filled", sz); } s = t; } } watch_junking(s); dallocx(s, 0); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be junk-filled", sz); if (opt_junk_free) { arena_dalloc_junk_small = arena_dalloc_junk_small_orig; large_dalloc_junk = large_dalloc_junk_orig; large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig; } } TEST_BEGIN(test_junk_small) { test_skip_if(!config_fill); 
test_junk(1, SC_SMALL_MAXCLASS - 1); } TEST_END TEST_BEGIN(test_junk_large) { test_skip_if(!config_fill); test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1))); } TEST_END int main(void) { return test( test_junk_small, test_junk_large); } jemalloc-sys-0.3.2/rep/test/unit/junk.sh010064400007650000024000000001551344617474100164000ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,zero:false,junk:true" fi jemalloc-sys-0.3.2/rep/test/unit/junk_alloc.c010064400007650000024000000000221344617474100173530ustar0000000000000000#include "junk.c" jemalloc-sys-0.3.2/rep/test/unit/junk_alloc.sh010064400007650000024000000001561344617474100175530ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,zero:false,junk:alloc" fi jemalloc-sys-0.3.2/rep/test/unit/junk_free.c010064400007650000024000000000221344617474100172020ustar0000000000000000#include "junk.c" jemalloc-sys-0.3.2/rep/test/unit/junk_free.sh010064400007650000024000000001551344617474100174010ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,zero:false,junk:free" fi jemalloc-sys-0.3.2/rep/test/unit/log.c010064400007650000024000000077141344617474100160320ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/log.h" static void expect_no_logging(const char *names) { log_var_t log_l1 = LOG_VAR_INIT("l1"); log_var_t log_l2 = LOG_VAR_INIT("l2"); log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); strcpy(log_var_names, names); int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) log_do_begin(log_l2) count++; log_do_end(log_l2) log_do_begin(log_l2_a) count++; log_do_end(log_l2_a) } assert_d_eq(count, 0, "Disabled logging not ignored!"); } TEST_BEGIN(test_log_disabled) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); expect_no_logging(""); expect_no_logging("abc"); expect_no_logging("a.b.c"); expect_no_logging("l12"); expect_no_logging("l123|a456|b789"); expect_no_logging("|||"); } TEST_END TEST_BEGIN(test_log_enabled_direct) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); log_var_t log_l1 = LOG_VAR_INIT("l1"); log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); log_var_t log_l2 = LOG_VAR_INIT("l2"); int count; count = 0; strcpy(log_var_names, "l1"); for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) } assert_d_eq(count, 10, "Mis-logged!"); count = 0; strcpy(log_var_names, "l1.a"); for (int i = 0; i < 10; i++) { log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) } assert_d_eq(count, 10, "Mis-logged!"); count = 0; strcpy(log_var_names, "l1.a|abc|l2|def"); for (int i = 0; i < 10; i++) { log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) log_do_begin(log_l2) count++; log_do_end(log_l2) } assert_d_eq(count, 20, "Mis-logged!"); } TEST_END TEST_BEGIN(test_log_enabled_indirect) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); strcpy(log_var_names, "l0|l1|abc|l2.b|def"); /* On. */ log_var_t log_l1 = LOG_VAR_INIT("l1"); /* Off. */ log_var_t log_l1a = LOG_VAR_INIT("l1a"); /* On. */ log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); /* Off. */ log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); /* On. */ log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a"); /* On. */ log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b"); /* 4 are on total, so should sum to 40. 
*/ int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) log_do_begin(log_l1a) count++; log_do_end(log_l1a) log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) log_do_begin(log_l2_a) count++; log_do_end(log_l2_a) log_do_begin(log_l2_b_a) count++; log_do_end(log_l2_b_a) log_do_begin(log_l2_b_b) count++; log_do_end(log_l2_b_b) } assert_d_eq(count, 40, "Mis-logged!"); } TEST_END TEST_BEGIN(test_log_enabled_global) { test_skip_if(!config_log); atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); strcpy(log_var_names, "abc|.|def"); log_var_t log_l1 = LOG_VAR_INIT("l1"); log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a"); int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(log_l1) count++; log_do_end(log_l1) log_do_begin(log_l2_a_a) count++; log_do_end(log_l2_a_a) } assert_d_eq(count, 20, "Mis-logged!"); } TEST_END TEST_BEGIN(test_logs_if_no_init) { test_skip_if(!config_log); atomic_store_b(&log_init_done, false, ATOMIC_RELAXED); log_var_t l = LOG_VAR_INIT("definitely.not.enabled"); int count = 0; for (int i = 0; i < 10; i++) { log_do_begin(l) count++; log_do_end(l) } assert_d_eq(count, 0, "Logging shouldn't happen if not initialized."); } TEST_END /* * This really just checks to make sure that this usage compiles; we don't have * any test code to run. */ TEST_BEGIN(test_log_only_format_string) { if (false) { LOG("log_str", "No arguments follow this format string."); } } TEST_END int main(void) { return test( test_log_disabled, test_log_enabled_direct, test_log_enabled_indirect, test_log_enabled_global, test_logs_if_no_init, test_log_only_format_string); } jemalloc-sys-0.3.2/rep/test/unit/mallctl.c010064400007650000024000000705441344617474100167020ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/hook.h" #include "jemalloc/internal/util.h" TEST_BEGIN(test_mallctl_errors) { uint64_t epoch; size_t sz; assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, "mallctl() should return ENOENT for non-existent names"); assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on attempt to write " "read-only value"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)-1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)+1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctlnametomib_errors) { size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, "mallctlnametomib() should return ENOENT for non-existent names"); } TEST_END TEST_BEGIN(test_mallctlbymib_errors) { uint64_t epoch; size_t sz; size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " "attempt to write read-only value"); miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); 
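/* As in test_mallctl_errors above, reads and writes through the resolved "epoch" mib must fail with EINVAL whenever the supplied size is one byte smaller or larger than sizeof(epoch). */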
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)-1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)+1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctl_read_write) { uint64_t old_epoch, new_epoch; size_t sz = sizeof(old_epoch); /* Blind. */ assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read. */ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Write. */ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read+write. */ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, (void *)&new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); } TEST_END TEST_BEGIN(test_mallctlnametomib_short_mib) { size_t mib[4]; size_t miblen; miblen = 3; mib[3] = 42; assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_zu_eq(miblen, 3, "Unexpected mib output length"); assert_zu_eq(mib[3], 42, "mallctlnametomib() wrote past the end of the input mib"); } TEST_END TEST_BEGIN(test_mallctl_config) { #define TEST_MALLCTL_CONFIG(config, t) do { \ t oldval; \ size_t sz = sizeof(oldval); \ assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_b_eq(oldval, config_##config, "Incorrect config value"); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_CONFIG(cache_oblivious, bool); TEST_MALLCTL_CONFIG(debug, bool); TEST_MALLCTL_CONFIG(fill, bool); TEST_MALLCTL_CONFIG(lazy_lock, bool); TEST_MALLCTL_CONFIG(malloc_conf, const char *); TEST_MALLCTL_CONFIG(prof, bool); TEST_MALLCTL_CONFIG(prof_libgcc, bool); TEST_MALLCTL_CONFIG(prof_libunwind, bool); TEST_MALLCTL_CONFIG(stats, bool); TEST_MALLCTL_CONFIG(utrace, bool); TEST_MALLCTL_CONFIG(xmalloc, bool); #undef TEST_MALLCTL_CONFIG } TEST_END TEST_BEGIN(test_mallctl_opt) { bool config_always = true; #define TEST_MALLCTL_OPT(t, opt, config) do { \ t oldval; \ size_t sz = sizeof(oldval); \ int expected = config_##config ? 
0 : ENOENT; \ int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \ 0); \ assert_d_eq(result, expected, \ "Unexpected mallctl() result for opt."#opt); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_OPT(bool, abort, always); TEST_MALLCTL_OPT(bool, abort_conf, always); TEST_MALLCTL_OPT(const char *, metadata_thp, always); TEST_MALLCTL_OPT(bool, retain, always); TEST_MALLCTL_OPT(const char *, dss, always); TEST_MALLCTL_OPT(unsigned, narenas, always); TEST_MALLCTL_OPT(const char *, percpu_arena, always); TEST_MALLCTL_OPT(size_t, oversize_threshold, always); TEST_MALLCTL_OPT(bool, background_thread, always); TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always); TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always); TEST_MALLCTL_OPT(bool, stats_print, always); TEST_MALLCTL_OPT(const char *, junk, fill); TEST_MALLCTL_OPT(bool, zero, fill); TEST_MALLCTL_OPT(bool, utrace, utrace); TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); TEST_MALLCTL_OPT(bool, tcache, always); TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always); TEST_MALLCTL_OPT(size_t, lg_tcache_max, always); TEST_MALLCTL_OPT(const char *, thp, always); TEST_MALLCTL_OPT(bool, prof, prof); TEST_MALLCTL_OPT(const char *, prof_prefix, prof); TEST_MALLCTL_OPT(bool, prof_active, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); TEST_MALLCTL_OPT(bool, prof_accum, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); TEST_MALLCTL_OPT(bool, prof_gdump, prof); TEST_MALLCTL_OPT(bool, prof_final, prof); TEST_MALLCTL_OPT(bool, prof_leak, prof); #undef TEST_MALLCTL_OPT } TEST_END TEST_BEGIN(test_manpage_example) { unsigned nbins, i; size_t mib[4]; size_t len, miblen; len = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, "Unexpected mallctl() failure"); miblen = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); for (i = 0; i < nbins; i++) { size_t bin_size; mib[2] = i; len = sizeof(bin_size); assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0), 0, "Unexpected mallctlbymib() failure"); /* Do something with bin_size... */ } } TEST_END TEST_BEGIN(test_tcache_none) { test_skip_if(!opt_tcache); /* Allocate p and q. */ void *p0 = mallocx(42, 0); assert_ptr_not_null(p0, "Unexpected mallocx() failure"); void *q = mallocx(42, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); /* Deallocate p and q, but bypass the tcache for q. */ dallocx(p0, 0); dallocx(q, MALLOCX_TCACHE_NONE); /* Make sure that tcache-based allocation returns p, not q. */ void *p1 = mallocx(42, 0); assert_ptr_not_null(p1, "Unexpected mallocx() failure"); assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); /* Clean up. */ dallocx(p1, MALLOCX_TCACHE_NONE); } TEST_END TEST_BEGIN(test_tcache) { #define NTCACHES 10 unsigned tis[NTCACHES]; void *ps[NTCACHES]; void *qs[NTCACHES]; unsigned i; size_t sz, psz, qsz; psz = 42; qsz = nallocx(psz, 0) + 1; /* Create tcaches. */ for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Exercise tcache ID recycling. 
*/ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Flush empty tcaches. */ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Cache some allocations. */ for (i = 0; i < NTCACHES; i++) { ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); dallocx(ps[i], MALLOCX_TCACHE(tis[i])); qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", i); dallocx(qs[i], MALLOCX_TCACHE(tis[i])); } /* Verify that tcaches allocate cached regions. */ for (i = 0; i < NTCACHES; i++) { void *p0 = ps[i]; ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); assert_ptr_eq(ps[i], p0, "Expected mallocx() to allocate cached region, i=%u", i); } /* Verify that reallocation uses cached regions. */ for (i = 0; i < NTCACHES; i++) { void *q0 = qs[i]; qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", i); assert_ptr_eq(qs[i], q0, "Expected rallocx() to allocate cached region, i=%u", i); /* Avoid undefined behavior in case of test failure. */ if (qs[i] == NULL) { qs[i] = ps[i]; } } for (i = 0; i < NTCACHES; i++) { dallocx(qs[i], MALLOCX_TCACHE(tis[i])); } /* Flush some non-empty tcaches. */ for (i = 0; i < NTCACHES/2; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Destroy tcaches. 
*/ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } } TEST_END TEST_BEGIN(test_thread_arena) { unsigned old_arena_ind, new_arena_ind, narenas; const char *opa; size_t sz = sizeof(opa); assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); if (opt_oversize_threshold != 0) { narenas--; } assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); if (strcmp(opa, "disabled") == 0) { new_arena_ind = narenas - 1; assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); new_arena_ind = 0; assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); } else { assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1; if (old_arena_ind != new_arena_ind) { assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, sizeof(unsigned)), EPERM, "thread.arena ctl " "should not be allowed with percpu arena"); } } } TEST_END TEST_BEGIN(test_arena_i_initialized) { unsigned narenas, i; size_t sz; size_t mib[3]; size_t miblen = sizeof(mib) / sizeof(size_t); bool initialized; sz = sizeof(narenas); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); for (i = 0; i < narenas; i++) { mib[1] = i; sz = sizeof(initialized); assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); } mib[1] = MALLCTL_ARENAS_ALL; sz = sizeof(initialized); assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_true(initialized, "Merged arena statistics should always be initialized"); /* Equivalent to the above but using mallctl() directly. */ sz = sizeof(initialized); assert_d_eq(mallctl( "arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized", (void *)&initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_true(initialized, "Merged arena statistics should always be initialized"); } TEST_END TEST_BEGIN(test_arena_i_dirty_decay_ms) { ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arena.0.dirty_decay_ms", (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); dirty_decay_ms = -2; assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); dirty_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms++) { ssize_t old_dirty_decay_ms; assert_d_eq(mallctl("arena.0.dirty_decay_ms", (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, "Unexpected old arena.0.dirty_decay_ms"); } } TEST_END TEST_BEGIN(test_arena_i_muzzy_decay_ms) { ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arena.0.muzzy_decay_ms", (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); muzzy_decay_ms = -2; assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); muzzy_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms++) { ssize_t old_muzzy_decay_ms; assert_d_eq(mallctl("arena.0.muzzy_decay_ms", (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, "Unexpected old arena.0.muzzy_decay_ms"); } } TEST_END TEST_BEGIN(test_arena_i_purge) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; size_t miblen = 3; assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); mib[1] = MALLCTL_ARENAS_ALL; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_END TEST_BEGIN(test_arena_i_decay) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; size_t miblen = 3; assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); mib[1] = MALLCTL_ARENAS_ALL; 
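/* MALLCTL_ARENAS_ALL is a sentinel arena index; issuing the ctl through it applies the decay operation to all initialized arenas at once. */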
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_END TEST_BEGIN(test_arena_i_dss) { const char *dss_prec_old, *dss_prec_new; size_t sz = sizeof(dss_prec_old); size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, (void *)&dss_prec_old, sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); mib[1] = narenas_total_get(); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, (void *)&dss_prec_old, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); } TEST_END TEST_BEGIN(test_arena_i_retain_grow_limit) { size_t old_limit, new_limit, default_limit; size_t mib[3]; size_t miblen; bool retain_enabled; size_t sz = sizeof(retain_enabled); assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); test_skip_if(!retain_enabled); sz = sizeof(default_limit); miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(default_limit, SC_LARGE_MAXCLASS, "Unexpected default for retain_grow_limit"); new_limit = PAGE - 1; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), EFAULT, "Unexpected mallctl() success"); new_limit = PAGE + 1; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(old_limit, PAGE, "Unexpected value for retain_grow_limit"); /* Expect grow less than psize class 10. */ new_limit = sz_pind2sz(10) - 1; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(old_limit, sz_pind2sz(9), "Unexpected value for retain_grow_limit"); /* Restore to default. 
*/ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit, sizeof(default_limit)), 0, "Unexpected mallctl() failure"); } TEST_END TEST_BEGIN(test_arenas_dirty_decay_ms) { ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arenas.dirty_decay_ms", (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); dirty_decay_ms = -2; assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); dirty_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms++) { ssize_t old_dirty_decay_ms; assert_d_eq(mallctl("arenas.dirty_decay_ms", (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, "Unexpected old arenas.dirty_decay_ms"); } } TEST_END TEST_BEGIN(test_arenas_muzzy_decay_ms) { ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arenas.muzzy_decay_ms", (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); muzzy_decay_ms = -2; assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); muzzy_decay_ms = 0x7fffffff; assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms++) { ssize_t old_muzzy_decay_ms; assert_d_eq(mallctl("arenas.muzzy_decay_ms", (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, "Unexpected old arenas.muzzy_decay_ms"); } } TEST_END TEST_BEGIN(test_arenas_constants) { #define TEST_ARENAS_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, page, PAGE); TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS); TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS); #undef TEST_ARENAS_CONSTANT } TEST_END TEST_BEGIN(test_arenas_bin_constants) { #define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs); TEST_ARENAS_BIN_CONSTANT(size_t, slab_size, bin_infos[0].slab_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards); #undef TEST_ARENAS_BIN_CONSTANT } TEST_END TEST_BEGIN(test_arenas_lextent_constants) { #define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.lextent.0."#name, (void
*)&name, \ &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, SC_LARGE_MINCLASS); #undef TEST_ARENAS_LEXTENT_CONSTANT } TEST_END TEST_BEGIN(test_arenas_create) { unsigned narenas_before, arena, narenas_after; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas_before+1, narenas_after, "Unexpected number of arenas before versus after extension"); assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); } TEST_END TEST_BEGIN(test_arenas_lookup) { unsigned arena, arena1; void *ptr; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); assert_ptr_not_null(ptr, "Unexpected mallocx() failure"); assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_eq(arena, arena1, "Unexpected arena index"); dallocx(ptr, 0); } TEST_END TEST_BEGIN(test_stats_arenas) { #define TEST_STATS_ARENAS(t, name) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ } while (0) TEST_STATS_ARENAS(unsigned, nthreads); TEST_STATS_ARENAS(const char *, dss); TEST_STATS_ARENAS(ssize_t, dirty_decay_ms); TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms); TEST_STATS_ARENAS(size_t, pactive); TEST_STATS_ARENAS(size_t, pdirty); #undef TEST_STATS_ARENAS } TEST_END static void alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result, UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) { *(bool *)extra = true; } static void dalloc_hook(void *extra, UNUSED hook_dalloc_t type, UNUSED void *address, UNUSED uintptr_t args_raw[3]) { *(bool *)extra = true; } TEST_BEGIN(test_hooks) { bool hook_called = false; hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called}; void *handle = NULL; size_t sz = sizeof(handle); int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks)); assert_d_eq(err, 0, "Hook installation failed"); assert_ptr_ne(handle, NULL, "Hook installation gave null handle"); void *ptr = mallocx(1, 0); assert_true(hook_called, "Alloc hook not called"); hook_called = false; free(ptr); assert_true(hook_called, "Free hook not called"); err = mallctl("experimental.hooks.remove", NULL, NULL, &handle, sizeof(handle)); assert_d_eq(err, 0, "Hook removal failed"); hook_called = false; ptr = mallocx(1, 0); free(ptr); assert_false(hook_called, "Hook called after removal"); } TEST_END TEST_BEGIN(test_hooks_exhaustion) { bool hook_called = false; hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called}; void *handle; void *handles[HOOK_MAX]; size_t sz = sizeof(handle); int err; for (int i = 0; i < HOOK_MAX; i++) { handle = NULL; err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks)); assert_d_eq(err, 0, "Error installing hooks"); assert_ptr_ne(handle, NULL, "Got NULL handle"); handles[i] = handle; } err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks)); assert_d_eq(err, EAGAIN, "Should have failed
hook installation"); for (int i = 0; i < HOOK_MAX; i++) { err = mallctl("experimental.hooks.remove", NULL, NULL, &handles[i], sizeof(handles[i])); assert_d_eq(err, 0, "Hook removal failed"); } /* Insertion failed, but then we removed some; it should work now. */ handle = NULL; err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks)); assert_d_eq(err, 0, "Hook insertion failed"); assert_ptr_ne(handle, NULL, "Got NULL handle"); err = mallctl("experimental.hooks.remove", NULL, NULL, &handle, sizeof(handle)); assert_d_eq(err, 0, "Hook removal failed"); } TEST_END int main(void) { return test( test_mallctl_errors, test_mallctlnametomib_errors, test_mallctlbymib_errors, test_mallctl_read_write, test_mallctlnametomib_short_mib, test_mallctl_config, test_mallctl_opt, test_manpage_example, test_tcache_none, test_tcache, test_thread_arena, test_arena_i_initialized, test_arena_i_dirty_decay_ms, test_arena_i_muzzy_decay_ms, test_arena_i_purge, test_arena_i_decay, test_arena_i_dss, test_arena_i_retain_grow_limit, test_arenas_dirty_decay_ms, test_arenas_muzzy_decay_ms, test_arenas_constants, test_arenas_bin_constants, test_arenas_lextent_constants, test_arenas_create, test_arenas_lookup, test_stats_arenas, test_hooks, test_hooks_exhaustion); } jemalloc-sys-0.3.2/rep/test/unit/malloc_io.c010064400007650000024000000175361344617474100172120ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_malloc_strtoumax_no_endptr) { int err; set_errno(0); assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); err = get_errno(); assert_d_eq(err, 0, "Unexpected failure"); } TEST_END TEST_BEGIN(test_malloc_strtoumax) { struct test_s { const char *input; const char *expected_remainder; int base; int expected_errno; const char *expected_errno_name; uintmax_t expected_x; }; #define ERR(e) e, #e #define KUMAX(x) ((uintmax_t)x##ULL) #define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) struct test_s tests[] = { {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, {"42", "", 0, ERR(0), KUMAX(42)}, {"+42", "", 0, ERR(0), KUMAX(42)}, {"-42", "", 0, ERR(0), KSMAX(-42)}, {"042", "", 0, ERR(0), KUMAX(042)}, {"+042", "", 0, ERR(0), KUMAX(042)}, {"-042", "", 0, ERR(0), KSMAX(-042)}, {"0x42", "", 0, ERR(0), KUMAX(0x42)}, {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, {"0", "", 0, ERR(0), KUMAX(0)}, {"1", "", 0, ERR(0), KUMAX(1)}, {"42", "", 0, ERR(0), KUMAX(42)}, {" 42", "", 0, ERR(0), KUMAX(42)}, {"42 ", " ", 0, ERR(0), KUMAX(42)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"42x", "x", 0, ERR(0), KUMAX(42)}, {"07", "", 0, ERR(0), KUMAX(7)}, {"010", "", 0, ERR(0), KUMAX(8)}, {"08", "8", 0, ERR(0), KUMAX(0)}, {"0_", "_", 0, ERR(0), KUMAX(0)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"0X", "X", 0, ERR(0), KUMAX(0)}, {"0xg", "xg", 0, ERR(0), KUMAX(0)}, {"0XA", "", 0, ERR(0), KUMAX(10)}, {"010", "", 10, ERR(0), KUMAX(10)}, {"0x3", "x3", 10, ERR(0), KUMAX(0)}, {"12", "2", 2, ERR(0), KUMAX(1)}, {"78", "8", 8, ERR(0), KUMAX(7)}, {"9a", "a", 10, ERR(0), KUMAX(9)}, {"9A", "A", 10, ERR(0), KUMAX(9)}, {"fg", "g", 16, ERR(0), KUMAX(15)}, {"FG", "G", 16, ERR(0), KUMAX(15)}, {"0xfg", "g", 16, ERR(0), KUMAX(15)}, {"0XFG", "G", 16, ERR(0), KUMAX(15)}, {"z_", "_", 36, ERR(0), KUMAX(35)}, {"Z_", "_", 36, ERR(0), KUMAX(35)} }; #undef ERR #undef KUMAX 
#undef KSMAX unsigned i; for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { struct test_s *test = &tests[i]; int err; uintmax_t result; char *remainder; set_errno(0); result = malloc_strtoumax(test->input, &remainder, test->base); err = get_errno(); assert_d_eq(err, test->expected_errno, "Expected errno %s for \"%s\", base %d", test->expected_errno_name, test->input, test->base); assert_str_eq(remainder, test->expected_remainder, "Unexpected remainder for \"%s\", base %d", test->input, test->base); if (err == 0) { assert_ju_eq(result, test->expected_x, "Unexpected result for \"%s\", base %d", test->input, test->base); } } } TEST_END TEST_BEGIN(test_malloc_snprintf_truncated) { #define BUFLEN 15 char buf[BUFLEN]; size_t result; size_t len; #define TEST(expected_str_untruncated, ...) do { \ result = malloc_snprintf(buf, len, __VA_ARGS__); \ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ "Unexpected string inequality (\"%s\" vs \"%s\")", \ buf, expected_str_untruncated); \ assert_zu_eq(result, strlen(expected_str_untruncated), \ "Unexpected result"); \ } while (0) for (len = 1; len < BUFLEN; len++) { TEST("012346789", "012346789"); TEST("a0123b", "a%sb", "0123"); TEST("a01234567", "a%s%s", "0123", "4567"); TEST("a0123 ", "a%-6s", "0123"); TEST("a 0123", "a%6s", "0123"); TEST("a 012", "a%6.3s", "0123"); TEST("a 012", "a%*.*s", 6, 3, "0123"); TEST("a 123b", "a% db", 123); TEST("a123b", "a%-db", 123); TEST("a-123b", "a%-db", -123); TEST("a+123b", "a%+db", 123); } #undef BUFLEN #undef TEST } TEST_END TEST_BEGIN(test_malloc_snprintf) { #define BUFLEN 128 char buf[BUFLEN]; size_t result; #define TEST(expected_str, ...) do { \ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ assert_str_eq(buf, expected_str, "Unexpected output"); \ assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ } while (0) TEST("hello", "hello"); TEST("50%, 100%", "50%%, %d%%", 100); TEST("a0123b", "a%sb", "0123"); TEST("a 0123b", "a%5sb", "0123"); TEST("a 0123b", "a%*sb", 5, "0123"); TEST("a0123 b", "a%-5sb", "0123"); TEST("a0123b", "a%*sb", -1, "0123"); TEST("a0123 b", "a%*sb", -5, "0123"); TEST("a0123 b", "a%-*sb", -5, "0123"); TEST("a012b", "a%.3sb", "0123"); TEST("a012b", "a%.*sb", 3, "0123"); TEST("a0123b", "a%.*sb", -3, "0123"); TEST("a 012b", "a%5.3sb", "0123"); TEST("a 012b", "a%5.*sb", 3, "0123"); TEST("a 012b", "a%*.3sb", 5, "0123"); TEST("a 012b", "a%*.*sb", 5, 3, "0123"); TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); TEST("_abcd_", "_%x_", 0xabcd); TEST("_0xabcd_", "_%#x_", 0xabcd); TEST("_1234_", "_%o_", 01234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_1234_", "_%d_", 1234); TEST("_ 1234_", "_% d_", 1234); TEST("_+1234_", "_%+d_", 1234); TEST("_-1234_", "_%d_", -1234); TEST("_-1234_", "_% d_", -1234); TEST("_-1234_", "_%+d_", -1234); TEST("_-1234_", "_%d_", -1234); TEST("_1234_", "_%d_", 1234); TEST("_-1234_", "_%i_", -1234); TEST("_1234_", "_%i_", 1234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_0x1234abc_", "_%#x_", 0x1234abc); TEST("_0X1234ABC_", "_%#X_", 0x1234abc); TEST("_c_", "_%c_", 'c'); TEST("_string_", "_%s_", "string"); TEST("_0x42_", "_%p_", ((void *)0x42)); TEST("_-1234_", "_%ld_", ((long)-1234)); TEST("_1234_", "_%ld_", ((long)1234)); TEST("_-1234_", "_%li_", ((long)-1234)); TEST("_1234_", "_%li_", ((long)1234)); TEST("_01234_", "_%#lo_", ((long)01234)); TEST("_1234_", "_%lu_", ((long)1234)); TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); 
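/* The remaining cases cover the integer length modifiers: ll and its BSD synonym q (long long), j (intmax_t), t (ptrdiff_t), and z (size_t/ssize_t). */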
TEST("_-1234_", "_%lld_", ((long long)-1234)); TEST("_1234_", "_%lld_", ((long long)1234)); TEST("_-1234_", "_%lli_", ((long long)-1234)); TEST("_1234_", "_%lli_", ((long long)1234)); TEST("_01234_", "_%#llo_", ((long long)01234)); TEST("_1234_", "_%llu_", ((long long)1234)); TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); TEST("_-1234_", "_%qd_", ((long long)-1234)); TEST("_1234_", "_%qd_", ((long long)1234)); TEST("_-1234_", "_%qi_", ((long long)-1234)); TEST("_1234_", "_%qi_", ((long long)1234)); TEST("_01234_", "_%#qo_", ((long long)01234)); TEST("_1234_", "_%qu_", ((long long)1234)); TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); TEST("_1234_", "_%jd_", ((intmax_t)1234)); TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); TEST("_1234_", "_%ji_", ((intmax_t)1234)); TEST("_01234_", "_%#jo_", ((intmax_t)01234)); TEST("_1234_", "_%ju_", ((intmax_t)1234)); TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); TEST("_1234_", "_%zd_", ((ssize_t)1234)); TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); TEST("_1234_", "_%zi_", ((ssize_t)1234)); TEST("_01234_", "_%#zo_", ((ssize_t)01234)); TEST("_1234_", "_%zu_", ((ssize_t)1234)); TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC)); #undef BUFLEN } TEST_END int main(void) { return test( test_malloc_strtoumax_no_endptr, test_malloc_strtoumax, test_malloc_snprintf_truncated, test_malloc_snprintf); } jemalloc-sys-0.3.2/rep/test/unit/math.c010064400007650000024000000440701344617474100161760ustar0000000000000000#include "test/jemalloc_test.h" #define MAX_REL_ERR 1.0e-9 #define MAX_ABS_ERR 1.0e-9 #include #ifdef __PGI #undef INFINITY #endif #ifndef INFINITY #define INFINITY (DBL_MAX + DBL_MAX) #endif static bool double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { double rel_err; if (fabs(a - b) < max_abs_err) { return true; } rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); return (rel_err < max_rel_err); } static uint64_t factorial(unsigned x) { uint64_t ret = 1; unsigned i; for (i = 2; i <= x; i++) { ret *= (uint64_t)i; } return ret; } TEST_BEGIN(test_ln_gamma_factorial) { unsigned x; /* exp(ln_gamma(x)) == (x-1)! for integer x. */ for (x = 1; x <= 21; x++) { assert_true(double_eq_rel(exp(ln_gamma(x)), (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), "Incorrect factorial result for x=%u", x); } } TEST_END /* Expected ln_gamma([0.0..100.0] increment=0.25). 
*/ static const double ln_gamma_misc_expected[] = { INFINITY, 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, 101.14611615586458981, 102.05634043243354370, 102.96819861451382394, 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, 
106.63176026064346047, 107.55163153760463501, 108.47307506906540198, 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, 251.89040220972316320, 252.97185064629374551, 254.05412415488834199, 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, 261.65291497755913497, 
262.74169283208021852, 263.83127195904967266, 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, 359.13420536957539753 }; TEST_BEGIN(test_ln_gamma_misc) { unsigned i; for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { double x = (double)i * 0.25; assert_true(double_eq_rel(ln_gamma(x), ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect ln_gamma result for i=%u", i); } } TEST_END /* Expected pt_norm([0.01..0.99] increment=0.01). 
*/ static const double pt_norm_expected[] = { -INFINITY, -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 }; TEST_BEGIN(test_pt_norm) { unsigned i; for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { double p = (double)i * 0.01; assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_norm result for i=%u", i); } } TEST_END /* * Expected pt_chi2(p=[0.01..0.99] increment=0.07, * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
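* pt_chi2(p, df) is the quantile function of the chi-squared distribution. The expected values are stored row-major: fifteen quantiles (j = 1, 8, ..., 99) for each df in turn.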
*/ static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; static const double pt_chi2_expected[] = { 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, 2.606673548632508, 4.602913725294877, 5.646152813924212, 6.488971315540869, 7.249823275816285, 7.977314231410841, 8.700354939944047, 9.441728024225892, 10.224338321374127, 11.076435368801061, 12.039320937038386, 13.183878752697167, 14.657791935084575, 16.885728216339373, 23.361991680031817, 70.14844087392152, 80.92379498849355, 85.53325420085891, 88.94433120715347, 91.83732712857017, 94.46719943606301, 96.96896479994635, 99.43412843510363, 101.94074719829733, 104.57228644307247, 107.43900093448734, 110.71844673417287, 114.76616819871325, 120.57422505959563, 135.92318818757556, 899.0072447849649, 937.9271278858220, 953.8117189560207, 965.3079371501154, 974.8974061207954, 983.4936235182347, 991.5691170518946, 999.4334123954690, 1007.3391826856553, 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 }; TEST_BEGIN(test_pt_chi2) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { double df = pt_chi2_df[i]; double ln_gamma_df = ln_gamma(df * 0.5); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_chi2 result for i=%u, j=%u", i, j); e++; } } } TEST_END /* * Expected pt_gamma(p=[0.1..0.99] increment=0.07, * shape=[0.5..3.0] increment=0.5). 
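* pt_gamma(p, shape, scale) is the quantile function of the gamma distribution. As with pt_chi2, the expected values are row-major, fifteen per shape; per the j loop below, p actually runs over 0.01..0.99 in steps of 0.07.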
*/ static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; static const double pt_gamma_expected[] = { 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 }; TEST_BEGIN(test_pt_gamma_shape) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { double shape = pt_gamma_shape[i]; double ln_gamma_shape = ln_gamma(shape); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_gamma result for i=%u, j=%u", i, j); e++; } } } TEST_END TEST_BEGIN(test_pt_gamma_scale) { double shape = 1.0; double ln_gamma_shape = ln_gamma(shape); assert_true(double_eq_rel( pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, MAX_ABS_ERR), "Scale should be trivially equivalent to external multiplication"); } TEST_END int main(void) { return test( test_ln_gamma_factorial, test_ln_gamma_misc, test_pt_norm, test_pt_chi2, test_pt_gamma_shape, test_pt_gamma_scale); } jemalloc-sys-0.3.2/rep/test/unit/mq.c010064400007650000024000000034111344617474100156540ustar0000000000000000#include "test/jemalloc_test.h" #define NSENDERS 3 #define NMSGS 100000 typedef struct mq_msg_s mq_msg_t; struct mq_msg_s { mq_msg(mq_msg_t) link; }; mq_gen(static, mq_, mq_t, mq_msg_t, link) TEST_BEGIN(test_mq_basic) { mq_t mq; mq_msg_t msg; assert_false(mq_init(&mq), "Unexpected mq_init() failure"); assert_u_eq(mq_count(&mq), 0, "mq should be empty"); assert_ptr_null(mq_tryget(&mq), "mq_tryget() should fail when the queue is empty"); mq_put(&mq, &msg); assert_u_eq(mq_count(&mq), 1, "mq should contain one message"); 
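/* mq_tryget() is the non-blocking variant (NULL when empty, as asserted above); mq_get(), exercised next and by the receiver in test_mq_threaded, retries until a message is available. */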
assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); mq_put(&mq, &msg); assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); mq_fini(&mq); } TEST_END static void * thd_receiver_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < (NSENDERS * NMSGS); i++) { mq_msg_t *msg = mq_get(mq); assert_ptr_not_null(msg, "mq_get() should never return NULL"); dallocx(msg, 0); } return NULL; } static void * thd_sender_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < NMSGS; i++) { mq_msg_t *msg; void *p; p = mallocx(sizeof(mq_msg_t), 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); msg = (mq_msg_t *)p; mq_put(mq, msg); } return NULL; } TEST_BEGIN(test_mq_threaded) { mq_t mq; thd_t receiver; thd_t senders[NSENDERS]; unsigned i; assert_false(mq_init(&mq), "Unexpected mq_init() failure"); thd_create(&receiver, thd_receiver_start, (void *)&mq); for (i = 0; i < NSENDERS; i++) { thd_create(&senders[i], thd_sender_start, (void *)&mq); } thd_join(receiver, NULL); for (i = 0; i < NSENDERS; i++) { thd_join(senders[i], NULL); } mq_fini(&mq); } TEST_END int main(void) { return test( test_mq_basic, test_mq_threaded); } jemalloc-sys-0.3.2/rep/test/unit/mtx.c010064400007650000024000000017601344617474100160540ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 2 #define NINCRS 2000000 TEST_BEGIN(test_mtx_basic) { mtx_t mtx; assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); mtx_lock(&mtx); mtx_unlock(&mtx); mtx_fini(&mtx); } TEST_END typedef struct { mtx_t mtx; unsigned x; } thd_start_arg_t; static void * thd_start(void *varg) { thd_start_arg_t *arg = (thd_start_arg_t *)varg; unsigned i; for (i = 0; i < NINCRS; i++) { mtx_lock(&arg->mtx); arg->x++; mtx_unlock(&arg->mtx); } return NULL; } TEST_BEGIN(test_mtx_race) { thd_start_arg_t arg; thd_t thds[NTHREADS]; unsigned i; assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); arg.x = 0; for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)&arg); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } assert_u_eq(arg.x, NTHREADS * NINCRS, "Race-related counter corruption"); } TEST_END int main(void) { return test( test_mtx_basic, test_mtx_race); } jemalloc-sys-0.3.2/rep/test/unit/nstime.c010064400007650000024000000142621344617474100165440ustar0000000000000000#include "test/jemalloc_test.h" #define BILLION UINT64_C(1000000000) TEST_BEGIN(test_nstime_init) { nstime_t nst; nstime_init(&nst, 42000000043); assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read"); assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); } TEST_END TEST_BEGIN(test_nstime_init2) { nstime_t nst; nstime_init2(&nst, 42, 43); assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); } TEST_END TEST_BEGIN(test_nstime_copy) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_init(&nstb, 0); nstime_copy(&nstb, &nsta); assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied"); assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied"); } TEST_END TEST_BEGIN(test_nstime_compare) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal"); assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal"); nstime_init2(&nstb, 42, 42); assert_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than nstb"); 
assert_d_eq(nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 42, 44); assert_d_eq(nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); nstime_init2(&nstb, 41, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 43, 0); assert_d_eq(nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); } TEST_END TEST_BEGIN(test_nstime_add) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 84, 86); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); nstime_init2(&nsta, 42, BILLION - 1); nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 85, BILLION - 2); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); } TEST_END TEST_BEGIN(test_nstime_iadd) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, BILLION - 1); nstime_iadd(&nsta, 1); nstime_init2(&nstb, 43, 0); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); nstime_init2(&nsta, 42, 1); nstime_iadd(&nsta, BILLION + 1); nstime_init2(&nstb, 43, 2); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); } TEST_END TEST_BEGIN(test_nstime_subtract) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_subtract(&nsta, &nstb); nstime_init(&nstb, 0); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); nstime_init2(&nsta, 42, 43); nstime_init2(&nstb, 41, 44); nstime_subtract(&nsta, &nstb); nstime_init2(&nstb, 0, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); } TEST_END TEST_BEGIN(test_nstime_isubtract) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_isubtract(&nsta, 42*BILLION + 43); nstime_init(&nstb, 0); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); nstime_init2(&nsta, 42, 43); nstime_isubtract(&nsta, 41*BILLION + 44); nstime_init2(&nstb, 0, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); } TEST_END TEST_BEGIN(test_nstime_imultiply) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_imultiply(&nsta, 10); nstime_init2(&nstb, 420, 430); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); nstime_init2(&nsta, 42, 666666666); nstime_imultiply(&nsta, 3); nstime_init2(&nstb, 127, 999999998); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); } TEST_END TEST_BEGIN(test_nstime_idivide) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_idivide(&nsta, 10); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); nstime_init2(&nsta, 42, 666666666); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 3); nstime_idivide(&nsta, 3); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); } TEST_END TEST_BEGIN(test_nstime_divide) { nstime_t nsta, nstb, nstc; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); assert_u64_eq(nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); 
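/* nstime_divide() truncates: adding 1 ns to the dividend below leaves the quotient at 10, while subtracting 1 ns (the following case) drops it to 9. */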
nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_add(&nsta, &nstc); assert_u64_eq(nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_subtract(&nsta, &nstc); assert_u64_eq(nstime_divide(&nsta, &nstb), 9, "Incorrect division result"); } TEST_END TEST_BEGIN(test_nstime_monotonic) { nstime_monotonic(); } TEST_END TEST_BEGIN(test_nstime_update) { nstime_t nst; nstime_init(&nst, 0); assert_false(nstime_update(&nst), "Basic time update failed."); /* Only Rip Van Winkle sleeps this long. */ { nstime_t addend; nstime_init2(&addend, 631152000, 0); nstime_add(&nst, &addend); } { nstime_t nst0; nstime_copy(&nst0, &nst); assert_true(nstime_update(&nst), "Update should detect time roll-back."); assert_d_eq(nstime_compare(&nst, &nst0), 0, "Time should not have been modified"); } } TEST_END int main(void) { return test( test_nstime_init, test_nstime_init2, test_nstime_copy, test_nstime_compare, test_nstime_add, test_nstime_iadd, test_nstime_subtract, test_nstime_isubtract, test_nstime_imultiply, test_nstime_idivide, test_nstime_divide, test_nstime_monotonic, test_nstime_update); } jemalloc-sys-0.3.2/rep/test/unit/pack.c010064400007650000024000000076761344617474100161720ustar0000000000000000#include "test/jemalloc_test.h" /* * Size class that is a divisor of the page size, ideally 4+ regions per run. */ #if LG_PAGE <= 14 #define SZ (ZU(1) << (LG_PAGE - 2)) #else #define SZ ZU(4096) #endif /* * Number of slabs to consume at high water mark. Should be at least 2 so that * if mmap()ed memory grows downward, downward growth of mmap()ed memory is * tested. */ #define NSLABS 8 static unsigned binind_compute(void) { size_t sz; unsigned nbins, i; sz = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); for (i = 0; i < nbins; i++) { size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); size_t size; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); mib[2] = (size_t)i; sz = sizeof(size); assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); if (size == SZ) { return i; } } test_fail("Unable to compute nregs_per_run"); return 0; } static size_t nregs_per_run_compute(void) { uint32_t nregs; size_t sz; unsigned binind = binind_compute(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); mib[2] = (size_t)binind; sz = sizeof(nregs); assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); return nregs; } static unsigned arenas_create_mallctl(void) { unsigned arena_ind; size_t sz; sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Error in arenas.create"); return arena_ind; } static void arena_reset_mallctl(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_BEGIN(test_pack) { bool prof_enabled; size_t sz = sizeof(prof_enabled); if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) { test_skip_if(prof_enabled); } unsigned
arena_ind = arenas_create_mallctl(); size_t nregs_per_run = nregs_per_run_compute(); size_t nregs = nregs_per_run * NSLABS; VARIABLE_ARRAY(void *, ptrs, nregs); size_t i, j, offset; /* Fill matrix. */ for (i = offset = 0; i < NSLABS; i++) { for (j = 0; j < nregs_per_run; j++) { void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |" " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu", SZ, arena_ind, i, j); ptrs[(i * nregs_per_run) + j] = p; } } /* * Free all but one region of each run, but rotate which region is * preserved, so that subsequent allocations exercise the within-run * layout policy. */ offset = 0; for (i = offset = 0; i < NSLABS; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p = ptrs[(i * nregs_per_run) + j]; if (offset == j) { continue; } dallocx(p, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); } } /* * Logically refill matrix, skipping preserved regions and verifying * that the matrix is unmodified. */ offset = 0; for (i = offset = 0; i < NSLABS; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p; if (offset == j) { continue; } p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j], "Unexpected refill discrepancy, run=%zu, reg=%zu\n", i, j); } } /* Clean up. */ arena_reset_mallctl(arena_ind); } TEST_END int main(void) { return test( test_pack); } jemalloc-sys-0.3.2/rep/test/unit/pack.sh010064400007650000024000000001611344617474100163440ustar0000000000000000#!/bin/sh # Immediately purge to minimize fragmentation. export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0" jemalloc-sys-0.3.2/rep/test/unit/pages.c010064400007650000024000000013261344617474100163410ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_pages_huge) { size_t alloc_size; bool commit; void *pages, *hugepage; alloc_size = HUGEPAGE * 2 - PAGE; commit = true; pages = pages_map(NULL, alloc_size, PAGE, &commit); assert_ptr_not_null(pages, "Unexpected pages_map() error"); if (init_system_thp_mode == thp_mode_default) { hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE)); assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge, "Unexpected pages_huge() result"); assert_false(pages_nohuge(hugepage, HUGEPAGE), "Unexpected pages_nohuge() result"); } pages_unmap(pages, alloc_size); } TEST_END int main(void) { return test( test_pages_huge); } jemalloc-sys-0.3.2/rep/test/unit/ph.c010064400007650000024000000163151344617474100156550ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ph.h" typedef struct node_s node_t; struct node_s { #define NODE_MAGIC 0x9823af7e uint32_t magic; phn(node_t) link; uint64_t key; }; static int node_cmp(const node_t *a, const node_t *b) { int ret; ret = (a->key > b->key) - (a->key < b->key); if (ret == 0) { /* * Duplicates are not allowed in the heap, so force an * arbitrary ordering for non-identical items with equal keys. 
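* Falling back to the nodes' addresses provides such an ordering, making the comparison a total order over distinct nodes.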
*/ ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } return ret; } static int node_cmp_magic(const node_t *a, const node_t *b) { assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); return node_cmp(a, b); } typedef ph(node_t) heap_t; ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); static void node_print(const node_t *node, unsigned depth) { unsigned i; node_t *leftmost_child, *sibling; for (i = 0; i < depth; i++) { malloc_printf("\t"); } malloc_printf("%2"FMTu64"\n", node->key); leftmost_child = phn_lchild_get(node_t, link, node); if (leftmost_child == NULL) { return; } node_print(leftmost_child, depth + 1); for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != NULL; sibling = phn_next_get(node_t, link, sibling)) { node_print(sibling, depth + 1); } } static void heap_print(const heap_t *heap) { node_t *auxelm; malloc_printf("vvv heap %p vvv\n", heap); if (heap->ph_root == NULL) { goto label_return; } node_print(heap->ph_root, 0); for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; auxelm = phn_next_get(node_t, link, auxelm)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); node_print(auxelm, 0); } label_return: malloc_printf("^^^ heap %p ^^^\n", heap); } static unsigned node_validate(const node_t *node, const node_t *parent) { unsigned nnodes = 1; node_t *leftmost_child, *sibling; if (parent != NULL) { assert_d_ge(node_cmp_magic(node, parent), 0, "Child is less than parent"); } leftmost_child = phn_lchild_get(node_t, link, node); if (leftmost_child == NULL) { return nnodes; } assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), (void *)node, "Leftmost child does not link to node"); nnodes += node_validate(leftmost_child, node); for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != NULL; sibling = phn_next_get(node_t, link, sibling)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, sibling)), sibling, "sibling's prev doesn't link to sibling"); nnodes += node_validate(sibling, node); } return nnodes; } static unsigned heap_validate(const heap_t *heap) { unsigned nnodes = 0; node_t *auxelm; if (heap->ph_root == NULL) { goto label_return; } nnodes += node_validate(heap->ph_root, NULL); for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; auxelm = phn_next_get(node_t, link, auxelm)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); nnodes += node_validate(auxelm, NULL); } label_return: if (false) { heap_print(heap); } return nnodes; } TEST_BEGIN(test_ph_empty) { heap_t heap; heap_new(&heap); assert_true(heap_empty(&heap), "Heap should be empty"); assert_ptr_null(heap_first(&heap), "Unexpected node"); assert_ptr_null(heap_any(&heap), "Unexpected node"); } TEST_END static void node_remove(heap_t *heap, node_t *node) { heap_remove(heap, node); node->magic = 0; } static node_t * node_remove_first(heap_t *heap) { node_t *node = heap_remove_first(heap); node->magic = 0; return node; } static node_t * node_remove_any(heap_t *heap) { node_t *node = heap_remove_any(heap); node->magic = 0; return node; } TEST_BEGIN(test_ph_random) { #define NNODES 25 #define NBAGS 250 #define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; heap_t heap; node_t nodes[NNODES]; unsigned i, j, k; sfmt = init_gen_rand(SEED); for (i = 0; i < NBAGS; i++) { switch (i) { case 0: /* 
Insert in order. */ for (j = 0; j < NNODES; j++) { bag[j] = j; } break; case 1: /* Insert in reverse order. */ for (j = 0; j < NNODES; j++) { bag[j] = NNODES - j - 1; } break; default: for (j = 0; j < NNODES; j++) { bag[j] = gen_rand64_range(sfmt, NNODES); } } for (j = 1; j <= NNODES; j++) { /* Initialize heap and nodes. */ heap_new(&heap); assert_u_eq(heap_validate(&heap), 0, "Incorrect node count"); for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; } /* Insert nodes. */ for (k = 0; k < j; k++) { heap_insert(&heap, &nodes[k]); if (i % 13 == 12) { assert_ptr_not_null(heap_any(&heap), "Heap should not be empty"); /* Trigger merging. */ assert_ptr_not_null(heap_first(&heap), "Heap should not be empty"); } assert_u_eq(heap_validate(&heap), k + 1, "Incorrect node count"); } assert_false(heap_empty(&heap), "Heap should not be empty"); /* Remove nodes. */ switch (i % 6) { case 0: for (k = 0; k < j; k++) { assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); node_remove(&heap, &nodes[k]); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); } break; case 1: for (k = j; k > 0; k--) { node_remove(&heap, &nodes[k-1]); assert_u_eq(heap_validate(&heap), k - 1, "Incorrect node count"); } break; case 2: { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = node_remove_first(&heap); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); if (prev != NULL) { assert_d_ge(node_cmp(node, prev), 0, "Bad removal order"); } prev = node; } break; } case 3: { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = heap_first(&heap); assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); if (prev != NULL) { assert_d_ge(node_cmp(node, prev), 0, "Bad removal order"); } node_remove(&heap, node); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); prev = node; } break; } case 4: { for (k = 0; k < j; k++) { node_remove_any(&heap); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); } break; } case 5: { for (k = 0; k < j; k++) { node_t *node = heap_any(&heap); assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); node_remove(&heap, node); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); } break; } default: not_reached(); } assert_ptr_null(heap_first(&heap), "Heap should be empty"); assert_ptr_null(heap_any(&heap), "Heap should be empty"); assert_true(heap_empty(&heap), "Heap should be empty"); } } fini_gen_rand(sfmt); #undef NNODES #undef SEED } TEST_END int main(void) { return test( test_ph_empty, test_ph_random); } jemalloc-sys-0.3.2/rep/test/unit/prng.c010064400007650000024000000137271344617474100162200ustar0000000000000000#include "test/jemalloc_test.h" static void test_prng_lg_range_u32(bool atomic) { atomic_u32_t sa, sb; uint32_t ra, rb; unsigned lg_range; atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); atomic_store_u32(&sa, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_eq(ra, rb, "Repeated generation should produce repeated results"); atomic_store_u32(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sb, 32, atomic); assert_u32_eq(ra, rb, "Equivalent generation should produce equivalent results"); atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_ne(ra, rb, "Full-width results must not immediately repeat"); atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); for 
(lg_range = 31; lg_range > 0; lg_range--) { atomic_store_u32(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sb, lg_range, atomic); assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_u32_eq(rb, (ra >> (32 - lg_range)), "Expected high order bits of full-width result, " "lg_range=%u", lg_range); } } static void test_prng_lg_range_u64(void) { uint64_t sa, sb, ra, rb; unsigned lg_range; sa = 42; ra = prng_lg_range_u64(&sa, 64); sa = 42; rb = prng_lg_range_u64(&sa, 64); assert_u64_eq(ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_u64(&sb, 64); assert_u64_eq(ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_u64(&sa, 64); rb = prng_lg_range_u64(&sa, 64); assert_u64_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_u64(&sa, 64); for (lg_range = 63; lg_range > 0; lg_range--) { sb = 42; rb = prng_lg_range_u64(&sb, lg_range); assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_u64_eq(rb, (ra >> (64 - lg_range)), "Expected high order bits of full-width result, " "lg_range=%u", lg_range); } } static void test_prng_lg_range_zu(bool atomic) { atomic_zu_t sa, sb; size_t ra, rb; unsigned lg_range; atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); atomic_store_zu(&sa, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Repeated generation should produce repeated results"); atomic_store_zu(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Equivalent generation should produce equivalent results"); atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_ne(ra, rb, "Full-width results must not immediately repeat"); atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; lg_range--) { atomic_store_zu(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sb, lg_range, atomic); assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range)), "Expected high order bits of full-width " "result, lg_range=%u", lg_range); } } TEST_BEGIN(test_prng_lg_range_u32_nonatomic) { test_prng_lg_range_u32(false); } TEST_END TEST_BEGIN(test_prng_lg_range_u32_atomic) { test_prng_lg_range_u32(true); } TEST_END TEST_BEGIN(test_prng_lg_range_u64_nonatomic) { test_prng_lg_range_u64(); } TEST_END TEST_BEGIN(test_prng_lg_range_zu_nonatomic) { test_prng_lg_range_zu(false); } TEST_END TEST_BEGIN(test_prng_lg_range_zu_atomic) { test_prng_lg_range_zu(true); } TEST_END static void test_prng_range_u32(bool atomic) { uint32_t range; #define MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { atomic_u32_t s; unsigned rep; atomic_store_u32(&s, range, ATOMIC_RELAXED); for (rep = 0; rep < NREPS; rep++) { uint32_t r = prng_range_u32(&s, range, atomic); assert_u32_lt(r, range, "Out of range"); } } } static void test_prng_range_u64(void) { uint64_t range; #define MAX_RANGE 10000000 #define 
RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { uint64_t s; unsigned rep; s = range; for (rep = 0; rep < NREPS; rep++) { uint64_t r = prng_range_u64(&s, range); assert_u64_lt(r, range, "Out of range"); } } } static void test_prng_range_zu(bool atomic) { size_t range; #define MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { atomic_zu_t s; unsigned rep; atomic_store_zu(&s, range, ATOMIC_RELAXED); for (rep = 0; rep < NREPS; rep++) { size_t r = prng_range_zu(&s, range, atomic); assert_zu_lt(r, range, "Out of range"); } } } TEST_BEGIN(test_prng_range_u32_nonatomic) { test_prng_range_u32(false); } TEST_END TEST_BEGIN(test_prng_range_u32_atomic) { test_prng_range_u32(true); } TEST_END TEST_BEGIN(test_prng_range_u64_nonatomic) { test_prng_range_u64(); } TEST_END TEST_BEGIN(test_prng_range_zu_nonatomic) { test_prng_range_zu(false); } TEST_END TEST_BEGIN(test_prng_range_zu_atomic) { test_prng_range_zu(true); } TEST_END int main(void) { return test( test_prng_lg_range_u32_nonatomic, test_prng_lg_range_u32_atomic, test_prng_lg_range_u64_nonatomic, test_prng_lg_range_zu_nonatomic, test_prng_lg_range_zu_atomic, test_prng_range_u32_nonatomic, test_prng_range_u32_atomic, test_prng_range_u64_nonatomic, test_prng_range_zu_nonatomic, test_prng_range_zu_atomic); } jemalloc-sys-0.3.2/rep/test/unit/prof_accum.c010064400007650000024000000034121344617474100173560ustar0000000000000000#include "test/jemalloc_test.h" #define NTHREADS 4 #define NALLOCS_PER_THREAD 50 #define DUMP_INTERVAL 1 #define BT_COUNT_CHECK_INTERVAL 5 static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } static void * alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration); } static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; size_t bt_count_prev, bt_count; unsigned i_prev, i; i_prev = 0; bt_count_prev = 0; for (i = 0; i < NALLOCS_PER_THREAD; i++) { void *p = alloc_from_permuted_backtrace(thd_ind, i); dallocx(p, 0); if (i % DUMP_INTERVAL == 0) { assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); } if (i % BT_COUNT_CHECK_INTERVAL == 0 || i+1 == NALLOCS_PER_THREAD) { bt_count = prof_bt_count(); assert_zu_le(bt_count_prev+(i-i_prev), bt_count, "Expected larger backtrace count increase"); i_prev = i; bt_count_prev = bt_count; } } return NULL; } TEST_BEGIN(test_idump) { bool active; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } } TEST_END int main(void) { return test_no_reentrancy( test_idump); } jemalloc-sys-0.3.2/rep/test/unit/prof_accum.sh010064400007650000024000000002111344617474100175400ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0" fi 
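# The test harness sources this script to set MALLOC_CONF before running prof_accum: lg_prof_sample:0 samples every allocation (making the asserted backtrace counts deterministic), while prof_active:false leaves it to the test itself to enable profiling via mallctl("prof.active", ...).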
jemalloc-sys-0.3.2/rep/test/unit/prof_active.c010064400007650000024000000070111344617474100175400ustar0000000000000000#include "test/jemalloc_test.h" static void mallctl_bool_get(const char *name, bool expected, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading %s", func, line, name); assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_bool_set(const char *name, bool old_expected, bool val_new, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new, sizeof(val_new)), 0, "%s():%d: Unexpected mallctl failure reading/writing %s", func, line, name); assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, int line) { mallctl_bool_get("prof.active", prof_active_old_expected, func, line); } #define mallctl_prof_active_get(a) \ mallctl_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_prof_active_set_impl(bool prof_active_old_expected, bool prof_active_new, const char *func, int line) { mallctl_bool_set("prof.active", prof_active_old_expected, prof_active_new, func, line); } #define mallctl_prof_active_set(a, b) \ mallctl_prof_active_set_impl(a, b, __func__, __LINE__) static void mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, const char *func, int line) { mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, func, line); } #define mallctl_thread_prof_active_get(a) \ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, bool thread_prof_active_new, const char *func, int line) { mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, thread_prof_active_new, func, line); } #define mallctl_thread_prof_active_set(a, b) \ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) static void prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { void *p; size_t expected_backtraces = expect_sample ? 1 : 0; assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, line); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), expected_backtraces, "%s():%d: Unexpected backtrace count", func, line); dallocx(p, 0); } #define prof_sampling_probe(a) \ prof_sampling_probe_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); mallctl_prof_active_get(true); mallctl_thread_prof_active_get(false); mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(false, false); /* prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(true, false); mallctl_thread_prof_active_set(false, false); /* !prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, false); mallctl_thread_prof_active_set(false, true); /* !prof.active, thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, true); mallctl_thread_prof_active_set(true, true); /* prof.active, thread.prof.active. */ prof_sampling_probe(true); /* Restore settings. 
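* (back to the state asserted at the top of the test: prof.active == true, thread.prof.active == false)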
*/ mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(true, false); } TEST_END int main(void) { return test_no_reentrancy( test_prof_active); } jemalloc-sys-0.3.2/rep/test/unit/prof_active.sh010064400007650000024000000002051344617474100177260ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0" fi jemalloc-sys-0.3.2/rep/test/unit/prof_gdump.c010064400007650000024000000037111344617474100174040ustar0000000000000000#include "test/jemalloc_test.h" static bool did_prof_dump_open; static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } TEST_BEGIN(test_gdump) { bool active, gdump, gdump_old; void *p, *q, *r, *s; size_t sz; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); did_prof_dump_open = false; q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); gdump = false; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); assert_ptr_not_null(r, "Unexpected mallocx() failure"); assert_false(did_prof_dump_open, "Unexpected profile dump"); gdump = true; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); assert_ptr_not_null(s, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); dallocx(p, 0); dallocx(q, 0); dallocx(r, 0); dallocx(s, 0); } TEST_END int main(void) { return test_no_reentrancy( test_gdump); } jemalloc-sys-0.3.2/rep/test/unit/prof_gdump.sh010064400007650000024000000001711344617474100175710ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true" fi jemalloc-sys-0.3.2/rep/test/unit/prof_idump.c010064400007650000024000000014741344617474100174100ustar0000000000000000#include "test/jemalloc_test.h" static bool did_prof_dump_open; static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } TEST_BEGIN(test_idump) { bool active; void *p; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); assert_true(did_prof_dump_open, "Expected a profile dump"); } TEST_END int main(void) { return test(
test_idump); } jemalloc-sys-0.3.2/rep/test/unit/prof_idump.sh010064400007650000024000000003171344617474100175750ustar0000000000000000#!/bin/sh export MALLOC_CONF="tcache:false" if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0" fi jemalloc-sys-0.3.2/rep/test/unit/prof_log.c010064400007650000024000000064611344617474100170560ustar0000000000000000#include "test/jemalloc_test.h" #define N_PARAM 100 #define N_THREADS 10 static void assert_rep() { assert_b_eq(prof_log_rep_check(), false, "Rep check failed"); } static void assert_log_empty() { assert_zu_eq(prof_log_bt_count(), 0, "The log has backtraces; it isn't empty"); assert_zu_eq(prof_log_thr_count(), 0, "The log has threads; it isn't empty"); assert_zu_eq(prof_log_alloc_count(), 0, "The log has allocations; it isn't empty"); } void *buf[N_PARAM]; static void f() { int i; for (i = 0; i < N_PARAM; i++) { buf[i] = malloc(100); } for (i = 0; i < N_PARAM; i++) { free(buf[i]); } } TEST_BEGIN(test_prof_log_many_logs) { int i; test_skip_if(!config_prof); for (i = 0; i < N_PARAM; i++) { assert_b_eq(prof_log_is_logging(), false, "Logging shouldn't have started yet"); assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when starting logging"); assert_b_eq(prof_log_is_logging(), true, "Logging should be started by now"); assert_log_empty(); assert_rep(); f(); assert_zu_eq(prof_log_thr_count(), 1, "Wrong thread count"); assert_rep(); assert_b_eq(prof_log_is_logging(), true, "Logging should still be on"); assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when stopping logging"); assert_b_eq(prof_log_is_logging(), false, "Logging should have turned off"); } } TEST_END thd_t thr_buf[N_THREADS]; static void *f_thread(void *unused) { int i; for (i = 0; i < N_PARAM; i++) { void *p = malloc(100); memset(p, 100, sizeof(char)); free(p); } return NULL; } TEST_BEGIN(test_prof_log_many_threads) { test_skip_if(!config_prof); int i; assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when starting logging"); for (i = 0; i < N_THREADS; i++) { thd_create(&thr_buf[i], &f_thread, NULL); } for (i = 0; i < N_THREADS; i++) { thd_join(thr_buf[i], NULL); } assert_zu_eq(prof_log_thr_count(), N_THREADS, "Wrong number of thread entries"); assert_rep(); assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when stopping logging"); } TEST_END static void f3() { void *p = malloc(100); free(p); } static void f1() { void *p = malloc(100); f3(); free(p); } static void f2() { void *p = malloc(100); free(p); } TEST_BEGIN(test_prof_log_many_traces) { test_skip_if(!config_prof); assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when starting logging"); int i; assert_rep(); assert_log_empty(); for (i = 0; i < N_PARAM; i++) { assert_rep(); f1(); assert_rep(); f2(); assert_rep(); f3(); assert_rep(); } /* * There should be 8 total backtraces: two for malloc/free in f1(), * two for malloc/free in f2(), two for malloc/free in f3(), and then * two for malloc/free in f1()'s call to f3(). 
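* Backtraces are keyed on the full call stack, which is why f3()'s allocation site counts twice: once when called directly from the test and once when reached through f1().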
*/ assert_zu_eq(prof_log_bt_count(), 8, "Wrong number of backtraces given sample workload"); assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when stopping logging"); } TEST_END int main(void) { prof_log_dummy_set(true); return test_no_reentrancy( test_prof_log_many_logs, test_prof_log_many_traces, test_prof_log_many_threads); } jemalloc-sys-0.3.2/rep/test/unit/prof_log.sh010064400007650000024000000001471344617474100172410ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,lg_prof_sample:0" fi jemalloc-sys-0.3.2/rep/test/unit/prof_reset.c010064400007650000024000000164671344617474100174260ustar0000000000000000#include "test/jemalloc_test.h" static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return fd; } static void set_prof_active(bool active) { assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure"); } static size_t get_lg_prof_sample(void) { size_t lg_prof_sample; size_t sz = sizeof(size_t); assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); return lg_prof_sample; } static void do_prof_reset(size_t lg_prof_sample) { assert_d_eq(mallctl("prof.reset", NULL, NULL, (void *)&lg_prof_sample, sizeof(size_t)), 0, "Unexpected mallctl failure while resetting profile data"); assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), "Expected profile sample rate change"); } TEST_BEGIN(test_prof_reset_basic) { size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; size_t sz; unsigned i; test_skip_if(!config_prof); sz = sizeof(size_t); assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); assert_zu_eq(lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); /* Test simple resets. */ for (i = 0; i < 2; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure while resetting profile data"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected profile sample rate change"); } /* Test resets with prof.lg_sample changes. */ lg_prof_sample_next = 1; for (i = 0; i < 2; i++) { do_prof_reset(lg_prof_sample_next); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample, lg_prof_sample_next, "Expected profile sample rate change"); lg_prof_sample_next = lg_prof_sample_orig; } /* Make sure the test code restored prof.lg_sample. 
*/ lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); } TEST_END bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { prof_dump_header_intercepted = true; memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); return false; } TEST_BEGIN(test_prof_reset_cleanup) { void *p; prof_dump_header_t *prof_dump_header_orig; test_skip_if(!config_prof); set_prof_active(true); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header_orig = prof_dump_header; prof_dump_header = prof_dump_header_intercept; assert_false(prof_dump_header_intercepted, "Unexpected intercept"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_true(prof_dump_header_intercepted, "Expected intercept"); assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation"); assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header = prof_dump_header_orig; dallocx(p, 0); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); set_prof_active(false); } TEST_END #define NTHREADS 4 #define NALLOCS_PER_THREAD (1U << 13) #define OBJ_RING_BUF_COUNT 1531 #define RESET_INTERVAL (1U << 10) #define DUMP_INTERVAL 3677 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; unsigned i; void *objs[OBJ_RING_BUF_COUNT]; memset(objs, 0, sizeof(objs)); for (i = 0; i < NALLOCS_PER_THREAD; i++) { if (i % RESET_INTERVAL == 0) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile " "data"); } if (i % DUMP_INTERVAL == 0) { assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); } { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); assert_ptr_not_null(*pp, "Unexpected btalloc() failure"); } } /* Clean up any remaining objects. 
*/ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } } return NULL; } TEST_BEGIN(test_prof_reset) { size_t lg_prof_sample_orig; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; size_t bt_count, tdata_count; test_skip_if(!config_prof); bt_count = prof_bt_count(); assert_zu_eq(bt_count, 0, "Unexpected pre-existing backtraces"); tdata_count = prof_tdata_count(); lg_prof_sample_orig = get_lg_prof_sample(); do_prof_reset(5); set_prof_active(true); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } assert_zu_eq(prof_bt_count(), bt_count, "Unexpected backtrace count change"); assert_zu_eq(prof_tdata_count(), tdata_count, "Unexpected remaining tdata structures"); set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NTHREADS #undef NALLOCS_PER_THREAD #undef OBJ_RING_BUF_COUNT #undef RESET_INTERVAL #undef DUMP_INTERVAL /* Test sampling at the same allocation site across resets. */ #define NITER 10 TEST_BEGIN(test_xallocx) { size_t lg_prof_sample_orig; unsigned i; void *ptrs[NITER]; test_skip_if(!config_prof); lg_prof_sample_orig = get_lg_prof_sample(); set_prof_active(true); /* Reset profiling. */ do_prof_reset(0); for (i = 0; i < NITER; i++) { void *p; size_t sz, nsz; /* Reset profiling. */ do_prof_reset(0); /* Allocate small object (which will be promoted). */ p = ptrs[i] = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); /* Reset profiling. */ do_prof_reset(0); /* Perform successful xallocx(). */ sz = sallocx(p, 0); assert_zu_eq(xallocx(p, sz, 0, 0), sz, "Unexpected xallocx() failure"); /* Perform unsuccessful xallocx(). */ nsz = nallocx(sz+1, 0); assert_zu_eq(xallocx(p, nsz, 0, 0), sz, "Unexpected xallocx() success"); } for (i = 0; i < NITER; i++) { /* dallocx. */ dallocx(ptrs[i], 0); } set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NITER int main(void) { /* Intercept dumping prior to running any tests.
*/ prof_dump_open = prof_dump_open_intercept; return test_no_reentrancy( test_prof_reset_basic, test_prof_reset_cleanup, test_prof_reset, test_xallocx); } jemalloc-sys-0.3.2/rep/test/unit/prof_reset.sh010064400007650000024000000001711344617474100175770ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0" fi jemalloc-sys-0.3.2/rep/test/unit/prof_tctx.c010064400007650000024000000024231344617474100172510ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_prof_realloc) { tsdn_t *tsdn; int flags; void *p, *q; prof_tctx_t *tctx_p, *tctx_q; uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3; test_skip_if(!config_prof); tsdn = tsdn_fetch(); flags = MALLOCX_TCACHE_NONE; prof_cnt_all(&curobjs_0, NULL, NULL, NULL); p = mallocx(1024, flags); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tctx_p = prof_tctx_get(tsdn, p, NULL); assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U, "Expected valid tctx"); prof_cnt_all(&curobjs_1, NULL, NULL, NULL); assert_u64_eq(curobjs_0 + 1, curobjs_1, "Allocation should have increased sample size"); q = rallocx(p, 2048, flags); assert_ptr_not_null(q, "Unexpected rallocx() failure"); assert_ptr_ne(p, q, "Expected move"); tctx_q = prof_tctx_get(tsdn, q, NULL); assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U, "Expected valid tctx"); prof_cnt_all(&curobjs_2, NULL, NULL, NULL); assert_u64_eq(curobjs_1, curobjs_2, "Reallocation should not have changed sample size"); dallocx(q, flags); prof_cnt_all(&curobjs_3, NULL, NULL, NULL); assert_u64_eq(curobjs_0, curobjs_3, "Sample size should have returned to base level"); } TEST_END int main(void) { return test_no_reentrancy( test_prof_realloc); } jemalloc-sys-0.3.2/rep/test/unit/prof_tctx.sh010064400007650000024000000001471344617474100174420ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,lg_prof_sample:0" fi jemalloc-sys-0.3.2/rep/test/unit/prof_thread_name.c010064400007650000024000000062151344617474100205410ustar0000000000000000#include "test/jemalloc_test.h" static void mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, int line) { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); assert_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected thread.prof.name value", func, line); } #define mallctl_thread_name_get(a) \ mallctl_thread_name_get_impl(a, __func__, __LINE__) static void mallctl_thread_name_set_impl(const char *thread_name, const char *func, int line) { assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), 0, "%s():%d: Unexpected mallctl failure writing thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); } #define mallctl_thread_name_set(a) \ mallctl_thread_name_set_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_thread_name_validation) { const char *thread_name; test_skip_if(!config_prof); mallctl_thread_name_get(""); mallctl_thread_name_set("hi there"); /* NULL input shouldn't be allowed. */ thread_name = NULL; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* '\n' shouldn't be allowed.
*/ thread_name = "hi\nthere"; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* Simultaneous read/write shouldn't be allowed. */ { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, (void *)&thread_name, sizeof(thread_name)), EPERM, "Unexpected mallctl result writing \"%s\" to " "thread.prof.name", thread_name); } mallctl_thread_name_set(""); } TEST_END #define NTHREADS 4 #define NRESET 25 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; char thread_name[16] = ""; unsigned i; malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); mallctl_thread_name_get(""); mallctl_thread_name_set(thread_name); for (i = 0; i < NRESET; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); mallctl_thread_name_get(thread_name); } mallctl_thread_name_set(thread_name); mallctl_thread_name_set(""); return NULL; } TEST_BEGIN(test_prof_thread_name_threaded) { thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } } TEST_END #undef NTHREADS #undef NRESET int main(void) { return test( test_prof_thread_name_validation, test_prof_thread_name_threaded); } jemalloc-sys-0.3.2/rep/test/unit/prof_thread_name.sh010064400007650000024000000001501344617474100207210ustar0000000000000000#!/bin/sh if [ "x${enable_prof}" = "x1" ] ; then export MALLOC_CONF="prof:true,prof_active:false" fi jemalloc-sys-0.3.2/rep/test/unit/ql.c010064400007650000024000000106671344617474100156660ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ql.h" /* Number of ring entries, in [2..26]. 
*/ #define NENTRIES 9 typedef struct list_s list_t; typedef ql_head(list_t) list_head_t; struct list_s { ql_elm(list_t) link; char id; }; static void test_empty_list(list_head_t *head) { list_t *t; unsigned i; assert_ptr_null(ql_first(head), "Unexpected element for empty list"); assert_ptr_null(ql_last(head, link), "Unexpected element for empty list"); i = 0; ql_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); i = 0; ql_reverse_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); } TEST_BEGIN(test_ql_empty) { list_head_t head; ql_new(&head); test_empty_list(&head); } TEST_END static void init_entries(list_t *entries, unsigned nentries) { unsigned i; for (i = 0; i < nentries; i++) { entries[i].id = 'a' + i; ql_elm_new(&entries[i], link); } } static void test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { list_t *t; unsigned i; assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, "Element id mismatch"); i = 0; ql_foreach(t, head, link) { assert_c_eq(t->id, entries[i].id, "Element id mismatch"); i++; } i = 0; ql_reverse_foreach(t, head, link) { assert_c_eq(t->id, entries[nentries-i-1].id, "Element id mismatch"); i++; } for (i = 0; i < nentries-1; i++) { t = ql_next(head, &entries[i], link); assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); } assert_ptr_null(ql_next(head, &entries[nentries-1], link), "Unexpected element"); assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); for (i = 1; i < nentries; i++) { t = ql_prev(head, &entries[i], link); assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); } } TEST_BEGIN(test_ql_tail_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); } test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_tail_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, entries, NENTRIES-i); ql_tail_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_head_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); } test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_head_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, &entries[i], NENTRIES-i); ql_head_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_insert) { list_head_t head; list_t entries[8]; list_t *a, *b, *c, *d, *e, *f, *g, *h; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); a = &entries[0]; b = &entries[1]; c = &entries[2]; d = &entries[3]; e = &entries[4]; f = &entries[5]; g = &entries[6]; h = &entries[7]; /* * ql_remove(), ql_before_insert(), and ql_after_insert() are used * internally by other macros that are already 
tested, so there's no * need to test them completely. However, insertion/deletion from the * middle of lists is not otherwise tested; do so here. */ ql_tail_insert(&head, f, link); ql_before_insert(&head, f, b, link); ql_before_insert(&head, f, c, link); ql_after_insert(f, h, link); ql_after_insert(f, g, link); ql_before_insert(&head, b, a, link); ql_after_insert(c, d, link); ql_before_insert(&head, f, e, link); test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); } TEST_END int main(void) { return test( test_ql_empty, test_ql_tail_insert, test_ql_tail_remove, test_ql_head_insert, test_ql_head_remove, test_ql_insert); } jemalloc-sys-0.3.2/rep/test/unit/qr.c010064400007650000024000000122301344617474100156600ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/qr.h" /* Number of ring entries, in [2..26]. */ #define NENTRIES 9 /* Split index, in [1..NENTRIES). */ #define SPLIT_INDEX 5 typedef struct ring_s ring_t; struct ring_s { qr(ring_t) link; char id; }; static void init_entries(ring_t *entries) { unsigned i; for (i = 0; i < NENTRIES; i++) { qr_new(&entries[i], link); entries[i].id = 'a' + i; } } static void test_independent_entries(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { j++; } assert_u_eq(j, 1, "Iteration over single-element ring should visit precisely " "one element"); } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { j++; } assert_u_eq(j, 1, "Iteration over single-element ring should visit precisely " "one element"); } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_ptr_eq(t, &entries[i], "Next element in single-element ring should be same as " "current element"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_ptr_eq(t, &entries[i], "Previous element in single-element ring should be same as " "current element"); } } TEST_BEGIN(test_qr_one) { ring_t entries[NENTRIES]; init_entries(entries); test_independent_entries(entries); } TEST_END static void test_entries_ring(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, "Element id mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, "Element id mismatch"); } } TEST_BEGIN(test_qr_after_insert) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); } test_entries_ring(entries); } TEST_END TEST_BEGIN(test_qr_remove) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); } for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[i+j].id, "Element id mismatch"); j++; } j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, "Element id mismatch"); j++; } qr_remove(&entries[i], link); } test_independent_entries(entries); } TEST_END 
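/* qr_before_insert() links each new entry ahead of its predecessor, so the ring is built in reverse; the expected-id arithmetic below mirrors the qr_after_insert() cases accordingly. */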
TEST_BEGIN(test_qr_before_insert) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_before_insert(&entries[i - 1], &entries[i], link); } for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(NENTRIES+i-j) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, "Element id mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, "Element id mismatch"); } } TEST_END static void test_split_entries(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { if (i < SPLIT_INDEX) { assert_c_eq(t->id, entries[(i+j) % SPLIT_INDEX].id, "Element id mismatch"); } else { assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, "Element id mismatch"); } j++; } } } TEST_BEGIN(test_qr_meld_split) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); } qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); qr_split(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); qr_meld(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); } TEST_END int main(void) { return test( test_qr_one, test_qr_after_insert, test_qr_remove, test_qr_before_insert, test_qr_meld_split); } jemalloc-sys-0.3.2/rep/test/unit/rb.c010064400007650000024000000174101344617474100156460ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/rb.h" #define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ a_type *rbp_bh_t; \ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \ NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \ rbp_bh_t)) { \ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ (r_height)++; \ } \ } \ } while (0) typedef struct node_s node_t; struct node_s { #define NODE_MAGIC 0x9823af7e uint32_t magic; rb_node(node_t) link; uint64_t key; }; static int node_cmp(const node_t *a, const node_t *b) { int ret; assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); ret = (a->key > b->key) - (a->key < b->key); if (ret == 0) { /* * Duplicates are not allowed in the tree, so force an * arbitrary ordering for non-identical items with equal keys. 
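* Comparing node addresses provides such an ordering: it is stable for the nodes' lifetimes and never reports two distinct nodes as equal.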
*/ ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } return ret; } typedef rb_tree(node_t) tree_t; rb_gen(static, tree_, tree_t, node_t, link, node_cmp); TEST_BEGIN(test_rb_empty) { tree_t tree; node_t key; tree_new(&tree); assert_true(tree_empty(&tree), "Tree should be empty"); assert_ptr_null(tree_first(&tree), "Unexpected node"); assert_ptr_null(tree_last(&tree), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); } TEST_END static unsigned tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { unsigned ret = 0; node_t *left_node; node_t *right_node; if (node == NULL) { return ret; } left_node = rbtn_left_get(node_t, link, node); right_node = rbtn_right_get(node_t, link, node); if (!rbtn_red_get(node_t, link, node)) { black_depth++; } /* Red nodes must be interleaved with black nodes. */ if (rbtn_red_get(node_t, link, node)) { if (left_node != NULL) { assert_false(rbtn_red_get(node_t, link, left_node), "Node should be black"); } if (right_node != NULL) { assert_false(rbtn_red_get(node_t, link, right_node), "Node should be black"); } } /* Self. */ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Left subtree. */ if (left_node != NULL) { ret += tree_recurse(left_node, black_height, black_depth); } else { ret += (black_depth != black_height); } /* Right subtree. */ if (right_node != NULL) { ret += tree_recurse(right_node, black_height, black_depth); } else { ret += (black_depth != black_height); } return ret; } static node_t * tree_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; node_t *search_node; assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Test rb_search(). */ search_node = tree_search(tree, node); assert_ptr_eq(search_node, node, "tree_search() returned unexpected node"); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); assert_ptr_eq(search_node, node, "tree_nsearch() returned unexpected node"); /* Test rb_psearch(). */ search_node = tree_psearch(tree, node); assert_ptr_eq(search_node, node, "tree_psearch() returned unexpected node"); (*i)++; return NULL; } static unsigned tree_iterate(tree_t *tree) { unsigned i; i = 0; tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); return i; } static unsigned tree_iterate_reverse(tree_t *tree) { unsigned i; i = 0; tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); return i; } static void node_remove(tree_t *tree, node_t *node, unsigned nnodes) { node_t *search_node; unsigned black_height, imbalances; tree_remove(tree, node); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); if (search_node != NULL) { assert_u64_ge(search_node->key, node->key, "Key ordering error"); } /* Test rb_psearch(). 
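* (tree_psearch() performs a predecessor search, so any node found after the removal must compare at or below the removed key, as asserted below.)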
*/ search_node = tree_psearch(tree, node); if (search_node != NULL) { assert_u64_le(search_node->key, node->key, "Key ordering error"); } node->magic = 0; rbtn_black_height(node_t, link, tree, black_height); imbalances = tree_recurse(tree->rbt_root, black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(tree), nnodes-1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(tree), nnodes-1, "Unexpected node iteration count"); } static node_t * remove_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_next(tree, node); node_remove(tree, node, *nnodes); return ret; } static node_t * remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_prev(tree, node); node_remove(tree, node, *nnodes); return ret; } static void destroy_cb(node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; assert_u_gt(*nnodes, 0, "Destruction removed too many nodes"); (*nnodes)--; } TEST_BEGIN(test_rb_random) { #define NNODES 25 #define NBAGS 250 #define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; tree_t tree; node_t nodes[NNODES]; unsigned i, j, k, black_height, imbalances; sfmt = init_gen_rand(SEED); for (i = 0; i < NBAGS; i++) { switch (i) { case 0: /* Insert in order. */ for (j = 0; j < NNODES; j++) { bag[j] = j; } break; case 1: /* Insert in reverse order. */ for (j = 0; j < NNODES; j++) { bag[j] = NNODES - j - 1; } break; default: for (j = 0; j < NNODES; j++) { bag[j] = gen_rand64_range(sfmt, NNODES); } } for (j = 1; j <= NNODES; j++) { /* Initialize tree and nodes. */ tree_new(&tree); for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; } /* Insert nodes. */ for (k = 0; k < j; k++) { tree_insert(&tree, &nodes[k]); rbtn_black_height(node_t, link, &tree, black_height); imbalances = tree_recurse(tree.rbt_root, black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(&tree), k+1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(&tree), k+1, "Unexpected node iteration count"); assert_false(tree_empty(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_first(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_last(&tree), "Tree should not be empty"); tree_next(&tree, &nodes[k]); tree_prev(&tree, &nodes[k]); } /* Remove nodes. 
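* Five removal strategies are cycled across bags: forward order, reverse order, forward iteration, reverse iteration, and wholesale tree_destroy().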
*/ switch (i % 5) { case 0: for (k = 0; k < j; k++) { node_remove(&tree, &nodes[k], j - k); } break; case 1: for (k = j; k > 0; k--) { node_remove(&tree, &nodes[k-1], k); } break; case 2: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_iter(&tree, start, remove_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } case 3: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_reverse_iter(&tree, start, remove_reverse_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } case 4: { unsigned nnodes = j; tree_destroy(&tree, destroy_cb, &nnodes); assert_u_eq(nnodes, 0, "Destruction terminated early"); break; } default: not_reached(); } } } fini_gen_rand(sfmt); #undef NNODES #undef NBAGS #undef SEED } TEST_END int main(void) { return test( test_rb_empty, test_rb_random); } jemalloc-sys-0.3.2/rep/test/unit/retained.c010064400007650000024000000113741344617474100170410ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/spin.h" static unsigned arena_ind; static size_t sz; static size_t esz; #define NEPOCHS 8 #define PER_THD_NALLOCS 1 static atomic_u_t epoch; static atomic_u_t nfinished; static unsigned do_arena_create(extent_hooks_t *h) { unsigned arena_ind; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, "Unexpected mallctl() failure"); return arena_ind; } static void do_arena_destroy(unsigned arena_ind) { size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } static void do_refresh(void) { uint64_t epoch = 1; assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); } static size_t do_get_size_impl(const char *cmd, unsigned arena_ind) { size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); size_t z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = arena_ind; size_t size; assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd); return size; } static size_t do_get_active(unsigned arena_ind) { return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE; } static size_t do_get_mapped(unsigned arena_ind) { return do_get_size_impl("stats.arenas.0.mapped", arena_ind); } static void * thd_start(void *arg) { for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) { /* Busy-wait for next epoch. */ unsigned cur_epoch; spin_t spinner = SPIN_INITIALIZER; while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) != next_epoch) { spin_adaptive(&spinner); } assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch"); /* * Allocate. The main thread will reset the arena, so there's * no need to deallocate. */ for (unsigned i = 0; i < PER_THD_NALLOCS; i++) { void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE ); assert_ptr_not_null(p, "Unexpected mallocx() failure\n"); } /* Let the main thread know we've finished this iteration. 
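* The release increment pairs with the main thread's acquire loads of nfinished, ensuring the allocations above are visible before the epoch advances.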
*/ atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE); } return NULL; } TEST_BEGIN(test_retained) { test_skip_if(!config_stats); arena_ind = do_arena_create(NULL); sz = nallocx(HUGEPAGE, 0); esz = sz + sz_large_pad; atomic_store_u(&epoch, 0, ATOMIC_RELAXED); unsigned nthreads = ncpus * 2; VARIABLE_ARRAY(thd_t, threads, nthreads); for (unsigned i = 0; i < nthreads; i++) { thd_create(&threads[i], thd_start, NULL); } for (unsigned e = 1; e < NEPOCHS; e++) { atomic_store_u(&nfinished, 0, ATOMIC_RELEASE); atomic_store_u(&epoch, e, ATOMIC_RELEASE); /* Wait for threads to finish allocating. */ spin_t spinner = SPIN_INITIALIZER; while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) { spin_adaptive(&spinner); } /* * Assert that retained is no more than the sum of size classes * that should have been used to satisfy the worker threads' * requests, discounting per growth fragmentation. */ do_refresh(); size_t allocated = esz * nthreads * PER_THD_NALLOCS; size_t active = do_get_active(arena_ind); assert_zu_le(allocated, active, "Unexpected active memory"); size_t mapped = do_get_mapped(arena_ind); assert_zu_le(active, mapped, "Unexpected mapped memory"); arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false); size_t usable = 0; size_t fragmented = 0; for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind < arena->extent_grow_next; pind++) { size_t psz = sz_pind2sz(pind); size_t psz_fragmented = psz % esz; size_t psz_usable = psz - psz_fragmented; /* * Only consider size classes that wouldn't be skipped. */ if (psz_usable > 0) { assert_zu_lt(usable, allocated, "Excessive retained memory " "(%#zx[+%#zx] > %#zx)", usable, psz_usable, allocated); fragmented += psz_fragmented; usable += psz_usable; } } /* * Clean up arena. Destroying and recreating the arena * is simpler than specifying extent hooks that deallocate * (rather than retaining) during reset. */ do_arena_destroy(arena_ind); assert_u_eq(do_arena_create(NULL), arena_ind, "Unexpected arena index"); } for (unsigned i = 0; i < nthreads; i++) { thd_join(threads[i], NULL); } do_arena_destroy(arena_ind); } TEST_END int main(void) { return test( test_retained); } jemalloc-sys-0.3.2/rep/test/unit/rtree.c010064400007650000024000000153421344617474100163660ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/rtree.h" rtree_node_alloc_t *rtree_node_alloc_orig; rtree_node_dalloc_t *rtree_node_dalloc_orig; rtree_leaf_alloc_t *rtree_leaf_alloc_orig; rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig; /* Potentially too large to safely place on the stack.
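* Static storage also gives the alloc/dalloc intercepts below a fixed address against which to recognize the test rtree.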
*/ rtree_t test_rtree; static rtree_node_elm_t * rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { rtree_node_elm_t *node; if (rtree != &test_rtree) { return rtree_node_alloc_orig(tsdn, rtree, nelms); } malloc_mutex_unlock(tsdn, &rtree->init_lock); node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)); assert_ptr_not_null(node, "Unexpected calloc() failure"); malloc_mutex_lock(tsdn, &rtree->init_lock); return node; } static void rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { if (rtree != &test_rtree) { rtree_node_dalloc_orig(tsdn, rtree, node); return; } free(node); } static rtree_leaf_elm_t * rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { rtree_leaf_elm_t *leaf; if (rtree != &test_rtree) { return rtree_leaf_alloc_orig(tsdn, rtree, nelms); } malloc_mutex_unlock(tsdn, &rtree->init_lock); leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t)); assert_ptr_not_null(leaf, "Unexpected calloc() failure"); malloc_mutex_lock(tsdn, &rtree->init_lock); return leaf; } static void rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { if (rtree != &test_rtree) { rtree_leaf_dalloc_orig(tsdn, rtree, leaf); return; } free(leaf); } TEST_BEGIN(test_rtree_read_empty) { tsdn_t *tsdn; tsdn = tsdn_fetch(); rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, false), "rtree_extent_read() should return NULL for empty tree"); rtree_delete(tsdn, rtree); } TEST_END TEST_BEGIN(test_rtree_extrema) { extent_t extent_a, extent_b; extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false, sz_size2index(SC_LARGE_MINCLASS), 0, extent_state_active, false, false, true); extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, true); tsdn_t *tsdn = tsdn_fetch(); rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a, extent_szind_get(&extent_a), extent_slab_get(&extent_a)), "Unexpected rtree_write() failure"); rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE, extent_szind_get(&extent_a), extent_slab_get(&extent_a)); assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true), &extent_a, "rtree_extent_read() should return previously set value"); assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), &extent_b, extent_szind_get_maybe_invalid(&extent_b), extent_slab_get(&extent_b)), "Unexpected rtree_write() failure"); assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), true), &extent_b, "rtree_extent_read() should return previously set value"); rtree_delete(tsdn, rtree); } TEST_END TEST_BEGIN(test_rtree_bits) { tsdn_t *tsdn = tsdn_fetch(); uintptr_t keys[] = {PAGE, PAGE + 1, PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; extent_t extent; extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, true); rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i], &extent, SC_NSIZES,
false), "Unexpected rtree_write() failure"); for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[j], true), &extent, "rtree_extent_read() should return previously set " "value and ignore insignificant key bits; i=%u, " "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, j, keys[i], keys[j]); } assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, (((uintptr_t)2) << LG_PAGE), false), "Only leftmost rtree leaf should be set; i=%u", i); rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); } rtree_delete(tsdn, rtree); } TEST_END TEST_BEGIN(test_rtree_random) { #define NSET 16 #define SEED 42 sfmt_t *sfmt = init_gen_rand(SEED); tsdn_t *tsdn = tsdn_fetch(); uintptr_t keys[NSET]; rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); extent_t extent; extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, true); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); for (unsigned i = 0; i < NSET; i++) { keys[i] = (uintptr_t)gen_rand64(sfmt); rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx, keys[i], false, true); assert_ptr_not_null(elm, "Unexpected rtree_leaf_elm_lookup() failure"); rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES, false); assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), &extent, "rtree_extent_read() should return previously set value"); } for (unsigned i = 0; i < NSET; i++) { assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), &extent, "rtree_extent_read() should return previously set value, " "i=%u", i); } for (unsigned i = 0; i < NSET; i++) { rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), "rtree_extent_read() should return previously set value"); } for (unsigned i = 0; i < NSET; i++) { assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, keys[i], true), "rtree_extent_read() should return previously set value"); } rtree_delete(tsdn, rtree); fini_gen_rand(sfmt); #undef NSET #undef SEED } TEST_END int main(void) { rtree_node_alloc_orig = rtree_node_alloc; rtree_node_alloc = rtree_node_alloc_intercept; rtree_node_dalloc_orig = rtree_node_dalloc; rtree_node_dalloc = rtree_node_dalloc_intercept; rtree_leaf_alloc_orig = rtree_leaf_alloc; rtree_leaf_alloc = rtree_leaf_alloc_intercept; rtree_leaf_dalloc_orig = rtree_leaf_dalloc; rtree_leaf_dalloc = rtree_leaf_dalloc_intercept; return test( test_rtree_read_empty, test_rtree_extrema, test_rtree_bits, test_rtree_random); } jemalloc-sys-0.3.2/rep/test/unit/sc.c010064400007650000024000000016421344617474100156500ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_update_slab_size) { sc_data_t data; memset(&data, 0, sizeof(data)); sc_data_init(&data); sc_t *tiny = &data.sc[0]; size_t tiny_size = (ZU(1) << tiny->lg_base) + (ZU(tiny->ndelta) << tiny->lg_delta); size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1; sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big); assert_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages"); sc_data_update_slab_size(&data, 1, 10 * PAGE, 1); for (int i = 0; i < data.nbins; i++) { sc_t *sc = &data.sc[i]; size_t reg_size = (ZU(1) << sc->lg_base) + (ZU(sc->ndelta) << sc->lg_delta); if (reg_size <= PAGE) { assert_d_eq(sc->pgs, 1, "Ignored valid page size hint"); } else { assert_d_gt(sc->pgs, 1, "Allowed invalid page size hint"); } } } 
TEST_END int main(void) { return test( test_update_slab_size); } jemalloc-sys-0.3.2/rep/test/unit/seq.c010064400007650000024000000036051344617474100160340ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/seq.h" typedef struct data_s data_t; struct data_s { int arr[10]; }; static void set_data(data_t *data, int num) { for (int i = 0; i < 10; i++) { data->arr[i] = num; } } static void assert_data(data_t *data) { int num = data->arr[0]; for (int i = 0; i < 10; i++) { assert_d_eq(num, data->arr[i], "Data consistency error"); } } seq_define(data_t, data) typedef struct thd_data_s thd_data_t; struct thd_data_s { seq_data_t data; }; static void * seq_reader_thd(void *arg) { thd_data_t *thd_data = (thd_data_t *)arg; int iter = 0; data_t local_data; while (iter < 1000 * 1000 - 1) { bool success = seq_try_load_data(&local_data, &thd_data->data); if (success) { assert_data(&local_data); assert_d_le(iter, local_data.arr[0], "Seq read went back in time."); iter = local_data.arr[0]; } } return NULL; } static void * seq_writer_thd(void *arg) { thd_data_t *thd_data = (thd_data_t *)arg; data_t local_data; memset(&local_data, 0, sizeof(local_data)); for (int i = 0; i < 1000 * 1000; i++) { set_data(&local_data, i); seq_store_data(&thd_data->data, &local_data); } return NULL; } TEST_BEGIN(test_seq_threaded) { thd_data_t thd_data; memset(&thd_data, 0, sizeof(thd_data)); thd_t reader; thd_t writer; thd_create(&reader, seq_reader_thd, &thd_data); thd_create(&writer, seq_writer_thd, &thd_data); thd_join(reader, NULL); thd_join(writer, NULL); } TEST_END TEST_BEGIN(test_seq_simple) { data_t data; seq_data_t seq; memset(&seq, 0, sizeof(seq)); for (int i = 0; i < 1000 * 1000; i++) { set_data(&data, i); seq_store_data(&seq, &data); set_data(&data, 0); bool success = seq_try_load_data(&data, &seq); assert_b_eq(success, true, "Failed non-racing read"); assert_data(&data); } } TEST_END int main(void) { return test_no_reentrancy( test_seq_simple, test_seq_threaded); } jemalloc-sys-0.3.2/rep/test/unit/size_classes.c010064400007650000024000000156061344617474100177370ustar0000000000000000#include "test/jemalloc_test.h" static size_t get_max_size_class(void) { unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nlextents - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); return max_size_class; } TEST_BEGIN(test_size_classes) { size_t size_class, max_size_class; szind_t index, max_index; max_size_class = get_max_size_class(); max_index = sz_size2index(max_size_class); for (index = 0, size_class = sz_index2size(index); index < max_index || size_class < max_size_class; index++, size_class = sz_index2size(index)) { assert_true(index < max_index, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_true(size_class < max_size_class, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_u_eq(index, sz_size2index(size_class), "sz_size2index() does not reverse sz_index2size(): index=%u" " --> size_class=%zu --> index=%u --> size_class=%zu", index, size_class, sz_size2index(size_class), 
sz_index2size(sz_size2index(size_class))); assert_zu_eq(size_class, sz_index2size(sz_size2index(size_class)), "sz_index2size() does not reverse sz_size2index(): index=%u" " --> size_class=%zu --> index=%u --> size_class=%zu", index, size_class, sz_size2index(size_class), sz_index2size(sz_size2index(size_class))); assert_u_eq(index+1, sz_size2index(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (index > 0) ? sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class-1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class), "sz_s2u() does not compute same size class"); assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1), "sz_s2u() does not round up to next size class"); } assert_u_eq(index, sz_size2index(sz_index2size(index)), "sz_size2index() does not reverse sz_index2size()"); assert_zu_eq(max_size_class, sz_index2size( sz_size2index(max_size_class)), "sz_index2size() does not reverse sz_size2index()"); assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class-1), "sz_s2u() does not round up to size class"); assert_zu_eq(size_class, sz_s2u(size_class), "sz_s2u() does not compute same size class"); } TEST_END TEST_BEGIN(test_psize_classes) { size_t size_class, max_psz; pszind_t pind, max_pind; max_psz = get_max_size_class() + PAGE; max_pind = sz_psz2ind(max_psz); for (pind = 0, size_class = sz_pind2sz(pind); pind < max_pind || size_class < max_psz; pind++, size_class = sz_pind2sz(pind)) { assert_true(pind < max_pind, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); assert_true(size_class < max_psz, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); assert_u_eq(pind, sz_psz2ind(size_class), "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, size_class, sz_psz2ind(size_class), sz_pind2sz(sz_psz2ind(size_class))); assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)), "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, size_class, sz_psz2ind(size_class), sz_pind2sz(sz_psz2ind(size_class))); if (size_class == SC_LARGE_MAXCLASS) { assert_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1), "Next size_class does not round up properly"); } else { assert_u_eq(pind + 1, sz_psz2ind(size_class + 1), "Next size_class does not round up properly"); } assert_zu_eq(size_class, (pind > 0) ? 
sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1), "sz_psz2u() does not round up to size class"); assert_zu_eq(size_class, sz_psz2u(size_class-1), "sz_psz2u() does not round up to size class"); assert_zu_eq(size_class, sz_psz2u(size_class), "sz_psz2u() does not compute same size class"); assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1), "sz_psz2u() does not round up to next size class"); } assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)), "sz_psz2ind() does not reverse sz_pind2sz()"); assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)), "sz_pind2sz() does not reverse sz_psz2ind()"); assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1), "sz_psz2u() does not round up to size class"); assert_zu_eq(size_class, sz_psz2u(size_class-1), "sz_psz2u() does not round up to size class"); assert_zu_eq(size_class, sz_psz2u(size_class), "sz_psz2u() does not compute same size class"); } TEST_END TEST_BEGIN(test_overflow) { size_t max_size_class, max_psz; max_size_class = get_max_size_class(); max_psz = max_size_class + PAGE; assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES, "sz_size2index() should return NSIZES on overflow"); assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES, "sz_size2index() should return NSIZES on overflow"); assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES, "sz_size2index() should return NSIZES on overflow"); assert_zu_eq(sz_s2u(max_size_class+1), 0, "sz_s2u() should return 0 for unsupported size"); assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0, "sz_s2u() should return 0 for unsupported size"); assert_zu_eq(sz_s2u(SIZE_T_MAX), 0, "sz_s2u() should return 0 on overflow"); assert_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES, "sz_psz2ind() should return NPSIZES on overflow"); assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES, "sz_psz2ind() should return NPSIZES on overflow"); assert_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES, "sz_psz2ind() should return NPSIZES on overflow"); assert_zu_eq(sz_psz2u(max_size_class+1), max_psz, "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported" " size"); assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz, "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported " "size"); assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz, "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow"); } TEST_END int main(void) { return test( test_size_classes, test_psize_classes, test_overflow); } jemalloc-sys-0.3.2/rep/test/unit/slab.c010064400007650000024000000016101344617474100161570ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_arena_slab_regind) { szind_t binind; for (binind = 0; binind < SC_NBINS; binind++) { size_t regind; extent_t slab; const bin_info_t *bin_info = &bin_infos[binind]; extent_init(&slab, NULL, mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true, binind, 0, extent_state_active, false, true, true); assert_ptr_not_null(extent_addr_get(&slab), "Unexpected malloc() failure"); for (regind = 0; regind < bin_info->nregs; regind++) { void *reg = (void *)((uintptr_t)extent_addr_get(&slab) + (bin_info->reg_size * regind)); assert_zu_eq(arena_slab_regind(&slab, binind, reg), regind, "Incorrect region index computed for size %zu", bin_info->reg_size); } free(extent_addr_get(&slab)); } } TEST_END int main(void) { return test( test_arena_slab_regind); } jemalloc-sys-0.3.2/rep/test/unit/smoothstep.c010064400007650000024000000052541344617474100174530ustar0000000000000000#include "test/jemalloc_test.h" static const uint64_t smoothstep_tab[] = { #define STEP(step, h, x, y) \ h, 
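/* SMOOTHSTEP expands to one STEP(step, h, x, y) invocation per table entry; with STEP defined to keep only h, this initializer extracts the fixed-point function values. */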
SMOOTHSTEP #undef STEP }; TEST_BEGIN(test_smoothstep_integral) { uint64_t sum, min, max; unsigned i; /* * The integral of smoothstep in the [0..1] range equals 1/2. Verify * that the fixed point representation's integral is no more than * rounding error distant from 1/2. Regarding rounding, each table * element is rounded down to the nearest fixed point value, so the * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps. */ sum = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { sum += smoothstep_tab[i]; } max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); min = max - SMOOTHSTEP_NSTEPS; assert_u64_ge(sum, min, "Integral too small, even accounting for truncation"); assert_u64_le(sum, max, "Integral exceeds 1/2"); if (false) { malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n", max - sum, SMOOTHSTEP_NSTEPS); } } TEST_END TEST_BEGIN(test_smoothstep_monotonic) { uint64_t prev_h; unsigned i; /* * The smoothstep function is monotonic in [0..1], i.e. its slope is * non-negative. In practice we want to parametrize table generation * such that piecewise slope is greater than zero, but do not require * that here. */ prev_h = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { uint64_t h = smoothstep_tab[i]; assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i); prev_h = h; } assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1], (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1"); } TEST_END TEST_BEGIN(test_smoothstep_slope) { uint64_t prev_h, prev_delta; unsigned i; /* * The smoothstep slope strictly increases until x=0.5, and then * strictly decreases until x=1.0. Verify the slightly weaker * requirement of monotonicity, so that inadequate table precision does * not cause false test failures. */ prev_h = 0; prev_delta = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) { uint64_t h = smoothstep_tab[i]; uint64_t delta = h - prev_h; assert_u64_ge(delta, prev_delta, "Slope must monotonically increase in 0.0 <= x <= 0.5, " "i=%u", i); prev_h = h; prev_delta = delta; } prev_h = KQU(1) << SMOOTHSTEP_BFP; prev_delta = 0; for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { uint64_t h = smoothstep_tab[i]; uint64_t delta = prev_h - h; assert_u64_ge(delta, prev_delta, "Slope must monotonically decrease in 0.5 <= x <= 1.0, " "i=%u", i); prev_h = h; prev_delta = delta; } } TEST_END int main(void) { return test( test_smoothstep_integral, test_smoothstep_monotonic, test_smoothstep_slope); } jemalloc-sys-0.3.2/rep/test/unit/spin.c010064400007650000024000000004051344617474100162100ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/spin.h" TEST_BEGIN(test_spin) { spin_t spinner = SPIN_INITIALIZER; for (unsigned i = 0; i < 100; i++) { spin_adaptive(&spinner); } } TEST_END int main(void) { return test( test_spin); } jemalloc-sys-0.3.2/rep/test/unit/stats.c010064400007650000024000000301601344617474100163760ustar0000000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_stats_summary) { size_t sz, allocated, active, resident, mapped; int expected = config_stats ? 
0 : ENOENT; sz = sizeof(size_t); assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_le(allocated, active, "allocated should be no larger than active"); assert_zu_lt(active, resident, "active should be less than resident"); assert_zu_lt(active, mapped, "active should be less than mapped"); } } TEST_END TEST_BEGIN(test_stats_large) { void *p; uint64_t epoch; size_t allocated; uint64_t nmalloc, ndalloc, nrequests; size_t sz; int expected = config_stats ? 0 : ENOENT; p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.large.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_le(nmalloc, nrequests, "nmalloc should be no larger than nrequests"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_summary) { void *little, *large; uint64_t epoch; size_t sz; int expected = config_stats ? 0 : ENOENT; size_t mapped; uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0)); assert_ptr_not_null(little, "Unexpected mallocx() failure"); large = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); assert_ptr_not_null(large, "Unexpected mallocx() failure"); dallocx(little, 0); dallocx(large, 0); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), opt_tcache ?
0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.dirty_npurge", (void *)&dirty_npurge, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise", (void *)&dirty_nmadvise, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.dirty_purged", (void *)&dirty_purged, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge", (void *)&muzzy_npurge, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise", (void *)&muzzy_nmadvise, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.muzzy_purged", (void *)&muzzy_purged, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { if (!background_thread_enabled()) { assert_u64_gt(dirty_npurge + muzzy_npurge, 0, "At least one purge should have occurred"); } assert_u64_le(dirty_nmadvise, dirty_purged, "dirty_nmadvise should be no greater than dirty_purged"); assert_u64_le(muzzy_nmadvise, muzzy_purged, "muzzy_nmadvise should be no greater than muzzy_purged"); } } TEST_END void * thd_start(void *arg) { return NULL; } static void no_lazy_lock(void) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_BEGIN(test_stats_arenas_small) { void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; int expected = config_stats ? 0 : ENOENT; no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.small.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_large) { void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc; int expected = config_stats ?
0 : ENOENT; p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.large.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); } dallocx(p, 0); } TEST_END static void gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) { sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name); } TEST_BEGIN(test_stats_arenas_bins) { void *p; size_t sz, curslabs, curregs; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nslabs, nreslabs; int expected = config_stats ? 0 : ENOENT; /* Make sure allocation below isn't satisfied by tcache. */ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); unsigned arena_ind, old_arena_ind; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Arena creation failure"); sz = sizeof(arena_ind); assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, (void *)&arena_ind, sizeof(arena_ind)), 0, "Unexpected mallctl() failure"); p = malloc(bin_infos[0].reg_size); assert_ptr_not_null(p, "Unexpected malloc() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), opt_tcache ? 
0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); char cmd[128]; sz = sizeof(uint64_t); gen_mallctl_str(cmd, "nmalloc", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "ndalloc", arena_ind); assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nrequests", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); gen_mallctl_str(cmd, "curregs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); gen_mallctl_str(cmd, "nfills", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nflushes", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nslabs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); gen_mallctl_str(cmd, "nreslabs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); gen_mallctl_str(cmd, "curslabs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); assert_zu_gt(curregs, 0, "allocated should be greater than zero"); if (opt_tcache) { assert_u64_gt(nfills, 0, "At least one fill should have occurred"); assert_u64_gt(nflushes, 0, "At least one flush should have occurred"); } assert_u64_gt(nslabs, 0, "At least one slab should have been allocated"); assert_zu_gt(curslabs, 0, "At least one slab should be currently allocated"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_lextents) { void *p; uint64_t epoch, nmalloc, ndalloc; size_t curlextents, sz, hsize; int expected = config_stats ? 
0 : ENOENT; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); p = mallocx(hsize, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents", (void *)&curlextents, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(curlextents, 0, "At least one extent should be currently allocated"); } dallocx(p, 0); } TEST_END int main(void) { return test_no_reentrancy( test_stats_summary, test_stats_large, test_stats_arenas_summary, test_stats_arenas_small, test_stats_arenas_large, test_stats_arenas_bins, test_stats_arenas_lextents); } jemalloc-sys-0.3.2/rep/test/unit/stats_print.c010064400007650000024000000550511344617474100176200ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/util.h" typedef enum { TOKEN_TYPE_NONE, TOKEN_TYPE_ERROR, TOKEN_TYPE_EOI, TOKEN_TYPE_NULL, TOKEN_TYPE_FALSE, TOKEN_TYPE_TRUE, TOKEN_TYPE_LBRACKET, TOKEN_TYPE_RBRACKET, TOKEN_TYPE_LBRACE, TOKEN_TYPE_RBRACE, TOKEN_TYPE_COLON, TOKEN_TYPE_COMMA, TOKEN_TYPE_STRING, TOKEN_TYPE_NUMBER } token_type_t; typedef struct parser_s parser_t; typedef struct { parser_t *parser; token_type_t token_type; size_t pos; size_t len; size_t line; size_t col; } token_t; struct parser_s { bool verbose; char *buf; /* '\0'-terminated. */ size_t len; /* Number of characters preceding '\0' in buf. */ size_t pos; size_t line; size_t col; token_t token; }; static void token_init(token_t *token, parser_t *parser, token_type_t token_type, size_t pos, size_t len, size_t line, size_t col) { token->parser = parser; token->token_type = token_type; token->pos = pos; token->len = len; token->line = line; token->col = col; } static void token_error(token_t *token) { if (!token->parser->verbose) { return; } switch (token->token_type) { case TOKEN_TYPE_NONE: not_reached(); case TOKEN_TYPE_ERROR: malloc_printf("%zu:%zu: Unexpected character in token: ", token->line, token->col); break; default: malloc_printf("%zu:%zu: Unexpected token: ", token->line, token->col); break; } UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO, &token->parser->buf[token->pos], token->len); malloc_printf("\n"); } static void parser_init(parser_t *parser, bool verbose) { parser->verbose = verbose; parser->buf = NULL; parser->len = 0; parser->pos = 0; parser->line = 1; parser->col = 0; } static void parser_fini(parser_t *parser) { if (parser->buf != NULL) { dallocx(parser->buf, MALLOCX_TCACHE_NONE); } } static bool parser_append(parser_t *parser, const char *str) { size_t len = strlen(str); char *buf = (parser->buf == NULL) ? 
mallocx(len + 1, MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1, MALLOCX_TCACHE_NONE); if (buf == NULL) { return true; } memcpy(&buf[parser->len], str, len + 1); parser->buf = buf; parser->len += len; return false; } static bool parser_tokenize(parser_t *parser) { enum { STATE_START, STATE_EOI, STATE_N, STATE_NU, STATE_NUL, STATE_NULL, STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE, STATE_T, STATE_TR, STATE_TRU, STATE_TRUE, STATE_LBRACKET, STATE_RBRACKET, STATE_LBRACE, STATE_RBRACE, STATE_COLON, STATE_COMMA, STATE_CHARS, STATE_CHAR_ESCAPE, STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD, STATE_STRING, STATE_MINUS, STATE_LEADING_ZERO, STATE_DIGITS, STATE_DECIMAL, STATE_FRAC_DIGITS, STATE_EXP, STATE_EXP_SIGN, STATE_EXP_DIGITS, STATE_ACCEPT } state = STATE_START; size_t token_pos JEMALLOC_CC_SILENCE_INIT(0); size_t token_line JEMALLOC_CC_SILENCE_INIT(1); size_t token_col JEMALLOC_CC_SILENCE_INIT(0); assert_zu_le(parser->pos, parser->len, "Position is past end of buffer"); while (state != STATE_ACCEPT) { char c = parser->buf[parser->pos]; switch (state) { case STATE_START: token_pos = parser->pos; token_line = parser->line; token_col = parser->col; switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': break; case '\0': state = STATE_EOI; break; case 'n': state = STATE_N; break; case 'f': state = STATE_F; break; case 't': state = STATE_T; break; case '[': state = STATE_LBRACKET; break; case ']': state = STATE_RBRACKET; break; case '{': state = STATE_LBRACE; break; case '}': state = STATE_RBRACE; break; case ':': state = STATE_COLON; break; case ',': state = STATE_COMMA; break; case '"': state = STATE_CHARS; break; case '-': state = STATE_MINUS; break; case '0': state = STATE_LEADING_ZERO; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_EOI: token_init(&parser->token, parser, TOKEN_TYPE_EOI, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_N: switch (c) { case 'u': state = STATE_NU; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_NU: switch (c) { case 'l': state = STATE_NUL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_NUL: switch (c) { case 'l': state = STATE_NULL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_NULL: switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': case '\0': case '[': case ']': case '{': case '}': case ':': case ',': break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_NULL, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_F: switch (c) { case 'a': state = STATE_FA; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FA: switch (c) { case 'l': state = STATE_FAL; break; 
default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FAL: switch (c) { case 's': state = STATE_FALS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FALS: switch (c) { case 'e': state = STATE_FALSE; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_FALSE: switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': case '\0': case '[': case ']': case '{': case '}': case ':': case ',': break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_FALSE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_T: switch (c) { case 'r': state = STATE_TR; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_TR: switch (c) { case 'u': state = STATE_TRU; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_TRU: switch (c) { case 'e': state = STATE_TRUE; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_TRUE: switch (c) { case ' ': case '\b': case '\n': case '\r': case '\t': case '\0': case '[': case ']': case '{': case '}': case ':': case ',': break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_TRUE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_LBRACKET: token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_RBRACKET: token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_LBRACE: token_init(&parser->token, parser, TOKEN_TYPE_LBRACE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_RBRACE: token_init(&parser->token, parser, TOKEN_TYPE_RBRACE, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_COLON: token_init(&parser->token, parser, TOKEN_TYPE_COLON, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_COMMA: token_init(&parser->token, parser, TOKEN_TYPE_COMMA, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_CHARS: switch (c) { case '\\': state = STATE_CHAR_ESCAPE; break; case '"': state = STATE_STRING; break; case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: 
token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; default: break; } break; case STATE_CHAR_ESCAPE: switch (c) { case '"': case '\\': case '/': case 'b': case 'n': case 'r': case 't': state = STATE_CHARS; break; case 'u': state = STATE_CHAR_U; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_U: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHAR_UD; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_UD: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHAR_UDD; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_UDD: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHAR_UDDD; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_CHAR_UDDD: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': state = STATE_CHARS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_STRING: token_init(&parser->token, parser, TOKEN_TYPE_STRING, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; case STATE_MINUS: switch (c) { case '0': state = STATE_LEADING_ZERO; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_LEADING_ZERO: switch (c) { case '.': state = STATE_DECIMAL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; case STATE_DIGITS: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case '.': state = STATE_DECIMAL; break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; case STATE_DECIMAL: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_FRAC_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - 
token_pos, token_line, token_col); return true; } break; case STATE_FRAC_DIGITS: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case 'e': case 'E': state = STATE_EXP; break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; case STATE_EXP: switch (c) { case '-': case '+': state = STATE_EXP_SIGN; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_EXP_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_EXP_SIGN: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': state = STATE_EXP_DIGITS; break; default: token_init(&parser->token, parser, TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - token_pos, token_line, token_col); return true; } break; case STATE_EXP_DIGITS: switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; default: token_init(&parser->token, parser, TOKEN_TYPE_NUMBER, token_pos, parser->pos - token_pos, token_line, token_col); state = STATE_ACCEPT; break; } break; default: not_reached(); } if (state != STATE_ACCEPT) { if (c == '\n') { parser->line++; parser->col = 0; } else { parser->col++; } parser->pos++; } } return false; } static bool parser_parse_array(parser_t *parser); static bool parser_parse_object(parser_t *parser); static bool parser_parse_value(parser_t *parser) { switch (parser->token.token_type) { case TOKEN_TYPE_NULL: case TOKEN_TYPE_FALSE: case TOKEN_TYPE_TRUE: case TOKEN_TYPE_STRING: case TOKEN_TYPE_NUMBER: return false; case TOKEN_TYPE_LBRACE: return parser_parse_object(parser); case TOKEN_TYPE_LBRACKET: return parser_parse_array(parser); default: return true; } not_reached(); } static bool parser_parse_pair(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, "Pair should start with string"); if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_COLON: if (parser_tokenize(parser)) { return true; } return parser_parse_value(parser); default: return true; } } static bool parser_parse_values(parser_t *parser) { if (parser_parse_value(parser)) { return true; } while (true) { if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_COMMA: if (parser_tokenize(parser)) { return true; } if (parser_parse_value(parser)) { return true; } break; case TOKEN_TYPE_RBRACKET: return false; default: return true; } } } static bool parser_parse_array(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET, "Array should start with ["); if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_RBRACKET: return false; default: return parser_parse_values(parser); } not_reached(); } static bool parser_parse_pairs(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, "Object should start with string"); if (parser_parse_pair(parser)) { return true; } while (true) { if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_COMMA: if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_STRING: if (parser_parse_pair(parser)) { 
return true; } break; default: return true; } break; case TOKEN_TYPE_RBRACE: return false; default: return true; } } } static bool parser_parse_object(parser_t *parser) { assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE, "Object should start with {"); if (parser_tokenize(parser)) { return true; } switch (parser->token.token_type) { case TOKEN_TYPE_STRING: return parser_parse_pairs(parser); case TOKEN_TYPE_RBRACE: return false; default: return true; } not_reached(); } static bool parser_parse(parser_t *parser) { if (parser_tokenize(parser)) { goto label_error; } if (parser_parse_value(parser)) { goto label_error; } if (parser_tokenize(parser)) { goto label_error; } switch (parser->token.token_type) { case TOKEN_TYPE_EOI: return false; default: goto label_error; } not_reached(); label_error: token_error(&parser->token); return true; } TEST_BEGIN(test_json_parser) { size_t i; const char *invalid_inputs[] = { /* Tokenizer error case tests. */ "{ \"string\": X }", "{ \"string\": nXll }", "{ \"string\": nuXl }", "{ \"string\": nulX }", "{ \"string\": nullX }", "{ \"string\": fXlse }", "{ \"string\": faXse }", "{ \"string\": falXe }", "{ \"string\": falsX }", "{ \"string\": falseX }", "{ \"string\": tXue }", "{ \"string\": trXe }", "{ \"string\": truX }", "{ \"string\": trueX }", "{ \"string\": \"\n\" }", "{ \"string\": \"\\z\" }", "{ \"string\": \"\\uX000\" }", "{ \"string\": \"\\u0X00\" }", "{ \"string\": \"\\u00X0\" }", "{ \"string\": \"\\u000X\" }", "{ \"string\": -X }", "{ \"string\": 0.X }", "{ \"string\": 0.0eX }", "{ \"string\": 0.0e+X }", /* Parser error test cases. */ "{\"string\": }", "{\"string\" }", "{\"string\": [ 0 }", "{\"string\": {\"a\":0, 1 } }", "{\"string\": {\"a\":0: } }", "{", "{}{", }; const char *valid_inputs[] = { /* Token tests. */ "null", "false", "true", "{}", "{\"a\": 0}", "[]", "[0, 1]", "0", "1", "10", "-10", "10.23", "10.23e4", "10.23e-4", "10.23e+4", "10.23E4", "10.23E-4", "10.23E+4", "-10.23", "-10.23e4", "-10.23e-4", "-10.23e+4", "-10.23E4", "-10.23E-4", "-10.23E+4", "\"value\"", "\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"", /* Parser test with various nesting. 
*/ "{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}", }; for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) { const char *input = invalid_inputs[i]; parser_t parser; parser_init(&parser, false); assert_false(parser_append(&parser, input), "Unexpected input appending failure"); assert_true(parser_parse(&parser), "Unexpected parse success for input: %s", input); parser_fini(&parser); } for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) { const char *input = valid_inputs[i]; parser_t parser; parser_init(&parser, true); assert_false(parser_append(&parser, input), "Unexpected input appending failure"); assert_false(parser_parse(&parser), "Unexpected parse error for input: %s", input); parser_fini(&parser); } } TEST_END void write_cb(void *opaque, const char *str) { parser_t *parser = (parser_t *)opaque; if (parser_append(parser, str)) { test_fail("Unexpected input appending failure"); } } TEST_BEGIN(test_stats_print_json) { const char *opts[] = { "J", "Jg", "Jm", "Jd", "Jmd", "Jgd", "Jgm", "Jgmd", "Ja", "Jb", "Jl", "Jx", "Jbl", "Jal", "Jab", "Jabl", "Jax", "Jbx", "Jlx", "Jablx", "Jgmdablx", }; unsigned arena_ind, i; for (i = 0; i < 3; i++) { unsigned j; switch (i) { case 0: break; case 1: { size_t sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl failure"); break; } case 2: { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); mib[1] = arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib failure"); break; } default: not_reached(); } for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) { parser_t parser; parser_init(&parser, true); malloc_stats_print(write_cb, (void *)&parser, opts[j]); assert_false(parser_parse(&parser), "Unexpected parse error, opts=\"%s\"", opts[j]); parser_fini(&parser); } } } TEST_END int main(void) { return test( test_json_parser, test_stats_print_json); } jemalloc-sys-0.3.2/rep/test/unit/test_hooks.c010064400007650000024000000013771344617474100174320ustar0000000000000000#include "test/jemalloc_test.h" static bool hook_called = false; static void hook() { hook_called = true; } static int func_to_hook(int arg1, int arg2) { return arg1 + arg2; } #define func_to_hook JEMALLOC_HOOK(func_to_hook, test_hooks_libc_hook) TEST_BEGIN(unhooked_call) { test_hooks_libc_hook = NULL; hook_called = false; assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); assert_false(hook_called, "Nulling out hook didn't take."); } TEST_END TEST_BEGIN(hooked_call) { test_hooks_libc_hook = &hook; hook_called = false; assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); assert_true(hook_called, "Hook should have executed."); } TEST_END int main(void) { return test( unhooked_call, hooked_call); } jemalloc-sys-0.3.2/rep/test/unit/ticker.c010064400007650000024000000037721344617474100165320ustar0000000000000000#include "test/jemalloc_test.h" #include "jemalloc/internal/ticker.h" TEST_BEGIN(test_ticker_tick) { #define NREPS 2 #define NTICKS 3 ticker_t ticker; int32_t i, j; ticker_init(&ticker, NTICKS); for (i = 0; i < NREPS; i++) { for (j = 0; j < NTICKS; j++) { assert_u_eq(ticker_read(&ticker), NTICKS - j, "Unexpected ticker value (i=%d, j=%d)", i, j); assert_false(ticker_tick(&ticker), "Unexpected ticker fire (i=%d, j=%d)", i, j); } assert_u32_eq(ticker_read(&ticker), 0, "Expected ticker depletion"); 
assert_true(ticker_tick(&ticker), "Expected ticker fire (i=%d)", i); assert_u32_eq(ticker_read(&ticker), NTICKS, "Expected ticker reset"); } #undef NREPS #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_ticks) { #define NTICKS 3 ticker_t ticker; ticker_init(&ticker, NTICKS); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_copy) { #define NTICKS 3 ticker_t ta, tb; ticker_init(&ta, NTICKS); ticker_copy(&tb, &ta); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); ticker_tick(&ta); ticker_copy(&tb, &ta); assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END int main(void) { return test( test_ticker_tick, test_ticker_ticks, test_ticker_copy); } jemalloc-sys-0.3.2/rep/test/unit/tsd.c010064400007650000024000000155311344617474100160370ustar0000000000000000#include "test/jemalloc_test.h" /* * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't * be asserting that we're on one. */ static bool originally_fast; static int data_cleanup_count; void data_cleanup(int *data) { if (data_cleanup_count == 0) { assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT, "Argument passed into cleanup function should match tsd " "value"); } ++data_cleanup_count; /* * Allocate during cleanup for two rounds, in order to assure that * jemalloc's internal tsd reinitialization happens. */ bool reincarnate = false; switch (*data) { case MALLOC_TSD_TEST_DATA_INIT: *data = 1; reincarnate = true; break; case 1: *data = 2; reincarnate = true; break; case 2: return; default: not_reached(); } if (reincarnate) { void *p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); } } static void * thd_start(void *arg) { int d = (int)(uintptr_t)arg; void *p; tsd_t *tsd = tsd_fetch(); assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT, "Initial tsd get should return initialization value"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); tsd_test_data_set(tsd, d); assert_x_eq(tsd_test_data_get(tsd), d, "After tsd set, tsd get should return value that was set"); d = 0; assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg, "Resetting local data should have no effect on tsd"); tsd_test_callback_set(tsd, &data_cleanup); free(p); return NULL; } TEST_BEGIN(test_tsd_main_thread) { thd_start((void *)(uintptr_t)0xa5f3e329); } TEST_END TEST_BEGIN(test_tsd_sub_thread) { thd_t thd; data_cleanup_count = 0; thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT); thd_join(thd, NULL); /* * We reincarnate twice in the data cleanup, so it should execute at * least 3 times. 
*/ assert_x_ge(data_cleanup_count, 3, "Cleanup function should have executed multiple times."); } TEST_END static void * thd_start_reincarnated(void *arg) { tsd_t *tsd = tsd_fetch(); assert(tsd); void *p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); /* Manually trigger reincarnation. */ assert_ptr_not_null(tsd_arena_get(tsd), "Should have tsd arena set."); tsd_cleanup((void *)tsd); assert_ptr_null(*tsd_arenap_get_unsafe(tsd), "TSD arena should have been cleared."); assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory, "TSD state should be purgatory\n"); free(p); assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated, "TSD state should be reincarnated\n"); p = mallocx(1, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected malloc() failure"); assert_ptr_null(*tsd_arenap_get_unsafe(tsd), "Should not have tsd arena set after reincarnation."); free(p); tsd_cleanup((void *)tsd); assert_ptr_null(*tsd_arenap_get_unsafe(tsd), "TSD arena should have been cleared after 2nd cleanup."); return NULL; } TEST_BEGIN(test_tsd_reincarnation) { thd_t thd; thd_create(&thd, thd_start_reincarnated, NULL); thd_join(thd, NULL); } TEST_END typedef struct { atomic_u32_t phase; atomic_b_t error; } global_slow_data_t; static void * thd_start_global_slow(void *arg) { /* PHASE 0 */ global_slow_data_t *data = (global_slow_data_t *)arg; free(mallocx(1, 0)); tsd_t *tsd = tsd_fetch(); /* * No global slowness has happened yet; there was an error if we were * originally fast but aren't now. */ atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd), ATOMIC_SEQ_CST); atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST); /* PHASE 2 */ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) { } free(mallocx(1, 0)); atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST); atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST); /* PHASE 4 */ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) { } free(mallocx(1, 0)); atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST); atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST); /* PHASE 6 */ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) { } free(mallocx(1, 0)); /* Only one decrement so far. */ atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST); atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST); /* PHASE 8 */ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) { } free(mallocx(1, 0)); /* * Both decrements happened; we should be fast again (if we ever * were) */ atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd), ATOMIC_SEQ_CST); atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST); return NULL; } TEST_BEGIN(test_tsd_global_slow) { global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)}; /* * Note that the "mallocx" here (vs. malloc) is important, since the * compiler is allowed to optimize away free(malloc(1)) but not * free(mallocx(1)). */ free(mallocx(1, 0)); tsd_t *tsd = tsd_fetch(); originally_fast = tsd_fast(tsd); thd_t thd; thd_create(&thd, thd_start_global_slow, (void *)&data.phase); /* PHASE 1 */ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) { /* * We don't have a portable condvar/semaphore mechanism. * Spin-wait. 
*/ } assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); tsd_global_slow_inc(tsd_tsdn(tsd)); free(mallocx(1, 0)); assert_false(tsd_fast(tsd), ""); atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST); /* PHASE 3 */ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) { } assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); /* Increase again, so that we can test multiple fast/slow changes. */ tsd_global_slow_inc(tsd_tsdn(tsd)); atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST); free(mallocx(1, 0)); assert_false(tsd_fast(tsd), ""); /* PHASE 5 */ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) { } assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); tsd_global_slow_dec(tsd_tsdn(tsd)); atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST); /* We only decreased once; things should still be slow. */ free(mallocx(1, 0)); assert_false(tsd_fast(tsd), ""); /* PHASE 7 */ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) { } assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); tsd_global_slow_dec(tsd_tsdn(tsd)); atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST); /* We incremented and then decremented twice; we should be fast now. */ free(mallocx(1, 0)); assert_true(!originally_fast || tsd_fast(tsd), ""); /* PHASE 9 */ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) { } assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); thd_join(thd, NULL); } TEST_END int main(void) { /* Ensure tsd bootstrapped. */ if (nallocx(1, 0) == 0) { malloc_printf("Initialization error"); return test_status_fail; } return test_no_reentrancy( test_tsd_main_thread, test_tsd_sub_thread, test_tsd_reincarnation, test_tsd_global_slow); } jemalloc-sys-0.3.2/rep/test/unit/witness.c010064400007650000024000000176421344617474100167460ustar0000000000000000#include "test/jemalloc_test.h" static witness_lock_error_t *witness_lock_error_orig; static witness_owner_error_t *witness_owner_error_orig; static witness_not_owner_error_t *witness_not_owner_error_orig; static witness_depth_error_t *witness_depth_error_orig; static bool saw_lock_error; static bool saw_owner_error; static bool saw_not_owner_error; static bool saw_depth_error; static void witness_lock_error_intercept(const witness_list_t *witnesses, const witness_t *witness) { saw_lock_error = true; } static void witness_owner_error_intercept(const witness_t *witness) { saw_owner_error = true; } static void witness_not_owner_error_intercept(const witness_t *witness) { saw_not_owner_error = true; } static void witness_depth_error_intercept(const witness_list_t *witnesses, witness_rank_t rank_inclusive, unsigned depth) { saw_depth_error = true; } static int witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) { assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); assert(oa == (void *)a); assert(ob == (void *)b); return strcmp(a->name, b->name); } static int witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, void *ob) { assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); assert(oa == (void *)a); assert(ob == (void *)b); return -strcmp(a->name, b->name); } TEST_BEGIN(test_witness) { witness_t a, b; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); witness_init(&a, "a", 1, NULL, NULL); witness_assert_not_owner(&witness_tsdn, &a); witness_lock(&witness_tsdn, &a); 
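/* Witness ranks encode the permitted acquisition order: with "a" (rank 1) held, taking the higher-ranked "b" (rank 2) below is legal, and witness_assert_depth_to_rank() counts only the held witnesses whose rank is at least the given rank. */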
witness_assert_owner(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0); witness_init(&b, "b", 2, NULL, NULL); witness_assert_not_owner(&witness_tsdn, &b); witness_lock(&witness_tsdn, &b); witness_assert_owner(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 2); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); witness_unlock(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); witness_unlock(&witness_tsdn, &b); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); } TEST_END TEST_BEGIN(test_witness_comp) { witness_t a, b, c, d; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, witness_comp, &a); witness_assert_not_owner(&witness_tsdn, &a); witness_lock(&witness_tsdn, &a); witness_assert_owner(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_init(&b, "b", 1, witness_comp, &b); witness_assert_not_owner(&witness_tsdn, &b); witness_lock(&witness_tsdn, &b); witness_assert_owner(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 2); witness_unlock(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 1); witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_init(&c, "c", 1, witness_comp_reverse, &c); witness_assert_not_owner(&witness_tsdn, &c); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(&witness_tsdn, &c); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(&witness_tsdn, &c); witness_assert_depth(&witness_tsdn, 1); saw_lock_error = false; witness_init(&d, "d", 1, NULL, NULL); witness_assert_not_owner(&witness_tsdn, &d); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(&witness_tsdn, &d); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(&witness_tsdn, &d); witness_assert_depth(&witness_tsdn, 1); witness_unlock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_reversal) { witness_t a, b; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, NULL, NULL); witness_init(&b, "b", 2, NULL, NULL); witness_lock(&witness_tsdn, &b); witness_assert_depth(&witness_tsdn, 1); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(&witness_tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(&witness_tsdn, &a); witness_assert_depth(&witness_tsdn, 1); witness_unlock(&witness_tsdn, &b); witness_assert_lockless(&witness_tsdn); witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_recursive) { witness_t a; witness_tsdn_t 
witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_not_owner_error_orig = witness_not_owner_error; witness_not_owner_error = witness_not_owner_error_intercept; saw_not_owner_error = false; witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, NULL, NULL); witness_lock(&witness_tsdn, &a); assert_false(saw_lock_error, "Unexpected witness lock error"); assert_false(saw_not_owner_error, "Unexpected witness not owner error"); witness_lock(&witness_tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); assert_true(saw_not_owner_error, "Expected witness not owner error"); witness_unlock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_not_owner_error = witness_not_owner_error_orig; witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_unlock_not_owned) { witness_t a; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_owner_error_orig = witness_owner_error; witness_owner_error = witness_owner_error_intercept; saw_owner_error = false; witness_assert_lockless(&witness_tsdn); witness_init(&a, "a", 1, NULL, NULL); assert_false(saw_owner_error, "Unexpected owner error"); witness_unlock(&witness_tsdn, &a); assert_true(saw_owner_error, "Expected owner error"); witness_assert_lockless(&witness_tsdn); witness_owner_error = witness_owner_error_orig; } TEST_END TEST_BEGIN(test_witness_depth) { witness_t a; witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; test_skip_if(!config_debug); witness_depth_error_orig = witness_depth_error; witness_depth_error = witness_depth_error_intercept; saw_depth_error = false; witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_init(&a, "a", 1, NULL, NULL); assert_false(saw_depth_error, "Unexpected depth error"); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_lock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); assert_true(saw_depth_error, "Expected depth error"); witness_unlock(&witness_tsdn, &a); witness_assert_lockless(&witness_tsdn); witness_assert_depth(&witness_tsdn, 0); witness_depth_error = witness_depth_error_orig; } TEST_END int main(void) { return test( test_witness, test_witness_comp, test_witness_reversal, test_witness_recursive, test_witness_unlock_not_owned, test_witness_depth); } jemalloc-sys-0.3.2/rep/test/unit/zero.c010064400007650000024000000024011344617474100162160ustar0000000000000000#include "test/jemalloc_test.h" static void test_zero(size_t sz_min, size_t sz_max) { uint8_t *s; size_t sz_prev, sz, i; #define MAGIC ((uint8_t)0x61) sz_prev = 0; s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { assert_u_eq(s[0], MAGIC, "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); assert_u_eq(s[sz_prev-1], MAGIC, "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { assert_u_eq(s[i], 0x0, "Newly allocated byte %zu/%zu isn't zero-filled", i, sz); s[i] = MAGIC; } if (xallocx(s, sz+1, 0, 0) == sz) { s = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); } } dallocx(s, 0); #undef MAGIC } TEST_BEGIN(test_zero_small) { 
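/* test_zero() walks successive size classes, checking that each newly allocated byte is zero-filled before stamping it with MAGIC; zero.sh (below) enables this behavior by setting zero:true in MALLOC_CONF when fill support is compiled in. */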
test_skip_if(!config_fill); test_zero(1, SC_SMALL_MAXCLASS - 1); } TEST_END TEST_BEGIN(test_zero_large) { test_skip_if(!config_fill); test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1)); } TEST_END int main(void) { return test( test_zero_small, test_zero_large); } jemalloc-sys-0.3.2/rep/test/unit/zero.sh010064400007650000024000000001551344617474100164100ustar0000000000000000#!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then export MALLOC_CONF="abort:false,junk:false,zero:true" fi jemalloc-sys-0.3.2/.cargo_vcs_info.json0000644000000001120000000000000134710ustar00{ "git": { "sha1": "35ae4197c25ffb75c53ae0defb0345f092154db4" } }